diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml index 2360f5c645..57d1afc515 100644 --- a/.github/actions/setup/action.yml +++ b/.github/actions/setup/action.yml @@ -3,6 +3,14 @@ inputs: pull_token: description: "Token to use for private repo access" required: true + setup_gcc: + description: "Whether to set up GCC" + required: false + default: 'true' + setup_aws_cli: + description: "Whether to install the AWS CLI" + required: false + default: 'true' runs: using: "composite" steps: @@ -22,11 +30,41 @@ runs: shell: bash run: go version + - name: Check GCC version + id: check-gcc + shell: bash + run: | + if command -v gcc &> /dev/null; then + echo "gcc_exists=true" >> $GITHUB_OUTPUT + echo "gcc_version=$(gcc --version | head -n1 | awk '{print $NF}')" >> $GITHUB_OUTPUT + else + echo "gcc_exists=false" >> $GITHUB_OUTPUT + fi + - name: Setup GCC uses: Dup4/actions-setup-gcc@v1 + if: inputs.setup_gcc == 'true' && steps.check-gcc.outputs.gcc_exists != 'true' with: version: latest + - uses: actions/setup-python@v5 + with: + python-version: '3.10' + + - name: Install AWS CLI v2 + if: inputs.setup_aws_cli == 'true' + shell: bash + run: | + if ! command -v aws &> /dev/null; then + echo "AWS CLI not found. Installing..." + python3 -m pip install --user awscli + echo "$HOME/.local/bin" >> $GITHUB_PATH + else + echo "AWS CLI is already installed." + fi + export PATH="$HOME/.local/bin:$PATH" + aws --version + - name: rust-cache uses: actions/cache@v3 with: @@ -45,3 +83,20 @@ runs: shell: bash run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain 1.81.0 -y + . "$HOME/.cargo/env" + echo "$HOME/.cargo/bin" >> $GITHUB_PATH + + # install pkg-config and openssl + - name: Install pkg-config and openssl + shell: bash + run: | + if ! dpkg -s pkg-config libssl-dev &> /dev/null; then + echo "pkg-config and/or libssl-dev not found. Installing..." + sudo apt-get update + sudo apt-get install -y pkg-config libssl-dev + else + echo "pkg-config and libssl-dev are already installed."
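With the new inputs, callers whose runner image already ships GCC and the AWS CLI can opt out of those steps. A minimal sketch of a consuming job, assuming a secret named PRIVATE_PULL_TOKEN for the required pull_token input (both new flags default to 'true' when omitted):

```yaml
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
        uses: actions/checkout@v4

      - name: Setup CI
        uses: ./.github/actions/setup
        with:
          pull_token: ${{ secrets.PRIVATE_PULL_TOKEN }}  # assumed secret name
          setup_gcc: 'false'      # skip the Dup4/actions-setup-gcc step
          setup_aws_cli: 'false'  # skip the pip-based AWS CLI install
```

Even with setup_gcc left at 'true', the check-gcc step makes the GCC install a no-op when gcc is already on the PATH.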
+ fi + + - name: Set up Docker + uses: docker/setup-buildx-action@v3 diff --git a/.github/runs-on.yml b/.github/runs-on.yml new file mode 100644 index 0000000000..3cc378296f --- /dev/null +++ b/.github/runs-on.yml @@ -0,0 +1,17 @@ +images: + nvidia-linux: + platform: "linux" + arch: "x64" + ami: "ami-0a63dc9cb9e934ba3" + owner: "421253708207" + + dlami-x64: + platform: "linux" + arch: "x64" + owner: "898082745236" # AWS + name: "Deep Learning Base OSS Nvidia Driver GPU AMI (Ubuntu 22.04)*" + +runners: + gpu-nvidia: + family: ["g6.4xlarge"] + image: dlami-x64 \ No newline at end of file diff --git a/.github/workflows/docker-publish-gnark.yml b/.github/workflows/docker-publish-gnark.yml index 237d89aaf6..97863087ea 100644 --- a/.github/workflows/docker-publish-gnark.yml +++ b/.github/workflows/docker-publish-gnark.yml @@ -1,4 +1,3 @@ -# Source: https://raw.githubusercontent.com/foundry-rs/foundry/master/.github/workflows/docker-publish.yml name: docker-gnark on: @@ -9,7 +8,6 @@ on: - "v*.*.*" schedule: - cron: "0 0 * * *" - # Trigger without any parameters a proactive rebuild workflow_dispatch: inputs: tags: @@ -22,83 +20,85 @@ env: IMAGE_NAME: succinctlabs/sp1-gnark jobs: - container: - runs-on: ubuntu-latest - # https://docs.github.com/en/actions/reference/authentication-in-a-workflow + build-amd64: + runs-on: [runs-on, runner=64cpu-linux-x64, spot=false, "run-id=${{ github.run_id }}"] permissions: id-token: write packages: write contents: read - timeout-minutes: 120 steps: - name: Checkout repository - id: checkout uses: actions/checkout@v4 - - - name: Install Docker BuildX + - name: Set up Docker BuildX uses: docker/setup-buildx-action@v3 - id: buildx + - name: Log into registry ${{ env.REGISTRY }} + uses: docker/login-action@v3 with: - install: true + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Build and push AMD64 image + uses: docker/build-push-action@v5 + with: + context: . + file: ./Dockerfile.gnark-ffi + platforms: linux/amd64 + push: true + tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }}-amd64 + cache-from: type=gha + cache-to: type=gha,mode=max - # Login against a Docker registry except on PR - # https://github.com/docker/login-action + build-arm64: + runs-on: [runs-on, runner=64cpu-linux-arm64, spot=false, "run-id=${{ github.run_id }}"] + permissions: + id-token: write + packages: write + contents: read + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Set up Docker BuildX + uses: docker/setup-buildx-action@v3 - name: Log into registry ${{ env.REGISTRY }} - # Ensure this doesn't trigger on PR's - if: github.event_name != 'pull_request' uses: docker/login-action@v3 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - # Extract metadata (tags, labels) for Docker - # https://github.com/docker/metadata-action - - name: Extract Docker metadata - id: meta - uses: docker/metadata-action@v5 + - name: Build and push ARM64 image + uses: docker/build-push-action@v5 with: - images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + context: . 
+ file: ./Dockerfile.gnark-ffi + platforms: linux/arm64 + push: true + tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }}-arm64 + cache-from: type=gha + cache-to: type=gha,mode=max - # Creates an additional 'latest' or 'nightly' tag - # If the job is triggered via cron schedule, tag nightly and nightly-{SHA} - # If the job is triggered via workflow dispatch and on a master branch, tag branch and latest - # Otherwise, just tag as the branch name - - name: Finalize Docker Metadata - id: docker_tagging + create-manifest: + needs: [build-amd64, build-arm64] + runs-on: ubuntu-latest + steps: + - name: Log into registry ${{ env.REGISTRY }} + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Create and push manifest run: | + TAGS="" if [[ "${{ github.event_name }}" == 'workflow_dispatch' ]]; then - echo "manual trigger from workflow_dispatch, assigning tag ${{ github.event.inputs.tags }}" - echo "docker_tags=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.event.inputs.tags }}" >> $GITHUB_OUTPUT + TAGS="${{ github.event.inputs.tags }}" elif [[ "${{ github.event_name }}" == 'schedule' ]]; then - echo "cron trigger, assigning nightly tag" - echo "docker_tags=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:nightly,${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:nightly-${GITHUB_SHA}" >> $GITHUB_OUTPUT + TAGS="nightly nightly-${{ github.sha }}" else - echo "Neither scheduled nor manual release from main branch. Just tagging as branch name" - echo "docker_tags=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${GITHUB_REF##*/}" >> $GITHUB_OUTPUT + TAGS="${GITHUB_REF##*/}" fi - - # Log docker metadata to explicitly know what is being pushed - - name: Inspect Docker Metadata - run: | - echo "TAGS -> ${{ steps.docker_tagging.outputs.docker_tags }}" - echo "LABELS -> ${{ steps.meta.outputs.labels }}" - - # Build and push Docker image - # https://github.com/docker/build-push-action - # https://github.com/docker/build-push-action/blob/master/docs/advanced/cache.md - - name: Build and push Docker image - uses: docker/build-push-action@v6 - with: - context: . 
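Where the old single docker/build-push-action job built linux/amd64,linux/arm64 in one go, the new create-manifest job stitches the ${{ github.sha }}-amd64 and ${{ github.sha }}-arm64 images into each final tag with docker buildx imagetools create. A sketch of an optional verification step that could follow it, assuming the registry is ghcr.io and a nightly tag was just published:

```yaml
      - name: Inspect multi-arch manifest
        run: |
          # Should list both a linux/amd64 and a linux/arm64 entry under Manifests.
          docker buildx imagetools inspect ghcr.io/succinctlabs/sp1-gnark:nightly
```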
- file: ./Dockerfile.gnark-ffi - platforms: linux/amd64,linux/arm64 - push: true - tags: ${{ steps.docker_tagging.outputs.docker_tags }} - labels: ${{ steps.meta.outputs.labels }} - cache-from: type=gha - cache-to: type=gha,mode=max - build-args: | - BUILDTIME=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.created'] }} - VERSION=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.version'] }} - REVISION=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }} + + for TAG in $TAGS; do + docker buildx imagetools create -t ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:$TAG \ + ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }}-amd64 \ + ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }}-arm64 + done \ No newline at end of file diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 5095967882..035d636735 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -18,37 +18,6 @@ concurrency: cancel-in-progress: true jobs: - test-fast: - name: Test (fast-experimental) - runs-on: runs-on,runner=64cpu-linux-x64,spot=false - env: - CARGO_NET_GIT_FETCH_WITH_CLI: "true" - steps: - - name: Checkout sources - uses: actions/checkout@v4 - - - name: Setup CI - uses: ./.github/actions/setup - - - name: Run cargo check - uses: actions-rs/cargo@v1 - with: - command: check - toolchain: 1.81.0 - args: --all-targets --all-features - - - name: Run cargo test core-v2 - uses: actions-rs/cargo@v1 - with: - command: test - toolchain: 1.81.0 - args: --release --package sp1-recursion-core-v2 --package sp1-recursion-circuit-v2 --features native-gnark - env: - RUSTFLAGS: -Copt-level=3 -Cdebug-assertions -Coverflow-checks=y -Cdebuginfo=0 -C target-cpu=native - RUST_BACKTRACE: 1 - FRI_QUERIES: 1 - SP1_DEV: 1 - test-x86: name: Test (x86-64) runs-on: @@ -83,7 +52,7 @@ jobs: env: RUSTFLAGS: -Copt-level=3 -Cdebug-assertions -Coverflow-checks=y -Cdebuginfo=0 -C target-cpu=native RUST_BACKTRACE: 1 - FRI_QUERIES: 1 + # FRI_QUERIES: 1 SP1_DEV: 1 test-arm: @@ -120,7 +89,7 @@ jobs: env: RUSTFLAGS: -Copt-level=3 -Cdebug-assertions -Coverflow-checks=y -Cdebuginfo=0 -C target-cpu=native RUST_BACKTRACE: 1 - FRI_QUERIES: 1 + # FRI_QUERIES: 1 SP1_DEV: 1 lint: @@ -151,18 +120,18 @@ jobs: env: CARGO_INCREMENTAL: 1 - - name: Add wasm target - run: rustup target add wasm32-unknown-unknown + # - name: Add wasm target + # run: rustup target add wasm32-unknown-unknown - - name: Check wasm compatibility for sdk - uses: actions-rs/cargo@v1 - with: - command: check - args: -p sp1-sdk --target wasm32-unknown-unknown --no-default-features + # - name: Check wasm compatibility for sdk + # uses: actions-rs/cargo@v1 + # with: + # command: check + # args: -p sp1-sdk --target wasm32-unknown-unknown --no-default-features examples: name: Examples - runs-on: [runs-on, runner=64cpu-linux-x64, "run-id=${{ github.run_id }}"] + runs-on: [runs-on, runner=64cpu-linux-x64, spot=false, "run-id=${{ github.run_id }}"] env: CARGO_NET_GIT_FETCH_WITH_CLI: "true" steps: @@ -203,7 +172,7 @@ jobs: - name: Install SP1 toolchain run: | - cargo install --locked --path crates/cli + cargo install --locked --force --path crates/cli cargo prove install-toolchain - name: Run cargo fmt @@ -295,127 +264,121 @@ jobs: --commit-hash "${{ github.sha }}" \ --author "${{ github.event.pull_request.user.login || github.actor }}" - low-memory: - name: Low Memory - strategy: - matrix: - mem_limit: [16, 32, 64] - runs-on: - [ - runs-on, - "ram=${{ matrix.mem_limit}}", - family=c7a, 
- image=ubuntu22-full-x64, - "run-id=${{ github.run_id }}", - ] - env: - CARGO_NET_GIT_FETCH_WITH_CLI: "true" - steps: - - name: Checkout sources - uses: actions/checkout@v4 - - - name: Setup CI - uses: ./.github/actions/setup - - - name: Install SP1 toolchain - run: | - curl -L https://sp1.succinct.xyz | bash - ~/.sp1/bin/sp1up - ~/.sp1/bin/cargo-prove prove --version - - - name: Install SP1 CLI - run: | - cd crates/cli - cargo install --force --locked --path . - cd ~ - - - name: Run tendermint script - run: | - cd examples/tendermint/program - cargo add sp1-zkvm --path $GITHUB_WORKSPACE/crates/zkvm/entrypoint - cargo prove build - cd ../script - cargo remove sp1-sdk - cargo add sp1-sdk --path $GITHUB_WORKSPACE/crates/sdk - SP1_DEV=1 RUST_LOG=info cargo run --release - - - name: Run cycle tracking script - run: | - cd examples/cycle-tracking/script - cargo add sp1-sdk --path $GITHUB_WORKSPACE/crates/sdk - SP1_DEV=1 RUST_LOG=info cargo run --release - - toolchain-test: - name: "Test toolchain installation (${{ matrix.name }})" - strategy: - fail-fast: false - matrix: - include: - - name: "Ubuntu 24.04 (x86_64)" - runner: "ubuntu-24.04" - template: evm - - name: "Ubuntu 22.04 (x86_64)" - runner: "ubuntu-22.04" - template: bare - - name: "Ubuntu 20.04 (x86_64)" - runner: "ubuntu-20.04" - template: evm - - name: "macOS Monterey (x86_64)" - runner: "macos-12" - template: evm - - name: "macOS Ventura (x86_64)" - runner: "macos-13" - template: bare - - name: "macOS Sonoma (ARM64)" - runner: "macos-14" - template: evm - - runs-on: "${{ matrix.runner }}" - steps: - - name: "Checkout source code" - uses: "actions/checkout@v4" - - - name: "Install cargo-prove" - run: | - cargo install --locked --path ./crates/cli - - - name: "Install SP1 toolchain" - run: | - cargo prove install-toolchain --token ${{ secrets.GITHUB_TOKEN }} - - - name: "Create SP1 project from template" - run: | - cargo prove new --${{ matrix.template }} hello - - - name: "Build SP1 project" - run: | - cd ./hello/program - cargo prove build - - toolchain-test-ec2: - name: "Test toolchain installation (${{ matrix.name }})" - strategy: - fail-fast: false - matrix: - include: - # AMI from `us-east-1` - - name: "Debian 12 (x86_64)" - ec2-instance: "c5.2xlarge" - ami: "ami-064519b8c76274859" - volume: "/dev/xvda" - - name: "Debian 12 (ARM64)" - ec2-instance: "c6g.2xlarge" - ami: "ami-0789039e34e739d67" - volume: "/dev/xvda" - uses: "./.github/workflows/toolchain-ec2.yml" - with: - image-id: "${{ matrix.ami }}" - instance-type: "${{ matrix.ec2-instance }}" - root-volume: "${{ matrix.volume }}" - secrets: - AWS_REGION: "${{ secrets.AWS_REGION }}" - AWS_ACCESS_KEY_ID: "${{ secrets.AWS_ACCESS_KEY_ID }}" - AWS_SECRET_ACCESS_KEY: "${{ secrets.AWS_SECRET_ACCESS_KEY }}" - AWS_SUBNET_ID: "${{ secrets.AWS_SUBNET_ID }}" - AWS_SG_ID: "${{ secrets.AWS_SG_ID }}" - GH_PAT: "${{ secrets.GH_PAT }}" + # low-memory: + # name: Low Memory + # strategy: + # matrix: + # mem_limit: [16, 32, 64] + # runs-on: + # [ + # runs-on, + # "ram=${{ matrix.mem_limit}}", + # family=c7a, + # image=ubuntu22-full-x64, + # "run-id=${{ github.run_id }}", + # ] + # env: + # CARGO_NET_GIT_FETCH_WITH_CLI: "true" + # steps: + # - name: Checkout sources + # uses: actions/checkout@v4 + + # - name: Setup CI + # uses: ./.github/actions/setup + + # - name: Install SP1 toolchain + # run: | + # curl -L https://sp1.succinct.xyz | bash + # ~/.sp1/bin/sp1up + # ~/.sp1/bin/cargo-prove prove --version + + # - name: Install SP1 CLI + # run: | + # cd crates/cli + # cargo install --force --locked 
--path . + # cd ~ + + # - name: Run tendermint script + # run: | + # cd examples/tendermint/program + # cargo add sp1-zkvm --path $GITHUB_WORKSPACE/crates/zkvm/entrypoint + # cargo prove build + # cd ../script + # cargo remove sp1-sdk + # cargo add sp1-sdk --path $GITHUB_WORKSPACE/crates/sdk + # SP1_DEV=1 RUST_LOG=info cargo run --release + + # - name: Run cycle tracking script + # run: | + # cd examples/cycle-tracking/script + # cargo add sp1-sdk --path $GITHUB_WORKSPACE/crates/sdk + # SP1_DEV=1 RUST_LOG=info cargo run --release + + # toolchain-test: + # name: "Test toolchain installation (${{ matrix.name }})" + # strategy: + # fail-fast: false + # matrix: + # include: + # - name: "Ubuntu 24.04 (x86_64)" + # runner: "ubuntu-24.04" + # - name: "Ubuntu 22.04 (x86_64)" + # runner: "ubuntu-22.04" + # - name: "Ubuntu 20.04 (x86_64)" + # runner: "ubuntu-20.04" + # - name: "macOS Monterey (x86_64)" + # runner: "macos-12" + # - name: "macOS Ventura (x86_64)" + # runner: "macos-13" + # - name: "macOS Sonoma (ARM64)" + # runner: "macos-14" + + # runs-on: "${{ matrix.runner }}" + # steps: + # - name: "Checkout source code" + # uses: "actions/checkout@v4" + + # - name: "Install cargo-prove" + # run: | + # cargo install --locked --path ./crates/cli + + # - name: "Install SP1 toolchain" + # run: | + # cargo prove install-toolchain --token ${{ secrets.GITHUB_TOKEN }} + + # - name: "Create SP1 project from template" + # run: | + # cargo prove new hello + + # - name: "Build SP1 project" + # run: | + # cd ./hello/program + # cargo prove build + + # toolchain-test-ec2: + # name: "Test toolchain installation (${{ matrix.name }})" + # strategy: + # fail-fast: false + # matrix: + # include: + # # AMI from `us-east-1` + # - name: "Debian 12 (x86_64)" + # ec2-instance: "c5.2xlarge" + # ami: "ami-064519b8c76274859" + # volume: "/dev/xvda" + # - name: "Debian 12 (ARM64)" + # ec2-instance: "c6g.2xlarge" + # ami: "ami-0789039e34e739d67" + # volume: "/dev/xvda" + # uses: "./.github/workflows/toolchain-ec2.yml" + # with: + # image-id: "${{ matrix.ami }}" + # instance-type: "${{ matrix.ec2-instance }}" + # root-volume: "${{ matrix.volume }}" + # secrets: + # AWS_REGION: "${{ secrets.AWS_REGION }}" + # AWS_ACCESS_KEY_ID: "${{ secrets.AWS_ACCESS_KEY_ID }}" + # AWS_SECRET_ACCESS_KEY: "${{ secrets.AWS_SECRET_ACCESS_KEY }}" + # AWS_SUBNET_ID: "${{ secrets.AWS_SUBNET_ID }}" + # AWS_SG_ID: "${{ secrets.AWS_SG_ID }}" + # GH_PAT: "${{ secrets.GH_PAT }}" diff --git a/.github/workflows/suite.yml b/.github/workflows/suite.yml new file mode 100644 index 0000000000..8d1fa3831e --- /dev/null +++ b/.github/workflows/suite.yml @@ -0,0 +1,159 @@ +name: Testing Suite + +on: + workflow_dispatch: + inputs: + cpu_workloads: + description: "list of cpu workloads to run" + required: true + cuda_workloads: + description: "list of cuda workloads to run" + required: true + network_workloads: + description: "list of network workloads to run" + required: true + merge_group: + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}- + cancel-in-progress: false + +jobs: + test-cpu: + if: ${{ fromJSON(github.event.inputs.cpu_workloads)[0] != null }} + strategy: + fail-fast: false + matrix: + workload: ${{ fromJSON(github.event.inputs.cpu_workloads) }} + name: ${{ matrix.workload }} (cpu) + runs-on: ["runs-on", "runner=64cpu-linux-x64", "spot=false", "run-id=${{ github.run_id }}"] + steps: + - name: Checkout sources + uses: actions/checkout@v4 + + - name: Setup CI + uses: ./.github/actions/setup + + - name: 
Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_S3 }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_S3 }} + aws-region: us-west-2 + + - name: Copy files from S3 + run: | + mkdir -p workdir + aws s3 cp s3://sp1-testing-suite/${{ matrix.workload }}/program.bin workdir/program.bin + aws s3 cp s3://sp1-testing-suite/${{ matrix.workload }}/stdin.bin workdir/stdin.bin + + - name: Clean Cargo cache + uses: actions-rs/cargo@v1 + with: + command: clean + toolchain: 1.81.0 + + - name: Run sp1-perf + uses: actions-rs/cargo@v1 + with: + command: run + toolchain: 1.81.0 + args: --release -p sp1-perf -- --program workdir/program.bin --stdin workdir/stdin.bin --mode cpu + env: + RUST_LOG: info + RUSTFLAGS: -Copt-level=3 -Ctarget-cpu=native + RUST_BACKTRACE: 1 + + test-cuda: + if: ${{ fromJSON(github.event.inputs.cuda_workloads)[0] != null }} + strategy: + fail-fast: false + matrix: + workload: ${{ fromJSON(github.event.inputs.cuda_workloads) }} + name: ${{ matrix.workload }} (gpu) + runs-on: ["runs-on", "family=g6.4xlarge", "hdd=200", "ami=ami-0a63dc9cb9e934ba3", "spot=false", "run-id=${{ github.run_id }}"] + steps: + - name: Checkout sources + uses: actions/checkout@v4 + + - name: Setup CI + uses: ./.github/actions/setup + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_S3 }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_S3 }} + aws-region: us-west-2 + + - name: Copy files from S3 + run: | + mkdir -p workdir + aws s3 cp s3://sp1-testing-suite/${{ matrix.workload }}/program.bin workdir/program.bin + aws s3 cp s3://sp1-testing-suite/${{ matrix.workload }}/stdin.bin workdir/stdin.bin + + - name: Clean Cargo cache + uses: actions-rs/cargo@v1 + with: + command: clean + toolchain: 1.81.0 + + - name: Run sp1-perf + uses: actions-rs/cargo@v1 + with: + command: run + toolchain: 1.81.0 + args: --release -p sp1-perf -- --program workdir/program.bin --stdin workdir/stdin.bin --mode cuda + env: + RUST_LOG: debug + RUSTFLAGS: -Copt-level=3 -Ctarget-cpu=native + RUST_BACKTRACE: 1 + SP1_PROVER: cuda + + test-network: + if: ${{ fromJSON(github.event.inputs.network_workloads)[0] != null }} + strategy: + fail-fast: false + matrix: + workload: ${{ fromJSON(github.event.inputs.network_workloads) }} + name: ${{ matrix.workload }} (network) + runs-on: ["runs-on", "runner=16cpu-linux-x64", "spot=false", "run-id=${{ github.run_id }}"] + steps: + - name: Checkout sources + uses: actions/checkout@v4 + + - name: Setup CI + uses: ./.github/actions/setup + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_S3 }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_S3 }} + aws-region: us-west-2 + + - name: Copy files from S3 + run: | + mkdir -p workdir + aws s3 cp s3://sp1-testing-suite/${{ matrix.workload }}/program.bin workdir/program.bin + aws s3 cp s3://sp1-testing-suite/${{ matrix.workload }}/stdin.bin workdir/stdin.bin + + - name: Clean Cargo cache + uses: actions-rs/cargo@v1 + with: + command: clean + toolchain: 1.81.0 + + - name: Run sp1-perf + uses: actions-rs/cargo@v1 + with: + command: run + toolchain: 1.81.0 + args: --release -p sp1-perf --features native-gnark -- --program workdir/program.bin --stdin workdir/stdin.bin --mode network + env: + RUST_LOG: info + RUSTFLAGS: -Copt-level=3 -Ctarget-cpu=native + RUST_BACKTRACE: 
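Each of the suite jobs is gated on fromJSON(github.event.inputs.*_workloads)[0] != null, so passing an empty JSON array skips that matrix entirely. A minimal sketch of dispatching the suite with the GitHub CLI; the workload names are placeholders, not real entries from the sp1-testing-suite bucket:

```yaml
      # Hypothetical dispatcher step (e.g. in another workflow); the same command
      # works from a local shell with gh authenticated against this repository.
      - name: Kick off testing suite
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          gh workflow run suite.yml \
            -f cpu_workloads='["fibonacci", "tendermint"]' \
            -f cuda_workloads='[]' \
            -f network_workloads='[]'
```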
1 + SP1_PROVER: network + SP1_PRIVATE_KEY: ${{ secrets.SP1_PRIVATE_KEY }} + PROVER_NETWORK_RPC: https://rpc-staging.succinct.xyz \ No newline at end of file diff --git a/.gitignore b/.gitignore index 2b4a7389e1..3dab66b55a 100644 --- a/.gitignore +++ b/.gitignore @@ -14,6 +14,8 @@ pgo-data.profdata # Proofs **/proof-with-pis.bin **/proof-with-io.bin +**/program.bin +**/stdin.bin # Benchmark benchmark.csv @@ -25,4 +27,10 @@ benchmark.csv recursion/gnark-ffi/build crates/prover/build crates/prover/data -crates/prover/*.tar.gz \ No newline at end of file +crates/prover/*.tar.gz +crates/prover/Groth16Verifier.sol +crates/prover/pk +crates/prover/powersOfTau28_hez_final.ptau +crates/prover/semaphore-gnark-11 +crates/prover/trusted-setup +crates/prover/vk \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index 7e4fd0ca18..d1500a8b1a 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -53,6 +53,7 @@ // "tests/secp256k1-add/Cargo.toml", // "tests/secp256k1-decompress/Cargo.toml", // "tests/secp256k1-double/Cargo.toml", + // "tests/common/Cargo.toml", // "tests/sha-compress/Cargo.toml", // "tests/sha-extend/Cargo.toml", // "tests/sha2/Cargo.toml", diff --git a/Cargo.lock b/Cargo.lock index 5a02f5b027..94979e2078 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -25,18 +25,18 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.22.0" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] [[package]] -name = "adler" -version = "1.0.2" +name = "adler2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "aead" @@ -137,7 +137,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "629b62e38d471cc15fea534eb7283d2f8a4e8bdb1811bcc5d66dda6cfce6fae1" dependencies = [ "alloy-eips", - "alloy-primitives 0.8.5", + "alloy-primitives 0.8.8", "alloy-rlp", "alloy-serde", "c-kzg", @@ -150,7 +150,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" dependencies = [ - "alloy-primitives 0.8.5", + "alloy-primitives 0.8.8", "alloy-rlp", "serde", ] @@ -161,7 +161,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04" dependencies = [ - "alloy-primitives 0.8.5", + "alloy-primitives 0.8.8", "alloy-rlp", "serde", ] @@ -174,7 +174,7 @@ checksum = "f923dd5fca5f67a43d81ed3ebad0880bd41f6dd0ada930030353ac356c54cd0f" dependencies = [ "alloy-eip2930", "alloy-eip7702", - "alloy-primitives 0.8.5", + "alloy-primitives 0.8.8", "alloy-rlp", "alloy-serde", "c-kzg", @@ -190,8 +190,8 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3c717b5298fad078cd3a418335b266eba91b511383ca9bd497f742d5975d5ab" dependencies = [ - "alloy-primitives 0.8.5", - "alloy-sol-types 0.8.5", + "alloy-primitives 0.8.8", + "alloy-sol-types 0.8.8", "serde", "serde_json", "thiserror", @@ -208,11 +208,11 @@ dependencies = [ "alloy-eips", "alloy-json-rpc", "alloy-network-primitives", - "alloy-primitives 
0.8.5", + "alloy-primitives 0.8.8", "alloy-rpc-types-eth", "alloy-serde", "alloy-signer", - "alloy-sol-types 0.8.5", + "alloy-sol-types 0.8.8", "async-trait", "auto_impl", "futures-utils-wasm", @@ -226,7 +226,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94ad40869867ed2d9cd3842b1e800889e5b49e6b92da346e93862b4a741bedf3" dependencies = [ "alloy-eips", - "alloy-primitives 0.8.5", + "alloy-primitives 0.8.8", "alloy-serde", "serde", ] @@ -238,7 +238,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccb3ead547f4532bc8af961649942f0b9c16ee9226e26caa3f38420651cc0bf4" dependencies = [ "alloy-rlp", - "bytes 1.7.1", + "bytes 1.7.2", "cfg-if", "const-hex", "derive_more 0.99.18", @@ -255,16 +255,17 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.5" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "260d3ff3bff0bb84599f032a2f2c6828180b0ea0cd41fdaf44f39cef3ba41861" +checksum = "38f35429a652765189c1c5092870d8360ee7b7769b09b06d89ebaefd34676446" dependencies = [ "alloy-rlp", - "bytes 1.7.1", + "bytes 1.7.2", "cfg-if", "const-hex", "derive_more 1.0.0", - "hashbrown 0.14.5", + "foldhash", + "hashbrown 0.15.0", "hex-literal", "indexmap 2.6.0", "itoa", @@ -288,7 +289,7 @@ checksum = "26154390b1d205a4a7ac7352aa2eb4f81f391399d4e2f546fb81a2f8bb383f62" dependencies = [ "alloy-rlp-derive", "arrayvec", - "bytes 1.7.1", + "bytes 1.7.2", ] [[package]] @@ -299,7 +300,7 @@ checksum = "4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -311,10 +312,10 @@ dependencies = [ "alloy-consensus", "alloy-eips", "alloy-network-primitives", - "alloy-primitives 0.8.5", + "alloy-primitives 0.8.8", "alloy-rlp", "alloy-serde", - "alloy-sol-types 0.8.5", + "alloy-sol-types 0.8.8", "cfg-if", "derive_more 1.0.0", "hashbrown 0.14.5", @@ -329,7 +330,7 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731f75ec5d383107fd745d781619bd9cedf145836c51ecb991623d41278e71fa" dependencies = [ - "alloy-primitives 0.8.5", + "alloy-primitives 0.8.8", "serde", "serde_json", ] @@ -340,7 +341,7 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "307324cca94354cd654d6713629f0383ec037e1ff9e3e3d547212471209860c0" dependencies = [ - "alloy-primitives 0.8.5", + "alloy-primitives 0.8.8", "async-trait", "auto_impl", "elliptic-curve 0.13.8", @@ -356,7 +357,7 @@ checksum = "9fabe917ab1778e760b4701628d1cae8e028ee9d52ac6307de4e1e9286ab6b5f" dependencies = [ "alloy-consensus", "alloy-network", - "alloy-primitives 0.8.5", + "alloy-primitives 0.8.8", "alloy-signer", "async-trait", "k256", @@ -375,21 +376,21 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] name = "alloy-sol-macro" -version = "0.8.5" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68e7f6e8fe5b443f82b3f1e15abfa191128f71569148428e49449d01f6f49e8b" +checksum = "3b2395336745358cc47207442127c47c63801a7065ecc0aa928da844f8bb5576" dependencies = [ - "alloy-sol-macro-expander 0.8.5", - "alloy-sol-macro-input 0.8.5", + "alloy-sol-macro-expander 0.8.8", + "alloy-sol-macro-input 0.8.8", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -405,26 +406,26 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.72", + 
"syn 2.0.79", "syn-solidity 0.7.7", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-expander" -version = "0.8.5" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b96ce28d2fde09abb6135f410c41fad670a3a770b6776869bd852f1df102e6f" +checksum = "9ed5047c9a241df94327879c2b0729155b58b941eae7805a7ada2e19436e6b39" dependencies = [ - "alloy-sol-macro-input 0.8.5", + "alloy-sol-macro-input 0.8.8", "const-hex", "heck", "indexmap 2.6.0", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.72", - "syn-solidity 0.8.5", + "syn 2.0.79", + "syn-solidity 0.8.8", "tiny-keccak", ] @@ -439,23 +440,23 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", "syn-solidity 0.7.7", ] [[package]] name = "alloy-sol-macro-input" -version = "0.8.5" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "906746396a8296537745711630d9185746c0b50c033d5e9d18b0a6eba3d53f90" +checksum = "5dee02a81f529c415082235129f0df8b8e60aa1601b9c9298ffe54d75f57210b" dependencies = [ "const-hex", "dunce", "heck", "proc-macro2", "quote", - "syn 2.0.72", - "syn-solidity 0.8.5", + "syn 2.0.79", + "syn-solidity 0.8.8", ] [[package]] @@ -472,12 +473,12 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "0.8.5" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d86a533ce22525969661b25dfe296c112d35eb6861f188fd284f8bd4bb3842ae" +checksum = "c2841af22d99e2c0f82a78fe107b6481be3dd20b89bfb067290092794734343a" dependencies = [ - "alloy-primitives 0.8.5", - "alloy-sol-macro 0.8.5", + "alloy-primitives 0.8.8", + "alloy-sol-macro 0.8.8", "const-hex", ] @@ -562,9 +563,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" dependencies = [ "backtrace", ] @@ -603,7 +604,7 @@ dependencies = [ "num-bigint 0.4.6", "num-traits", "paste", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "zeroize", ] @@ -695,15 +696,15 @@ dependencies = [ [[package]] name = "arrayref" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "async-attributes" @@ -740,13 +741,13 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7ebdfa2ebdab6b1760375fa7d6f382b9f486eac35fc994625a00e89280bdbb7" +checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" dependencies = [ "async-task", "concurrent-queue", - "fastrand 2.1.0", + "fastrand 2.1.1", "futures-lite 2.3.0", "slab", ] @@ -759,61 +760,32 @@ checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ "async-channel 2.3.1", "async-executor", - "async-io 2.3.4", - "async-lock 3.4.0", + "async-io", + "async-lock", 
"blocking", "futures-lite 2.3.0", "once_cell", ] -[[package]] -name = "async-io" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" -dependencies = [ - "async-lock 2.8.0", - "autocfg", - "cfg-if", - "concurrent-queue", - "futures-lite 1.13.0", - "log", - "parking", - "polling 2.8.0", - "rustix 0.37.27", - "slab", - "socket2 0.4.10", - "waker-fn", -] - [[package]] name = "async-io" version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" dependencies = [ - "async-lock 3.4.0", + "async-lock", "cfg-if", "concurrent-queue", "futures-io", "futures-lite 2.3.0", "parking", - "polling 3.7.3", - "rustix 0.38.34", + "polling", + "rustix", "slab", "tracing", "windows-sys 0.59.0", ] -[[package]] -name = "async-lock" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" -dependencies = [ - "event-listener 2.5.3", -] - [[package]] name = "async-lock" version = "3.4.0" @@ -827,21 +799,21 @@ dependencies = [ [[package]] name = "async-std" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" dependencies = [ "async-attributes", "async-channel 1.9.0", "async-global-executor", - "async-io 1.13.0", - "async-lock 2.8.0", + "async-io", + "async-lock", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite 1.13.0", - "gloo-timers", + "futures-lite 2.3.0", + "gloo-timers 0.3.0", "kv-log-macro", "log", "memchr", @@ -871,7 +843,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -895,13 +867,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.81" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -925,7 +897,7 @@ checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" dependencies = [ "futures", "pharos", - "rustc_version 0.4.0", + "rustc_version 0.4.1", ] [[package]] @@ -942,20 +914,20 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "aws-config" -version = "1.5.7" +version = "1.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8191fb3091fa0561d1379ef80333c3c7191c6f0435d986e85821bcf7acbd1126" +checksum = "7198e6f03240fdceba36656d8be440297b6b82270325908c7381f37d826a74f6" dependencies = [ "aws-credential-types", "aws-runtime", @@ -969,8 +941,8 @@ dependencies = [ 
"aws-smithy-runtime-api", "aws-smithy-types", "aws-types", - "bytes 1.7.1", - "fastrand 2.1.0", + "bytes 1.7.2", + "fastrand 2.1.1", "hex", "http 0.2.12", "ring 0.17.8", @@ -1008,8 +980,8 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", - "bytes 1.7.1", - "fastrand 2.1.0", + "bytes 1.7.2", + "fastrand 2.1.1", "http 0.2.12", "http-body 0.4.6", "once_cell", @@ -1021,9 +993,9 @@ dependencies = [ [[package]] name = "aws-sdk-s3" -version = "1.53.0" +version = "1.56.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43fad71130014e11f42fadbdcce5df12ee61866f8ab9bad773b138d4b3c11087" +checksum = "cecd672c8d4265fd4fbecacd4a479180e616881bbe639250cf81ddb604e4c301" dependencies = [ "ahash", "aws-credential-types", @@ -1039,8 +1011,8 @@ dependencies = [ "aws-smithy-types", "aws-smithy-xml", "aws-types", - "bytes 1.7.1", - "fastrand 2.1.0", + "bytes 1.7.2", + "fastrand 2.1.1", "hex", "hmac 0.12.1", "http 0.2.12", @@ -1056,9 +1028,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.44.0" +version = "1.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b90cfe6504115e13c41d3ea90286ede5aa14da294f3fe077027a6e83850843c" +checksum = "0dc2faec3205d496c7e57eff685dd944203df7ce16a4116d0281c44021788a7b" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1069,7 +1041,7 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", - "bytes 1.7.1", + "bytes 1.7.2", "http 0.2.12", "once_cell", "regex-lite", @@ -1078,9 +1050,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.45.0" +version = "1.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "167c0fad1f212952084137308359e8e4c4724d1c643038ce163f06de9662c1d0" +checksum = "c93c241f52bc5e0476e259c953234dab7e2a35ee207ee202e86c0095ec4951dc" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1091,7 +1063,7 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", - "bytes 1.7.1", + "bytes 1.7.2", "http 0.2.12", "once_cell", "regex-lite", @@ -1100,9 +1072,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.44.0" +version = "1.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb5f98188ec1435b68097daa2a37d74b9d17c9caa799466338a8d1544e71b9d" +checksum = "b259429be94a3459fa1b00c5684faee118d74f9577cc50aebadc36e507c63b5f" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1132,7 +1104,7 @@ dependencies = [ "aws-smithy-http", "aws-smithy-runtime-api", "aws-smithy-types", - "bytes 1.7.1", + "bytes 1.7.2", "crypto-bigint 0.5.5", "form_urlencoded", "hex", @@ -1169,7 +1141,7 @@ checksum = "598b1689d001c4d4dc3cb386adb07d37786783aee3ac4b324bcadac116bf3d23" dependencies = [ "aws-smithy-http", "aws-smithy-types", - "bytes 1.7.1", + "bytes 1.7.2", "crc32c", "crc32fast", "hex", @@ -1189,7 +1161,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cef7d0a272725f87e51ba2bf89f8c21e4df61b9e49ae1ac367a6d69916ef7c90" dependencies = [ "aws-smithy-types", - "bytes 1.7.1", + "bytes 1.7.2", "crc32fast", ] @@ -1202,7 +1174,7 @@ dependencies = [ "aws-smithy-eventstream", "aws-smithy-runtime-api", "aws-smithy-types", - "bytes 1.7.1", + "bytes 1.7.2", "bytes-utils", "futures-core", "http 0.2.12", @@ -1235,22 +1207,22 @@ dependencies = [ [[package]] name = "aws-smithy-runtime" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d1ce695746394772e7000b39fe073095db6d45a862d0767dd5ad0ac0d7f8eb87" +checksum = "a065c0fe6fdbdf9f11817eb68582b2ab4aff9e9c39e986ae48f7ec576c6322db" dependencies = [ "aws-smithy-async", "aws-smithy-http", "aws-smithy-runtime-api", "aws-smithy-types", - "bytes 1.7.1", - "fastrand 2.1.0", + "bytes 1.7.2", + "fastrand 2.1.1", "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", "http-body 1.0.1", "httparse", - "hyper 0.14.30", + "hyper 0.14.31", "hyper-rustls 0.24.2", "once_cell", "pin-project-lite", @@ -1268,7 +1240,7 @@ checksum = "e086682a53d3aa241192aa110fa8dfce98f2f5ac2ead0de84d41582c7e8fdb96" dependencies = [ "aws-smithy-async", "aws-smithy-types", - "bytes 1.7.1", + "bytes 1.7.2", "http 0.2.12", "http 1.1.0", "pin-project-lite", @@ -1284,7 +1256,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147100a7bea70fa20ef224a6bad700358305f5dc0f84649c53769761395b355b" dependencies = [ "base64-simd", - "bytes 1.7.1", + "bytes 1.7.2", "bytes-utils", "futures-core", "http 0.2.12", @@ -1322,7 +1294,7 @@ dependencies = [ "aws-smithy-async", "aws-smithy-runtime-api", "aws-smithy-types", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "tracing", ] @@ -1334,12 +1306,12 @@ checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" dependencies = [ "async-trait", "axum-core", - "bytes 1.7.1", + "bytes 1.7.2", "futures-util", "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", "itoa", "matchit", @@ -1367,7 +1339,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ "async-trait", - "bytes 1.7.1", + "bytes 1.7.2", "futures-util", "http 1.1.0", "http-body 1.0.1", @@ -1381,20 +1353,26 @@ dependencies = [ "tracing", ] +[[package]] +name = "az" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b7e4c2464d97fe331d41de9d5db0def0a96f4d823b8b32a2efd503578988973" + [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", "serde", + "windows-targets 0.52.6", ] [[package]] @@ -1481,7 +1459,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -1545,16 +1523,15 @@ dependencies = [ [[package]] name = "blake3" -version = "1.5.3" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9ec96fe9a81b5e365f9db71fe00edc4fe4ca2cc7dcb7861f0603012a7caa210" +checksum = "d82033247fd8e890df8f740e407ad4d038debb9eb1f40533fffb32e7d17dc6f7" dependencies = [ "arrayref", "arrayvec", "cc", "cfg-if", "constant_time_eq", - "rayon-core", ] [[package]] @@ -1637,9 +1614,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.16.3" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "102087e286b4677862ea56cf8fc58bb2cdfa8725c40ffb80fe3a008eb7f2fc83" +checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d" [[package]] name = "byteorder" @@ -1655,9 +1632,9 @@ checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" 
[[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" dependencies = [ "serde", ] @@ -1668,7 +1645,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dafe3a8757b027e2be6e4e5601ed563c55989fcf1546e933c66c8eb3a058d35" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "either", ] @@ -1689,33 +1666,13 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.7" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239" +checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" dependencies = [ "serde", ] -[[package]] -name = "capstone" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1097e608594dad3bad608295567f757742b883606fe150faf7a9740b849730d8" -dependencies = [ - "capstone-sys", - "libc", -] - -[[package]] -name = "capstone-sys" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7f651d5ec4c2a2e6c508f2c8032655003cd728ec85663e9796616990e25b5a" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "cargo-platform" version = "0.1.8" @@ -1747,12 +1704,13 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.1.7" +version = "1.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26a5c3fd7bfa1ce3897a3a3501d362b2d87b7f2583ebcb4a949ec25911025cbc" +checksum = "b16803a61b81d9eabb7eae2588776c4c1e584b738ede45fdbb4c972cec1e9945" dependencies = [ "jobserver", "libc", + "shlex", ] [[package]] @@ -1772,9 +1730,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "cfg_aliases" -version = "0.1.1" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" @@ -1785,7 +1743,6 @@ dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", - "serde", "windows-targets 0.52.6", ] @@ -1848,9 +1805,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.13" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fbb260a053428790f3de475e304ff84cdbc4face759ea7a3e64c1edd938a7fc" +checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" dependencies = [ "clap_builder", "clap_derive", @@ -1858,9 +1815,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.13" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64b17d7ea74e9f833c7dbf2cbe4fb12ff26783eda4782a8975b72f895c9b4d99" +checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstream", "anstyle", @@ -1870,14 +1827,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.13" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" 
dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -1886,12 +1843,6 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" -[[package]] -name = "cobs" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" - [[package]] name = "coins-bip32" version = "0.8.7" @@ -1974,9 +1925,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.12.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94fb8a24a26d37e1ffd45343323dc9fe6654ceea44c12f2fcb3d7ac29e610bc6" +checksum = "0121754e84117e65f9d90648ee6aa4882a6e63110307ab73967a4c5e7e69e586" dependencies = [ "cfg-if", "cpufeatures", @@ -1999,9 +1950,9 @@ checksum = "373e9fafaa20882876db20562275ff58d50e0caa2590077fe7ce7bef90211d0d" [[package]] name = "constant_time_eq" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" [[package]] name = "convert_case" @@ -2038,15 +1989,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] @@ -2063,7 +2014,7 @@ version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3a47af21622d091a8f0fb295b88bc886ac74efcc613efc19f5d0b21de5c89e47" dependencies = [ - "rustc_version 0.4.0", + "rustc_version 0.4.1", ] [[package]] @@ -2227,34 +2178,34 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.4.4" +version = "3.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "672465ae37dc1bc6380a6547a8883d5dd397b0f1faaad4f265726cc7042a5345" +checksum = "90eeab0aa92f3f9b4e87f258c72b139c207d251f9cbc1080a0086b86a8870dd3" dependencies = [ "nix", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "curl" -version = "0.4.46" +version = "0.4.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e2161dd6eba090ff1594084e95fd67aeccf04382ffea77999ea94ed42ec67b6" +checksum = "d9fb4d13a1be2b58f14d60adba57c9834b78c62fd86c3e76a148f732686e9265" dependencies = [ "curl-sys", "libc", "openssl-probe", "openssl-sys", "schannel", - "socket2 0.5.7", + "socket2", "windows-sys 0.52.0", ] [[package]] name = "curl-sys" -version = "0.4.74+curl-8.9.0" +version = "0.4.77+curl-8.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8af10b986114528fcdc4b63b6f5f021b7057618411046a4de2ba0f0149a097bf" +checksum = "f469e8a5991f277a208224f6c7ad72ecb5f986e36d09ae1f2c1bb9259478a480" dependencies = [ "cc", "libc", @@ -2276,7 +2227,7 @@ dependencies = [ "cpufeatures", "curve25519-dalek-derive", "fiat-crypto", - "rustc_version 
0.4.0", + "rustc_version 0.4.1", "subtle", "zeroize", ] @@ -2289,7 +2240,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -2298,18 +2249,8 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ - "darling_core 0.13.4", - "darling_macro 0.13.4", -] - -[[package]] -name = "darling" -version = "0.20.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" -dependencies = [ - "darling_core 0.20.10", - "darling_macro 0.20.10", + "darling_core", + "darling_macro", ] [[package]] @@ -2326,42 +2267,17 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "darling_core" -version = "0.20.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.11.1", - "syn 2.0.72", -] - [[package]] name = "darling_macro" version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ - "darling_core 0.13.4", + "darling_core", "quote", "syn 1.0.109", ] -[[package]] -name = "darling_macro" -version = "0.20.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" -dependencies = [ - "darling_core 0.20.10", - "quote", - "syn 2.0.72", -] - [[package]] name = "dashu" version = "0.4.2" @@ -2467,7 +2383,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", - "serde", ] [[package]] @@ -2490,8 +2405,8 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "rustc_version 0.4.0", - "syn 2.0.72", + "rustc_version 0.4.1", + "syn 2.0.79", ] [[package]] @@ -2511,7 +2426,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", "unicode-xid", ] @@ -2590,19 +2505,6 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" -[[package]] -name = "downloader" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05213e96f184578b5f70105d4d0a644a168e99e12d7bea0b200c15d67b5c182" -dependencies = [ - "futures", - "rand 0.8.5", - "reqwest 0.11.27", - "thiserror", - "tokio", -] - [[package]] name = "dunce" version = "1.0.5" @@ -2686,18 +2588,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "embedded-io" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" - -[[package]] -name = "embedded-io" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" - [[package]] name = "encode_unicode" version = "0.3.6" @@ -2726,7 +2616,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2a3d8dc56e02f954cac8eb489772c552c473346fc34f67412bb6244fd647f7e4" dependencies = [ "base64 0.21.7", - "bytes 1.7.1", + "bytes 1.7.2", "hex", "k256", "log", @@ -2755,7 +2645,7 @@ checksum = "f282cfdfe92516eb26c2af8589c274c7c17681f5ecc03c18255fe741c6aa64eb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -2906,7 +2796,7 @@ dependencies = [ "regex", "serde", "serde_json", - "syn 2.0.72", + "syn 2.0.79", "toml", "walkdir", ] @@ -2924,7 +2814,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -2934,7 +2824,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82d80cc6ad30b14a48ab786523af33b37f28a8623fc06afd55324816ef18fb1f" dependencies = [ "arrayvec", - "bytes 1.7.1", + "bytes 1.7.2", "cargo_metadata", "chrono", "const-hex", @@ -2950,7 +2840,7 @@ dependencies = [ "serde", "serde_json", "strum", - "syn 2.0.72", + "syn 2.0.79", "tempfile", "thiserror", "tiny-keccak", @@ -2992,7 +2882,7 @@ dependencies = [ "async-trait", "auto_impl", "base64 0.21.7", - "bytes 1.7.1", + "bytes 1.7.2", "const-hex", "enr", "ethers-core", @@ -3086,9 +2976,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "fastrlp" @@ -3098,7 +2988,7 @@ checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" dependencies = [ "arrayvec", "auto_impl", - "bytes 1.7.1", + "bytes 1.7.2", ] [[package]] @@ -3182,6 +3072,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" + [[package]] name = "foreign-types" version = "0.3.2" @@ -3214,9 +3110,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -3229,9 +3125,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -3239,15 +3135,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = 
"1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -3256,9 +3152,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" @@ -3281,7 +3177,7 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ - "fastrand 2.1.0", + "fastrand 2.1.1", "futures-core", "futures-io", "parking", @@ -3300,26 +3196,26 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" @@ -3327,15 +3223,15 @@ version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" dependencies = [ - "gloo-timers", + "gloo-timers 0.2.6", "send_wrapper 0.4.0", ] [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -3409,10 +3305,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", - "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "wasm-bindgen", ] [[package]] @@ -3427,9 +3321,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.29.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "git2" @@ -3462,6 +3356,28 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "gloo-timers" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "gmp-mpfr-sys" +version = "1.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b0205cd82059bc63b63cf516d714352a30c44f2c74da9961dfda2617ae6b5918" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "goblin" version = "0.8.2" @@ -3502,7 +3418,7 @@ version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "fnv", "futures-core", "futures-sink", @@ -3517,12 +3433,12 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ "atomic-waker", - "bytes 1.7.1", + "bytes 1.7.2", "fnv", "futures-core", "futures-sink", @@ -3589,6 +3505,12 @@ name = "hashbrown" version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", + "serde", +] [[package]] name = "hashers" @@ -3667,7 +3589,7 @@ version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "fnv", "itoa", ] @@ -3678,7 +3600,7 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "fnv", "itoa", ] @@ -3689,7 +3611,7 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "http 0.2.12", "pin-project-lite", ] @@ -3700,7 +3622,7 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "http 1.1.0", ] @@ -3710,7 +3632,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "futures-util", "http 1.1.0", "http-body 1.0.1", @@ -3755,9 +3677,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -3767,11 +3689,11 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.30" +version = "0.14.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" +checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "futures-channel", "futures-core", "futures-util", @@ -3782,7 +3704,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", "tower-service", "tracing", @@ -3791,14 +3713,14 @@ dependencies = [ [[package]] 
name = "hyper" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "futures-channel", "futures-util", - "h2 0.4.5", + "h2 0.4.6", "http 1.1.0", "http-body 1.0.1", "httparse", @@ -3818,7 +3740,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.30", + "hyper 0.14.31", "log", "rustls 0.21.12", "rustls-native-certs 0.6.3", @@ -3828,20 +3750,20 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", - "rustls 0.23.12", + "rustls 0.23.14", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", "tower-service", - "webpki-roots 0.26.3", + "webpki-roots 0.26.6", ] [[package]] @@ -3850,22 +3772,35 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", "pin-project-lite", "tokio", "tower-service", ] +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes 1.7.2", + "hyper 0.14.31", + "native-tls", + "tokio", + "tokio-native-tls", +] + [[package]] name = "hyper-tls" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", "native-tls", "tokio", @@ -3875,29 +3810,28 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.6" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab92f4f49ee4fb4f997c784b7a2e0fa70050211e0b6a287f898c3c9785ca956" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "futures-channel", "futures-util", "http 1.1.0", "http-body 1.0.1", - "hyper 1.4.1", + "hyper 1.5.0", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", - "tower 0.4.13", "tower-service", "tracing", ] [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -3984,7 +3918,6 @@ checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", - "serde", ] [[package]] @@ -4035,30 +3968,19 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "io-lifetimes" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi 0.3.9", - "libc", - "windows-sys 0.48.0", -] - [[package]] name = "ipnet" -version = "2.9.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "is-terminal" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" +checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi 0.4.0", "libc", "windows-sys 0.52.0", ] @@ -4136,9 +4058,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" dependencies = [ "wasm-bindgen", ] @@ -4173,9 +4095,9 @@ dependencies = [ [[package]] name = "k256" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", "ecdsa 0.16.9", @@ -4196,9 +4118,9 @@ dependencies = [ [[package]] name = "keccak-asm" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422fbc7ff2f2f5bdffeb07718e5a5324dca72b0c9293d50df4026652385e3314" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" dependencies = [ "digest 0.10.7", "sha3-asm", @@ -4224,9 +4146,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.158" +version = "0.2.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" +checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" [[package]] name = "libgit2-sys" @@ -4278,9 +4200,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.18" +version = "1.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c15da26e5af7e25c90b37a2d75cdbf940cf4a55316de9d84c679c9b8bfabf82e" +checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" dependencies = [ "cc", "libc", @@ -4288,12 +4210,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "linux-raw-sys" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - [[package]] name = "linux-raw-sys" version = "0.4.14" @@ -4321,11 +4237,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.15.0", ] [[package]] @@ -4389,18 +4305,18 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.4" +version = "0.8.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" dependencies = [ - "adler", + "adler2", ] [[package]] name = "mio" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ "hermit-abi 0.3.9", "libc", @@ -4433,9 +4349,9 @@ dependencies = [ [[package]] name = "nix" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ "bitflags 2.6.0", "cfg-if", @@ -4632,10 +4548,10 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ - "proc-macro-crate 3.1.0", + "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -4655,24 +4571,18 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.36.2" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f203fa8daa7bb185f760ae12bd8e097f63d17041dcdcaf675ac54cdf863170e" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" - -[[package]] -name = "oneshot" -version = "0.1.8" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e296cf87e61c9cfc1a61c3c63a0f7f286ed4554e0e22be84e8a38e1d264a2a29" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "oorandom" @@ -4694,7 +4604,7 @@ checksum = "786393f80485445794f6043fd3138854dd109cc6c4bd1a6383db304c9ce9b9ce" dependencies = [ "arrayvec", "auto_impl", - "bytes 1.7.1", + "bytes 1.7.2", "ethereum-types", "open-fastrlp-derive", ] @@ -4705,7 +4615,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "proc-macro2", "quote", "syn 1.0.109", @@ -4734,7 +4644,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -4786,9 +4696,9 @@ dependencies = [ [[package]] name = "p3-air" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45e909ef66fa5d77ff0fd3cb5af4b33b27fa6fb68d02b9b1e70edbc29383e565" +checksum = "066f571b2e645505ed5972dd0e1e252ba03352150830c9566769ca711c0f1e9b" dependencies = [ "p3-field", "p3-matrix", @@ -4796,9 +4706,9 @@ dependencies = [ [[package]] name = "p3-baby-bear" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"46965470aac1cddfe52f535424b59d52f2fffef0fdeb9dbed19da39b1d8f048a" +checksum = "ff00f571044d299310d9659c6e51c98422de3bf94b8577f7f30cf59cf2043e40" dependencies = [ "num-bigint 0.4.6", "p3-field", @@ -4811,9 +4721,9 @@ dependencies = [ [[package]] name = "p3-blake3" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36ef32d6ea21dd5cf9fec8a31bf0c64e6ceee8901dbf50966b83a443093c2aba" +checksum = "cc4cb69ae54a279bbbd477566d1bdb71aa879b528fd658d0fcfc36f54b00217c" dependencies = [ "blake3", "p3-symmetric", @@ -4821,9 +4731,9 @@ dependencies = [ [[package]] name = "p3-bn254-fr" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e3edfca6be3b3109adf8e3330baec30c3fc5f9f4d63d27aaec1b471ca51ed67" +checksum = "bf19917f986d45e9abb6d177e875824ced6eed096480d574fce16f2c45c721ea" dependencies = [ "ff 0.13.0", "num-bigint 0.4.6", @@ -4836,25 +4746,27 @@ dependencies = [ [[package]] name = "p3-challenger" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6662ea899a5d848b60c699944491d72757873b5e1fd46798e4712f90a03a4e9" +checksum = "3be7e4fbce4566a93091107eadfafa0b5374bd1ffd3e0f6b850da3ff72eb183f" dependencies = [ "p3-field", "p3-maybe-rayon", "p3-symmetric", "p3-util", + "serde", "tracing", ] [[package]] name = "p3-commit" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc3563918b5cc44ef5280bf9b51753e70dc78802de25e3fb81ed6c94617ccb6e" +checksum = "7a03eb0f99d68a712c41e658e9a7782a0705d4ffcfb6232a43bd3f1ef9591002" dependencies = [ "itertools 0.12.1", "p3-challenger", + "p3-dft", "p3-field", "p3-matrix", "p3-util", @@ -4863,9 +4775,9 @@ dependencies = [ [[package]] name = "p3-dft" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510095701819d83c9509fe825bbf1ebfe50426ae75149df5fe1dcfd18261323a" +checksum = "1556de968523fbe5d804ab50600ea306fcceea3500cfd7601e40882480524664" dependencies = [ "p3-field", "p3-matrix", @@ -4876,9 +4788,9 @@ dependencies = [ [[package]] name = "p3-field" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61f1977a0a65789f719aa824119c332c4676b000bdbfe94d312fb6244a70d601" +checksum = "cec2af6e1ac47a2035af5165e668d64612c4b9ccabd06df37fc1fd381fdf8a71" dependencies = [ "itertools 0.12.1", "num-bigint 0.4.6", @@ -4890,9 +4802,9 @@ dependencies = [ [[package]] name = "p3-fri" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c22ddb958f200d9289cc73ff68847b0167ca0c14557b791dd9e318f98c2d1b28" +checksum = "f351ee9f9d4256455164565cd91e3e6d2487cc2a5355515fa2b6d479269188dd" dependencies = [ "itertools 0.12.1", "p3-challenger", @@ -4909,30 +4821,20 @@ dependencies = [ [[package]] name = "p3-interpolation" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d032cda212f6b408d7d5b0b9a8270a9455acb93742fe55a0880d82be8e90e500" +checksum = "d24d0f2907a374ebe4545fcff3120d6376d9630cf0bef30feedcfc5908ea2c37" dependencies = [ "p3-field", "p3-matrix", "p3-util", ] -[[package]] -name = "p3-keccak" -version = "0.1.3-succinct" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c56abdd5a8a780049d2f8e92cea1df57b55a2ef50a40d1103f2732f7a00e4b1" -dependencies = [ - "p3-symmetric", - "tiny-keccak", -] - [[package]] name = "p3-keccak-air" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8398f1694ccc38513df0b8cab5f9ef7325423f27cd9e4fa20bdc77d5079cf1b" +checksum = "e66badd47cedf6570e91a0cabc389b80dfd53ba1a6e9a45a3923fd54b86122ff" dependencies = [ "p3-air", "p3-field", @@ -4940,15 +4842,13 @@ dependencies = [ "p3-maybe-rayon", "p3-util", "tracing", - "tracing-forest", - "tracing-subscriber", ] [[package]] name = "p3-matrix" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d548ee0b834f8e2ebc5037073acd101a3b0ca41a2d1d28a15ba0ccd9059495b0" +checksum = "fa272f3ae77ed8d73478aa7c89e712efb15bda3ff4aff10fadfe11a012cd5389" dependencies = [ "itertools 0.12.1", "p3-field", @@ -4961,18 +4861,18 @@ dependencies = [ [[package]] name = "p3-maybe-rayon" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55f5575d3d61bedb3e05681abb0f36b8bb339d65aa395d50756bfa64e9cd3f46" +checksum = "3eecad6292021858f282d643d9d1284ab112a200494d589863a9c4080e578ef0" dependencies = [ "rayon", ] [[package]] name = "p3-mds" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6e57ed310d59245f93e24ee805ea7aa16fc9c505551b76a15f5e50f29d177e" +checksum = "716c4dbe68a02f1541eb09149d07b8663a3a5951b1864a31cd67ff3bb0826e57" dependencies = [ "itertools 0.12.1", "p3-dft", @@ -4985,9 +4885,9 @@ dependencies = [ [[package]] name = "p3-merkle-tree" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af46b41cba75d483ec8a553cbab1d2d794935ae3403d75394acfa4fb2c977cce" +checksum = "ad7ebab52a03c26025988663a135aed62f5084a2e2ea262176dc8748efb593e5" dependencies = [ "itertools 0.12.1", "p3-commit", @@ -5002,22 +4902,23 @@ dependencies = [ [[package]] name = "p3-poseidon2" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adaba6f14c197203530e233badce0ca1126ba3bf3c9ff766505b497bdad0bee1" +checksum = "39c042efa15beab7a8c4d0ca9b9e4cbda7582be0c08e121e830fec45f082935b" dependencies = [ "gcd", "p3-field", "p3-mds", "p3-symmetric", "rand 0.8.5", + "serde", ] [[package]] name = "p3-symmetric" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ecc4282566eb14f48be7707f6745c4dff6be664984d59ec0fb1849cd82b5c2" +checksum = "b9896a831f5b688adc13f6fbe1dcf66ecfaa4622a500f81aa745610e777acb72" dependencies = [ "itertools 0.12.1", "p3-field", @@ -5026,9 +4927,9 @@ dependencies = [ [[package]] name = "p3-uni-stark" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1af5c038b22b058bf1d49fb1ea3dd6c240a3e46c3278fde5c444e0034f7ffe37" +checksum = "8437ebcd060c8a5479898030b114a93da8a86eb4c2e5f313d9eeaaf40c6e6f61" dependencies = [ "itertools 0.12.1", "p3-air", @@ -5039,18 +4940,15 @@ dependencies = [ "p3-matrix", "p3-maybe-rayon", "p3-util", - "postcard", "serde", "tracing", - "tracing-forest", - "tracing-subscriber", ] [[package]] 
name = "p3-util" -version = "0.1.3-succinct" +version = "0.1.4-succinct" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79f3fef0e00d9d7246385e758c4cd39b4efcbbcea31752471491ab502631385e" +checksum = "dedb9d27ba47ac314c6fac4ca54e55c3e486c864d51ec5ba55dbe47b75121157" dependencies = [ "serde", ] @@ -5084,7 +4982,7 @@ version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ - "proc-macro-crate 3.1.0", + "proc-macro-crate 3.2.0", "proc-macro2", "quote", "syn 1.0.109", @@ -5092,9 +4990,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" @@ -5191,9 +5089,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.11" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" +checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" dependencies = [ "memchr", "thiserror", @@ -5217,27 +5115,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" dependencies = [ "futures", - "rustc_version 0.4.0", + "rustc_version 0.4.1", ] [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -5259,7 +5157,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", - "fastrand 2.1.0", + "fastrand 2.1.1", "futures-io", ] @@ -5285,9 +5183,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "plain" @@ -5297,9 +5195,9 @@ checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" [[package]] name = "plotters" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" dependencies = [ "num-traits", "plotters-backend", @@ -5310,35 +5208,19 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.6" +version = 
"0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" [[package]] name = "plotters-svg" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" dependencies = [ "plotters-backend", ] -[[package]] -name = "polling" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" -dependencies = [ - "autocfg", - "bitflags 1.3.2", - "cfg-if", - "concurrent-queue", - "libc", - "log", - "pin-project-lite", - "windows-sys 0.48.0", -] - [[package]] name = "polling" version = "3.7.3" @@ -5349,7 +5231,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.34", + "rustix", "tracing", "windows-sys 0.59.0", ] @@ -5367,21 +5249,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" - -[[package]] -name = "postcard" -version = "1.0.10" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f7f0a8d620d71c457dd1d47df76bb18960378da56af4527aaa10f515eee732e" -dependencies = [ - "cobs", - "embedded-io 0.4.0", - "embedded-io 0.6.1", - "serde", -] +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" [[package]] name = "powerfmt" @@ -5400,12 +5270,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.20" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" +checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" dependencies = [ "proc-macro2", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -5448,11 +5318,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.21.1", + "toml_edit 0.22.22", ] [[package]] @@ -5498,7 +5368,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -5509,9 +5379,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "b3e4daa0dcf6feba26f985457cdf104d4b4256fc5a09547140f3631bb076b19a" dependencies = [ "unicode-ident", ] @@ -5530,7 +5400,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", "rusty-fork", "tempfile", "unarray", @@ -5542,7 +5412,7 @@ version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" dependencies 
= [ - "bytes 1.7.1", + "bytes 1.7.2", "prost-derive", ] @@ -5552,7 +5422,7 @@ version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "heck", "itertools 0.13.0", "log", @@ -5563,7 +5433,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.72", + "syn 2.0.79", "tempfile", ] @@ -5577,7 +5447,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -5589,15 +5459,6 @@ dependencies = [ "prost", ] -[[package]] -name = "psm" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5787f7cda34e3033a72192c018bc5883100330f362ef279a8cbccfce8bb4e874" -dependencies = [ - "cc", -] - [[package]] name = "quick-error" version = "1.2.3" @@ -5606,17 +5467,17 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quinn" -version = "0.11.3" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b22d8e7369034b9a7132bc2008cac12f2013c8132b45e0554e6e20e2617f2156" +checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "pin-project-lite", "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.12", - "socket2 0.5.7", + "rustls 0.23.14", + "socket2", "thiserror", "tokio", "tracing", @@ -5624,15 +5485,15 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.6" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba92fb39ec7ad06ca2582c0ca834dfeadcaf06ddfc8e635c80aa7e1c05315fdd" +checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "rand 0.8.5", "ring 0.17.8", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls 0.23.14", "slab", "thiserror", "tinyvec", @@ -5641,22 +5502,22 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bffec3605b73c6f1754535084a85229fa8a30f86014e6c81aeec4abb68b0285" +checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" dependencies = [ "libc", "once_cell", - "socket2 0.5.7", + "socket2", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "quote" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] @@ -5779,18 +5640,18 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.3" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags 2.6.0", ] [[package]] name = "redox_users" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom 0.2.15", "libredox", @@ -5799,14 +5660,14 @@ 
dependencies = [ [[package]] name = "regex" -version = "1.10.6" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -5820,13 +5681,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", ] [[package]] @@ -5843,9 +5704,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" @@ -5854,58 +5715,57 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ "base64 0.21.7", - "bytes 1.7.1", + "bytes 1.7.2", "encoding_rs", "futures-core", "futures-util", "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.30", - "hyper-rustls 0.24.2", + "hyper 0.14.31", + "hyper-tls 0.5.0", "ipnet", "js-sys", "log", "mime", + "native-tls", "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.21.12", "rustls-pemfile 1.0.4", "serde", "serde_json", "serde_urlencoded", "sync_wrapper 0.1.2", - "system-configuration", + "system-configuration 0.5.1", "tokio", - "tokio-rustls 0.24.1", + "tokio-native-tls", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots 0.25.4", - "winreg 0.50.0", + "winreg", ] [[package]] name = "reqwest" -version = "0.12.5" +version = "0.12.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" +checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" dependencies = [ "base64 0.22.1", - "bytes 1.7.1", + "bytes 1.7.2", "encoding_rs", "futures-core", "futures-util", - "h2 0.4.5", + "h2 0.4.6", "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", - "hyper-rustls 0.27.2", - "hyper-tls", + "hyper 1.5.0", + "hyper-rustls 0.27.3", + "hyper-tls 0.6.0", "hyper-util", "ipnet", "js-sys", @@ -5916,14 +5776,14 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.12", - "rustls-pemfile 2.1.3", + "rustls 0.23.14", + "rustls-pemfile 2.2.0", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper 1.0.1", - "system-configuration", + "system-configuration 0.6.1", "tokio", "tokio-native-tls", "tokio-rustls 0.26.0", @@ -5934,20 +5794,20 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 0.26.3", - "winreg 0.52.0", + "webpki-roots 0.26.6", + "windows-registry", ] [[package]] name = "reqwest-middleware" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "39346a33ddfe6be00cbc17a34ce996818b97b230b87229f10114693becca1268" +checksum = "562ceb5a604d3f7c885a792d42c199fd8af239d0a51b2fa6a78aafa092452b04" dependencies = [ "anyhow", "async-trait", "http 1.1.0", - "reqwest 0.12.5", + "reqwest 0.12.8", "serde", "thiserror", "tower-service", @@ -6019,7 +5879,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "rlp-derive", "rustc-hex", ] @@ -6046,6 +5906,18 @@ dependencies = [ "paste", ] +[[package]] +name = "rug" +version = "1.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97ae2c1089ec0575193eb9222881310cc1ed8bce3646ef8b81b44b518595b79d" +dependencies = [ + "az", + "gmp-mpfr-sys", + "libc", + "libm", +] + [[package]] name = "ruint" version = "1.12.3" @@ -6055,7 +5927,7 @@ dependencies = [ "alloy-rlp", "ark-ff 0.3.0", "ark-ff 0.4.2", - "bytes 1.7.1", + "bytes 1.7.2", "fastrlp", "num-bigint 0.4.6", "num-traits", @@ -6120,37 +5992,23 @@ dependencies = [ [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver 1.0.23", ] [[package]] name = "rustix" -version = "0.37.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 0.3.8", - "windows-sys 0.48.0", -] - -[[package]] -name = "rustix" -version = "0.38.34" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys 0.4.14", + "linux-raw-sys", "windows-sys 0.52.0", ] @@ -6181,15 +6039,15 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.12" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" dependencies = [ "log", "once_cell", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.6", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] @@ -6213,7 +6071,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" dependencies = [ "openssl-probe", - "rustls-pemfile 2.1.3", + "rustls-pemfile 2.2.0", "rustls-pki-types", "schannel", "security-framework", @@ -6230,19 +6088,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.7.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" [[package]] name = "rustls-webpki" @@ -6256,9 +6113,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.6" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -6267,9 +6124,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" [[package]] name = "rusty-fork" @@ -6325,7 +6182,7 @@ version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ - "proc-macro-crate 3.1.0", + "proc-macro-crate 3.2.0", "proc-macro2", "quote", "syn 1.0.109", @@ -6333,20 +6190,20 @@ dependencies = [ [[package]] name = "scc" -version = "2.1.8" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d777f59627453628a9a5be1ee8d948745b94b1dfc2d0c3099cbd9e08ab89e7c" +checksum = "f2c1f7fc6deb21665a9060dfc7d271be784669295a31babdcd4dd2c79ae8cbfb" dependencies = [ "sdd", ] [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -6372,7 +6229,7 @@ checksum = "7f81c2fde025af7e69b1d1420531c8a8811ca898919db177141a85313b1cb932" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -6409,9 +6266,9 @@ dependencies = [ [[package]] name = "sdd" -version = "2.1.0" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "177258b64c0faaa9ffd3c65cd3262c2bc7e2588dbbd9c1641d0346145c1bbda8" +checksum = "49c1eeaf4b6a87c7479688c6d52b9f1153cedd3c489300564f932b065c6eab95" [[package]] name = "sec1" @@ -6456,9 +6313,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.1" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", "libc", @@ -6520,29 +6377,29 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.207" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5665e14a49a4ea1b91029ba7d3bca9f299e1f7cfa194388ccc20f14743e784f2" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.207" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6aea2634c86b0e8ef2cfdc0c340baede54ec27b1e46febd7f80dffb2aa44a00e" +checksum = 
"243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] name = "serde_json" -version = "1.0.122" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", @@ -6573,9 +6430,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -6599,25 +6456,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" dependencies = [ "serde", - "serde_with_macros 1.5.2", -] - -[[package]] -name = "serde_with" -version = "3.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" -dependencies = [ - "base64 0.22.1", - "chrono", - "hex", - "indexmap 1.9.3", - "indexmap 2.6.0", - "serde", - "serde_derive", - "serde_json", - "serde_with_macros 3.9.0", - "time 0.3.36", + "serde_with_macros", ] [[package]] @@ -6626,24 +6465,12 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ - "darling 0.13.4", + "darling", "proc-macro2", "quote", "syn 1.0.109", ] -[[package]] -name = "serde_with_macros" -version = "3.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" -dependencies = [ - "darling 0.20.10", - "proc-macro2", - "quote", - "syn 2.0.72", -] - [[package]] name = "serial_test" version = "3.1.1" @@ -6666,7 +6493,7 @@ checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -6731,9 +6558,9 @@ dependencies = [ [[package]] name = "sha3-asm" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d79b758b7cb2085612b11a235055e485605a5103faccdd633f35bd7aee69dd" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" dependencies = [ "cc", "cfg-if", @@ -6825,7 +6652,7 @@ dependencies = [ "rustls 0.19.1", "serde", "serde_json", - "serde_with 1.14.0", + "serde_with", "surf", "url", ] @@ -6863,16 +6690,6 @@ dependencies = [ "scale-info", ] -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.5.7" @@ -6885,7 +6702,7 @@ dependencies = [ [[package]] name = "sp1-build" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ "anyhow", "cargo_metadata", @@ -6896,33 +6713,26 @@ dependencies = [ [[package]] name = "sp1-cli" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ "anstyle", "anyhow", - "capstone", - "cargo_metadata", "clap", "ctrlc", "dirs", - "downloader", - "futures-util", "goblin", "hex", 
"indicatif", "prettytable-rs", "rand 0.8.5", "regex", - "reqwest 0.12.5", + "reqwest 0.12.8", "rustc-demangle", - "serde", "serde_json", "sp1-build", "sp1-core-machine", - "sp1-prover", "sp1-sdk", "target-lexicon", - "tempfile", "textwrap", "tokio", "vergen", @@ -6931,14 +6741,13 @@ dependencies = [ [[package]] name = "sp1-core-executor" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ "bincode", "bytemuck", "elf", "enum-map", "eyre", - "generic-array 1.1.0", "hashbrown 0.14.5", "hex", "itertools 0.13.0", @@ -6946,14 +6755,11 @@ dependencies = [ "nohash-hasher", "num", "p3-field", - "p3-keccak-air", "p3-maybe-rayon", "rand 0.8.5", "rrs-succinct", "serde", - "serde_with 3.9.0", "sp1-curves", - "sp1-derive", "sp1-primitives", "sp1-stark", "sp1-zkvm", @@ -6968,17 +6774,11 @@ dependencies = [ [[package]] name = "sp1-core-machine" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ - "anyhow", - "arrayref", "bincode", - "blake3", - "bytemuck", "cfg-if", "criterion", - "curve25519-dalek", - "elf", "elliptic-curve 0.13.8", "generic-array 1.1.0", "hashbrown 0.14.5", @@ -6986,31 +6786,20 @@ dependencies = [ "itertools 0.13.0", "k256", "log", - "nohash-hasher", "num", "num_cpus", "p3-air", "p3-baby-bear", "p3-blake3", "p3-challenger", - "p3-commit", - "p3-dft", "p3-field", - "p3-fri", - "p3-keccak", "p3-keccak-air", "p3-matrix", "p3-maybe-rayon", - "p3-merkle-tree", - "p3-poseidon2", - "p3-symmetric", "p3-uni-stark", "p3-util", "rand 0.8.5", - "rayon-scan", - "rrs-succinct", "serde", - "serde_with 3.9.0", "size", "snowbridge-amcl", "sp1-core-executor", @@ -7034,29 +6823,26 @@ dependencies = [ [[package]] name = "sp1-cuda" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ "bincode", "ctrlc", "prost", "prost-build", - "prost-types", "serde", - "serde_json", "sp1-core-machine", "sp1-prover", - "sp1-stark", "tokio", "tracing", - "tracing-subscriber", "twirp-build-rs", "twirp-rs", ] [[package]] name = "sp1-curves" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ + "cfg-if", "curve25519-dalek", "dashu", "elliptic-curve 0.13.8", @@ -7066,6 +6852,7 @@ dependencies = [ "num", "p3-field", "rand 0.8.5", + "rug", "serde", "snowbridge-amcl", "sp1-primitives", @@ -7075,23 +6862,22 @@ dependencies = [ [[package]] name = "sp1-derive" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ - "proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "sp1-eval" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ "anyhow", "bincode", "clap", "csv", "p3-baby-bear", - "reqwest 0.12.5", + "reqwest 0.12.8", "serde", "serde_json", "slack-rust-rs", @@ -7104,30 +6890,46 @@ dependencies = [ [[package]] name = "sp1-helper" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ "sp1-build", ] [[package]] name = "sp1-lib" -version = "2.0.0" +version = "3.0.0-rc3" +dependencies = [ + "bincode", + "serde", +] + +[[package]] +name = "sp1-perf" +version = "3.0.0-rc3" dependencies = [ "anyhow", "bincode", - "cfg-if", - "hex", + "clap", + "csv", + "p3-baby-bear", + "reqwest 0.12.8", "serde", - "snowbridge-amcl", + "serde_json", + "slack-rust-rs", + "sp1-cuda", + "sp1-prover", + "sp1-sdk", + "sp1-stark", + "time 0.3.36", + "tokio", ] [[package]] name = "sp1-primitives" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ "bincode", "hex", - "itertools 0.13.0", "lazy_static", "num-bigint 0.4.6", "p3-baby-bear", @@ -7140,23 +6942,26 @@ dependencies = [ [[package]] name = "sp1-prover" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ "anyhow", "bincode", "clap", "dirs", - 
"hex", + "eyre", "itertools 0.13.0", + "lazy_static", + "lru", "num-bigint 0.4.6", - "oneshot", "p3-baby-bear", "p3-bn254-fr", "p3-challenger", "p3-commit", "p3-field", "p3-matrix", + "p3-symmetric", "rayon", + "reqwest 0.11.27", "serde", "serde_json", "serial_test", @@ -7167,7 +6972,6 @@ dependencies = [ "sp1-recursion-compiler", "sp1-recursion-core", "sp1-recursion-gnark-ffi", - "sp1-recursion-program", "sp1-stark", "subtle-encoding", "tempfile", @@ -7178,44 +6982,12 @@ dependencies = [ [[package]] name = "sp1-recursion-circuit" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ - "bincode", - "ff 0.13.0", - "itertools 0.13.0", - "p3-air", - "p3-baby-bear", - "p3-bn254-fr", - "p3-challenger", - "p3-commit", - "p3-dft", - "p3-field", - "p3-fri", - "p3-matrix", - "p3-merkle-tree", - "p3-poseidon2", - "p3-symmetric", - "p3-util", - "rand 0.8.5", - "serde", - "sp1-core-machine", - "sp1-recursion-compiler", - "sp1-recursion-core", - "sp1-recursion-derive", - "sp1-recursion-gnark-ffi", - "sp1-recursion-program", - "sp1-stark", - "zkhash", -] - -[[package]] -name = "sp1-recursion-circuit-v2" -version = "2.0.0" -dependencies = [ - "bincode", "ff 0.13.0", "hashbrown 0.14.5", "itertools 0.13.0", + "num-traits", "p3-air", "p3-baby-bear", "p3-bn254-fr", @@ -7225,55 +6997,44 @@ dependencies = [ "p3-field", "p3-fri", "p3-matrix", - "p3-maybe-rayon", "p3-merkle-tree", "p3-poseidon2", "p3-symmetric", "p3-util", "rand 0.8.5", + "rayon", "serde", "sp1-core-executor", "sp1-core-machine", + "sp1-derive", "sp1-primitives", - "sp1-recursion-circuit", "sp1-recursion-compiler", - "sp1-recursion-core-v2", - "sp1-recursion-derive", + "sp1-recursion-core", "sp1-recursion-gnark-ffi", - "sp1-recursion-program", "sp1-stark", - "stacker", "tracing", "zkhash", ] [[package]] name = "sp1-recursion-compiler" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ "backtrace", "criterion", "itertools 0.13.0", - "p3-air", "p3-baby-bear", "p3-bn254-fr", "p3-challenger", - "p3-commit", "p3-dft", "p3-field", - "p3-fri", - "p3-matrix", "p3-merkle-tree", - "p3-poseidon2", "p3-symmetric", - "p3-util", "rand 0.8.5", - "rayon", "serde", "sp1-core-machine", "sp1-primitives", "sp1-recursion-core", - "sp1-recursion-core-v2", "sp1-recursion-derive", "sp1-stark", "tracing", @@ -7282,14 +7043,12 @@ dependencies = [ [[package]] name = "sp1-recursion-core" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ - "arrayref", "backtrace", "ff 0.13.0", "hashbrown 0.14.5", "itertools 0.13.0", - "num_cpus", "p3-air", "p3-baby-bear", "p3-bn254-fr", @@ -7306,49 +7065,9 @@ dependencies = [ "p3-util", "rand 0.8.5", "serde", - "serde_with 3.9.0", - "sp1-core-executor", - "sp1-core-machine", - "sp1-derive", - "sp1-primitives", - "sp1-stark", - "static_assertions", - "tracing", - "zkhash", -] - -[[package]] -name = "sp1-recursion-core-v2" -version = "2.0.0" -dependencies = [ - "arrayref", - "backtrace", - "ff 0.13.0", - "hashbrown 0.14.5", - "itertools 0.13.0", - "num_cpus", - "p3-air", - "p3-baby-bear", - "p3-bn254-fr", - "p3-challenger", - "p3-commit", - "p3-dft", - "p3-field", - "p3-fri", - "p3-matrix", - "p3-maybe-rayon", - "p3-merkle-tree", - "p3-poseidon2", - "p3-symmetric", - "p3-util", - "rand 0.8.5", - "serde", - "serde_with 3.9.0", - "sp1-core-executor", "sp1-core-machine", "sp1-derive", "sp1-primitives", - "sp1-recursion-core", "sp1-stark", "static_assertions", "thiserror", @@ -7359,16 +7078,15 @@ dependencies = [ [[package]] name = "sp1-recursion-derive" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ - 
"proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "sp1-recursion-gnark-cli" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ "bincode", "clap", @@ -7377,7 +7095,7 @@ dependencies = [ [[package]] name = "sp1-recursion-gnark-ffi" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ "anyhow", "bincode", @@ -7390,7 +7108,6 @@ dependencies = [ "p3-baby-bear", "p3-field", "p3-symmetric", - "rand 0.8.5", "serde", "serde_json", "sha2 0.10.8", @@ -7400,41 +7117,11 @@ dependencies = [ "tempfile", ] -[[package]] -name = "sp1-recursion-program" -version = "2.0.0" -dependencies = [ - "itertools 0.13.0", - "p3-air", - "p3-baby-bear", - "p3-challenger", - "p3-commit", - "p3-dft", - "p3-field", - "p3-fri", - "p3-matrix", - "p3-maybe-rayon", - "p3-merkle-tree", - "p3-poseidon2", - "p3-symmetric", - "p3-util", - "rand 0.8.5", - "serde", - "sp1-core-executor", - "sp1-core-machine", - "sp1-primitives", - "sp1-recursion-compiler", - "sp1-recursion-core", - "sp1-stark", - "stacker", - "tracing", -] - [[package]] name = "sp1-sdk" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ - "alloy-primitives 0.8.5", + "alloy-primitives 0.8.8", "alloy-signer", "alloy-signer-local", "alloy-sol-types 0.7.7", @@ -7442,30 +7129,23 @@ dependencies = [ "async-trait", "aws-config", "aws-sdk-s3", - "axum", "bincode", "cfg-if", "dirs", "ethers", "futures", - "getrandom 0.2.15", "hashbrown 0.14.5", "hex", "indicatif", "itertools 0.13.0", "log", - "num-bigint 0.4.6", "p3-baby-bear", - "p3-commit", "p3-field", "p3-fri", - "p3-matrix", "prost", - "reqwest 0.12.5", + "reqwest 0.12.8", "reqwest-middleware", "serde", - "serde_json", - "sha2 0.10.8", "sp1-core-executor", "sp1-core-machine", "sp1-cuda", @@ -7474,7 +7154,6 @@ dependencies = [ "sp1-stark", "strum", "strum_macros", - "sysinfo", "tempfile", "thiserror", "tokio", @@ -7486,12 +7165,13 @@ dependencies = [ [[package]] name = "sp1-stark" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ "arrayref", "getrandom 0.2.15", "hashbrown 0.14.5", "itertools 0.13.0", + "num-traits", "p3-air", "p3-baby-bear", "p3-challenger", @@ -7511,24 +7191,24 @@ dependencies = [ "sp1-derive", "sp1-primitives", "sp1-zkvm", + "strum", + "strum_macros", "sysinfo", + "thiserror", "tracing", ] [[package]] name = "sp1-zkvm" -version = "2.0.0" +version = "3.0.0-rc3" dependencies = [ - "bincode", "cfg-if", "getrandom 0.2.15", "lazy_static", "libm", - "once_cell", "p3-baby-bear", "p3-field", "rand 0.8.5", - "serde", "sha2 0.10.8", "sp1-lib", "sp1-primitives", @@ -7575,19 +7255,6 @@ dependencies = [ "der 0.7.9", ] -[[package]] -name = "stacker" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799c883d55abdb5e98af1a7b3f23b9b6de8ecada0ecac058672d7635eb48ca7b" -dependencies = [ - "cc", - "cfg-if", - "libc", - "psm", - "windows-sys 0.59.0", -] - [[package]] name = "standback" version = "0.2.17" @@ -7683,7 +7350,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -7737,9 +7404,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.72" +version = "2.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" +checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" dependencies = [ "proc-macro2", "quote", @@ -7755,19 +7422,19 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] name = "syn-solidity" -version = 
"0.8.5" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab661c8148c2261222a4d641ad5477fd4bea79406a99056096a0b41b35617a5" +checksum = "ebfc1bfd06acc78f16d8fd3ef846bc222ee7002468d10a7dce8d703d6eab89a3" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -7781,6 +7448,9 @@ name = "sync_wrapper" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +dependencies = [ + "futures-core", +] [[package]] name = "sysinfo" @@ -7805,7 +7475,18 @@ checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", "core-foundation", - "system-configuration-sys", + "system-configuration-sys 0.5.0", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.6.0", + "core-foundation", + "system-configuration-sys 0.6.0", ] [[package]] @@ -7818,6 +7499,16 @@ dependencies = [ "libc", ] +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "tap" version = "1.0.1" @@ -7832,15 +7523,15 @@ checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" [[package]] name = "tempfile" -version = "3.11.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fcd239983515c23a32fb82099f97d0b11b8c72f654ed659363a95c3dad7a53" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", - "fastrand 2.1.0", + "fastrand 2.1.1", "once_cell", - "rustix 0.38.34", - "windows-sys 0.52.0", + "rustix", + "windows-sys 0.59.0", ] [[package]] @@ -7867,22 +7558,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -8016,13 +7707,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", - "bytes 1.7.1", + "bytes 1.7.2", "libc", "mio", "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2", "tokio-macros", "windows-sys 0.52.0", ] @@ -8035,7 +7726,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -8064,7 +7755,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.12", + "rustls 0.23.14", "rustls-pki-types", "tokio", ] @@ -8082,11 +7773,11 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "futures-core", "futures-sink", "pin-project-lite", @@ -8102,7 +7793,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.20", + "toml_edit 0.22.22", ] [[package]] @@ -8127,26 +7818,15 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.21.1" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" -dependencies = [ - "indexmap 2.6.0", - "toml_datetime", - "winnow 0.5.40", -] - -[[package]] -name = "toml_edit" -version = "0.22.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.18", + "winnow 0.6.20", ] [[package]] @@ -8159,20 +7839,20 @@ dependencies = [ "async-trait", "axum", "base64 0.22.1", - "bytes 1.7.1", - "h2 0.4.5", + "bytes 1.7.2", + "h2 0.4.6", "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-timeout", "hyper-util", "percent-encoding", "pin-project", "prost", "rustls-native-certs 0.8.0", - "rustls-pemfile 2.1.3", - "socket2 0.5.7", + "rustls-pemfile 2.2.0", + "socket2", "tokio", "tokio-rustls 0.26.0", "tokio-stream", @@ -8250,7 +7930,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -8329,7 +8009,7 @@ checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" dependencies = [ "base64 0.13.1", "byteorder", - "bytes 1.7.1", + "bytes 1.7.2", "http 0.2.12", "httparse", "log", @@ -8360,9 +8040,9 @@ dependencies = [ "futures", "http 1.1.0", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.0", "prost", - "reqwest 0.12.5", + "reqwest 0.12.8", "serde", "serde_json", "thiserror", @@ -8379,9 +8059,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "ucd-trie" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" [[package]] name = "uint" @@ -8412,15 +8092,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = 
"e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-linebreak" @@ -8430,24 +8110,24 @@ checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] [[package]] name = "unicode-width" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unicode-xid" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "universal-hash" @@ -8617,34 +8297,35 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ "cfg-if", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.42" +version = "0.4.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" dependencies = [ "cfg-if", "js-sys", @@ -8654,9 +8335,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8664,28 +8345,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = 
"65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "wasm-streams" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" +checksum = "4e072d4e72f700fb3443d8fe94a39315df013eef1104903cdb0a2abd322bbecd" dependencies = [ "futures-util", "js-sys", @@ -8696,9 +8377,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.69" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" dependencies = [ "js-sys", "wasm-bindgen", @@ -8735,15 +8416,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.25.4" +version = "0.26.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" - -[[package]] -name = "webpki-roots" -version = "0.26.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" dependencies = [ "rustls-pki-types", ] @@ -8798,6 +8473,36 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -8957,9 +8662,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -8974,16 +8679,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "winreg" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - [[package]] name = "ws_stream_wasm" version = "0.7.4" @@ -8995,7 +8690,7 @@ dependencies = [ "js-sys", "log", "pharos", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "send_wrapper 0.6.0", "thiserror", "wasm-bindgen", @@ -9042,7 +8737,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.79", ] [[package]] @@ -9062,7 +8757,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 
2.0.79", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 3d0fd3a064..7bb6d7517b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "2.0.0" +version = "3.0.0-rc3" edition = "2021" license = "MIT OR Apache-2.0" repository = "https://github.com/succinctlabs/sp1" @@ -16,17 +16,15 @@ members = [ "crates/derive", "crates/eval", "crates/helper", + "crates/perf", "crates/primitives", "crates/prover", "crates/recursion/circuit", - "crates/recursion/circuit-v2", "crates/recursion/compiler", "crates/recursion/core", - "crates/recursion/core-v2", "crates/recursion/derive", "crates/recursion/gnark-cli", "crates/recursion/gnark-ffi", - "crates/recursion/program", "crates/sdk", "crates/cuda", "crates/stark", @@ -48,53 +46,72 @@ debug-assertions = true [workspace.dependencies] # sp1 -sp1-build = { path = "crates/build", version = "2.0.0" } -sp1-cli = { path = "crates/cli", version = "2.0.0", default-features = false } -sp1-core-machine = { path = "crates/core/machine", version = "2.0.0" } -sp1-core-executor = { path = "crates/core/executor", version = "2.0.0" } -sp1-curves = { path = "crates/curves", version = "2.0.0" } -sp1-derive = { path = "crates/derive", version = "2.0.0" } -sp1-eval = { path = "crates/eval", version = "2.0.0" } -sp1-helper = { path = "crates/helper", version = "2.0.0", default-features = false } -sp1-primitives = { path = "crates/primitives", version = "2.0.0" } -sp1-prover = { path = "crates/prover", version = "2.0.0" } -sp1-recursion-compiler = { path = "crates/recursion/compiler", version = "2.0.0" } -sp1-recursion-core = { path = "crates/recursion/core", version = "2.0.0", default-features = false } -sp1-recursion-core-v2 = { path = "crates/recursion/core-v2", version = "2.0.0", default-features = false } -sp1-recursion-derive = { path = "crates/recursion/derive", version = "2.0.0", default-features = false } -sp1-recursion-gnark-ffi = { path = "crates/recursion/gnark-ffi", version = "2.0.0", default-features = false } -sp1-recursion-program = { path = "crates/recursion/program", version = "2.0.0", default-features = false } -sp1-recursion-circuit = { path = "crates/recursion/circuit", version = "2.0.0", default-features = false } -sp1-sdk = { path = "crates/sdk", version = "2.0.0" } -sp1-cuda = { path = "crates/cuda", version = "2.0.0" } -sp1-stark = { path = "crates/stark", version = "2.0.0" } -sp1-lib = { path = "crates/zkvm/lib", version = "2.0.0", default-features = false } -sp1-zkvm = { path = "crates/zkvm/entrypoint", version = "2.0.0", default-features = false } +sp1-build = { path = "crates/build", version = "=3.0.0-rc3" } +sp1-cli = { path = "crates/cli", version = "=3.0.0-rc3", default-features = false } +sp1-core-machine = { path = "crates/core/machine", version = "=3.0.0-rc3" } +sp1-core-executor = { path = "crates/core/executor", version = "=3.0.0-rc3" } +sp1-curves = { path = "crates/curves", version = "=3.0.0-rc3" } +sp1-derive = { path = "crates/derive", version = "=3.0.0-rc3" } +sp1-eval = { path = "crates/eval", version = "=3.0.0-rc3" } +sp1-helper = { path = "crates/helper", version = "=3.0.0-rc3", default-features = false } +sp1-primitives = { path = "crates/primitives", version = "=3.0.0-rc3" } +sp1-prover = { path = "crates/prover", version = "=3.0.0-rc3" } +sp1-recursion-compiler = { path = "crates/recursion/compiler", version = "=3.0.0-rc3" } +sp1-recursion-core = { path = "crates/recursion/core", version = "=3.0.0-rc3", default-features = false } +sp1-recursion-derive = { path = "crates/recursion/derive", 
version = "=3.0.0-rc3", default-features = false } +sp1-recursion-gnark-ffi = { path = "crates/recursion/gnark-ffi", version = "=3.0.0-rc3", default-features = false } +sp1-recursion-circuit = { path = "crates/recursion/circuit", version = "=3.0.0-rc3", default-features = false } +sp1-sdk = { path = "crates/sdk", version = "=3.0.0-rc3" } +sp1-cuda = { path = "crates/cuda", version = "=3.0.0-rc3" } +sp1-stark = { path = "crates/stark", version = "=3.0.0-rc3" } +sp1-lib = { path = "crates/zkvm/lib", version = "=3.0.0-rc3", default-features = false } +sp1-zkvm = { path = "crates/zkvm/entrypoint", version = "=3.0.0-rc3", default-features = false } # p3 -p3-air = "0.1.3-succinct" -p3-field = "0.1.3-succinct" -p3-commit = "0.1.3-succinct" -p3-matrix = "0.1.3-succinct" -p3-baby-bear = { version = "0.1.3-succinct", features = ["nightly-features"] } -p3-util = "0.1.3-succinct" -p3-challenger = "0.1.3-succinct" -p3-dft = "0.1.3-succinct" -p3-fri = "0.1.3-succinct" -p3-goldilocks = "0.1.3-succinct" -p3-keccak = "0.1.3-succinct" -p3-keccak-air = "0.1.3-succinct" -p3-blake3 = "0.1.3-succinct" -p3-mds = "0.1.3-succinct" -p3-merkle-tree = "0.1.3-succinct" -p3-poseidon2 = "0.1.3-succinct" -p3-symmetric = "0.1.3-succinct" -p3-uni-stark = "0.1.3-succinct" -p3-maybe-rayon = "0.1.3-succinct" -p3-bn254-fr = "0.1.3-succinct" +p3-air = "0.1.4-succinct" +p3-field = "0.1.4-succinct" +p3-commit = "0.1.4-succinct" +p3-matrix = "0.1.4-succinct" +p3-baby-bear = { version = "0.1.4-succinct", features = ["nightly-features"] } +p3-util = "0.1.4-succinct" +p3-challenger = "0.1.4-succinct" +p3-dft = "0.1.4-succinct" +p3-fri = "0.1.4-succinct" +p3-goldilocks = "0.1.4-succinct" +p3-keccak = "0.1.4-succinct" +p3-keccak-air = "0.1.4-succinct" +p3-blake3 = "0.1.4-succinct" +p3-mds = "0.1.4-succinct" +p3-merkle-tree = "0.1.4-succinct" +p3-poseidon2 = "0.1.4-succinct" +p3-symmetric = "0.1.4-succinct" +p3-uni-stark = "0.1.4-succinct" +p3-maybe-rayon = "0.1.4-succinct" +p3-bn254-fr = "0.1.4-succinct" # For local development. 
+# p3-air = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-field = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-commit = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-matrix = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-baby-bear = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-util = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-challenger = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-dft = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-fri = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-goldilocks = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-keccak = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-keccak-air = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-blake3 = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-mds = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-merkle-tree = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-poseidon2 = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-symmetric = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-uni-stark = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-maybe-rayon = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } +# p3-bn254-fr = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v3" } + # p3-air = { path = "../Plonky3/air" } # p3-field = { path = "../Plonky3/field" } # p3-commit = { path = "../Plonky3/commit" } diff --git a/crates/cli/Cargo.toml b/crates/cli/Cargo.toml index 3ccce01986..1e6bd0e088 100644 --- a/crates/cli/Cargo.toml +++ b/crates/cli/Cargo.toml @@ -19,10 +19,8 @@ vergen = { version = "8", default-features = false, features = [ [dependencies] anyhow = { version = "1.0.83", features = ["backtrace"] } -cargo_metadata = "0.18.1" clap = { version = "4.5.9", features = ["derive", "env"] } sp1-build = { workspace = true } -sp1-prover = { workspace = true } sp1-sdk = { workspace = true } sp1-core-machine = { workspace = true } reqwest = { version = "0.12.4", features = [ @@ -30,24 +28,17 @@ reqwest = { version = "0.12.4", features = [ "json", "rustls-tls", ], default-features = false } -futures-util = "0.3.30" indicatif = "0.17.8" tokio = { version = "1", features = ["full"] } dirs = "5.0" -serde = { version = "1", features = ["derive"] } rand = "0.8" -downloader = { version = "0.2", default-features = false, features = [ - "rustls-tls", -] } serde_json = "1.0.121" yansi = "1.0.1" hex = "0.4.3" anstyle = "1.0.8" target-lexicon = "0.12.15" -tempfile = "3.10.1" rustc-demangle = "0.1.18" goblin = "0.8" -capstone = "0.11.0" regex = "1.5.4" prettytable-rs = "0.10" textwrap = "0.16.0" diff --git a/crates/cli/src/commands/vkey.rs b/crates/cli/src/commands/vkey.rs index 58fd64e3d7..a8aeb1c183 100644 --- a/crates/cli/src/commands/vkey.rs +++ b/crates/cli/src/commands/vkey.rs @@ -2,8 +2,7 @@ use std::fs::File; use anyhow::Result; use clap::Parser; -use sp1_sdk::HashableKey; -use sp1_sdk::ProverClient; +use sp1_sdk::{HashableKey, ProverClient}; use std::io::Read; #[derive(Parser)] diff --git a/crates/core/executor/Cargo.toml b/crates/core/executor/Cargo.toml index 0e48407583..273b12aa4c 100644 --- a/crates/core/executor/Cargo.toml +++ 
b/crates/core/executor/Cargo.toml @@ -14,10 +14,8 @@ categories = { workspace = true } sp1-primitives = { workspace = true } sp1-curves = { workspace = true } sp1-stark = { workspace = true } -sp1-derive = { workspace = true } # p3 -p3-keccak-air = { workspace = true } p3-field = { workspace = true } p3-maybe-rayon = { workspace = true, features = ["parallel"] } @@ -26,19 +24,17 @@ serde = { version = "1.0.205", features = ["derive", "rc"] } elf = "0.7.4" rrs_lib = { package = "rrs-succinct", version = "0.1.0" } eyre = "0.6.12" -serde_with = "3.9.0" bincode = "1.3.3" hashbrown = { version = "0.14.5", features = ["serde", "inline-more"] } itertools = "0.13.0" rand = "0.8.5" -generic-array = { version = "1.1.0", features = ["alloc", "serde"] } num = { version = "0.4.3" } typenum = "1.17.0" nohash-hasher = "0.2.0" thiserror = "1.0.63" tracing = "0.1.40" strum_macros = "0.26.4" -strum = "0.26.3" +strum = { version = "0.26.3", features = ["derive"] } log = "0.4.22" hex = "0.4.3" bytemuck = "1.16.3" @@ -51,3 +47,4 @@ sp1-zkvm = { workspace = true } [features] programs = [] +bigint-rug = ["sp1-curves/bigint-rug"] diff --git a/crates/core/executor/src/dependencies.rs b/crates/core/executor/src/dependencies.rs new file mode 100644 index 0000000000..194d8d0eb2 --- /dev/null +++ b/crates/core/executor/src/dependencies.rs @@ -0,0 +1,290 @@ +use crate::{ + events::{create_alu_lookups, AluEvent, CpuEvent}, + utils::{get_msb, get_quotient_and_remainder, is_signed_operation}, + Executor, Opcode, +}; + +/// Emits the dependencies for division and remainder operations. +#[allow(clippy::too_many_lines)] +pub fn emit_divrem_dependencies(executor: &mut Executor, event: AluEvent) { + let (quotient, remainder) = get_quotient_and_remainder(event.b, event.c, event.opcode); + let c_msb = get_msb(event.c); + let rem_msb = get_msb(remainder); + let mut c_neg = 0; + let mut rem_neg = 0; + let is_signed_operation = is_signed_operation(event.opcode); + if is_signed_operation { + c_neg = c_msb; // same as abs_c_alu_event + rem_neg = rem_msb; // same as abs_rem_alu_event + } + + if c_neg == 1 { + executor.record.add_events.push(AluEvent { + lookup_id: event.sub_lookups[4], + shard: event.shard, + clk: event.clk, + opcode: Opcode::ADD, + a: 0, + b: event.c, + c: (event.c as i32).unsigned_abs(), + sub_lookups: create_alu_lookups(), + }); + } + if rem_neg == 1 { + executor.record.add_events.push(AluEvent { + lookup_id: event.sub_lookups[5], + shard: event.shard, + clk: event.clk, + opcode: Opcode::ADD, + a: 0, + b: remainder, + c: (remainder as i32).unsigned_abs(), + sub_lookups: create_alu_lookups(), + }); + } + + let c_times_quotient = { + if is_signed_operation { + (((quotient as i32) as i64) * ((event.c as i32) as i64)).to_le_bytes() + } else { + ((quotient as u64) * (event.c as u64)).to_le_bytes() + } + }; + let lower_word = u32::from_le_bytes(c_times_quotient[0..4].try_into().unwrap()); + let upper_word = u32::from_le_bytes(c_times_quotient[4..8].try_into().unwrap()); + + let lower_multiplication = AluEvent { + lookup_id: event.sub_lookups[0], + shard: event.shard, + clk: event.clk, + opcode: Opcode::MUL, + a: lower_word, + c: event.c, + b: quotient, + sub_lookups: create_alu_lookups(), + }; + executor.record.mul_events.push(lower_multiplication); + + let upper_multiplication = AluEvent { + lookup_id: event.sub_lookups[1], + shard: event.shard, + clk: event.clk, + opcode: { + if is_signed_operation { + Opcode::MULH + } else { + Opcode::MULHU + } + }, + a: upper_word, + c: event.c, + b: quotient, + sub_lookups: 
create_alu_lookups(), + }; + executor.record.mul_events.push(upper_multiplication); + + let lt_event = if is_signed_operation { + AluEvent { + lookup_id: event.sub_lookups[2], + shard: event.shard, + opcode: Opcode::SLTU, + a: 1, + b: (remainder as i32).unsigned_abs(), + c: u32::max(1, (event.c as i32).unsigned_abs()), + clk: event.clk, + sub_lookups: create_alu_lookups(), + } + } else { + AluEvent { + lookup_id: event.sub_lookups[3], + shard: event.shard, + opcode: Opcode::SLTU, + a: 1, + b: remainder, + c: u32::max(1, event.c), + clk: event.clk, + sub_lookups: create_alu_lookups(), + } + }; + + if event.c != 0 { + executor.record.lt_events.push(lt_event); + } +} + +/// Emit the dependencies for CPU events. +#[allow(clippy::too_many_lines)] +pub fn emit_cpu_dependencies(executor: &mut Executor, event: &CpuEvent) { + if matches!( + event.instruction.opcode, + Opcode::LB + | Opcode::LH + | Opcode::LW + | Opcode::LBU + | Opcode::LHU + | Opcode::SB + | Opcode::SH + | Opcode::SW + ) { + let memory_addr = event.b.wrapping_add(event.c); + // Add event to ALU check to check that addr == b + c + let add_event = AluEvent { + lookup_id: event.memory_add_lookup_id, + shard: event.shard, + clk: event.clk, + opcode: Opcode::ADD, + a: memory_addr, + b: event.b, + c: event.c, + sub_lookups: create_alu_lookups(), + }; + executor.record.add_events.push(add_event); + let addr_offset = (memory_addr % 4_u32) as u8; + let mem_value = event.memory_record.unwrap().value(); + + if matches!(event.instruction.opcode, Opcode::LB | Opcode::LH) { + let (unsigned_mem_val, most_sig_mem_value_byte, sign_value) = + match event.instruction.opcode { + Opcode::LB => { + let most_sig_mem_value_byte = mem_value.to_le_bytes()[addr_offset as usize]; + let sign_value = 256; + (most_sig_mem_value_byte as u32, most_sig_mem_value_byte, sign_value) + } + Opcode::LH => { + let sign_value = 65536; + let unsigned_mem_val = match (addr_offset >> 1) % 2 { + 0 => mem_value & 0x0000FFFF, + 1 => (mem_value & 0xFFFF0000) >> 16, + _ => unreachable!(), + }; + let most_sig_mem_value_byte = unsigned_mem_val.to_le_bytes()[1]; + (unsigned_mem_val, most_sig_mem_value_byte, sign_value) + } + _ => unreachable!(), + }; + + if most_sig_mem_value_byte >> 7 & 0x01 == 1 { + let sub_event = AluEvent { + lookup_id: event.memory_sub_lookup_id, + shard: event.shard, + clk: event.clk, + opcode: Opcode::SUB, + a: event.a, + b: unsigned_mem_val, + c: sign_value, + sub_lookups: create_alu_lookups(), + }; + executor.record.add_events.push(sub_event); + } + } + } + + if event.instruction.is_branch_instruction() { + let a_eq_b = event.a == event.b; + let use_signed_comparison = matches!(event.instruction.opcode, Opcode::BLT | Opcode::BGE); + let a_lt_b = if use_signed_comparison { + (event.a as i32) < (event.b as i32) + } else { + event.a < event.b + }; + let a_gt_b = if use_signed_comparison { + (event.a as i32) > (event.b as i32) + } else { + event.a > event.b + }; + + let alu_op_code = if use_signed_comparison { Opcode::SLT } else { Opcode::SLTU }; + // Add the ALU events for the comparisons + let lt_comp_event = AluEvent { + lookup_id: event.branch_lt_lookup_id, + shard: event.shard, + clk: event.clk, + opcode: alu_op_code, + a: a_lt_b as u32, + b: event.a, + c: event.b, + sub_lookups: create_alu_lookups(), + }; + let gt_comp_event = AluEvent { + lookup_id: event.branch_gt_lookup_id, + shard: event.shard, + clk: event.clk, + opcode: alu_op_code, + a: a_gt_b as u32, + b: event.b, + c: event.a, + sub_lookups: create_alu_lookups(), + }; + 
executor.record.lt_events.push(lt_comp_event); + executor.record.lt_events.push(gt_comp_event); + let branching = match event.instruction.opcode { + Opcode::BEQ => a_eq_b, + Opcode::BNE => !a_eq_b, + Opcode::BLT | Opcode::BLTU => a_lt_b, + Opcode::BGE | Opcode::BGEU => a_eq_b || a_gt_b, + _ => unreachable!(), + }; + if branching { + let next_pc = event.pc.wrapping_add(event.c); + let add_event = AluEvent { + lookup_id: event.branch_add_lookup_id, + shard: event.shard, + clk: event.clk, + opcode: Opcode::ADD, + a: next_pc, + b: event.pc, + c: event.c, + sub_lookups: create_alu_lookups(), + }; + executor.record.add_events.push(add_event); + } + } + + if event.instruction.is_jump_instruction() { + match event.instruction.opcode { + Opcode::JAL => { + let next_pc = event.pc.wrapping_add(event.b); + let add_event = AluEvent { + lookup_id: event.jump_jal_lookup_id, + shard: event.shard, + clk: event.clk, + opcode: Opcode::ADD, + a: next_pc, + b: event.pc, + c: event.b, + sub_lookups: create_alu_lookups(), + }; + executor.record.add_events.push(add_event); + } + Opcode::JALR => { + let next_pc = event.b.wrapping_add(event.c); + let add_event = AluEvent { + lookup_id: event.jump_jalr_lookup_id, + shard: event.shard, + clk: event.clk, + opcode: Opcode::ADD, + a: next_pc, + b: event.b, + c: event.c, + sub_lookups: create_alu_lookups(), + }; + executor.record.add_events.push(add_event); + } + _ => unreachable!(), + } + } + + if matches!(event.instruction.opcode, Opcode::AUIPC) { + let add_event = AluEvent { + lookup_id: event.auipc_lookup_id, + shard: event.shard, + clk: event.clk, + opcode: Opcode::ADD, + a: event.a, + b: event.pc, + c: event.b, + sub_lookups: create_alu_lookups(), + }; + executor.record.add_events.push(add_event); + } +} diff --git a/crates/core/executor/src/disassembler/elf.rs b/crates/core/executor/src/disassembler/elf.rs index d2ae639712..555b3f405b 100644 --- a/crates/core/executor/src/disassembler/elf.rs +++ b/crates/core/executor/src/disassembler/elf.rs @@ -1,4 +1,4 @@ -use std::{cmp::min, collections::BTreeMap}; +use std::cmp::min; use elf::{ abi::{EM_RISCV, ET_EXEC, PF_X, PT_LOAD}, @@ -6,6 +6,7 @@ use elf::{ file::Class, ElfBytes, }; +use hashbrown::HashMap; use sp1_primitives::consts::{MAXIMUM_MEMORY_SIZE, WORD_SIZE}; /// RISC-V 32IM ELF (Executable and Linkable Format) File. @@ -26,7 +27,7 @@ pub(crate) struct Elf { /// The base address of the program. pub(crate) pc_base: u32, /// The initial memory image, useful for global constants. - pub(crate) memory_image: BTreeMap, + pub(crate) memory_image: HashMap, } impl Elf { @@ -36,7 +37,7 @@ impl Elf { instructions: Vec, pc_start: u32, pc_base: u32, - memory_image: BTreeMap, + memory_image: HashMap, ) -> Self { Self { instructions, pc_start, pc_base, memory_image } } @@ -50,7 +51,7 @@ impl Elf { /// /// Reference: [Executable and Linkable Format](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) pub(crate) fn decode(input: &[u8]) -> eyre::Result { - let mut image: BTreeMap = BTreeMap::new(); + let mut image: HashMap = HashMap::new(); // Parse the ELF file assuming that it is little-endian.. let elf = ElfBytes::::minimal_parse(input)?; diff --git a/crates/core/executor/src/events/alu.rs b/crates/core/executor/src/events/alu.rs index b2ace9b673..4d72410a96 100644 --- a/crates/core/executor/src/events/alu.rs +++ b/crates/core/executor/src/events/alu.rs @@ -7,15 +7,13 @@ use super::{create_alu_lookups, LookupId}; /// Arithmetic Logic Unit (ALU) Event. 
/// /// This object encapsulated the information needed to prove an ALU operation. This includes its -/// shard, channel, opcode, operands, and other relevant information. +/// shard, opcode, operands, and other relevant information. #[derive(Debug, Clone, Copy, Serialize, Deserialize)] pub struct AluEvent { /// The lookup identifer. pub lookup_id: LookupId, /// The shard number. pub shard: u32, - /// The channel number. - pub channel: u8, /// The clock cycle. pub clk: u32, /// The opcode. @@ -33,11 +31,10 @@ pub struct AluEvent { impl AluEvent { /// Create a new [`AluEvent`]. #[must_use] - pub fn new(shard: u32, channel: u8, clk: u32, opcode: Opcode, a: u32, b: u32, c: u32) -> Self { + pub fn new(shard: u32, clk: u32, opcode: Opcode, a: u32, b: u32, c: u32) -> Self { Self { lookup_id: LookupId::default(), shard, - channel, clk, opcode, a, diff --git a/crates/core/executor/src/events/byte.rs b/crates/core/executor/src/events/byte.rs index 2f2f0f043d..4e5f254373 100644 --- a/crates/core/executor/src/events/byte.rs +++ b/crates/core/executor/src/events/byte.rs @@ -16,13 +16,11 @@ pub const NUM_BYTE_OPS: usize = 9; /// Byte Lookup Event. /// /// This object encapsulates the information needed to prove a byte lookup operation. This includes -/// the shard, channel, opcode, operands, and other relevant information. +/// the shard, opcode, operands, and other relevant information. #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Hash)] pub struct ByteLookupEvent { /// The shard number. pub shard: u32, - /// The channel number. - pub channel: u8, /// The opcode. pub opcode: ByteOpcode, /// The first operand. @@ -55,10 +53,9 @@ pub trait ByteRecord { } /// Adds a `ByteLookupEvent` to verify `a` and `b` are indeed bytes to the shard. - fn add_u8_range_check(&mut self, shard: u32, channel: u8, a: u8, b: u8) { + fn add_u8_range_check(&mut self, shard: u32, a: u8, b: u8) { self.add_byte_lookup_event(ByteLookupEvent { shard, - channel, opcode: ByteOpcode::U8Range, a1: 0, a2: 0, @@ -68,10 +65,9 @@ pub trait ByteRecord { } /// Adds a `ByteLookupEvent` to verify `a` is indeed u16. - fn add_u16_range_check(&mut self, shard: u32, channel: u8, a: u16) { + fn add_u16_range_check(&mut self, shard: u32, a: u16) { self.add_byte_lookup_event(ByteLookupEvent { shard, - channel, opcode: ByteOpcode::U16Range, a1: a, a2: 0, @@ -81,43 +77,36 @@ pub trait ByteRecord { } /// Adds `ByteLookupEvent`s to verify that all the bytes in the input slice are indeed bytes. - fn add_u8_range_checks(&mut self, shard: u32, channel: u8, bytes: &[u8]) { + fn add_u8_range_checks(&mut self, shard: u32, bytes: &[u8]) { let mut index = 0; while index + 1 < bytes.len() { - self.add_u8_range_check(shard, channel, bytes[index], bytes[index + 1]); + self.add_u8_range_check(shard, bytes[index], bytes[index + 1]); index += 2; } if index < bytes.len() { // If the input slice's length is odd, we need to add a check for the last byte. - self.add_u8_range_check(shard, channel, bytes[index], 0); + self.add_u8_range_check(shard, bytes[index], 0); } } /// Adds `ByteLookupEvent`s to verify that all the field elements in the input slice are indeed /// bytes. 
- fn add_u8_range_checks_field( - &mut self, - shard: u32, - channel: u8, - field_values: &[F], - ) { + fn add_u8_range_checks_field(&mut self, shard: u32, field_values: &[F]) { self.add_u8_range_checks( shard, - channel, &field_values.iter().map(|x| x.as_canonical_u32() as u8).collect::>(), ); } /// Adds `ByteLookupEvent`s to verify that all the bytes in the input slice are indeed bytes. - fn add_u16_range_checks(&mut self, shard: u32, channel: u8, ls: &[u16]) { - ls.iter().for_each(|x| self.add_u16_range_check(shard, channel, *x)); + fn add_u16_range_checks(&mut self, shard: u32, ls: &[u16]) { + ls.iter().for_each(|x| self.add_u16_range_check(shard, *x)); } /// Adds a `ByteLookupEvent` to compute the bitwise OR of the two input values. - fn lookup_or(&mut self, shard: u32, channel: u8, b: u8, c: u8) { + fn lookup_or(&mut self, shard: u32, b: u8, c: u8) { self.add_byte_lookup_event(ByteLookupEvent { shard, - channel, opcode: ByteOpcode::OR, a1: (b | c) as u16, a2: 0, @@ -130,8 +119,8 @@ pub trait ByteRecord { impl ByteLookupEvent { /// Creates a new `ByteLookupEvent`. #[must_use] - pub fn new(shard: u32, channel: u8, opcode: ByteOpcode, a1: u16, a2: u8, b: u8, c: u8) -> Self { - Self { shard, channel, opcode, a1, a2, b, c } + pub fn new(shard: u32, opcode: ByteOpcode, a1: u16, a2: u8, b: u8, c: u8) -> Self { + Self { shard, opcode, a1, a2, b, c } } } diff --git a/crates/core/executor/src/events/cpu.rs b/crates/core/executor/src/events/cpu.rs index cf6d9b4dae..b2d775cf12 100644 --- a/crates/core/executor/src/events/cpu.rs +++ b/crates/core/executor/src/events/cpu.rs @@ -7,13 +7,11 @@ use super::{memory::MemoryRecordEnum, LookupId}; /// CPU Event. /// /// This object encapsulates the information needed to prove a CPU operation. This includes its -/// shard, channel, opcode, operands, and other relevant information. +/// shard, opcode, operands, and other relevant information. #[derive(Debug, Copy, Clone, Serialize, Deserialize)] pub struct CpuEvent { /// The shard number. pub shard: u32, - /// The channel number. - pub channel: u8, /// The clock cycle. pub clk: u32, /// The program counter. diff --git a/crates/core/executor/src/events/memory.rs b/crates/core/executor/src/events/memory.rs index 588fb18e99..4372f21267 100644 --- a/crates/core/executor/src/events/memory.rs +++ b/crates/core/executor/src/events/memory.rs @@ -85,6 +85,42 @@ pub enum MemoryRecordEnum { Write(MemoryWriteRecord), } +impl MemoryRecordEnum { + /// Retrieve the current memory record. + #[must_use] + pub fn current_record(&self) -> MemoryRecord { + match self { + MemoryRecordEnum::Read(record) => MemoryRecord { + shard: record.shard, + timestamp: record.timestamp, + value: record.value, + }, + MemoryRecordEnum::Write(record) => MemoryRecord { + shard: record.shard, + timestamp: record.timestamp, + value: record.value, + }, + } + } + + /// Retrieve the previous memory record. + #[must_use] + pub fn previous_record(&self) -> MemoryRecord { + match self { + MemoryRecordEnum::Read(record) => MemoryRecord { + shard: record.prev_shard, + timestamp: record.prev_timestamp, + value: record.value, + }, + MemoryRecordEnum::Write(record) => MemoryRecord { + shard: record.prev_shard, + timestamp: record.prev_timestamp, + value: record.prev_value, + }, + } + } +} + /// Memory Initialize/Finalize Event. /// /// This object encapsulates the information needed to prove a memory initialize or finalize @@ -177,3 +213,18 @@ impl From for MemoryRecordEnum { MemoryRecordEnum::Write(write_record) } } + +/// Memory Local Event. 
+/// +/// This object encapsulates the information needed to prove a memory access operation within a +/// shard. This includes the address, initial memory access, and final memory access within a +/// shard. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MemoryLocalEvent { + /// The address. + pub addr: u32, + /// The initial memory access. + pub initial_mem_access: MemoryRecord, + /// The final memory access. + pub final_mem_access: MemoryRecord, +} diff --git a/crates/core/executor/src/events/mod.rs b/crates/core/executor/src/events/mod.rs index 9981e960af..da38bb83c2 100644 --- a/crates/core/executor/src/events/mod.rs +++ b/crates/core/executor/src/events/mod.rs @@ -5,6 +5,7 @@ mod byte; mod cpu; mod memory; mod precompiles; +mod syscall; mod utils; pub use alu::*; @@ -12,4 +13,5 @@ pub use byte::*; pub use cpu::*; pub use memory::*; pub use precompiles::*; +pub use syscall::*; pub use utils::*; diff --git a/crates/core/executor/src/events/precompiles/ec.rs b/crates/core/executor/src/events/precompiles/ec.rs index ff8fee9844..7b8f2305f2 100644 --- a/crates/core/executor/src/events/precompiles/ec.rs +++ b/crates/core/executor/src/events/precompiles/ec.rs @@ -11,7 +11,7 @@ use typenum::Unsigned; use crate::{ events::{ memory::{MemoryReadRecord, MemoryWriteRecord}, - LookupId, + LookupId, MemoryLocalEvent, }, syscalls::SyscallContext, }; @@ -19,13 +19,11 @@ use crate::{ /// Elliptic Curve Add Event. /// /// This event is emitted when an elliptic curve addition operation is performed. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Default, Debug, Clone, Serialize, Deserialize)] pub struct EllipticCurveAddEvent { pub(crate) lookup_id: LookupId, /// The shard number. pub shard: u32, - /// The channel number. - pub channel: u8, /// The clock cycle. pub clk: u32, /// The pointer to the first point. @@ -40,19 +38,19 @@ pub struct EllipticCurveAddEvent { pub p_memory_records: Vec, /// The memory records for the second point. pub q_memory_records: Vec, + /// The local memory access records. + pub local_mem_access: Vec, } /// Elliptic Curve Double Event. /// /// This event is emitted when an elliptic curve doubling operation is performed. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Default, Debug, Clone, Serialize, Deserialize)] pub struct EllipticCurveDoubleEvent { /// The lookup identifer. pub lookup_id: LookupId, /// The shard number. pub shard: u32, - /// The channel number. - pub channel: u8, /// The clock cycle. pub clk: u32, /// The pointer to the point. @@ -61,19 +59,19 @@ pub struct EllipticCurveDoubleEvent { pub p: Vec, /// The memory records for the point. pub p_memory_records: Vec, + /// The local memory access records. + pub local_mem_access: Vec, } /// Elliptic Curve Point Decompress Event. /// /// This event is emitted when an elliptic curve point decompression operation is performed. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Default, Debug, Clone, Serialize, Deserialize)] pub struct EllipticCurveDecompressEvent { /// The lookup identifer. pub lookup_id: LookupId, /// The shard number. pub shard: u32, - /// The channel number. - pub channel: u8, /// The clock cycle. pub clk: u32, /// The pointer to the point. @@ -88,6 +86,8 @@ pub struct EllipticCurveDecompressEvent { pub x_memory_records: Vec, /// The memory records for the y coordinate. pub y_memory_records: Vec, + /// The local memory access records. + pub local_mem_access: Vec, } /// Create an elliptic curve add event. 
It takes two pointers to memory locations, reads the points @@ -130,7 +130,6 @@ pub fn create_ec_add_event( EllipticCurveAddEvent { lookup_id: rt.syscall_lookup_id, shard: rt.current_shard(), - channel: rt.current_channel(), clk: start_clk, p_ptr, p, @@ -138,6 +137,7 @@ pub fn create_ec_add_event( q, p_memory_records, q_memory_records, + local_mem_access: rt.postprocess(), } } @@ -171,11 +171,11 @@ pub fn create_ec_double_event( EllipticCurveDoubleEvent { lookup_id: rt.syscall_lookup_id, shard: rt.current_shard(), - channel: rt.current_channel(), clk: start_clk, p_ptr, p, p_memory_records, + local_mem_access: rt.postprocess(), } } @@ -219,7 +219,6 @@ pub fn create_ec_decompress_event( EllipticCurveDecompressEvent { lookup_id: rt.syscall_lookup_id, shard: rt.current_shard(), - channel: rt.current_channel(), clk: start_clk, ptr: slice_ptr, sign_bit: sign_bit != 0, @@ -227,5 +226,6 @@ pub fn create_ec_decompress_event( decompressed_y_bytes, x_memory_records, y_memory_records, + local_mem_access: rt.postprocess(), } } diff --git a/crates/core/executor/src/events/precompiles/edwards.rs b/crates/core/executor/src/events/precompiles/edwards.rs index 17daf6837e..3d795b891a 100644 --- a/crates/core/executor/src/events/precompiles/edwards.rs +++ b/crates/core/executor/src/events/precompiles/edwards.rs @@ -3,20 +3,18 @@ use sp1_curves::{edwards::WORDS_FIELD_ELEMENT, COMPRESSED_POINT_BYTES, NUM_BYTES use crate::events::{ memory::{MemoryReadRecord, MemoryWriteRecord}, - LookupId, + LookupId, MemoryLocalEvent, }; /// Edwards Decompress Event. /// /// This event is emitted when an edwards decompression operation is performed. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Default, Debug, Clone, Serialize, Deserialize)] pub struct EdDecompressEvent { /// The lookup identifer. pub lookup_id: LookupId, /// The shard number. pub shard: u32, - /// The channel number. - pub channel: u8, /// The clock cycle. pub clk: u32, /// The pointer to the point. @@ -31,4 +29,6 @@ pub struct EdDecompressEvent { pub x_memory_records: [MemoryWriteRecord; WORDS_FIELD_ELEMENT], /// The memory records for the y coordinate. pub y_memory_records: [MemoryReadRecord; WORDS_FIELD_ELEMENT], + /// The local memory access events. + pub local_mem_access: Vec, } diff --git a/crates/core/executor/src/events/precompiles/fptower.rs b/crates/core/executor/src/events/precompiles/fptower.rs index 1ae2afd005..e4f1c96d59 100644 --- a/crates/core/executor/src/events/precompiles/fptower.rs +++ b/crates/core/executor/src/events/precompiles/fptower.rs @@ -1,11 +1,12 @@ use serde::{Deserialize, Serialize}; -use crate::events::{LookupId, MemoryReadRecord, MemoryWriteRecord}; +use crate::events::{LookupId, MemoryLocalEvent, MemoryReadRecord, MemoryWriteRecord}; /// This is an arithmetic operation for emulating modular arithmetic. -#[derive(PartialEq, Copy, Clone, Debug, Serialize, Deserialize)] +#[derive(Default, PartialEq, Copy, Clone, Debug, Serialize, Deserialize)] pub enum FieldOperation { /// Addition. + #[default] Add, /// Multiplication. Mul, @@ -18,14 +19,12 @@ pub enum FieldOperation { /// Emulated Field Operation Events. /// /// This event is emitted when an emulated field operation is performed on the input operands. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Default, Debug, Clone, Serialize, Deserialize)] pub struct FpOpEvent { /// The lookup id. pub lookup_id: LookupId, /// The shard number. pub shard: u32, - /// The channel number. - pub channel: u8, /// The clock cycle. 
pub clk: u32, /// The pointer to the x operand. @@ -42,19 +41,19 @@ pub struct FpOpEvent { pub x_memory_records: Vec, /// The memory records for the y operand. pub y_memory_records: Vec, + /// The local memory access records. + pub local_mem_access: Vec, } /// Emulated Degree 2 Field Addition/Subtraction Events. /// /// This event is emitted when an emulated degree 2 field operation is performed on the input -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Default, Debug, Clone, Serialize, Deserialize)] pub struct Fp2AddSubEvent { /// The lookup id. pub lookup_id: LookupId, /// The shard number. pub shard: u32, - /// The channel number. - pub channel: u8, /// The clock cycle. pub clk: u32, /// The operation to perform. @@ -71,17 +70,17 @@ pub struct Fp2AddSubEvent { pub x_memory_records: Vec, /// The memory records for the y operand. pub y_memory_records: Vec, + /// The local memory access records. + pub local_mem_access: Vec, } /// Emulated Degree 2 Field Multiplication Events. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Default, Debug, Clone, Serialize, Deserialize)] pub struct Fp2MulEvent { /// The lookup id. pub lookup_id: LookupId, /// The shard number. pub shard: u32, - /// The channel number. - pub channel: u8, /// The clock cycle. pub clk: u32, /// The pointer to the x operand. @@ -96,4 +95,6 @@ pub struct Fp2MulEvent { pub x_memory_records: Vec, /// The memory records for the y operand. pub y_memory_records: Vec, + /// The local memory access records. + pub local_mem_access: Vec, } diff --git a/crates/core/executor/src/events/precompiles/keccak256_permute.rs b/crates/core/executor/src/events/precompiles/keccak256_permute.rs index 19ad295034..ded58bbfed 100644 --- a/crates/core/executor/src/events/precompiles/keccak256_permute.rs +++ b/crates/core/executor/src/events/precompiles/keccak256_permute.rs @@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize}; use crate::events::{ memory::{MemoryReadRecord, MemoryWriteRecord}, - LookupId, + LookupId, MemoryLocalEvent, }; pub(crate) const STATE_SIZE: usize = 25; @@ -10,14 +10,12 @@ pub(crate) const STATE_SIZE: usize = 25; /// Keccak-256 Permutation Event. /// /// This event is emitted when a keccak-256 permutation operation is performed. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Default, Debug, Clone, Serialize, Deserialize)] pub struct KeccakPermuteEvent { /// The lookup identifer. pub lookup_id: LookupId, /// The shard number. pub shard: u32, - /// The channel number. - pub channel: u8, /// The clock cycle. pub clk: u32, /// The pre-state as a list of u64 words. @@ -30,4 +28,6 @@ pub struct KeccakPermuteEvent { pub state_write_records: Vec, /// The address of the state. pub state_addr: u32, + /// The local memory access records. + pub local_mem_access: Vec, } diff --git a/crates/core/executor/src/events/precompiles/mod.rs b/crates/core/executor/src/events/precompiles/mod.rs index ded0f62cd2..cc4f54ed7d 100644 --- a/crates/core/executor/src/events/precompiles/mod.rs +++ b/crates/core/executor/src/events/precompiles/mod.rs @@ -9,7 +9,227 @@ mod uint256; pub use ec::*; pub use edwards::*; pub use fptower::*; +use hashbrown::HashMap; pub use keccak256_permute::*; +use serde::{Deserialize, Serialize}; pub use sha256_compress::*; pub use sha256_extend::*; +use strum::{EnumIter, IntoEnumIterator}; pub use uint256::*; + +use crate::syscalls::SyscallCode; + +use super::{MemoryLocalEvent, SyscallEvent}; + +#[derive(Clone, Debug, Serialize, Deserialize, EnumIter)] +/// Precompile event. 
There should be one variant for every precompile syscall. +pub enum PrecompileEvent { + /// Sha256 extend precompile event. + ShaExtend(ShaExtendEvent), + /// Sha256 compress precompile event. + ShaCompress(ShaCompressEvent), + /// Keccak256 permute precompile event. + KeccakPermute(KeccakPermuteEvent), + /// Edwards curve add precompile event. + EdAdd(EllipticCurveAddEvent), + /// Edwards curve decompress precompile event. + EdDecompress(EdDecompressEvent), + /// Secp256k1 curve add precompile event. + Secp256k1Add(EllipticCurveAddEvent), + /// Secp256k1 curve double precompile event. + Secp256k1Double(EllipticCurveDoubleEvent), + /// Secp256k1 curve decompress precompile event. + Secp256k1Decompress(EllipticCurveDecompressEvent), + /// K256 curve decompress precompile event. + K256Decompress(EllipticCurveDecompressEvent), + /// Bn254 curve add precompile event. + Bn254Add(EllipticCurveAddEvent), + /// Bn254 curve double precompile event. + Bn254Double(EllipticCurveDoubleEvent), + /// Bn254 base field operation precompile event. + Bn254Fp(FpOpEvent), + /// Bn254 quadratic field add/sub precompile event. + Bn254Fp2AddSub(Fp2AddSubEvent), + /// Bn254 quadratic field mul precompile event. + Bn254Fp2Mul(Fp2MulEvent), + /// Bls12-381 curve add precompile event. + Bls12381Add(EllipticCurveAddEvent), + /// Bls12-381 curve double precompile event. + Bls12381Double(EllipticCurveDoubleEvent), + /// Bls12-381 curve decompress precompile event. + Bls12381Decompress(EllipticCurveDecompressEvent), + /// Bls12-381 base field operation precompile event. + Bls12381Fp(FpOpEvent), + /// Bls12-381 quadratic field add/sub precompile event. + Bls12381Fp2AddSub(Fp2AddSubEvent), + /// Bls12-381 quadratic field mul precompile event. + Bls12381Fp2Mul(Fp2MulEvent), + /// Uint256 mul precompile event. + Uint256Mul(Uint256MulEvent), +} + +/// Trait to retrieve all the local memory events from a vec of precompile events. +pub trait PrecompileLocalMemory { + /// Get an iterator of all the local memory events. 
+ fn get_local_mem_events(&self) -> impl IntoIterator; +} + +impl PrecompileLocalMemory for Vec<(SyscallEvent, PrecompileEvent)> { + fn get_local_mem_events(&self) -> impl IntoIterator { + let mut iterators = Vec::new(); + + for (_, event) in self.iter() { + match event { + PrecompileEvent::ShaExtend(e) => { + iterators.push(e.local_mem_access.iter()); + } + PrecompileEvent::ShaCompress(e) => { + iterators.push(e.local_mem_access.iter()); + } + PrecompileEvent::KeccakPermute(e) => { + iterators.push(e.local_mem_access.iter()); + } + PrecompileEvent::EdDecompress(e) => { + iterators.push(e.local_mem_access.iter()); + } + PrecompileEvent::Secp256k1Add(e) + | PrecompileEvent::EdAdd(e) + | PrecompileEvent::Bn254Add(e) + | PrecompileEvent::Bls12381Add(e) => { + iterators.push(e.local_mem_access.iter()); + } + PrecompileEvent::Secp256k1Double(e) + | PrecompileEvent::Bn254Double(e) + | PrecompileEvent::Bls12381Double(e) => { + iterators.push(e.local_mem_access.iter()); + } + PrecompileEvent::Secp256k1Decompress(e) + | PrecompileEvent::K256Decompress(e) + | PrecompileEvent::Bls12381Decompress(e) => { + iterators.push(e.local_mem_access.iter()); + } + PrecompileEvent::Uint256Mul(e) => { + iterators.push(e.local_mem_access.iter()); + } + PrecompileEvent::Bls12381Fp(e) | PrecompileEvent::Bn254Fp(e) => { + iterators.push(e.local_mem_access.iter()); + } + PrecompileEvent::Bls12381Fp2AddSub(e) | PrecompileEvent::Bn254Fp2AddSub(e) => { + iterators.push(e.local_mem_access.iter()); + } + PrecompileEvent::Bls12381Fp2Mul(e) | PrecompileEvent::Bn254Fp2Mul(e) => { + iterators.push(e.local_mem_access.iter()); + } + } + } + + iterators.into_iter().flatten() + } +} + +/// A record of all the precompile events. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct PrecompileEvents { + events: HashMap>, +} + +impl Default for PrecompileEvents { + fn default() -> Self { + let mut events = HashMap::new(); + for syscall_code in SyscallCode::iter() { + if syscall_code.should_send() == 1 { + events.insert(syscall_code, Vec::new()); + } + } + + Self { events } + } +} + +impl PrecompileEvents { + pub(crate) fn append(&mut self, other: &mut PrecompileEvents) { + for (syscall, events) in other.events.iter_mut() { + if !events.is_empty() { + self.events.entry(*syscall).or_default().append(events); + } + } + } + + #[inline] + /// Add a precompile event for a given syscall code. + pub(crate) fn add_event( + &mut self, + syscall_code: SyscallCode, + syscall_event: SyscallEvent, + event: PrecompileEvent, + ) { + assert!(syscall_code.should_send() == 1); + self.events.entry(syscall_code).or_default().push((syscall_event, event)); + } + + /// Checks if the precompile events are empty. + #[inline] + #[must_use] + pub fn is_empty(&self) -> bool { + self.events.is_empty() + } + + /// Get all the precompile events. + pub fn all_events(&self) -> impl Iterator { + self.events.values().flatten() + } + + #[inline] + /// Insert a vector of precompile events for a given syscall code. + pub(crate) fn insert( + &mut self, + syscall_code: SyscallCode, + events: Vec<(SyscallEvent, PrecompileEvent)>, + ) { + assert!(syscall_code.should_send() == 1); + self.events.insert(syscall_code, events); + } + + /// Get the number of precompile events. 
+ #[inline] + #[must_use] + pub fn len(&self) -> usize { + self.events.len() + } + + #[inline] + pub(crate) fn into_iter( + self, + ) -> impl Iterator)> { + self.events.into_iter() + } + + #[inline] + pub(crate) fn iter( + &self, + ) -> impl Iterator)> { + self.events.iter() + } + + /// Get all the precompile events for a given syscall code. + #[inline] + #[must_use] + pub fn get_events( + &self, + syscall_code: SyscallCode, + ) -> Option<&Vec<(SyscallEvent, PrecompileEvent)>> { + assert!(syscall_code.should_send() == 1); + self.events.get(&syscall_code) + } + + /// Get all the local events from all the precompile events. + pub(crate) fn get_local_mem_events(&self) -> impl Iterator { + let mut iterators = Vec::new(); + + for (_, events) in self.events.iter() { + iterators.push(events.get_local_mem_events()); + } + + iterators.into_iter().flatten() + } +} diff --git a/crates/core/executor/src/events/precompiles/sha256_compress.rs b/crates/core/executor/src/events/precompiles/sha256_compress.rs index 4c3f42bf3d..5bf094611d 100644 --- a/crates/core/executor/src/events/precompiles/sha256_compress.rs +++ b/crates/core/executor/src/events/precompiles/sha256_compress.rs @@ -2,20 +2,18 @@ use serde::{Deserialize, Serialize}; use crate::events::{ memory::{MemoryReadRecord, MemoryWriteRecord}, - LookupId, + LookupId, MemoryLocalEvent, }; /// SHA-256 Compress Event. /// /// This event is emitted when a SHA-256 compress operation is performed. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Default, Debug, Clone, Serialize, Deserialize)] pub struct ShaCompressEvent { /// The lookup identifer. pub lookup_id: LookupId, /// The shard number. pub shard: u32, - /// The channel number. - pub channel: u8, /// The clock cycle. pub clk: u32, /// The pointer to the word. @@ -32,4 +30,6 @@ pub struct ShaCompressEvent { pub w_i_read_records: Vec, /// The memory records for the word. pub h_write_records: [MemoryWriteRecord; 8], + /// The local memory accesses. + pub local_mem_access: Vec, } diff --git a/crates/core/executor/src/events/precompiles/sha256_extend.rs b/crates/core/executor/src/events/precompiles/sha256_extend.rs index f63998362d..c6671cea9b 100644 --- a/crates/core/executor/src/events/precompiles/sha256_extend.rs +++ b/crates/core/executor/src/events/precompiles/sha256_extend.rs @@ -1,19 +1,19 @@ use serde::{Deserialize, Serialize}; -use crate::events::memory::{MemoryReadRecord, MemoryWriteRecord}; -use crate::events::LookupId; +use crate::events::{ + memory::{MemoryReadRecord, MemoryWriteRecord}, + LookupId, MemoryLocalEvent, +}; /// SHA-256 Extend Event. /// /// This event is emitted when a SHA-256 extend operation is performed. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Default, Debug, Clone, Serialize, Deserialize)] pub struct ShaExtendEvent { /// The lookup identifer. pub lookup_id: LookupId, /// The shard number. pub shard: u32, - /// The channel number. - pub channel: u8, /// The clock cycle. pub clk: u32, /// The pointer to the word. @@ -28,4 +28,6 @@ pub struct ShaExtendEvent { pub w_i_minus_7_reads: Vec, /// The memory writes of w[i]. pub w_i_writes: Vec, + /// The local memory accesses. 
+ pub local_mem_access: Vec, } diff --git a/crates/core/executor/src/events/precompiles/uint256.rs b/crates/core/executor/src/events/precompiles/uint256.rs index bcd13e140b..4acbcfc3c6 100644 --- a/crates/core/executor/src/events/precompiles/uint256.rs +++ b/crates/core/executor/src/events/precompiles/uint256.rs @@ -2,20 +2,18 @@ use serde::{Deserialize, Serialize}; use crate::events::{ memory::{MemoryReadRecord, MemoryWriteRecord}, - LookupId, + LookupId, MemoryLocalEvent, }; /// Uint256 Mul Event. /// /// This event is emitted when a uint256 mul operation is performed. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Default, Debug, Clone, Serialize, Deserialize)] pub struct Uint256MulEvent { /// The lookup identifer. pub lookup_id: LookupId, /// The shard number. pub shard: u32, - /// The channel number. - pub channel: u8, /// The clock cycle. pub clk: u32, /// The pointer to the x value. @@ -34,4 +32,6 @@ pub struct Uint256MulEvent { pub y_memory_records: Vec, /// The memory records for the modulus. pub modulus_memory_records: Vec, + /// The local memory access records. + pub local_mem_access: Vec, } diff --git a/crates/core/executor/src/events/syscall.rs b/crates/core/executor/src/events/syscall.rs new file mode 100644 index 0000000000..23f9263ba8 --- /dev/null +++ b/crates/core/executor/src/events/syscall.rs @@ -0,0 +1,25 @@ +use serde::{Deserialize, Serialize}; + +use super::LookupId; + +/// Syscall Event. +/// +/// This object encapsulated the information needed to prove a syscall invocation from the CPU table. +/// This includes its shard, clk, syscall id, arguments, other relevant information. +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +pub struct SyscallEvent { + /// The shard number. + pub shard: u32, + /// The clock cycle. + pub clk: u32, + /// The lookup id. + pub lookup_id: LookupId, + /// The syscall id. + pub syscall_id: u32, + /// The first argument. + pub arg1: u32, + /// The second operand. + pub arg2: u32, + /// The nonce for the syscall. + pub nonce: u32, +} diff --git a/crates/core/executor/src/executor.rs b/crates/core/executor/src/executor.rs index 3614e520ae..5a250f3f03 100644 --- a/crates/core/executor/src/executor.rs +++ b/crates/core/executor/src/executor.rs @@ -11,10 +11,11 @@ use thiserror::Error; use crate::{ context::SP1Context, + dependencies::{emit_cpu_dependencies, emit_divrem_dependencies}, events::{ create_alu_lookup_id, create_alu_lookups, AluEvent, CpuEvent, LookupId, - MemoryAccessPosition, MemoryInitializeFinalizeEvent, MemoryReadRecord, MemoryRecord, - MemoryWriteRecord, + MemoryAccessPosition, MemoryInitializeFinalizeEvent, MemoryLocalEvent, MemoryReadRecord, + MemoryRecord, MemoryWriteRecord, SyscallEvent, }, hook::{HookEnv, HookRegistry}, memory::{Entry, PagedMemory}, @@ -30,10 +31,53 @@ use crate::{ /// /// The exeuctor is responsible for executing a user program and tracing important events which /// occur during execution (i.e., memory reads, alu operations, etc). +#[repr(C)] pub struct Executor<'a> { /// The program. pub program: Arc, + /// The mode the executor is running in. + pub executor_mode: ExecutorMode, + + /// Whether the runtime is in constrained mode or not. + /// + /// In unconstrained mode, any events, clock, register, or memory changes are reset after + /// leaving the unconstrained block. The only thing preserved is writes to the input + /// stream. + pub unconstrained: bool, + + /// Whether we should write to the report. + pub print_report: bool, + + /// The maximum size of each shard. 
+ pub shard_size: u32, + + /// The maximimum number of shards to execute at once. + pub shard_batch_size: u32, + + /// The maximum number of cycles for a syscall. + pub max_syscall_cycles: u32, + + /// The mapping between syscall codes and their implementations. + pub syscall_map: HashMap>, + + /// The options for the runtime. + pub opts: SP1CoreOpts, + + /// Memory addresses that were touched in this batch of shards. Used to minimize the size of + /// checkpoints. + pub memory_checkpoint: PagedMemory>, + + /// Memory addresses that were initialized in this batch of shards. Used to minimize the size of + /// checkpoints. The value stored is whether or not it had a value at the beginning of the batch. + pub uninitialized_memory_checkpoint: PagedMemory, + + /// The memory accesses for the current cycle. + pub memory_accesses: MemoryAccessRecord, + + /// The maximum number of cpu cycles to use for execution. + pub max_cycles: Option, + /// The state of the execution. pub state: ExecutionState, @@ -43,14 +87,8 @@ pub struct Executor<'a> { /// The collected records, split by cpu cycles. pub records: Vec, - /// The memory accesses for the current cycle. - pub memory_accesses: MemoryAccessRecord, - - /// The maximum size of each shard. - pub shard_size: u32, - - /// The maximimum number of shards to execute at once. - pub shard_batch_size: u32, + /// Local memory access events. + pub local_memory_access: HashMap, /// A counter for the number of cycles that have been executed in certain functions. pub cycle_tracker: HashMap, @@ -61,46 +99,20 @@ pub struct Executor<'a> { /// A buffer for writing trace events to a file. pub trace_buf: Option>, - /// Whether the runtime is in constrained mode or not. - /// - /// In unconstrained mode, any events, clock, register, or memory changes are reset after - /// leaving the unconstrained block. The only thing preserved is writes to the input - /// stream. - pub unconstrained: bool, - /// The state of the runtime when in unconstrained mode. pub unconstrained_state: ForkState, - /// The mapping between syscall codes and their implementations. - pub syscall_map: HashMap>, - - /// The maximum number of cycles for a syscall. - pub max_syscall_cycles: u32, - - /// The mode the executor is running in. - pub executor_mode: ExecutorMode, - /// Report of the program execution. pub report: ExecutionReport, - /// Whether we should write to the report. - pub print_report: bool, - /// Verifier used to sanity check `verify_sp1_proof` during runtime. pub subproof_verifier: Arc, /// Registry of hooks, to be invoked by writing to certain file descriptors. pub hook_registry: HookRegistry<'a>, - /// The options for the runtime. - pub opts: SP1CoreOpts, - - /// The maximum number of cpu cycles to use for execution. - pub max_cycles: Option, - - /// Memory addresses that were touched in this batch of shards. Used to minimize the size of - /// checkpoints. - pub memory_checkpoint: PagedMemory>, + /// The maximal shapes for the program. + pub maximal_shapes: Option>>, } /// The different modes the executor can run in. @@ -175,7 +187,7 @@ impl<'a> Executor<'a> { let program = Arc::new(program); // Create a default record with the program. - let record = ExecutionRecord { program: program.clone(), ..Default::default() }; + let record = ExecutionRecord::new(program.clone()); // If `TRACE_FILE`` is set, initialize the trace buffer. 
let trace_buf = if let Ok(trace_file) = std::env::var("TRACE_FILE") { @@ -217,6 +229,9 @@ impl<'a> Executor<'a> { opts, max_cycles: context.max_cycles, memory_checkpoint: PagedMemory::new_preallocated(), + uninitialized_memory_checkpoint: PagedMemory::new_preallocated(), + local_memory_access: HashMap::new(), + maximal_shapes: None, } } @@ -345,15 +360,14 @@ impl<'a> Executor<'a> { self.state.current_shard } - /// Get the current channel. - #[must_use] - #[inline] - pub fn channel(&self) -> u8 { - self.state.channel - } - /// Read a word from memory and create an access record. - pub fn mr(&mut self, addr: u32, shard: u32, timestamp: u32) -> MemoryReadRecord { + pub fn mr( + &mut self, + addr: u32, + shard: u32, + timestamp: u32, + local_memory_access: Option<&mut HashMap>, + ) -> MemoryReadRecord { // Get the memory record entry. let entry = self.state.memory.entry(addr); if self.executor_mode == ExecutorMode::Checkpoint || self.unconstrained { @@ -384,21 +398,53 @@ impl<'a> Executor<'a> { Entry::Vacant(entry) => { // If addr has a specific value to be initialized with, use that, otherwise 0. let value = self.state.uninitialized_memory.get(addr).unwrap_or(&0); + self.uninitialized_memory_checkpoint.entry(addr).or_insert_with(|| *value != 0); entry.insert(MemoryRecord { value: *value, shard: 0, timestamp: 0 }) } }; - let value = record.value; - let prev_shard = record.shard; - let prev_timestamp = record.timestamp; + + let prev_record = *record; record.shard = shard; record.timestamp = timestamp; + if !self.unconstrained { + let local_memory_access = if let Some(local_memory_access) = local_memory_access { + local_memory_access + } else { + &mut self.local_memory_access + }; + + local_memory_access + .entry(addr) + .and_modify(|e| { + e.final_mem_access = *record; + }) + .or_insert(MemoryLocalEvent { + addr, + initial_mem_access: prev_record, + final_mem_access: *record, + }); + } + // Construct the memory read record. - MemoryReadRecord::new(value, shard, timestamp, prev_shard, prev_timestamp) + MemoryReadRecord::new( + record.value, + record.shard, + record.timestamp, + prev_record.shard, + prev_record.timestamp, + ) } /// Write a word to memory and create an access record. - pub fn mw(&mut self, addr: u32, value: u32, shard: u32, timestamp: u32) -> MemoryWriteRecord { + pub fn mw( + &mut self, + addr: u32, + value: u32, + shard: u32, + timestamp: u32, + local_memory_access: Option<&mut HashMap>, + ) -> MemoryWriteRecord { // Get the memory record entry. let entry = self.state.memory.entry(addr); if self.executor_mode == ExecutorMode::Checkpoint || self.unconstrained { @@ -429,19 +475,45 @@ impl<'a> Executor<'a> { Entry::Vacant(entry) => { // If addr has a specific value to be initialized with, use that, otherwise 0. 
let value = self.state.uninitialized_memory.get(addr).unwrap_or(&0); + self.uninitialized_memory_checkpoint.entry(addr).or_insert_with(|| *value != 0); entry.insert(MemoryRecord { value: *value, shard: 0, timestamp: 0 }) } }; - let prev_value = record.value; - let prev_shard = record.shard; - let prev_timestamp = record.timestamp; + + let prev_record = *record; record.value = value; record.shard = shard; record.timestamp = timestamp; + if !self.unconstrained { + let local_memory_access = if let Some(local_memory_access) = local_memory_access { + local_memory_access + } else { + &mut self.local_memory_access + }; + + local_memory_access + .entry(addr) + .and_modify(|e| { + e.final_mem_access = *record; + }) + .or_insert(MemoryLocalEvent { + addr, + initial_mem_access: prev_record, + final_mem_access: *record, + }); + } + // Construct the memory write record. - MemoryWriteRecord::new(value, shard, timestamp, prev_value, prev_shard, prev_timestamp) + MemoryWriteRecord::new( + record.value, + record.shard, + record.timestamp, + prev_record.value, + prev_record.shard, + prev_record.timestamp, + ) } /// Read from memory, assuming that all addresses are aligned. @@ -450,7 +522,7 @@ impl<'a> Executor<'a> { assert_valid_memory_access!(addr, position); // Read the address from memory and create a memory read record. - let record = self.mr(addr, self.shard(), self.timestamp(&position)); + let record = self.mr(addr, self.shard(), self.timestamp(&position), None); // If we're not in unconstrained mode, record the access for the current cycle. if !self.unconstrained && self.executor_mode == ExecutorMode::Trace { @@ -475,7 +547,7 @@ impl<'a> Executor<'a> { assert_valid_memory_access!(addr, position); // Read the address from memory and create a memory read record. - let record = self.mw(addr, value, self.shard(), self.timestamp(&position)); + let record = self.mw(addr, value, self.shard(), self.timestamp(&position), None); // If we're not in unconstrained mode, record the access for the current cycle. if !self.unconstrained && self.executor_mode == ExecutorMode::Trace { @@ -522,7 +594,6 @@ impl<'a> Executor<'a> { fn emit_cpu( &mut self, shard: u32, - channel: u8, clk: u32, pc: u32, next_pc: u32, @@ -538,7 +609,6 @@ impl<'a> Executor<'a> { ) { let cpu_event = CpuEvent { shard, - channel, clk, pc, next_pc, @@ -565,6 +635,7 @@ impl<'a> Executor<'a> { }; self.record.cpu_events.push(cpu_event); + emit_cpu_dependencies(self, &cpu_event); } /// Emit an ALU event. @@ -573,7 +644,6 @@ impl<'a> Executor<'a> { lookup_id, shard: self.shard(), clk, - channel: self.channel(), opcode, a, b, @@ -604,11 +674,45 @@ impl<'a> Executor<'a> { } Opcode::DIVU | Opcode::REMU | Opcode::DIV | Opcode::REM => { self.record.divrem_events.push(event); + emit_divrem_dependencies(self, event); } _ => {} } } + #[inline] + pub(crate) fn syscall_event( + &self, + clk: u32, + syscall_id: u32, + arg1: u32, + arg2: u32, + lookup_id: LookupId, + ) -> SyscallEvent { + SyscallEvent { + shard: self.shard(), + clk, + syscall_id, + arg1, + arg2, + lookup_id, + nonce: self.record.nonce_lookup[&lookup_id], + } + } + + fn emit_syscall( + &mut self, + clk: u32, + syscall_id: u32, + arg1: u32, + arg2: u32, + lookup_id: LookupId, + ) { + let syscall_event = self.syscall_event(clk, syscall_id, arg1, arg2, lookup_id); + + self.record.syscall_events.push(syscall_event); + } + /// Fetch the destination register and input operand values for an ALU instruction. 
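// Illustrative sketch (not part of the patch): both `mr` and `mw` above track, per
// address, the first and last memory record seen within the current shard using
// `entry().and_modify().or_insert(..)`. A simplified stand-in of that bookkeeping
// (types here are placeholders, not the SP1 `MemoryRecord`/`MemoryLocalEvent`):
use std::collections::HashMap;

#[derive(Clone, Copy, Debug, PartialEq)]
struct MemRecord {
    value: u32,
    timestamp: u32,
}

struct LocalMemEvent {
    addr: u32,
    initial: MemRecord,
    final_: MemRecord,
}

fn record_access(
    local: &mut HashMap<u32, LocalMemEvent>,
    addr: u32,
    prev: MemRecord,
    new: MemRecord,
) {
    local
        .entry(addr)
        // Address already touched in this shard: only the final access moves forward.
        .and_modify(|e| e.final_ = new)
        // First touch in this shard: keep the record as it was *before* the access.
        .or_insert(LocalMemEvent { addr, initial: prev, final_: new });
}

fn main() {
    let mut local = HashMap::new();
    record_access(
        &mut local,
        0x100,
        MemRecord { value: 0, timestamp: 0 },
        MemRecord { value: 7, timestamp: 4 },
    );
    record_access(
        &mut local,
        0x100,
        MemRecord { value: 7, timestamp: 4 },
        MemRecord { value: 9, timestamp: 8 },
    );
    let e = &local[&0x100];
    assert_eq!(e.initial.timestamp, 0);
    assert_eq!(e.final_.timestamp, 8);
}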
fn alu_rr(&mut self, instruction: &Instruction) -> (Register, u32, u32) { if !instruction.imm_c { @@ -674,6 +778,7 @@ impl<'a> Executor<'a> { } /// Fetch the instruction at the current program counter. + #[inline] fn fetch(&self) -> Instruction { let idx = ((self.state.pc - self.program.pc_base) / 4) as usize; self.program.instructions[idx] @@ -707,8 +812,32 @@ impl<'a> Executor<'a> { LookupId::default() }; - if self.print_report && !self.unconstrained { + if !self.unconstrained { self.report.opcode_counts[instruction.opcode] += 1; + self.report.event_counts[instruction.opcode] += 1; + match instruction.opcode { + Opcode::LB | Opcode::LH | Opcode::LW | Opcode::LBU | Opcode::LHU => { + self.report.event_counts[Opcode::ADD] += 2; + } + Opcode::JAL | Opcode::JALR | Opcode::AUIPC => { + self.report.event_counts[Opcode::ADD] += 1; + } + Opcode::BEQ + | Opcode::BNE + | Opcode::BLT + | Opcode::BGE + | Opcode::BLTU + | Opcode::BGEU => { + self.report.event_counts[Opcode::ADD] += 1; + self.report.event_counts[Opcode::SLTU] += 2; + } + Opcode::DIVU | Opcode::REMU | Opcode::DIV | Opcode::REM => { + self.report.event_counts[Opcode::MUL] += 2; + self.report.event_counts[Opcode::ADD] += 2; + self.report.event_counts[Opcode::SLTU] += 1; + } + _ => {} + }; } match instruction.opcode { @@ -941,7 +1070,23 @@ impl<'a> Executor<'a> { return Err(ExecutionError::InvalidSyscallUsage(syscall_id as u64)); } + // Update the syscall counts. + let syscall_for_count = syscall.count_map(); + let syscall_count = self.state.syscall_counts.entry(syscall_for_count).or_insert(0); + let (threshold, multiplier) = match syscall_for_count { + SyscallCode::KECCAK_PERMUTE => (self.opts.split_opts.keccak, 24), + SyscallCode::SHA_EXTEND => (self.opts.split_opts.sha_extend, 48), + SyscallCode::SHA_COMPRESS => (self.opts.split_opts.sha_compress, 80), + _ => (self.opts.split_opts.deferred, 1), + }; + let nonce = (((*syscall_count as usize) % threshold) * multiplier) as u32; + self.record.nonce_lookup.insert(syscall_lookup_id, nonce); + *syscall_count += 1; + let syscall_impl = self.get_syscall(syscall).cloned(); + if syscall.should_send() != 0 { + self.emit_syscall(clk, syscall.syscall_id(), b, c, syscall_lookup_id); + } let mut precompile_rt = SyscallContext::new(self); precompile_rt.syscall_lookup_id = syscall_lookup_id; let (precompile_next_pc, precompile_cycles, returned_exit_code) = @@ -949,7 +1094,7 @@ impl<'a> Executor<'a> { // Executing a syscall optionally returns a value to write to the t0 // register. If it returns None, we just keep the // syscall_id in t0. - let res = syscall_impl.execute(&mut precompile_rt, b, c); + let res = syscall_impl.execute(&mut precompile_rt, syscall, b, c); if let Some(val) = res { a = val; } else { @@ -980,19 +1125,6 @@ impl<'a> Executor<'a> { next_pc = precompile_next_pc; self.state.clk += precompile_cycles; exit_code = returned_exit_code; - - // Update the syscall counts. 
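// Illustrative sketch (not part of the patch): the nonce assigned to a syscall lookup
// above is derived from how many invocations of that syscall the shard has already
// seen, wrapped at the split threshold and scaled by the per-call multiplier from the
// match (24 for KECCAK_PERMUTE, 48 for SHA_EXTEND, 80 for SHA_COMPRESS, 1 otherwise).
// The threshold below is a made-up placeholder, not a real `SplitOpts` default.
fn syscall_nonce(count_so_far: usize, threshold: usize, multiplier: usize) -> u32 {
    ((count_so_far % threshold) * multiplier) as u32
}

fn main() {
    let threshold = 8; // placeholder split threshold
    let multiplier = 24; // e.g. the KECCAK_PERMUTE multiplier
    // The first call in each chunk of `threshold` calls starts again at nonce 0.
    assert_eq!(syscall_nonce(0, threshold, multiplier), 0);
    assert_eq!(syscall_nonce(1, threshold, multiplier), 24);
    assert_eq!(syscall_nonce(8, threshold, multiplier), 0);
}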
- let syscall_for_count = syscall.count_map(); - let syscall_count = self.state.syscall_counts.entry(syscall_for_count).or_insert(0); - let (threshold, multiplier) = match syscall_for_count { - SyscallCode::KECCAK_PERMUTE => (self.opts.split_opts.keccak, 24), - SyscallCode::SHA_EXTEND => (self.opts.split_opts.sha_extend, 48), - SyscallCode::SHA_COMPRESS => (self.opts.split_opts.sha_compress, 80), - _ => (self.opts.split_opts.deferred, 1), - }; - let nonce = (((*syscall_count as usize) % threshold) * multiplier) as u32; - self.record.nonce_lookup.insert(syscall_lookup_id, nonce); - *syscall_count += 1; } Opcode::EBREAK => { return Err(ExecutionError::Breakpoint()); @@ -1068,18 +1200,10 @@ impl<'a> Executor<'a> { // Update the clk to the next cycle. self.state.clk += 4; - let channel = self.channel(); - - // Update the channel to the next cycle. - if !self.unconstrained { - self.state.channel = (self.state.channel + 1) % NUM_BYTE_LOOKUP_CHANNELS; - } - // Emit the CPU event for this cycle. if self.executor_mode == ExecutorMode::Trace { self.emit_cpu( self.shard(), - channel, clk, pc, next_pc, @@ -1099,11 +1223,13 @@ impl<'a> Executor<'a> { /// Executes one cycle of the program, returning whether the program has finished. #[inline] + #[allow(clippy::too_many_lines)] fn execute_cycle(&mut self) -> Result { // Fetch the instruction at the current program counter. let instruction = self.fetch(); // Log the current state of the runtime. + #[cfg(debug_assertions)] self.log(&instruction); // Execute the instruction. @@ -1112,14 +1238,135 @@ impl<'a> Executor<'a> { // Increment the clock. self.state.global_clk += 1; - // If there's not enough cycles left for another instruction, move to the next shard. - // We multiply by 4 because clk is incremented by 4 for each normal instruction. - if !self.unconstrained && self.max_syscall_cycles + self.state.clk >= self.shard_size { - self.state.current_shard += 1; - self.state.clk = 0; - self.state.channel = 0; + if !self.unconstrained { + // If there's not enough cycles left for another instruction, move to the next shard. + let cpu_exit = self.max_syscall_cycles + self.state.clk >= self.shard_size; + + // Every N cycles, check if there exists at least one shape that fits. + // + // If we're close to not fitting, early stop the shard to ensure we don't OOM. 
+ let mut shape_match_found = true; + if self.state.global_clk % 16 == 0 { + let addsub_count = (self.report.event_counts[Opcode::ADD] + + self.report.event_counts[Opcode::SUB]) + as usize; + let mul_count = (self.report.event_counts[Opcode::MUL] + + self.report.event_counts[Opcode::MULH] + + self.report.event_counts[Opcode::MULHU] + + self.report.event_counts[Opcode::MULHSU]) + as usize; + let bitwise_count = (self.report.event_counts[Opcode::XOR] + + self.report.event_counts[Opcode::OR] + + self.report.event_counts[Opcode::AND]) + as usize; + let shift_left_count = self.report.event_counts[Opcode::SLL] as usize; + let shift_right_count = (self.report.event_counts[Opcode::SRL] + + self.report.event_counts[Opcode::SRA]) + as usize; + let divrem_count = (self.report.event_counts[Opcode::DIV] + + self.report.event_counts[Opcode::DIVU] + + self.report.event_counts[Opcode::REM] + + self.report.event_counts[Opcode::REMU]) + as usize; + let lt_count = (self.report.event_counts[Opcode::SLT] + + self.report.event_counts[Opcode::SLTU]) + as usize; + + if let Some(maximal_shapes) = &self.maximal_shapes { + shape_match_found = false; + + for shape in maximal_shapes.iter() { + let addsub_threshold = 1 << shape["AddSub"]; + if addsub_count > addsub_threshold { + continue; + } + let addsub_distance = addsub_threshold - addsub_count; - self.bump_record(); + let mul_threshold = 1 << shape["Mul"]; + if mul_count > mul_threshold { + continue; + } + let mul_distance = mul_threshold - mul_count; + + let bitwise_threshold = 1 << shape["Bitwise"]; + if bitwise_count > bitwise_threshold { + continue; + } + let bitwise_distance = bitwise_threshold - bitwise_count; + + let shift_left_threshold = 1 << shape["ShiftLeft"]; + if shift_left_count > shift_left_threshold { + continue; + } + let shift_left_distance = shift_left_threshold - shift_left_count; + + let shift_right_threshold = 1 << shape["ShiftRight"]; + if shift_right_count > shift_right_threshold { + continue; + } + let shift_right_distance = shift_right_threshold - shift_right_count; + + let divrem_threshold = 1 << shape["DivRem"]; + if divrem_count > divrem_threshold { + continue; + } + let divrem_distance = divrem_threshold - divrem_count; + + let lt_threshold = 1 << shape["Lt"]; + if lt_count > lt_threshold { + continue; + } + let lt_distance = lt_threshold - lt_count; + + let l_infinity = vec![ + addsub_distance, + mul_distance, + bitwise_distance, + shift_left_distance, + shift_right_distance, + divrem_distance, + lt_distance, + ] + .into_iter() + .min() + .unwrap(); + + if l_infinity >= 32 { + shape_match_found = true; + break; + } + } + + if !shape_match_found { + log::warn!( + "stopping shard early due to no shapes fitting: \ + nb_cycles={}, \ + addsub_count={}, \ + mul_count={}, \ + bitwise_count={}, \ + shift_left_count={}, \ + shift_right_count={}, \ + divrem_count={}, \ + lt_count={}", + self.state.clk / 4, + log2_ceil_usize(addsub_count), + log2_ceil_usize(mul_count), + log2_ceil_usize(bitwise_count), + log2_ceil_usize(shift_left_count), + log2_ceil_usize(shift_right_count), + log2_ceil_usize(divrem_count), + log2_ceil_usize(lt_count), + ); + } + } + } + + if cpu_exit || !shape_match_found { + self.state.current_shard += 1; + self.state.clk = 0; + self.report.event_counts = Box::default(); + self.bump_record(); + } } // If the cycle limit is exceeded, return an error. @@ -1141,6 +1388,11 @@ impl<'a> Executor<'a> { /// Bump the record. 
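// Illustrative sketch (not part of the patch): the early-exit check above asks, for
// each maximal shape, whether every event count still fits under `1 << log2_height`
// for its chip, and additionally requires a safety margin (the smallest remaining
// headroom must be at least 32). A simplified stand-in with two chip categories:
use std::collections::HashMap;

/// Returns true if `counts` fits inside `shape` (log2 heights) with `margin` headroom.
/// Panics if a chip named in `counts` is missing from the shape (fine for a sketch).
fn fits_with_margin(
    counts: &HashMap<&str, usize>,
    shape: &HashMap<&str, usize>,
    margin: usize,
) -> bool {
    let mut min_headroom = usize::MAX;
    for (chip, &count) in counts {
        let capacity = 1usize << shape[chip];
        if count > capacity {
            return false;
        }
        min_headroom = min_headroom.min(capacity - count);
    }
    min_headroom >= margin
}

fn main() {
    let shape = HashMap::from([("AddSub", 10), ("Mul", 8)]); // capacities 1024 and 256
    let counts = HashMap::from([("AddSub", 900), ("Mul", 250)]);
    // Mul only has 6 rows of headroom left, so this shard should stop early.
    assert!(!fits_with_margin(&counts, &shape, 32));
}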
pub fn bump_record(&mut self) { + // Copy all of the existing local memory accesses to the record's local_memory_access vec. + for (_, event) in self.local_memory_access.drain() { + self.record.cpu_local_memory_access.push(event); + } + let removed_record = std::mem::replace(&mut self.record, ExecutionRecord::new(self.program.clone())); let public_values = removed_record.public_values; @@ -1171,16 +1423,20 @@ impl<'a> Executor<'a> { self.memory_checkpoint.clear(); self.executor_mode = ExecutorMode::Checkpoint; - // Take memory out of state before cloning it so that memory is not cloned. + // Clone self.state without memory and uninitialized_memory in it so it's faster. let memory = std::mem::take(&mut self.state.memory); + let uninitialized_memory = std::mem::take(&mut self.state.uninitialized_memory); let mut checkpoint = tracing::info_span!("clone").in_scope(|| self.state.clone()); self.state.memory = memory; + self.state.uninitialized_memory = uninitialized_memory; let done = tracing::info_span!("execute").in_scope(|| self.execute())?; // Create a checkpoint using `memory_checkpoint`. Just include all memory if `done` since we // need it all for MemoryFinalize. tracing::info_span!("create memory checkpoint").in_scope(|| { let memory_checkpoint = std::mem::take(&mut self.memory_checkpoint); + let uninitialized_memory_checkpoint = + std::mem::take(&mut self.uninitialized_memory_checkpoint); if done { // If we're done, we need to include all memory. But we need to reset any modified // memory to as it was before the execution. @@ -1192,11 +1448,23 @@ impl<'a> Executor<'a> { checkpoint.memory.remove(addr); } }); + checkpoint.uninitialized_memory = self.state.uninitialized_memory.clone(); + // Remove memory that was written to in this batch. + for (addr, is_old) in uninitialized_memory_checkpoint { + if !is_old { + checkpoint.uninitialized_memory.remove(addr); + } + } } else { checkpoint.memory = memory_checkpoint .into_iter() .filter_map(|(addr, record)| record.map(|record| (addr, record))) .collect(); + checkpoint.uninitialized_memory = uninitialized_memory_checkpoint + .into_iter() + .filter(|&(_, has_value)| has_value) + .map(|(addr, _)| (addr, *self.state.uninitialized_memory.get(addr).unwrap())) + .collect(); } }); Ok((checkpoint, done)) @@ -1204,7 +1472,6 @@ impl<'a> Executor<'a> { fn initialize(&mut self) { self.state.clk = 0; - self.state.channel = 0; tracing::debug!("loading memory image"); for (&addr, value) in &self.program.memory_image { @@ -1238,7 +1505,7 @@ impl<'a> Executor<'a> { /// Executes up to `self.shard_batch_size` cycles of the program, returning whether the program /// has finished. - fn execute(&mut self) -> Result { + pub fn execute(&mut self) -> Result { // Get the program. let program = self.program.clone(); @@ -1273,11 +1540,6 @@ impl<'a> Executor<'a> { // Get the final public values. let public_values = self.record.public_values; - // Push the remaining execution record, if there are any CPU events. - if !self.record.cpu_events.is_empty() { - self.bump_record(); - } - if done { self.postprocess(); @@ -1285,6 +1547,11 @@ impl<'a> Executor<'a> { self.bump_record(); } + // Push the remaining execution record, if there are any CPU events. + if !self.record.cpu_events.is_empty() { + self.bump_record(); + } + // Set the global public values for all shards. 
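// Illustrative sketch (not part of the patch): the checkpointing code above
// temporarily moves the large memory maps out of the state with `std::mem::take` so
// that the checkpoint clone copies everything *except* memory, then puts the maps
// back. A minimal stand-in of that pattern:
use std::collections::HashMap;

#[derive(Clone, Default)]
struct State {
    pc: u32,
    memory: HashMap<u32, u32>, // potentially huge; we don't want it in the clone
}

fn cheap_checkpoint(state: &mut State) -> State {
    // Move the big map out, clone the rest, then restore the map.
    let memory = std::mem::take(&mut state.memory);
    let checkpoint = state.clone(); // clones with an empty `memory`
    state.memory = memory;
    checkpoint
}

fn main() {
    let mut state = State { pc: 4, memory: HashMap::from([(0x100, 7)]) };
    let cp = cheap_checkpoint(&mut state);
    assert_eq!(cp.pc, 4);
    assert!(cp.memory.is_empty());
    assert_eq!(state.memory[&0x100], 7); // original memory is untouched
}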
let mut last_next_pc = 0; let mut last_exit_code = 0; @@ -1341,49 +1608,51 @@ impl<'a> Executor<'a> { tracing::warn!("Not all input bytes were read."); } - // SECTION: Set up all MemoryInitializeFinalizeEvents needed for memory argument. - let memory_finalize_events = &mut self.record.memory_finalize_events; - - // We handle the addr = 0 case separately, as we constrain it to be 0 in the first row - // of the memory finalize table so it must be first in the array of events. - let addr_0_record = self.state.memory.get(0); + if self.executor_mode == ExecutorMode::Trace { + // SECTION: Set up all MemoryInitializeFinalizeEvents needed for memory argument. + let memory_finalize_events = &mut self.record.global_memory_finalize_events; - let addr_0_final_record = match addr_0_record { - Some(record) => record, - None => &MemoryRecord { value: 0, shard: 0, timestamp: 1 }, - }; - memory_finalize_events - .push(MemoryInitializeFinalizeEvent::finalize_from_record(0, addr_0_final_record)); - - let memory_initialize_events = &mut self.record.memory_initialize_events; - let addr_0_initialize_event = - MemoryInitializeFinalizeEvent::initialize(0, 0, addr_0_record.is_some()); - memory_initialize_events.push(addr_0_initialize_event); - - // Count the number of touched memory addresses manually, since `PagedMemory` doesn't - // already know its length. - self.report.touched_memory_addresses = 0; - for addr in self.state.memory.keys() { - self.report.touched_memory_addresses += 1; - if addr == 0 { - // Handled above. - continue; - } - - // Program memory is initialized in the MemoryProgram chip and doesn't require any - // events, so we only send init events for other memory addresses. - if !self.record.program.memory_image.contains_key(&addr) { - let initial_value = self.state.uninitialized_memory.get(addr).unwrap_or(&0); - memory_initialize_events.push(MemoryInitializeFinalizeEvent::initialize( - addr, - *initial_value, - true, - )); - } + // We handle the addr = 0 case separately, as we constrain it to be 0 in the first row + // of the memory finalize table so it must be first in the array of events. + let addr_0_record = self.state.memory.get(0); - let record = *self.state.memory.get(addr).unwrap(); + let addr_0_final_record = match addr_0_record { + Some(record) => record, + None => &MemoryRecord { value: 0, shard: 0, timestamp: 1 }, + }; memory_finalize_events - .push(MemoryInitializeFinalizeEvent::finalize_from_record(addr, &record)); + .push(MemoryInitializeFinalizeEvent::finalize_from_record(0, addr_0_final_record)); + + let memory_initialize_events = &mut self.record.global_memory_initialize_events; + let addr_0_initialize_event = + MemoryInitializeFinalizeEvent::initialize(0, 0, addr_0_record.is_some()); + memory_initialize_events.push(addr_0_initialize_event); + + // Count the number of touched memory addresses manually, since `PagedMemory` doesn't + // already know its length. + self.report.touched_memory_addresses = 0; + for addr in self.state.memory.keys() { + self.report.touched_memory_addresses += 1; + if addr == 0 { + // Handled above. + continue; + } + + // Program memory is initialized in the MemoryProgram chip and doesn't require any + // events, so we only send init events for other memory addresses. 
+ if !self.record.program.memory_image.contains_key(&addr) { + let initial_value = self.state.uninitialized_memory.get(addr).unwrap_or(&0); + memory_initialize_events.push(MemoryInitializeFinalizeEvent::initialize( + addr, + *initial_value, + true, + )); + } + + let record = *self.state.memory.get(addr).unwrap(); + memory_finalize_events + .push(MemoryInitializeFinalizeEvent::finalize_from_record(addr, &record)); + } } } @@ -1392,6 +1661,7 @@ impl<'a> Executor<'a> { } #[inline] + #[cfg(debug_assertions)] fn log(&mut self, _: &Instruction) { // Write the current program counter to the trace buffer for the cycle tracer. if let Some(ref mut buf) = self.trace_buf { @@ -1419,9 +1689,9 @@ pub const fn align(addr: u32) -> u32 { addr - addr % 4 } -// TODO: FIX -/// The number of different byte lookup channels. -pub const NUM_BYTE_LOOKUP_CHANNELS: u8 = 16; +fn log2_ceil_usize(n: usize) -> usize { + (usize::BITS - n.saturating_sub(1).leading_zeros()) as usize +} #[cfg(test)] mod tests { diff --git a/crates/core/executor/src/io.rs b/crates/core/executor/src/io.rs index 682a0b8354..767697c604 100644 --- a/crates/core/executor/src/io.rs +++ b/crates/core/executor/src/io.rs @@ -1,9 +1,10 @@ use std::io::Read; use serde::{de::DeserializeOwned, Serialize}; -use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, ShardProof, StarkVerifyingKey}; +use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkVerifyingKey}; use super::Executor; +use crate::SP1ReduceProof; impl<'a> Read for Executor<'a> { fn read(&mut self, buf: &mut [u8]) -> std::io::Result { @@ -35,7 +36,7 @@ impl<'a> Executor<'a> { /// Write a proof and verifying key to the proof stream. pub fn write_proof( &mut self, - proof: ShardProof, + proof: SP1ReduceProof, vk: StarkVerifyingKey, ) { self.state.proof_stream.push((proof, vk)); diff --git a/crates/core/executor/src/lib.rs b/crates/core/executor/src/lib.rs index 472bc7d7c8..a4b6a06ced 100644 --- a/crates/core/executor/src/lib.rs +++ b/crates/core/executor/src/lib.rs @@ -20,6 +20,7 @@ #![warn(missing_docs)] mod context; +mod dependencies; mod disassembler; pub mod events; mod executor; @@ -32,11 +33,14 @@ mod program; #[cfg(any(test, feature = "programs"))] pub mod programs; mod record; +mod reduce; mod register; mod report; +mod shape; mod state; pub mod subproof; pub mod syscalls; +mod utils; pub use context::*; pub use executor::*; @@ -45,6 +49,9 @@ pub use instruction::*; pub use opcode::*; pub use program::*; pub use record::*; +pub use reduce::*; pub use register::*; pub use report::*; +pub use shape::*; pub use state::*; +pub use utils::*; diff --git a/crates/core/executor/src/memory.rs b/crates/core/executor/src/memory.rs index e6166c1c0c..6e375753d4 100644 --- a/crates/core/executor/src/memory.rs +++ b/crates/core/executor/src/memory.rs @@ -1,45 +1,50 @@ -use std::mem::{replace, size_of}; - -use serde::{Deserialize, Serialize}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use vec_map::VecMap; /// A page of memory. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Page(VecMap); -impl Page { - /// Create a `Page` with capacity `PAGE_LEN`. 
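// Illustrative sketch (not part of the patch): the `log2_ceil_usize` helper introduced
// above computes ceil(log2(n)) via `leading_zeros`, with both 0 and 1 mapping to 0.
// A quick check of its edge cases:
fn log2_ceil_usize(n: usize) -> usize {
    (usize::BITS - n.saturating_sub(1).leading_zeros()) as usize
}

fn main() {
    assert_eq!(log2_ceil_usize(0), 0);
    assert_eq!(log2_ceil_usize(1), 0);
    assert_eq!(log2_ceil_usize(2), 1);
    assert_eq!(log2_ceil_usize(3), 2);
    assert_eq!(log2_ceil_usize(1024), 10);
    assert_eq!(log2_ceil_usize(1025), 11);
}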
- pub fn with_capacity(capacity: usize) -> Self { - Self(VecMap::with_capacity(capacity)) +impl Default for Page { + fn default() -> Self { + Self(VecMap::default()) } } -impl Default for Page { +const LOG_PAGE_LEN: usize = 15; +const PAGE_LEN: usize = 1 << LOG_PAGE_LEN; +const MAX_PAGE_COUNT: usize = ((1 << 31) - (1 << 27)) / 4 / PAGE_LEN + 1; +const NO_PAGE: usize = usize::MAX; +const PAGE_MASK: usize = PAGE_LEN - 1; + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(bound(serialize = "V: Serialize"))] +#[serde(bound(deserialize = "V: DeserializeOwned"))] +pub struct NewPage(Vec>); + +impl NewPage { + pub fn new() -> Self { + Self(vec![None; PAGE_LEN]) + } +} + +impl Default for NewPage { fn default() -> Self { - Self(VecMap::default()) + Self::new() } } /// Paged memory. Balances both memory locality and total memory usage. #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct PagedMemory { +#[serde(bound(serialize = "V: Serialize"))] +#[serde(bound(deserialize = "V: DeserializeOwned"))] +pub struct PagedMemory { /// The internal page table. - pub page_table: VecMap>, + pub page_table: Vec>, + pub index: Vec, } -impl PagedMemory { - /// The base 2 logarithm of the (maximum) page size in bytes. - const LOG_PAGE_SIZE: usize = 12; - /// The base 2 logarithm of the length of each page, considered as an array of `Option`. - const LOG_PAGE_LEN: usize = - Self::LOG_PAGE_SIZE - size_of::>().next_power_of_two().ilog2() as usize; - /// The length of each page, considered as an array of `Option`. - const PAGE_LEN: usize = 1 << Self::LOG_PAGE_LEN; - /// The mask for retrieving the lowest bits necessary to index within a page. - const PAGE_MASK: usize = Self::PAGE_LEN - 1; - /// The maximum number of pages. Used for the length of the page table. - const MAX_PAGE_COUNT: usize = - 1 << (u32::BITS as usize - Self::LOG_PAGE_LEN - Self::NUM_IGNORED_LOWER_BITS); +impl PagedMemory { /// The number of lower bits to ignore, since addresses (except registers) are a multiple of 4. const NUM_IGNORED_LOWER_BITS: usize = 2; /// The number of registers in the virtual machine. @@ -50,82 +55,95 @@ impl PagedMemory { /// Create a `PagedMemory` with capacity `MAX_PAGE_COUNT`. pub fn new_preallocated() -> Self { - Self { page_table: VecMap::with_capacity(Self::MAX_PAGE_COUNT) } + Self { page_table: Vec::new(), index: vec![NO_PAGE; MAX_PAGE_COUNT] } } /// Get a reference to the memory value at the given address, if it exists. pub fn get(&self, addr: u32) -> Option<&V> { let (upper, lower) = Self::indices(addr); - self.page_table.get(upper)?.0.get(lower) + let index = self.index[upper]; + if index == NO_PAGE { + None + } else { + self.page_table[index].0[lower].as_ref() + } } /// Get a mutable reference to the memory value at the given address, if it exists. pub fn get_mut(&mut self, addr: u32) -> Option<&mut V> { let (upper, lower) = Self::indices(addr); - self.page_table.get_mut(upper)?.0.get_mut(lower) + let index = self.index[upper]; + if index == NO_PAGE { + None + } else { + self.page_table[index].0[lower].as_mut() + } } /// Insert a value at the given address. Returns the previous value, if any. 
pub fn insert(&mut self, addr: u32, value: V) -> Option { let (upper, lower) = Self::indices(addr); - self.page_table - .entry(upper) - .or_insert_with(PagedMemory::::new_page) - .0 - .insert(lower, value) + let mut index = self.index[upper]; + if index == NO_PAGE { + index = self.page_table.len(); + self.index[upper] = index; + self.page_table.push(NewPage::new()); + } + self.page_table[index].0[lower].replace(value) } /// Remove the value at the given address if it exists, returning it. pub fn remove(&mut self, addr: u32) -> Option { let (upper, lower) = Self::indices(addr); - match self.page_table.entry(upper) { - vec_map::Entry::Vacant(_) => None, - vec_map::Entry::Occupied(mut entry) => { - let res = entry.get_mut().0.remove(lower); - if entry.get().0.is_empty() { - entry.remove(); - } - res - } + let index = self.index[upper]; + if index == NO_PAGE { + None + } else { + self.page_table[index].0[lower].take() } } /// Gets the memory entry for the given address. pub fn entry(&mut self, addr: u32) -> Entry<'_, V> { let (upper, lower) = Self::indices(addr); - let page_table_entry = self.page_table.entry(upper); - if let vec_map::Entry::Occupied(occ_entry) = page_table_entry { - if occ_entry.get().0.contains_key(lower) { - Entry::Occupied(OccupiedEntry { lower, page_table_occupied_entry: occ_entry }) - } else { - Entry::Vacant(VacantEntry { - lower, - page_table_entry: vec_map::Entry::Occupied(occ_entry), - }) - } + let index = self.index[upper]; + if index == NO_PAGE { + let index = self.page_table.len(); + self.index[upper] = index; + self.page_table.push(NewPage::new()); + Entry::Vacant(VacantEntry { entry: &mut self.page_table[index].0[lower] }) } else { - Entry::Vacant(VacantEntry { lower, page_table_entry }) + let option = &mut self.page_table[index].0[lower]; + match option { + Some(_) => Entry::Occupied(OccupiedEntry { entry: option }), + None => Entry::Vacant(VacantEntry { entry: option }), + } } } /// Returns an iterator over the occupied addresses. pub fn keys(&self) -> impl Iterator + '_ { - self.page_table.iter().flat_map(|(upper, page)| { - let upper = upper << Self::LOG_PAGE_LEN; - page.0.iter().map(move |(lower, _)| Self::decompress_addr(upper + lower)) + self.index.iter().enumerate().filter(|(_, &i)| i != NO_PAGE).flat_map(|(i, index)| { + let upper = i << LOG_PAGE_LEN; + self.page_table[*index] + .0 + .iter() + .enumerate() + .filter_map(move |(lower, v)| v.map(|_| Self::decompress_addr(upper + lower))) }) } /// Clears the page table. Drops all `Page`s, but retains the memory used by the table itself. pub fn clear(&mut self) { self.page_table.clear(); + self.index.fill(NO_PAGE); } /// Break apart an address into an upper and lower index. #[inline] const fn indices(addr: u32) -> (usize, usize) { let index = Self::compress_addr(addr); - (index >> Self::LOG_PAGE_LEN, index & Self::PAGE_MASK) + (index >> LOG_PAGE_LEN, index & PAGE_MASK) } /// Compress an address from the sparse address space to a contiguous space. @@ -148,26 +166,21 @@ impl PagedMemory { ((addr - Self::ADDR_COMPRESS_OFFSET) << Self::NUM_IGNORED_LOWER_BITS) as u32 } } - - #[inline] - fn new_page() -> Page { - Page::with_capacity(Self::PAGE_LEN) - } } -impl Default for PagedMemory { +impl Default for PagedMemory { fn default() -> Self { - Self { page_table: VecMap::default() } + Self { page_table: Vec::new(), index: vec![NO_PAGE; MAX_PAGE_COUNT] } } } /// An entry of `PagedMemory`, for in-place manipulation. 
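// Illustrative sketch (not part of the patch): the rewritten `PagedMemory` above
// replaces the VecMap-of-VecMaps with a flat `index` array (one slot per possible
// page, `NO_PAGE` meaning "not yet allocated") pointing into a dense table of
// fixed-size pages. A minimal stand-in with tiny pages and plain usize keys:
const LOG_PAGE_LEN: usize = 4;
const PAGE_LEN: usize = 1 << LOG_PAGE_LEN;
const MAX_PAGE_COUNT: usize = 16;
const NO_PAGE: usize = usize::MAX;

struct PagedMap {
    index: Vec<usize>,            // page number -> position in `pages`, or NO_PAGE
    pages: Vec<Vec<Option<u32>>>, // densely packed pages, allocated on first write
}

impl PagedMap {
    fn new() -> Self {
        Self { index: vec![NO_PAGE; MAX_PAGE_COUNT], pages: Vec::new() }
    }

    fn insert(&mut self, key: usize, value: u32) -> Option<u32> {
        let (upper, lower) = (key >> LOG_PAGE_LEN, key & (PAGE_LEN - 1));
        let mut page = self.index[upper];
        if page == NO_PAGE {
            // First touch of this page: allocate it at the end of the dense table.
            page = self.pages.len();
            self.index[upper] = page;
            self.pages.push(vec![None; PAGE_LEN]);
        }
        self.pages[page][lower].replace(value)
    }

    fn get(&self, key: usize) -> Option<&u32> {
        let (upper, lower) = (key >> LOG_PAGE_LEN, key & (PAGE_LEN - 1));
        let page = self.index[upper];
        if page == NO_PAGE {
            None
        } else {
            self.pages[page][lower].as_ref()
        }
    }
}

fn main() {
    let mut m = PagedMap::new();
    assert_eq!(m.insert(35, 7), None);
    assert_eq!(m.insert(35, 9), Some(7));
    assert_eq!(m.get(35), Some(&9));
    assert_eq!(m.get(36), None);
    assert_eq!(m.pages.len(), 1); // only the touched page was allocated
}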
-pub enum Entry<'a, V> { +pub enum Entry<'a, V: Copy> { Vacant(VacantEntry<'a, V>), Occupied(OccupiedEntry<'a, V>), } -impl<'a, V> Entry<'a, V> { +impl<'a, V: Copy> Entry<'a, V> { /// Ensures a value is in the entry, inserting the provided value if necessary. /// Returns a mutable reference to the value. pub fn or_insert(self, default: V) -> &'a mut V { @@ -197,62 +210,52 @@ impl<'a, V> Entry<'a, V> { } /// A vacant entry of `PagedMemory`, for in-place manipulation. -pub struct VacantEntry<'a, V> { - lower: usize, - page_table_entry: vec_map::Entry<'a, Page>, +pub struct VacantEntry<'a, V: Copy> { + entry: &'a mut Option, } -impl<'a, V> VacantEntry<'a, V> { +impl<'a, V: Copy> VacantEntry<'a, V> { /// Insert a value into the `VacantEntry`, returning a mutable reference to it. pub fn insert(self, value: V) -> &'a mut V { // By construction, the slot in the page is `None`. - match self.page_table_entry.or_insert_with(PagedMemory::::new_page).0.entry(self.lower) { - vec_map::Entry::Vacant(entry) => entry.insert(value), - vec_map::Entry::Occupied(_) => { - panic!("entry with lower bits {:#x} should be vacant", self.lower) - } - } + *self.entry = Some(value); + self.entry.as_mut().unwrap() } } /// A vacant entry of `PagedMemory`, for in-place manipulation. pub struct OccupiedEntry<'a, V> { - lower: usize, - page_table_occupied_entry: vec_map::OccupiedEntry<'a, Page>, + entry: &'a mut Option, } -impl<'a, V> OccupiedEntry<'a, V> { +impl<'a, V: Copy> OccupiedEntry<'a, V> { /// Get a reference to the value in the `OccupiedEntry`. pub fn get(&self) -> &V { - self.page_table_occupied_entry.get().0.get(self.lower).unwrap() + self.entry.as_ref().unwrap() } /// Get a mutable reference to the value in the `OccupiedEntry`. pub fn get_mut(&mut self) -> &mut V { - self.page_table_occupied_entry.get_mut().0.get_mut(self.lower).unwrap() + self.entry.as_mut().unwrap() } /// Insert a value in the `OccupiedEntry`, returning the previous value. pub fn insert(&mut self, value: V) -> V { - replace(self.get_mut(), value) + self.entry.replace(value).unwrap() } /// Converts the `OccupiedEntry` the into a mutable reference to the associated value. pub fn into_mut(self) -> &'a mut V { - self.page_table_occupied_entry.into_mut().0.get_mut(self.lower).unwrap() + self.entry.as_mut().unwrap() } /// Removes the value from the `OccupiedEntry` and returns it. - pub fn remove(mut self) -> V { - let res = self.page_table_occupied_entry.get_mut().0.remove(self.lower).unwrap(); - if self.page_table_occupied_entry.get().0.is_empty() { - self.page_table_occupied_entry.remove(); - } - res + pub fn remove(self) -> V { + self.entry.take().unwrap() } } -impl FromIterator<(u32, V)> for PagedMemory { +impl FromIterator<(u32, V)> for PagedMemory { fn from_iter>(iter: T) -> Self { let mut mmu = Self::default(); for (k, v) in iter { @@ -262,48 +265,24 @@ impl FromIterator<(u32, V)> for PagedMemory { } } -impl IntoIterator for PagedMemory { - type Item = (u32, V); - - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - IntoIter { upper: 0, upper_iter: self.page_table.into_iter(), lower_iter: None } - } -} - -pub struct IntoIter { - upper: usize, - upper_iter: vec_map::IntoIter>, - lower_iter: Option>, -} - -impl Iterator for IntoIter { +impl IntoIterator for PagedMemory { type Item = (u32, V); - fn next(&mut self) -> Option { - loop { - // Populate the lower iterator. - let it = match &mut self.lower_iter { - Some(it) => it, - None => { - // Exit if the upper iterator has finished. 
- let (upper, page) = self.upper_iter.next()?; - self.upper = upper; - self.lower_iter.insert(page.0.into_iter()) - } - }; - // Yield the next item. - if let Some((lower, record)) = it.next() { - return Some(( - PagedMemory::::decompress_addr( - (self.upper << PagedMemory::::LOG_PAGE_LEN) + lower, - ), - record, - )); - } - // If no next item in the lower iterator, it must be finished. - self.lower_iter = None; - } + type IntoIter = Box>; + + fn into_iter(mut self) -> Self::IntoIter { + Box::new(self.index.into_iter().enumerate().filter(|(_, i)| *i != NO_PAGE).flat_map( + move |(i, index)| { + let upper = i << LOG_PAGE_LEN; + let replacement = NewPage::new(); + std::mem::replace(&mut self.page_table[index], replacement) + .0 + .into_iter() + .enumerate() + .filter_map(move |(lower, v)| { + v.map(|v| (Self::decompress_addr(upper + lower), v)) + }) + }, + )) } } diff --git a/crates/core/executor/src/program.rs b/crates/core/executor/src/program.rs index e0cfaf7c2e..29743a4c8f 100644 --- a/crates/core/executor/src/program.rs +++ b/crates/core/executor/src/program.rs @@ -1,14 +1,16 @@ //! Programs that can be executed by the SP1 zkVM. -use std::{collections::BTreeMap, fs::File, io::Read}; +use std::{fs::File, io::Read}; +use hashbrown::HashMap; use p3_field::Field; use serde::{Deserialize, Serialize}; -use sp1_stark::air::MachineProgram; +use sp1_stark::air::{MachineAir, MachineProgram}; use crate::{ disassembler::{transpile, Elf}, instruction::Instruction, + CoreShape, }; /// A program that can be executed by the SP1 zkVM. @@ -24,14 +26,22 @@ pub struct Program { /// The base address of the program. pub pc_base: u32, /// The initial memory image, useful for global constants. - pub memory_image: BTreeMap, + pub memory_image: HashMap, + /// The shape for the preprocessed tables. + pub preprocessed_shape: Option, } impl Program { /// Create a new [Program]. #[must_use] - pub const fn new(instructions: Vec, pc_start: u32, pc_base: u32) -> Self { - Self { instructions, pc_start, pc_base, memory_image: BTreeMap::new() } + pub fn new(instructions: Vec, pc_start: u32, pc_base: u32) -> Self { + Self { + instructions, + pc_start, + pc_base, + memory_image: HashMap::new(), + preprocessed_shape: None, + } } /// Disassemble a RV32IM ELF to a program that be executed by the VM. @@ -52,6 +62,7 @@ impl Program { pc_start: elf.pc_start, pc_base: elf.pc_base, memory_image: elf.memory_image, + preprocessed_shape: None, }) } @@ -65,6 +76,19 @@ impl Program { File::open(path)?.read_to_end(&mut elf_code)?; Program::from(&elf_code) } + + /// Custom logic for padding the trace to a power of two according to the proof shape. 
+ pub fn fixed_log2_rows>(&self, air: &A) -> Option { + self.preprocessed_shape + .as_ref() + .map(|shape| { + shape + .inner + .get(&air.name()) + .unwrap_or_else(|| panic!("Chip {} not found in specified shape", air.name())) + }) + .copied() + } } impl MachineProgram for Program { diff --git a/crates/core/executor/src/record.rs b/crates/core/executor/src/record.rs index ab78477f02..b6c23c45f8 100644 --- a/crates/core/executor/src/record.rs +++ b/crates/core/executor/src/record.rs @@ -1,18 +1,23 @@ use hashbrown::HashMap; use itertools::{EitherOrBoth, Itertools}; -use p3_field::AbstractField; -use sp1_stark::{air::PublicValues, MachineRecord, SP1CoreOpts, SplitOpts}; -use std::sync::Arc; +use p3_field::{AbstractField, PrimeField}; +use sp1_stark::{ + air::{MachineAir, PublicValues}, + MachineRecord, SP1CoreOpts, SplitOpts, +}; +use std::{mem::take, sync::Arc}; use serde::{Deserialize, Serialize}; use super::{program::Program, Opcode}; -use crate::events::{ - add_sharded_byte_lookup_events, AluEvent, ByteLookupEvent, ByteRecord, CpuEvent, - EdDecompressEvent, EllipticCurveAddEvent, EllipticCurveDecompressEvent, - EllipticCurveDoubleEvent, Fp2AddSubEvent, Fp2MulEvent, FpOpEvent, KeccakPermuteEvent, LookupId, - MemoryInitializeFinalizeEvent, MemoryRecordEnum, ShaCompressEvent, ShaExtendEvent, - Uint256MulEvent, +use crate::{ + events::{ + add_sharded_byte_lookup_events, AluEvent, ByteLookupEvent, ByteRecord, CpuEvent, LookupId, + MemoryInitializeFinalizeEvent, MemoryLocalEvent, MemoryRecordEnum, PrecompileEvent, + PrecompileEvents, SyscallEvent, + }, + syscalls::SyscallCode, + CoreShape, }; /// A record of the execution of a program. @@ -42,54 +47,22 @@ pub struct ExecutionRecord { pub lt_events: Vec, /// A trace of the byte lookups that are needed. pub byte_lookups: HashMap>, - /// A trace of the sha256 extend events. - pub sha_extend_events: Vec, - /// A trace of the sha256 compress events. - pub sha_compress_events: Vec, - /// A trace of the keccak256 permute events. - pub keccak_permute_events: Vec, - /// A trace of the edwards add events. - pub ed_add_events: Vec, - /// A trace of the edwards decompress events. - pub ed_decompress_events: Vec, - /// A trace of the secp256k1 add events. - pub secp256k1_add_events: Vec, - /// A trace of the secp256k1 double events. - pub secp256k1_double_events: Vec, - /// A trace of the bn254 add events. - pub bn254_add_events: Vec, - /// A trace of the bn254 double events. - pub bn254_double_events: Vec, - /// A trace of the k256 decompress events. - pub k256_decompress_events: Vec, - /// A trace of the bls12381 add events. - pub bls12381_add_events: Vec, - /// A trace of the bls12381 double events. - pub bls12381_double_events: Vec, - /// A trace of the uint256 mul events. - pub uint256_mul_events: Vec, - /// A trace of the memory initialize events. - pub memory_initialize_events: Vec, - /// A trace of the memory finalize events. - pub memory_finalize_events: Vec, - /// A trace of the bls12381 decompress events. - pub bls12381_decompress_events: Vec, - /// A trace of the bls12381 fp events. - pub bls12381_fp_events: Vec, - /// A trace of the bls12381 fp2 add/sub events. - pub bls12381_fp2_addsub_events: Vec, - /// A trace of the bls12381 fp2 mul events. - pub bls12381_fp2_mul_events: Vec, - /// A trace of the bn254 fp events. - pub bn254_fp_events: Vec, - /// A trace of the bn254 fp2 add/sub events. - pub bn254_fp2_addsub_events: Vec, - /// A trace of the bn254 fp2 mul events. - pub bn254_fp2_mul_events: Vec, + /// A trace of the precompile events. 
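// Illustrative sketch (not part of the patch): the `fixed_log2_rows` helper above
// looks up a chip's fixed log2 height in the (optional) proof shape and panics if the
// shape does not name that chip. Per its doc comment, the intent is that trace
// generation then pads the chip to `1 << log2_rows` instead of the next power of two.
// Simplified stand-in (the `Shape` struct and names here are assumptions):
use std::collections::HashMap;

struct Shape {
    inner: HashMap<String, usize>,
}

fn fixed_log2_rows(shape: Option<&Shape>, chip_name: &str) -> Option<usize> {
    shape.map(|shape| {
        *shape
            .inner
            .get(chip_name)
            .unwrap_or_else(|| panic!("Chip {chip_name} not found in specified shape"))
    })
}

fn padded_height(real_rows: usize, fixed_log2: Option<usize>) -> usize {
    match fixed_log2 {
        Some(log2) => 1 << log2,                      // shape-mandated height
        None => real_rows.next_power_of_two().max(1), // default: next power of two
    }
}

fn main() {
    let shape = Shape { inner: HashMap::from([("AddSub".to_string(), 13)]) };
    let log2 = fixed_log2_rows(Some(&shape), "AddSub");
    assert_eq!(log2, Some(13));
    assert_eq!(padded_height(3_000, log2), 8192);
    assert_eq!(padded_height(3_000, None), 4096);
}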
+ pub precompile_events: PrecompileEvents, + /// A trace of the global memory initialize events. + pub global_memory_initialize_events: Vec, + /// A trace of the global memory finalize events. + pub global_memory_finalize_events: Vec, + /// A trace of all the shard's local memory events. + pub cpu_local_memory_access: Vec, + /// A trace of all the syscall events. + pub syscall_events: Vec, /// The public values. pub public_values: PublicValues, /// The nonce lookup. pub nonce_lookup: HashMap, + /// The shape of the proof. + pub shape: Option, } impl ExecutionRecord { @@ -147,31 +120,13 @@ impl ExecutionRecord { /// included in every shard. #[must_use] pub fn defer(&mut self) -> ExecutionRecord { - ExecutionRecord { - keccak_permute_events: std::mem::take(&mut self.keccak_permute_events), - secp256k1_add_events: std::mem::take(&mut self.secp256k1_add_events), - secp256k1_double_events: std::mem::take(&mut self.secp256k1_double_events), - bn254_fp_events: std::mem::take(&mut self.bn254_fp_events), - bn254_fp2_addsub_events: std::mem::take(&mut self.bn254_fp2_addsub_events), - bn254_fp2_mul_events: std::mem::take(&mut self.bn254_fp2_mul_events), - bn254_add_events: std::mem::take(&mut self.bn254_add_events), - bn254_double_events: std::mem::take(&mut self.bn254_double_events), - bls12381_add_events: std::mem::take(&mut self.bls12381_add_events), - bls12381_double_events: std::mem::take(&mut self.bls12381_double_events), - sha_extend_events: std::mem::take(&mut self.sha_extend_events), - sha_compress_events: std::mem::take(&mut self.sha_compress_events), - ed_add_events: std::mem::take(&mut self.ed_add_events), - ed_decompress_events: std::mem::take(&mut self.ed_decompress_events), - k256_decompress_events: std::mem::take(&mut self.k256_decompress_events), - uint256_mul_events: std::mem::take(&mut self.uint256_mul_events), - bls12381_fp_events: std::mem::take(&mut self.bls12381_fp_events), - bls12381_fp2_addsub_events: std::mem::take(&mut self.bls12381_fp2_addsub_events), - bls12381_fp2_mul_events: std::mem::take(&mut self.bls12381_fp2_mul_events), - bls12381_decompress_events: std::mem::take(&mut self.bls12381_decompress_events), - memory_initialize_events: std::mem::take(&mut self.memory_initialize_events), - memory_finalize_events: std::mem::take(&mut self.memory_finalize_events), - ..Default::default() - } + let mut execution_record = ExecutionRecord::new(self.program.clone()); + execution_record.precompile_events = std::mem::take(&mut self.precompile_events); + execution_record.global_memory_initialize_events = + std::mem::take(&mut self.global_memory_initialize_events); + execution_record.global_memory_finalize_events = + std::mem::take(&mut self.global_memory_finalize_events); + execution_record } /// Splits the deferred [`ExecutionRecord`] into multiple [`ExecutionRecord`]s, each which @@ -179,67 +134,47 @@ impl ExecutionRecord { pub fn split(&mut self, last: bool, opts: SplitOpts) -> Vec { let mut shards = Vec::new(); - macro_rules! 
split_events { - ($self:ident, $events:ident, $shards:ident, $threshold:expr, $exact:expr) => { - let events = std::mem::take(&mut $self.$events); - let chunks = events.chunks_exact($threshold); - if !$exact { - $self.$events = chunks.remainder().to_vec(); - } else { - let remainder = chunks.remainder().to_vec(); - if !remainder.is_empty() { - $shards.push(ExecutionRecord { - $events: chunks.remainder().to_vec(), - program: self.program.clone(), - ..Default::default() - }); - } - } - let mut event_shards = chunks - .map(|chunk| ExecutionRecord { - $events: chunk.to_vec(), - program: self.program.clone(), - ..Default::default() - }) - .collect::>(); - $shards.append(&mut event_shards); + let precompile_events = take(&mut self.precompile_events); + + for (syscall_code, events) in precompile_events.into_iter() { + let threshold = match syscall_code { + SyscallCode::KECCAK_PERMUTE => opts.keccak, + SyscallCode::SHA_EXTEND => opts.sha_extend, + SyscallCode::SHA_COMPRESS => opts.sha_compress, + _ => opts.deferred, }; - } - split_events!(self, keccak_permute_events, shards, opts.keccak, last); - split_events!(self, secp256k1_add_events, shards, opts.deferred, last); - split_events!(self, secp256k1_double_events, shards, opts.deferred, last); - split_events!(self, bn254_add_events, shards, opts.deferred, last); - split_events!(self, bn254_double_events, shards, opts.deferred, last); - split_events!(self, bls12381_add_events, shards, opts.deferred, last); - split_events!(self, bls12381_double_events, shards, opts.deferred, last); - split_events!(self, sha_extend_events, shards, opts.sha_extend, last); - split_events!(self, sha_compress_events, shards, opts.sha_compress, last); - split_events!(self, ed_add_events, shards, opts.deferred, last); - split_events!(self, ed_decompress_events, shards, opts.deferred, last); - split_events!(self, k256_decompress_events, shards, opts.deferred, last); - split_events!(self, uint256_mul_events, shards, opts.deferred, last); - split_events!(self, bls12381_decompress_events, shards, opts.deferred, last); - split_events!(self, bls12381_fp_events, shards, opts.deferred, last); - split_events!(self, bls12381_fp2_addsub_events, shards, opts.deferred, last); - split_events!(self, bls12381_fp2_mul_events, shards, opts.deferred, last); - split_events!(self, bn254_fp_events, shards, opts.deferred, last); - split_events!(self, bn254_fp2_addsub_events, shards, opts.deferred, last); - split_events!(self, bn254_fp2_mul_events, shards, opts.deferred, last); - // _ = last_pct; + let chunks = events.chunks_exact(threshold); + if last { + let remainder = chunks.remainder().to_vec(); + if !remainder.is_empty() { + let mut execution_record = ExecutionRecord::new(self.program.clone()); + execution_record.precompile_events.insert(syscall_code, remainder); + shards.push(execution_record); + } + } else { + self.precompile_events.insert(syscall_code, chunks.remainder().to_vec()); + } + let mut event_shards = chunks + .map(|chunk| { + let mut execution_record = ExecutionRecord::new(self.program.clone()); + execution_record.precompile_events.insert(syscall_code, chunk.to_vec()); + execution_record + }) + .collect::>(); + shards.append(&mut event_shards); + } if last { - // shards.push(last_shard); - - self.memory_initialize_events.sort_by_key(|event| event.addr); - self.memory_finalize_events.sort_by_key(|event| event.addr); + self.global_memory_initialize_events.sort_by_key(|event| event.addr); + self.global_memory_finalize_events.sort_by_key(|event| event.addr); let mut init_addr_bits = 
[0; 32]; let mut finalize_addr_bits = [0; 32]; for mem_chunks in self - .memory_initialize_events + .global_memory_initialize_events .chunks(opts.memory) - .zip_longest(self.memory_finalize_events.chunks(opts.memory)) + .zip_longest(self.global_memory_finalize_events.chunks(opts.memory)) { let (mem_init_chunk, mem_finalize_chunk) = match mem_chunks { EitherOrBoth::Both(mem_init_chunk, mem_finalize_chunk) => { @@ -248,9 +183,8 @@ impl ExecutionRecord { EitherOrBoth::Left(mem_init_chunk) => (mem_init_chunk, [].as_slice()), EitherOrBoth::Right(mem_finalize_chunk) => ([].as_slice(), mem_finalize_chunk), }; - let mut shard = ExecutionRecord::default(); - shard.program = self.program.clone(); - shard.memory_initialize_events.extend_from_slice(mem_init_chunk); + let mut shard = ExecutionRecord::new(self.program.clone()); + shard.global_memory_initialize_events.extend_from_slice(mem_init_chunk); shard.public_values.previous_init_addr_bits = init_addr_bits; if let Some(last_event) = mem_init_chunk.last() { let last_init_addr_bits = core::array::from_fn(|i| (last_event.addr >> i) & 1); @@ -258,7 +192,7 @@ impl ExecutionRecord { } shard.public_values.last_init_addr_bits = init_addr_bits; - shard.memory_finalize_events.extend_from_slice(mem_finalize_chunk); + shard.global_memory_finalize_events.extend_from_slice(mem_finalize_chunk); shard.public_values.previous_finalize_addr_bits = finalize_addr_bits; if let Some(last_event) = mem_finalize_chunk.last() { let last_finalize_addr_bits = @@ -273,6 +207,54 @@ impl ExecutionRecord { shards } + + /// Return the number of rows needed for a chip, according to the proof shape specified in the + /// struct. + pub fn fixed_log2_rows>(&self, air: &A) -> Option { + self.shape + .as_ref() + .map(|shape| { + shape + .inner + .get(&air.name()) + .unwrap_or_else(|| panic!("Chip {} not found in specified shape", air.name())) + }) + .copied() + } + + /// Determines whether the execution record contains CPU events. + #[must_use] + pub fn contains_cpu(&self) -> bool { + !self.cpu_events.is_empty() + } + + #[inline] + /// Add a precompile event to the execution record. + pub fn add_precompile_event( + &mut self, + syscall_code: SyscallCode, + syscall_event: SyscallEvent, + event: PrecompileEvent, + ) { + self.precompile_events.add_event(syscall_code, syscall_event, event); + } + + /// Get all the precompile events for a syscall code. + #[inline] + #[must_use] + pub fn get_precompile_events( + &self, + syscall_code: SyscallCode, + ) -> &Vec<(SyscallEvent, PrecompileEvent)> { + self.precompile_events.get_events(syscall_code).expect("Precompile events not found") + } + + /// Get all the local memory events. + #[inline] + pub fn get_local_mem_events(&self) -> impl Iterator { + let precompile_local_mem_events = self.precompile_events.get_local_mem_events(); + precompile_local_mem_events.chain(self.cpu_local_memory_access.iter()) + } } /// A memory access record. 
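// Illustrative sketch (not part of the patch): `split` above carves each deferred
// precompile event list into fixed-size shards with `chunks_exact`; the remainder
// either stays behind for a later call or, on the final (`last`) call, becomes one
// extra shard. Simplified stand-in over plain numbers instead of events:
fn split_deferred(events: &mut Vec<u32>, threshold: usize, last: bool) -> Vec<Vec<u32>> {
    let taken = std::mem::take(events);
    let chunks = taken.chunks_exact(threshold);
    let remainder = chunks.remainder().to_vec();
    let mut shards: Vec<Vec<u32>> = chunks.map(|chunk| chunk.to_vec()).collect();
    if last {
        if !remainder.is_empty() {
            shards.push(remainder); // flush the leftover events into a final shard
        }
    } else {
        *events = remainder; // keep the leftovers for the next split
    }
    shards
}

fn main() {
    let mut events: Vec<u32> = (0..10).collect();
    let shards = split_deferred(&mut events, 4, false);
    assert_eq!(shards.len(), 2);    // two full shards of 4
    assert_eq!(events, vec![8, 9]); // remainder kept for later

    let mut events: Vec<u32> = (0..10).collect();
    let shards = split_deferred(&mut events, 4, true);
    assert_eq!(shards.len(), 3); // remainder flushed as a final shard
    assert!(events.is_empty());
}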
@@ -302,34 +284,20 @@ impl MachineRecord for ExecutionRecord { stats.insert("shift_right_events".to_string(), self.shift_right_events.len()); stats.insert("divrem_events".to_string(), self.divrem_events.len()); stats.insert("lt_events".to_string(), self.lt_events.len()); - stats.insert("sha_extend_events".to_string(), self.sha_extend_events.len()); - stats.insert("sha_compress_events".to_string(), self.sha_compress_events.len()); - stats.insert("keccak_permute_events".to_string(), self.keccak_permute_events.len()); - stats.insert("ed_add_events".to_string(), self.ed_add_events.len()); - stats.insert("ed_decompress_events".to_string(), self.ed_decompress_events.len()); - stats.insert("secp256k1_add_events".to_string(), self.secp256k1_add_events.len()); - stats.insert("secp256k1_double_events".to_string(), self.secp256k1_double_events.len()); - stats.insert("bn254_add_events".to_string(), self.bn254_add_events.len()); - stats.insert("bn254_double_events".to_string(), self.bn254_double_events.len()); - stats.insert("k256_decompress_events".to_string(), self.k256_decompress_events.len()); - stats.insert("bls12381_add_events".to_string(), self.bls12381_add_events.len()); - stats.insert("bls12381_double_events".to_string(), self.bls12381_double_events.len()); - stats.insert("uint256_mul_events".to_string(), self.uint256_mul_events.len()); - stats.insert("bls12381_fp_event".to_string(), self.bls12381_fp_events.len()); + + for (syscall_code, events) in self.precompile_events.iter() { + stats.insert(format!("syscall {syscall_code:?}"), events.len()); + } + stats.insert( - "bls12381_fp2_addsub_events".to_string(), - self.bls12381_fp2_addsub_events.len(), + "global_memory_initialize_events".to_string(), + self.global_memory_initialize_events.len(), ); - stats.insert("bls12381_fp2_mul_events".to_string(), self.bls12381_fp2_mul_events.len()); - stats.insert("bn254_fp_events".to_string(), self.bn254_fp_events.len()); - stats.insert("bn254_fp2_addsub_events".to_string(), self.bn254_fp2_addsub_events.len()); - stats.insert("bn254_fp2_mul_events".to_string(), self.bn254_fp2_mul_events.len()); stats.insert( - "bls12381_decompress_events".to_string(), - self.bls12381_decompress_events.len(), + "global_memory_finalize_events".to_string(), + self.global_memory_finalize_events.len(), ); - stats.insert("memory_initialize_events".to_string(), self.memory_initialize_events.len()); - stats.insert("memory_finalize_events".to_string(), self.memory_finalize_events.len()); + stats.insert("local_memory_access_events".to_string(), self.cpu_local_memory_access.len()); if !self.cpu_events.is_empty() { let shard = self.cpu_events[0].shard; stats.insert( @@ -352,28 +320,9 @@ impl MachineRecord for ExecutionRecord { self.shift_right_events.append(&mut other.shift_right_events); self.divrem_events.append(&mut other.divrem_events); self.lt_events.append(&mut other.lt_events); - self.sha_extend_events.append(&mut other.sha_extend_events); - self.sha_compress_events.append(&mut other.sha_compress_events); - self.keccak_permute_events.append(&mut other.keccak_permute_events); - self.ed_add_events.append(&mut other.ed_add_events); - self.ed_decompress_events.append(&mut other.ed_decompress_events); - self.secp256k1_add_events.append(&mut other.secp256k1_add_events); - self.secp256k1_double_events.append(&mut other.secp256k1_double_events); - self.bn254_add_events.append(&mut other.bn254_add_events); - self.bn254_double_events.append(&mut other.bn254_double_events); - self.k256_decompress_events.append(&mut 
other.k256_decompress_events); - self.bls12381_add_events.append(&mut other.bls12381_add_events); - self.bls12381_double_events.append(&mut other.bls12381_double_events); - self.uint256_mul_events.append(&mut other.uint256_mul_events); - self.bls12381_fp_events.append(&mut other.bls12381_fp_events); - self.bls12381_fp2_addsub_events.append(&mut other.bls12381_fp2_addsub_events); - self.bls12381_fp2_mul_events.append(&mut other.bls12381_fp2_mul_events); - self.bn254_fp_events.append(&mut other.bn254_fp_events); - self.bn254_fp2_addsub_events.append(&mut other.bn254_fp2_addsub_events); - self.bn254_fp2_mul_events.append(&mut other.bn254_fp2_mul_events); - self.bls12381_decompress_events.append(&mut other.bls12381_decompress_events); - - self.bls12381_decompress_events.append(&mut other.bls12381_decompress_events); + self.syscall_events.append(&mut other.syscall_events); + + self.precompile_events.append(&mut other.precompile_events); if self.byte_lookups.is_empty() { self.byte_lookups = std::mem::take(&mut other.byte_lookups); @@ -381,8 +330,9 @@ impl MachineRecord for ExecutionRecord { self.add_sharded_byte_lookup_events(vec![&other.byte_lookups]); } - self.memory_initialize_events.append(&mut other.memory_initialize_events); - self.memory_finalize_events.append(&mut other.memory_finalize_events); + self.global_memory_initialize_events.append(&mut other.global_memory_initialize_events); + self.global_memory_finalize_events.append(&mut other.global_memory_finalize_events); + self.cpu_local_memory_access.append(&mut other.cpu_local_memory_access); } fn register_nonces(&mut self, _opts: &Self::Config) { diff --git a/crates/core/executor/src/reduce.rs b/crates/core/executor/src/reduce.rs new file mode 100644 index 0000000000..27a1d2f76f --- /dev/null +++ b/crates/core/executor/src/reduce.rs @@ -0,0 +1,21 @@ +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use sp1_stark::{Dom, ShardProof, StarkGenericConfig, StarkVerifyingKey}; +/// An intermediate proof which proves the execution. +#[derive(Serialize, Deserialize, Clone)] +#[serde(bound(serialize = "ShardProof: Serialize, Dom: Serialize"))] +#[serde(bound(deserialize = "ShardProof: Deserialize<'de>, Dom: DeserializeOwned"))] +pub struct SP1ReduceProof { + /// The compress verifying key associated with the proof. + pub vk: StarkVerifyingKey, + /// The shard proof representing the compressed proof. + pub proof: ShardProof, +} + +impl std::fmt::Debug for SP1ReduceProof { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut debug_struct = f.debug_struct("SP1ReduceProof"); + debug_struct.field("vk", &self.vk); + debug_struct.field("proof", &self.proof); + debug_struct.finish() + } +} diff --git a/crates/core/executor/src/report.rs b/crates/core/executor/src/report.rs index 7459d2d91b..6f15579216 100644 --- a/crates/core/executor/src/report.rs +++ b/crates/core/executor/src/report.rs @@ -11,6 +11,8 @@ use crate::{events::sorted_table_lines, syscalls::SyscallCode, Opcode}; /// An execution report. #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct ExecutionReport { + /// The event counts. + pub event_counts: Box>, /// The opcode counts. pub opcode_counts: Box>, /// The syscall counts. 
diff --git a/crates/core/executor/src/shape.rs b/crates/core/executor/src/shape.rs new file mode 100644 index 0000000000..cf1477fbe4 --- /dev/null +++ b/crates/core/executor/src/shape.rs @@ -0,0 +1,137 @@ +use std::sync::Arc; + +use hashbrown::{HashMap, HashSet}; +use p3_field::PrimeField; +use serde::{Deserialize, Serialize}; +use sp1_stark::{air::MachineAir, ProofShape}; + +use crate::{ExecutionRecord, Program}; + +/// The shape of a core proof. +#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq)] +pub struct CoreShape { + /// The shape of the proof. + /// + /// Keys are the chip names and values are the log-heights of the chips. + pub inner: HashMap, +} + +impl CoreShape { + /// Create a dummy program with this shape. + /// + /// This can be used to generate a dummy preprocessed traces. + #[must_use] + pub fn dummy_program(&self) -> Program { + let mut program = Program::new(vec![], 1 << 5, 1 << 5); + program.preprocessed_shape = Some(self.clone()); + program + } + + /// Create a dummy execution record with this shape. + /// + /// This can be used to generate dummy traces. + #[must_use] + pub fn dummy_record(&self) -> ExecutionRecord { + let program = Arc::new(self.dummy_program()); + let mut record = ExecutionRecord::new(program); + record.shape = Some(self.clone()); + record + } + + /// Determines whether the execution record contains a trace for a given chip. + pub fn included>(&self, air: &A) -> bool { + self.inner.contains_key(&air.name()) + } +} + +impl Extend for CoreShape { + fn extend>(&mut self, iter: T) { + for shape in iter { + self.inner.extend(shape.inner); + } + } +} + +impl Extend<(String, usize)> for CoreShape { + fn extend>(&mut self, iter: T) { + self.inner.extend(iter); + } +} + +impl IntoIterator for CoreShape { + type Item = (String, usize); + + type IntoIter = hashbrown::hash_map::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.inner.into_iter() + } +} + +impl FromIterator<(String, usize)> for CoreShape { + fn from_iter>(iter: T) -> Self { + Self { inner: iter.into_iter().collect() } + } +} + +impl From for CoreShape { + fn from(value: ProofShape) -> Self { + Self { inner: value.into_iter().collect() } + } +} + +impl From for ProofShape { + fn from(value: CoreShape) -> Self { + value.inner.into_iter().collect() + } +} + +impl PartialOrd for CoreShape { + fn partial_cmp(&self, other: &Self) -> Option { + let set = self.inner.keys().collect::>(); + let other_set = other.inner.keys().collect::>(); + + if set.is_subset(&other_set) { + let mut less_seen = false; + let mut greater_seen = false; + for (name, &height) in self.inner.iter() { + let other_height = other.inner[name]; + match height.cmp(&other_height) { + std::cmp::Ordering::Less => less_seen = true, + std::cmp::Ordering::Greater => greater_seen = true, + std::cmp::Ordering::Equal => {} + } + } + if less_seen && greater_seen { + return None; + } + + if less_seen { + return Some(std::cmp::Ordering::Less); + } + } + + if other_set.is_subset(&set) { + let mut less_seen = false; + let mut greater_seen = false; + for (name, &height) in other.inner.iter() { + let other_height = self.inner[name]; + match height.cmp(&other_height) { + std::cmp::Ordering::Less => less_seen = true, + std::cmp::Ordering::Greater => greater_seen = true, + std::cmp::Ordering::Equal => {} + } + } + + if less_seen && greater_seen { + return None; + } + + if greater_seen { + return Some(std::cmp::Ordering::Greater); + } + } + + None + } +} diff --git a/crates/core/executor/src/state.rs 
b/crates/core/executor/src/state.rs index c749db9c76..d4258af3a7 100644 --- a/crates/core/executor/src/state.rs +++ b/crates/core/executor/src/state.rs @@ -5,40 +5,37 @@ use std::{ use hashbrown::HashMap; use serde::{Deserialize, Serialize}; -use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, ShardProof, StarkVerifyingKey}; +use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkVerifyingKey}; use crate::{ events::MemoryRecord, memory::PagedMemory, record::{ExecutionRecord, MemoryAccessRecord}, syscalls::SyscallCode, - ExecutorMode, + ExecutorMode, SP1ReduceProof, }; /// Holds data describing the current state of a program's execution. #[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[repr(C)] pub struct ExecutionState { - /// The global clock keeps track of how many instrutions have been executed through all shards. - pub global_clk: u64, + /// The program counter. + pub pc: u32, /// The shard clock keeps track of how many shards have been executed. pub current_shard: u32, - /// The clock increments by 4 (possibly more in syscalls) for each instruction that has been - /// executed in this shard. - pub clk: u32, - - /// The channel alternates between 0 and [`crate::bytes::NUM_BYTE_LOOKUP_CHANNELS`], - /// used to controll byte lookup multiplicity. - pub channel: u8, - - /// The program counter. - pub pc: u32, - /// The memory which instructions operate over. Values contain the memory value and last shard /// + timestamp that each memory address was accessed. pub memory: PagedMemory, + /// The global clock keeps track of how many instrutions have been executed through all shards. + pub global_clk: u64, + + /// The clock increments by 4 (possibly more in syscalls) for each instruction that has been + /// executed in this shard. + pub clk: u32, + /// Uninitialized memory addresses that have a specific value they should be initialized with. /// `SyscallHintRead` uses this to write hint data into uninitialized memory. pub uninitialized_memory: PagedMemory, @@ -49,8 +46,9 @@ pub struct ExecutionState { /// A ptr to the current position in the input stream incremented by `HINT_READ` opcode. pub input_stream_ptr: usize, - /// A stream of proofs inputted to the program. - pub proof_stream: Vec<(ShardProof, StarkVerifyingKey)>, + /// A stream of proofs (reduce vk, proof, verifying key) inputted to the program. + pub proof_stream: + Vec<(SP1ReduceProof, StarkVerifyingKey)>, /// A ptr to the current position in the proof stream, incremented after verifying a proof. pub proof_stream_ptr: usize, @@ -75,7 +73,6 @@ impl ExecutionState { // Start at shard 1 since shard 0 is reserved for memory initialization. current_shard: 1, clk: 0, - channel: 0, pc: pc_start, memory: PagedMemory::new_preallocated(), uninitialized_memory: PagedMemory::default(), diff --git a/crates/core/executor/src/subproof.rs b/crates/core/executor/src/subproof.rs index 09d7c57a77..e821fd9959 100644 --- a/crates/core/executor/src/subproof.rs +++ b/crates/core/executor/src/subproof.rs @@ -1,10 +1,11 @@ //! Types and methods for subproof verification inside the [`crate::Executor`]. -use std::sync::atomic::AtomicBool; - use sp1_stark::{ - baby_bear_poseidon2::BabyBearPoseidon2, MachineVerificationError, ShardProof, StarkVerifyingKey, + baby_bear_poseidon2::BabyBearPoseidon2, MachineVerificationError, StarkVerifyingKey, }; +use std::sync::atomic::AtomicBool; + +use crate::SP1ReduceProof; /// Verifier used in runtime when `sp1_zkvm::precompiles::verify::verify_sp1_proof` is called. 
This /// is then used to sanity check that the user passed in the correct proof; the actual constraints @@ -16,7 +17,7 @@ pub trait SubproofVerifier: Sync + Send { /// Verify a deferred proof. fn verify_deferred_proof( &self, - proof: &ShardProof, + proof: &SP1ReduceProof, vk: &StarkVerifyingKey, vk_hash: [u32; 8], committed_value_digest: [u32; 8], @@ -40,7 +41,7 @@ impl DefaultSubproofVerifier { impl SubproofVerifier for DefaultSubproofVerifier { fn verify_deferred_proof( &self, - _proof: &ShardProof, + _proof: &SP1ReduceProof, _vk: &StarkVerifyingKey, _vk_hash: [u32; 8], _committed_value_digest: [u32; 8], @@ -59,7 +60,7 @@ pub struct NoOpSubproofVerifier; impl SubproofVerifier for NoOpSubproofVerifier { fn verify_deferred_proof( &self, - _proof: &ShardProof, + _proof: &SP1ReduceProof, _vk: &StarkVerifyingKey, _vk_hash: [u32; 8], _committed_value_digest: [u32; 8], diff --git a/crates/core/executor/src/syscalls/commit.rs b/crates/core/executor/src/syscalls/commit.rs index 89c462a3d4..2772fb6208 100644 --- a/crates/core/executor/src/syscalls/commit.rs +++ b/crates/core/executor/src/syscalls/commit.rs @@ -1,4 +1,4 @@ -use super::{Syscall, SyscallContext}; +use super::{Syscall, SyscallCode, SyscallContext}; pub(crate) struct CommitSyscall; @@ -7,6 +7,7 @@ impl Syscall for CommitSyscall { fn execute( &self, ctx: &mut SyscallContext, + _: SyscallCode, word_idx: u32, public_values_digest_word: u32, ) -> Option { diff --git a/crates/core/executor/src/syscalls/context.rs b/crates/core/executor/src/syscalls/context.rs index 60c93e478b..9db49c5cae 100644 --- a/crates/core/executor/src/syscalls/context.rs +++ b/crates/core/executor/src/syscalls/context.rs @@ -1,5 +1,7 @@ +use hashbrown::HashMap; + use crate::{ - events::{LookupId, MemoryReadRecord, MemoryWriteRecord}, + events::{LookupId, MemoryLocalEvent, MemoryReadRecord, MemoryWriteRecord}, record::ExecutionRecord, Executor, Register, }; @@ -20,6 +22,8 @@ pub struct SyscallContext<'a, 'b: 'a> { pub rt: &'a mut Executor<'b>, /// The syscall lookup id. pub syscall_lookup_id: LookupId, + /// The local memory access events for the syscall. + pub local_memory_access: HashMap, } impl<'a, 'b> SyscallContext<'a, 'b> { @@ -34,6 +38,7 @@ impl<'a, 'b> SyscallContext<'a, 'b> { exit_code: 0, rt: runtime, syscall_lookup_id: LookupId::default(), + local_memory_access: HashMap::new(), } } @@ -48,15 +53,10 @@ impl<'a, 'b> SyscallContext<'a, 'b> { self.rt.state.current_shard } - /// Get the current channel. - #[must_use] - pub fn current_channel(&self) -> u8 { - self.rt.state.channel - } - /// Read a word from memory. pub fn mr(&mut self, addr: u32) -> (MemoryReadRecord, u32) { - let record = self.rt.mr(addr, self.current_shard, self.clk); + let record = + self.rt.mr(addr, self.current_shard, self.clk, Some(&mut self.local_memory_access)); (record, record.value) } @@ -74,7 +74,7 @@ impl<'a, 'b> SyscallContext<'a, 'b> { /// Write a word to memory. pub fn mw(&mut self, addr: u32, value: u32) -> MemoryWriteRecord { - self.rt.mw(addr, value, self.current_shard, self.clk) + self.rt.mw(addr, value, self.current_shard, self.clk, Some(&mut self.local_memory_access)) } /// Write a slice of words to memory. @@ -87,6 +87,28 @@ impl<'a, 'b> SyscallContext<'a, 'b> { records } + /// Postprocess the syscall. Specifically will process the syscall's memory local events. 
+ pub fn postprocess(&mut self) -> Vec<MemoryLocalEvent> { + let mut syscall_local_mem_events = Vec::new(); + + if !self.rt.unconstrained { + // Will need to transfer the existing memory local events in the executor to its record, + // and return all the syscall memory local events. This is similar to what + // `bump_record` does. + for (addr, event) in self.local_memory_access.drain() { + let local_mem_access = self.rt.local_memory_access.remove(&addr); + + if let Some(local_mem_access) = local_mem_access { + self.rt.record.cpu_local_memory_access.push(local_mem_access); + } + + syscall_local_mem_events.push(event); + } + } + + syscall_local_mem_events + } + /// Get the current value of a register, but doesn't use a memory record. /// This is generally unconstrained, so you must be careful using it. #[must_use] diff --git a/crates/core/executor/src/syscalls/deferred.rs b/crates/core/executor/src/syscalls/deferred.rs index d8f5167f58..a1045f93d5 100644 --- a/crates/core/executor/src/syscalls/deferred.rs +++ b/crates/core/executor/src/syscalls/deferred.rs @@ -1,10 +1,16 @@ -use super::{Syscall, SyscallContext}; +use super::{Syscall, SyscallCode, SyscallContext}; pub(crate) struct CommitDeferredSyscall; impl Syscall for CommitDeferredSyscall { #[allow(clippy::mut_mut)] - fn execute(&self, ctx: &mut SyscallContext, word_idx: u32, word: u32) -> Option<u32> { + fn execute( + &self, + ctx: &mut SyscallContext, + _: SyscallCode, + word_idx: u32, + word: u32, + ) -> Option<u32> { let rt = &mut ctx.rt; rt.record.public_values.deferred_proofs_digest[word_idx as usize] = word;
into runtime state so the runtime will use the desired data instead of // 0 when first reading/writing from this address. + ctx.rt.uninitialized_memory_checkpoint.entry(ptr + i).or_insert_with(|| false); ctx.rt .state .uninitialized_memory diff --git a/crates/core/executor/src/syscalls/mod.rs b/crates/core/executor/src/syscalls/mod.rs index e4286bdaab..6633633102 100644 --- a/crates/core/executor/src/syscalls/mod.rs +++ b/crates/core/executor/src/syscalls/mod.rs @@ -57,7 +57,13 @@ pub trait Syscall: Send + Sync { /// X10 and X11, respectively. While not a hard requirement, the convention is that the return /// value is only for system calls such as `HALT`. Most precompiles use `arg1` and `arg2` to /// denote the addresses of the input data, and write the result to the memory at `arg1`. - fn execute(&self, ctx: &mut SyscallContext, arg1: u32, arg2: u32) -> Option; + fn execute( + &self, + ctx: &mut SyscallContext, + syscall_code: SyscallCode, + arg1: u32, + arg2: u32, + ) -> Option; /// The number of extra cycles that the syscall takes to execute. /// @@ -69,6 +75,7 @@ pub trait Syscall: Send + Sync { /// Creates the default syscall map. #[must_use] +#[allow(clippy::too_many_lines)] pub fn default_syscall_map() -> HashMap> { let mut syscall_map = HashMap::>::default(); diff --git a/crates/core/executor/src/syscalls/precompiles/edwards/add.rs b/crates/core/executor/src/syscalls/precompiles/edwards/add.rs index 3074392b64..935c580cc8 100644 --- a/crates/core/executor/src/syscalls/precompiles/edwards/add.rs +++ b/crates/core/executor/src/syscalls/precompiles/edwards/add.rs @@ -3,8 +3,8 @@ use std::marker::PhantomData; use sp1_curves::{edwards::EdwardsParameters, EllipticCurve}; use crate::{ - events::create_ec_add_event, - syscalls::{Syscall, SyscallContext}, + events::{create_ec_add_event, PrecompileEvent}, + syscalls::{Syscall, SyscallCode, SyscallContext}, }; pub(crate) struct EdwardsAddAssignSyscall { @@ -23,9 +23,21 @@ impl Syscall for EdwardsAddAssignSyscall Option { + fn execute( + &self, + rt: &mut SyscallContext, + syscall_code: SyscallCode, + arg1: u32, + arg2: u32, + ) -> Option { let event = create_ec_add_event::(rt, arg1, arg2); - rt.record_mut().ed_add_events.push(event); + let syscall_event = + rt.rt.syscall_event(event.clk, syscall_code.syscall_id(), arg1, arg2, event.lookup_id); + rt.record_mut().add_precompile_event( + syscall_code, + syscall_event, + PrecompileEvent::EdAdd(event), + ); None } } diff --git a/crates/core/executor/src/syscalls/precompiles/edwards/decompress.rs b/crates/core/executor/src/syscalls/precompiles/edwards/decompress.rs index e4d90a87b0..287101fbfd 100644 --- a/crates/core/executor/src/syscalls/precompiles/edwards/decompress.rs +++ b/crates/core/executor/src/syscalls/precompiles/edwards/decompress.rs @@ -8,8 +8,8 @@ use sp1_curves::{ use sp1_primitives::consts::{bytes_to_words_le, words_to_bytes_le}; use crate::{ - events::{EdDecompressEvent, MemoryReadRecord, MemoryWriteRecord}, - syscalls::{Syscall, SyscallContext}, + events::{EdDecompressEvent, MemoryReadRecord, MemoryWriteRecord, PrecompileEvent}, + syscalls::{Syscall, SyscallCode, SyscallContext}, }; pub(crate) struct EdwardsDecompressSyscall { @@ -24,7 +24,13 @@ impl EdwardsDecompressSyscall { } impl Syscall for EdwardsDecompressSyscall { - fn execute(&self, rt: &mut SyscallContext, arg1: u32, sign: u32) -> Option { + fn execute( + &self, + rt: &mut SyscallContext, + syscall_code: SyscallCode, + arg1: u32, + sign: u32, + ) -> Option { let start_clk = rt.clk; let slice_ptr = arg1; 
assert!(slice_ptr % 4 == 0, "Pointer must be 4-byte aligned."); @@ -60,11 +66,9 @@ impl Syscall for EdwardsDecompressSyscall { let lookup_id = rt.syscall_lookup_id; let shard = rt.current_shard(); - let channel = rt.current_channel(); - rt.record_mut().ed_decompress_events.push(EdDecompressEvent { + let event = EdDecompressEvent { lookup_id, shard, - channel, clk: start_clk, ptr: slice_ptr, sign: sign_bool, @@ -72,7 +76,15 @@ impl Syscall for EdwardsDecompressSyscall { decompressed_x_bytes: decompressed_x_bytes.try_into().unwrap(), x_memory_records, y_memory_records, - }); + local_mem_access: rt.postprocess(), + }; + let syscall_event = + rt.rt.syscall_event(start_clk, syscall_code.syscall_id(), arg1, sign, event.lookup_id); + rt.record_mut().add_precompile_event( + syscall_code, + syscall_event, + PrecompileEvent::EdDecompress(event), + ); None } diff --git a/crates/core/executor/src/syscalls/precompiles/fptower/fp.rs b/crates/core/executor/src/syscalls/precompiles/fptower/fp.rs index 703efa87a5..3ce04230ec 100644 --- a/crates/core/executor/src/syscalls/precompiles/fptower/fp.rs +++ b/crates/core/executor/src/syscalls/precompiles/fptower/fp.rs @@ -7,8 +7,8 @@ use std::marker::PhantomData; use typenum::Unsigned; use crate::{ - events::{FieldOperation, FpOpEvent}, - syscalls::{Syscall, SyscallContext}, + events::{FieldOperation, FpOpEvent, PrecompileEvent}, + syscalls::{Syscall, SyscallCode, SyscallContext}, }; pub struct FpOpSyscall

<P> { @@ -23,7 +23,13 @@ impl<P: FpOpField> FpOpSyscall<P> { } impl<P: FpOpField> Syscall for FpOpSyscall<P> { - fn execute(&self, rt: &mut SyscallContext, arg1: u32, arg2: u32) -> Option<u32> { + fn execute( + &self, + rt: &mut SyscallContext, + syscall_code: SyscallCode, + arg1: u32, + arg2: u32, + ) -> Option<u32> { let clk = rt.clk; let x_ptr = arg1; if x_ptr % 4 != 0 { @@ -57,37 +63,67 @@ impl<P: FpOpField> Syscall for FpOpSyscall<P>

{ let lookup_id = rt.syscall_lookup_id; let shard = rt.current_shard(); - let channel = rt.current_channel(); + let event = FpOpEvent { + lookup_id, + shard, + clk, + x_ptr, + x, + y_ptr, + y, + op: self.op, + x_memory_records, + y_memory_records, + local_mem_access: rt.postprocess(), + }; + + // Since all the Fp events are on the same table, we need to preserve the ordering of the + // events b/c of the nonce. In this table's trace_gen, the nonce is simply the row number. + // Group all of the events for a specific curve into the same syscall code key. + // TODO: FIX THIS. + match P::FIELD_TYPE { FieldType::Bn254 => { - rt.record_mut().bn254_fp_events.push(FpOpEvent { - lookup_id, - shard, - channel, + let syscall_code_key = match syscall_code { + SyscallCode::BN254_FP_ADD + | SyscallCode::BN254_FP_SUB + | SyscallCode::BN254_FP_MUL => SyscallCode::BN254_FP_ADD, + _ => unreachable!(), + }; + + let syscall_event = rt.rt.syscall_event( clk, - x_ptr, - x, - y_ptr, - y, - op: self.op, - x_memory_records, - y_memory_records, - }); + syscall_code.syscall_id(), + arg1, + arg2, + event.lookup_id, + ); + rt.record_mut().add_precompile_event( + syscall_code_key, + syscall_event, + PrecompileEvent::Bn254Fp(event), + ); } FieldType::Bls12381 => { - rt.record_mut().bls12381_fp_events.push(FpOpEvent { - lookup_id, - shard, - channel, + let syscall_code_key = match syscall_code { + SyscallCode::BLS12381_FP_ADD + | SyscallCode::BLS12381_FP_SUB + | SyscallCode::BLS12381_FP_MUL => SyscallCode::BLS12381_FP_ADD, + _ => unreachable!(), + }; + + let syscall_event = rt.rt.syscall_event( clk, - x_ptr, - x, - y_ptr, - y, - op: self.op, - x_memory_records, - y_memory_records, - }); + syscall_code.syscall_id(), + arg1, + arg2, + event.lookup_id, + ); + rt.record_mut().add_precompile_event( + syscall_code_key, + syscall_event, + PrecompileEvent::Bls12381Fp(event), + ); } } diff --git a/crates/core/executor/src/syscalls/precompiles/fptower/fp2_addsub.rs b/crates/core/executor/src/syscalls/precompiles/fptower/fp2_addsub.rs index 548566d61c..e2563a6bce 100644 --- a/crates/core/executor/src/syscalls/precompiles/fptower/fp2_addsub.rs +++ b/crates/core/executor/src/syscalls/precompiles/fptower/fp2_addsub.rs @@ -7,8 +7,8 @@ use std::marker::PhantomData; use typenum::Unsigned; use crate::{ - events::{FieldOperation, Fp2AddSubEvent}, - syscalls::{Syscall, SyscallContext}, + events::{FieldOperation, Fp2AddSubEvent, PrecompileEvent}, + syscalls::{Syscall, SyscallCode, SyscallContext}, }; pub struct Fp2AddSubSyscall

<P> { @@ -23,7 +23,13 @@ impl<P: FpOpField> Fp2AddSubSyscall<P> { } impl<P: FpOpField> Syscall for Fp2AddSubSyscall<P> { - fn execute(&self, rt: &mut SyscallContext, arg1: u32, arg2: u32) -> Option<u32> { + fn execute( + &self, + rt: &mut SyscallContext, + syscall_code: SyscallCode, + arg1: u32, + arg2: u32, + ) -> Option<u32> { let clk = rt.clk; let x_ptr = arg1; if x_ptr % 4 != 0 { @@ -65,38 +71,65 @@ impl<P: FpOpField> Syscall for Fp2AddSubSyscall<P>

{ let lookup_id = rt.syscall_lookup_id; let shard = rt.current_shard(); - let channel = rt.current_channel(); let op = self.op; + let event = Fp2AddSubEvent { + lookup_id, + shard, + clk, + op, + x_ptr, + x, + y_ptr, + y, + x_memory_records, + y_memory_records, + local_mem_access: rt.postprocess(), + }; match P::FIELD_TYPE { + // All the fp2 add and sub events for a given curve are coalesced to the curve's fp2 add operation. Only check for + // that operation. + // TODO: Fix this. FieldType::Bn254 => { - rt.record_mut().bn254_fp2_addsub_events.push(Fp2AddSubEvent { - lookup_id, - shard, - channel, + let syscall_code_key = match syscall_code { + SyscallCode::BN254_FP2_ADD | SyscallCode::BN254_FP2_SUB => { + SyscallCode::BN254_FP2_ADD + } + _ => unreachable!(), + }; + + let syscall_event = rt.rt.syscall_event( clk, - op, - x_ptr, - x, - y_ptr, - y, - x_memory_records, - y_memory_records, - }); + syscall_code.syscall_id(), + arg1, + arg2, + event.lookup_id, + ); + rt.record_mut().add_precompile_event( + syscall_code_key, + syscall_event, + PrecompileEvent::Bn254Fp2AddSub(event), + ); } FieldType::Bls12381 => { - rt.record_mut().bls12381_fp2_addsub_events.push(Fp2AddSubEvent { - lookup_id, - shard, - channel, + let syscall_code_key = match syscall_code { + SyscallCode::BLS12381_FP2_ADD | SyscallCode::BLS12381_FP2_SUB => { + SyscallCode::BLS12381_FP2_ADD + } + _ => unreachable!(), + }; + + let syscall_event = rt.rt.syscall_event( clk, - op, - x_ptr, - x, - y_ptr, - y, - x_memory_records, - y_memory_records, - }); + syscall_code.syscall_id(), + arg1, + arg2, + event.lookup_id, + ); + rt.record_mut().add_precompile_event( + syscall_code_key, + syscall_event, + PrecompileEvent::Bls12381Fp2AddSub(event), + ); } } None diff --git a/crates/core/executor/src/syscalls/precompiles/fptower/fp2_mul.rs b/crates/core/executor/src/syscalls/precompiles/fptower/fp2_mul.rs index 652bba0037..46598ef2b2 100644 --- a/crates/core/executor/src/syscalls/precompiles/fptower/fp2_mul.rs +++ b/crates/core/executor/src/syscalls/precompiles/fptower/fp2_mul.rs @@ -8,8 +8,8 @@ use sp1_curves::{ use typenum::Unsigned; use crate::{ - events::Fp2MulEvent, - syscalls::{Syscall, SyscallContext}, + events::{Fp2MulEvent, PrecompileEvent}, + syscalls::{Syscall, SyscallCode, SyscallContext}, }; pub struct Fp2MulSyscall

<P> { @@ -23,7 +23,13 @@ impl<P: FpOpField> Fp2MulSyscall<P> { } impl<P: FpOpField> Syscall for Fp2MulSyscall<P> { - fn execute(&self, rt: &mut SyscallContext, arg1: u32, arg2: u32) -> Option<u32> { + fn execute( + &self, + rt: &mut SyscallContext, + syscall_code: SyscallCode, + arg1: u32, + arg2: u32, + ) -> Option<u32> { let clk = rt.clk; let x_ptr = arg1; if x_ptr % 4 != 0 { @@ -64,32 +70,31 @@ impl<P: FpOpField> Syscall for Fp2MulSyscall<P>

{ let lookup_id = rt.syscall_lookup_id; let shard = rt.current_shard(); - let channel = rt.current_channel(); + let event = Fp2MulEvent { + lookup_id, + shard, + clk, + x_ptr, + x, + y_ptr, + y, + x_memory_records, + y_memory_records, + local_mem_access: rt.postprocess(), + }; + let syscall_event = + rt.rt.syscall_event(clk, syscall_code.syscall_id(), arg1, arg2, event.lookup_id); match P::FIELD_TYPE { - FieldType::Bn254 => rt.record_mut().bn254_fp2_mul_events.push(Fp2MulEvent { - lookup_id, - shard, - channel, - clk, - x_ptr, - x, - y_ptr, - y, - x_memory_records, - y_memory_records, - }), - FieldType::Bls12381 => rt.record_mut().bls12381_fp2_mul_events.push(Fp2MulEvent { - lookup_id, - shard, - channel, - clk, - x_ptr, - x, - y_ptr, - y, - x_memory_records, - y_memory_records, - }), + FieldType::Bn254 => rt.record_mut().add_precompile_event( + syscall_code, + syscall_event, + PrecompileEvent::Bn254Fp2Mul(event), + ), + FieldType::Bls12381 => rt.record_mut().add_precompile_event( + syscall_code, + syscall_event, + PrecompileEvent::Bls12381Fp2Mul(event), + ), }; None } diff --git a/crates/core/executor/src/syscalls/precompiles/keccak256/permute.rs b/crates/core/executor/src/syscalls/precompiles/keccak256/permute.rs index 70c64950dd..f33019f8b2 100644 --- a/crates/core/executor/src/syscalls/precompiles/keccak256/permute.rs +++ b/crates/core/executor/src/syscalls/precompiles/keccak256/permute.rs @@ -1,6 +1,6 @@ use crate::{ - events::KeccakPermuteEvent, - syscalls::{Syscall, SyscallContext}, + events::{KeccakPermuteEvent, PrecompileEvent}, + syscalls::{Syscall, SyscallCode, SyscallContext}, }; use tiny_keccak::keccakf; @@ -17,7 +17,13 @@ impl Syscall for Keccak256PermuteSyscall { 1 } - fn execute(&self, rt: &mut SyscallContext, arg1: u32, arg2: u32) -> Option { + fn execute( + &self, + rt: &mut SyscallContext, + syscall_code: SyscallCode, + arg1: u32, + arg2: u32, + ) -> Option { let start_clk = rt.clk; let state_ptr = arg1; if arg2 != 0 { @@ -58,19 +64,21 @@ impl Syscall for Keccak256PermuteSyscall { // Push the Keccak permute event. 
let shard = rt.current_shard(); - let channel = rt.current_channel(); let lookup_id = rt.syscall_lookup_id; - rt.record_mut().keccak_permute_events.push(KeccakPermuteEvent { + let event = PrecompileEvent::KeccakPermute(KeccakPermuteEvent { lookup_id, shard, - channel, clk: start_clk, pre_state: saved_state.as_slice().try_into().unwrap(), post_state: state.as_slice().try_into().unwrap(), state_read_records, state_write_records, state_addr: state_ptr, + local_mem_access: rt.postprocess(), }); + let syscall_event = + rt.rt.syscall_event(start_clk, syscall_code.syscall_id(), arg1, arg2, lookup_id); + rt.record_mut().add_precompile_event(syscall_code, syscall_event, event); None } diff --git a/crates/core/executor/src/syscalls/precompiles/sha256/compress.rs b/crates/core/executor/src/syscalls/precompiles/sha256/compress.rs index 1c8317cfc9..8d87fe7376 100644 --- a/crates/core/executor/src/syscalls/precompiles/sha256/compress.rs +++ b/crates/core/executor/src/syscalls/precompiles/sha256/compress.rs @@ -1,6 +1,6 @@ use crate::{ - events::ShaCompressEvent, - syscalls::{Syscall, SyscallContext}, + events::{PrecompileEvent, ShaCompressEvent}, + syscalls::{Syscall, SyscallCode, SyscallContext}, }; pub const SHA_COMPRESS_K: [u32; 64] = [ @@ -21,8 +21,15 @@ impl Syscall for Sha256CompressSyscall { 1 } + #[allow(clippy::too_many_lines)] #[allow(clippy::many_single_char_names)] - fn execute(&self, rt: &mut SyscallContext, arg1: u32, arg2: u32) -> Option { + fn execute( + &self, + rt: &mut SyscallContext, + syscall_code: SyscallCode, + arg1: u32, + arg2: u32, + ) -> Option { let w_ptr = arg1; let h_ptr = arg2; assert_ne!(w_ptr, h_ptr); @@ -88,11 +95,9 @@ impl Syscall for Sha256CompressSyscall { // Push the SHA extend event. let lookup_id = rt.syscall_lookup_id; let shard = rt.current_shard(); - let channel = rt.current_channel(); - rt.record_mut().sha_compress_events.push(ShaCompressEvent { + let event = PrecompileEvent::ShaCompress(ShaCompressEvent { lookup_id, shard, - channel, clk: start_clk, w_ptr, h_ptr, @@ -101,7 +106,11 @@ impl Syscall for Sha256CompressSyscall { h_read_records: h_read_records.try_into().unwrap(), w_i_read_records, h_write_records: h_write_records.try_into().unwrap(), + local_mem_access: rt.postprocess(), }); + let syscall_event = + rt.rt.syscall_event(start_clk, syscall_code.syscall_id(), arg1, arg2, lookup_id); + rt.record_mut().add_precompile_event(syscall_code, syscall_event, event); None } diff --git a/crates/core/executor/src/syscalls/precompiles/sha256/extend.rs b/crates/core/executor/src/syscalls/precompiles/sha256/extend.rs index 842fc678b8..36f1b56542 100644 --- a/crates/core/executor/src/syscalls/precompiles/sha256/extend.rs +++ b/crates/core/executor/src/syscalls/precompiles/sha256/extend.rs @@ -1,6 +1,6 @@ use crate::{ - events::ShaExtendEvent, - syscalls::{Syscall, SyscallContext}, + events::{PrecompileEvent, ShaExtendEvent}, + syscalls::{Syscall, SyscallCode, SyscallContext}, }; pub(crate) struct Sha256ExtendSyscall; @@ -10,7 +10,13 @@ impl Syscall for Sha256ExtendSyscall { 48 } - fn execute(&self, rt: &mut SyscallContext, arg1: u32, arg2: u32) -> Option { + fn execute( + &self, + rt: &mut SyscallContext, + syscall_code: SyscallCode, + arg1: u32, + arg2: u32, + ) -> Option { let clk_init = rt.clk; let w_ptr = arg1; assert!(arg2 == 0, "arg2 must be 0"); @@ -57,11 +63,9 @@ impl Syscall for Sha256ExtendSyscall { // Push the SHA extend event. 
let lookup_id = rt.syscall_lookup_id; let shard = rt.current_shard(); - let channel = rt.current_channel(); - rt.record_mut().sha_extend_events.push(ShaExtendEvent { + let event = PrecompileEvent::ShaExtend(ShaExtendEvent { lookup_id, shard, - channel, clk: clk_init, w_ptr: w_ptr_init, w_i_minus_15_reads, @@ -69,7 +73,11 @@ impl Syscall for Sha256ExtendSyscall { w_i_minus_16_reads, w_i_minus_7_reads, w_i_writes, + local_mem_access: rt.postprocess(), }); + let syscall_event = + rt.rt.syscall_event(clk_init, syscall_code.syscall_id(), arg1, arg2, lookup_id); + rt.record_mut().add_precompile_event(syscall_code, syscall_event, event); None } diff --git a/crates/core/executor/src/syscalls/precompiles/uint256.rs b/crates/core/executor/src/syscalls/precompiles/uint256.rs index 4592f999fc..a8d5c54fdc 100644 --- a/crates/core/executor/src/syscalls/precompiles/uint256.rs +++ b/crates/core/executor/src/syscalls/precompiles/uint256.rs @@ -4,14 +4,20 @@ use sp1_curves::edwards::WORDS_FIELD_ELEMENT; use sp1_primitives::consts::{bytes_to_words_le, words_to_bytes_le_vec, WORD_SIZE}; use crate::{ - events::Uint256MulEvent, - syscalls::{Syscall, SyscallContext}, + events::{PrecompileEvent, Uint256MulEvent}, + syscalls::{Syscall, SyscallCode, SyscallContext}, }; pub(crate) struct Uint256MulSyscall; impl Syscall for Uint256MulSyscall { - fn execute(&self, rt: &mut SyscallContext, arg1: u32, arg2: u32) -> Option { + fn execute( + &self, + rt: &mut SyscallContext, + syscall_code: SyscallCode, + arg1: u32, + arg2: u32, + ) -> Option { let clk = rt.clk; let x_ptr = arg1; @@ -60,11 +66,9 @@ impl Syscall for Uint256MulSyscall { let lookup_id = rt.syscall_lookup_id; let shard = rt.current_shard(); - let channel = rt.current_channel(); - rt.record_mut().uint256_mul_events.push(Uint256MulEvent { + let event = PrecompileEvent::Uint256Mul(Uint256MulEvent { lookup_id, shard, - channel, clk, x_ptr, x, @@ -74,7 +78,11 @@ impl Syscall for Uint256MulSyscall { x_memory_records, y_memory_records, modulus_memory_records, + local_mem_access: rt.postprocess(), }); + let sycall_event = + rt.rt.syscall_event(clk, syscall_code.syscall_id(), arg1, arg2, lookup_id); + rt.record_mut().add_precompile_event(syscall_code, sycall_event, event); None } diff --git a/crates/core/executor/src/syscalls/precompiles/weierstrass/add.rs b/crates/core/executor/src/syscalls/precompiles/weierstrass/add.rs index 9d39ca325a..1456b9870f 100644 --- a/crates/core/executor/src/syscalls/precompiles/weierstrass/add.rs +++ b/crates/core/executor/src/syscalls/precompiles/weierstrass/add.rs @@ -3,8 +3,8 @@ use std::marker::PhantomData; use sp1_curves::{CurveType, EllipticCurve}; use crate::{ - events::create_ec_add_event, - syscalls::{Syscall, SyscallContext}, + events::{create_ec_add_event, PrecompileEvent}, + syscalls::{Syscall, SyscallCode, SyscallContext}, }; pub(crate) struct WeierstrassAddAssignSyscall { @@ -19,12 +19,34 @@ impl WeierstrassAddAssignSyscall { } impl Syscall for WeierstrassAddAssignSyscall { - fn execute(&self, rt: &mut SyscallContext, arg1: u32, arg2: u32) -> Option { + fn execute( + &self, + rt: &mut SyscallContext, + syscall_code: SyscallCode, + arg1: u32, + arg2: u32, + ) -> Option { let event = create_ec_add_event::(rt, arg1, arg2); + let syscall_event = + rt.rt.syscall_event(event.clk, syscall_code.syscall_id(), arg1, arg2, event.lookup_id); match E::CURVE_TYPE { - CurveType::Secp256k1 => rt.record_mut().secp256k1_add_events.push(event), - CurveType::Bn254 => rt.record_mut().bn254_add_events.push(event), - CurveType::Bls12381 => 
rt.record_mut().bls12381_add_events.push(event), + CurveType::Secp256k1 => rt.record_mut().add_precompile_event( + syscall_code, + syscall_event, + PrecompileEvent::Secp256k1Add(event), + ), + CurveType::Bn254 => { + rt.record_mut().add_precompile_event( + syscall_code, + syscall_event, + PrecompileEvent::Bn254Add(event), + ); + } + CurveType::Bls12381 => rt.record_mut().add_precompile_event( + syscall_code, + syscall_event, + PrecompileEvent::Bls12381Add(event), + ), _ => panic!("Unsupported curve"), } None diff --git a/crates/core/executor/src/syscalls/precompiles/weierstrass/decompress.rs b/crates/core/executor/src/syscalls/precompiles/weierstrass/decompress.rs index a2b476281e..df056337a1 100644 --- a/crates/core/executor/src/syscalls/precompiles/weierstrass/decompress.rs +++ b/crates/core/executor/src/syscalls/precompiles/weierstrass/decompress.rs @@ -3,8 +3,8 @@ use std::marker::PhantomData; use sp1_curves::{CurveType, EllipticCurve}; use crate::{ - events::create_ec_decompress_event, - syscalls::{Syscall, SyscallContext}, + events::{create_ec_decompress_event, PrecompileEvent}, + syscalls::{Syscall, SyscallCode, SyscallContext}, }; pub(crate) struct WeierstrassDecompressSyscall { @@ -19,11 +19,27 @@ impl WeierstrassDecompressSyscall { } impl Syscall for WeierstrassDecompressSyscall { - fn execute(&self, rt: &mut SyscallContext, arg1: u32, arg2: u32) -> Option { + fn execute( + &self, + rt: &mut SyscallContext, + syscall_code: SyscallCode, + arg1: u32, + arg2: u32, + ) -> Option { let event = create_ec_decompress_event::(rt, arg1, arg2); + let syscall_event = + rt.rt.syscall_event(event.clk, syscall_code.syscall_id(), arg1, arg2, event.lookup_id); match E::CURVE_TYPE { - CurveType::Secp256k1 => rt.record_mut().k256_decompress_events.push(event), - CurveType::Bls12381 => rt.record_mut().bls12381_decompress_events.push(event), + CurveType::Secp256k1 => rt.record_mut().add_precompile_event( + syscall_code, + syscall_event, + PrecompileEvent::Secp256k1Decompress(event), + ), + CurveType::Bls12381 => rt.record_mut().add_precompile_event( + syscall_code, + syscall_event, + PrecompileEvent::Bls12381Decompress(event), + ), _ => panic!("Unsupported curve"), } None diff --git a/crates/core/executor/src/syscalls/precompiles/weierstrass/double.rs b/crates/core/executor/src/syscalls/precompiles/weierstrass/double.rs index 507e78f05c..70e7074853 100644 --- a/crates/core/executor/src/syscalls/precompiles/weierstrass/double.rs +++ b/crates/core/executor/src/syscalls/precompiles/weierstrass/double.rs @@ -3,8 +3,8 @@ use std::marker::PhantomData; use sp1_curves::{CurveType, EllipticCurve}; use crate::{ - events::create_ec_double_event, - syscalls::{Syscall, SyscallContext}, + events::{create_ec_double_event, PrecompileEvent}, + syscalls::{Syscall, SyscallCode, SyscallContext}, }; pub(crate) struct WeierstrassDoubleAssignSyscall { @@ -19,12 +19,38 @@ impl WeierstrassDoubleAssignSyscall { } impl Syscall for WeierstrassDoubleAssignSyscall { - fn execute(&self, rt: &mut SyscallContext, arg1: u32, arg2: u32) -> Option { + fn execute( + &self, + rt: &mut SyscallContext, + syscall_code: SyscallCode, + arg1: u32, + arg2: u32, + ) -> Option { let event = create_ec_double_event::(rt, arg1, arg2); + let syscall_event = + rt.rt.syscall_event(event.clk, syscall_code.syscall_id(), arg1, arg2, event.lookup_id); match E::CURVE_TYPE { - CurveType::Secp256k1 => rt.record_mut().secp256k1_double_events.push(event), - CurveType::Bn254 => rt.record_mut().bn254_double_events.push(event), - CurveType::Bls12381 => 
rt.record_mut().bls12381_double_events.push(event), + CurveType::Secp256k1 => { + rt.record_mut().add_precompile_event( + syscall_code, + syscall_event, + PrecompileEvent::Secp256k1Double(event), + ); + } + CurveType::Bn254 => { + rt.record_mut().add_precompile_event( + syscall_code, + syscall_event, + PrecompileEvent::Bn254Double(event), + ); + } + CurveType::Bls12381 => { + rt.record_mut().add_precompile_event( + syscall_code, + syscall_event, + PrecompileEvent::Bls12381Double(event), + ); + } _ => panic!("Unsupported curve"), } None diff --git a/crates/core/executor/src/syscalls/unconstrained.rs b/crates/core/executor/src/syscalls/unconstrained.rs index 2a957c56d7..a84338eb0f 100644 --- a/crates/core/executor/src/syscalls/unconstrained.rs +++ b/crates/core/executor/src/syscalls/unconstrained.rs @@ -2,12 +2,12 @@ use hashbrown::HashMap; use crate::{state::ForkState, ExecutorMode}; -use super::{Syscall, SyscallContext}; +use super::{Syscall, SyscallCode, SyscallContext}; pub(crate) struct EnterUnconstrainedSyscall; impl Syscall for EnterUnconstrainedSyscall { - fn execute(&self, ctx: &mut SyscallContext, _: u32, _: u32) -> Option { + fn execute(&self, ctx: &mut SyscallContext, _: SyscallCode, _: u32, _: u32) -> Option { if ctx.rt.unconstrained { panic!("Unconstrained block is already active."); } @@ -29,7 +29,7 @@ impl Syscall for EnterUnconstrainedSyscall { pub(crate) struct ExitUnconstrainedSyscall; impl Syscall for ExitUnconstrainedSyscall { - fn execute(&self, ctx: &mut SyscallContext, _: u32, _: u32) -> Option { + fn execute(&self, ctx: &mut SyscallContext, _: SyscallCode, _: u32, _: u32) -> Option { // Reset the state of the runtime. if ctx.rt.unconstrained { ctx.rt.state.global_clk = ctx.rt.unconstrained_state.global_clk; diff --git a/crates/core/executor/src/syscalls/verify.rs b/crates/core/executor/src/syscalls/verify.rs index cf314b1ad8..0197199e51 100644 --- a/crates/core/executor/src/syscalls/verify.rs +++ b/crates/core/executor/src/syscalls/verify.rs @@ -1,10 +1,16 @@ -use super::{Syscall, SyscallContext}; +use super::{Syscall, SyscallCode, SyscallContext}; pub(crate) struct VerifySyscall; impl Syscall for VerifySyscall { #[allow(clippy::mut_mut)] - fn execute(&self, ctx: &mut SyscallContext, vkey_ptr: u32, pv_digest_ptr: u32) -> Option { + fn execute( + &self, + ctx: &mut SyscallContext, + _: SyscallCode, + vkey_ptr: u32, + pv_digest_ptr: u32, + ) -> Option { let rt = &mut ctx.rt; // vkey_ptr is a pointer to [u32; 8] which contains the verification key. diff --git a/crates/core/executor/src/syscalls/write.rs b/crates/core/executor/src/syscalls/write.rs index d40133903f..d0db44cae8 100644 --- a/crates/core/executor/src/syscalls/write.rs +++ b/crates/core/executor/src/syscalls/write.rs @@ -2,7 +2,7 @@ use sp1_primitives::consts::num_to_comma_separated; use crate::{Executor, Register}; -use super::{Syscall, SyscallContext}; +use super::{Syscall, SyscallCode, SyscallContext}; pub(crate) struct WriteSyscall; @@ -27,7 +27,13 @@ impl Syscall for WriteSyscall { /// /// Else, log a warning. 
#[allow(clippy::pedantic)] - fn execute(&self, ctx: &mut SyscallContext, arg1: u32, arg2: u32) -> Option { + fn execute( + &self, + ctx: &mut SyscallContext, + _: SyscallCode, + arg1: u32, + arg2: u32, + ) -> Option { let a2 = Register::X12; let rt = &mut ctx.rt; let fd = arg1; diff --git a/crates/core/executor/src/utils.rs b/crates/core/executor/src/utils.rs new file mode 100644 index 0000000000..ec4203aa50 --- /dev/null +++ b/crates/core/executor/src/utils.rs @@ -0,0 +1,48 @@ +use hashbrown::HashMap; + +use nohash_hasher::BuildNoHashHasher; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +use crate::Opcode; + +/// Serialize a `HashMap` as a `Vec<(u32, V)>`. +pub fn serialize_hashmap_as_vec( + map: &HashMap>, + serializer: S, +) -> Result { + Serialize::serialize(&map.iter().collect::>(), serializer) +} + +/// Deserialize a `Vec<(u32, V)>` as a `HashMap`. +pub fn deserialize_hashmap_as_vec<'de, V: Deserialize<'de>, D: Deserializer<'de>>( + deserializer: D, +) -> Result>, D::Error> { + let seq: Vec<(u32, V)> = Deserialize::deserialize(deserializer)?; + Ok(seq.into_iter().collect()) +} + +/// Returns `true` if the given `opcode` is a signed operation. +#[must_use] +pub fn is_signed_operation(opcode: Opcode) -> bool { + opcode == Opcode::DIV || opcode == Opcode::REM +} + +/// Calculate the correct `quotient` and `remainder` for the given `b` and `c` per RISC-V spec. +#[must_use] +pub fn get_quotient_and_remainder(b: u32, c: u32, opcode: Opcode) -> (u32, u32) { + if c == 0 { + // When c is 0, the quotient is 2^32 - 1 and the remainder is b regardless of whether we + // perform signed or unsigned division. + (u32::MAX, b) + } else if is_signed_operation(opcode) { + ((b as i32).wrapping_div(c as i32) as u32, (b as i32).wrapping_rem(c as i32) as u32) + } else { + (b.wrapping_div(c), b.wrapping_rem(c)) + } +} + +/// Calculate the most significant bit of the given 32-bit integer `a`, and returns it as a u8. 
+#[must_use] +pub const fn get_msb(a: u32) -> u8 { + ((a >> 31) & 1) as u8 +} diff --git a/crates/core/machine/Cargo.toml b/crates/core/machine/Cargo.toml index f264ff4194..7e2bd5a50a 100644 --- a/crates/core/machine/Cargo.toml +++ b/crates/core/machine/Cargo.toml @@ -12,47 +12,32 @@ categories = { workspace = true } [dependencies] bincode = "1.3.3" serde = { version = "1.0", features = ["derive", "rc"] } -elf = "0.7.4" itertools = "0.13.0" log = "0.4.22" -nohash-hasher = "0.2.0" num = { version = "0.4.3" } p3-air = { workspace = true } p3-baby-bear = { workspace = true } -p3-blake3 = { workspace = true, features = ["parallel"] } p3-challenger = { workspace = true } -p3-commit = { workspace = true } -p3-dft = { workspace = true } p3-field = { workspace = true } -p3-fri = { workspace = true } -p3-keccak = { workspace = true } +p3-blake3 = { workspace = true } p3-keccak-air = { workspace = true } p3-matrix = { workspace = true } p3-maybe-rayon = { workspace = true, features = ["parallel"] } -p3-merkle-tree = { workspace = true } -p3-poseidon2 = { workspace = true } -p3-symmetric = { workspace = true } p3-uni-stark = { workspace = true } p3-util = { workspace = true } -rrs_lib = { package = "rrs-succinct", version = "0.1.0" } sp1-derive = { workspace = true } sp1-primitives = { workspace = true } -anyhow = "1.0.83" amcl = { package = "snowbridge-amcl", version = "1.0.2", default-features = false, features = [ "bls381", ] } -arrayref = "0.3.8" -blake3 = "1.5" cfg-if = "1.0.0" generic-array = { version = "1.1.0", features = ["alloc", "serde"] } typenum = "1.17.0" -curve25519-dalek = { version = "4.1.2" } elliptic-curve = "0.13.8" hex = "0.4.3" k256 = { version = "0.13.3", features = ["expose-field"] } num_cpus = "1.16.0" -serde_with = "3.9.0" size = "0.4.1" tempfile = "3.10.1" tracing = "0.1.40" @@ -61,10 +46,8 @@ tracing-subscriber = { version = "0.3.18", features = ["std", "env-filter"] } strum_macros = "0.26" strum = "0.26" web-time = "1.1.0" -rayon-scan = "0.1.1" thiserror = "1.0.63" rand = "0.8.5" -bytemuck = "1.16.0" hashbrown = { version = "0.14.5", features = ["serde", "inline-more"] } static_assertions = "1.1.0" @@ -84,6 +67,7 @@ sp1-core-executor = { workspace = true, features = ["programs"] } neon = ["p3-blake3/neon"] programs = [] debug = [] +bigint-rug = ["sp1-curves/bigint-rug"] [lib] bench = false diff --git a/crates/core/machine/src/air/memory.rs b/crates/core/machine/src/air/memory.rs index 8d0c9fb909..53d243f63b 100644 --- a/crates/core/machine/src/air/memory.rs +++ b/crates/core/machine/src/air/memory.rs @@ -4,7 +4,7 @@ use p3_air::AirBuilder; use p3_field::AbstractField; use sp1_core_executor::ByteOpcode; use sp1_stark::{ - air::{AirInteraction, BaseAirBuilder, ByteAirBuilder}, + air::{AirInteraction, BaseAirBuilder, ByteAirBuilder, InteractionScope}, InteractionKind, }; @@ -18,7 +18,6 @@ pub trait MemoryAirBuilder: BaseAirBuilder { fn eval_memory_access + Clone>( &mut self, shard: impl Into, - channel: impl Into, clk: impl Into, addr: impl Into, memory_access: &impl MemoryCols, @@ -26,20 +25,13 @@ pub trait MemoryAirBuilder: BaseAirBuilder { ) { let do_check: Self::Expr = do_check.into(); let shard: Self::Expr = shard.into(); - let channel: Self::Expr = channel.into(); let clk: Self::Expr = clk.into(); let mem_access = memory_access.access(); self.assert_bool(do_check.clone()); // Verify that the current memory access time is greater than the previous's. 
- self.eval_memory_access_timestamp( - mem_access, - do_check.clone(), - shard.clone(), - channel, - clk.clone(), - ); + self.eval_memory_access_timestamp(mem_access, do_check.clone(), shard.clone(), clk.clone()); // Add to the memory argument. let addr = addr.into(); @@ -57,21 +49,22 @@ pub trait MemoryAirBuilder: BaseAirBuilder { .collect(); // The previous values get sent with multiplicity = 1, for "read". - self.send(AirInteraction::new(prev_values, do_check.clone(), InteractionKind::Memory)); + self.send( + AirInteraction::new(prev_values, do_check.clone(), InteractionKind::Memory), + InteractionScope::Local, + ); // The current values get "received", i.e. multiplicity = -1 - self.receive(AirInteraction::new( - current_values, - do_check.clone(), - InteractionKind::Memory, - )); + self.receive( + AirInteraction::new(current_values, do_check.clone(), InteractionKind::Memory), + InteractionScope::Local, + ); } /// Constraints a memory read or write to a slice of `MemoryAccessCols`. fn eval_memory_access_slice + Copy>( &mut self, shard: impl Into + Copy, - channel: impl Into + Clone, clk: impl Into + Clone, initial_addr: impl Into + Clone, memory_access_slice: &[impl MemoryCols], @@ -80,7 +73,6 @@ pub trait MemoryAirBuilder: BaseAirBuilder { for (i, access_slice) in memory_access_slice.iter().enumerate() { self.eval_memory_access( shard, - channel.clone(), clk.clone(), initial_addr.clone().into() + Self::Expr::from_canonical_usize(i * 4), access_slice, @@ -100,7 +92,6 @@ pub trait MemoryAirBuilder: BaseAirBuilder { mem_access: &MemoryAccessCols + Clone>, do_check: impl Into, shard: impl Into + Clone, - channel: impl Into + Clone, clk: impl Into, ) { let do_check: Self::Expr = do_check.into(); @@ -138,8 +129,6 @@ pub trait MemoryAirBuilder: BaseAirBuilder { diff_minus_one, mem_access.diff_16bit_limb.clone(), mem_access.diff_8bit_limb.clone(), - shard.clone(), - channel.clone(), do_check, ); } @@ -155,8 +144,6 @@ pub trait MemoryAirBuilder: BaseAirBuilder { value: impl Into, limb_16: impl Into + Clone, limb_8: impl Into + Clone, - shard: impl Into + Clone, - channel: impl Into + Clone, do_check: impl Into + Clone, ) { // Verify that value = limb_16 + limb_8 * 2^16. @@ -172,8 +159,6 @@ pub trait MemoryAirBuilder: BaseAirBuilder { limb_16, Self::Expr::zero(), Self::Expr::zero(), - shard.clone(), - channel.clone(), do_check.clone(), ); @@ -182,8 +167,6 @@ pub trait MemoryAirBuilder: BaseAirBuilder { Self::Expr::zero(), Self::Expr::zero(), limb_8, - shard.clone(), - channel.clone(), do_check, ) } diff --git a/crates/core/machine/src/air/program.rs b/crates/core/machine/src/air/program.rs index d94f006d1b..4efaa97f3f 100644 --- a/crates/core/machine/src/air/program.rs +++ b/crates/core/machine/src/air/program.rs @@ -2,7 +2,7 @@ use std::iter::once; use p3_air::AirBuilder; use sp1_stark::{ - air::{AirInteraction, BaseAirBuilder}, + air::{AirInteraction, BaseAirBuilder, InteractionScope}, InteractionKind, }; @@ -26,7 +26,10 @@ pub trait ProgramAirBuilder: BaseAirBuilder { .chain(once(shard.into())) .collect(); - self.send(AirInteraction::new(values, multiplicity.into(), InteractionKind::Program)); + self.send( + AirInteraction::new(values, multiplicity.into(), InteractionKind::Program), + InteractionScope::Local, + ); } /// Receives an instruction. 
@@ -45,6 +48,9 @@ pub trait ProgramAirBuilder: BaseAirBuilder { .chain(once(shard.into())) .collect(); - self.receive(AirInteraction::new(values, multiplicity.into(), InteractionKind::Program)); + self.receive( + AirInteraction::new(values, multiplicity.into(), InteractionKind::Program), + InteractionScope::Local, + ); } } diff --git a/crates/core/machine/src/air/word.rs b/crates/core/machine/src/air/word.rs index d96a1c6d31..b4e68c67ac 100644 --- a/crates/core/machine/src/air/word.rs +++ b/crates/core/machine/src/air/word.rs @@ -55,8 +55,6 @@ pub trait WordAirBuilder: ByteAirBuilder { fn slice_range_check_u8( &mut self, input: &[impl Into + Clone], - shard: impl Into + Clone, - channel: impl Into + Clone, mult: impl Into + Clone, ) { let mut index = 0; @@ -66,8 +64,6 @@ pub trait WordAirBuilder: ByteAirBuilder { Self::Expr::zero(), input[index].clone(), input[index + 1].clone(), - shard.clone(), - channel.clone(), mult.clone(), ); index += 2; @@ -78,8 +74,6 @@ pub trait WordAirBuilder: ByteAirBuilder { Self::Expr::zero(), input[index].clone(), Self::Expr::zero(), - shard.clone(), - channel.clone(), mult.clone(), ); } @@ -89,8 +83,6 @@ pub trait WordAirBuilder: ByteAirBuilder { fn slice_range_check_u16( &mut self, input: &[impl Into + Copy], - shard: impl Into + Clone, - channel: impl Into + Clone, mult: impl Into + Clone, ) { input.iter().for_each(|limb| { @@ -99,8 +91,6 @@ pub trait WordAirBuilder: ByteAirBuilder { *limb, Self::Expr::zero(), Self::Expr::zero(), - shard.clone(), - channel.clone(), mult.clone(), ); }); diff --git a/crates/core/machine/src/alu/add_sub/mod.rs b/crates/core/machine/src/alu/add_sub/mod.rs index ee8efafb03..982012e562 100644 --- a/crates/core/machine/src/alu/add_sub/mod.rs +++ b/crates/core/machine/src/alu/add_sub/mod.rs @@ -19,7 +19,7 @@ use sp1_stark::{ Word, }; -use crate::{operations::AddOperation, utils::pad_to_power_of_two}; +use crate::{operations::AddOperation, utils::pad_rows_fixed}; /// The number of main trace columns for `AddSubChip`. pub const NUM_ADD_SUB_COLS: usize = size_of::>(); @@ -40,9 +40,6 @@ pub struct AddSubCols { /// The shard number, used for byte lookup table. pub shard: T, - /// The channel number, used for byte lookup table. - pub channel: T, - /// The nonce of the operation. pub nonce: T, @@ -105,13 +102,15 @@ impl MachineAir for AddSubChip { rows.extend(row_batch); } + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_ADD_SUB_COLS], + input.fixed_log2_rows::(self), + ); // Convert the trace to a row major matrix. let mut trace = RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_ADD_SUB_COLS); - // Pad the trace to a power of two. - pad_to_power_of_two::(&mut trace.values); - // Write the nonces to the trace. 
for i in 0..trace.height() { let cols: &mut AddSubCols = @@ -146,7 +145,11 @@ impl MachineAir for AddSubChip { } fn included(&self, shard: &Self::Record) -> bool { - !shard.add_events.is_empty() || !shard.sub_events.is_empty() + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + !shard.add_events.is_empty() + } } } @@ -160,14 +163,13 @@ impl AddSubChip { ) { let is_add = event.opcode == Opcode::ADD; cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); cols.is_add = F::from_bool(is_add); cols.is_sub = F::from_bool(!is_add); let operand_1 = if is_add { event.b } else { event.a }; let operand_2 = event.c; - cols.add_operation.populate(blu, event.shard, event.channel, operand_1, operand_2); + cols.add_operation.populate(blu, event.shard, operand_1, operand_2); cols.operand_1 = Word::from(operand_1); cols.operand_2 = Word::from(operand_2); } @@ -200,8 +202,6 @@ where local.operand_1, local.operand_2, local.add_operation, - local.shard, - local.channel, local.is_add + local.is_sub, ); @@ -213,7 +213,6 @@ where local.operand_1, local.operand_2, local.shard, - local.channel, local.nonce, local.is_add, ); @@ -225,7 +224,6 @@ where local.add_operation.value, local.operand_2, local.shard, - local.channel, local.nonce, local.is_sub, ); @@ -251,7 +249,7 @@ mod tests { #[test] fn generate_trace() { let mut shard = ExecutionRecord::default(); - shard.add_events = vec![AluEvent::new(0, 0, 0, Opcode::ADD, 14, 8, 6)]; + shard.add_events = vec![AluEvent::new(0, 0, Opcode::ADD, 14, 8, 6)]; let chip = AddSubChip::default(); let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); @@ -269,7 +267,6 @@ mod tests { let operand_2 = thread_rng().gen_range(0..u32::MAX); let result = operand_1.wrapping_add(operand_2); shard.add_events.push(AluEvent::new( - 0, i % 2, 0, Opcode::ADD, @@ -283,7 +280,6 @@ mod tests { let operand_2 = thread_rng().gen_range(0..u32::MAX); let result = operand_1.wrapping_sub(operand_2); shard.add_events.push(AluEvent::new( - 0, i % 2, 0, Opcode::SUB, diff --git a/crates/core/machine/src/alu/bitwise/mod.rs b/crates/core/machine/src/alu/bitwise/mod.rs index 556bd22dcc..88156e286c 100644 --- a/crates/core/machine/src/alu/bitwise/mod.rs +++ b/crates/core/machine/src/alu/bitwise/mod.rs @@ -19,7 +19,7 @@ use sp1_stark::{ Word, }; -use crate::utils::pad_to_power_of_two; +use crate::utils::pad_rows_fixed; /// The number of main trace columns for `BitwiseChip`. pub const NUM_BITWISE_COLS: usize = size_of::>(); @@ -35,9 +35,6 @@ pub struct BitwiseCols { /// The shard number, used for byte lookup table. pub shard: T, - /// The channel number, used for byte lookup table. - pub channel: T, - /// The nonce of the operation. pub nonce: T, @@ -74,7 +71,7 @@ impl MachineAir for BitwiseChip { input: &ExecutionRecord, _: &mut ExecutionRecord, ) -> RowMajorMatrix { - let rows = input + let mut rows = input .bitwise_events .par_iter() .map(|event| { @@ -86,13 +83,17 @@ impl MachineAir for BitwiseChip { }) .collect::>(); + // Pad the trace to a power of two. + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_BITWISE_COLS], + input.fixed_log2_rows::(self), + ); + // Convert the trace to a row major matrix. let mut trace = RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_BITWISE_COLS); - // Pad the trace to a power of two. 
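The `pad_rows_fixed` calls that replace `pad_to_power_of_two` in these traces pad either to a shape-prescribed height or to the next power of two. A rough standalone sketch of that behaviour (the real helper lives in `crates/core/machine/src/utils` and may differ in details):

fn pad_rows_fixed<R: Clone>(
    rows: &mut Vec<R>,
    dummy_row: impl Fn() -> R,
    fixed_log2_rows: Option<usize>,
) {
    // With a fixed shape, pad to exactly 2^log2 rows; otherwise to the next power of two.
    let target = match fixed_log2_rows {
        Some(log2) => 1 << log2,
        None => rows.len().next_power_of_two(),
    };
    assert!(target >= rows.len(), "fixed shape too small for the events in this shard");
    rows.resize(target, dummy_row());
}

fn main() {
    let mut rows = vec![[0u32; 4]; 5];
    pad_rows_fixed(&mut rows, || [0u32; 4], Some(3));
    assert_eq!(rows.len(), 8);
}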
- pad_to_power_of_two::(&mut trace.values); - for i in 0..trace.height() { let cols: &mut BitwiseCols = trace.values[i * NUM_BITWISE_COLS..(i + 1) * NUM_BITWISE_COLS].borrow_mut(); @@ -123,7 +124,11 @@ impl MachineAir for BitwiseChip { } fn included(&self, shard: &Self::Record) -> bool { - !shard.bitwise_events.is_empty() + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + !shard.bitwise_events.is_empty() + } } } @@ -140,7 +145,6 @@ impl BitwiseChip { let c = event.c.to_le_bytes(); cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); cols.a = Word::from(event.a); cols.b = Word::from(event.b); cols.c = Word::from(event.c); @@ -152,7 +156,6 @@ impl BitwiseChip { for ((b_a, b_b), b_c) in a.into_iter().zip(b).zip(c) { let byte_event = ByteLookupEvent { shard: event.shard, - channel: event.channel, opcode: ByteOpcode::from(event.opcode), a1: b_a as u16, a2: 0, @@ -193,7 +196,7 @@ where // Get a multiplicity of `1` only for a true row. let mult = local.is_xor + local.is_or + local.is_and; for ((a, b), c) in local.a.into_iter().zip(local.b).zip(local.c) { - builder.send_byte(opcode.clone(), a, b, c, local.shard, local.channel, mult.clone()); + builder.send_byte(opcode.clone(), a, b, c, mult.clone()); } // Get the cpu opcode, which corresponds to the opcode being sent in the CPU table. @@ -208,7 +211,6 @@ where local.b, local.c, local.shard, - local.channel, local.nonce, local.is_xor + local.is_or + local.is_and, ); @@ -235,7 +237,7 @@ mod tests { #[test] fn generate_trace() { let mut shard = ExecutionRecord::default(); - shard.bitwise_events = vec![AluEvent::new(0, 0, 0, Opcode::XOR, 25, 10, 19)]; + shard.bitwise_events = vec![AluEvent::new(0, 0, Opcode::XOR, 25, 10, 19)]; let chip = BitwiseChip::default(); let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); @@ -249,9 +251,9 @@ mod tests { let mut shard = ExecutionRecord::default(); shard.bitwise_events = [ - AluEvent::new(0, 0, 0, Opcode::XOR, 25, 10, 19), - AluEvent::new(0, 1, 0, Opcode::OR, 27, 10, 19), - AluEvent::new(0, 0, 0, Opcode::AND, 2, 10, 19), + AluEvent::new(0, 0, Opcode::XOR, 25, 10, 19), + AluEvent::new(0, 0, Opcode::OR, 27, 10, 19), + AluEvent::new(0, 0, Opcode::AND, 2, 10, 19), ] .repeat(1000); let chip = BitwiseChip::default(); diff --git a/crates/core/machine/src/alu/divrem/mod.rs b/crates/core/machine/src/alu/divrem/mod.rs index 9beb1ddf13..d811b90d45 100644 --- a/crates/core/machine/src/alu/divrem/mod.rs +++ b/crates/core/machine/src/alu/divrem/mod.rs @@ -60,20 +60,18 @@ //! # b = 0 * quotient + b is satisfied by any quotient. //! 
assert quotient = 0xffffffff -mod utils; - use core::{ borrow::{Borrow, BorrowMut}, mem::size_of, }; -use hashbrown::HashMap; use p3_air::{Air, AirBuilder, BaseAir}; use p3_field::{AbstractField, PrimeField}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use sp1_core_executor::{ - events::{create_alu_lookups, AluEvent, ByteLookupEvent, ByteRecord}, - ByteOpcode, ExecutionRecord, Opcode, Program, + events::{ByteLookupEvent, ByteRecord}, + get_msb, get_quotient_and_remainder, is_signed_operation, ByteOpcode, ExecutionRecord, Opcode, + Program, }; use sp1_derive::AlignedBorrow; use sp1_primitives::consts::WORD_SIZE; @@ -81,9 +79,8 @@ use sp1_stark::{air::MachineAir, Word}; use crate::{ air::SP1CoreAirBuilder, - alu::divrem::utils::{get_msb, get_quotient_and_remainder, is_signed_operation}, operations::{IsEqualWordOperation, IsZeroWordOperation}, - utils::pad_to_power_of_two, + utils::pad_rows_fixed, }; /// The number of main trace columns for `DivRemChip`. @@ -106,9 +103,6 @@ pub struct DivRemCols { /// The shard number, used for byte lookup table. pub shard: T, - /// The channel number, used for byte lookup table. - pub channel: T, - /// The nonce of the operation. pub nonce: T, @@ -248,7 +242,6 @@ impl MachineAir for DivRemChip { cols.b = Word::from(event.b); cols.c = Word::from(event.c); cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); cols.is_real = F::one(); cols.is_divu = F::from_bool(event.opcode == Opcode::DIVU); cols.is_remu = F::from_bool(event.opcode == Opcode::REMU); @@ -301,7 +294,6 @@ impl MachineAir for DivRemChip { let most_significant_byte = word.to_le_bytes()[WORD_SIZE - 1]; blu_events.push(ByteLookupEvent { shard: event.shard, - channel: event.channel, opcode: ByteOpcode::MSB, a1: get_msb(*word) as u16, a2: 0, @@ -350,93 +342,14 @@ impl MachineAir for DivRemChip { } // Insert the necessary multiplication & LT events. - // - // This generate_trace for div must be executed _before_ calling generate_trace for - // mul and LT upon which div depends. This ordering is critical as mul and LT - // require all the mul and LT events be added before we can call generate_trace. { - // Insert the absolute value computation events. 
- { - let mut add_events: Vec = vec![]; - if cols.abs_c_alu_event == F::one() { - add_events.push(AluEvent { - lookup_id: event.sub_lookups[4], - shard: event.shard, - channel: event.channel, - clk: event.clk, - opcode: Opcode::ADD, - a: 0, - b: event.c, - c: (event.c as i32).abs() as u32, - sub_lookups: create_alu_lookups(), - }) - } - if cols.abs_rem_alu_event == F::one() { - add_events.push(AluEvent { - lookup_id: event.sub_lookups[5], - shard: event.shard, - channel: event.channel, - clk: event.clk, - opcode: Opcode::ADD, - a: 0, - b: remainder, - c: (remainder as i32).abs() as u32, - sub_lookups: create_alu_lookups(), - }) - } - let mut alu_events = HashMap::new(); - alu_events.insert(Opcode::ADD, add_events); - output.add_alu_events(alu_events); - } - - let mut lower_word = 0; - for i in 0..WORD_SIZE { - lower_word += (c_times_quotient[i] as u32) << (i * BYTE_SIZE); - } - - let mut upper_word = 0; - for i in 0..WORD_SIZE { - upper_word += (c_times_quotient[WORD_SIZE + i] as u32) << (i * BYTE_SIZE); - } - - let lower_multiplication = AluEvent { - lookup_id: event.sub_lookups[0], - shard: event.shard, - channel: event.channel, - clk: event.clk, - opcode: Opcode::MUL, - a: lower_word, - c: event.c, - b: quotient, - sub_lookups: create_alu_lookups(), - }; cols.lower_nonce = F::from_canonical_u32( input.nonce_lookup.get(&event.sub_lookups[0]).copied().unwrap_or_default(), ); - output.add_mul_event(lower_multiplication); - - let upper_multiplication = AluEvent { - lookup_id: event.sub_lookups[1], - shard: event.shard, - channel: event.channel, - clk: event.clk, - opcode: { - if is_signed_operation(event.opcode) { - Opcode::MULH - } else { - Opcode::MULHU - } - }, - a: upper_word, - c: event.c, - b: quotient, - sub_lookups: create_alu_lookups(), - }; cols.upper_nonce = F::from_canonical_u32( input.nonce_lookup.get(&event.sub_lookups[1]).copied().unwrap_or_default(), ); - output.add_mul_event(upper_multiplication); - let lt_event = if is_signed_operation(event.opcode) { + if is_signed_operation(event.opcode) { cols.abs_nonce = F::from_canonical_u32( input .nonce_lookup @@ -444,17 +357,6 @@ impl MachineAir for DivRemChip { .copied() .unwrap_or_default(), ); - AluEvent { - lookup_id: event.sub_lookups[2], - shard: event.shard, - channel: event.channel, - opcode: Opcode::SLTU, - a: 1, - b: (remainder as i32).abs() as u32, - c: u32::max(1, (event.c as i32).abs() as u32), - clk: event.clk, - sub_lookups: create_alu_lookups(), - } } else { cols.abs_nonce = F::from_canonical_u32( input @@ -463,46 +365,31 @@ impl MachineAir for DivRemChip { .copied() .unwrap_or_default(), ); - AluEvent { - lookup_id: event.sub_lookups[3], - shard: event.shard, - channel: event.channel, - opcode: Opcode::SLTU, - a: 1, - b: remainder, - c: u32::max(1, event.c), - clk: event.clk, - sub_lookups: create_alu_lookups(), - } }; - - if cols.remainder_check_multiplicity == F::one() { - output.add_lt_event(lt_event); - } } // Range check. { - output.add_u8_range_checks(event.shard, event.channel, "ient.to_le_bytes()); - output.add_u8_range_checks( - event.shard, - event.channel, - &remainder.to_le_bytes(), - ); - output.add_u8_range_checks(event.shard, event.channel, &c_times_quotient); + output.add_u8_range_checks(event.shard, "ient.to_le_bytes()); + output.add_u8_range_checks(event.shard, &remainder.to_le_bytes()); + output.add_u8_range_checks(event.shard, &c_times_quotient); } } rows.push(row); } + // Pad the trace to a power of two depending on the proof shape in `input`. 
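The removed block above built the MUL/MULH(U) events whose results the chip now only references through nonces; the arithmetic it encoded is the split of `c * quotient` into a lower and an upper word. A plain-integer sketch (illustrative only, using u64 math instead of the byte arrays):

// b = c * quotient + remainder, with the product read back as two 32-bit words.
fn check_divrem_identity(b: u32, c: u32, quotient: u32, remainder: u32) {
    let product = (c as u64).wrapping_mul(quotient as u64);
    let lower_word = product as u32; // provided by the MUL lookup
    let upper_word = (product >> 32) as u32; // provided by the MULH/MULHU lookup
    let _ = upper_word; // the AIR uses it to rule out wrap-around solutions
    assert_eq!(b, lower_word.wrapping_add(remainder));
}

fn main() {
    check_divrem_identity(17, 3, 5, 2); // 17 = 3 * 5 + 2
}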
+ pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_DIVREM_COLS], + input.fixed_log2_rows::(self), + ); + // Convert the trace to a row major matrix. let mut trace = RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_DIVREM_COLS); - // Pad the trace to a power of two. - pad_to_power_of_two::(&mut trace.values); - // Create the template for the padded rows. These are fake rows that don't fail on some // sanity checks. let padded_row_template = { @@ -534,7 +421,11 @@ impl MachineAir for DivRemChip { } fn included(&self, shard: &Self::Record) -> bool { - !shard.divrem_events.is_empty() + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + !shard.divrem_events.is_empty() + } } } @@ -595,7 +486,6 @@ where local.quotient, local.c, local.shard, - local.channel, local.lower_nonce, local.is_real, ); @@ -621,7 +511,6 @@ where local.quotient, local.c, local.shard, - local.channel, local.upper_nonce, local.is_real, ); @@ -776,7 +665,6 @@ where local.c, local.abs_c, local.shard, - local.channel, local.abs_c_alu_event_nonce, local.abs_c_alu_event, ); @@ -786,7 +674,6 @@ where local.remainder, local.abs_remainder, local.shard, - local.channel, local.abs_rem_alu_event_nonce, local.abs_rem_alu_event, ); @@ -833,7 +720,6 @@ where local.abs_remainder, local.max_abs_c_or_1, local.shard, - local.channel, local.abs_nonce, local.remainder_check_multiplicity, ); @@ -850,43 +736,20 @@ where for msb_pair in msb_pairs.iter() { let msb = msb_pair.0; let byte = msb_pair.1; - builder.send_byte( - opcode, - msb, - byte, - zero.clone(), - local.shard, - local.channel, - local.is_real, - ); + builder.send_byte(opcode, msb, byte, zero.clone(), local.is_real); } } // Range check all the bytes. { - builder.slice_range_check_u8( - &local.quotient.0, - local.shard, - local.channel, - local.is_real, - ); - builder.slice_range_check_u8( - &local.remainder.0, - local.shard, - local.channel, - local.is_real, - ); + builder.slice_range_check_u8(&local.quotient.0, local.is_real); + builder.slice_range_check_u8(&local.remainder.0, local.is_real); local.carry.iter().for_each(|carry| { builder.assert_bool(*carry); }); - builder.slice_range_check_u8( - &local.c_times_quotient, - local.shard, - local.channel, - local.is_real, - ); + builder.slice_range_check_u8(&local.c_times_quotient, local.is_real); } // Check that the flags are boolean. @@ -939,7 +802,6 @@ where local.b, local.c, local.shard, - local.channel, local.nonce, local.is_real, ); @@ -961,7 +823,7 @@ mod tests { #[test] fn generate_trace() { let mut shard = ExecutionRecord::default(); - shard.divrem_events = vec![AluEvent::new(0, 0, 0, Opcode::DIVU, 2, 17, 3)]; + shard.divrem_events = vec![AluEvent::new(0, 0, Opcode::DIVU, 2, 17, 3)]; let chip = DivRemChip::default(); let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); @@ -1014,12 +876,12 @@ mod tests { (Opcode::REM, 0, 1 << 31, neg(1)), ]; for t in divrems.iter() { - divrem_events.push(AluEvent::new(0, 9, 0, t.0, t.1, t.2, t.3)); + divrem_events.push(AluEvent::new(0, 0, t.0, t.1, t.2, t.3)); } // Append more events until we have 1000 tests. 
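For reference, the RISC-V edge cases these tests exercise, written out in ordinary Rust (matching the semantics of the relocated `get_quotient_and_remainder`): division by zero yields an all-ones quotient and the dividend as remainder, and signed overflow wraps back to the dividend.

fn divu(b: u32, c: u32) -> (u32, u32) {
    if c == 0 { (u32::MAX, b) } else { (b / c, b % c) }
}

fn div(b: i32, c: i32) -> (i32, i32) {
    if c == 0 { (-1, b) } else { (b.wrapping_div(c), b.wrapping_rem(c)) }
}

fn main() {
    assert_eq!(divu(10, 0), (0xffff_ffff, 10));   // division by zero
    assert_eq!(div(-7, 0), (-1, -7));             // signed division by zero
    assert_eq!(div(i32::MIN, -1), (i32::MIN, 0)); // overflow wraps
    assert_eq!(div(-7, 2), (-3, -1));             // rounds toward zero
}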
for _ in 0..(1000 - divrems.len()) { - divrem_events.push(AluEvent::new(0, 0, 0, Opcode::DIVU, 1, 1, 1)); + divrem_events.push(AluEvent::new(0, 0, Opcode::DIVU, 1, 1, 1)); } let mut shard = ExecutionRecord::default(); diff --git a/crates/core/machine/src/alu/divrem/utils.rs b/crates/core/machine/src/alu/divrem/utils.rs deleted file mode 100644 index 5147d20f6c..0000000000 --- a/crates/core/machine/src/alu/divrem/utils.rs +++ /dev/null @@ -1,24 +0,0 @@ -use sp1_core_executor::Opcode; - -/// Returns `true` if the given `opcode` is a signed operation. -pub fn is_signed_operation(opcode: Opcode) -> bool { - opcode == Opcode::DIV || opcode == Opcode::REM -} - -/// Calculate the correct `quotient` and `remainder` for the given `b` and `c` per RISC-V spec. -pub fn get_quotient_and_remainder(b: u32, c: u32, opcode: Opcode) -> (u32, u32) { - if c == 0 { - // When c is 0, the quotient is 2^32 - 1 and the remainder is b regardless of whether we - // perform signed or unsigned division. - (u32::MAX, b) - } else if is_signed_operation(opcode) { - ((b as i32).wrapping_div(c as i32) as u32, (b as i32).wrapping_rem(c as i32) as u32) - } else { - ((b as u32).wrapping_div(c as u32) as u32, (b as u32).wrapping_rem(c as u32) as u32) - } -} - -/// Calculate the most significant bit of the given 32-bit integer `a`, and returns it as a u8. -pub const fn get_msb(a: u32) -> u8 { - ((a >> 31) & 1) as u8 -} diff --git a/crates/core/machine/src/alu/lt/mod.rs b/crates/core/machine/src/alu/lt/mod.rs index 1ae8b47893..a9462b8def 100644 --- a/crates/core/machine/src/alu/lt/mod.rs +++ b/crates/core/machine/src/alu/lt/mod.rs @@ -19,7 +19,7 @@ use sp1_stark::{ Word, }; -use crate::utils::pad_to_power_of_two; +use crate::utils::pad_rows_fixed; /// The number of main trace columns for `LtChip`. pub const NUM_LT_COLS: usize = size_of::>(); @@ -35,9 +35,6 @@ pub struct LtCols { /// The shard number, used for byte lookup table. pub shard: T, - /// The channel number, used for byte lookup table. - pub channel: T, - /// The nonce of the operation. pub nonce: T, @@ -110,7 +107,7 @@ impl MachineAir for LtChip { _: &mut ExecutionRecord, ) -> RowMajorMatrix { // Generate the trace rows for each event. - let rows = input + let mut rows = input .lt_events .par_iter() .map(|event| { @@ -123,13 +120,17 @@ impl MachineAir for LtChip { }) .collect::>(); + // Pad the trace to a power of two depending on the proof shape in `input`. + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_LT_COLS], + input.fixed_log2_rows::(self), + ); + // Convert the trace to a row major matrix. let mut trace = RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_LT_COLS); - // Pad the trace to a power of two. - pad_to_power_of_two::(&mut trace.values); - // Write the nonces to the trace. for i in 0..trace.height() { let cols: &mut LtCols = @@ -161,7 +162,11 @@ impl MachineAir for LtChip { } fn included(&self, shard: &Self::Record) -> bool { - !shard.lt_events.is_empty() + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + !shard.lt_events.is_empty() + } } } @@ -178,7 +183,6 @@ impl LtChip { let c = event.c.to_le_bytes(); cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); cols.a = Word(a.map(F::from_canonical_u8)); cols.b = Word(b.map(F::from_canonical_u8)); cols.c = Word(c.map(F::from_canonical_u8)); @@ -192,7 +196,6 @@ impl LtChip { // Send the masked interaction. 
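A standalone sketch of the comparison the masked-byte interactions encode: SLT strips the sign bit from the most significant byte (the 0x7f mask above), decides by the sign bits when they differ, and otherwise falls back to an unsigned comparison of the masked words. Plain-integer version for intuition only:

fn slt_via_masking(b: u32, c: u32) -> bool {
    let b_msb = (b >> 31) & 1;
    let c_msb = (c >> 31) & 1;
    if b_msb != c_msb {
        // A negative value is always less than a non-negative one.
        b_msb == 1
    } else {
        // Same sign: the unsigned order of the masked values matches the signed order.
        (b & 0x7fff_ffff) < (c & 0x7fff_ffff)
    }
}

fn main() {
    let neg3 = (-3i32) as u32;
    let neg4 = (-4i32) as u32;
    assert!(slt_via_masking(neg3, 5));    // -3 < 5
    assert!(!slt_via_masking(5, neg3));   // !(5 < -3)
    assert!(slt_via_masking(neg4, neg3)); // -4 < -3
}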
blu.add_byte_lookup_event(ByteLookupEvent { shard: event.shard, - channel: event.channel, opcode: ByteOpcode::AND, a1: masked_b as u16, a2: 0, @@ -201,7 +204,6 @@ impl LtChip { }); blu.add_byte_lookup_event(ByteLookupEvent { shard: event.shard, - channel: event.channel, opcode: ByteOpcode::AND, a1: masked_c as u16, a2: 0, @@ -251,7 +253,6 @@ impl LtChip { blu.add_byte_lookup_event(ByteLookupEvent { shard: event.shard, - channel: event.channel, opcode: ByteOpcode::LTU, a1: cols.sltu.as_canonical_u32() as u16, a2: 0, @@ -315,8 +316,6 @@ where local.b_masked, local.b[3], AB::F::from_canonical_u8(0x7f), - local.shard, - local.channel, is_real.clone(), ); builder.send_byte( @@ -324,8 +323,6 @@ where local.c_masked, local.c[3], AB::F::from_canonical_u8(0x7f), - local.shard, - local.channel, is_real.clone(), ); @@ -436,8 +433,6 @@ where local.sltu, b_comp_byte, c_comp_byte, - local.shard, - local.channel, is_real.clone(), ); @@ -460,7 +455,6 @@ where local.b, local.c, local.shard, - local.channel, local.nonce, is_real, ); @@ -481,7 +475,7 @@ mod tests { #[test] fn generate_trace() { let mut shard = ExecutionRecord::default(); - shard.lt_events = vec![AluEvent::new(0, 1, 0, Opcode::SLT, 0, 3, 2)]; + shard.lt_events = vec![AluEvent::new(0, 0, Opcode::SLT, 0, 3, 2)]; let chip = LtChip::default(); let generate_trace = chip.generate_trace(&shard, &mut ExecutionRecord::default()); let trace: RowMajorMatrix = generate_trace; @@ -509,21 +503,21 @@ mod tests { const NEG_4: u32 = 0b11111111111111111111111111111100; shard.lt_events = vec![ // 0 == 3 < 2 - AluEvent::new(0, 0, 0, Opcode::SLT, 0, 3, 2), + AluEvent::new(0, 0, Opcode::SLT, 0, 3, 2), // 1 == 2 < 3 - AluEvent::new(0, 0, 1, Opcode::SLT, 1, 2, 3), + AluEvent::new(0, 1, Opcode::SLT, 1, 2, 3), // 0 == 5 < -3 - AluEvent::new(0, 0, 3, Opcode::SLT, 0, 5, NEG_3), + AluEvent::new(0, 3, Opcode::SLT, 0, 5, NEG_3), // 1 == -3 < 5 - AluEvent::new(0, 0, 2, Opcode::SLT, 1, NEG_3, 5), + AluEvent::new(0, 2, Opcode::SLT, 1, NEG_3, 5), // 0 == -3 < -4 - AluEvent::new(0, 0, 4, Opcode::SLT, 0, NEG_3, NEG_4), + AluEvent::new(0, 4, Opcode::SLT, 0, NEG_3, NEG_4), // 1 == -4 < -3 - AluEvent::new(0, 0, 4, Opcode::SLT, 1, NEG_4, NEG_3), + AluEvent::new(0, 4, Opcode::SLT, 1, NEG_4, NEG_3), // 0 == 3 < 3 - AluEvent::new(0, 0, 5, Opcode::SLT, 0, 3, 3), + AluEvent::new(0, 5, Opcode::SLT, 0, 3, 3), // 0 == -3 < -3 - AluEvent::new(0, 0, 5, Opcode::SLT, 0, NEG_3, NEG_3), + AluEvent::new(0, 5, Opcode::SLT, 0, NEG_3, NEG_3), ]; prove_babybear_template(&mut shard); @@ -536,17 +530,17 @@ mod tests { const LARGE: u32 = 0b11111111111111111111111111111101; shard.lt_events = vec![ // 0 == 3 < 2 - AluEvent::new(0, 0, 0, Opcode::SLTU, 0, 3, 2), + AluEvent::new(0, 0, Opcode::SLTU, 0, 3, 2), // 1 == 2 < 3 - AluEvent::new(0, 0, 1, Opcode::SLTU, 1, 2, 3), + AluEvent::new(0, 1, Opcode::SLTU, 1, 2, 3), // 0 == LARGE < 5 - AluEvent::new(0, 0, 2, Opcode::SLTU, 0, LARGE, 5), + AluEvent::new(0, 2, Opcode::SLTU, 0, LARGE, 5), // 1 == 5 < LARGE - AluEvent::new(0, 0, 3, Opcode::SLTU, 1, 5, LARGE), + AluEvent::new(0, 3, Opcode::SLTU, 1, 5, LARGE), // 0 == 0 < 0 - AluEvent::new(0, 0, 5, Opcode::SLTU, 0, 0, 0), + AluEvent::new(0, 5, Opcode::SLTU, 0, 0, 0), // 0 == LARGE < LARGE - AluEvent::new(0, 0, 5, Opcode::SLTU, 0, LARGE, LARGE), + AluEvent::new(0, 5, Opcode::SLTU, 0, LARGE, LARGE), ]; prove_babybear_template(&mut shard); diff --git a/crates/core/machine/src/alu/mul/mod.rs b/crates/core/machine/src/alu/mul/mod.rs index 98dbc7a80e..a9a32cd5c4 100644 --- a/crates/core/machine/src/alu/mul/mod.rs +++ 
b/crates/core/machine/src/alu/mul/mod.rs @@ -47,7 +47,7 @@ use sp1_derive::AlignedBorrow; use sp1_primitives::consts::WORD_SIZE; use sp1_stark::{air::MachineAir, MachineRecord, Word}; -use crate::{air::SP1CoreAirBuilder, alu::mul::utils::get_msb, utils::pad_to_power_of_two}; +use crate::{air::SP1CoreAirBuilder, alu::mul::utils::get_msb, utils::pad_rows_fixed}; /// The number of main trace columns for `MulChip`. pub const NUM_MUL_COLS: usize = size_of::>(); @@ -73,9 +73,6 @@ pub struct MulCols { /// The shard number, used for byte lookup table. pub shard: T, - /// The channel number, used for byte lookup table. - pub channel: T, - /// The nonce of the operation. pub nonce: T, @@ -194,7 +191,6 @@ impl MachineAir for MulChip { let most_significant_byte = word[WORD_SIZE - 1]; blu_events.push(ByteLookupEvent { shard: event.shard, - channel: event.channel, opcode: ByteOpcode::MSB, a1: get_msb(*word) as u16, a2: 0, @@ -238,20 +234,11 @@ impl MachineAir for MulChip { cols.is_mulhu = F::from_bool(event.opcode == Opcode::MULHU); cols.is_mulhsu = F::from_bool(event.opcode == Opcode::MULHSU); cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); // Range check. { - record.add_u16_range_checks( - event.shard, - event.channel, - &carry.map(|x| x as u16), - ); - record.add_u8_range_checks( - event.shard, - event.channel, - &product.map(|x| x as u8), - ); + record.add_u16_range_checks(event.shard, &carry.map(|x| x as u16)); + record.add_u8_range_checks(event.shard, &product.map(|x| x as u8)); } row }) @@ -267,13 +254,17 @@ impl MachineAir for MulChip { output.append(&mut row_and_record.1); } + // Pad the trace to a power of two depending on the proof shape in `input`. + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_MUL_COLS], + input.fixed_log2_rows::(self), + ); + // Convert the trace to a row major matrix. let mut trace = RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_MUL_COLS); - // Pad the trace to a power of two. - pad_to_power_of_two::(&mut trace.values); - // Write the nonces to the trace. for i in 0..trace.height() { let cols: &mut MulCols = @@ -285,7 +276,11 @@ impl MachineAir for MulChip { } fn included(&self, shard: &Self::Record) -> bool { - !shard.mul_events.is_empty() + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + !shard.mul_events.is_empty() + } } } @@ -323,15 +318,7 @@ where for msb_pair in msb_pairs.iter() { let msb = msb_pair.0; let byte = msb_pair.1; - builder.send_byte( - opcode, - msb, - byte, - zero.clone(), - local.shard, - local.channel, - local.is_real, - ); + builder.send_byte(opcode, msb, byte, zero.clone(), local.is_real); } (local.b_msb, local.c_msb) }; @@ -443,9 +430,9 @@ where // Ensure that the carry is at most 2^16. This ensures that // product_before_carry_propagation - carry * base + last_carry never overflows or // underflows enough to "wrap" around to create a second solution. - builder.slice_range_check_u16(&local.carry, local.shard, local.channel, local.is_real); + builder.slice_range_check_u16(&local.carry, local.is_real); - builder.slice_range_check_u8(&local.product, local.shard, local.channel, local.is_real); + builder.slice_range_check_u8(&local.product, local.is_real); } // Receive the arguments. 
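A self-contained sketch of the schoolbook multiplication the MulChip constrains: multiply byte by byte, propagate carries that stay below 2^16 (the bound enforced by `slice_range_check_u16` on `local.carry`), and read MUL from the low word and MULHU from the high word. Illustrative only; the signed MULH/MULHSU variants additionally sign-extend the operands first.

const WORD_SIZE: usize = 4;

fn mul_bytes(b: u32, c: u32) -> (u32, u32) {
    let b = b.to_le_bytes();
    let c = c.to_le_bytes();
    let mut product = [0u32; 2 * WORD_SIZE];
    for i in 0..WORD_SIZE {
        for j in 0..WORD_SIZE {
            product[i + j] += (b[i] as u32) * (c[j] as u32);
        }
    }
    // Carry propagation; every carry fits comfortably in 16 bits.
    let mut bytes = [0u8; 2 * WORD_SIZE];
    let mut carry = 0u32;
    for k in 0..2 * WORD_SIZE {
        let v = product[k] + carry;
        bytes[k] = (v & 0xff) as u8;
        carry = v >> 8;
        assert!(carry < (1 << 16));
    }
    let lo = u32::from_le_bytes([bytes[0], bytes[1], bytes[2], bytes[3]]); // MUL
    let hi = u32::from_le_bytes([bytes[4], bytes[5], bytes[6], bytes[7]]); // MULHU
    (lo, hi)
}

fn main() {
    let (lo, hi) = mul_bytes(0xffff_ffff, 0xffff_ffff);
    let expected = 0xffff_ffffu64 * 0xffff_ffff;
    assert_eq!((lo, hi), (expected as u32, (expected >> 32) as u32));
}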
@@ -455,7 +442,6 @@ where local.b, local.c, local.shard, - local.channel, local.nonce, local.is_real, ); @@ -481,7 +467,6 @@ mod tests { let mut mul_events: Vec = Vec::new(); for _ in 0..10i32.pow(7) { mul_events.push(AluEvent::new( - 0, 0, 0, Opcode::MULHSU, @@ -557,12 +542,12 @@ mod tests { (Opcode::MULH, 0xffffffff, 0x00000001, 0xffffffff), ]; for t in mul_instructions.iter() { - mul_events.push(AluEvent::new(0, 0, 0, t.0, t.1, t.2, t.3)); + mul_events.push(AluEvent::new(0, 0, t.0, t.1, t.2, t.3)); } // Append more events until we have 1000 tests. for _ in 0..(1000 - mul_instructions.len()) { - mul_events.push(AluEvent::new(0, 0, 0, Opcode::MUL, 1, 1, 1)); + mul_events.push(AluEvent::new(0, 0, Opcode::MUL, 1, 1, 1)); } shard.mul_events = mul_events; diff --git a/crates/core/machine/src/alu/sll/mod.rs b/crates/core/machine/src/alu/sll/mod.rs index 50b3694073..1b2f6cbd3d 100644 --- a/crates/core/machine/src/alu/sll/mod.rs +++ b/crates/core/machine/src/alu/sll/mod.rs @@ -49,7 +49,7 @@ use sp1_derive::AlignedBorrow; use sp1_primitives::consts::WORD_SIZE; use sp1_stark::{air::MachineAir, Word}; -use crate::{air::SP1CoreAirBuilder, utils::pad_to_power_of_two}; +use crate::{air::SP1CoreAirBuilder, utils::pad_rows_fixed}; /// The number of main trace columns for `ShiftLeft`. pub const NUM_SHIFT_LEFT_COLS: usize = size_of::>(); @@ -68,9 +68,6 @@ pub struct ShiftLeftCols { /// The shard number, used for byte lookup table. pub shard: T, - /// The channel number, used for byte lookup table. - pub channel: T, - /// The nonce of the operation. pub nonce: T, @@ -129,15 +126,19 @@ impl MachineAir for ShiftLeft { rows.push(row); } + // Pad the trace to a power of two depending on the proof shape in `input`. + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_SHIFT_LEFT_COLS], + input.fixed_log2_rows::(self), + ); + // Convert the trace to a row major matrix. let mut trace = RowMajorMatrix::new( rows.into_iter().flatten().collect::>(), NUM_SHIFT_LEFT_COLS, ); - // Pad the trace to a power of two. - pad_to_power_of_two::(&mut trace.values); - // Create the template for the padded rows. These are fake rows that don't fail on some // sanity checks. let padded_row_template = { @@ -183,7 +184,11 @@ impl MachineAir for ShiftLeft { } fn included(&self, shard: &Self::Record) -> bool { - !shard.shift_left_events.is_empty() + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + !shard.shift_left_events.is_empty() + } } } @@ -199,7 +204,6 @@ impl ShiftLeft { let b = event.b.to_le_bytes(); let c = event.c.to_le_bytes(); cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); cols.a = Word(a.map(F::from_canonical_u8)); cols.b = Word(b.map(F::from_canonical_u8)); cols.c = Word(c.map(F::from_canonical_u8)); @@ -238,8 +242,8 @@ impl ShiftLeft { // Range checks. { - blu.add_u8_range_checks(event.shard, event.channel, &bit_shift_result); - blu.add_u8_range_checks(event.shard, event.channel, &bit_shift_result_carry); + blu.add_u8_range_checks(event.shard, &bit_shift_result); + blu.add_u8_range_checks(event.shard, &bit_shift_result_carry); } // Sanity check. @@ -365,18 +369,8 @@ where // Range check. 
{ - builder.slice_range_check_u8( - &local.bit_shift_result, - local.shard, - local.channel, - local.is_real, - ); - builder.slice_range_check_u8( - &local.bit_shift_result_carry, - local.shard, - local.channel, - local.is_real, - ); + builder.slice_range_check_u8(&local.bit_shift_result, local.is_real); + builder.slice_range_check_u8(&local.bit_shift_result_carry, local.is_real); } for shift in local.shift_by_n_bytes.iter() { @@ -397,7 +391,6 @@ where local.b, local.c, local.shard, - local.channel, local.nonce, local.is_real, ); @@ -418,7 +411,7 @@ mod tests { #[test] fn generate_trace() { let mut shard = ExecutionRecord::default(); - shard.shift_left_events = vec![AluEvent::new(0, 0, 0, Opcode::SLL, 16, 8, 1)]; + shard.shift_left_events = vec![AluEvent::new(0, 0, Opcode::SLL, 16, 8, 1)]; let chip = ShiftLeft::default(); let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); @@ -453,7 +446,7 @@ mod tests { (Opcode::SLL, 0x00000000, 0x21212120, 0xffffffff), ]; for t in shift_instructions.iter() { - shift_events.push(AluEvent::new(0, 0, 0, t.0, t.1, t.2, t.3)); + shift_events.push(AluEvent::new(0, 0, t.0, t.1, t.2, t.3)); } // Append more events until we have 1000 tests. diff --git a/crates/core/machine/src/alu/sr/mod.rs b/crates/core/machine/src/alu/sr/mod.rs index 88ca2eacd4..84990a9c0b 100644 --- a/crates/core/machine/src/alu/sr/mod.rs +++ b/crates/core/machine/src/alu/sr/mod.rs @@ -65,7 +65,7 @@ use crate::{ air::SP1CoreAirBuilder, alu::sr::utils::{nb_bits_to_shift, nb_bytes_to_shift}, bytes::utils::shr_carry, - utils::pad_to_power_of_two, + utils::pad_rows_fixed, }; /// The number of main trace columns for `ShiftRightChip`. @@ -88,9 +88,6 @@ pub struct ShiftRightCols { /// The shard number, used for byte lookup table. pub shard: T, - /// The channel number, used for byte lookup table. - pub channel: T, - /// The nonce of the operation. pub nonce: T, @@ -163,15 +160,19 @@ impl MachineAir for ShiftRightChip { rows.push(row); } + // Pad the trace to a power of two depending on the proof shape in `input`. + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_SHIFT_RIGHT_COLS], + input.fixed_log2_rows::(self), + ); + // Convert the trace to a row major matrix. let mut trace = RowMajorMatrix::new( rows.into_iter().flatten().collect::>(), NUM_SHIFT_RIGHT_COLS, ); - // Pad the trace to a power of two. - pad_to_power_of_two::(&mut trace.values); - // Create the template for the padded rows. These are fake rows that don't fail on some // sanity checks. let padded_row_template = { @@ -219,7 +220,11 @@ impl MachineAir for ShiftRightChip { } fn included(&self, shard: &Self::Record) -> bool { - !shard.shift_right_events.is_empty() + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + !shard.shift_right_events.is_empty() + } } } @@ -234,7 +239,6 @@ impl ShiftRightChip { // Initialize cols with basic operands and flags derived from the current event. 
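The ShiftRightChip splits the shift amount into whole-byte and residual-bit shifts; a rough standalone version of that decomposition for a logical right shift (illustrative, not the chip's code):

fn srl_decomposed(b: u32, c: u32) -> u32 {
    let shift = (c & 0x1f) as usize; // RISC-V uses the low 5 bits of c
    let num_bytes = shift / 8;
    let num_bits = shift % 8;

    // Byte shift: drop `num_bytes` low bytes.
    let bytes = b.to_le_bytes();
    let mut byte_shifted = [0u8; 4];
    for i in 0..4 - num_bytes {
        byte_shifted[i] = bytes[i + num_bytes];
    }

    // Bit shift: each byte absorbs the carry (the bits shifted out) of the byte above it,
    // roughly what the ShrCarry byte lookup provides per byte.
    let mut out = [0u8; 4];
    let mut carry = 0u8;
    for i in (0..4).rev() {
        let widened = ((carry as u16) << 8) | byte_shifted[i] as u16;
        out[i] = (widened >> num_bits) as u8;
        carry = byte_shifted[i] & (((1u16 << num_bits) - 1) as u8);
    }
    u32::from_le_bytes(out)
}

fn main() {
    assert_eq!(srl_decomposed(12, 1), 6);
    assert_eq!(srl_decomposed(0xdead_beef, 20), 0xdead_beef >> 20);
}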
{ cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); cols.a = Word::from(event.a); cols.b = Word::from(event.b); cols.c = Word::from(event.c); @@ -254,7 +258,6 @@ impl ShiftRightChip { let most_significant_byte = event.b.to_le_bytes()[WORD_SIZE - 1]; blu.add_byte_lookup_events(vec![ByteLookupEvent { shard: event.shard, - channel: event.channel, opcode: ByteOpcode::MSB, a1: ((most_significant_byte >> 7) & 1) as u16, a2: 0, @@ -304,7 +307,6 @@ impl ShiftRightChip { let byte_event = ByteLookupEvent { shard: event.shard, - channel: event.channel, opcode: ByteOpcode::ShrCarry, a1: shift as u16, a2: carry, @@ -326,10 +328,10 @@ impl ShiftRightChip { debug_assert_eq!(cols.a[i], cols.bit_shift_result[i].clone()); } // Range checks. - blu.add_u8_range_checks(event.shard, event.channel, &byte_shift_result); - blu.add_u8_range_checks(event.shard, event.channel, &bit_shift_result); - blu.add_u8_range_checks(event.shard, event.channel, &shr_carry_output_carry); - blu.add_u8_range_checks(event.shard, event.channel, &shr_carry_output_shifted_byte); + blu.add_u8_range_checks(event.shard, &byte_shift_result); + blu.add_u8_range_checks(event.shard, &bit_shift_result); + blu.add_u8_range_checks(event.shard, &shr_carry_output_carry); + blu.add_u8_range_checks(event.shard, &shr_carry_output_shifted_byte); } } } @@ -362,15 +364,7 @@ where let byte = local.b[WORD_SIZE - 1]; let opcode = AB::F::from_canonical_u32(ByteOpcode::MSB as u32); let msb = local.b_msb; - builder.send_byte( - opcode, - msb, - byte, - zero.clone(), - local.shard, - local.channel, - local.is_real, - ); + builder.send_byte(opcode, msb, byte, zero.clone(), local.is_real); } // Calculate the number of bits and bytes to shift by from c. @@ -469,8 +463,6 @@ where local.shr_carry_output_carry[i], local.byte_shift_result[i], num_bits_to_shift.clone(), - local.shard, - local.channel, local.is_real, ); } @@ -520,7 +512,7 @@ where ]; for long_word in long_words.iter() { - builder.slice_range_check_u8(long_word, local.shard, local.channel, local.is_real); + builder.slice_range_check_u8(long_word, local.is_real); } } @@ -540,7 +532,6 @@ where local.b, local.c, local.shard, - local.channel, local.nonce, local.is_real, ); @@ -560,7 +551,7 @@ mod tests { #[test] fn generate_trace() { let mut shard = ExecutionRecord::default(); - shard.shift_right_events = vec![AluEvent::new(0, 0, 0, Opcode::SRL, 6, 12, 1)]; + shard.shift_right_events = vec![AluEvent::new(0, 0, Opcode::SRL, 6, 12, 1)]; let chip = ShiftRightChip::default(); let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); @@ -611,7 +602,7 @@ mod tests { ]; let mut shift_events: Vec = Vec::new(); for t in shifts.iter() { - shift_events.push(AluEvent::new(0, 0, 0, t.0, t.1, t.2, t.3)); + shift_events.push(AluEvent::new(0, 0, t.0, t.1, t.2, t.3)); } let mut shard = ExecutionRecord::default(); shard.shift_right_events = shift_events; diff --git a/crates/core/machine/src/bytes/air.rs b/crates/core/machine/src/bytes/air.rs index 210783a89e..3aa2bbe1fa 100644 --- a/crates/core/machine/src/bytes/air.rs +++ b/crates/core/machine/src/bytes/air.rs @@ -8,7 +8,7 @@ use sp1_stark::air::SP1AirBuilder; use super::{ columns::{ByteMultCols, BytePreprocessedCols, NUM_BYTE_MULT_COLS}, - ByteChip, NUM_BYTE_LOOKUP_CHANNELS, + ByteChip, }; impl BaseAir for ByteChip { @@ -28,66 +28,44 @@ impl Air for ByteChip { let local: &BytePreprocessedCols = (*prep).borrow(); // Send all the lookups for each operation. 
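With the channel dimension removed, the byte table keeps one multiplicity per (row, opcode), where a row encodes a (b, c) pair. A simplified sketch of how a lookup bumps its counter, mirroring the indexing in bytes/trace.rs further below (ignoring the U16Range special case, which indexes by a1 instead):

const NUM_BYTE_OPS: usize = 9;

struct ByteMultiplicities {
    // One counter per opcode for each of the 2^16 (b, c) rows.
    rows: Vec<[u64; NUM_BYTE_OPS]>,
}

impl ByteMultiplicities {
    fn new() -> Self {
        Self { rows: vec![[0; NUM_BYTE_OPS]; 1 << 16] }
    }

    // `opcode_index` stands in for `lookup.opcode as usize`.
    fn add(&mut self, opcode_index: usize, b: u8, c: u8, mult: u64) {
        let row = ((b as usize) << 8) | c as usize;
        self.rows[row][opcode_index] += mult;
    }
}

fn main() {
    let mut mults = ByteMultiplicities::new();
    mults.add(0, 0xab, 0xcd, 1); // e.g. one AND lookup of the pair (0xab, 0xcd)
    assert_eq!(mults.rows[0xabcd][0], 1);
}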
- for channel in 0..NUM_BYTE_LOOKUP_CHANNELS { - let channel_f = AB::F::from_canonical_u8(channel); - let channel = channel as usize; - for (i, opcode) in ByteOpcode::all().iter().enumerate() { - let field_op = opcode.as_field::(); - let mult = local_mult.mult_channels[channel].multiplicities[i]; - let shard = local_mult.shard; - match opcode { - ByteOpcode::AND => builder.receive_byte( - field_op, local.and, local.b, local.c, shard, channel_f, mult, - ), - ByteOpcode::OR => builder - .receive_byte(field_op, local.or, local.b, local.c, shard, channel_f, mult), - ByteOpcode::XOR => builder.receive_byte( - field_op, local.xor, local.b, local.c, shard, channel_f, mult, - ), - ByteOpcode::SLL => builder.receive_byte( - field_op, local.sll, local.b, local.c, shard, channel_f, mult, - ), - ByteOpcode::U8Range => builder.receive_byte( - field_op, - AB::F::zero(), - local.b, - local.c, - shard, - channel_f, - mult, - ), - ByteOpcode::ShrCarry => builder.receive_byte_pair( - field_op, - local.shr, - local.shr_carry, - local.b, - local.c, - shard, - channel_f, - mult, - ), - ByteOpcode::LTU => builder.receive_byte( - field_op, local.ltu, local.b, local.c, shard, channel_f, mult, - ), - ByteOpcode::MSB => builder.receive_byte( - field_op, - local.msb, - local.b, - AB::F::zero(), - shard, - channel_f, - mult, - ), - ByteOpcode::U16Range => builder.receive_byte( - field_op, - local.value_u16, - AB::F::zero(), - AB::F::zero(), - shard, - channel_f, - mult, - ), + for (i, opcode) in ByteOpcode::all().iter().enumerate() { + let field_op = opcode.as_field::(); + let mult = local_mult.multiplicities[i]; + match opcode { + ByteOpcode::AND => { + builder.receive_byte(field_op, local.and, local.b, local.c, mult) } + ByteOpcode::OR => builder.receive_byte(field_op, local.or, local.b, local.c, mult), + ByteOpcode::XOR => { + builder.receive_byte(field_op, local.xor, local.b, local.c, mult) + } + ByteOpcode::SLL => { + builder.receive_byte(field_op, local.sll, local.b, local.c, mult) + } + ByteOpcode::U8Range => { + builder.receive_byte(field_op, AB::F::zero(), local.b, local.c, mult) + } + ByteOpcode::ShrCarry => builder.receive_byte_pair( + field_op, + local.shr, + local.shr_carry, + local.b, + local.c, + mult, + ), + ByteOpcode::LTU => { + builder.receive_byte(field_op, local.ltu, local.b, local.c, mult) + } + ByteOpcode::MSB => { + builder.receive_byte(field_op, local.msb, local.b, AB::F::zero(), mult) + } + ByteOpcode::U16Range => builder.receive_byte( + field_op, + local.value_u16, + AB::F::zero(), + AB::F::zero(), + mult, + ), } } } diff --git a/crates/core/machine/src/bytes/columns.rs b/crates/core/machine/src/bytes/columns.rs index 7134331f63..49e3edab5b 100644 --- a/crates/core/machine/src/bytes/columns.rs +++ b/crates/core/machine/src/bytes/columns.rs @@ -1,7 +1,7 @@ use sp1_derive::AlignedBorrow; use std::mem::size_of; -use super::{NUM_BYTE_LOOKUP_CHANNELS, NUM_BYTE_OPS}; +use super::NUM_BYTE_OPS; /// The number of main trace columns for `ByteChip`. pub const NUM_BYTE_PREPROCESSED_COLS: usize = size_of::>(); @@ -44,22 +44,11 @@ pub struct BytePreprocessedCols { pub value_u16: T, } -/// For each byte operation in the preprocessed table, a corresponding ByteMultCols row tracks the -/// number of times the operation is used. -#[derive(Debug, Clone, Copy, AlignedBorrow)] -#[repr(C)] -pub struct MultiplicitiesCols { - pub multiplicities: [T; NUM_BYTE_OPS], -} - /// For each byte operation in the preprocessed table, a corresponding ByteMultCols row tracks the /// number of times the operation is used. 
#[derive(Debug, Clone, Copy, AlignedBorrow)] #[repr(C)] pub struct ByteMultCols { - /// Shard number is tracked so that the multiplicities do not overflow. - pub shard: T, - /// The multiplicites of each byte operation. - pub mult_channels: [MultiplicitiesCols; NUM_BYTE_LOOKUP_CHANNELS as usize], + pub multiplicities: [T; NUM_BYTE_OPS], } diff --git a/crates/core/machine/src/bytes/mod.rs b/crates/core/machine/src/bytes/mod.rs index 91e72bf7f2..3d3798f404 100644 --- a/crates/core/machine/src/bytes/mod.rs +++ b/crates/core/machine/src/bytes/mod.rs @@ -18,14 +18,11 @@ use self::{ columns::{BytePreprocessedCols, NUM_BYTE_PREPROCESSED_COLS}, utils::shr_carry, }; -use crate::bytes::trace::NUM_ROWS; +use crate::{bytes::trace::NUM_ROWS, utils::zeroed_f_vec}; /// The number of different byte operations. pub const NUM_BYTE_OPS: usize = 9; -/// The number of different byte lookup channels. -pub const NUM_BYTE_LOOKUP_CHANNELS: u8 = 16; - /// A chip for computing byte operations. /// /// The chip contains a preprocessed table of all possible byte operations. Other chips can then @@ -40,7 +37,7 @@ impl ByteChip { pub fn trace() -> RowMajorMatrix { // The trace containing all values, with all multiplicities set to zero. let mut initial_trace = RowMajorMatrix::new( - vec![F::zero(); NUM_ROWS * NUM_BYTE_PREPROCESSED_COLS], + zeroed_f_vec(NUM_ROWS * NUM_BYTE_PREPROCESSED_COLS), NUM_BYTE_PREPROCESSED_COLS, ); @@ -59,55 +56,51 @@ impl ByteChip { // Iterate over all operations for results and updating the table map. let shard = 0; - for channel in 0..NUM_BYTE_LOOKUP_CHANNELS { - for opcode in opcodes.iter() { - match opcode { - ByteOpcode::AND => { - let and = b & c; - col.and = F::from_canonical_u8(and); - ByteLookupEvent::new(shard, channel, *opcode, and as u16, 0, b, c) - } - ByteOpcode::OR => { - let or = b | c; - col.or = F::from_canonical_u8(or); - ByteLookupEvent::new(shard, channel, *opcode, or as u16, 0, b, c) - } - ByteOpcode::XOR => { - let xor = b ^ c; - col.xor = F::from_canonical_u8(xor); - ByteLookupEvent::new(shard, channel, *opcode, xor as u16, 0, b, c) - } - ByteOpcode::SLL => { - let sll = b << (c & 7); - col.sll = F::from_canonical_u8(sll); - ByteLookupEvent::new(shard, channel, *opcode, sll as u16, 0, b, c) - } - ByteOpcode::U8Range => { - ByteLookupEvent::new(shard, channel, *opcode, 0, 0, b, c) - } - ByteOpcode::ShrCarry => { - let (res, carry) = shr_carry(b, c); - col.shr = F::from_canonical_u8(res); - col.shr_carry = F::from_canonical_u8(carry); - ByteLookupEvent::new(shard, channel, *opcode, res as u16, carry, b, c) - } - ByteOpcode::LTU => { - let ltu = b < c; - col.ltu = F::from_bool(ltu); - ByteLookupEvent::new(shard, channel, *opcode, ltu as u16, 0, b, c) - } - ByteOpcode::MSB => { - let msb = (b & 0b1000_0000) != 0; - col.msb = F::from_bool(msb); - ByteLookupEvent::new(shard, channel, *opcode, msb as u16, 0, b, 0) - } - ByteOpcode::U16Range => { - let v = ((b as u32) << 8) + c as u32; - col.value_u16 = F::from_canonical_u32(v); - ByteLookupEvent::new(shard, channel, *opcode, v as u16, 0, 0, 0) - } - }; - } + for opcode in opcodes.iter() { + match opcode { + ByteOpcode::AND => { + let and = b & c; + col.and = F::from_canonical_u8(and); + ByteLookupEvent::new(shard, *opcode, and as u16, 0, b, c) + } + ByteOpcode::OR => { + let or = b | c; + col.or = F::from_canonical_u8(or); + ByteLookupEvent::new(shard, *opcode, or as u16, 0, b, c) + } + ByteOpcode::XOR => { + let xor = b ^ c; + col.xor = F::from_canonical_u8(xor); + ByteLookupEvent::new(shard, *opcode, xor as u16, 0, b, c) + } + 
ByteOpcode::SLL => { + let sll = b << (c & 7); + col.sll = F::from_canonical_u8(sll); + ByteLookupEvent::new(shard, *opcode, sll as u16, 0, b, c) + } + ByteOpcode::U8Range => ByteLookupEvent::new(shard, *opcode, 0, 0, b, c), + ByteOpcode::ShrCarry => { + let (res, carry) = shr_carry(b, c); + col.shr = F::from_canonical_u8(res); + col.shr_carry = F::from_canonical_u8(carry); + ByteLookupEvent::new(shard, *opcode, res as u16, carry, b, c) + } + ByteOpcode::LTU => { + let ltu = b < c; + col.ltu = F::from_bool(ltu); + ByteLookupEvent::new(shard, *opcode, ltu as u16, 0, b, c) + } + ByteOpcode::MSB => { + let msb = (b & 0b1000_0000) != 0; + col.msb = F::from_bool(msb); + ByteLookupEvent::new(shard, *opcode, msb as u16, 0, b, 0) + } + ByteOpcode::U16Range => { + let v = ((b as u32) << 8) + c as u32; + col.value_u16 = F::from_canonical_u32(v); + ByteLookupEvent::new(shard, *opcode, v as u16, 0, 0, 0) + } + }; } } diff --git a/crates/core/machine/src/bytes/trace.rs b/crates/core/machine/src/bytes/trace.rs index 0ea612ca44..6cfb81bbfc 100644 --- a/crates/core/machine/src/bytes/trace.rs +++ b/crates/core/machine/src/bytes/trace.rs @@ -1,11 +1,12 @@ use std::borrow::BorrowMut; -use hashbrown::HashMap; use p3_field::Field; use p3_matrix::dense::RowMajorMatrix; use sp1_core_executor::{ByteOpcode, ExecutionRecord, Program}; use sp1_stark::air::MachineAir; +use crate::utils::zeroed_f_vec; + use super::{ columns::{ByteMultCols, NUM_BYTE_MULT_COLS, NUM_BYTE_PREPROCESSED_COLS}, ByteChip, @@ -41,21 +42,20 @@ impl MachineAir for ByteChip { _output: &mut ExecutionRecord, ) -> RowMajorMatrix { let mut trace = - RowMajorMatrix::new(vec![F::zero(); NUM_BYTE_MULT_COLS * NUM_ROWS], NUM_BYTE_MULT_COLS); + RowMajorMatrix::new(zeroed_f_vec(NUM_BYTE_MULT_COLS * NUM_ROWS), NUM_BYTE_MULT_COLS); - let shard = input.public_values.execution_shard; - for (lookup, mult) in input.byte_lookups.get(&shard).unwrap_or(&HashMap::new()).iter() { - let row = if lookup.opcode != ByteOpcode::U16Range { - (((lookup.b as u16) << 8) + lookup.c as u16) as usize - } else { - lookup.a1 as usize - }; - let index = lookup.opcode as usize; - let channel = lookup.channel as usize; + for (_, blu) in input.byte_lookups.iter() { + for (lookup, mult) in blu.iter() { + let row = if lookup.opcode != ByteOpcode::U16Range { + (((lookup.b as u16) << 8) + lookup.c as u16) as usize + } else { + lookup.a1 as usize + }; + let index = lookup.opcode as usize; - let cols: &mut ByteMultCols = trace.row_mut(row).borrow_mut(); - cols.mult_channels[channel].multiplicities[index] += F::from_canonical_usize(*mult); - cols.shard = F::from_canonical_u32(shard); + let cols: &mut ByteMultCols = trace.row_mut(row).borrow_mut(); + cols.multiplicities[index] += F::from_canonical_usize(*mult); + } } trace diff --git a/crates/core/machine/src/cpu/air/branch.rs b/crates/core/machine/src/cpu/air/branch.rs index 1377d1a14e..d8c615682f 100644 --- a/crates/core/machine/src/cpu/air/branch.rs +++ b/crates/core/machine/src/cpu/air/branch.rs @@ -88,7 +88,6 @@ impl CpuChip { branch_cols.pc, local.op_c_val(), local.shard, - local.channel, branch_cols.next_pc_nonce, local.branching, ); @@ -186,7 +185,6 @@ impl CpuChip { local.op_a_val(), local.op_b_val(), local.shard, - local.channel, branch_cols.a_lt_b_nonce, is_branch_instruction.clone(), ); @@ -199,7 +197,6 @@ impl CpuChip { local.op_b_val(), local.op_a_val(), local.shard, - local.channel, branch_cols.a_gt_b_nonce, is_branch_instruction.clone(), ); diff --git a/crates/core/machine/src/cpu/air/ecall.rs 
b/crates/core/machine/src/cpu/air/ecall.rs index 8a88453a7a..9c05105bcc 100644 --- a/crates/core/machine/src/cpu/air/ecall.rs +++ b/crates/core/machine/src/cpu/air/ecall.rs @@ -2,7 +2,10 @@ use p3_air::AirBuilder; use p3_field::AbstractField; use sp1_core_executor::syscalls::SyscallCode; use sp1_stark::{ - air::{BaseAirBuilder, PublicValues, SP1AirBuilder, POSEIDON_NUM_WORDS, PV_DIGEST_NUM_WORDS}, + air::{ + BaseAirBuilder, InteractionScope, PublicValues, SP1AirBuilder, POSEIDON_NUM_WORDS, + PV_DIGEST_NUM_WORDS, + }, Word, }; @@ -50,13 +53,13 @@ impl CpuChip { builder.send_syscall( local.shard, - local.channel, local.clk, ecall_cols.syscall_nonce, syscall_id, local.op_b_val().reduce::(), local.op_c_val().reduce::(), local.ecall_mul_send_to_table, + InteractionScope::Local, ); // Compute whether this ecall is ENTER_UNCONSTRAINED. diff --git a/crates/core/machine/src/cpu/air/memory.rs b/crates/core/machine/src/cpu/air/memory.rs index 4ab7e1036e..efceb98f97 100644 --- a/crates/core/machine/src/cpu/air/memory.rs +++ b/crates/core/machine/src/cpu/air/memory.rs @@ -72,7 +72,6 @@ impl CpuChip { local.op_b_val(), local.op_c_val(), local.shard, - local.channel, memory_columns.addr_word_nonce, is_memory_instruction.clone(), ); @@ -86,12 +85,7 @@ impl CpuChip { ); // Check that each addr_word element is a byte. - builder.slice_range_check_u8( - &memory_columns.addr_word.0, - local.shard, - local.channel, - is_memory_instruction.clone(), - ); + builder.slice_range_check_u8(&memory_columns.addr_word.0, is_memory_instruction.clone()); // Evaluate the addr_offset column and offset flags. self.eval_offset_value_flags(builder, memory_columns, local); @@ -129,7 +123,6 @@ impl CpuChip { // value into the memory columns. builder.eval_memory_access( local.shard, - local.channel, local.clk + AB::F::from_canonical_u32(MemoryAccessPosition::Memory as u32), memory_columns.addr_aligned, &memory_columns.memory_access, @@ -181,7 +174,6 @@ impl CpuChip { local.unsigned_mem_val, signed_value, local.shard, - local.channel, local.unsigned_mem_val_nonce, local.mem_value_is_neg_not_x0, ); diff --git a/crates/core/machine/src/cpu/air/mod.rs b/crates/core/machine/src/cpu/air/mod.rs index 880e76e36c..bc925b30ac 100644 --- a/crates/core/machine/src/cpu/air/mod.rs +++ b/crates/core/machine/src/cpu/air/mod.rs @@ -23,7 +23,7 @@ use crate::{ }; use sp1_core_executor::Opcode; -use super::columns::{eval_channel_selectors, OPCODE_SELECTORS_COL_MAP}; +use super::columns::OPCODE_SELECTORS_COL_MAP; impl Air for CpuChip where @@ -63,16 +63,6 @@ where self.eval_memory_load::(builder, local); self.eval_memory_store::(builder, local); - // Channel constraints. - eval_channel_selectors( - builder, - &local.channel_selectors, - &next.channel_selectors, - local.channel, - local.is_real, - next.is_real, - ); - // ALU instructions. 
builder.send_alu( local.instruction.opcode, @@ -80,7 +70,6 @@ where local.op_b_val(), local.op_c_val(), local.shard, - local.channel, local.nonce, is_alu_instruction, ); @@ -207,7 +196,6 @@ impl CpuChip { jump_columns.pc, local.op_b_val(), local.shard, - local.channel, jump_columns.jal_nonce, local.selectors.is_jal, ); @@ -219,7 +207,6 @@ impl CpuChip { local.op_b_val(), local.op_c_val(), local.shard, - local.channel, jump_columns.jalr_nonce, local.selectors.is_jalr, ); @@ -248,7 +235,6 @@ impl CpuChip { auipc_columns.pc, local.op_b_val(), local.shard, - local.channel, auipc_columns.auipc_nonce, local.selectors.is_auipc, ); @@ -276,8 +262,6 @@ impl CpuChip { local.shard, AB::Expr::zero(), AB::Expr::zero(), - local.shard, - local.channel, local.is_real, ); @@ -300,8 +284,6 @@ impl CpuChip { local.clk, local.clk_16bit_limb, local.clk_8bit_limb, - local.shard, - local.channel, local.is_real, ); } diff --git a/crates/core/machine/src/cpu/air/register.rs b/crates/core/machine/src/cpu/air/register.rs index 8ffa86248c..7be28c6099 100644 --- a/crates/core/machine/src/cpu/air/register.rs +++ b/crates/core/machine/src/cpu/air/register.rs @@ -27,7 +27,6 @@ impl CpuChip { // If they are not immediates, read `b` and `c` from memory. builder.eval_memory_access( local.shard, - local.channel, local.clk + AB::F::from_canonical_u32(MemoryAccessPosition::B as u32), local.instruction.op_b[0], &local.op_b_access, @@ -36,7 +35,6 @@ impl CpuChip { builder.eval_memory_access( local.shard, - local.channel, local.clk + AB::F::from_canonical_u32(MemoryAccessPosition::C as u32), local.instruction.op_c[0], &local.op_c_access, @@ -50,7 +48,6 @@ impl CpuChip { // we are performing a branch or a store. builder.eval_memory_access( local.shard, - local.channel, local.clk + AB::F::from_canonical_u32(MemoryAccessPosition::A as u32), local.instruction.op_a[0], &local.op_a_access, @@ -59,12 +56,7 @@ impl CpuChip { // Always range check the word value in `op_a`, as JUMP instructions may witness // an invalid word and write it to memory. - builder.slice_range_check_u8( - &local.op_a_access.access.value.0, - local.shard, - local.channel, - local.is_real, - ); + builder.slice_range_check_u8(&local.op_a_access.access.value.0, local.is_real); // If we are performing a branch or a store, then the value of `a` is the previous value. builder diff --git a/crates/core/machine/src/cpu/columns/channel.rs b/crates/core/machine/src/cpu/columns/channel.rs deleted file mode 100644 index 1f1d6ec535..0000000000 --- a/crates/core/machine/src/cpu/columns/channel.rs +++ /dev/null @@ -1,59 +0,0 @@ -use p3_air::AirBuilder; -use p3_field::{AbstractField, Field}; -use sp1_derive::AlignedBorrow; -use sp1_stark::air::SP1AirBuilder; - -use crate::bytes::NUM_BYTE_LOOKUP_CHANNELS; - -#[derive(AlignedBorrow, Default, Debug, Clone, Copy)] -#[repr(C)] -pub struct ChannelSelectorCols { - pub channel_selectors: [T; NUM_BYTE_LOOKUP_CHANNELS as usize], -} - -impl ChannelSelectorCols { - #[inline(always)] - pub fn populate(&mut self, channel: u8) { - self.channel_selectors = [F::zero(); NUM_BYTE_LOOKUP_CHANNELS as usize]; - self.channel_selectors[channel as usize] = F::one(); - } -} - -pub fn eval_channel_selectors( - builder: &mut AB, - local: &ChannelSelectorCols, - next: &ChannelSelectorCols, - channel: impl Into + Clone, - local_is_real: impl Into + Clone, - next_is_real: impl Into + Clone, -) { - // Constrain: - // - the value of the channel is given by the channel selectors. - // - all selectors are boolean and disjoint. 
- let mut sum = AB::Expr::zero(); - let mut reconstruct_channel = AB::Expr::zero(); - for (i, selector) in local.channel_selectors.into_iter().enumerate() { - // Constrain that the selector is boolean. - builder.assert_bool(selector); - // Accumulate the sum of the selectors. - sum += selector.into(); - // Accumulate the reconstructed channel. - reconstruct_channel += selector.into() * AB::Expr::from_canonical_u32(i as u32); - } - // Assert that the reconstructed channel is the same as the channel. - builder.assert_eq(reconstruct_channel, channel.clone()); - // For disjointness, assert the sum of the selectors is 1. - builder.when(local_is_real.clone()).assert_eq(sum, AB::Expr::one()); - - // Constrain the first row by asserting that the first selector on the first line is true. - builder.when_first_row().assert_one(local.channel_selectors[0]); - - // Constrain the transition by asserting that the selectors satisfy the recursion relation: - // selectors_next[(i + 1) % NUM_BYTE_LOOKUP_CHANNELS] = selectors[i] - for i in 0..NUM_BYTE_LOOKUP_CHANNELS as usize { - builder.when_transition().when(next_is_real.clone()).assert_eq( - local.channel_selectors[i], - next.channel_selectors[(i + 1) % NUM_BYTE_LOOKUP_CHANNELS as usize], - ); - } -} diff --git a/crates/core/machine/src/cpu/columns/mod.rs b/crates/core/machine/src/cpu/columns/mod.rs index 3f382677ee..7a32b03db5 100644 --- a/crates/core/machine/src/cpu/columns/mod.rs +++ b/crates/core/machine/src/cpu/columns/mod.rs @@ -1,6 +1,5 @@ mod auipc; mod branch; -mod channel; mod ecall; mod instruction; mod jump; @@ -10,7 +9,6 @@ mod opcode_specific; pub use auipc::*; pub use branch::*; -pub use channel::*; pub use ecall::*; pub use instruction::*; pub use jump::*; @@ -35,8 +33,6 @@ pub const CPU_COL_MAP: CpuCols = make_col_map(); pub struct CpuCols { /// The current shard. pub shard: T, - /// The channel value, used for byte lookup multiplicity. - pub channel: T, pub nonce: T, @@ -56,9 +52,6 @@ pub struct CpuCols { /// Columns related to the instruction. pub instruction: InstructionCols, - /// Columns related to the byte lookup channel. - pub channel_selectors: ChannelSelectorCols, - /// Selectors for the opcode. 
pub selectors: OpcodeSelectorCols, diff --git a/crates/core/machine/src/cpu/trace.rs b/crates/core/machine/src/cpu/trace.rs index 5189d77747..01b1489127 100644 --- a/crates/core/machine/src/cpu/trace.rs +++ b/crates/core/machine/src/cpu/trace.rs @@ -1,13 +1,10 @@ use hashbrown::HashMap; use itertools::Itertools; use sp1_core_executor::{ - events::{ - create_alu_lookups, AluEvent, ByteLookupEvent, ByteRecord, CpuEvent, LookupId, - MemoryRecordEnum, - }, + events::{ByteLookupEvent, ByteRecord, CpuEvent, LookupId, MemoryRecordEnum}, syscalls::SyscallCode, ByteOpcode::{self, U16Range}, - ExecutionRecord, Opcode, Program, + CoreShape, ExecutionRecord, Opcode, Program, Register::X0, }; use sp1_primitives::consts::WORD_SIZE; @@ -25,7 +22,7 @@ use super::{ columns::{CPU_COL_MAP, NUM_CPU_COLS}, CpuChip, }; -use crate::{cpu::columns::CpuCols, memory::MemoryCols}; +use crate::{cpu::columns::CpuCols, memory::MemoryCols, utils::zeroed_f_vec}; impl MachineAir for CpuChip { type Record = ExecutionRecord; @@ -41,7 +38,7 @@ impl MachineAir for CpuChip { input: &ExecutionRecord, _: &mut ExecutionRecord, ) -> RowMajorMatrix { - let mut values = vec![F::zero(); input.cpu_events.len() * NUM_CPU_COLS]; + let mut values = zeroed_f_vec(input.cpu_events.len() * NUM_CPU_COLS); let chunk_size = std::cmp::max(input.cpu_events.len() / num_cpus::get(), 1); values.chunks_mut(chunk_size * NUM_CPU_COLS).enumerate().par_bridge().for_each( @@ -64,7 +61,7 @@ impl MachineAir for CpuChip { let mut trace = RowMajorMatrix::new(values, NUM_CPU_COLS); // Pad the trace to a power of two. - Self::pad_to_power_of_two::(&mut trace.values); + Self::pad_to_power_of_two::(self, &input.shape, &mut trace.values); trace } @@ -74,34 +71,30 @@ impl MachineAir for CpuChip { // Generate the trace rows for each event. let chunk_size = std::cmp::max(input.cpu_events.len() / num_cpus::get(), 1); - let (alu_events, blu_events): (Vec<_>, Vec<_>) = input + let blu_events: Vec<_> = input .cpu_events .par_chunks(chunk_size) .map(|ops: &[CpuEvent]| { - let mut alu = HashMap::new(); // The blu map stores shard -> map(byte lookup event -> multiplicity). let mut blu: HashMap> = HashMap::new(); ops.iter().for_each(|op| { let mut row = [F::zero(); NUM_CPU_COLS]; let cols: &mut CpuCols = row.as_mut_slice().borrow_mut(); - let alu_events = self.event_to_row::(op, &HashMap::new(), cols, &mut blu); - alu_events.into_iter().for_each(|(key, value)| { - alu.entry(key).or_insert(Vec::default()).extend(value); - }); + self.event_to_row::(op, &HashMap::new(), cols, &mut blu); }); - (alu, blu) + blu }) - .unzip(); - - for alu_events_chunk in alu_events.into_iter() { - output.add_alu_events(alu_events_chunk); - } + .collect::>(); output.add_sharded_byte_lookup_events(blu_events.iter().collect_vec()); } - fn included(&self, input: &Self::Record) -> bool { - !input.cpu_events.is_empty() + fn included(&self, shard: &Self::Record) -> bool { + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + shard.contains_cpu() + } } } @@ -113,9 +106,7 @@ impl CpuChip { nonce_lookup: &HashMap, cols: &mut CpuCols, blu_events: &mut impl ByteRecord, - ) -> HashMap> { - let mut new_alu_events = HashMap::new(); - + ) { // Populate shard and clk columns. self.populate_shard_clk(cols, event, blu_events); @@ -135,13 +126,13 @@ impl CpuChip { // Populate memory accesses for a, b, and c. 
if let Some(record) = event.a_record { - cols.op_a_access.populate(event.channel, record, blu_events); + cols.op_a_access.populate(record, blu_events); } if let Some(MemoryRecordEnum::Read(record)) = event.b_record { - cols.op_b_access.populate(event.channel, record, blu_events); + cols.op_b_access.populate(record, blu_events); } if let Some(MemoryRecordEnum::Read(record)) = event.c_record { - cols.op_c_access.populate(event.channel, record, blu_events); + cols.op_c_access.populate(record, blu_events); } // Populate range checks for a. @@ -155,7 +146,6 @@ impl CpuChip { .collect::>(); blu_events.add_byte_lookup_event(ByteLookupEvent { shard: event.shard, - channel: event.channel, opcode: ByteOpcode::U8Range, a1: 0, a2: 0, @@ -164,7 +154,6 @@ impl CpuChip { }); blu_events.add_byte_lookup_event(ByteLookupEvent { shard: event.shard, - channel: event.channel, opcode: ByteOpcode::U8Range, a1: 0, a2: 0, @@ -176,14 +165,14 @@ impl CpuChip { assert_eq!(event.memory_record.is_some(), event.memory.is_some()); let memory_columns = cols.opcode_specific_columns.memory_mut(); if let Some(record) = event.memory_record { - memory_columns.memory_access.populate(event.channel, record, blu_events) + memory_columns.memory_access.populate(record, blu_events) } // Populate memory, branch, jump, and auipc specific fields. - self.populate_memory(cols, event, &mut new_alu_events, blu_events, nonce_lookup); - self.populate_branch(cols, event, &mut new_alu_events, nonce_lookup); - self.populate_jump(cols, event, &mut new_alu_events, nonce_lookup); - self.populate_auipc(cols, event, &mut new_alu_events, nonce_lookup); + self.populate_memory(cols, event, blu_events, nonce_lookup); + self.populate_branch(cols, event, nonce_lookup); + self.populate_jump(cols, event, nonce_lookup); + self.populate_auipc(cols, event, nonce_lookup); let is_halt = self.populate_ecall(cols, event, nonce_lookup); cols.is_sequential_instr = F::from_bool( @@ -194,11 +183,9 @@ impl CpuChip { // Assert that the instruction is not a no-op. cols.is_real = F::one(); - - new_alu_events } - /// Populates the shard, channel, and clk related rows. + /// Populates the shard and clk related rows. 
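// A plain-integer model of what `populate_shard_clk` (next) commits: the clock
// is assumed to fit in 24 bits and is split into a low 16-bit limb and a high
// 8-bit limb, each range-checked through the byte table (the U16Range / U8Range
// events above, which now carry no channel). The high-limb derivation shown
// here is the natural one; its exact line is elided from this hunk.
fn decompose_clk_sketch(clk: u32) -> (u16, u8) {
    let clk_16bit_limb = (clk & 0xffff) as u16;
    let clk_8bit_limb = ((clk >> 16) & 0xff) as u8;
    // The AIR-side check this mirrors: clk reconstructs from its two limbs.
    assert_eq!(clk, clk_16bit_limb as u32 + ((clk_8bit_limb as u32) << 16));
    (clk_16bit_limb, clk_8bit_limb)
}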
fn populate_shard_clk( &self, cols: &mut CpuCols, @@ -206,7 +193,6 @@ impl CpuChip { blu_events: &mut impl ByteRecord, ) { cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); cols.clk = F::from_canonical_u32(event.clk); let clk_16bit_limb = (event.clk & 0xffff) as u16; @@ -214,11 +200,8 @@ impl CpuChip { cols.clk_16bit_limb = F::from_canonical_u16(clk_16bit_limb); cols.clk_8bit_limb = F::from_canonical_u8(clk_8bit_limb); - cols.channel_selectors.populate(event.channel); - blu_events.add_byte_lookup_event(ByteLookupEvent::new( event.shard, - event.channel, U16Range, event.shard as u16, 0, @@ -227,7 +210,6 @@ impl CpuChip { )); blu_events.add_byte_lookup_event(ByteLookupEvent::new( event.shard, - event.channel, U16Range, clk_16bit_limb, 0, @@ -236,7 +218,6 @@ impl CpuChip { )); blu_events.add_byte_lookup_event(ByteLookupEvent::new( event.shard, - event.channel, ByteOpcode::U8Range, 0, 0, @@ -250,7 +231,6 @@ impl CpuChip { &self, cols: &mut CpuCols, event: &CpuEvent, - new_alu_events: &mut HashMap>, blu_events: &mut impl ByteRecord, nonce_lookup: &HashMap, ) { @@ -281,23 +261,6 @@ impl CpuChip { let aligned_addr_ls_byte = (aligned_addr & 0x000000FF) as u8; let bits: [bool; 8] = array::from_fn(|i| aligned_addr_ls_byte & (1 << i) != 0); memory_columns.aa_least_sig_byte_decomp = array::from_fn(|i| F::from_bool(bits[i + 2])); - - // Add event to ALU check to check that addr == b + c - let add_event = AluEvent { - lookup_id: event.memory_add_lookup_id, - shard: event.shard, - channel: event.channel, - clk: event.clk, - opcode: Opcode::ADD, - a: memory_addr, - b: event.b, - c: event.c, - sub_lookups: create_alu_lookups(), - }; - new_alu_events - .entry(Opcode::ADD) - .and_modify(|op_new_events| op_new_events.push(add_event)) - .or_insert(vec![add_event]); memory_columns.addr_word_nonce = F::from_canonical_u32( nonce_lookup.get(&event.memory_add_lookup_id).copied().unwrap_or_default(), ); @@ -336,15 +299,10 @@ impl CpuChip { // For the signed load instructions, we need to check if the loaded value is negative. 
if matches!(event.instruction.opcode, Opcode::LB | Opcode::LH) { - let most_sig_mem_value_byte: u8; - let sign_value: u32; - if matches!(event.instruction.opcode, Opcode::LB) { - sign_value = 256; - most_sig_mem_value_byte = cols.unsigned_mem_val.to_u32().to_le_bytes()[0]; + let most_sig_mem_value_byte = if matches!(event.instruction.opcode, Opcode::LB) { + cols.unsigned_mem_val.to_u32().to_le_bytes()[0] } else { - // LHU case - sign_value = 65536; - most_sig_mem_value_byte = cols.unsigned_mem_val.to_u32().to_le_bytes()[1]; + cols.unsigned_mem_val.to_u32().to_le_bytes()[1] }; for i in (0..8).rev() { @@ -354,25 +312,9 @@ impl CpuChip { if memory_columns.most_sig_byte_decomp[7] == F::one() { cols.mem_value_is_neg_not_x0 = F::from_bool(event.instruction.op_a != (X0 as u32)); - let sub_event = AluEvent { - lookup_id: event.memory_sub_lookup_id, - channel: event.channel, - shard: event.shard, - clk: event.clk, - opcode: Opcode::SUB, - a: event.a, - b: cols.unsigned_mem_val.to_u32(), - c: sign_value, - sub_lookups: create_alu_lookups(), - }; cols.unsigned_mem_val_nonce = F::from_canonical_u32( nonce_lookup.get(&event.memory_sub_lookup_id).copied().unwrap_or_default(), ); - - new_alu_events - .entry(Opcode::SUB) - .and_modify(|op_new_events| op_new_events.push(sub_event)) - .or_insert(vec![sub_event]); } } @@ -390,7 +332,6 @@ impl CpuChip { for byte_pair in addr_bytes.chunks_exact(2) { blu_events.add_byte_lookup_event(ByteLookupEvent { shard: event.shard, - channel: event.channel, opcode: ByteOpcode::U8Range, a1: 0, a2: 0, @@ -405,7 +346,6 @@ impl CpuChip { &self, cols: &mut CpuCols, event: &CpuEvent, - alu_events: &mut HashMap>, nonce_lookup: &HashMap, ) { if event.instruction.is_branch_instruction() { @@ -427,49 +367,14 @@ impl CpuChip { event.a > event.b }; - let alu_op_code = if use_signed_comparison { Opcode::SLT } else { Opcode::SLTU }; - - // Add the ALU events for the comparisons - let lt_comp_event = AluEvent { - lookup_id: event.branch_lt_lookup_id, - shard: event.shard, - channel: event.channel, - clk: event.clk, - opcode: alu_op_code, - a: a_lt_b as u32, - b: event.a, - c: event.b, - sub_lookups: create_alu_lookups(), - }; branch_columns.a_lt_b_nonce = F::from_canonical_u32( nonce_lookup.get(&event.branch_lt_lookup_id).copied().unwrap_or_default(), ); - alu_events - .entry(alu_op_code) - .and_modify(|op_new_events| op_new_events.push(lt_comp_event)) - .or_insert(vec![lt_comp_event]); - - let gt_comp_event = AluEvent { - lookup_id: event.branch_gt_lookup_id, - shard: event.shard, - channel: event.channel, - clk: event.clk, - opcode: alu_op_code, - a: a_gt_b as u32, - b: event.b, - c: event.a, - sub_lookups: create_alu_lookups(), - }; branch_columns.a_gt_b_nonce = F::from_canonical_u32( nonce_lookup.get(&event.branch_gt_lookup_id).copied().unwrap_or_default(), ); - alu_events - .entry(alu_op_code) - .and_modify(|op_new_events| op_new_events.push(gt_comp_event)) - .or_insert(vec![gt_comp_event]); - branch_columns.a_eq_b = F::from_bool(a_eq_b); branch_columns.a_lt_b = F::from_bool(a_lt_b); branch_columns.a_gt_b = F::from_bool(a_gt_b); @@ -490,26 +395,9 @@ impl CpuChip { if branching { cols.branching = F::one(); - - let add_event = AluEvent { - lookup_id: event.branch_add_lookup_id, - shard: event.shard, - channel: event.channel, - clk: event.clk, - opcode: Opcode::ADD, - a: next_pc, - b: event.pc, - c: event.c, - sub_lookups: create_alu_lookups(), - }; branch_columns.next_pc_nonce = F::from_canonical_u32( nonce_lookup.get(&event.branch_add_lookup_id).copied().unwrap_or_default(), ); - - 
alu_events - .entry(Opcode::ADD) - .and_modify(|op_new_events| op_new_events.push(add_event)) - .or_insert(vec![add_event]); } else { cols.not_branching = F::one(); } @@ -521,7 +409,6 @@ impl CpuChip { &self, cols: &mut CpuCols, event: &CpuEvent, - alu_events: &mut HashMap>, nonce_lookup: &HashMap, ) { if event.instruction.is_jump_instruction() { @@ -535,52 +422,18 @@ impl CpuChip { jump_columns.pc_range_checker.populate(event.pc); jump_columns.next_pc = Word::from(next_pc); jump_columns.next_pc_range_checker.populate(next_pc); - - let add_event = AluEvent { - lookup_id: event.jump_jal_lookup_id, - shard: event.shard, - channel: event.channel, - clk: event.clk, - opcode: Opcode::ADD, - a: next_pc, - b: event.pc, - c: event.b, - sub_lookups: create_alu_lookups(), - }; jump_columns.jal_nonce = F::from_canonical_u32( nonce_lookup.get(&event.jump_jal_lookup_id).copied().unwrap_or_default(), ); - - alu_events - .entry(Opcode::ADD) - .and_modify(|op_new_events| op_new_events.push(add_event)) - .or_insert(vec![add_event]); } Opcode::JALR => { let next_pc = event.b.wrapping_add(event.c); jump_columns.op_a_range_checker.populate(event.a); jump_columns.next_pc = Word::from(next_pc); jump_columns.next_pc_range_checker.populate(next_pc); - - let add_event = AluEvent { - lookup_id: event.jump_jalr_lookup_id, - shard: event.shard, - channel: event.channel, - clk: event.clk, - opcode: Opcode::ADD, - a: next_pc, - b: event.b, - c: event.c, - sub_lookups: create_alu_lookups(), - }; jump_columns.jalr_nonce = F::from_canonical_u32( nonce_lookup.get(&event.jump_jalr_lookup_id).copied().unwrap_or_default(), ); - - alu_events - .entry(Opcode::ADD) - .and_modify(|op_new_events| op_new_events.push(add_event)) - .or_insert(vec![add_event]); } _ => unreachable!(), } @@ -592,7 +445,6 @@ impl CpuChip { &self, cols: &mut CpuCols, event: &CpuEvent, - alu_events: &mut HashMap>, nonce_lookup: &HashMap, ) { if matches!(event.instruction.opcode, Opcode::AUIPC) { @@ -600,26 +452,9 @@ impl CpuChip { auipc_columns.pc = Word::from(event.pc); auipc_columns.pc_range_checker.populate(event.pc); - - let add_event = AluEvent { - lookup_id: event.auipc_lookup_id, - shard: event.shard, - channel: event.channel, - clk: event.clk, - opcode: Opcode::ADD, - a: event.a, - b: event.pc, - c: event.b, - sub_lookups: create_alu_lookups(), - }; auipc_columns.auipc_nonce = F::from_canonical_u32( nonce_lookup.get(&event.auipc_lookup_id).copied().unwrap_or_default(), ); - - alu_events - .entry(Opcode::ADD) - .and_modify(|op_new_events| op_new_events.push(add_event)) - .or_insert(vec![add_event]); } } @@ -706,9 +541,15 @@ impl CpuChip { is_halt } - fn pad_to_power_of_two(values: &mut Vec) { + fn pad_to_power_of_two(&self, shape: &Option, values: &mut Vec) { let n_real_rows = values.len() / NUM_CPU_COLS; - let padded_nb_rows = if n_real_rows < 16 { 16 } else { n_real_rows.next_power_of_two() }; + let padded_nb_rows = if let Some(shape) = shape { + 1 << shape.inner[&MachineAir::::name(self)] + } else if n_real_rows < 16 { + 16 + } else { + n_real_rows.next_power_of_two() + }; values.resize(padded_nb_rows * NUM_CPU_COLS, F::zero()); // Interpret values as a slice of arrays of length `NUM_CPU_COLS` diff --git a/crates/core/machine/src/io.rs b/crates/core/machine/src/io.rs index e4b417955b..e2ac2856a6 100644 --- a/crates/core/machine/src/io.rs +++ b/crates/core/machine/src/io.rs @@ -1,5 +1,6 @@ use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, ShardProof, StarkVerifyingKey}; +use 
sp1_core_executor::SP1ReduceProof; +use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkVerifyingKey}; /// Standard input for the prover. #[derive(Debug, Clone, Serialize, Deserialize, Default)] @@ -8,7 +9,7 @@ pub struct SP1Stdin { /// a vec of bytes at a time. pub buffer: Vec>, pub ptr: usize, - pub proofs: Vec<(ShardProof, StarkVerifyingKey)>, + pub proofs: Vec<(SP1ReduceProof, StarkVerifyingKey)>, } impl SP1Stdin { @@ -54,7 +55,7 @@ impl SP1Stdin { pub fn write_proof( &mut self, - proof: ShardProof, + proof: SP1ReduceProof, vk: StarkVerifyingKey, ) { self.proofs.push((proof, vk)); diff --git a/crates/core/machine/src/lib.rs b/crates/core/machine/src/lib.rs index 6fd698c897..f50ee422c6 100644 --- a/crates/core/machine/src/lib.rs +++ b/crates/core/machine/src/lib.rs @@ -30,4 +30,12 @@ pub mod utils; /// This string should be updated whenever any step in verifying an SP1 proof changes, including /// core, recursion, and plonk-bn254. This string is used to download SP1 artifacts and the gnark /// docker image. -pub const SP1_CIRCUIT_VERSION: &str = "v2.0.0"; +pub const SP1_CIRCUIT_VERSION: &str = "v3.0.0-rc4"; + +// Re-export the `SP1ReduceProof` struct from sp1_core_machine. +// +// This is done to avoid a circular dependency between sp1_core_machine and sp1_core_executor, and +// enable crates that depend on sp1_core_machine to import the `SP1ReduceProof` type directly. +pub mod reduce { + pub use sp1_core_executor::SP1ReduceProof; +} diff --git a/crates/core/machine/src/memory/global.rs b/crates/core/machine/src/memory/global.rs index b956f8155e..30b470f7fc 100644 --- a/crates/core/machine/src/memory/global.rs +++ b/crates/core/machine/src/memory/global.rs @@ -11,7 +11,7 @@ use sp1_core_executor::{events::MemoryInitializeFinalizeEvent, ExecutionRecord, use sp1_derive::AlignedBorrow; use sp1_stark::{ air::{ - AirInteraction, BaseAirBuilder, MachineAir, PublicValues, SP1AirBuilder, + AirInteraction, BaseAirBuilder, InteractionScope, MachineAir, PublicValues, SP1AirBuilder, SP1_PROOF_NUM_PV_ELTS, }, InteractionKind, Word, @@ -19,43 +19,38 @@ use sp1_stark::{ use crate::{ operations::{AssertLtColsBits, BabyBearBitDecomposition, IsZeroOperation}, - utils::pad_to_power_of_two, + utils::pad_rows_fixed, }; -/// The type of memory chip that is being initialized. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum MemoryChipType { - Initialize, - Finalize, -} +use super::MemoryChipType; /// A memory chip that can initialize or finalize values in memory. -pub struct MemoryChip { +pub struct MemoryGlobalChip { pub kind: MemoryChipType, } -impl MemoryChip { +impl MemoryGlobalChip { /// Creates a new memory chip with a certain type. 
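// A hedged usage sketch of the `SP1Stdin` change earlier in this diff:
// aggregation inputs are now `SP1ReduceProof`s rather than bare shard proofs.
// `SP1Stdin::new()` / `write()` are the usual helpers and are assumed here, as
// are the illustrative `reduce_proof` / `reduce_vk` variables.
let mut stdin = SP1Stdin::new();
stdin.write(&42u32);                        // ordinary serialized input
stdin.write_proof(reduce_proof, reduce_vk); // proof + vk consumed by the guest's recursive verification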
pub const fn new(kind: MemoryChipType) -> Self { Self { kind } } } -impl BaseAir for MemoryChip { +impl BaseAir for MemoryGlobalChip { fn width(&self) -> usize { NUM_MEMORY_INIT_COLS } } -impl MachineAir for MemoryChip { +impl MachineAir for MemoryGlobalChip { type Record = ExecutionRecord; type Program = Program; fn name(&self) -> String { match self.kind { - MemoryChipType::Initialize => "MemoryInit".to_string(), - MemoryChipType::Finalize => "MemoryFinalize".to_string(), + MemoryChipType::Initialize => "MemoryGlobalInit".to_string(), + MemoryChipType::Finalize => "MemoryGlobalFinalize".to_string(), } } @@ -69,8 +64,8 @@ impl MachineAir for MemoryChip { _output: &mut ExecutionRecord, ) -> RowMajorMatrix { let mut memory_events = match self.kind { - MemoryChipType::Initialize => input.memory_initialize_events.clone(), - MemoryChipType::Finalize => input.memory_finalize_events.clone(), + MemoryChipType::Initialize => input.global_memory_initialize_events.clone(), + MemoryChipType::Finalize => input.global_memory_finalize_events.clone(), }; let previous_addr_bits = match self.kind { @@ -79,7 +74,7 @@ impl MachineAir for MemoryChip { }; memory_events.sort_by_key(|event| event.addr); - let rows: Vec<[F; NUM_MEMORY_INIT_COLS]> = (0..memory_events.len()) // OPT: change this to par_iter + let mut rows: Vec<[F; NUM_MEMORY_INIT_COLS]> = (0..memory_events.len()) // OPT: change this to par_iter .map(|i| { let MemoryInitializeFinalizeEvent { addr, value, shard, timestamp, used } = memory_events[i]; @@ -127,22 +122,30 @@ impl MachineAir for MemoryChip { }) .collect::>(); - let mut trace = RowMajorMatrix::new( - rows.into_iter().flatten().collect::>(), - NUM_MEMORY_INIT_COLS, + // Pad the trace to a power of two depending on the proof shape in `input`. + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_MEMORY_INIT_COLS], + input.fixed_log2_rows::(self), ); - pad_to_power_of_two::(&mut trace.values); - - trace + RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_MEMORY_INIT_COLS) } fn included(&self, shard: &Self::Record) -> bool { - match self.kind { - MemoryChipType::Initialize => !shard.memory_initialize_events.is_empty(), - MemoryChipType::Finalize => !shard.memory_finalize_events.is_empty(), + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + match self.kind { + MemoryChipType::Initialize => !shard.global_memory_initialize_events.is_empty(), + MemoryChipType::Finalize => !shard.global_memory_finalize_events.is_empty(), + } } } + + fn commit_scope(&self) -> InteractionScope { + InteractionScope::Global + } } #[derive(AlignedBorrow, Debug, Clone, Copy)] @@ -184,7 +187,7 @@ pub struct MemoryInitCols { pub(crate) const NUM_MEMORY_INIT_COLS: usize = size_of::>(); -impl Air for MemoryChip +impl Air for MemoryGlobalChip where AB: SP1AirBuilder, { @@ -215,19 +218,17 @@ where if self.kind == MemoryChipType::Initialize { let mut values = vec![AB::Expr::zero(), AB::Expr::zero(), local.addr.into()]; values.extend(value.map(Into::into)); - builder.receive(AirInteraction::new( - values, - local.is_real.into(), - InteractionKind::Memory, - )); + builder.send( + AirInteraction::new(values, local.is_real.into(), InteractionKind::Memory), + InteractionScope::Global, + ); } else { let mut values = vec![local.shard.into(), local.timestamp.into(), local.addr.into()]; values.extend(value); - builder.send(AirInteraction::new( - values, - local.is_real.into(), - InteractionKind::Memory, - )); + builder.receive( + AirInteraction::new(values, local.is_real.into(), 
InteractionKind::Memory), + InteractionScope::Global, + ); } // Canonically decompose the address into bits so we can do comparisons. @@ -380,18 +381,18 @@ mod tests { runtime.run().unwrap(); let shard = runtime.record.clone(); - let chip: MemoryChip = MemoryChip::new(MemoryChipType::Initialize); + let chip: MemoryGlobalChip = MemoryGlobalChip::new(MemoryChipType::Initialize); let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); println!("{:?}", trace.values); - let chip: MemoryChip = MemoryChip::new(MemoryChipType::Finalize); + let chip: MemoryGlobalChip = MemoryGlobalChip::new(MemoryChipType::Finalize); let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); println!("{:?}", trace.values); - for mem_event in shard.memory_finalize_events { + for mem_event in shard.global_memory_finalize_events { println!("{:?}", mem_event); } } @@ -407,15 +408,24 @@ mod tests { RiscvAir::machine(BabyBearPoseidon2::new()); let (pkey, _) = machine.setup(&program_clone); let opts = SP1CoreOpts::default(); - machine.generate_dependencies(&mut runtime.records, &opts); + machine.generate_dependencies(&mut runtime.records, &opts, None); let shards = runtime.records; - assert_eq!(shards.len(), 2); + for shard in shards.clone() { + debug_interactions_with_all_chips::>( + &machine, + &pkey, + &[shard], + vec![InteractionKind::Memory], + InteractionScope::Local, + ); + } debug_interactions_with_all_chips::>( &machine, &pkey, &shards, vec![InteractionKind::Memory], + InteractionScope::Global, ); } @@ -429,15 +439,15 @@ mod tests { let machine = RiscvAir::machine(BabyBearPoseidon2::new()); let (pkey, _) = machine.setup(&program_clone); let opts = SP1CoreOpts::default(); - machine.generate_dependencies(&mut runtime.records, &opts); + machine.generate_dependencies(&mut runtime.records, &opts, None); let shards = runtime.records; - assert_eq!(shards.len(), 2); debug_interactions_with_all_chips::>( &machine, &pkey, &shards, vec![InteractionKind::Byte], + InteractionScope::Global, ); } } diff --git a/crates/core/machine/src/memory/local.rs b/crates/core/machine/src/memory/local.rs new file mode 100644 index 0000000000..bf109d028e --- /dev/null +++ b/crates/core/machine/src/memory/local.rs @@ -0,0 +1,277 @@ +use std::{ + borrow::{Borrow, BorrowMut}, + mem::size_of, +}; + +use crate::utils::pad_rows_fixed; +use itertools::Itertools; +use p3_air::{Air, BaseAir}; +use p3_field::PrimeField32; +use p3_matrix::{dense::RowMajorMatrix, Matrix}; +use sp1_core_executor::{ExecutionRecord, Program}; +use sp1_derive::AlignedBorrow; +use sp1_stark::{ + air::{AirInteraction, InteractionScope, MachineAir, SP1AirBuilder}, + InteractionKind, Word, +}; + +pub const NUM_LOCAL_MEMORY_ENTRIES_PER_ROW: usize = 4; + +pub(crate) const NUM_MEMORY_LOCAL_INIT_COLS: usize = size_of::>(); + +#[derive(AlignedBorrow, Debug, Clone, Copy)] +#[repr(C)] +struct SingleMemoryLocal { + /// The address of the memory access. + pub addr: T, + + /// The initial shard of the memory access. + pub initial_shard: T, + + /// The final shard of the memory access. + pub final_shard: T, + + /// The initial clk of the memory access. + pub initial_clk: T, + + /// The final clk of the memory access. + pub final_clk: T, + + /// The initial value of the memory access. + pub initial_value: Word, + + /// The final value of the memory access. + pub final_value: Word, + + /// Whether the memory access is a real access. 
+ pub is_real: T, +} + +#[derive(AlignedBorrow, Debug, Clone, Copy)] +#[repr(C)] +pub struct MemoryLocalCols { + memory_local_entries: [SingleMemoryLocal; NUM_LOCAL_MEMORY_ENTRIES_PER_ROW], +} + +pub struct MemoryLocalChip {} + +impl MemoryLocalChip { + /// Creates a new memory chip with a certain type. + pub const fn new() -> Self { + Self {} + } +} + +impl BaseAir for MemoryLocalChip { + fn width(&self) -> usize { + NUM_MEMORY_LOCAL_INIT_COLS + } +} + +impl MachineAir for MemoryLocalChip { + type Record = ExecutionRecord; + + type Program = Program; + + fn name(&self) -> String { + "MemoryLocal".to_string() + } + + fn generate_dependencies(&self, _input: &ExecutionRecord, _output: &mut ExecutionRecord) { + // Do nothing since this chip has no dependencies. + } + + fn generate_trace( + &self, + input: &ExecutionRecord, + _output: &mut ExecutionRecord, + ) -> RowMajorMatrix { + let mut rows = Vec::<[F; NUM_MEMORY_LOCAL_INIT_COLS]>::new(); + + for local_mem_events in + &input.get_local_mem_events().chunks(NUM_LOCAL_MEMORY_ENTRIES_PER_ROW) + { + let mut row = [F::zero(); NUM_MEMORY_LOCAL_INIT_COLS]; + let cols: &mut MemoryLocalCols = row.as_mut_slice().borrow_mut(); + + for (cols, event) in cols.memory_local_entries.iter_mut().zip(local_mem_events) { + cols.addr = F::from_canonical_u32(event.addr); + cols.initial_shard = F::from_canonical_u32(event.initial_mem_access.shard); + cols.final_shard = F::from_canonical_u32(event.final_mem_access.shard); + cols.initial_clk = F::from_canonical_u32(event.initial_mem_access.timestamp); + cols.final_clk = F::from_canonical_u32(event.final_mem_access.timestamp); + cols.initial_value = event.initial_mem_access.value.into(); + cols.final_value = event.final_mem_access.value.into(); + cols.is_real = F::one(); + } + + rows.push(row); + } + + // Pad the trace to a power of two depending on the proof shape in `input`. 
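// A std-only sketch of the row-packing scheme above: local memory events are
// taken NUM_LOCAL_MEMORY_ENTRIES_PER_ROW (= 4) at a time and each group fills
// one row made of four identical column blocks; slots left empty in the last
// group stay zeroed, so their `is_real` flag is 0. The entry struct here is a
// stand-in for `SingleMemoryLocal`.
const ENTRIES_PER_ROW_SKETCH: usize = 4;

#[derive(Default, Clone, Copy)]
struct EntrySketch {
    addr: u32,
    is_real: bool,
}

fn pack_local_rows_sketch(event_addrs: &[u32]) -> Vec<[EntrySketch; ENTRIES_PER_ROW_SKETCH]> {
    event_addrs
        .chunks(ENTRIES_PER_ROW_SKETCH)
        .map(|group| {
            let mut row = [EntrySketch::default(); ENTRIES_PER_ROW_SKETCH];
            for (slot, &addr) in row.iter_mut().zip(group) {
                *slot = EntrySketch { addr, is_real: true };
            }
            row
        })
        .collect()
}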
+ pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_MEMORY_LOCAL_INIT_COLS], + input.fixed_log2_rows::(self), + ); + + RowMajorMatrix::new( + rows.into_iter().flatten().collect::>(), + NUM_MEMORY_LOCAL_INIT_COLS, + ) + } + + fn included(&self, shard: &Self::Record) -> bool { + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + shard.get_local_mem_events().nth(0).is_some() + } + } + + fn commit_scope(&self) -> InteractionScope { + InteractionScope::Global + } +} + +impl Air for MemoryLocalChip +where + AB: SP1AirBuilder, +{ + fn eval(&self, builder: &mut AB) { + let main = builder.main(); + let local = main.row_slice(0); + let local: &MemoryLocalCols = (*local).borrow(); + + for local in local.memory_local_entries.iter() { + builder.assert_eq( + local.is_real * local.is_real * local.is_real, + local.is_real * local.is_real * local.is_real, + ); + + for scope in [InteractionScope::Global, InteractionScope::Local] { + let mut values = + vec![local.initial_shard.into(), local.initial_clk.into(), local.addr.into()]; + values.extend(local.initial_value.map(Into::into)); + builder.receive( + AirInteraction::new( + values.clone(), + local.is_real.into(), + InteractionKind::Memory, + ), + scope, + ); + + let mut values = + vec![local.final_shard.into(), local.final_clk.into(), local.addr.into()]; + values.extend(local.final_value.map(Into::into)); + builder.send( + AirInteraction::new( + values.clone(), + local.is_real.into(), + InteractionKind::Memory, + ), + scope, + ); + } + } + } +} + +#[cfg(test)] +mod tests { + use p3_baby_bear::BabyBear; + use p3_matrix::dense::RowMajorMatrix; + use sp1_core_executor::{programs::tests::simple_program, ExecutionRecord, Executor}; + use sp1_stark::{ + air::{InteractionScope, MachineAir}, + baby_bear_poseidon2::BabyBearPoseidon2, + debug_interactions_with_all_chips, InteractionKind, SP1CoreOpts, StarkMachine, + }; + + use crate::{ + memory::MemoryLocalChip, riscv::RiscvAir, + syscall::precompiles::sha256::extend_tests::sha_extend_program, utils::setup_logger, + }; + + #[test] + fn test_local_memory_generate_trace() { + let program = simple_program(); + let mut runtime = Executor::new(program, SP1CoreOpts::default()); + runtime.run().unwrap(); + let shard = runtime.records[0].clone(); + + let chip: MemoryLocalChip = MemoryLocalChip::new(); + + let trace: RowMajorMatrix = + chip.generate_trace(&shard, &mut ExecutionRecord::default()); + println!("{:?}", trace.values); + + for mem_event in shard.global_memory_finalize_events { + println!("{:?}", mem_event); + } + } + + #[test] + fn test_memory_lookup_interactions() { + setup_logger(); + let program = sha_extend_program(); + let program_clone = program.clone(); + let mut runtime = Executor::new(program, SP1CoreOpts::default()); + runtime.run().unwrap(); + let machine: StarkMachine> = + RiscvAir::machine(BabyBearPoseidon2::new()); + let (pkey, _) = machine.setup(&program_clone); + let opts = SP1CoreOpts::default(); + machine.generate_dependencies(&mut runtime.records, &opts, None); + + let shards = runtime.records; + for shard in shards.clone() { + debug_interactions_with_all_chips::>( + &machine, + &pkey, + &[shard], + vec![InteractionKind::Memory], + InteractionScope::Local, + ); + } + debug_interactions_with_all_chips::>( + &machine, + &pkey, + &shards, + vec![InteractionKind::Memory], + InteractionScope::Global, + ); + } + + #[test] + fn test_byte_lookup_interactions() { + setup_logger(); + let program = sha_extend_program(); + let program_clone = program.clone(); + let mut runtime 
= Executor::new(program, SP1CoreOpts::default()); + runtime.run().unwrap(); + let machine = RiscvAir::machine(BabyBearPoseidon2::new()); + let (pkey, _) = machine.setup(&program_clone); + let opts = SP1CoreOpts::default(); + machine.generate_dependencies(&mut runtime.records, &opts, None); + + let shards = runtime.records; + for shard in shards.clone() { + debug_interactions_with_all_chips::>( + &machine, + &pkey, + &[shard], + vec![InteractionKind::Memory], + InteractionScope::Local, + ); + } + debug_interactions_with_all_chips::>( + &machine, + &pkey, + &shards, + vec![InteractionKind::Byte], + InteractionScope::Global, + ); + } +} diff --git a/crates/core/machine/src/memory/mod.rs b/crates/core/machine/src/memory/mod.rs index 13f9050f31..ba4a7a650a 100644 --- a/crates/core/machine/src/memory/mod.rs +++ b/crates/core/machine/src/memory/mod.rs @@ -1,8 +1,17 @@ mod columns; mod global; +mod local; mod program; mod trace; pub use columns::*; pub use global::*; +pub use local::*; pub use program::*; + +/// The type of memory chip that is being initialized. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum MemoryChipType { + Initialize, + Finalize, +} diff --git a/crates/core/machine/src/memory/program.rs b/crates/core/machine/src/memory/program.rs index 8f396fa7a5..7b68661624 100644 --- a/crates/core/machine/src/memory/program.rs +++ b/crates/core/machine/src/memory/program.rs @@ -2,6 +2,7 @@ use core::{ borrow::{Borrow, BorrowMut}, mem::size_of, }; +use itertools::Itertools; use p3_air::{Air, AirBuilder, AirBuilderWithPublicValues, BaseAir, PairBuilder}; use p3_field::{AbstractField, PrimeField}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; @@ -9,11 +10,14 @@ use p3_matrix::{dense::RowMajorMatrix, Matrix}; use sp1_core_executor::{ExecutionRecord, Program}; use sp1_derive::AlignedBorrow; use sp1_stark::{ - air::{AirInteraction, MachineAir, PublicValues, SP1AirBuilder, SP1_PROOF_NUM_PV_ELTS}, + air::{ + AirInteraction, InteractionScope, MachineAir, PublicValues, SP1AirBuilder, + SP1_PROOF_NUM_PV_ELTS, + }, InteractionKind, Word, }; -use crate::{operations::IsZeroOperation, utils::pad_to_power_of_two}; +use crate::{operations::IsZeroOperation, utils::pad_rows_fixed}; pub const NUM_MEMORY_PROGRAM_PREPROCESSED_COLS: usize = size_of::>(); @@ -67,12 +71,13 @@ impl MachineAir for MemoryProgramChip { } fn generate_preprocessed_trace(&self, program: &Self::Program) -> Option> { - let program_memory = program.memory_image.clone(); + let program_memory = &program.memory_image; // Note that BTreeMap is guaranteed to be sorted by key. This makes the row order // deterministic. - let rows = program_memory - .into_iter() - .map(|(addr, word)| { + let mut rows = program_memory + .iter() + .sorted() + .map(|(&addr, &word)| { let mut row = [F::zero(); NUM_MEMORY_PROGRAM_PREPROCESSED_COLS]; let cols: &mut MemoryProgramPreprocessedCols = row.as_mut_slice().borrow_mut(); cols.addr = F::from_canonical_u32(addr); @@ -82,15 +87,18 @@ impl MachineAir for MemoryProgramChip { }) .collect::>(); + // Pad the trace to a power of two depending on the proof shape in `input`. + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_MEMORY_PROGRAM_PREPROCESSED_COLS], + program.fixed_log2_rows::(self), + ); + // Convert the trace to a row major matrix. - let mut trace = RowMajorMatrix::new( + let trace = RowMajorMatrix::new( rows.into_iter().flatten().collect::>(), NUM_MEMORY_PROGRAM_PREPROCESSED_COLS, ); - - // Pad the trace to a power of two. 
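// A hedged, std-only model of the `pad_rows_fixed` behavior implied by the call
// sites in this diff: when the record carries a fixed shape, the trace is padded
// to exactly 2^log2 rows; otherwise it is padded to the next power of two. The
// real helper's signature and error handling may differ (it presumably rejects a
// shape smaller than the actual row count).
fn pad_rows_fixed_sketch<R>(
    rows: &mut Vec<R>,
    mut dummy_row: impl FnMut() -> R,
    fixed_log2_rows: Option<usize>,
) {
    let target = match fixed_log2_rows {
        Some(log2) => 1usize << log2,           // shape-prescribed height
        None => rows.len().next_power_of_two(), // default power-of-two padding
    };
    while rows.len() < target {
        rows.push(dummy_row());
    }
}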
- pad_to_power_of_two::(&mut trace.values); - Some(trace) } @@ -103,12 +111,12 @@ impl MachineAir for MemoryProgramChip { input: &ExecutionRecord, _output: &mut ExecutionRecord, ) -> RowMajorMatrix { - let program_memory_addrs = input.program.memory_image.keys().copied().collect::>(); + let program_memory_addrs = input.program.memory_image.keys().copied().sorted(); let mult = if input.public_values.shard == 1 { F::one() } else { F::zero() }; // Generate the trace rows for each event. - let rows = program_memory_addrs + let mut rows = program_memory_addrs .into_iter() .map(|_| { let mut row = [F::zero(); NUM_MEMORY_PROGRAM_MULT_COLS]; @@ -119,21 +127,28 @@ impl MachineAir for MemoryProgramChip { }) .collect::>(); - // Convert the trace to a row major matrix. - let mut trace = RowMajorMatrix::new( - rows.into_iter().flatten().collect::>(), - NUM_MEMORY_PROGRAM_MULT_COLS, + // Pad the trace to a power of two depending on the proof shape in `input`. + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_MEMORY_PROGRAM_MULT_COLS], + input.fixed_log2_rows::(self), ); - // Pad the trace to a power of two. - pad_to_power_of_two::(&mut trace.values); + // Convert the trace to a row major matrix. - trace + RowMajorMatrix::new( + rows.into_iter().flatten().collect::>(), + NUM_MEMORY_PROGRAM_MULT_COLS, + ) } fn included(&self, _: &Self::Record) -> bool { true } + + fn commit_scope(&self) -> InteractionScope { + InteractionScope::Global + } } impl BaseAir for MemoryProgramChip { @@ -183,10 +198,9 @@ where let mut values = vec![AB::Expr::zero(), AB::Expr::zero(), prep_local.addr.into()]; values.extend(prep_local.value.map(Into::into)); - builder.receive(AirInteraction::new( - values, - mult_local.multiplicity.into(), - InteractionKind::Memory, - )); + builder.send( + AirInteraction::new(values, mult_local.multiplicity.into(), InteractionKind::Memory), + InteractionScope::Global, + ); } } diff --git a/crates/core/machine/src/memory/trace.rs b/crates/core/machine/src/memory/trace.rs index 7dd5b12586..3b451ad241 100644 --- a/crates/core/machine/src/memory/trace.rs +++ b/crates/core/machine/src/memory/trace.rs @@ -6,12 +6,7 @@ use sp1_core_executor::events::{ use super::{MemoryAccessCols, MemoryReadCols, MemoryReadWriteCols, MemoryWriteCols}; impl MemoryWriteCols { - pub fn populate( - &mut self, - channel: u8, - record: MemoryWriteRecord, - output: &mut impl ByteRecord, - ) { + pub fn populate(&mut self, record: MemoryWriteRecord, output: &mut impl ByteRecord) { let current_record = MemoryRecord { value: record.value, shard: record.shard, timestamp: record.timestamp }; let prev_record = MemoryRecord { @@ -20,17 +15,12 @@ impl MemoryWriteCols { timestamp: record.prev_timestamp, }; self.prev_value = prev_record.value.into(); - self.access.populate_access(channel, current_record, prev_record, output); + self.access.populate_access(current_record, prev_record, output); } } impl MemoryReadCols { - pub fn populate( - &mut self, - channel: u8, - record: MemoryReadRecord, - output: &mut impl ByteRecord, - ) { + pub fn populate(&mut self, record: MemoryReadRecord, output: &mut impl ByteRecord) { let current_record = MemoryRecord { value: record.value, shard: record.shard, timestamp: record.timestamp }; let prev_record = MemoryRecord { @@ -38,31 +28,19 @@ impl MemoryReadCols { shard: record.prev_shard, timestamp: record.prev_timestamp, }; - self.access.populate_access(channel, current_record, prev_record, output); + self.access.populate_access(current_record, prev_record, output); } } impl MemoryReadWriteCols { - pub 
fn populate( - &mut self, - channel: u8, - record: MemoryRecordEnum, - output: &mut impl ByteRecord, - ) { + pub fn populate(&mut self, record: MemoryRecordEnum, output: &mut impl ByteRecord) { match record { - MemoryRecordEnum::Read(read_record) => self.populate_read(channel, read_record, output), - MemoryRecordEnum::Write(write_record) => { - self.populate_write(channel, write_record, output) - } + MemoryRecordEnum::Read(read_record) => self.populate_read(read_record, output), + MemoryRecordEnum::Write(write_record) => self.populate_write(write_record, output), } } - pub fn populate_write( - &mut self, - channel: u8, - record: MemoryWriteRecord, - output: &mut impl ByteRecord, - ) { + pub fn populate_write(&mut self, record: MemoryWriteRecord, output: &mut impl ByteRecord) { let current_record = MemoryRecord { value: record.value, shard: record.shard, timestamp: record.timestamp }; let prev_record = MemoryRecord { @@ -71,15 +49,10 @@ impl MemoryReadWriteCols { timestamp: record.prev_timestamp, }; self.prev_value = prev_record.value.into(); - self.access.populate_access(channel, current_record, prev_record, output); + self.access.populate_access(current_record, prev_record, output); } - pub fn populate_read( - &mut self, - channel: u8, - record: MemoryReadRecord, - output: &mut impl ByteRecord, - ) { + pub fn populate_read(&mut self, record: MemoryReadRecord, output: &mut impl ByteRecord) { let current_record = MemoryRecord { value: record.value, shard: record.shard, timestamp: record.timestamp }; let prev_record = MemoryRecord { @@ -88,14 +61,13 @@ impl MemoryReadWriteCols { timestamp: record.prev_timestamp, }; self.prev_value = prev_record.value.into(); - self.access.populate_access(channel, current_record, prev_record, output); + self.access.populate_access(current_record, prev_record, output); } } impl MemoryAccessCols { pub(crate) fn populate_access( &mut self, - channel: u8, current_record: MemoryRecord, prev_record: MemoryRecord, output: &mut impl ByteRecord, @@ -123,9 +95,9 @@ impl MemoryAccessCols { let shard = current_record.shard; // Add a byte table lookup with the 16Range op. - output.add_u16_range_check(shard, channel, diff_16bit_limb); + output.add_u16_range_check(shard, diff_16bit_limb); // Add a byte table lookup with the U8Range op. - output.add_u8_range_check(shard, channel, 0, diff_8bit_limb as u8); + output.add_u8_range_check(shard, 0, diff_8bit_limb as u8); } } diff --git a/crates/core/machine/src/operations/add.rs b/crates/core/machine/src/operations/add.rs index 5e1172b487..1ba8eb127f 100644 --- a/crates/core/machine/src/operations/add.rs +++ b/crates/core/machine/src/operations/add.rs @@ -23,7 +23,6 @@ impl AddOperation { &mut self, record: &mut impl ByteRecord, shard: u32, - channel: u8, a_u32: u32, b_u32: u32, ) -> u32 { @@ -52,9 +51,9 @@ impl AddOperation { // Range check { - record.add_u8_range_checks(shard, channel, &a); - record.add_u8_range_checks(shard, channel, &b); - record.add_u8_range_checks(shard, channel, &expected.to_le_bytes()); + record.add_u8_range_checks(shard, &a); + record.add_u8_range_checks(shard, &b); + record.add_u8_range_checks(shard, &expected.to_le_bytes()); } expected } @@ -64,8 +63,6 @@ impl AddOperation { a: Word, b: Word, cols: AddOperation, - shard: AB::Var, - channel: impl Into + Clone, is_real: AB::Expr, ) { let one = AB::Expr::one(); @@ -102,9 +99,9 @@ impl AddOperation { // Range check each byte. 
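// A plain-integer model of the word addition that `AddOperation` encodes: the
// sum is committed byte-by-byte in little-endian order together with carry
// flags, and every committed byte is then range-checked through the byte table
// (the `slice_range_check_u8` calls just below, now without shard or channel
// arguments). The exact carry-column layout of the real chip may differ.
fn add_operation_sketch(a: u32, b: u32) -> ([u8; 4], [bool; 4]) {
    let (a, b) = (a.to_le_bytes(), b.to_le_bytes());
    let (mut value, mut carry) = ([0u8; 4], [false; 4]);
    let mut c = 0u16;
    for i in 0..4 {
        let sum = a[i] as u16 + b[i] as u16 + c;
        value[i] = (sum & 0xff) as u8; // committed result byte
        carry[i] = sum > 0xff;         // committed carry flag
        c = carry[i] as u16;
    }
    (value, carry)
}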
{ - builder.slice_range_check_u8(&a.0, shard, channel.clone(), is_real.clone()); - builder.slice_range_check_u8(&b.0, shard, channel.clone(), is_real.clone()); - builder.slice_range_check_u8(&cols.value.0, shard, channel.clone(), is_real); + builder.slice_range_check_u8(&a.0, is_real.clone()); + builder.slice_range_check_u8(&b.0, is_real.clone()); + builder.slice_range_check_u8(&cols.value.0, is_real); } } } diff --git a/crates/core/machine/src/operations/add4.rs b/crates/core/machine/src/operations/add4.rs index 5fe5ec0fd1..fc010e9ab1 100644 --- a/crates/core/machine/src/operations/add4.rs +++ b/crates/core/machine/src/operations/add4.rs @@ -38,7 +38,6 @@ impl Add4Operation { &mut self, record: &mut impl ByteRecord, shard: u32, - channel: u8, a_u32: u32, b_u32: u32, c_u32: u32, @@ -70,11 +69,11 @@ impl Add4Operation { // Range check. { - record.add_u8_range_checks(shard, channel, &a); - record.add_u8_range_checks(shard, channel, &b); - record.add_u8_range_checks(shard, channel, &c); - record.add_u8_range_checks(shard, channel, &d); - record.add_u8_range_checks(shard, channel, &expected.to_le_bytes()); + record.add_u8_range_checks(shard, &a); + record.add_u8_range_checks(shard, &b); + record.add_u8_range_checks(shard, &c); + record.add_u8_range_checks(shard, &d); + record.add_u8_range_checks(shard, &expected.to_le_bytes()); } expected } @@ -86,18 +85,16 @@ impl Add4Operation { b: Word, c: Word, d: Word, - shard: AB::Var, - channel: impl Into + Copy, is_real: AB::Var, cols: Add4Operation, ) { // Range check each byte. { - builder.slice_range_check_u8(&a.0, shard, channel, is_real); - builder.slice_range_check_u8(&b.0, shard, channel, is_real); - builder.slice_range_check_u8(&c.0, shard, channel, is_real); - builder.slice_range_check_u8(&d.0, shard, channel, is_real); - builder.slice_range_check_u8(&cols.value.0, shard, channel, is_real); + builder.slice_range_check_u8(&a.0, is_real); + builder.slice_range_check_u8(&b.0, is_real); + builder.slice_range_check_u8(&c.0, is_real); + builder.slice_range_check_u8(&d.0, is_real); + builder.slice_range_check_u8(&cols.value.0, is_real); } builder.assert_bool(is_real); diff --git a/crates/core/machine/src/operations/add5.rs b/crates/core/machine/src/operations/add5.rs index 959d06dd10..dcd011f7f8 100644 --- a/crates/core/machine/src/operations/add5.rs +++ b/crates/core/machine/src/operations/add5.rs @@ -40,7 +40,6 @@ impl Add5Operation { &mut self, record: &mut impl ByteRecord, shard: u32, - channel: u8, a_u32: u32, b_u32: u32, c_u32: u32, @@ -78,12 +77,12 @@ impl Add5Operation { // Range check. { - record.add_u8_range_checks(shard, channel, &a); - record.add_u8_range_checks(shard, channel, &b); - record.add_u8_range_checks(shard, channel, &c); - record.add_u8_range_checks(shard, channel, &d); - record.add_u8_range_checks(shard, channel, &e); - record.add_u8_range_checks(shard, channel, &expected.to_le_bytes()); + record.add_u8_range_checks(shard, &a); + record.add_u8_range_checks(shard, &b); + record.add_u8_range_checks(shard, &c); + record.add_u8_range_checks(shard, &d); + record.add_u8_range_checks(shard, &e); + record.add_u8_range_checks(shard, &expected.to_le_bytes()); } expected @@ -92,18 +91,14 @@ impl Add5Operation { pub fn eval( builder: &mut AB, words: &[Word; 5], - shard: AB::Var, - channel: impl Into + Copy, is_real: AB::Var, cols: Add5Operation, ) { builder.assert_bool(is_real); // Range check each byte. 
{ - words - .iter() - .for_each(|word| builder.slice_range_check_u8(&word.0, shard, channel, is_real)); - builder.slice_range_check_u8(&cols.value.0, shard, channel, is_real); + words.iter().for_each(|word| builder.slice_range_check_u8(&word.0, is_real)); + builder.slice_range_check_u8(&cols.value.0, is_real); } let mut builder_is_real = builder.when(is_real); diff --git a/crates/core/machine/src/operations/and.rs b/crates/core/machine/src/operations/and.rs index 5aaae62013..6f3dfd788a 100644 --- a/crates/core/machine/src/operations/and.rs +++ b/crates/core/machine/src/operations/and.rs @@ -17,14 +17,7 @@ pub struct AndOperation { } impl AndOperation { - pub fn populate( - &mut self, - record: &mut impl ByteRecord, - shard: u32, - channel: u8, - x: u32, - y: u32, - ) -> u32 { + pub fn populate(&mut self, record: &mut impl ByteRecord, shard: u32, x: u32, y: u32) -> u32 { let expected = x & y; let x_bytes = x.to_le_bytes(); let y_bytes = y.to_le_bytes(); @@ -34,7 +27,6 @@ impl AndOperation { let byte_event = ByteLookupEvent { shard, - channel, opcode: ByteOpcode::AND, a1: and as u16, a2: 0, @@ -52,8 +44,6 @@ impl AndOperation { a: Word, b: Word, cols: AndOperation, - shard: AB::Var, - channel: impl Into + Copy, is_real: AB::Var, ) { for i in 0..WORD_SIZE { @@ -62,8 +52,6 @@ impl AndOperation { cols.value[i], a[i], b[i], - shard, - channel, is_real, ); } diff --git a/crates/core/machine/src/operations/field/field_den.rs b/crates/core/machine/src/operations/field/field_den.rs index 610cbf66af..3c61a08e1d 100644 --- a/crates/core/machine/src/operations/field/field_den.rs +++ b/crates/core/machine/src/operations/field/field_den.rs @@ -35,7 +35,6 @@ impl FieldDenCols { &mut self, record: &mut impl ByteRecord, shard: u32, - channel: u8, a: &BigUint, b: &BigUint, sign: bool, @@ -83,10 +82,10 @@ impl FieldDenCols { self.witness_high = Limbs(p_witness_high.try_into().unwrap()); // Range checks - record.add_u8_range_checks_field(shard, channel, &self.result.0); - record.add_u8_range_checks_field(shard, channel, &self.carry.0); - record.add_u8_range_checks_field(shard, channel, &self.witness_low.0); - record.add_u8_range_checks_field(shard, channel, &self.witness_high.0); + record.add_u8_range_checks_field(shard, &self.result.0); + record.add_u8_range_checks_field(shard, &self.carry.0); + record.add_u8_range_checks_field(shard, &self.witness_low.0); + record.add_u8_range_checks_field(shard, &self.witness_high.0); result } @@ -103,8 +102,6 @@ where a: &Limbs, b: &Limbs, sign: bool, - shard: impl Into + Clone, - channel: impl Into + Clone, is_real: impl Into + Clone, ) where V: Into, @@ -133,25 +130,10 @@ where eval_field_operation::(builder, &p_vanishing, &p_witness_low, &p_witness_high); // Range checks for the result, carry, and witness columns. 
- builder.slice_range_check_u8( - &self.result.0, - shard.clone(), - channel.clone(), - is_real.clone(), - ); - builder.slice_range_check_u8( - &self.carry.0, - shard.clone(), - channel.clone(), - is_real.clone(), - ); - builder.slice_range_check_u8( - &self.witness_low.0, - shard.clone(), - channel.clone(), - is_real.clone(), - ); - builder.slice_range_check_u8(&self.witness_high.0, shard, channel.clone(), is_real); + builder.slice_range_check_u8(&self.result.0, is_real.clone()); + builder.slice_range_check_u8(&self.carry.0, is_real.clone()); + builder.slice_range_check_u8(&self.witness_low.0, is_real.clone()); + builder.slice_range_check_u8(&self.witness_high.0, is_real); } } @@ -245,7 +227,7 @@ mod tests { let cols: &mut TestCols = row.as_mut_slice().borrow_mut(); cols.a = P::to_limbs_field::(a); cols.b = P::to_limbs_field::(b); - cols.a_den_b.populate(output, 1, 0, a, b, self.sign); + cols.a_den_b.populate(output, 0, a, b, self.sign); row }) .collect::>(); @@ -276,15 +258,7 @@ mod tests { let main = builder.main(); let local = main.row_slice(0); let local: &TestCols = (*local).borrow(); - local.a_den_b.eval( - builder, - &local.a, - &local.b, - self.sign, - AB::F::one(), - AB::F::zero(), - AB::F::one(), - ); + local.a_den_b.eval(builder, &local.a, &local.b, self.sign, AB::F::zero()); } } diff --git a/crates/core/machine/src/operations/field/field_inner_product.rs b/crates/core/machine/src/operations/field/field_inner_product.rs index c90bf035d8..6d2e7cb1db 100644 --- a/crates/core/machine/src/operations/field/field_inner_product.rs +++ b/crates/core/machine/src/operations/field/field_inner_product.rs @@ -34,7 +34,6 @@ impl FieldInnerProductCols { &mut self, record: &mut impl ByteRecord, shard: u32, - channel: u8, a: &[BigUint], b: &[BigUint], ) -> BigUint { @@ -78,10 +77,10 @@ impl FieldInnerProductCols { self.witness_high = Limbs(p_witness_high.try_into().unwrap()); // Range checks - record.add_u8_range_checks_field(shard, channel, &self.result.0); - record.add_u8_range_checks_field(shard, channel, &self.carry.0); - record.add_u8_range_checks_field(shard, channel, &self.witness_low.0); - record.add_u8_range_checks_field(shard, channel, &self.witness_high.0); + record.add_u8_range_checks_field(shard, &self.result.0); + record.add_u8_range_checks_field(shard, &self.carry.0); + record.add_u8_range_checks_field(shard, &self.witness_low.0); + record.add_u8_range_checks_field(shard, &self.witness_high.0); result.clone() } @@ -96,8 +95,6 @@ where builder: &mut AB, a: &[Limbs], b: &[Limbs], - shard: impl Into + Clone, - channel: impl Into + Clone, is_real: impl Into + Clone, ) where V: Into, @@ -127,25 +124,10 @@ where eval_field_operation::(builder, &p_vanishing, &p_witness_low, &p_witness_high); // Range checks for the result, carry, and witness columns. 
- builder.slice_range_check_u8( - &self.result.0, - shard.clone(), - channel.clone(), - is_real.clone(), - ); - builder.slice_range_check_u8( - &self.carry.0, - shard.clone(), - channel.clone(), - is_real.clone(), - ); - builder.slice_range_check_u8( - &self.witness_low.0, - shard.clone(), - channel.clone(), - is_real.clone(), - ); - builder.slice_range_check_u8(&self.witness_high.0, shard, channel.clone(), is_real); + builder.slice_range_check_u8(&self.result.0, is_real.clone()); + builder.slice_range_check_u8(&self.carry.0, is_real.clone()); + builder.slice_range_check_u8(&self.witness_low.0, is_real.clone()); + builder.slice_range_check_u8(&self.witness_high.0, is_real); } } @@ -231,7 +213,7 @@ mod tests { let cols: &mut TestCols = row.as_mut_slice().borrow_mut(); cols.a[0] = P::to_limbs_field::(&a[0]); cols.b[0] = P::to_limbs_field::(&b[0]); - cols.a_ip_b.populate(output, 1, 0, a, b); + cols.a_ip_b.populate(output, 1, a, b); row }) .collect::>(); @@ -265,14 +247,7 @@ mod tests { let main = builder.main(); let local = main.row_slice(0); let local: &TestCols = (*local).borrow(); - local.a_ip_b.eval( - builder, - &local.a, - &local.b, - AB::F::one(), - AB::F::zero(), - AB::F::one(), - ); + local.a_ip_b.eval(builder, &local.a, &local.b, AB::F::one()); } } diff --git a/crates/core/machine/src/operations/field/field_op.rs b/crates/core/machine/src/operations/field/field_op.rs index b2120dad99..971b9f6f30 100644 --- a/crates/core/machine/src/operations/field/field_op.rs +++ b/crates/core/machine/src/operations/field/field_op.rs @@ -107,7 +107,6 @@ impl FieldOpCols { &mut self, record: &mut impl ByteRecord, shard: u32, - channel: u8, a: &BigUint, b: &BigUint, modulus: &BigUint, @@ -135,9 +134,21 @@ impl FieldOpCols { FieldOperation::Div => { // As modulus is prime, we can use Fermat's little theorem to compute the // inverse. - let result = - (a * b.modpow(&(modulus.clone() - 2u32), &modulus.clone())) % modulus.clone(); - + cfg_if::cfg_if! { + if #[cfg(feature = "bigint-rug")] { + use sp1_curves::utils::{biguint_to_rug, rug_to_biguint}; + let rug_a = biguint_to_rug(a); + let rug_b = biguint_to_rug(b); + let rug_modulus = biguint_to_rug(modulus); + let rug_result = (rug_a + * rug_b.pow_mod(&(rug_modulus.clone() - 2u32), &rug_modulus.clone()).unwrap()) + % rug_modulus.clone(); + let result = rug_to_biguint(&rug_result); + } else { + let result = + (a * b.modpow(&(modulus.clone() - 2u32), &modulus.clone())) % modulus.clone(); + } + } // We populate the carry, witness_low, witness_high as if we were doing a // multiplication with result * b. 
But we populate `result` with the // actual result of the multiplication because those columns are @@ -152,10 +163,10 @@ impl FieldOpCols { }; // Range checks - record.add_u8_range_checks_field(shard, channel, &self.result.0); - record.add_u8_range_checks_field(shard, channel, &self.carry.0); - record.add_u8_range_checks_field(shard, channel, &self.witness_low.0); - record.add_u8_range_checks_field(shard, channel, &self.witness_high.0); + record.add_u8_range_checks_field(shard, &self.result.0); + record.add_u8_range_checks_field(shard, &self.carry.0); + record.add_u8_range_checks_field(shard, &self.witness_low.0); + record.add_u8_range_checks_field(shard, &self.witness_high.0); result } @@ -166,12 +177,11 @@ impl FieldOpCols { &mut self, record: &mut impl ByteRecord, shard: u32, - channel: u8, a: &BigUint, b: &BigUint, op: FieldOperation, ) -> BigUint { - self.populate_with_modulus(record, shard, channel, a, b, &P::modulus(), op) + self.populate_with_modulus(record, shard, a, b, &P::modulus(), op) } } @@ -188,8 +198,6 @@ impl FieldOpCols { is_sub: impl Into + Clone, is_mul: impl Into + Clone, is_div: impl Into + Clone, - shard: impl Into + Clone, - channel: impl Into + Clone, is_real: impl Into + Clone, ) where V: Into, @@ -213,15 +221,7 @@ impl FieldOpCols { let p_div = p_res_param * p_b.clone(); let p_op = p_add * is_add + p_sub * is_sub + p_mul * is_mul + p_div * is_div; - self.eval_with_polynomials( - builder, - p_op, - modulus.clone(), - p_result, - shard, - channel, - is_real, - ); + self.eval_with_polynomials(builder, p_op, modulus.clone(), p_result, is_real); } #[allow(clippy::too_many_arguments)] @@ -232,8 +232,6 @@ impl FieldOpCols { b: &(impl Into> + Clone), modulus: &(impl Into> + Clone), op: FieldOperation, - shard: impl Into + Clone, - channel: impl Into + Clone, is_real: impl Into + Clone, ) where V: Into, @@ -250,15 +248,7 @@ impl FieldOpCols { FieldOperation::Add | FieldOperation::Sub => p_a + p_b, FieldOperation::Mul | FieldOperation::Div => p_a * p_b, }; - self.eval_with_polynomials( - builder, - p_op, - modulus.clone(), - p_result, - shard, - channel, - is_real, - ); + self.eval_with_polynomials(builder, p_op, modulus.clone(), p_result, is_real); } #[allow(clippy::too_many_arguments)] @@ -268,8 +258,6 @@ impl FieldOpCols { op: impl Into>, modulus: impl Into>, result: impl Into>, - shard: impl Into + Clone, - channel: impl Into + Clone, is_real: impl Into + Clone, ) where V: Into, @@ -286,30 +274,10 @@ impl FieldOpCols { eval_field_operation::(builder, &p_vanishing, &p_witness_low, &p_witness_high); // Range checks for the result, carry, and witness columns. 
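// A standalone sketch of the division path above: because the modulus is prime,
// b^-1 = b^(p-2) (Fermat's little theorem), so a / b is computed as
// a * b^(p-2) mod p. This mirrors the pure num-bigint branch; the `bigint-rug`
// branch computes the same value with rug's `pow_mod`.
use num_bigint::BigUint;

fn field_div_sketch(a: &BigUint, b: &BigUint, modulus: &BigUint) -> BigUint {
    let b_inv = b.modpow(&(modulus.clone() - 2u32), modulus);
    (a * b_inv) % modulus
}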
- builder.slice_range_check_u8( - &self.result.0, - shard.clone(), - channel.clone(), - is_real.clone(), - ); - builder.slice_range_check_u8( - &self.carry.0, - shard.clone(), - channel.clone(), - is_real.clone(), - ); - builder.slice_range_check_u8( - p_witness_low.coefficients(), - shard.clone(), - channel.clone(), - is_real.clone(), - ); - builder.slice_range_check_u8( - p_witness_high.coefficients(), - shard.clone(), - channel.clone(), - is_real, - ); + builder.slice_range_check_u8(&self.result.0, is_real.clone()); + builder.slice_range_check_u8(&self.carry.0, is_real.clone()); + builder.slice_range_check_u8(p_witness_low.coefficients(), is_real.clone()); + builder.slice_range_check_u8(p_witness_high.coefficients(), is_real); } #[allow(clippy::too_many_arguments)] @@ -319,15 +287,13 @@ impl FieldOpCols { a: &(impl Into> + Clone), b: &(impl Into> + Clone), op: FieldOperation, - shard: impl Into + Clone, - channel: impl Into + Clone, is_real: impl Into + Clone, ) where V: Into, Limbs: Copy, { let p_limbs = Polynomial::from_iter(P::modulus_field_iter::().map(AB::Expr::from)); - self.eval_with_modulus::(builder, a, b, &p_limbs, op, shard, channel, is_real); + self.eval_with_modulus::(builder, a, b, &p_limbs, op, is_real); } } @@ -423,7 +389,7 @@ mod tests { let cols: &mut TestCols = row.as_mut_slice().borrow_mut(); cols.a = P::to_limbs_field::(a); cols.b = P::to_limbs_field::(b); - cols.a_op_b.populate(&mut blu_events, 1, 0, a, b, self.operation); + cols.a_op_b.populate(&mut blu_events, 1, a, b, self.operation); output.add_byte_lookup_events(blu_events); row }) @@ -458,15 +424,7 @@ mod tests { let main = builder.main(); let local = main.row_slice(0); let local: &TestCols = (*local).borrow(); - local.a_op_b.eval( - builder, - &local.a, - &local.b, - self.operation, - AB::F::one(), - AB::F::zero(), - AB::F::one(), - ); + local.a_op_b.eval(builder, &local.a, &local.b, self.operation, AB::F::one()); } } diff --git a/crates/core/machine/src/operations/field/field_sqrt.rs b/crates/core/machine/src/operations/field/field_sqrt.rs index 50d2cc4729..a0f40c6a48 100644 --- a/crates/core/machine/src/operations/field/field_sqrt.rs +++ b/crates/core/machine/src/operations/field/field_sqrt.rs @@ -43,7 +43,6 @@ impl FieldSqrtCols { &mut self, record: &mut impl ByteRecord, shard: u32, - channel: u8, a: &BigUint, sqrt_fn: impl Fn(&BigUint) -> BigUint, ) -> BigUint { @@ -53,7 +52,7 @@ impl FieldSqrtCols { // Use FieldOpCols to compute result * result. let sqrt_squared = - self.multiplication.populate(record, shard, channel, &sqrt, &sqrt, FieldOperation::Mul); + self.multiplication.populate(record, shard, &sqrt, &sqrt, FieldOperation::Mul); // If the result is indeed the square root of a, then result * result = a. assert_eq!(sqrt_squared, a.clone()); @@ -63,14 +62,13 @@ impl FieldSqrtCols { self.multiplication.result = P::to_limbs_field::(&sqrt); // Populate the range columns. - self.range.populate(record, shard, channel, &sqrt, &modulus); + self.range.populate(record, shard, &sqrt, &modulus); let sqrt_bytes = P::to_limbs(&sqrt); self.lsb = F::from_canonical_u8(sqrt_bytes[0] & 1); let and_event = ByteLookupEvent { shard, - channel, opcode: ByteOpcode::AND, a1: self.lsb.as_canonical_u32() as u16, a2: 0, @@ -82,7 +80,6 @@ impl FieldSqrtCols { // Add the byte range check for `sqrt`. 
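// A toy-integer model of the square-root check above: the claimed root comes in
// as a hint, and the chip only verifies sqrt * sqrt == a (mod p), that the root
// is reduced below the modulus, and that its least-significant bit matches the
// requested parity so a unique root is pinned down. A small prime stands in for
// the real curve field here.
fn check_sqrt_hint_sketch(a: u64, sqrt_hint: u64, want_odd: bool) -> bool {
    const P: u64 = 97; // toy prime modulus
    sqrt_hint < P
        && (sqrt_hint * sqrt_hint) % P == a % P
        && ((sqrt_hint & 1) == 1) == want_odd
}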
record.add_u8_range_checks( shard, - channel, self.multiplication .result .0 @@ -107,8 +104,6 @@ where builder: &mut AB, a: &Limbs, is_odd: impl Into, - shard: impl Into + Clone, - channel: impl Into + Clone, is_real: impl Into + Clone, ) where V: Into, @@ -121,33 +116,18 @@ where multiplication.result = *a; // Compute sqrt * sqrt. We pass in P since we want its BaseField to be the mod. - multiplication.eval( - builder, - &sqrt, - &sqrt, - FieldOperation::Mul, - shard.clone(), - channel.clone(), - is_real.clone(), - ); + multiplication.eval(builder, &sqrt, &sqrt, FieldOperation::Mul, is_real.clone()); let modulus_limbs = P::to_limbs_field_vec(&P::modulus()); self.range.eval( builder, &sqrt, &limbs_from_vec::(modulus_limbs), - shard.clone(), - channel.clone(), is_real.clone(), ); // Range check that `sqrt` limbs are bytes. - builder.slice_range_check_u8( - sqrt.0.as_slice(), - shard.clone(), - channel.clone(), - is_real.clone(), - ); + builder.slice_range_check_u8(sqrt.0.as_slice(), is_real.clone()); // Assert that the square root is the positive one, i.e., with least significant bit 0. // This is done by computing LSB = least_significant_byte & 1. @@ -158,8 +138,6 @@ where self.lsb, sqrt[0], AB::F::one(), - shard, - channel, is_real, ); } @@ -246,7 +224,7 @@ mod tests { let mut row = [F::zero(); NUM_TEST_COLS]; let cols: &mut TestCols = row.as_mut_slice().borrow_mut(); cols.a = P::to_limbs_field::(a); - cols.sqrt.populate(&mut blu_events, 1, 0, a, ed25519_sqrt); + cols.sqrt.populate(&mut blu_events, 1, a, ed25519_sqrt); output.add_byte_lookup_events(blu_events); row }) @@ -283,14 +261,7 @@ mod tests { let local: &TestCols = (*local).borrow(); // eval verifies that local.sqrt.result is indeed the square root of local.a. - local.sqrt.eval( - builder, - &local.a, - AB::F::zero(), - AB::F::one(), - AB::F::zero(), - AB::F::one(), - ); + local.sqrt.eval(builder, &local.a, AB::F::zero(), AB::F::one()); } } diff --git a/crates/core/machine/src/operations/field/range.rs b/crates/core/machine/src/operations/field/range.rs index 0f6dce4144..b7490c584b 100644 --- a/crates/core/machine/src/operations/field/range.rs +++ b/crates/core/machine/src/operations/field/range.rs @@ -31,7 +31,6 @@ impl FieldLtCols { &mut self, record: &mut impl ByteRecord, shard: u32, - channel: u8, lhs: &BigUint, rhs: &BigUint, ) { @@ -53,7 +52,6 @@ impl FieldLtCols { record.add_byte_lookup_event(ByteLookupEvent { opcode: ByteOpcode::LTU, shard, - channel, a1: 1, a2: 0, b: *byte, @@ -79,8 +77,6 @@ impl FieldLtCols { builder: &mut AB, lhs: &E1, rhs: &E2, - shard: impl Into + Clone, - channel: impl Into + Clone, is_real: impl Into + Clone, ) where V: Into, @@ -145,8 +141,6 @@ impl FieldLtCols { AB::F::one(), self.lhs_comparison_byte, self.rhs_comparison_byte, - shard, - channel, is_real, ) } diff --git a/crates/core/machine/src/operations/fixed_rotate_right.rs b/crates/core/machine/src/operations/fixed_rotate_right.rs index 5f97ec2923..03a4454fdd 100644 --- a/crates/core/machine/src/operations/fixed_rotate_right.rs +++ b/crates/core/machine/src/operations/fixed_rotate_right.rs @@ -43,7 +43,6 @@ impl FixedRotateRightOperation { &mut self, record: &mut impl ByteRecord, shard: u32, - channel: u8, input: u32, rotation: usize, ) -> u32 { @@ -75,7 +74,6 @@ impl FixedRotateRightOperation { let byte_event = ByteLookupEvent { shard, - channel, opcode: ByteOpcode::ShrCarry, a1: shift as u16, a2: carry, @@ -110,8 +108,6 @@ impl FixedRotateRightOperation { input: Word, rotation: usize, cols: FixedRotateRightOperation, - shard: AB::Var, - 
channel: impl Into + Clone, is_real: AB::Var, ) { // Compute some constants with respect to the rotation needed for the rotation. @@ -138,8 +134,6 @@ impl FixedRotateRightOperation { cols.carry[i], input_bytes_rotated[i], AB::F::from_canonical_usize(nb_bits_to_shift), - shard, - channel.clone(), is_real, ); diff --git a/crates/core/machine/src/operations/fixed_shift_right.rs b/crates/core/machine/src/operations/fixed_shift_right.rs index d77acb9b42..50aa896205 100644 --- a/crates/core/machine/src/operations/fixed_shift_right.rs +++ b/crates/core/machine/src/operations/fixed_shift_right.rs @@ -43,7 +43,6 @@ impl FixedShiftRightOperation { &mut self, record: &mut impl ByteRecord, shard: u32, - channel: u8, input: u32, rotation: usize, ) -> u32 { @@ -74,7 +73,6 @@ impl FixedShiftRightOperation { let (shift, carry) = shr_carry(b, c); let byte_event = ByteLookupEvent { shard, - channel, opcode: ByteOpcode::ShrCarry, a1: shift as u16, a2: carry, @@ -109,8 +107,6 @@ impl FixedShiftRightOperation { input: Word, rotation: usize, cols: FixedShiftRightOperation, - shard: impl Into + Copy, - channel: impl Into + Copy, is_real: AB::Var, ) { // Compute some constants with respect to the rotation needed for the rotation. @@ -138,8 +134,6 @@ impl FixedShiftRightOperation { cols.carry[i], input_bytes_rotated[i].clone(), AB::F::from_canonical_usize(nb_bits_to_shift), - shard, - channel, is_real, ); diff --git a/crates/core/machine/src/operations/lt.rs b/crates/core/machine/src/operations/lt.rs index 2f531f18c8..5d9f000983 100644 --- a/crates/core/machine/src/operations/lt.rs +++ b/crates/core/machine/src/operations/lt.rs @@ -22,14 +22,7 @@ pub struct AssertLtColsBytes { } impl AssertLtColsBytes { - pub fn populate( - &mut self, - record: &mut impl ByteRecord, - shard: u32, - channel: u8, - a: &[u8], - b: &[u8], - ) { + pub fn populate(&mut self, record: &mut impl ByteRecord, shard: u32, a: &[u8], b: &[u8]) { let mut byte_flags = vec![0u8; N]; for (a_byte, b_byte, flag) in @@ -43,7 +36,6 @@ impl AssertLtColsBytes { record.add_byte_lookup_event(ByteLookupEvent { opcode: ByteOpcode::LTU, shard, - channel, a1: 1, a2: 0, b: *a_byte, @@ -69,8 +61,6 @@ impl AssertLtColsBytes { builder: &mut AB, a: &[Ea], b: &[Eb], - shard: impl Into + Clone, - channel: impl Into + Clone, is_real: impl Into + Clone, ) where V: Into, @@ -134,8 +124,6 @@ impl AssertLtColsBytes { AB::F::one(), self.a_comparison_byte, self.b_comparison_byte, - shard, - channel, is_real, ) } diff --git a/crates/core/machine/src/operations/not.rs b/crates/core/machine/src/operations/not.rs index bb09ff58e4..e9d32e5adf 100644 --- a/crates/core/machine/src/operations/not.rs +++ b/crates/core/machine/src/operations/not.rs @@ -14,19 +14,13 @@ pub struct NotOperation { } impl NotOperation { - pub fn populate( - &mut self, - record: &mut impl ByteRecord, - shard: u32, - channel: u8, - x: u32, - ) -> u32 { + pub fn populate(&mut self, record: &mut impl ByteRecord, shard: u32, x: u32) -> u32 { let expected = !x; let x_bytes = x.to_le_bytes(); for i in 0..WORD_SIZE { self.value[i] = F::from_canonical_u8(!x_bytes[i]); } - record.add_u8_range_checks(shard, channel, &x_bytes); + record.add_u8_range_checks(shard, &x_bytes); expected } @@ -35,8 +29,6 @@ impl NotOperation { builder: &mut AB, a: Word, cols: NotOperation, - shard: impl Into + Copy, - channel: impl Into + Copy, is_real: impl Into + Copy, ) { for i in (0..WORD_SIZE).step_by(2) { @@ -46,8 +38,6 @@ impl NotOperation { AB::F::zero(), a[i], a[i + 1], - shard, - channel, is_real, ); } diff --git 
a/crates/core/machine/src/operations/or.rs b/crates/core/machine/src/operations/or.rs index fb4a675820..2eaa3aadea 100644 --- a/crates/core/machine/src/operations/or.rs +++ b/crates/core/machine/src/operations/or.rs @@ -13,20 +13,13 @@ pub struct OrOperation { } impl OrOperation { - pub fn populate( - &mut self, - record: &mut ExecutionRecord, - shard: u32, - channel: u8, - x: u32, - y: u32, - ) -> u32 { + pub fn populate(&mut self, record: &mut ExecutionRecord, shard: u32, x: u32, y: u32) -> u32 { let expected = x | y; let x_bytes = x.to_le_bytes(); let y_bytes = y.to_le_bytes(); for i in 0..WORD_SIZE { self.value[i] = F::from_canonical_u8(x_bytes[i] | y_bytes[i]); - record.lookup_or(shard, channel, x_bytes[i], y_bytes[i]); + record.lookup_or(shard, x_bytes[i], y_bytes[i]); } expected } @@ -36,8 +29,6 @@ impl OrOperation { a: Word, b: Word, cols: OrOperation, - shard: impl Into + Copy, - channel: impl Into + Copy, is_real: AB::Var, ) { for i in 0..WORD_SIZE { @@ -46,8 +37,6 @@ impl OrOperation { cols.value[i], a[i], b[i], - shard, - channel, is_real, ); } diff --git a/crates/core/machine/src/operations/xor.rs b/crates/core/machine/src/operations/xor.rs index 93fa41ac44..c69113988c 100644 --- a/crates/core/machine/src/operations/xor.rs +++ b/crates/core/machine/src/operations/xor.rs @@ -16,14 +16,7 @@ pub struct XorOperation { } impl XorOperation { - pub fn populate( - &mut self, - record: &mut impl ByteRecord, - shard: u32, - channel: u8, - x: u32, - y: u32, - ) -> u32 { + pub fn populate(&mut self, record: &mut impl ByteRecord, shard: u32, x: u32, y: u32) -> u32 { let expected = x ^ y; let x_bytes = x.to_le_bytes(); let y_bytes = y.to_le_bytes(); @@ -33,7 +26,6 @@ impl XorOperation { let byte_event = ByteLookupEvent { shard, - channel, opcode: ByteOpcode::XOR, a1: xor as u16, a2: 0, @@ -51,8 +43,6 @@ impl XorOperation { a: Word, b: Word, cols: XorOperation, - shard: AB::Var, - channel: impl Into + Clone, is_real: AB::Var, ) { for i in 0..WORD_SIZE { @@ -61,8 +51,6 @@ impl XorOperation { cols.value[i], a[i], b[i], - shard, - channel.clone(), is_real, ); } diff --git a/crates/core/machine/src/program/mod.rs b/crates/core/machine/src/program/mod.rs index dc8daf2d0f..6e35a9283d 100644 --- a/crates/core/machine/src/program/mod.rs +++ b/crates/core/machine/src/program/mod.rs @@ -4,7 +4,7 @@ use core::{ }; use std::collections::HashMap; -use crate::air::ProgramAirBuilder; +use crate::{air::ProgramAirBuilder, utils::pad_rows_fixed}; use p3_air::{Air, BaseAir, PairBuilder}; use p3_field::PrimeField; use p3_matrix::{dense::RowMajorMatrix, Matrix}; @@ -12,10 +12,7 @@ use sp1_core_executor::{ExecutionRecord, Program}; use sp1_derive::AlignedBorrow; use sp1_stark::air::{MachineAir, SP1AirBuilder}; -use crate::{ - cpu::columns::{InstructionCols, OpcodeSelectorCols}, - utils::pad_to_power_of_two, -}; +use crate::cpu::columns::{InstructionCols, OpcodeSelectorCols}; /// The number of preprocessed program columns. 
pub const NUM_PROGRAM_PREPROCESSED_COLS: usize = size_of::>(); @@ -64,13 +61,15 @@ impl MachineAir for ProgramChip { } fn generate_preprocessed_trace(&self, program: &Self::Program) -> Option> { - debug_assert!(!program.instructions.is_empty(), "empty program"); - let rows = program + debug_assert!( + !program.instructions.is_empty() || program.preprocessed_shape.is_some(), + "empty program" + ); + let mut rows = program .instructions - .clone() - .into_iter() + .iter() .enumerate() - .map(|(i, instruction)| { + .map(|(i, &instruction)| { let pc = program.pc_base + (i as u32 * 4); let mut row = [F::zero(); NUM_PROGRAM_PREPROCESSED_COLS]; let cols: &mut ProgramPreprocessedCols = row.as_mut_slice().borrow_mut(); @@ -82,15 +81,19 @@ impl MachineAir for ProgramChip { }) .collect::>(); + // Pad the trace to a power of two depending on the proof shape in `input`. + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_PROGRAM_PREPROCESSED_COLS], + program.fixed_log2_rows::(self), + ); + // Convert the trace to a row major matrix. - let mut trace = RowMajorMatrix::new( + let trace = RowMajorMatrix::new( rows.into_iter().flatten().collect::>(), NUM_PROGRAM_PREPROCESSED_COLS, ); - // Pad the trace to a power of two. - pad_to_power_of_two::(&mut trace.values); - Some(trace) } @@ -113,7 +116,7 @@ impl MachineAir for ProgramChip { instruction_counts.entry(pc).and_modify(|count| *count += 1).or_insert(1); }); - let rows = input + let mut rows = input .program .instructions .clone() @@ -130,16 +133,14 @@ impl MachineAir for ProgramChip { }) .collect::>(); - // Convert the trace to a row major matrix. - let mut trace = RowMajorMatrix::new( - rows.into_iter().flatten().collect::>(), - NUM_PROGRAM_MULT_COLS, + // Pad the trace to a power of two depending on the proof shape in `input`. + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_PROGRAM_MULT_COLS], + input.fixed_log2_rows::(self), ); - // Pad the trace to a power of two. 
- pad_to_power_of_two::(&mut trace.values); - - trace + RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_PROGRAM_MULT_COLS) } fn included(&self, _: &Self::Record) -> bool { @@ -180,8 +181,9 @@ where #[cfg(test)] mod tests { - use std::{collections::BTreeMap, sync::Arc}; + use std::sync::Arc; + use hashbrown::HashMap; use p3_baby_bear::BabyBear; use p3_matrix::dense::RowMajorMatrix; @@ -206,7 +208,8 @@ mod tests { instructions, pc_start: 0, pc_base: 0, - memory_image: BTreeMap::new(), + memory_image: HashMap::new(), + preprocessed_shape: None, }), ..Default::default() }; diff --git a/crates/core/machine/src/riscv/cost.rs b/crates/core/machine/src/riscv/cost.rs index 0da65a34cc..27469ee00a 100644 --- a/crates/core/machine/src/riscv/cost.rs +++ b/crates/core/machine/src/riscv/cost.rs @@ -127,6 +127,15 @@ impl CostEstimator for ExecutionReport { (bls12381_decompress_events as u64) * costs[&RiscvAirDiscriminants::Bls12381Decompress]; total_chips += 1; + let syscall_events = self.syscall_counts.values().sum::(); + total_area += (syscall_events as u64) * costs[&RiscvAirDiscriminants::SyscallCore]; + total_chips += 1; + + let syscall_precompile_events = self.syscall_counts.len(); + total_area += + (syscall_precompile_events as u64) * costs[&RiscvAirDiscriminants::SyscallPrecompile]; + total_chips += 1; + let divrem_events = self.opcode_counts[Opcode::DIV] + self.opcode_counts[Opcode::REM] + self.opcode_counts[Opcode::DIVU] @@ -163,12 +172,19 @@ impl CostEstimator for ExecutionReport { total_area += (lt_events as u64) * costs[&RiscvAirDiscriminants::Lt]; total_chips += 1; - let memory_initialize_events = self.touched_memory_addresses; - total_area += (memory_initialize_events as u64) * costs[&RiscvAirDiscriminants::MemoryInit]; + let memory_global_initialize_events = self.touched_memory_addresses; + total_area += (memory_global_initialize_events as u64) + * costs[&RiscvAirDiscriminants::MemoryGlobalInit]; + total_chips += 1; + + let memory_global_finalize_events = self.touched_memory_addresses; + total_area += (memory_global_finalize_events as u64) + * costs[&RiscvAirDiscriminants::MemoryGlobalFinal]; total_chips += 1; - let memory_finalize_events = self.touched_memory_addresses; - total_area += (memory_finalize_events as u64) * costs[&RiscvAirDiscriminants::MemoryFinal]; + let memory_local_initialize_events = self.touched_memory_addresses; + total_area += + (memory_local_initialize_events as u64) * costs[&RiscvAirDiscriminants::MemoryLocal]; total_chips += 1; assert_eq!(total_chips, chips.len(), "chip count mismatch"); diff --git a/crates/core/machine/src/riscv/mod.rs b/crates/core/machine/src/riscv/mod.rs index 7fba0e1ec2..34319f6f96 100644 --- a/crates/core/machine/src/riscv/mod.rs +++ b/crates/core/machine/src/riscv/mod.rs @@ -1,35 +1,53 @@ pub mod cost; +mod shape; + +use itertools::Itertools; +pub use shape::*; +use sp1_core_executor::{ + events::PrecompileLocalMemory, syscalls::SyscallCode, ExecutionRecord, Program, +}; + use crate::{ - memory::{MemoryChipType, MemoryProgramChip}, + memory::{ + MemoryChipType, MemoryLocalChip, MemoryProgramChip, NUM_LOCAL_MEMORY_ENTRIES_PER_ROW, + }, + riscv::MemoryChipType::{Finalize, Initialize}, syscall::precompiles::fptower::{Fp2AddSubAssignChip, Fp2MulAssignChip, FpOpChip}, }; -use hashbrown::HashMap; +use hashbrown::{HashMap, HashSet}; use p3_field::PrimeField32; pub use riscv_chips::*; use sp1_curves::weierstrass::{bls12_381::Bls12381BaseField, bn254::Bn254BaseField}; use sp1_stark::{ - air::{MachineAir, SP1_PROOF_NUM_PV_ELTS}, - Chip, 
StarkGenericConfig, StarkMachine, + air::{InteractionScope, MachineAir, SP1_PROOF_NUM_PV_ELTS}, + Chip, InteractionKind, StarkGenericConfig, StarkMachine, }; use strum_macros::{EnumDiscriminants, EnumIter}; use tracing::instrument; +pub const MAX_LOG_NUMBER_OF_SHARDS: usize = 16; +pub const MAX_NUMBER_OF_SHARDS: usize = 1 << MAX_LOG_NUMBER_OF_SHARDS; + /// A module for importing all the different RISC-V chips. pub(crate) mod riscv_chips { pub use crate::{ alu::{AddSubChip, BitwiseChip, DivRemChip, LtChip, MulChip, ShiftLeft, ShiftRightChip}, bytes::ByteChip, cpu::CpuChip, - memory::MemoryChip, + memory::MemoryGlobalChip, program::ProgramChip, - syscall::precompiles::{ - edwards::{EdAddAssignChip, EdDecompressChip}, - keccak256::KeccakPermuteChip, - sha256::{ShaCompressChip, ShaExtendChip}, - uint256::Uint256MulChip, - weierstrass::{ - WeierstrassAddAssignChip, WeierstrassDecompressChip, WeierstrassDoubleAssignChip, + syscall::{ + chip::SyscallChip, + precompiles::{ + edwards::{EdAddAssignChip, EdDecompressChip}, + keccak256::KeccakPermuteChip, + sha256::{ShaCompressChip, ShaExtendChip}, + uint256::Uint256MulChip, + weierstrass::{ + WeierstrassAddAssignChip, WeierstrassDecompressChip, + WeierstrassDoubleAssignChip, + }, }, }, }; @@ -70,12 +88,18 @@ pub enum RiscvAir { ShiftRight(ShiftRightChip), /// A lookup table for byte operations. ByteLookup(ByteChip), - /// A table for initializing the memory state. - MemoryInit(MemoryChip), - /// A table for finalizing the memory state. - MemoryFinal(MemoryChip), + /// A table for initializing the global memory state. + MemoryGlobalInit(MemoryGlobalChip), + /// A table for finalizing the global memory state. + MemoryGlobalFinal(MemoryGlobalChip), + /// A table for the local memory state. + MemoryLocal(MemoryLocalChip), /// A table for initializing the program memory. ProgramMemory(MemoryProgramChip), + /// A table for all the syscall invocations. + SyscallCore(SyscallChip), + /// A table for all the precompile invocations. + SyscallPrecompile(SyscallChip), /// A precompile for sha256 extend. Sha256Extend(ShaExtendChip), /// A precompile for sha256 compress. @@ -122,7 +146,7 @@ impl RiscvAir { #[instrument("construct RiscvAir machine", level = "debug", skip_all)] pub fn machine>(config: SC) -> StarkMachine { let chips = Self::chips(); - StarkMachine::new(config, chips, SP1_PROOF_NUM_PV_ELTS) + StarkMachine::new(config, chips, SP1_PROOF_NUM_PV_ELTS, true) } /// Get all the different RISC-V AIRs. @@ -137,6 +161,11 @@ impl RiscvAir { costs } + pub fn get_airs_and_costs() -> (Vec, HashMap) { + let (chips, costs) = Self::get_chips_and_costs(); + (chips.into_iter().map(|chip| chip.into_inner()).collect(), costs) + } + /// Get all the different RISC-V AIRs. 
pub fn get_chips_and_costs() -> (Vec>, HashMap) { let mut costs: HashMap = HashMap::new(); @@ -256,6 +285,14 @@ impl RiscvAir { costs.insert(RiscvAirDiscriminants::Bls12381Decompress, bls12381_decompress.cost()); chips.push(bls12381_decompress); + let syscall_core = Chip::new(RiscvAir::SyscallCore(SyscallChip::core())); + costs.insert(RiscvAirDiscriminants::SyscallCore, syscall_core.cost()); + chips.push(syscall_core); + + let syscall_precompile = Chip::new(RiscvAir::SyscallPrecompile(SyscallChip::precompile())); + costs.insert(RiscvAirDiscriminants::SyscallPrecompile, syscall_precompile.cost()); + chips.push(syscall_precompile); + let div_rem = Chip::new(RiscvAir::DivRem(DivRemChip::default())); costs.insert(RiscvAirDiscriminants::DivRem, div_rem.cost()); chips.push(div_rem); @@ -284,15 +321,20 @@ impl RiscvAir { costs.insert(RiscvAirDiscriminants::Lt, lt.cost()); chips.push(lt); - let memory_init = - Chip::new(RiscvAir::MemoryInit(MemoryChip::new(MemoryChipType::Initialize))); - costs.insert(RiscvAirDiscriminants::MemoryInit, memory_init.cost()); - chips.push(memory_init); + let memory_global_init = Chip::new(RiscvAir::MemoryGlobalInit(MemoryGlobalChip::new( + MemoryChipType::Initialize, + ))); + costs.insert(RiscvAirDiscriminants::MemoryGlobalInit, memory_global_init.cost()); + chips.push(memory_global_init); - let memory_finalize = - Chip::new(RiscvAir::MemoryFinal(MemoryChip::new(MemoryChipType::Finalize))); - costs.insert(RiscvAirDiscriminants::MemoryFinal, memory_finalize.cost()); - chips.push(memory_finalize); + let memory_global_finalize = + Chip::new(RiscvAir::MemoryGlobalFinal(MemoryGlobalChip::new(MemoryChipType::Finalize))); + costs.insert(RiscvAirDiscriminants::MemoryGlobalFinal, memory_global_finalize.cost()); + chips.push(memory_global_finalize); + + let memory_local = Chip::new(RiscvAir::MemoryLocal(MemoryLocalChip::new())); + costs.insert(RiscvAirDiscriminants::MemoryLocal, memory_local.cost()); + chips.push(memory_local); let memory_program = Chip::new(RiscvAir::ProgramMemory(MemoryProgramChip::default())); costs.insert(RiscvAirDiscriminants::ProgramMemory, memory_program.cost()); @@ -304,6 +346,180 @@ impl RiscvAir { (chips, costs) } + + /// Get the heights of the preprocessed chips for a given program. + pub(crate) fn preprocessed_heights(program: &Program) -> Vec<(Self, usize)> { + vec![ + (RiscvAir::Program(ProgramChip::default()), program.instructions.len()), + (RiscvAir::ProgramMemory(MemoryProgramChip::default()), program.memory_image.len()), + (RiscvAir::ByteLookup(ByteChip::default()), 1 << 16), + ] + } + + /// Get the heights of the chips for a given execution record. 
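+ /// Each entry pairs a core chip with its unpadded row count in `record`; the shape config compares these counts against the allowed log heights of each cluster.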
+ pub(crate) fn core_heights(record: &ExecutionRecord) -> Vec<(Self, usize)> { + vec![ + (RiscvAir::Cpu(CpuChip::default()), record.cpu_events.len()), + (RiscvAir::DivRem(DivRemChip::default()), record.divrem_events.len()), + ( + RiscvAir::Add(AddSubChip::default()), + record.add_events.len() + record.sub_events.len(), + ), + (RiscvAir::Bitwise(BitwiseChip::default()), record.bitwise_events.len()), + (RiscvAir::Mul(MulChip::default()), record.mul_events.len()), + (RiscvAir::ShiftRight(ShiftRightChip::default()), record.shift_right_events.len()), + (RiscvAir::ShiftLeft(ShiftLeft::default()), record.shift_left_events.len()), + (RiscvAir::Lt(LtChip::default()), record.lt_events.len()), + ( + RiscvAir::MemoryLocal(MemoryLocalChip::new()), + record + .get_local_mem_events() + .chunks(NUM_LOCAL_MEMORY_ENTRIES_PER_ROW) + .into_iter() + .count(), + ), + (RiscvAir::SyscallCore(SyscallChip::core()), record.syscall_events.len()), + ] + } + + pub(crate) fn get_all_core_airs() -> Vec { + vec![ + RiscvAir::Cpu(CpuChip::default()), + RiscvAir::Add(AddSubChip::default()), + RiscvAir::Bitwise(BitwiseChip::default()), + RiscvAir::Mul(MulChip::default()), + RiscvAir::DivRem(DivRemChip::default()), + RiscvAir::Lt(LtChip::default()), + RiscvAir::ShiftLeft(ShiftLeft::default()), + RiscvAir::ShiftRight(ShiftRightChip::default()), + RiscvAir::MemoryLocal(MemoryLocalChip::new()), + RiscvAir::SyscallCore(SyscallChip::core()), + ] + } + + pub(crate) fn memory_init_final_airs() -> Vec { + vec![ + RiscvAir::MemoryGlobalInit(MemoryGlobalChip::new(MemoryChipType::Initialize)), + RiscvAir::MemoryGlobalFinal(MemoryGlobalChip::new(MemoryChipType::Finalize)), + ] + } + + pub(crate) fn get_memory_init_final_heights(record: &ExecutionRecord) -> Vec<(Self, usize)> { + vec![ + ( + RiscvAir::MemoryGlobalInit(MemoryGlobalChip::new(Initialize)), + record.global_memory_initialize_events.len(), + ), + ( + RiscvAir::MemoryGlobalFinal(MemoryGlobalChip::new(Finalize)), + record.global_memory_finalize_events.len(), + ), + ] + } + + pub(crate) fn get_all_precompile_airs() -> Vec<(Self, usize)> { + let mut airs: HashSet<_> = Self::get_airs_and_costs().0.into_iter().collect(); + for core_air in Self::get_all_core_airs() { + airs.remove(&core_air); + } + for memory_air in Self::memory_init_final_airs() { + airs.remove(&memory_air); + } + airs.remove(&Self::SyscallPrecompile(SyscallChip::precompile())); + + // Remove the preprocessed chips. 
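+ // What remains after these removals is exactly the set of precompile AIRs; each is paired below with the number of local-memory interactions its chip sends or receives, which is later used to size the MemoryLocal table for precompile shards.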
+ airs.remove(&Self::Program(ProgramChip::default())); + airs.remove(&Self::ProgramMemory(MemoryProgramChip::default())); + airs.remove(&Self::ByteLookup(ByteChip::default())); + + airs.into_iter() + .map(|air| { + let chip = Chip::new(air); + let local_mem_events: usize = chip + .sends() + .iter() + .chain(chip.receives()) + .filter(|interaction| { + interaction.kind == InteractionKind::Memory + && interaction.scope == InteractionScope::Local + }) + .count(); + + (chip.into_inner(), local_mem_events) + }) + .collect() + } + + pub(crate) fn rows_per_event(&self) -> usize { + match self { + Self::Sha256Compress(_) => 80, + Self::Sha256Extend(_) => 48, + Self::KeccakP(_) => 24, + _ => 1, + } + } + + pub(crate) fn syscall_code(&self) -> SyscallCode { + match self { + Self::Bls12381Add(_) => SyscallCode::BLS12381_ADD, + Self::Bn254Add(_) => SyscallCode::BN254_ADD, + Self::Bn254Double(_) => SyscallCode::BN254_DOUBLE, + Self::Bn254Fp(_) => SyscallCode::BN254_FP_ADD, + Self::Bn254Fp2AddSub(_) => SyscallCode::BN254_FP2_ADD, + Self::Bn254Fp2Mul(_) => SyscallCode::BN254_FP2_MUL, + Self::Ed25519Add(_) => SyscallCode::ED_ADD, + Self::Ed25519Decompress(_) => SyscallCode::ED_DECOMPRESS, + Self::KeccakP(_) => SyscallCode::KECCAK_PERMUTE, + Self::Secp256k1Add(_) => SyscallCode::SECP256K1_ADD, + Self::Secp256k1Double(_) => SyscallCode::SECP256K1_DOUBLE, + Self::Sha256Compress(_) => SyscallCode::SHA_COMPRESS, + Self::Sha256Extend(_) => SyscallCode::SHA_EXTEND, + Self::Uint256Mul(_) => SyscallCode::UINT256_MUL, + Self::Bls12381Decompress(_) => SyscallCode::BLS12381_DECOMPRESS, + Self::K256Decompress(_) => SyscallCode::SECP256K1_DECOMPRESS, + Self::Bls12381Double(_) => SyscallCode::BLS12381_DOUBLE, + Self::Bls12381Fp(_) => SyscallCode::BLS12381_FP_ADD, + Self::Bls12381Fp2Mul(_) => SyscallCode::BLS12381_FP2_MUL, + Self::Bls12381Fp2AddSub(_) => SyscallCode::BLS12381_FP2_ADD, + Self::Add(_) => unreachable!("Invalid for core chip"), + Self::Bitwise(_) => unreachable!("Invalid for core chip"), + Self::DivRem(_) => unreachable!("Invalid for core chip"), + Self::Cpu(_) => unreachable!("Invalid for core chip"), + Self::MemoryGlobalInit(_) => unreachable!("Invalid for memory init/final"), + Self::MemoryGlobalFinal(_) => unreachable!("Invalid for memory init/final"), + Self::MemoryLocal(_) => unreachable!("Invalid for memory local"), + Self::ProgramMemory(_) => unreachable!("Invalid for memory program"), + Self::Program(_) => unreachable!("Invalid for core chip"), + Self::Mul(_) => unreachable!("Invalid for core chip"), + Self::Lt(_) => unreachable!("Invalid for core chip"), + Self::ShiftRight(_) => unreachable!("Invalid for core chip"), + Self::ShiftLeft(_) => unreachable!("Invalid for core chip"), + Self::ByteLookup(_) => unreachable!("Invalid for core chip"), + Self::SyscallCore(_) => unreachable!("Invalid for core chip"), + Self::SyscallPrecompile(_) => unreachable!("Invalid for syscall precompile chip"), + } + } + + /// Get the height of the corresponding precompile chip. + /// + /// If the precompile is not included in the record, returns `None`. Otherwise, returns + /// `Some(num_rows, num_local_mem_events)`, where `num_rows` is the number of rows of the + /// corresponding chip and `num_local_mem_events` is the number of local memory events. 
+ pub(crate) fn get_precompile_heights( + &self, + record: &ExecutionRecord, + ) -> Option<(usize, usize)> { + record + .precompile_events + .get_events(self.syscall_code()) + .filter(|events| !events.is_empty()) + .map(|events| { + ( + events.len() * self.rows_per_event(), + events.get_local_mem_events().into_iter().count(), + ) + }) + } } impl PartialEq for RiscvAir { @@ -482,7 +698,7 @@ pub mod tests { let mut opts = SP1CoreOpts::default(); opts.shard_size = 1024; opts.shard_batch_size = 2; - prove::<_, CpuProver<_, _>>(program, &stdin, BabyBearPoseidon2::new(), opts).unwrap(); + prove::<_, CpuProver<_, _>>(program, &stdin, BabyBearPoseidon2::new(), opts, None).unwrap(); } #[test] @@ -495,6 +711,7 @@ pub mod tests { &stdin, BabyBearPoseidon2::new(), SP1CoreOpts::default(), + None, ) .unwrap(); } diff --git a/crates/core/machine/src/riscv/shape.rs b/crates/core/machine/src/riscv/shape.rs new file mode 100644 index 0000000000..2eb57338df --- /dev/null +++ b/crates/core/machine/src/riscv/shape.rs @@ -0,0 +1,814 @@ +use itertools::Itertools; + +use hashbrown::HashMap; +use num::Integer; +use p3_field::PrimeField32; +use p3_util::log2_ceil_usize; +use sp1_core_executor::{CoreShape, ExecutionRecord, Program}; +use sp1_stark::{air::MachineAir, MachineRecord, ProofShape}; +use thiserror::Error; + +use crate::{ + memory::{MemoryLocalChip, MemoryProgramChip, NUM_LOCAL_MEMORY_ENTRIES_PER_ROW}, + riscv::MemoryChipType::{Finalize, Initialize}, +}; + +use super::{ + AddSubChip, BitwiseChip, ByteChip, CpuChip, DivRemChip, LtChip, MemoryGlobalChip, MulChip, + ProgramChip, RiscvAir, ShiftLeft, ShiftRightChip, SyscallChip, +}; + +#[derive(Debug, Error)] +pub enum CoreShapeError { + #[error("no preprocessed shape found")] + PreprocessedShapeError, + #[error("Preprocessed shape already fixed")] + PreprocessedShapeAlreadyFixed, + #[error("no shape found {0:?}")] + ShapeError(HashMap), + #[error("Preprocessed shape missing")] + PrepcocessedShapeMissing, + #[error("Shape already fixed")] + ShapeAlreadyFixed, + #[error("Precompile not included in allowed shapes {0:?}")] + PrecompileNotIncluded(HashMap), +} + +/// A structure that enables fixing the shape of an executionrecord. +pub struct CoreShapeConfig { + included_shapes: Vec>, + allowed_preprocessed_log_heights: HashMap, Vec>>, + allowed_core_log_heights: Vec, Vec>>>, + maximal_core_log_heights_mask: Vec, + memory_allowed_log_heights: HashMap, Vec>>, + precompile_allowed_log_heights: HashMap, (usize, Vec)>, +} + +struct CoreShapeSpec { + cpu_height: Vec>, + add_sub_height: Vec>, + divrem_height: Vec>, + bitwise_height: Vec>, + mul_height: Vec>, + shift_right_height: Vec>, + shift_left_height: Vec>, + lt_height: Vec>, + memory_local_height: Vec>, + syscall_core_height: Vec>, + is_potentially_maximal: bool, +} + +impl CoreShapeConfig { + /// Fix the preprocessed shape of the proof. 
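+ /// The preprocessed shape fixes the padded heights of the Program, ProgramMemory, and ByteLookup tables; it is stored on the `Program` and later copied into each shard's shape, so all shards of the same program share the same preprocessed trace heights.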
+ pub fn fix_preprocessed_shape(&self, program: &mut Program) -> Result<(), CoreShapeError> { + if program.preprocessed_shape.is_some() { + return Err(CoreShapeError::PreprocessedShapeAlreadyFixed); + } + + let heights = RiscvAir::::preprocessed_heights(program); + let prep_shape = + Self::find_shape_from_allowed_heights(&heights, &self.allowed_preprocessed_log_heights) + .ok_or(CoreShapeError::PreprocessedShapeError)?; + + program.preprocessed_shape = Some(prep_shape); + Ok(()) + } + + #[inline] + fn find_shape_from_allowed_heights( + heights: &[(RiscvAir, usize)], + allowed_log_heights: &HashMap, Vec>>, + ) -> Option { + let shape: Option> = heights + .iter() + .map(|(air, height)| { + for maybe_allowed_log_height in allowed_log_heights.get(air).into_iter().flatten() { + let allowed_log_height = maybe_allowed_log_height.unwrap_or_default(); + let allowed_height = + if allowed_log_height != 0 { 1 << allowed_log_height } else { 0 }; + if *height <= allowed_height { + return Some((air.name(), allowed_log_height)); + } + } + None + }) + .collect(); + + let mut inner = shape?; + inner.retain(|_, &mut value| value != 0); + + let shape = CoreShape { inner }; + Some(shape) + } + + /// Fix the shape of the proof. + pub fn fix_shape(&self, record: &mut ExecutionRecord) -> Result<(), CoreShapeError> { + if record.program.preprocessed_shape.is_none() { + return Err(CoreShapeError::PrepcocessedShapeMissing); + } + if record.shape.is_some() { + return Err(CoreShapeError::ShapeAlreadyFixed); + } + + // Set the shape of the chips with prepcoded shapes to match the preprocessed shape from the + // program. + record.shape.clone_from(&record.program.preprocessed_shape); + + // If cpu is included, try to fix the shape as a core. + if record.contains_cpu() { + // If cpu is included, try to fix the shape as a core. + + // Get the heights of the core airs in the record. + let heights = RiscvAir::::core_heights(record); + + // Try to find a shape within the included shapes. + for (i, allowed_log_heights) in self.allowed_core_log_heights.iter().enumerate() { + if let Some(shape) = + Self::find_shape_from_allowed_heights(&heights, allowed_log_heights) + { + tracing::debug!( + "Shard Lifted: Index={}, Cluster={}", + record.public_values.shard, + i + ); + for (air, height) in heights.iter() { + if shape.inner.contains_key(&air.name()) { + tracing::debug!( + "Chip {:<20}: {:<3} -> {:<3}", + air.name(), + log2_ceil_usize(*height), + shape.inner[&air.name()], + ); + } + } + + record.shape.as_mut().unwrap().extend(shape); + return Ok(()); + } + } + + // No shape found, so return an error. + return Err(CoreShapeError::ShapeError(record.stats())); + } + + // If the record is a global memory init/finalize record, try to fix the shape as such. + if !record.global_memory_initialize_events.is_empty() + || !record.global_memory_finalize_events.is_empty() + { + let heights = RiscvAir::::get_memory_init_final_heights(record); + let shape = + Self::find_shape_from_allowed_heights(&heights, &self.memory_allowed_log_heights) + .ok_or(CoreShapeError::ShapeError(record.stats()))?; + record.shape.as_mut().unwrap().extend(shape); + return Ok(()); + } + + // Try to fix the shape as a precompile record. 
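+ // For the precompile AIR that actually has events in this record, find the first allowed log height that fits the event rows, then pick a candidate shape (precompile chip + SyscallPrecompile + MemoryLocal) whose MemoryLocal height also covers the record's local memory events.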
+ for (air, (mem_events_per_row, allowed_log_heights)) in + self.precompile_allowed_log_heights.iter() + { + if let Some((height, mem_events)) = air.get_precompile_heights(record) { + for allowed_log_height in allowed_log_heights { + if height <= (1 << allowed_log_height) { + for shape in self.get_precompile_shapes( + air, + *mem_events_per_row, + *allowed_log_height, + ) { + let mem_events_height = shape[2].1; + if mem_events + <= (1 << mem_events_height) * NUM_LOCAL_MEMORY_ENTRIES_PER_ROW + { + record.shape.as_mut().unwrap().extend(shape); + return Ok(()); + } + } + return Ok(()); + } + } + tracing::warn!( + "Cannot find shape for precompile {:?}, height {:?}, and mem events {:?}", + air.name(), + height, + mem_events + ); + return Err(CoreShapeError::ShapeError(record.stats())); + } + } + Err(CoreShapeError::PrecompileNotIncluded(record.stats())) + } + + fn get_precompile_shapes( + &self, + air: &RiscvAir, + mem_events_per_row: usize, + allowed_log_height: usize, + ) -> Vec<[(String, usize); 3]> { + (1..=air.rows_per_event()) + .rev() + .map(|rows_per_event| { + [ + (air.name(), allowed_log_height), + ( + RiscvAir::::SyscallPrecompile(SyscallChip::precompile()).name(), + ((1 << allowed_log_height) + .div_ceil(&air.rows_per_event()) + .next_power_of_two() + .ilog2() as usize) + .max(4), + ), + ( + RiscvAir::::MemoryLocal(MemoryLocalChip::new()).name(), + (((1 << allowed_log_height) * mem_events_per_row) + .div_ceil(NUM_LOCAL_MEMORY_ENTRIES_PER_ROW * rows_per_event) + .next_power_of_two() + .ilog2() as usize) + .max(4), + ), + ] + }) + .collect() + } + + fn generate_all_shapes_from_allowed_log_heights( + allowed_log_heights: impl IntoIterator>)>, + ) -> impl Iterator { + // for chip in allowed_heights. + allowed_log_heights + .into_iter() + .map(|(name, heights)| heights.into_iter().map(move |height| (name.clone(), height))) + .multi_cartesian_product() + .map(|iter| { + iter.into_iter() + .filter_map(|(name, maybe_height)| { + maybe_height.map(|log_height| (name, log_height)) + }) + .collect::() + }) + } + + pub fn generate_all_allowed_shapes(&self) -> impl Iterator + '_ { + let preprocessed_heights = self + .allowed_preprocessed_log_heights + .iter() + .map(|(air, heights)| (air.name(), heights.clone())); + + let mut memory_heights = self + .memory_allowed_log_heights + .iter() + .map(|(air, heights)| (air.name(), heights.clone())) + .collect::>(); + memory_heights.extend(preprocessed_heights.clone()); + + let included_shapes = + self.included_shapes.iter().cloned().map(|map| map.into_iter().collect::()); + + let precompile_only_shapes = self.precompile_allowed_log_heights.iter().flat_map( + move |(air, (mem_events_per_row, allowed_log_heights))| { + allowed_log_heights.iter().flat_map(move |allowed_log_height| { + self.get_precompile_shapes(air, *mem_events_per_row, *allowed_log_height) + }) + }, + ); + + let precompile_shapes = + Self::generate_all_shapes_from_allowed_log_heights(preprocessed_heights.clone()) + .flat_map(move |preprocessed_shape| { + precompile_only_shapes.clone().map(move |precompile_shape| { + preprocessed_shape + .clone() + .into_iter() + .chain(precompile_shape) + .collect::() + }) + }); + + included_shapes + .chain(self.allowed_core_log_heights.iter().flat_map(move |allowed_log_heights| { + Self::generate_all_shapes_from_allowed_log_heights({ + let mut log_heights = allowed_log_heights + .iter() + .map(|(air, heights)| (air.name(), heights.clone())) + .collect::>(); + log_heights.extend(preprocessed_heights.clone()); + log_heights + }) + })) + 
.chain(Self::generate_all_shapes_from_allowed_log_heights(memory_heights)) + .chain(precompile_shapes) + } + + pub fn maximal_core_shapes(&self) -> Vec { + let max_preprocessed = self + .allowed_preprocessed_log_heights + .iter() + .map(|(air, allowed_heights)| (air.name(), allowed_heights.last().unwrap().unwrap())); + + let max_core_shapes = self + .allowed_core_log_heights + .iter() + .zip(self.maximal_core_log_heights_mask.iter()) + .filter(|(_, mask)| **mask) + .map(|(allowed_log_heights, _)| { + max_preprocessed + .clone() + .chain(allowed_log_heights.iter().map(|(air, allowed_heights)| { + (air.name(), allowed_heights.last().unwrap().unwrap()) + })) + .collect::() + }); + + max_core_shapes.collect() + } +} + +impl Default for CoreShapeConfig { + fn default() -> Self { + // Preprocessed chip heights. + let program_heights = vec![Some(19), Some(20), Some(21), Some(22)]; + let program_memory_heights = vec![Some(19), Some(20), Some(21), Some(22)]; + + let allowed_preprocessed_log_heights = HashMap::from([ + (RiscvAir::Program(ProgramChip::default()), program_heights), + (RiscvAir::ProgramMemory(MemoryProgramChip::default()), program_memory_heights), + (RiscvAir::ByteLookup(ByteChip::default()), vec![Some(16)]), + ]); + + let core_shapes = [ + // Small program shapes: 2^14 -> 2^18. + CoreShapeSpec { + cpu_height: vec![Some(14)], + add_sub_height: vec![Some(14)], + lt_height: vec![Some(14)], + bitwise_height: vec![Some(14)], + shift_right_height: vec![Some(14)], + shift_left_height: vec![Some(14)], + syscall_core_height: vec![Some(14)], + memory_local_height: vec![Some(14)], + mul_height: vec![Some(14)], + divrem_height: vec![Some(14)], + is_potentially_maximal: false, + }, + CoreShapeSpec { + cpu_height: vec![Some(15)], + add_sub_height: vec![Some(15)], + lt_height: vec![Some(15)], + bitwise_height: vec![Some(15)], + shift_right_height: vec![Some(15)], + shift_left_height: vec![Some(15)], + syscall_core_height: vec![Some(15)], + memory_local_height: vec![Some(15)], + mul_height: vec![Some(15)], + divrem_height: vec![Some(15)], + is_potentially_maximal: false, + }, + CoreShapeSpec { + cpu_height: vec![Some(16)], + add_sub_height: vec![Some(16)], + lt_height: vec![Some(16)], + bitwise_height: vec![Some(16)], + shift_right_height: vec![Some(16)], + shift_left_height: vec![Some(16)], + syscall_core_height: vec![Some(16)], + memory_local_height: vec![Some(16)], + mul_height: vec![Some(16)], + divrem_height: vec![Some(16)], + is_potentially_maximal: false, + }, + CoreShapeSpec { + cpu_height: vec![Some(17)], + add_sub_height: vec![Some(17)], + lt_height: vec![Some(17)], + bitwise_height: vec![Some(17)], + shift_right_height: vec![Some(17)], + shift_left_height: vec![Some(17)], + syscall_core_height: vec![Some(17)], + memory_local_height: vec![Some(17)], + mul_height: vec![Some(17)], + divrem_height: vec![Some(17)], + is_potentially_maximal: false, + }, + CoreShapeSpec { + cpu_height: vec![Some(18)], + add_sub_height: vec![Some(18)], + lt_height: vec![Some(18)], + bitwise_height: vec![Some(18)], + shift_right_height: vec![Some(18)], + shift_left_height: vec![Some(18)], + syscall_core_height: vec![Some(18)], + memory_local_height: vec![Some(18)], + mul_height: vec![Some(18)], + divrem_height: vec![Some(18)], + is_potentially_maximal: false, + }, + // Small 2^19 shape variants. 
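+ // Clusters are tried in the order listed here, and the first one whose allowed heights cover every core chip's actual row count wins; a shard with roughly 2^19 CPU rows and modest ALU activity should therefore land in one of these variants rather than in the larger 2^21 clusters below.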
+ CoreShapeSpec { + cpu_height: vec![Some(19)], + add_sub_height: vec![Some(21)], + lt_height: vec![Some(16)], + bitwise_height: vec![Some(16)], + shift_right_height: vec![Some(16)], + shift_left_height: vec![Some(16)], + syscall_core_height: vec![Some(16)], + memory_local_height: vec![Some(16)], + mul_height: vec![Some(16)], + divrem_height: vec![Some(16)], + is_potentially_maximal: false, + }, + CoreShapeSpec { + cpu_height: vec![Some(19)], + add_sub_height: vec![Some(20)], + lt_height: vec![Some(20)], + bitwise_height: vec![Some(16)], + shift_right_height: vec![Some(16)], + shift_left_height: vec![Some(16)], + syscall_core_height: vec![Some(16)], + memory_local_height: vec![Some(16)], + mul_height: vec![Some(16)], + divrem_height: vec![Some(16)], + is_potentially_maximal: false, + }, + CoreShapeSpec { + cpu_height: vec![Some(19)], + add_sub_height: vec![Some(19)], + lt_height: vec![Some(19)], + bitwise_height: vec![Some(19)], + shift_right_height: vec![Some(19)], + shift_left_height: vec![Some(19)], + syscall_core_height: vec![Some(19)], + memory_local_height: vec![Some(19)], + mul_height: vec![Some(19)], + divrem_height: vec![Some(19)], + is_potentially_maximal: false, + }, + // All no-add chips in <= 1<<19. + // + // Most shapes should be included in this cluster. + CoreShapeSpec { + cpu_height: vec![Some(21)], + add_sub_height: vec![Some(21)], + lt_height: vec![Some(19)], + bitwise_height: vec![Some(18), Some(19)], + shift_right_height: vec![Some(16), Some(17), Some(18), Some(19)], + shift_left_height: vec![Some(16), Some(17), Some(18), Some(19)], + syscall_core_height: vec![Some(16), Some(17), Some(18)], + memory_local_height: vec![Some(16), Some(18), Some(18)], + mul_height: vec![Some(10), Some(16), Some(18)], + divrem_height: vec![Some(10), Some(16), Some(17)], + is_potentially_maximal: true, + }, + CoreShapeSpec { + cpu_height: vec![Some(21)], + add_sub_height: vec![Some(21)], + lt_height: vec![Some(20)], + bitwise_height: vec![None, Some(18), Some(19)], + shift_right_height: vec![None, Some(16), Some(17)], + shift_left_height: vec![None, Some(16), Some(17)], + syscall_core_height: vec![Some(16), Some(17)], + memory_local_height: vec![Some(16), Some(18), Some(18)], + mul_height: vec![None, Some(10), Some(16), Some(18)], + divrem_height: vec![None, Some(10), Some(16), Some(17)], + is_potentially_maximal: true, + }, + CoreShapeSpec { + cpu_height: vec![Some(21)], + add_sub_height: vec![Some(21)], + lt_height: vec![Some(19)], + bitwise_height: vec![Some(17), Some(18)], + shift_right_height: vec![Some(16), Some(17), Some(18), Some(19)], + shift_left_height: vec![Some(16), Some(17), Some(18), Some(19)], + syscall_core_height: vec![Some(16), Some(17), Some(19)], + memory_local_height: vec![Some(16), Some(18), Some(19)], + mul_height: vec![Some(10), Some(16), Some(18)], + divrem_height: vec![Some(10), Some(16), Some(17)], + is_potentially_maximal: true, + }, + CoreShapeSpec { + cpu_height: vec![Some(21)], + add_sub_height: vec![Some(21)], + lt_height: vec![Some(19)], + bitwise_height: vec![Some(17), Some(18)], + shift_right_height: vec![Some(16), Some(17), Some(18), Some(19)], + shift_left_height: vec![Some(16), Some(17), Some(18), Some(19)], + syscall_core_height: vec![Some(16), Some(17), Some(19)], + memory_local_height: vec![Some(16), Some(18), Some(19)], + mul_height: vec![Some(10), Some(16), Some(18)], + divrem_height: vec![Some(10), Some(16), Some(17)], + is_potentially_maximal: true, + }, + CoreShapeSpec { + cpu_height: vec![Some(21)], + add_sub_height: vec![Some(19), Some(20)], 
+ lt_height: vec![Some(19)], + bitwise_height: vec![Some(20)], + shift_right_height: vec![Some(16), Some(17), Some(18), Some(19)], + shift_left_height: vec![Some(16), Some(17), Some(18), Some(19)], + syscall_core_height: vec![Some(16), Some(17), Some(19)], + memory_local_height: vec![Some(16), Some(18), Some(19)], + mul_height: vec![Some(10), Some(16), Some(18)], + divrem_height: vec![Some(10), Some(16), Some(17)], + is_potentially_maximal: true, + }, + // LT in <= 1<<20 + // + // For records with a lot of `LT` instructions, but less than 1<<20, this cluster is + // appropriate. + CoreShapeSpec { + cpu_height: vec![Some(21)], + add_sub_height: vec![Some(21)], + lt_height: vec![Some(20)], + bitwise_height: vec![Some(17), Some(18)], + shift_right_height: vec![Some(17), Some(18)], + shift_left_height: vec![Some(17), Some(18)], + syscall_core_height: vec![Some(17), Some(18)], + memory_local_height: vec![Some(16), Some(18), Some(19)], + mul_height: vec![Some(10), Some(16), Some(18)], + divrem_height: vec![Some(10), Some(16), Some(17)], + is_potentially_maximal: true, + }, + CoreShapeSpec { + cpu_height: vec![Some(21)], + add_sub_height: vec![Some(20)], + lt_height: vec![Some(20)], + bitwise_height: vec![Some(17), Some(18), Some(19)], + shift_right_height: vec![Some(17), Some(18)], + shift_left_height: vec![Some(17), Some(18)], + syscall_core_height: vec![Some(17), Some(18)], + memory_local_height: vec![Some(16), Some(18), Some(19)], + mul_height: vec![Some(10), Some(16), Some(18)], + divrem_height: vec![Some(10), Some(16), Some(17)], + is_potentially_maximal: true, + }, + // LT in <= 1<<21 + // + // For records with a lot of `LT` instructions, and more than 1<<20, this cluster is + // appropriate. + CoreShapeSpec { + cpu_height: vec![Some(21)], + add_sub_height: vec![Some(21)], + lt_height: vec![Some(21)], + bitwise_height: vec![Some(17)], + shift_right_height: vec![Some(17)], + shift_left_height: vec![Some(17)], + syscall_core_height: vec![Some(17)], + memory_local_height: vec![Some(16), Some(18)], + mul_height: vec![Some(10), Some(16), Some(18)], + divrem_height: vec![Some(10), Some(16), Some(17)], + is_potentially_maximal: true, + }, + // Bitwise in <= 1<<20 + CoreShapeSpec { + cpu_height: vec![Some(21)], + add_sub_height: vec![Some(21)], + lt_height: vec![Some(19)], + bitwise_height: vec![Some(20)], + shift_right_height: vec![Some(19)], + shift_left_height: vec![Some(19)], + syscall_core_height: vec![Some(18)], + memory_local_height: vec![Some(16), Some(18)], + mul_height: vec![Some(10), Some(16), Some(18)], + divrem_height: vec![Some(10), Some(16)], + is_potentially_maximal: true, + }, + // Bitwise in <= 1<<21 + CoreShapeSpec { + cpu_height: vec![Some(21)], + add_sub_height: vec![Some(21)], + lt_height: vec![Some(17)], + bitwise_height: vec![Some(21)], + shift_right_height: vec![Some(17)], + shift_left_height: vec![Some(17)], + syscall_core_height: vec![Some(16), Some(17)], + memory_local_height: vec![Some(16), Some(18)], + mul_height: vec![Some(10), Some(16), Some(18)], + divrem_height: vec![Some(10), Some(16), Some(17)], + is_potentially_maximal: true, + }, + // SLL in <= 1<<20 + CoreShapeSpec { + cpu_height: vec![Some(21)], + add_sub_height: vec![Some(18)], + lt_height: vec![Some(20)], + bitwise_height: vec![Some(18)], + shift_right_height: vec![Some(18)], + shift_left_height: vec![Some(20)], + syscall_core_height: vec![Some(16), Some(18)], + memory_local_height: vec![Some(16), Some(18), Some(19)], + mul_height: vec![Some(10), Some(16), Some(18)], + divrem_height: vec![Some(10), 
Some(16), Some(17)], + is_potentially_maximal: true, + }, + // SLL in <= 1<<21 + CoreShapeSpec { + cpu_height: vec![Some(21)], + add_sub_height: vec![Some(21)], + lt_height: vec![Some(17)], + bitwise_height: vec![Some(17)], + shift_right_height: vec![Some(17)], + shift_left_height: vec![Some(21)], + syscall_core_height: vec![Some(17)], + memory_local_height: vec![Some(16), Some(18)], + mul_height: vec![Some(10), Some(16), Some(18)], + divrem_height: vec![Some(10), Some(16), Some(17)], + is_potentially_maximal: true, + }, + // SRL in <= 1<<20 + CoreShapeSpec { + cpu_height: vec![Some(21)], + add_sub_height: vec![Some(18)], + lt_height: vec![Some(20)], + bitwise_height: vec![Some(18)], + shift_right_height: vec![Some(20)], + shift_left_height: vec![Some(19)], + syscall_core_height: vec![Some(18)], + memory_local_height: vec![Some(16), Some(18), Some(19)], + mul_height: vec![Some(10), Some(16), Some(18)], + divrem_height: vec![Some(10), Some(16), Some(17)], + is_potentially_maximal: true, + }, + // Shards with basic arithmetic and branching. + CoreShapeSpec { + cpu_height: vec![Some(21)], + add_sub_height: vec![Some(21)], + lt_height: vec![Some(19)], + bitwise_height: vec![Some(6)], + shift_right_height: vec![Some(19)], + shift_left_height: vec![Some(6)], + syscall_core_height: vec![Some(6)], + memory_local_height: vec![Some(16)], + mul_height: vec![Some(19)], + divrem_height: vec![Some(6)], + is_potentially_maximal: true, + }, + // Shards with many mul events. + CoreShapeSpec { + cpu_height: vec![Some(21)], + add_sub_height: vec![Some(21)], + lt_height: vec![Some(20)], + bitwise_height: vec![Some(17), Some(18)], + shift_right_height: vec![Some(17)], + shift_left_height: vec![Some(17)], + syscall_core_height: vec![Some(16)], + memory_local_height: vec![Some(16)], + mul_height: vec![Some(19), Some(20)], + divrem_height: vec![Some(10), Some(16)], + is_potentially_maximal: true, + }, + ]; + + let mut allowed_core_log_heights = vec![]; + let mut maximal_core_log_heights_mask = vec![]; + for spec in core_shapes { + let short_allowed_log_heights = HashMap::from([ + (RiscvAir::Cpu(CpuChip::default()), spec.cpu_height), + (RiscvAir::Add(AddSubChip::default()), spec.add_sub_height), + (RiscvAir::Bitwise(BitwiseChip::default()), spec.bitwise_height), + (RiscvAir::DivRem(DivRemChip::default()), spec.divrem_height), + (RiscvAir::Mul(MulChip::default()), spec.mul_height), + (RiscvAir::ShiftRight(ShiftRightChip::default()), spec.shift_right_height), + (RiscvAir::ShiftLeft(ShiftLeft::default()), spec.shift_left_height), + (RiscvAir::Lt(LtChip::default()), spec.lt_height), + (RiscvAir::MemoryLocal(MemoryLocalChip::new()), spec.memory_local_height), + (RiscvAir::SyscallCore(SyscallChip::core()), spec.syscall_core_height), + ]); + allowed_core_log_heights.push(short_allowed_log_heights); + maximal_core_log_heights_mask.push(spec.is_potentially_maximal); + } + + // Set the memory init and finalize heights. 
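+ // Allowed padded log2 heights for the global memory initialize/finalize tables, used when a record contains only global memory events; a `None` entry means the corresponding table may be absent, i.e. it only matches a record with zero events for that chip.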
+ let memory_init_heights = + vec![None, Some(10), Some(16), Some(18), Some(19), Some(20), Some(21)]; + let memory_finalize_heights = + vec![None, Some(10), Some(16), Some(18), Some(19), Some(20), Some(21)]; + let memory_allowed_log_heights = HashMap::from([ + (RiscvAir::MemoryGlobalInit(MemoryGlobalChip::new(Initialize)), memory_init_heights), + (RiscvAir::MemoryGlobalFinal(MemoryGlobalChip::new(Finalize)), memory_finalize_heights), + ]); + + let mut precompile_allowed_log_heights = HashMap::new(); + let precompile_heights = (3..19).collect::>(); + for (air, mem_events_per_row) in RiscvAir::::get_all_precompile_airs() { + precompile_allowed_log_heights + .insert(air, (mem_events_per_row, precompile_heights.clone())); + } + + Self { + included_shapes: vec![], + allowed_preprocessed_log_heights, + allowed_core_log_heights, + maximal_core_log_heights_mask, + memory_allowed_log_heights, + precompile_allowed_log_heights, + } + } +} + +#[cfg(any(test, feature = "programs"))] +pub mod tests { + use std::fmt::Debug; + + use p3_challenger::{CanObserve, FieldChallenger}; + use sp1_stark::{air::InteractionScope, Dom, MachineProver, StarkGenericConfig}; + + use super::*; + + pub fn try_generate_dummy_proof< + SC: StarkGenericConfig, + P: MachineProver>, + >( + prover: &P, + shape: &CoreShape, + ) where + SC::Val: PrimeField32, + Dom: Debug, + { + let program = shape.dummy_program(); + let record = shape.dummy_record(); + + // Try doing setup. + let (pk, _) = prover.setup(&program); + + // Try to generate traces. + let global_traces = prover.generate_traces(&record, InteractionScope::Global); + let local_traces = prover.generate_traces(&record, InteractionScope::Local); + + // Try to commit the traces. + let global_data = prover.commit(&record, global_traces); + let local_data = prover.commit(&record, local_traces); + + let mut challenger = prover.machine().config().challenger(); + challenger.observe(global_data.main_commit.clone()); + challenger.observe(local_data.main_commit.clone()); + + let global_permutation_challenges: [::Challenge; 2] = + [challenger.sample_ext_element(), challenger.sample_ext_element()]; + + // Try to "open". 
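+ // Running `open` against the all-zero dummy record exercises the same commit/observe/open path as a real proof, so this helper doubles as a sanity check that the candidate shape is actually provable.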
+ prover + .open( + &pk, + Some(global_data), + local_data, + &mut challenger, + &global_permutation_challenges, + ) + .unwrap(); + } + + #[test] + #[ignore] + fn test_making_shapes() { + use p3_baby_bear::BabyBear; + let shape_config = CoreShapeConfig::::default(); + let num_shapes = shape_config.generate_all_allowed_shapes().count(); + println!("There are {} core shapes", num_shapes); + assert!(num_shapes < 1 << 24); + } + + #[test] + fn test_dummy_record() { + use crate::utils::setup_logger; + use p3_baby_bear::BabyBear; + use sp1_stark::baby_bear_poseidon2::BabyBearPoseidon2; + use sp1_stark::CpuProver; + + type SC = BabyBearPoseidon2; + type A = RiscvAir; + + setup_logger(); + + let preprocessed_log_heights = [ + (RiscvAir::::Program(ProgramChip::default()), 10), + (RiscvAir::::ProgramMemory(MemoryProgramChip::default()), 10), + (RiscvAir::::ByteLookup(ByteChip::default()), 16), + ]; + + let core_log_heights = [ + (RiscvAir::::Cpu(CpuChip::default()), 11), + (RiscvAir::::DivRem(DivRemChip::default()), 11), + (RiscvAir::::Add(AddSubChip::default()), 10), + (RiscvAir::::Bitwise(BitwiseChip::default()), 10), + (RiscvAir::::Mul(MulChip::default()), 10), + (RiscvAir::::ShiftRight(ShiftRightChip::default()), 10), + (RiscvAir::::ShiftLeft(ShiftLeft::default()), 10), + (RiscvAir::::Lt(LtChip::default()), 10), + (RiscvAir::::MemoryLocal(MemoryLocalChip::new()), 10), + (RiscvAir::::SyscallCore(SyscallChip::core()), 10), + ]; + + let height_map = preprocessed_log_heights + .into_iter() + .chain(core_log_heights) + .map(|(air, log_height)| (air.name(), log_height)) + .collect::>(); + + let shape = CoreShape { inner: height_map }; + + // Try generating preprocessed traces. + let config = SC::default(); + let machine = A::machine(config); + let prover = CpuProver::new(machine); + + try_generate_dummy_proof(&prover, &shape); + } +} diff --git a/crates/core/machine/src/syscall/chip.rs b/crates/core/machine/src/syscall/chip.rs new file mode 100644 index 0000000000..00257d46aa --- /dev/null +++ b/crates/core/machine/src/syscall/chip.rs @@ -0,0 +1,228 @@ +use core::fmt; +use std::{ + borrow::{Borrow, BorrowMut}, + mem::size_of, +}; + +use p3_air::{Air, BaseAir}; +use p3_field::PrimeField32; +use p3_matrix::{dense::RowMajorMatrix, Matrix}; +use sp1_core_executor::{events::SyscallEvent, ExecutionRecord, Program}; +use sp1_derive::AlignedBorrow; +use sp1_stark::air::{InteractionScope, MachineAir, SP1AirBuilder}; + +use crate::utils::pad_rows_fixed; + +/// The number of main trace columns for `SyscallChip`. +pub const NUM_SYSCALL_COLS: usize = size_of::>(); + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum SyscallShardKind { + Core, + Precompile, +} + +/// A chip that stores the syscall invocations. +pub struct SyscallChip { + shard_kind: SyscallShardKind, +} + +impl SyscallChip { + pub const fn new(shard_kind: SyscallShardKind) -> Self { + Self { shard_kind } + } + + pub const fn core() -> Self { + Self::new(SyscallShardKind::Core) + } + + pub const fn precompile() -> Self { + Self::new(SyscallShardKind::Precompile) + } +} + +/// The column layout for the chip. +#[derive(AlignedBorrow, Default, Clone, Copy)] +#[repr(C)] +pub struct SyscallCols { + /// The shard number of the syscall. + pub shard: T, + + /// The clk of the syscall. + pub clk: T, + + pub nonce: T, + + /// The syscall_id of the syscall. + pub syscall_id: T, + + /// The arg1. + pub arg1: T, + + /// The arg2. 
+ pub arg2: T, + + pub is_real: T, +} + +impl MachineAir for SyscallChip { + type Record = ExecutionRecord; + + type Program = Program; + + fn name(&self) -> String { + format!("Syscall{}", self.shard_kind).to_string() + } + + fn generate_dependencies(&self, _input: &ExecutionRecord, _output: &mut ExecutionRecord) { + // Do nothing since this chip has no dependencies. + } + + fn generate_trace( + &self, + input: &ExecutionRecord, + _output: &mut ExecutionRecord, + ) -> RowMajorMatrix { + let mut rows = Vec::new(); + + let row_fn = |syscall_event: &SyscallEvent| { + let mut row = [F::zero(); NUM_SYSCALL_COLS]; + let cols: &mut SyscallCols = row.as_mut_slice().borrow_mut(); + + cols.shard = F::from_canonical_u32(syscall_event.shard); + cols.clk = F::from_canonical_u32(syscall_event.clk); + cols.syscall_id = F::from_canonical_u32(syscall_event.syscall_id); + cols.nonce = F::from_canonical_u32(syscall_event.nonce); + cols.arg1 = F::from_canonical_u32(syscall_event.arg1); + cols.arg2 = F::from_canonical_u32(syscall_event.arg2); + cols.is_real = F::one(); + row + }; + + match self.shard_kind { + SyscallShardKind::Core => { + for event in input.syscall_events.iter() { + let row = row_fn(event); + rows.push(row); + } + } + SyscallShardKind::Precompile => { + for event in input.precompile_events.all_events().map(|(event, _)| event) { + let row = row_fn(event); + rows.push(row); + } + } + }; + + // Pad the trace to a power of two depending on the proof shape in `input`. + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_SYSCALL_COLS], + input.fixed_log2_rows::(self), + ); + + RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_SYSCALL_COLS) + } + + fn included(&self, shard: &Self::Record) -> bool { + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + match self.shard_kind { + SyscallShardKind::Core => !shard.syscall_events.is_empty(), + SyscallShardKind::Precompile => { + !shard.precompile_events.is_empty() + && shard.cpu_events.is_empty() + && shard.global_memory_initialize_events.is_empty() + && shard.global_memory_finalize_events.is_empty() + } + } + } + } + + fn commit_scope(&self) -> InteractionScope { + InteractionScope::Global + } +} + +impl Air for SyscallChip +where + AB: SP1AirBuilder, +{ + fn eval(&self, builder: &mut AB) { + let main = builder.main(); + let local = main.row_slice(0); + let local: &SyscallCols = (*local).borrow(); + + builder.assert_eq( + local.is_real * local.is_real * local.is_real, + local.is_real * local.is_real * local.is_real, + ); + + match self.shard_kind { + SyscallShardKind::Core => { + builder.receive_syscall( + local.shard, + local.clk, + local.nonce, + local.syscall_id, + local.arg1, + local.arg2, + local.is_real, + InteractionScope::Local, + ); + + // Send the call to the global bus to/from the precompile chips. + builder.send_syscall( + local.shard, + local.clk, + local.nonce, + local.syscall_id, + local.arg1, + local.arg2, + local.is_real, + InteractionScope::Global, + ); + } + SyscallShardKind::Precompile => { + builder.send_syscall( + local.shard, + local.clk, + local.nonce, + local.syscall_id, + local.arg1, + local.arg2, + local.is_real, + InteractionScope::Local, + ); + + // Send the call to the global bus to/from the precompile chips. 
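+ // Mirror of the core branch above: in a precompile shard the syscall table receives the call over the global bus (where the core shard's table sent it) and, via the local send above, forwards it to the precompile chip in this shard.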
+ builder.receive_syscall( + local.shard, + local.clk, + local.nonce, + local.syscall_id, + local.arg1, + local.arg2, + local.is_real, + InteractionScope::Global, + ); + } + } + } +} + +impl BaseAir for SyscallChip { + fn width(&self) -> usize { + NUM_SYSCALL_COLS + } +} + +impl fmt::Display for SyscallShardKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + SyscallShardKind::Core => write!(f, "Core"), + SyscallShardKind::Precompile => write!(f, "Precompile"), + } + } +} diff --git a/crates/core/machine/src/syscall/mod.rs b/crates/core/machine/src/syscall/mod.rs index c0363937e7..ab4b7db7fc 100644 --- a/crates/core/machine/src/syscall/mod.rs +++ b/crates/core/machine/src/syscall/mod.rs @@ -1 +1,2 @@ +pub mod chip; pub mod precompiles; diff --git a/crates/core/machine/src/syscall/precompiles/README.md b/crates/core/machine/src/syscall/precompiles/README.md index 6ddd0e1e1c..1b7c645dd5 100644 --- a/crates/core/machine/src/syscall/precompiles/README.md +++ b/crates/core/machine/src/syscall/precompiles/README.md @@ -27,7 +27,6 @@ Define the necessary data structures that your chip will use. This might include #[repr(C)] pub struct CustomOpCols { pub shard: T, - pub channel: T, pub clk: T, pub x_ptr: T, pub y_ptr: T, @@ -47,7 +46,7 @@ impl Syscall for Uint256MulChip { 1 } - fn execute(&self, rt: &mut SyscallContext, arg1: u32, arg2: u32) -> Option { + fn execute(&self, rt: &mut SyscallContext, syscall: SyscallCode, arg1: u32, arg2: u32) -> Option { // Your execution logic here // Parse input pointers, perform the multiplication, and write the result } @@ -80,76 +79,40 @@ impl MachineAir for CustomOpChip { } } ``` -You will also have to update `core/src/runtime/record.rs` accordingly to handle these new events. +You will also have to update `core/executor/src/events/precompiles/mod.rs` accordingly to register the new precompile op. #### Add a new field for your chip's events -In the `ExecutionRecord` struct, add a new field to track events specific to your chip. This field will store all events generated by your chip during execution. +In the `PrecompileEvent` enum, add a new variant for you precompile op. ```rust -#[derive(Default, Clone, Debug, Serialize, Deserialize)] -pub struct ExecutionRecord { - // Other existing fields... +#[derive(Clone, Debug, Serialize, Deserialize, EnumIter)] +/// Precompile event. There should be one variant for every precompile syscall. +pub enum PrecompileEvent { + // Other existing variants... - /// A trace of the events for your custom operation. - pub custom_op_events: Vec, + /// A variant for your custom operation. + pub CustomOp(CustomOpEvent), } ``` -#### Update the `stats` method -In the `stats` method, add an entry to track the number of events associated with your chip. +#### Update the `get_local_mem_events` method +In the `get_local_mem_events` method, add your variant to the match statement to add an iterator of the op's local +memory events (if it has local memory events). ```rust -fn stats(&self) -> HashMap { - let mut stats = HashMap::new(); - // Other existing entries... +fn get_local_mem_events(&self) -> impl IntoIterator { + let mut iterators = Vec::new(); - stats.insert("custom_op_events".to_string(), self.custom_op_events.len()); - // Add other stats as necessary - stats -} -``` -#### Update the `append` method -In the append method, ensure that events from your chip are correctly appended when merging two `ExecutionRecord` instances. 
- -```rust -fn append(&mut self, other: &mut ExecutionRecord) { - // Other existing append operations... - - self.custom_op_events.append(&mut other.custom_op_events); -} -``` - -#### Update the `defer` method -Modify the `defer` method to handle the deferring of events specific to your chip. - -```rust -pub fn defer(&mut self) -> ExecutionRecord { - ExecutionRecord { - // Other deferred events... + for event in self.iter() { + match event { + // Other existing variants... - custom_op_events: std::mem::take(&mut self.custom_op_events), - ..Default::default() + PrecompileEvent::CustomOp(e) => { + iterators.push(e.local_mem_access.iter()); + } + } } -} -``` - -#### Update the `split` method -In the `split` method, ensure that events associated with your chip are properly split when distributing deferred events across shards. - -```rust -pub fn split(&mut self, last: bool, opts: SplitOpts) -> Vec { - let mut shards = Vec::new(); - - split_events!( - self, - custom_op_events, - shards, - opts.deferred_shift_threshold, - last - ); - - // Other event splits... - shards + iterators.into_iter().flatten() } ``` diff --git a/crates/core/machine/src/syscall/precompiles/edwards/ed_add.rs b/crates/core/machine/src/syscall/precompiles/edwards/ed_add.rs index 78633e269d..45c69af419 100644 --- a/crates/core/machine/src/syscall/precompiles/edwards/ed_add.rs +++ b/crates/core/machine/src/syscall/precompiles/edwards/ed_add.rs @@ -14,7 +14,7 @@ use p3_field::{AbstractField, PrimeField32}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use p3_maybe_rayon::prelude::{IntoParallelRefIterator, ParallelIterator, ParallelSlice}; use sp1_core_executor::{ - events::{ByteLookupEvent, ByteRecord, EllipticCurveAddEvent, FieldOperation}, + events::{ByteLookupEvent, ByteRecord, EllipticCurveAddEvent, FieldOperation, PrecompileEvent}, syscalls::SyscallCode, ExecutionRecord, Program, }; @@ -24,14 +24,14 @@ use sp1_curves::{ AffinePoint, EllipticCurve, }; use sp1_derive::AlignedBorrow; -use sp1_stark::air::{BaseAirBuilder, MachineAir, SP1AirBuilder}; +use sp1_stark::air::{BaseAirBuilder, InteractionScope, MachineAir, SP1AirBuilder}; use crate::{ memory::{value_as_limbs, MemoryReadCols, MemoryWriteCols}, operations::field::{ field_den::FieldDenCols, field_inner_product::FieldInnerProductCols, field_op::FieldOpCols, }, - utils::{limbs_from_prev_access, pad_rows}, + utils::{limbs_from_prev_access, pad_rows_fixed}, }; pub const NUM_ED_ADD_COLS: usize = size_of::>(); @@ -44,7 +44,6 @@ pub const NUM_ED_ADD_COLS: usize = size_of::>(); pub struct EdAddAssignCols { pub is_real: T, pub shard: T, - pub channel: T, pub clk: T, pub nonce: T, pub p_ptr: T, @@ -75,7 +74,6 @@ impl EdAddAssignChip { fn populate_field_ops( record: &mut impl ByteRecord, shard: u32, - channel: u8, cols: &mut EdAddAssignCols, p_x: BigUint, p_y: BigUint, @@ -85,29 +83,24 @@ impl EdAddAssignChip { let x3_numerator = cols.x3_numerator.populate( record, shard, - channel, &[p_x.clone(), q_x.clone()], &[q_y.clone(), p_y.clone()], ); let y3_numerator = cols.y3_numerator.populate( record, shard, - channel, &[p_y.clone(), p_x.clone()], &[q_y.clone(), q_x.clone()], ); - let x1_mul_y1 = - cols.x1_mul_y1.populate(record, shard, channel, &p_x, &p_y, FieldOperation::Mul); - let x2_mul_y2 = - cols.x2_mul_y2.populate(record, shard, channel, &q_x, &q_y, FieldOperation::Mul); - let f = - cols.f.populate(record, shard, channel, &x1_mul_y1, &x2_mul_y2, FieldOperation::Mul); + let x1_mul_y1 = cols.x1_mul_y1.populate(record, shard, &p_x, &p_y, FieldOperation::Mul); + let x2_mul_y2 = 
cols.x2_mul_y2.populate(record, shard, &q_x, &q_y, FieldOperation::Mul); + let f = cols.f.populate(record, shard, &x1_mul_y1, &x2_mul_y2, FieldOperation::Mul); let d = E::d_biguint(); - let d_mul_f = cols.d_mul_f.populate(record, shard, channel, &f, &d, FieldOperation::Mul); + let d_mul_f = cols.d_mul_f.populate(record, shard, &f, &d, FieldOperation::Mul); - cols.x3_ins.populate(record, shard, channel, &x3_numerator, &d_mul_f, true); - cols.y3_ins.populate(record, shard, channel, &y3_numerator, &d_mul_f, false); + cols.x3_ins.populate(record, shard, &x3_numerator, &d_mul_f, true); + cols.y3_ins.populate(record, shard, &y3_numerator, &d_mul_f, false); } } @@ -125,10 +118,17 @@ impl MachineAir for Ed input: &ExecutionRecord, _: &mut ExecutionRecord, ) -> RowMajorMatrix { - let mut rows = input - .ed_add_events + let events = input.get_precompile_events(SyscallCode::ED_ADD); + + let mut rows = events .par_iter() - .map(|event| { + .map(|(_, event)| { + let event = if let PrecompileEvent::EdAdd(event) = event { + event + } else { + unreachable!(); + }; + let mut row = [F::zero(); NUM_ED_ADD_COLS]; let cols: &mut EdAddAssignCols = row.as_mut_slice().borrow_mut(); let mut blu = Vec::new(); @@ -137,22 +137,25 @@ impl MachineAir for Ed }) .collect::>(); - pad_rows(&mut rows, || { - let mut row = [F::zero(); NUM_ED_ADD_COLS]; - let cols: &mut EdAddAssignCols = row.as_mut_slice().borrow_mut(); - let zero = BigUint::zero(); - Self::populate_field_ops( - &mut vec![], - 0, - 0, - cols, - zero.clone(), - zero.clone(), - zero.clone(), - zero, - ); - row - }); + pad_rows_fixed( + &mut rows, + || { + let mut row = [F::zero(); NUM_ED_ADD_COLS]; + let cols: &mut EdAddAssignCols = row.as_mut_slice().borrow_mut(); + let zero = BigUint::zero(); + Self::populate_field_ops( + &mut vec![], + 0, + cols, + zero.clone(), + zero.clone(), + zero.clone(), + zero, + ); + row + }, + input.fixed_log2_rows::(self), + ); // Convert the trace to a row major matrix. let mut trace = @@ -169,14 +172,20 @@ impl MachineAir for Ed } fn generate_dependencies(&self, input: &Self::Record, output: &mut Self::Record) { - let chunk_size = std::cmp::max(input.ed_add_events.len() / num_cpus::get(), 1); + let events = input.get_precompile_events(SyscallCode::ED_ADD); + let chunk_size = std::cmp::max(events.len() / num_cpus::get(), 1); - let blu_batches = input - .ed_add_events + let blu_batches = events .par_chunks(chunk_size) .map(|events| { let mut blu: HashMap> = HashMap::new(); - events.iter().for_each(|event| { + events.iter().for_each(|(_, event)| { + let event = if let PrecompileEvent::EdAdd(event) = event { + event + } else { + unreachable!(); + }; + let mut row = [F::zero(); NUM_ED_ADD_COLS]; let cols: &mut EdAddAssignCols = row.as_mut_slice().borrow_mut(); self.event_to_row(event, cols, &mut blu); @@ -189,7 +198,11 @@ impl MachineAir for Ed } fn included(&self, shard: &Self::Record) -> bool { - !shard.ed_add_events.is_empty() + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + !shard.get_precompile_events(SyscallCode::ED_ADD).is_empty() + } } } @@ -212,19 +225,18 @@ impl EdAddAssignChip { // Populate basic columns. 
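The `if let PrecompileEvent::EdAdd(..)` narrowing above is the pattern every precompile chip in this change now follows: the record hands back `(SyscallEvent, PrecompileEvent)` pairs for one syscall code, and the chip downcasts to its own variant, treating any other variant as a bookkeeping bug. A small standalone sketch with simplified stand-in types (not the real `sp1_core_executor` definitions):

```rust
#[derive(Debug, Clone)]
struct EdAddEvent {
    shard: u32,
    clk: u32,
}

#[derive(Debug, Clone)]
#[allow(dead_code)]
enum PrecompileEvent {
    EdAdd(EdAddEvent),
    // ... one variant per precompile syscall.
    Other,
}

// Turn the ED_ADD bucket of events into (shard, clk) pairs, mirroring how a
// trace generator walks the result of get_precompile_events(SyscallCode::ED_ADD).
fn rows_for_ed_add(events: &[PrecompileEvent]) -> Vec<(u32, u32)> {
    events
        .iter()
        .map(|event| {
            // Anything other than the EdAdd variant in this bucket is a
            // bookkeeping bug, hence the unreachable!().
            let event = if let PrecompileEvent::EdAdd(event) = event {
                event
            } else {
                unreachable!("non-ED_ADD event in the ED_ADD bucket");
            };
            (event.shard, event.clk)
        })
        .collect()
}

fn main() {
    let events = vec![PrecompileEvent::EdAdd(EdAddEvent { shard: 1, clk: 8 })];
    assert_eq!(rows_for_ed_add(&events), vec![(1, 8)]);
}
```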
cols.is_real = F::one(); cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); cols.clk = F::from_canonical_u32(event.clk); cols.p_ptr = F::from_canonical_u32(event.p_ptr); cols.q_ptr = F::from_canonical_u32(event.q_ptr); - Self::populate_field_ops(blu, event.shard, event.channel, cols, p_x, p_y, q_x, q_y); + Self::populate_field_ops(blu, event.shard, cols, p_x, p_y, q_x, q_y); // Populate the memory access columns. for i in 0..WORDS_CURVE_POINT { - cols.q_access[i].populate(event.channel, event.q_memory_records[i], blu); + cols.q_access[i].populate(event.q_memory_records[i], blu); } for i in 0..WORDS_CURVE_POINT { - cols.p_access[i].populate(event.channel, event.p_memory_records[i], blu); + cols.p_access[i].populate(event.p_memory_records[i], blu); } } } @@ -256,94 +268,32 @@ where let y2 = limbs_from_prev_access(&local.q_access[8..16]); // x3_numerator = x1 * y2 + x2 * y1. - local.x3_numerator.eval( - builder, - &[x1, x2], - &[y2, y1], - local.shard, - local.channel, - local.is_real, - ); + local.x3_numerator.eval(builder, &[x1, x2], &[y2, y1], local.is_real); // y3_numerator = y1 * y2 + x1 * x2. - local.y3_numerator.eval( - builder, - &[y1, x1], - &[y2, x2], - local.shard, - local.channel, - local.is_real, - ); + local.y3_numerator.eval(builder, &[y1, x1], &[y2, x2], local.is_real); // f = x1 * x2 * y1 * y2. - local.x1_mul_y1.eval( - builder, - &x1, - &y1, - FieldOperation::Mul, - local.shard, - local.channel, - local.is_real, - ); - local.x2_mul_y2.eval( - builder, - &x2, - &y2, - FieldOperation::Mul, - local.shard, - local.channel, - local.is_real, - ); + local.x1_mul_y1.eval(builder, &x1, &y1, FieldOperation::Mul, local.is_real); + local.x2_mul_y2.eval(builder, &x2, &y2, FieldOperation::Mul, local.is_real); let x1_mul_y1 = local.x1_mul_y1.result; let x2_mul_y2 = local.x2_mul_y2.result; - local.f.eval( - builder, - &x1_mul_y1, - &x2_mul_y2, - FieldOperation::Mul, - local.shard, - local.channel, - local.is_real, - ); + local.f.eval(builder, &x1_mul_y1, &x2_mul_y2, FieldOperation::Mul, local.is_real); // d * f. let f = local.f.result; let d_biguint = E::d_biguint(); let d_const = E::BaseField::to_limbs_field::(&d_biguint); - local.d_mul_f.eval( - builder, - &f, - &d_const, - FieldOperation::Mul, - local.shard, - local.channel, - local.is_real, - ); + local.d_mul_f.eval(builder, &f, &d_const, FieldOperation::Mul, local.is_real); let d_mul_f = local.d_mul_f.result; // x3 = x3_numerator / (1 + d * f). - local.x3_ins.eval( - builder, - &local.x3_numerator.result, - &d_mul_f, - true, - local.shard, - local.channel, - local.is_real, - ); + local.x3_ins.eval(builder, &local.x3_numerator.result, &d_mul_f, true, local.is_real); // y3 = y3_numerator / (1 - d * f). - local.y3_ins.eval( - builder, - &local.y3_numerator.result, - &d_mul_f, - false, - local.shard, - local.channel, - local.is_real, - ); + local.y3_ins.eval(builder, &local.y3_numerator.result, &d_mul_f, false, local.is_real); // Constraint self.p_access.value = [self.x3_ins.result, self.y3_ins.result] // This is to ensure that p_access is updated with the new value. 
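For reference, the operation being constrained is the twisted Edwards addition law in the Ed25519 (`a = -1`) form: `x3 = (x1*y2 + x2*y1) / (1 + d*x1*x2*y1*y2)` and `y3 = (y1*y2 + x1*x2) / (1 - d*x1*x2*y1*y2)`. A toy version over a deliberately tiny prime field shows the same dataflow that the `FieldOpCols`/`FieldDenCols` columns prove limb-by-limb; the modulus and `d` below are illustrative values, not Ed25519 parameters.

```rust
const P: u64 = 13; // toy modulus, not the Ed25519 base field
const D: u64 = 3;  // toy curve parameter, not Ed25519's d

fn add(a: u64, b: u64) -> u64 { (a + b) % P }
fn sub(a: u64, b: u64) -> u64 { (a + P - b) % P }
fn mul(a: u64, b: u64) -> u64 { (a * b) % P }

// Inverse by Fermat's little theorem: a^(P-2) mod P.
fn inv(a: u64) -> u64 {
    (0..P - 2).fold(1, |acc, _| mul(acc, a))
}

fn ed_add((x1, y1): (u64, u64), (x2, y2): (u64, u64)) -> (u64, u64) {
    let x3_num = add(mul(x1, y2), mul(x2, y1)); // x1*y2 + x2*y1
    let y3_num = add(mul(y1, y2), mul(x1, x2)); // y1*y2 + x1*x2
    let f = mul(mul(x1, x2), mul(y1, y2));      // x1*x2*y1*y2
    let x3 = mul(x3_num, inv(add(1, mul(D, f))));
    let y3 = mul(y3_num, inv(sub(1, mul(D, f))));
    (x3, y3)
}

fn main() {
    // Adding the identity point (0, 1) leaves a point unchanged.
    assert_eq!(ed_add((2, 5), (0, 1)), (2, 5));
}
```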
@@ -357,7 +307,6 @@ where builder.eval_memory_access_slice( local.shard, - local.channel, local.clk.into(), local.q_ptr, &local.q_access, @@ -366,7 +315,6 @@ where builder.eval_memory_access_slice( local.shard, - local.channel, local.clk + AB::F::from_canonical_u32(1), local.p_ptr, &local.p_access, @@ -375,13 +323,13 @@ where builder.receive_syscall( local.shard, - local.channel, local.clk, local.nonce, AB::F::from_canonical_u32(SyscallCode::ED_ADD.syscall_id()), local.p_ptr, local.q_ptr, local.is_real, + InteractionScope::Local, ); } } diff --git a/crates/core/machine/src/syscall/precompiles/edwards/ed_decompress.rs b/crates/core/machine/src/syscall/precompiles/edwards/ed_decompress.rs index 63a73b2f0b..c9c5ae93c8 100644 --- a/crates/core/machine/src/syscall/precompiles/edwards/ed_decompress.rs +++ b/crates/core/machine/src/syscall/precompiles/edwards/ed_decompress.rs @@ -11,7 +11,7 @@ use p3_air::{Air, AirBuilder, BaseAir}; use p3_field::{AbstractField, PrimeField32}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use sp1_core_executor::{ - events::{ByteLookupEvent, ByteRecord, EdDecompressEvent, FieldOperation}, + events::{ByteLookupEvent, ByteRecord, EdDecompressEvent, FieldOperation, PrecompileEvent}, syscalls::SyscallCode, ExecutionRecord, Program, }; @@ -23,13 +23,13 @@ use sp1_curves::{ params::{limbs_from_vec, FieldParameters, Limbs}, }; use sp1_derive::AlignedBorrow; -use sp1_stark::air::{BaseAirBuilder, MachineAir, SP1AirBuilder}; +use sp1_stark::air::{BaseAirBuilder, InteractionScope, MachineAir, SP1AirBuilder}; use typenum::U32; use crate::{ memory::{MemoryReadCols, MemoryWriteCols}, operations::field::{field_op::FieldOpCols, field_sqrt::FieldSqrtCols, range::FieldLtCols}, - utils::{limbs_from_access, limbs_from_prev_access, pad_rows}, + utils::{limbs_from_access, limbs_from_prev_access, pad_rows_fixed}, }; pub const NUM_ED_DECOMPRESS_COLS: usize = size_of::>(); @@ -44,7 +44,6 @@ pub const NUM_ED_DECOMPRESS_COLS: usize = size_of::>(); pub struct EdDecompressCols { pub is_real: T, pub shard: T, - pub channel: T, pub clk: T, pub nonce: T, pub ptr: T, @@ -70,7 +69,6 @@ impl EdDecompressCols { let mut new_byte_lookup_events = Vec::new(); self.is_real = F::from_bool(true); self.shard = F::from_canonical_u32(event.shard); - self.channel = F::from_canonical_u8(event.channel); self.clk = F::from_canonical_u32(event.clk); self.ptr = F::from_canonical_u32(event.ptr); self.nonce = F::from_canonical_u32( @@ -78,20 +76,12 @@ impl EdDecompressCols { ); self.sign = F::from_bool(event.sign); for i in 0..8 { - self.x_access[i].populate( - event.channel, - event.x_memory_records[i], - &mut new_byte_lookup_events, - ); - self.y_access[i].populate( - event.channel, - event.y_memory_records[i], - &mut new_byte_lookup_events, - ); + self.x_access[i].populate(event.x_memory_records[i], &mut new_byte_lookup_events); + self.y_access[i].populate(event.y_memory_records[i], &mut new_byte_lookup_events); } let y = &BigUint::from_bytes_le(&event.y_bytes); - self.populate_field_ops::(&mut new_byte_lookup_events, event.shard, event.channel, y); + self.populate_field_ops::(&mut new_byte_lookup_events, event.shard, y); record.add_byte_lookup_events(new_byte_lookup_events); } @@ -100,26 +90,17 @@ impl EdDecompressCols { &mut self, blu_events: &mut Vec, shard: u32, - channel: u8, y: &BigUint, ) { let one = BigUint::one(); - self.y_range.populate(blu_events, shard, channel, y, &Ed25519BaseField::modulus()); - let yy = self.yy.populate(blu_events, shard, channel, y, y, FieldOperation::Mul); - let u = 
self.u.populate(blu_events, shard, channel, &yy, &one, FieldOperation::Sub); - let dyy = self.dyy.populate( - blu_events, - shard, - channel, - &E::d_biguint(), - &yy, - FieldOperation::Mul, - ); - let v = self.v.populate(blu_events, shard, channel, &one, &dyy, FieldOperation::Add); - let u_div_v = - self.u_div_v.populate(blu_events, shard, channel, &u, &v, FieldOperation::Div); - let x = self.x.populate(blu_events, shard, channel, &u_div_v, ed25519_sqrt); - self.neg_x.populate(blu_events, shard, channel, &BigUint::zero(), &x, FieldOperation::Sub); + self.y_range.populate(blu_events, shard, y, &Ed25519BaseField::modulus()); + let yy = self.yy.populate(blu_events, shard, y, y, FieldOperation::Mul); + let u = self.u.populate(blu_events, shard, &yy, &one, FieldOperation::Sub); + let dyy = self.dyy.populate(blu_events, shard, &E::d_biguint(), &yy, FieldOperation::Mul); + let v = self.v.populate(blu_events, shard, &one, &dyy, FieldOperation::Add); + let u_div_v = self.u_div_v.populate(blu_events, shard, &u, &v, FieldOperation::Div); + let x = self.x.populate(blu_events, shard, &u_div_v, ed25519_sqrt); + self.neg_x.populate(blu_events, shard, &BigUint::zero(), &x, FieldOperation::Sub); } } @@ -138,38 +119,24 @@ impl EdDecompressCols { builder, &y, &limbs_from_vec::(max_num_limbs), - self.shard, - self.channel, self.is_real, ); - self.yy.eval(builder, &y, &y, FieldOperation::Mul, self.shard, self.channel, self.is_real); + self.yy.eval(builder, &y, &y, FieldOperation::Mul, self.is_real); self.u.eval( builder, &self.yy.result, &[AB::Expr::one()].iter(), FieldOperation::Sub, - self.shard, - self.channel, self.is_real, ); let d_biguint = E::d_biguint(); let d_const = E::BaseField::to_limbs_field::(&d_biguint); - self.dyy.eval( - builder, - &d_const, - &self.yy.result, - FieldOperation::Mul, - self.shard, - self.channel, - self.is_real, - ); + self.dyy.eval(builder, &d_const, &self.yy.result, FieldOperation::Mul, self.is_real); self.v.eval( builder, &[AB::Expr::one()].iter(), &self.dyy.result, FieldOperation::Add, - self.shard, - self.channel, self.is_real, ); self.u_div_v.eval( @@ -177,31 +144,19 @@ impl EdDecompressCols { &self.u.result, &self.v.result, FieldOperation::Div, - self.shard, - self.channel, - self.is_real, - ); - self.x.eval( - builder, - &self.u_div_v.result, - AB::F::zero(), - self.shard, - self.channel, self.is_real, ); + self.x.eval(builder, &self.u_div_v.result, AB::F::zero(), self.is_real); self.neg_x.eval( builder, &[AB::Expr::zero()].iter(), &self.x.multiplication.result, FieldOperation::Sub, - self.shard, - self.channel, self.is_real, ); builder.eval_memory_access_slice( self.shard, - self.channel, self.clk, self.ptr, &self.x_access, @@ -209,7 +164,6 @@ impl EdDecompressCols { ); builder.eval_memory_access_slice( self.shard, - self.channel, self.clk, self.ptr.into() + AB::F::from_canonical_u32(32), &self.y_access, @@ -226,13 +180,13 @@ impl EdDecompressCols { builder.receive_syscall( self.shard, - self.channel, self.clk, self.nonce, AB::F::from_canonical_u32(SyscallCode::ED_DECOMPRESS.syscall_id()), self.ptr, self.sign, self.is_real, + InteractionScope::Local, ); } } @@ -263,9 +217,14 @@ impl MachineAir for EdDecompressChip RowMajorMatrix { let mut rows = Vec::new(); - - for i in 0..input.ed_decompress_events.len() { - let event = &input.ed_decompress_events[i]; + let events = input.get_precompile_events(SyscallCode::ED_DECOMPRESS); + + for (_, event) in events { + let event = if let PrecompileEvent::EdDecompress(event) = event { + event + } else { + unreachable!(); + }; let 
mut row = [F::zero(); NUM_ED_DECOMPRESS_COLS]; let cols: &mut EdDecompressCols = row.as_mut_slice().borrow_mut(); cols.populate::(event.clone(), output); @@ -273,13 +232,17 @@ impl MachineAir for EdDecompressChip = row.as_mut_slice().borrow_mut(); - let zero = BigUint::zero(); - cols.populate_field_ops::(&mut vec![], 0, 0, &zero); - row - }); + pad_rows_fixed( + &mut rows, + || { + let mut row = [F::zero(); NUM_ED_DECOMPRESS_COLS]; + let cols: &mut EdDecompressCols = row.as_mut_slice().borrow_mut(); + let zero = BigUint::zero(); + cols.populate_field_ops::(&mut vec![], 0, &zero); + row + }, + input.fixed_log2_rows::(self), + ); let mut trace = RowMajorMatrix::new( rows.into_iter().flatten().collect::>(), @@ -298,7 +261,11 @@ impl MachineAir for EdDecompressChip bool { - !shard.ed_decompress_events.is_empty() + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + !shard.get_precompile_events(SyscallCode::ED_DECOMPRESS).is_empty() + } } } diff --git a/crates/core/machine/src/syscall/precompiles/fptower/fp.rs b/crates/core/machine/src/syscall/precompiles/fptower/fp.rs index 35a04f8238..a9c21016c7 100644 --- a/crates/core/machine/src/syscall/precompiles/fptower/fp.rs +++ b/crates/core/machine/src/syscall/precompiles/fptower/fp.rs @@ -4,7 +4,7 @@ use std::{ mem::size_of, }; -use crate::air::MemoryAirBuilder; +use crate::{air::MemoryAirBuilder, utils::zeroed_f_vec}; use generic_array::GenericArray; use itertools::Itertools; use num::{BigUint, Zero}; @@ -12,7 +12,7 @@ use p3_air::{Air, AirBuilder, BaseAir}; use p3_field::{AbstractField, PrimeField32}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use sp1_core_executor::{ - events::{ByteLookupEvent, ByteRecord, FieldOperation}, + events::{ByteLookupEvent, ByteRecord, FieldOperation, PrecompileEvent}, syscalls::SyscallCode, ExecutionRecord, Program, }; @@ -21,12 +21,12 @@ use sp1_curves::{ weierstrass::{FieldType, FpOpField}, }; use sp1_derive::AlignedBorrow; -use sp1_stark::air::{BaseAirBuilder, MachineAir, Polynomial, SP1AirBuilder}; +use sp1_stark::air::{BaseAirBuilder, InteractionScope, MachineAir, Polynomial, SP1AirBuilder}; use crate::{ memory::{value_as_limbs, MemoryReadCols, MemoryWriteCols}, operations::field::field_op::FieldOpCols, - utils::{limbs_from_prev_access, pad_rows, words_to_bytes_le_vec}, + utils::{limbs_from_prev_access, pad_rows_fixed, words_to_bytes_le_vec}, }; pub const fn num_fp_cols() -> usize { @@ -43,7 +43,6 @@ pub struct FpOpChip

{ pub struct FpOpCols { pub is_real: T, pub shard: T, - pub channel: T, pub nonce: T, pub clk: T, pub is_add: T, @@ -65,7 +64,6 @@ impl FpOpChip

{ fn populate_field_ops( blu_events: &mut Vec, shard: u32, - channel: u8, cols: &mut FpOpCols, p: BigUint, q: BigUint, @@ -73,7 +71,7 @@ impl FpOpChip

{ ) { let modulus_bytes = P::MODULUS; let modulus = BigUint::from_bytes_le(modulus_bytes); - cols.output.populate_with_modulus(blu_events, shard, channel, &p, &q, &modulus, op); + cols.output.populate_with_modulus(blu_events, shard, &p, &q, &modulus, op); } } @@ -90,18 +88,26 @@ impl MachineAir for FpOpChip

{ } fn generate_trace(&self, input: &Self::Record, output: &mut Self::Record) -> RowMajorMatrix { + // All the fp events for a given curve are coalesce to the curve's Add operation. Only retrieve + // precompile events for that operation. + // TODO: Fix this. + let events = match P::FIELD_TYPE { - FieldType::Bn254 => &input.bn254_fp_events, - FieldType::Bls12381 => &input.bls12381_fp_events, + FieldType::Bn254 => input.get_precompile_events(SyscallCode::BN254_FP_ADD).iter(), + FieldType::Bls12381 => input.get_precompile_events(SyscallCode::BLS12381_FP_ADD).iter(), }; let mut rows = Vec::new(); let mut new_byte_lookup_events = Vec::new(); - for i in 0..events.len() { - let event = &events[i]; + for (_, event) in events { + let event = match (P::FIELD_TYPE, event) { + (FieldType::Bn254, PrecompileEvent::Bn254Fp(event)) => event, + (FieldType::Bls12381, PrecompileEvent::Bls12381Fp(event)) => event, + _ => unreachable!(), + }; - let mut row = vec![F::zero(); num_fp_cols::

<P>()]; + let mut row = zeroed_f_vec(num_fp_cols::<P>

()); let cols: &mut FpOpCols = row.as_mut_slice().borrow_mut(); let modulus = &BigUint::from_bytes_le(P::MODULUS); @@ -113,7 +119,6 @@ impl MachineAir for FpOpChip

{ cols.is_mul = F::from_canonical_u8((event.op == FieldOperation::Mul) as u8); cols.is_real = F::one(); cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); cols.clk = F::from_canonical_u32(event.clk); cols.x_ptr = F::from_canonical_u32(event.x_ptr); cols.y_ptr = F::from_canonical_u32(event.y_ptr); @@ -121,7 +126,6 @@ impl MachineAir for FpOpChip

{ Self::populate_field_ops( &mut new_byte_lookup_events, event.shard, - event.channel, cols, p, q, @@ -130,40 +134,35 @@ impl MachineAir for FpOpChip

{ // Populate the memory access columns. for i in 0..cols.y_access.len() { - cols.y_access[i].populate( - event.channel, - event.y_memory_records[i], - &mut new_byte_lookup_events, - ); + cols.y_access[i].populate(event.y_memory_records[i], &mut new_byte_lookup_events); } for i in 0..cols.x_access.len() { - cols.x_access[i].populate( - event.channel, - event.x_memory_records[i], - &mut new_byte_lookup_events, - ); + cols.x_access[i].populate(event.x_memory_records[i], &mut new_byte_lookup_events); } - rows.push(row) + rows.push(row); } output.add_byte_lookup_events(new_byte_lookup_events); - pad_rows(&mut rows, || { - let mut row = vec![F::zero(); num_fp_cols::

()]; - let cols: &mut FpOpCols = row.as_mut_slice().borrow_mut(); - let zero = BigUint::zero(); - cols.is_add = F::from_canonical_u8(1); - Self::populate_field_ops( - &mut vec![], - 0, - 0, - cols, - zero.clone(), - zero, - FieldOperation::Add, - ); - row - }); + pad_rows_fixed( + &mut rows, + || { + let mut row = zeroed_f_vec(num_fp_cols::

()); + let cols: &mut FpOpCols = row.as_mut_slice().borrow_mut(); + let zero = BigUint::zero(); + cols.is_add = F::from_canonical_u8(1); + Self::populate_field_ops( + &mut vec![], + 0, + cols, + zero.clone(), + zero, + FieldOperation::Add, + ); + row + }, + input.fixed_log2_rows::(self), + ); // Convert the trace to a row major matrix. let mut trace = @@ -180,9 +179,27 @@ impl MachineAir for FpOpChip

{ } fn included(&self, shard: &Self::Record) -> bool { - match P::FIELD_TYPE { - FieldType::Bn254 => !shard.bn254_fp_events.is_empty(), - FieldType::Bls12381 => !shard.bls12381_fp_events.is_empty(), + // All the fp events for a given curve are coalesce to the curve's Add operation. Only + // check for that operation. + + assert!( + shard.get_precompile_events(SyscallCode::BN254_FP_SUB).is_empty() + && shard.get_precompile_events(SyscallCode::BN254_FP_MUL).is_empty() + && shard.get_precompile_events(SyscallCode::BLS12381_FP_SUB).is_empty() + && shard.get_precompile_events(SyscallCode::BLS12381_FP_MUL).is_empty() + ); + + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + match P::FIELD_TYPE { + FieldType::Bn254 => { + !shard.get_precompile_events(SyscallCode::BN254_FP_ADD).is_empty() + } + FieldType::Bls12381 => { + !shard.get_precompile_events(SyscallCode::BLS12381_FP_ADD).is_empty() + } + } } } } @@ -233,8 +250,6 @@ where local.is_sub, local.is_mul, AB::F::zero(), - local.shard, - local.channel, local.is_real, ); @@ -244,7 +259,6 @@ where builder.eval_memory_access_slice( local.shard, - local.channel, local.clk.into(), local.y_ptr, &local.y_access, @@ -252,7 +266,6 @@ where ); builder.eval_memory_access_slice( local.shard, - local.channel, local.clk + AB::F::from_canonical_u32(1), /* We read p at +1 since p, q could be the * same. */ local.x_ptr, @@ -281,13 +294,13 @@ where builder.receive_syscall( local.shard, - local.channel, local.clk, local.nonce, syscall_id_felt, local.x_ptr, local.y_ptr, local.is_real, + InteractionScope::Local, ); } } diff --git a/crates/core/machine/src/syscall/precompiles/fptower/fp2_addsub.rs b/crates/core/machine/src/syscall/precompiles/fptower/fp2_addsub.rs index 5024fad6c4..7f28309597 100644 --- a/crates/core/machine/src/syscall/precompiles/fptower/fp2_addsub.rs +++ b/crates/core/machine/src/syscall/precompiles/fptower/fp2_addsub.rs @@ -4,7 +4,7 @@ use std::{ mem::size_of, }; -use crate::air::MemoryAirBuilder; +use crate::{air::MemoryAirBuilder, utils::zeroed_f_vec}; use generic_array::GenericArray; use itertools::Itertools; use num::{BigUint, Zero}; @@ -12,7 +12,7 @@ use p3_air::{Air, AirBuilder, BaseAir}; use p3_field::{AbstractField, PrimeField32}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use sp1_core_executor::{ - events::{ByteLookupEvent, ByteRecord, FieldOperation}, + events::{ByteLookupEvent, ByteRecord, FieldOperation, PrecompileEvent}, syscalls::SyscallCode, ExecutionRecord, Program, }; @@ -21,13 +21,13 @@ use sp1_curves::{ weierstrass::{FieldType, FpOpField}, }; use sp1_derive::AlignedBorrow; -use sp1_stark::air::{BaseAirBuilder, MachineAir, Polynomial, SP1AirBuilder}; +use sp1_stark::air::{BaseAirBuilder, InteractionScope, MachineAir, Polynomial, SP1AirBuilder}; use typenum::Unsigned; use crate::{ memory::{value_as_limbs, MemoryReadCols, MemoryWriteCols}, operations::field::field_op::FieldOpCols, - utils::{limbs_from_prev_access, pad_rows, words_to_bytes_le_vec}, + utils::{limbs_from_prev_access, pad_rows_fixed, words_to_bytes_le_vec}, }; pub const fn num_fp2_addsub_cols() -> usize { @@ -40,7 +40,6 @@ pub const fn num_fp2_addsub_cols() -> usize { pub struct Fp2AddSubAssignCols { pub is_real: T, pub shard: T, - pub channel: T, pub nonce: T, pub clk: T, pub is_add: T, @@ -65,7 +64,6 @@ impl Fp2AddSubAssignChip

{ fn populate_field_ops( blu_events: &mut Vec, shard: u32, - channel: u8, cols: &mut Fp2AddSubAssignCols, p_x: BigUint, p_y: BigUint, @@ -75,8 +73,8 @@ impl Fp2AddSubAssignChip

{ ) { let modulus_bytes = P::MODULUS; let modulus = BigUint::from_bytes_le(modulus_bytes); - cols.c0.populate_with_modulus(blu_events, shard, channel, &p_x, &q_x, &modulus, op); - cols.c1.populate_with_modulus(blu_events, shard, channel, &p_y, &q_y, &modulus, op); + cols.c0.populate_with_modulus(blu_events, shard, &p_x, &q_x, &modulus, op); + cols.c1.populate_with_modulus(blu_events, shard, &p_y, &q_y, &modulus, op); } } @@ -93,18 +91,28 @@ impl MachineAir for Fp2AddSubAssignChip

{ } fn generate_trace(&self, input: &Self::Record, output: &mut Self::Record) -> RowMajorMatrix { + // All the fp2 sub and add events for a given curve are coalesce to the curve's Add operation. Only retrieve + // precompile events for that operation. + // TODO: Fix this. + let events = match P::FIELD_TYPE { - FieldType::Bn254 => &input.bn254_fp2_addsub_events, - FieldType::Bls12381 => &input.bls12381_fp2_addsub_events, + FieldType::Bn254 => input.get_precompile_events(SyscallCode::BN254_FP2_ADD).iter(), + FieldType::Bls12381 => { + input.get_precompile_events(SyscallCode::BLS12381_FP2_ADD).iter() + } }; let mut rows = Vec::new(); let mut new_byte_lookup_events = Vec::new(); - for i in 0..events.len() { - let event = &events[i]; + for (_, event) in events { + let event = match (P::FIELD_TYPE, event) { + (FieldType::Bn254, PrecompileEvent::Bn254Fp2AddSub(event)) => event, + (FieldType::Bls12381, PrecompileEvent::Bls12381Fp2AddSub(event)) => event, + _ => unreachable!(), + }; - let mut row = vec![F::zero(); num_fp2_addsub_cols::

<P>()]; + let mut row = zeroed_f_vec(num_fp2_addsub_cols::<P>

()); let cols: &mut Fp2AddSubAssignCols = row.as_mut_slice().borrow_mut(); let p = &event.x; @@ -117,7 +125,6 @@ impl MachineAir for Fp2AddSubAssignChip

{ cols.is_real = F::one(); cols.is_add = F::from_bool(event.op == FieldOperation::Add); cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); cols.clk = F::from_canonical_u32(event.clk); cols.x_ptr = F::from_canonical_u32(event.x_ptr); cols.y_ptr = F::from_canonical_u32(event.y_ptr); @@ -125,7 +132,6 @@ impl MachineAir for Fp2AddSubAssignChip

{ Self::populate_field_ops( &mut new_byte_lookup_events, event.shard, - event.channel, cols, p_x, p_y, @@ -136,42 +142,37 @@ impl MachineAir for Fp2AddSubAssignChip

{ // Populate the memory access columns. for i in 0..cols.y_access.len() { - cols.y_access[i].populate( - event.channel, - event.y_memory_records[i], - &mut new_byte_lookup_events, - ); + cols.y_access[i].populate(event.y_memory_records[i], &mut new_byte_lookup_events); } for i in 0..cols.x_access.len() { - cols.x_access[i].populate( - event.channel, - event.x_memory_records[i], - &mut new_byte_lookup_events, - ); + cols.x_access[i].populate(event.x_memory_records[i], &mut new_byte_lookup_events); } - rows.push(row) + rows.push(row); } output.add_byte_lookup_events(new_byte_lookup_events); - pad_rows(&mut rows, || { - let mut row = vec![F::zero(); num_fp2_addsub_cols::

()]; - let cols: &mut Fp2AddSubAssignCols = row.as_mut_slice().borrow_mut(); - cols.is_add = F::one(); - let zero = BigUint::zero(); - Self::populate_field_ops( - &mut vec![], - 0, - 0, - cols, - zero.clone(), - zero.clone(), - zero.clone(), - zero, - FieldOperation::Add, - ); - row - }); + pad_rows_fixed( + &mut rows, + || { + let mut row = zeroed_f_vec(num_fp2_addsub_cols::

()); + let cols: &mut Fp2AddSubAssignCols = row.as_mut_slice().borrow_mut(); + cols.is_add = F::one(); + let zero = BigUint::zero(); + Self::populate_field_ops( + &mut vec![], + 0, + cols, + zero.clone(), + zero.clone(), + zero.clone(), + zero, + FieldOperation::Add, + ); + row + }, + input.fixed_log2_rows::(self), + ); // Convert the trace to a row major matrix. let mut trace = RowMajorMatrix::new( @@ -191,9 +192,26 @@ impl MachineAir for Fp2AddSubAssignChip

{ } fn included(&self, shard: &Self::Record) -> bool { - match P::FIELD_TYPE { - FieldType::Bn254 => !shard.bn254_fp2_addsub_events.is_empty(), - FieldType::Bls12381 => !shard.bls12381_fp2_addsub_events.is_empty(), + // All the fp2 sub and add events for a given curve are coalesce to the curve's Add operation. Only retrieve + // precompile events for that operation. + // TODO: Fix this. + + assert!( + shard.get_precompile_events(SyscallCode::BN254_FP_SUB).is_empty() + && shard.get_precompile_events(SyscallCode::BLS12381_FP_SUB).is_empty() + ); + + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + match P::FIELD_TYPE { + FieldType::Bn254 => { + !shard.get_precompile_events(SyscallCode::BN254_FP2_ADD).is_empty() + } + FieldType::Bls12381 => { + !shard.get_precompile_events(SyscallCode::BLS12381_FP2_ADD).is_empty() + } + } } } } @@ -243,8 +261,6 @@ where AB::Expr::one() - local.is_add, AB::F::zero(), AB::F::zero(), - local.shard, - local.channel, local.is_real, ); @@ -257,8 +273,6 @@ where AB::Expr::one() - local.is_add, AB::F::zero(), AB::F::zero(), - local.shard, - local.channel, local.is_real, ); } @@ -273,7 +287,6 @@ where ); builder.eval_memory_access_slice( local.shard, - local.channel, local.clk.into(), local.y_ptr, &local.y_access, @@ -281,7 +294,6 @@ where ); builder.eval_memory_access_slice( local.shard, - local.channel, local.clk + AB::F::from_canonical_u32(1), /* We read p at +1 since p, q could be the * same. */ local.x_ptr, @@ -305,13 +317,13 @@ where builder.receive_syscall( local.shard, - local.channel, local.clk, local.nonce, syscall_id_felt, local.x_ptr, local.y_ptr, local.is_real, + InteractionScope::Local, ); } } diff --git a/crates/core/machine/src/syscall/precompiles/fptower/fp2_mul.rs b/crates/core/machine/src/syscall/precompiles/fptower/fp2_mul.rs index 7987986011..95a624cc2f 100644 --- a/crates/core/machine/src/syscall/precompiles/fptower/fp2_mul.rs +++ b/crates/core/machine/src/syscall/precompiles/fptower/fp2_mul.rs @@ -3,7 +3,7 @@ use std::{ marker::PhantomData, }; -use crate::air::MemoryAirBuilder; +use crate::{air::MemoryAirBuilder, utils::zeroed_f_vec}; use generic_array::GenericArray; use itertools::Itertools; use num::{BigUint, Zero}; @@ -11,7 +11,7 @@ use p3_air::{Air, AirBuilder, BaseAir}; use p3_field::{AbstractField, PrimeField32}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use sp1_core_executor::{ - events::{ByteLookupEvent, ByteRecord, FieldOperation}, + events::{ByteLookupEvent, ByteRecord, FieldOperation, PrecompileEvent}, syscalls::SyscallCode, ExecutionRecord, Program, }; @@ -20,14 +20,14 @@ use sp1_curves::{ weierstrass::{FieldType, FpOpField}, }; use sp1_derive::AlignedBorrow; -use sp1_stark::air::{BaseAirBuilder, MachineAir, Polynomial, SP1AirBuilder}; +use sp1_stark::air::{BaseAirBuilder, InteractionScope, MachineAir, Polynomial, SP1AirBuilder}; use std::mem::size_of; use typenum::Unsigned; use crate::{ memory::{value_as_limbs, MemoryReadCols, MemoryWriteCols}, operations::field::field_op::FieldOpCols, - utils::{limbs_from_prev_access, pad_rows, words_to_bytes_le_vec}, + utils::{limbs_from_prev_access, pad_rows_fixed, words_to_bytes_le_vec}, }; pub const fn num_fp2_mul_cols() -> usize { @@ -40,7 +40,6 @@ pub const fn num_fp2_mul_cols() -> usize { pub struct Fp2MulAssignCols { pub is_real: T, pub shard: T, - pub channel: T, pub nonce: T, pub clk: T, pub x_ptr: T, @@ -69,7 +68,6 @@ impl Fp2MulAssignChip

{ fn populate_field_ops( blu_events: &mut Vec, shard: u32, - channel: u8, cols: &mut Fp2MulAssignCols, p_x: BigUint, p_y: BigUint, @@ -81,7 +79,6 @@ impl Fp2MulAssignChip

{ let a0_mul_b0 = cols.a0_mul_b0.populate_with_modulus( blu_events, shard, - channel, &p_x, &q_x, &modulus, @@ -90,7 +87,6 @@ impl Fp2MulAssignChip

{ let a1_mul_b1 = cols.a1_mul_b1.populate_with_modulus( blu_events, shard, - channel, &p_y, &q_y, &modulus, @@ -99,7 +95,6 @@ impl Fp2MulAssignChip

{ let a0_mul_b1 = cols.a0_mul_b1.populate_with_modulus( blu_events, shard, - channel, &p_x, &q_y, &modulus, @@ -108,7 +103,6 @@ impl Fp2MulAssignChip

{ let a1_mul_b0 = cols.a1_mul_b0.populate_with_modulus( blu_events, shard, - channel, &p_y, &q_x, &modulus, @@ -117,7 +111,6 @@ impl Fp2MulAssignChip

{ cols.c0.populate_with_modulus( blu_events, shard, - channel, &a0_mul_b0, &a1_mul_b1, &modulus, @@ -126,7 +119,6 @@ impl Fp2MulAssignChip

{ cols.c1.populate_with_modulus( blu_events, shard, - channel, &a0_mul_b1, &a1_mul_b0, &modulus, @@ -149,16 +141,21 @@ impl MachineAir for Fp2MulAssignChip

{ fn generate_trace(&self, input: &Self::Record, output: &mut Self::Record) -> RowMajorMatrix { let events = match P::FIELD_TYPE { - FieldType::Bn254 => &input.bn254_fp2_mul_events, - FieldType::Bls12381 => &input.bls12381_fp2_mul_events, + FieldType::Bn254 => input.get_precompile_events(SyscallCode::BN254_FP2_MUL), + FieldType::Bls12381 => input.get_precompile_events(SyscallCode::BLS12381_FP2_MUL), }; let mut rows = Vec::new(); let mut new_byte_lookup_events = Vec::new(); - for i in 0..events.len() { - let event = &events[i]; - let mut row = vec![F::zero(); num_fp2_mul_cols::

()]; + for (_, event) in events { + let event = match (P::FIELD_TYPE, event) { + (FieldType::Bn254, PrecompileEvent::Bn254Fp2Mul(event)) => event, + (FieldType::Bls12381, PrecompileEvent::Bls12381Fp2Mul(event)) => event, + _ => unreachable!(), + }; + + let mut row = zeroed_f_vec(num_fp2_mul_cols::

()); let cols: &mut Fp2MulAssignCols = row.as_mut_slice().borrow_mut(); let p = &event.x; @@ -170,7 +167,6 @@ impl MachineAir for Fp2MulAssignChip

{ cols.is_real = F::one(); cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); cols.clk = F::from_canonical_u32(event.clk); cols.x_ptr = F::from_canonical_u32(event.x_ptr); cols.y_ptr = F::from_canonical_u32(event.y_ptr); @@ -178,7 +174,6 @@ impl MachineAir for Fp2MulAssignChip

{ Self::populate_field_ops( &mut new_byte_lookup_events, event.shard, - event.channel, cols, p_x, p_y, @@ -188,40 +183,35 @@ impl MachineAir for Fp2MulAssignChip

{ // Populate the memory access columns. for i in 0..cols.y_access.len() { - cols.y_access[i].populate( - event.channel, - event.y_memory_records[i], - &mut new_byte_lookup_events, - ); + cols.y_access[i].populate(event.y_memory_records[i], &mut new_byte_lookup_events); } for i in 0..cols.x_access.len() { - cols.x_access[i].populate( - event.channel, - event.x_memory_records[i], - &mut new_byte_lookup_events, - ); + cols.x_access[i].populate(event.x_memory_records[i], &mut new_byte_lookup_events); } - rows.push(row) + rows.push(row); } output.add_byte_lookup_events(new_byte_lookup_events); - pad_rows(&mut rows, || { - let mut row = vec![F::zero(); num_fp2_mul_cols::

()]; - let cols: &mut Fp2MulAssignCols = row.as_mut_slice().borrow_mut(); - let zero = BigUint::zero(); - Self::populate_field_ops( - &mut vec![], - 0, - 0, - cols, - zero.clone(), - zero.clone(), - zero.clone(), - zero, - ); - row - }); + pad_rows_fixed( + &mut rows, + || { + let mut row = zeroed_f_vec(num_fp2_mul_cols::

()); + let cols: &mut Fp2MulAssignCols = row.as_mut_slice().borrow_mut(); + let zero = BigUint::zero(); + Self::populate_field_ops( + &mut vec![], + 0, + cols, + zero.clone(), + zero.clone(), + zero.clone(), + zero, + ); + row + }, + input.fixed_log2_rows::(self), + ); // Convert the trace to a row major matrix. let mut trace = RowMajorMatrix::new( @@ -241,9 +231,17 @@ impl MachineAir for Fp2MulAssignChip

{ } fn included(&self, shard: &Self::Record) -> bool { - match P::FIELD_TYPE { - FieldType::Bn254 => !shard.bn254_fp2_mul_events.is_empty(), - FieldType::Bls12381 => !shard.bls12381_fp2_mul_events.is_empty(), + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + match P::FIELD_TYPE { + FieldType::Bn254 => { + !shard.get_precompile_events(SyscallCode::BN254_FP2_MUL).is_empty() + } + FieldType::Bls12381 => { + !shard.get_precompile_events(SyscallCode::BLS12381_FP2_MUL).is_empty() + } + } } } } @@ -287,8 +285,6 @@ where &q_x, &p_modulus, FieldOperation::Mul, - local.shard, - local.channel, local.is_real, ); @@ -298,8 +294,6 @@ where &q_y, &p_modulus, FieldOperation::Mul, - local.shard, - local.channel, local.is_real, ); @@ -309,8 +303,6 @@ where &local.a1_mul_b1.result, &p_modulus, FieldOperation::Sub, - local.shard, - local.channel, local.is_real, ); } @@ -322,8 +314,6 @@ where &q_y, &p_modulus, FieldOperation::Mul, - local.shard, - local.channel, local.is_real, ); @@ -333,8 +323,6 @@ where &q_x, &p_modulus, FieldOperation::Mul, - local.shard, - local.channel, local.is_real, ); @@ -344,8 +332,6 @@ where &local.a1_mul_b0.result, &p_modulus, FieldOperation::Add, - local.shard, - local.channel, local.is_real, ); } @@ -361,7 +347,6 @@ where builder.eval_memory_access_slice( local.shard, - local.channel, local.clk.into(), local.y_ptr, &local.y_access, @@ -369,7 +354,6 @@ where ); builder.eval_memory_access_slice( local.shard, - local.channel, local.clk + AB::F::from_canonical_u32(1), /* We read p at +1 since p, q could be the * same. */ local.x_ptr, @@ -386,13 +370,13 @@ where builder.receive_syscall( local.shard, - local.channel, local.clk, local.nonce, syscall_id_felt, local.x_ptr, local.y_ptr, local.is_real, + InteractionScope::Local, ); } } diff --git a/crates/core/machine/src/syscall/precompiles/fptower/mod.rs b/crates/core/machine/src/syscall/precompiles/fptower/mod.rs index f9aa9d26e7..c0d63d3ab4 100644 --- a/crates/core/machine/src/syscall/precompiles/fptower/mod.rs +++ b/crates/core/machine/src/syscall/precompiles/fptower/mod.rs @@ -21,7 +21,7 @@ mod tests { use crate::utils; #[test] - fn test_bls12381_fp() { + fn test_bls12381_fp_ops() { utils::setup_logger(); let program = Program::from(BLS12381_FP_ELF).unwrap(); utils::run_test::>(program).unwrap(); @@ -42,7 +42,7 @@ mod tests { } #[test] - fn test_bn254_fp() { + fn test_bn254_fp_ops() { utils::setup_logger(); let program = Program::from(BN254_FP_ELF).unwrap(); utils::run_test::>(program).unwrap(); diff --git a/crates/core/machine/src/syscall/precompiles/keccak256/air.rs b/crates/core/machine/src/syscall/precompiles/keccak256/air.rs index 7cce1d615c..a5925fb6c8 100644 --- a/crates/core/machine/src/syscall/precompiles/keccak256/air.rs +++ b/crates/core/machine/src/syscall/precompiles/keccak256/air.rs @@ -5,7 +5,7 @@ use p3_field::AbstractField; use p3_keccak_air::{KeccakAir, NUM_KECCAK_COLS, NUM_ROUNDS, U64_LIMBS}; use p3_matrix::Matrix; use sp1_core_executor::syscalls::SyscallCode; -use sp1_stark::air::{SP1AirBuilder, SubAirBuilder}; +use sp1_stark::air::{InteractionScope, SP1AirBuilder, SubAirBuilder}; use super::{ columns::{KeccakMemCols, NUM_KECCAK_MEM_COLS}, @@ -54,7 +54,6 @@ where builder.eval_memory_access( local.shard, - local.channel, local.clk + final_step, // The clk increments by 1 after a final step local.state_addr + AB::Expr::from_canonical_u32(i * 4), &local.state_mem[i as usize], @@ -66,13 +65,13 @@ where builder.assert_eq(local.receive_ecall, first_step * local.is_real); 
builder.receive_syscall( local.shard, - local.channel, local.clk, local.nonce, AB::F::from_canonical_u32(SyscallCode::KECCAK_PERMUTE.syscall_id()), local.state_addr, AB::Expr::zero(), local.receive_ecall, + InteractionScope::Local, ); // Constrain that the inputs stay the same throughout the 24 rows of each cycle @@ -80,7 +79,6 @@ where let mut transition_not_final_builder = transition_builder.when(not_final_step); transition_not_final_builder.assert_eq(local.shard, next.shard); transition_not_final_builder.assert_eq(local.clk, next.clk); - transition_not_final_builder.assert_eq(local.channel, next.channel); transition_not_final_builder.assert_eq(local.state_addr, next.state_addr); transition_not_final_builder.assert_eq(local.is_real, next.is_real); @@ -126,12 +124,7 @@ where // Range check all the values in `state_mem` to be bytes. for i in 0..STATE_NUM_WORDS { - builder.slice_range_check_u8( - &local.state_mem[i].value().0, - local.shard, - local.channel, - local.do_memory_check, - ); + builder.slice_range_check_u8(&local.state_mem[i].value().0, local.do_memory_check); } let mut sub_builder = @@ -188,7 +181,8 @@ mod test { let program = Program::from(KECCAK256_ELF).unwrap(); let (proof, public_values, _) = - prove::<_, CpuProver<_, _>>(program, &stdin, config, SP1CoreOpts::default()).unwrap(); + prove::<_, CpuProver<_, _>>(program, &stdin, config, SP1CoreOpts::default(), None) + .unwrap(); let mut public_values = SP1PublicValues::from(&public_values); let config = BabyBearPoseidon2::new(); diff --git a/crates/core/machine/src/syscall/precompiles/keccak256/columns.rs b/crates/core/machine/src/syscall/precompiles/keccak256/columns.rs index ad3aa5f099..7b622b3bc1 100644 --- a/crates/core/machine/src/syscall/precompiles/keccak256/columns.rs +++ b/crates/core/machine/src/syscall/precompiles/keccak256/columns.rs @@ -18,7 +18,6 @@ pub(crate) struct KeccakMemCols { pub keccak: KeccakCols, pub shard: T, - pub channel: T, pub clk: T, pub nonce: T, pub state_addr: T, diff --git a/crates/core/machine/src/syscall/precompiles/keccak256/trace.rs b/crates/core/machine/src/syscall/precompiles/keccak256/trace.rs index bca7080734..020b28c9f0 100644 --- a/crates/core/machine/src/syscall/precompiles/keccak256/trace.rs +++ b/crates/core/machine/src/syscall/precompiles/keccak256/trace.rs @@ -3,9 +3,15 @@ use std::borrow::BorrowMut; use p3_field::PrimeField32; use p3_keccak_air::{generate_trace_rows, NUM_KECCAK_COLS, NUM_ROUNDS}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; -use p3_maybe_rayon::prelude::{ParallelIterator, ParallelSlice}; -use sp1_core_executor::{ExecutionRecord, Program}; -use sp1_stark::{air::MachineAir, MachineRecord}; +use p3_maybe_rayon::prelude::{ParallelBridge, ParallelIterator, ParallelSlice}; +use sp1_core_executor::{ + events::{ByteLookupEvent, KeccakPermuteEvent, PrecompileEvent, SyscallEvent}, + syscalls::SyscallCode, + ExecutionRecord, Program, +}; +use sp1_stark::air::MachineAir; + +use crate::utils::zeroed_f_vec; use super::{ columns::{KeccakMemCols, NUM_KECCAK_MEM_COLS}, @@ -21,140 +27,76 @@ impl MachineAir for KeccakPermuteChip { "KeccakPermute".to_string() } + fn generate_dependencies(&self, input: &Self::Record, output: &mut Self::Record) { + let chunk_size = 8; + + let blu_events: Vec> = input + .get_precompile_events(SyscallCode::KECCAK_PERMUTE) + .par_chunks(chunk_size) + .map(|ops: &[(SyscallEvent, PrecompileEvent)]| { + // The blu map stores shard -> map(byte lookup event -> multiplicity). 
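The new dependency pass only produces byte-lookup multiplicities, so it can be split into chunks and merged at the end. A std-only sketch of that shape (the real code parallelizes the per-chunk work with rayon's `par_chunks`, and the event and lookup types here are simplified stand-ins rather than the executor's types):

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct ByteLookupEvent {
    opcode: u8,
    a: u8,
    b: u8,
}

#[derive(Clone)]
struct KeccakPermuteEvent {
    state_bytes: Vec<u8>,
}

// Tally the byte-range lookups contributed by one chunk of events.
fn chunk_lookups(events: &[KeccakPermuteEvent]) -> HashMap<ByteLookupEvent, usize> {
    let mut blu = HashMap::new();
    for event in events {
        // Range-check every state byte (opcode 0 stands in for a U8Range check).
        for &byte in &event.state_bytes {
            *blu.entry(ByteLookupEvent { opcode: 0, a: byte, b: 0 }).or_insert(0) += 1;
        }
    }
    blu
}

fn main() {
    let events: Vec<KeccakPermuteEvent> =
        (0..32).map(|i| KeccakPermuteEvent { state_bytes: vec![i as u8; 4] }).collect();

    let chunk_size = 8;
    let mut merged: HashMap<ByteLookupEvent, usize> = HashMap::new();
    for chunk in events.chunks(chunk_size) {
        for (lookup, mult) in chunk_lookups(chunk) {
            *merged.entry(lookup).or_insert(0) += mult;
        }
    }
    // Every byte of every event was range-checked exactly once.
    assert_eq!(merged.values().sum::<usize>(), 32 * 4);
}
```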
+ let mut blu = Vec::new(); + let mut chunk = zeroed_f_vec::(NUM_KECCAK_MEM_COLS * NUM_ROUNDS); + ops.iter().for_each(|(_, op)| { + if let PrecompileEvent::KeccakPermute(event) = op { + Self::populate_chunk(event, &mut chunk, &mut blu); + } else { + unreachable!(); + } + }); + blu + }) + .collect(); + for blu in blu_events { + output.add_byte_lookup_events(blu); + } + } + fn generate_trace( &self, input: &ExecutionRecord, - output: &mut ExecutionRecord, + _: &mut ExecutionRecord, ) -> RowMajorMatrix { - let num_events = input.keccak_permute_events.len(); - let chunk_size = std::cmp::max(num_events / num_cpus::get(), 1); - - // Use par_chunks to generate the trace in parallel. - let rows_and_records = (0..num_events) - .collect::>() - .par_chunks(chunk_size) - .map(|chunk| { - let mut record = ExecutionRecord::default(); - let mut new_byte_lookup_events = Vec::new(); - - // First generate all the p3_keccak_air traces at once. - let perm_inputs = chunk - .iter() - .map(|event_index| input.keccak_permute_events[*event_index].pre_state) - .collect::>(); - let p3_keccak_trace = generate_trace_rows::(perm_inputs); - - let rows = chunk - .iter() - .enumerate() - .flat_map(|(index_in_chunk, event_index)| { - let mut rows = Vec::new(); - - let event = &input.keccak_permute_events[*event_index]; - let start_clk = event.clk; - let shard = event.shard; - let channel = event.channel; - - // Create all the rows for the permutation. - for i in 0..NUM_ROUNDS { - let p3_keccak_row = - p3_keccak_trace.row(i + index_in_chunk * NUM_ROUNDS); - let mut row = [F::zero(); NUM_KECCAK_MEM_COLS]; - // Copy p3_keccak_row into start of cols - row[..NUM_KECCAK_COLS] - .copy_from_slice(p3_keccak_row.collect::>().as_slice()); - let cols: &mut KeccakMemCols = row.as_mut_slice().borrow_mut(); - - cols.shard = F::from_canonical_u32(shard); - cols.channel = F::from_canonical_u8(channel); - cols.clk = F::from_canonical_u32(start_clk); - cols.state_addr = F::from_canonical_u32(event.state_addr); - cols.is_real = F::one(); - - // If this is the first row, then populate read memory accesses - if i == 0 { - for (j, read_record) in event.state_read_records.iter().enumerate() - { - cols.state_mem[j].populate_read( - channel, - *read_record, - &mut new_byte_lookup_events, - ); - new_byte_lookup_events.add_u8_range_checks( - shard, - channel, - &read_record.value.to_le_bytes(), - ); - } - cols.do_memory_check = F::one(); - cols.receive_ecall = F::one(); - } + let events = input.get_precompile_events(SyscallCode::KECCAK_PERMUTE); + let num_events = events.len(); + let num_rows = (num_events * NUM_ROUNDS).next_power_of_two(); + let chunk_size = 8; + let values = vec![0u32; num_rows * NUM_KECCAK_MEM_COLS]; + let mut values = unsafe { std::mem::transmute::, Vec>(values) }; + + let dummy_keccak_rows = generate_trace_rows::(vec![[0; STATE_SIZE]]); + let mut dummy_chunk = Vec::new(); + for i in 0..NUM_ROUNDS { + let dummy_row = dummy_keccak_rows.row(i); + let mut row = [F::zero(); NUM_KECCAK_MEM_COLS]; + row[..NUM_KECCAK_COLS].copy_from_slice(dummy_row.collect::>().as_slice()); + dummy_chunk.extend_from_slice(&row); + } - // If this is the last row, then populate write memory accesses - if i == NUM_ROUNDS - 1 { - for (j, write_record) in - event.state_write_records.iter().enumerate() - { - cols.state_mem[j].populate_write( - channel, - *write_record, - &mut new_byte_lookup_events, - ); - new_byte_lookup_events.add_u8_range_checks( - shard, - channel, - &write_record.value.to_le_bytes(), - ); - } - cols.do_memory_check = F::one(); + values + 
.chunks_mut(chunk_size * NUM_KECCAK_MEM_COLS * NUM_ROUNDS) + .enumerate() + .par_bridge() + .for_each(|(i, rows)| { + rows.chunks_mut(NUM_ROUNDS * NUM_KECCAK_MEM_COLS).enumerate().for_each( + |(j, rounds)| { + let idx = i * chunk_size + j; + if idx < num_events { + let mut new_byte_lookup_events = Vec::new(); + if let PrecompileEvent::KeccakPermute(event) = &events[idx].1 { + Self::populate_chunk(event, rounds, &mut new_byte_lookup_events); + } else { + unreachable!(); } - - rows.push(row); + } else { + rounds.copy_from_slice(&dummy_chunk[..rounds.len()]); } - rows - }) - .collect::>(); - record.add_byte_lookup_events(new_byte_lookup_events); - (rows, record) - }) - .collect::>(); - - // Generate the trace rows for each event. - let mut rows: Vec<[F; NUM_KECCAK_MEM_COLS]> = vec![]; - for (mut row, mut record) in rows_and_records { - rows.append(&mut row); - output.append(&mut record); - } - - let nb_rows = rows.len(); - let mut padded_nb_rows = nb_rows.next_power_of_two(); - if padded_nb_rows == 2 || padded_nb_rows == 1 { - padded_nb_rows = 4; - } - if padded_nb_rows > nb_rows { - let dummy_keccak_rows = generate_trace_rows::(vec![[0; STATE_SIZE]]); - let mut dummy_rows = Vec::new(); - for i in 0..NUM_ROUNDS { - let dummy_row = dummy_keccak_rows.row(i); - let mut row = [F::zero(); NUM_KECCAK_MEM_COLS]; - row[..NUM_KECCAK_COLS].copy_from_slice(dummy_row.collect::>().as_slice()); - dummy_rows.push(row); - } - rows.append( - &mut dummy_rows - .iter() - .cloned() - .cycle() - .take(padded_nb_rows - nb_rows) - .collect::>(), - ); - } + }, + ); + }); // Convert the trace to a row major matrix. - let mut trace = RowMajorMatrix::new( - rows.into_iter().flatten().collect::>(), - NUM_KECCAK_MEM_COLS, - ); + let mut trace = RowMajorMatrix::new(values, NUM_KECCAK_MEM_COLS); // Write the nonce to the trace. for i in 0..trace.height() { @@ -167,6 +109,58 @@ impl MachineAir for KeccakPermuteChip { } fn included(&self, shard: &Self::Record) -> bool { - !shard.keccak_permute_events.is_empty() + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + !shard.get_precompile_events(SyscallCode::KECCAK_PERMUTE).is_empty() + } + } +} + +impl KeccakPermuteChip { + pub fn populate_chunk( + event: &KeccakPermuteEvent, + chunk: &mut [F], + new_byte_lookup_events: &mut Vec, + ) { + let start_clk = event.clk; + let shard = event.shard; + + let p3_keccak_trace = generate_trace_rows::(vec![event.pre_state]); + + // Create all the rows for the permutation. 
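The rewritten generator sizes the whole trace up front and lets each event fill its own contiguous block of `NUM_ROUNDS` rows, which is what makes the chunked fill above safe to run in parallel. A small sketch of that indexing under simplified assumptions (the column count below is made up; the real trace uses `NUM_KECCAK_MEM_COLS`):

```rust
const NUM_ROUNDS: usize = 24;
const NUM_COLS: usize = 4; // illustrative only

// Row-major indexing: event i, round r lives at row i * NUM_ROUNDS + r.
fn row_slice(values: &mut [u32], event_idx: usize, round: usize) -> &mut [u32] {
    let row = event_idx * NUM_ROUNDS + round;
    &mut values[row * NUM_COLS..(row + 1) * NUM_COLS]
}

fn main() {
    let num_events = 3;
    let num_rows = (num_events * NUM_ROUNDS).next_power_of_two(); // 72 real rows -> 128 total
    let mut values = vec![0u32; num_rows * NUM_COLS];

    for event_idx in 0..num_events {
        for round in 0..NUM_ROUNDS {
            // The real code writes the p3_keccak_air round trace plus the memory
            // columns here; we just tag the row so the layout is visible.
            row_slice(&mut values, event_idx, round)[0] = 1;
        }
    }

    let real_rows = values.chunks(NUM_COLS).filter(|row| row[0] == 1).count();
    assert_eq!(real_rows, num_events * NUM_ROUNDS);
    assert_eq!(values.len() / NUM_COLS, 128);
}
```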
+ for i in 0..NUM_ROUNDS { + let p3_keccak_row = p3_keccak_trace.row(i); + let row = &mut chunk[i * NUM_KECCAK_MEM_COLS..(i + 1) * NUM_KECCAK_MEM_COLS]; + // Copy p3_keccak_row into start of cols + row[..NUM_KECCAK_COLS].copy_from_slice(p3_keccak_row.collect::>().as_slice()); + let cols: &mut KeccakMemCols = row.borrow_mut(); + + cols.shard = F::from_canonical_u32(shard); + cols.clk = F::from_canonical_u32(start_clk); + cols.state_addr = F::from_canonical_u32(event.state_addr); + cols.is_real = F::one(); + + // If this is the first row, then populate read memory accesses + if i == 0 { + for (j, read_record) in event.state_read_records.iter().enumerate() { + cols.state_mem[j].populate_read(*read_record, new_byte_lookup_events); + new_byte_lookup_events + .add_u8_range_checks(shard, &read_record.value.to_le_bytes()); + } + cols.do_memory_check = F::one(); + cols.receive_ecall = F::one(); + } + + // If this is the last row, then populate write memory accesses + if i == NUM_ROUNDS - 1 { + for (j, write_record) in event.state_write_records.iter().enumerate() { + cols.state_mem[j].populate_write(*write_record, new_byte_lookup_events); + new_byte_lookup_events + .add_u8_range_checks(shard, &write_record.value.to_le_bytes()); + } + cols.do_memory_check = F::one(); + } + } } } diff --git a/crates/core/machine/src/syscall/precompiles/sha256/compress/air.rs b/crates/core/machine/src/syscall/precompiles/sha256/compress/air.rs index db1a58a7f0..648f9cdd34 100644 --- a/crates/core/machine/src/syscall/precompiles/sha256/compress/air.rs +++ b/crates/core/machine/src/syscall/precompiles/sha256/compress/air.rs @@ -4,7 +4,10 @@ use p3_air::{Air, AirBuilder, BaseAir}; use p3_field::AbstractField; use p3_matrix::Matrix; use sp1_core_executor::syscalls::SyscallCode; -use sp1_stark::{air::SP1AirBuilder, Word}; +use sp1_stark::{ + air::{InteractionScope, SP1AirBuilder}, + Word, +}; use super::{ columns::{ShaCompressCols, NUM_SHA_COMPRESS_COLS}, @@ -51,13 +54,13 @@ where builder.assert_eq(local.start, local.is_real * local.octet[0] * local.octet_num[0]); builder.receive_syscall( local.shard, - local.channel, local.clk, local.nonce, AB::F::from_canonical_u32(SyscallCode::SHA_COMPRESS.syscall_id()), local.w_ptr, local.h_ptr, local.start, + InteractionScope::Local, ); } } @@ -172,10 +175,6 @@ impl ShaCompressChip { .when(local.is_real) .when_not(local.is_last_row) .assert_eq(local.clk, next.clk); - builder - .when_transition() - .when_not(local.is_last_row) - .assert_eq(local.channel, next.channel); builder .when_transition() .when(local.is_real) @@ -209,7 +208,6 @@ impl ShaCompressChip { fn eval_memory(&self, builder: &mut AB, local: &ShaCompressCols) { builder.eval_memory_access( local.shard, - local.channel, local.clk + local.is_finalize, local.mem_addr, &local.mem, @@ -296,8 +294,6 @@ impl ShaCompressChip { local.e, 6, local.e_rr_6, - local.shard, - local.channel, local.is_compression, ); // Calculate e rightrotate 11. @@ -306,8 +302,6 @@ impl ShaCompressChip { local.e, 11, local.e_rr_11, - local.shard, - local.channel, local.is_compression, ); // Calculate e rightrotate 25. @@ -316,8 +310,6 @@ impl ShaCompressChip { local.e, 25, local.e_rr_25, - local.shard, - local.channel, local.is_compression, ); // Calculate (e rightrotate 6) xor (e rightrotate 11). @@ -326,8 +318,6 @@ impl ShaCompressChip { local.e_rr_6.value, local.e_rr_11.value, local.s1_intermediate, - local.shard, - local.channel, local.is_compression, ); // Calculate S1 := ((e rightrotate 6) xor (e rightrotate 11)) xor (e rightrotate 25). 
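As a concrete reference for the constraint comments, here is the same arithmetic computed directly on `u32`s; the chip proves it limb-by-limb through the XOR/AND/NOT operation columns instead of computing it this way. The inputs are just the SHA-256 initial hash words for e, f, and g.

```rust
fn s1(e: u32) -> u32 {
    e.rotate_right(6) ^ e.rotate_right(11) ^ e.rotate_right(25)
}

fn ch(e: u32, f: u32, g: u32) -> u32 {
    (e & f) ^ (!e & g)
}

fn main() {
    // SHA-256 initial hash words for e, f, g.
    let (e, f, g) = (0x510e527fu32, 0x9b05688c, 0x1f83d9ab);
    // temp1 also folds in h, k[i], and w[i]; only S1 and ch are shown here.
    println!("S1 = {:#010x}, ch = {:#010x}", s1(e), ch(e, f, g));
}
```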
@@ -336,39 +326,20 @@ impl ShaCompressChip { local.s1_intermediate.value, local.e_rr_25.value, local.s1, - local.shard, - local.channel, local.is_compression, ); // Calculate ch := (e and f) xor ((not e) and g). // Calculate e and f. - AndOperation::::eval( - builder, - local.e, - local.f, - local.e_and_f, - local.shard, - local.channel, - local.is_compression, - ); + AndOperation::::eval(builder, local.e, local.f, local.e_and_f, local.is_compression); // Calculate not e. - NotOperation::::eval( - builder, - local.e, - local.e_not, - local.shard, - local.channel, - local.is_compression, - ); + NotOperation::::eval(builder, local.e, local.e_not, local.is_compression); // Calculate (not e) and g. AndOperation::::eval( builder, local.e_not.value, local.g, local.e_not_and_g, - local.shard, - local.channel, local.is_compression, ); // Calculate ch := (e and f) xor ((not e) and g). @@ -377,8 +348,6 @@ impl ShaCompressChip { local.e_and_f.value, local.e_not_and_g.value, local.ch, - local.shard, - local.channel, local.is_compression, ); @@ -386,8 +355,6 @@ impl ShaCompressChip { Add5Operation::::eval( builder, &[local.h, local.s1.value, local.ch.value, local.k, local.mem.access.value], - local.shard, - local.channel, local.is_compression, local.temp1, ); @@ -399,8 +366,6 @@ impl ShaCompressChip { local.a, 2, local.a_rr_2, - local.shard, - local.channel, local.is_compression, ); // Calculate a rightrotate 13. @@ -409,8 +374,6 @@ impl ShaCompressChip { local.a, 13, local.a_rr_13, - local.shard, - local.channel, local.is_compression, ); // Calculate a rightrotate 22. @@ -419,8 +382,6 @@ impl ShaCompressChip { local.a, 22, local.a_rr_22, - local.shard, - local.channel, local.is_compression, ); // Calculate (a rightrotate 2) xor (a rightrotate 13). @@ -429,8 +390,6 @@ impl ShaCompressChip { local.a_rr_2.value, local.a_rr_13.value, local.s0_intermediate, - local.shard, - local.channel, local.is_compression, ); // Calculate S0 := ((a rightrotate 2) xor (a rightrotate 13)) xor (a rightrotate 22). @@ -439,50 +398,22 @@ impl ShaCompressChip { local.s0_intermediate.value, local.a_rr_22.value, local.s0, - local.shard, - local.channel, local.is_compression, ); // Calculate maj := (a and b) xor (a and c) xor (b and c). // Calculate a and b. - AndOperation::::eval( - builder, - local.a, - local.b, - local.a_and_b, - local.shard, - local.channel, - local.is_compression, - ); + AndOperation::::eval(builder, local.a, local.b, local.a_and_b, local.is_compression); // Calculate a and c. - AndOperation::::eval( - builder, - local.a, - local.c, - local.a_and_c, - local.shard, - local.channel, - local.is_compression, - ); + AndOperation::::eval(builder, local.a, local.c, local.a_and_c, local.is_compression); // Calculate b and c. - AndOperation::::eval( - builder, - local.b, - local.c, - local.b_and_c, - local.shard, - local.channel, - local.is_compression, - ); + AndOperation::::eval(builder, local.b, local.c, local.b_and_c, local.is_compression); // Calculate (a and b) xor (a and c). XorOperation::::eval( builder, local.a_and_b.value, local.a_and_c.value, local.maj_intermediate, - local.shard, - local.channel, local.is_compression, ); // Calculate maj := ((a and b) xor (a and c)) xor (b and c). 
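And the matching half on the `a` side, again as a plain `u32` sketch: S0 and maj feed temp2, and the next working variables come from wrapping adds, which is what `Add5Operation` and the other adder columns check modulo 2^32. The `temp1` value below is a placeholder, not a real round value.

```rust
fn s0(a: u32) -> u32 {
    a.rotate_right(2) ^ a.rotate_right(13) ^ a.rotate_right(22)
}

fn maj(a: u32, b: u32, c: u32) -> u32 {
    (a & b) ^ (a & c) ^ (b & c)
}

fn main() {
    // SHA-256 initial hash words for a, b, c, d.
    let (a, b, c, d) = (0x6a09e667u32, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a);
    let temp1 = 0x12345678u32; // placeholder for h + S1 + ch + k[i] + w[i]
    let temp2 = s0(a).wrapping_add(maj(a, b, c));
    let new_e = d.wrapping_add(temp1);
    let new_a = temp1.wrapping_add(temp2);
    println!("temp2 = {temp2:#010x}, new_a = {new_a:#010x}, new_e = {new_e:#010x}");
}
```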
@@ -491,8 +422,6 @@ impl ShaCompressChip { local.maj_intermediate.value, local.b_and_c.value, local.maj, - local.shard, - local.channel, local.is_compression, ); @@ -502,8 +431,6 @@ impl ShaCompressChip { local.s0.value, local.maj.value, local.temp2, - local.shard, - local.channel, local.is_compression.into(), ); @@ -513,8 +440,6 @@ impl ShaCompressChip { local.d, local.temp1.value, local.d_add_temp1, - local.shard, - local.channel, local.is_compression.into(), ); @@ -524,8 +449,6 @@ impl ShaCompressChip { local.temp1.value, local.temp2.value, local.temp1_add_temp2, - local.shard, - local.channel, local.is_compression.into(), ); @@ -581,8 +504,6 @@ impl ShaCompressChip { local.mem.prev_value, local.finalized_operand, local.finalize_add, - local.shard, - local.channel, local.is_finalize.into(), ); diff --git a/crates/core/machine/src/syscall/precompiles/sha256/compress/columns.rs b/crates/core/machine/src/syscall/precompiles/sha256/compress/columns.rs index d45b03ac7c..5d48b9edcc 100644 --- a/crates/core/machine/src/syscall/precompiles/sha256/compress/columns.rs +++ b/crates/core/machine/src/syscall/precompiles/sha256/compress/columns.rs @@ -25,7 +25,6 @@ pub const NUM_SHA_COMPRESS_COLS: usize = size_of::>(); pub struct ShaCompressCols { /// Inputs. pub shard: T, - pub channel: T, pub nonce: T, pub clk: T, pub w_ptr: T, diff --git a/crates/core/machine/src/syscall/precompiles/sha256/compress/trace.rs b/crates/core/machine/src/syscall/precompiles/sha256/compress/trace.rs index c8c0d910e1..ef5b28c636 100644 --- a/crates/core/machine/src/syscall/precompiles/sha256/compress/trace.rs +++ b/crates/core/machine/src/syscall/precompiles/sha256/compress/trace.rs @@ -6,7 +6,8 @@ use p3_field::PrimeField32; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use p3_maybe_rayon::prelude::{ParallelIterator, ParallelSlice}; use sp1_core_executor::{ - events::{ByteLookupEvent, ByteRecord, ShaCompressEvent}, + events::{ByteLookupEvent, ByteRecord, PrecompileEvent, ShaCompressEvent}, + syscalls::SyscallCode, ExecutionRecord, Program, }; use sp1_stark::{air::MachineAir, Word}; @@ -15,7 +16,7 @@ use super::{ columns::{ShaCompressCols, NUM_SHA_COMPRESS_COLS}, ShaCompressChip, SHA_COMPRESS_K, }; -use crate::utils::pad_rows; +use crate::utils::pad_rows_fixed; impl MachineAir for ShaCompressChip { type Record = ExecutionRecord; @@ -34,15 +35,23 @@ impl MachineAir for ShaCompressChip { let rows = Vec::new(); let mut wrapped_rows = Some(rows); - for i in 0..input.sha_compress_events.len() { - let event = input.sha_compress_events[i].clone(); - self.event_to_rows(&event, &mut wrapped_rows, &mut Vec::new()); + for (_, event) in input.get_precompile_events(SyscallCode::SHA_COMPRESS) { + let event = if let PrecompileEvent::ShaCompress(event) = event { + event + } else { + unreachable!() + }; + self.event_to_rows(event, &mut wrapped_rows, &mut Vec::new()); } let mut rows = wrapped_rows.unwrap(); let num_real_rows = rows.len(); - pad_rows(&mut rows, || [F::zero(); NUM_SHA_COMPRESS_COLS]); + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_SHA_COMPRESS_COLS], + input.fixed_log2_rows::(self), + ); // Set the octet_num and octect columns for the padded rows. 
let mut octet_num = 0; @@ -85,14 +94,19 @@ impl MachineAir for ShaCompressChip { } fn generate_dependencies(&self, input: &Self::Record, output: &mut Self::Record) { - let chunk_size = std::cmp::max(input.sha_compress_events.len() / num_cpus::get(), 1); + let events = input.get_precompile_events(SyscallCode::SHA_COMPRESS); + let chunk_size = std::cmp::max(events.len() / num_cpus::get(), 1); - let blu_batches = input - .sha_compress_events + let blu_batches = events .par_chunks(chunk_size) .map(|events| { let mut blu: HashMap> = HashMap::new(); - events.iter().for_each(|event| { + events.iter().for_each(|(_, event)| { + let event = if let PrecompileEvent::ShaCompress(event) = event { + event + } else { + unreachable!() + }; self.event_to_rows::(event, &mut None, &mut blu); }); blu @@ -103,7 +117,11 @@ impl MachineAir for ShaCompressChip { } fn included(&self, shard: &Self::Record) -> bool { - !shard.sha_compress_events.is_empty() + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + !shard.get_precompile_events(SyscallCode::SHA_COMPRESS).is_empty() + } } } @@ -115,7 +133,6 @@ impl ShaCompressChip { blu: &mut impl ByteRecord, ) { let shard = event.shard; - let channel = event.channel; let og_h = event.h; @@ -127,7 +144,6 @@ impl ShaCompressChip { let cols: &mut ShaCompressCols = row.as_mut_slice().borrow_mut(); cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); cols.clk = F::from_canonical_u32(event.clk); cols.w_ptr = F::from_canonical_u32(event.w_ptr); cols.h_ptr = F::from_canonical_u32(event.h_ptr); @@ -136,7 +152,7 @@ impl ShaCompressChip { cols.octet_num[octet_num_idx] = F::one(); cols.is_initialize = F::one(); - cols.mem.populate_read(channel, event.h_read_records[j], blu); + cols.mem.populate_read(event.h_read_records[j], blu); cols.mem_addr = F::from_canonical_u32(event.h_ptr + (j * 4) as u32); cols.a = Word::from(event.h_read_records[0].value); @@ -170,11 +186,10 @@ impl ShaCompressChip { cols.octet_num[octet_num_idx] = F::one(); cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); cols.clk = F::from_canonical_u32(event.clk); cols.w_ptr = F::from_canonical_u32(event.w_ptr); cols.h_ptr = F::from_canonical_u32(event.h_ptr); - cols.mem.populate_read(channel, event.w_i_read_records[j], blu); + cols.mem.populate_read(event.w_i_read_records[j], blu); cols.mem_addr = F::from_canonical_u32(event.w_ptr + (j * 4) as u32); let a = h_array[0]; @@ -194,39 +209,35 @@ impl ShaCompressChip { cols.g = Word::from(g); cols.h = Word::from(h); - let e_rr_6 = cols.e_rr_6.populate(blu, shard, channel, e, 6); - let e_rr_11 = cols.e_rr_11.populate(blu, shard, channel, e, 11); - let e_rr_25 = cols.e_rr_25.populate(blu, shard, channel, e, 25); - let s1_intermediate = - cols.s1_intermediate.populate(blu, shard, channel, e_rr_6, e_rr_11); - let s1 = cols.s1.populate(blu, shard, channel, s1_intermediate, e_rr_25); + let e_rr_6 = cols.e_rr_6.populate(blu, shard, e, 6); + let e_rr_11 = cols.e_rr_11.populate(blu, shard, e, 11); + let e_rr_25 = cols.e_rr_25.populate(blu, shard, e, 25); + let s1_intermediate = cols.s1_intermediate.populate(blu, shard, e_rr_6, e_rr_11); + let s1 = cols.s1.populate(blu, shard, s1_intermediate, e_rr_25); - let e_and_f = cols.e_and_f.populate(blu, shard, channel, e, f); - let e_not = cols.e_not.populate(blu, shard, channel, e); - let e_not_and_g = cols.e_not_and_g.populate(blu, shard, channel, e_not, g); - let ch = cols.ch.populate(blu, shard, channel, 
e_and_f, e_not_and_g); + let e_and_f = cols.e_and_f.populate(blu, shard, e, f); + let e_not = cols.e_not.populate(blu, shard, e); + let e_not_and_g = cols.e_not_and_g.populate(blu, shard, e_not, g); + let ch = cols.ch.populate(blu, shard, e_and_f, e_not_and_g); - let temp1 = - cols.temp1.populate(blu, shard, channel, h, s1, ch, event.w[j], SHA_COMPRESS_K[j]); + let temp1 = cols.temp1.populate(blu, shard, h, s1, ch, event.w[j], SHA_COMPRESS_K[j]); - let a_rr_2 = cols.a_rr_2.populate(blu, shard, channel, a, 2); - let a_rr_13 = cols.a_rr_13.populate(blu, shard, channel, a, 13); - let a_rr_22 = cols.a_rr_22.populate(blu, shard, channel, a, 22); - let s0_intermediate = - cols.s0_intermediate.populate(blu, shard, channel, a_rr_2, a_rr_13); - let s0 = cols.s0.populate(blu, shard, channel, s0_intermediate, a_rr_22); + let a_rr_2 = cols.a_rr_2.populate(blu, shard, a, 2); + let a_rr_13 = cols.a_rr_13.populate(blu, shard, a, 13); + let a_rr_22 = cols.a_rr_22.populate(blu, shard, a, 22); + let s0_intermediate = cols.s0_intermediate.populate(blu, shard, a_rr_2, a_rr_13); + let s0 = cols.s0.populate(blu, shard, s0_intermediate, a_rr_22); - let a_and_b = cols.a_and_b.populate(blu, shard, channel, a, b); - let a_and_c = cols.a_and_c.populate(blu, shard, channel, a, c); - let b_and_c = cols.b_and_c.populate(blu, shard, channel, b, c); - let maj_intermediate = - cols.maj_intermediate.populate(blu, shard, channel, a_and_b, a_and_c); - let maj = cols.maj.populate(blu, shard, channel, maj_intermediate, b_and_c); + let a_and_b = cols.a_and_b.populate(blu, shard, a, b); + let a_and_c = cols.a_and_c.populate(blu, shard, a, c); + let b_and_c = cols.b_and_c.populate(blu, shard, b, c); + let maj_intermediate = cols.maj_intermediate.populate(blu, shard, a_and_b, a_and_c); + let maj = cols.maj.populate(blu, shard, maj_intermediate, b_and_c); - let temp2 = cols.temp2.populate(blu, shard, channel, s0, maj); + let temp2 = cols.temp2.populate(blu, shard, s0, maj); - let d_add_temp1 = cols.d_add_temp1.populate(blu, shard, channel, d, temp1); - let temp1_add_temp2 = cols.temp1_add_temp2.populate(blu, shard, channel, temp1, temp2); + let d_add_temp1 = cols.d_add_temp1.populate(blu, shard, d, temp1); + let temp1_add_temp2 = cols.temp1_add_temp2.populate(blu, shard, temp1, temp2); h_array[7] = g; h_array[6] = f; @@ -254,7 +265,6 @@ impl ShaCompressChip { let cols: &mut ShaCompressCols = row.as_mut_slice().borrow_mut(); cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); cols.clk = F::from_canonical_u32(event.clk); cols.w_ptr = F::from_canonical_u32(event.w_ptr); cols.h_ptr = F::from_canonical_u32(event.h_ptr); @@ -263,8 +273,8 @@ impl ShaCompressChip { cols.octet_num[octet_num_idx] = F::one(); cols.is_finalize = F::one(); - cols.finalize_add.populate(blu, shard, channel, og_h[j], h_array[j]); - cols.mem.populate_write(channel, event.h_write_records[j], blu); + cols.finalize_add.populate(blu, shard, og_h[j], h_array[j]); + cols.mem.populate_write(event.h_write_records[j], blu); cols.mem_addr = F::from_canonical_u32(event.h_ptr + (j * 4) as u32); v[j] = h_array[j]; diff --git a/crates/core/machine/src/syscall/precompiles/sha256/extend/air.rs b/crates/core/machine/src/syscall/precompiles/sha256/extend/air.rs index 0344bf1b6c..f5da0f247a 100644 --- a/crates/core/machine/src/syscall/precompiles/sha256/extend/air.rs +++ b/crates/core/machine/src/syscall/precompiles/sha256/extend/air.rs @@ -2,7 +2,7 @@ use p3_air::{Air, AirBuilder, BaseAir}; use p3_field::AbstractField; use 
p3_matrix::Matrix; use sp1_core_executor::syscalls::SyscallCode; -use sp1_stark::air::SP1AirBuilder; +use sp1_stark::air::{InteractionScope, SP1AirBuilder}; use super::{ShaExtendChip, ShaExtendCols, NUM_SHA_EXTEND_COLS}; use crate::{ @@ -52,10 +52,6 @@ where .when_transition() .when_not(local.cycle_16_end.result * local.cycle_48[2]) .assert_eq(local.clk, next.clk); - builder - .when_transition() - .when_not(local.cycle_16_end.result * local.cycle_48[2]) - .assert_eq(local.channel, next.channel); builder .when_transition() .when_not(local.cycle_16_end.result * local.cycle_48[2]) @@ -64,7 +60,6 @@ where // Read w[i-15]. builder.eval_memory_access( local.shard, - local.channel, local.clk + (local.i - i_start), local.w_ptr + (local.i - AB::F::from_canonical_u32(15)) * nb_bytes_in_word, &local.w_i_minus_15, @@ -74,7 +69,6 @@ where // Read w[i-2]. builder.eval_memory_access( local.shard, - local.channel, local.clk + (local.i - i_start), local.w_ptr + (local.i - AB::F::from_canonical_u32(2)) * nb_bytes_in_word, &local.w_i_minus_2, @@ -84,7 +78,6 @@ where // Read w[i-16]. builder.eval_memory_access( local.shard, - local.channel, local.clk + (local.i - i_start), local.w_ptr + (local.i - AB::F::from_canonical_u32(16)) * nb_bytes_in_word, &local.w_i_minus_16, @@ -94,7 +87,6 @@ where // Read w[i-7]. builder.eval_memory_access( local.shard, - local.channel, local.clk + (local.i - i_start), local.w_ptr + (local.i - AB::F::from_canonical_u32(7)) * nb_bytes_in_word, &local.w_i_minus_7, @@ -108,8 +100,6 @@ where *local.w_i_minus_15.value(), 7, local.w_i_minus_15_rr_7, - local.shard, - local.channel, local.is_real, ); // w[i-15] rightrotate 18. @@ -118,8 +108,6 @@ where *local.w_i_minus_15.value(), 18, local.w_i_minus_15_rr_18, - local.shard, - local.channel, local.is_real, ); // w[i-15] rightshift 3. @@ -128,8 +116,6 @@ where *local.w_i_minus_15.value(), 3, local.w_i_minus_15_rs_3, - local.shard, - local.channel, local.is_real, ); // (w[i-15] rightrotate 7) xor (w[i-15] rightrotate 18) @@ -138,8 +124,6 @@ where local.w_i_minus_15_rr_7.value, local.w_i_minus_15_rr_18.value, local.s0_intermediate, - local.shard, - local.channel, local.is_real, ); // s0 := (w[i-15] rightrotate 7) xor (w[i-15] rightrotate 18) xor (w[i-15] rightshift 3) @@ -148,8 +132,6 @@ where local.s0_intermediate.value, local.w_i_minus_15_rs_3.value, local.s0, - local.shard, - local.channel, local.is_real, ); @@ -160,8 +142,6 @@ where *local.w_i_minus_2.value(), 17, local.w_i_minus_2_rr_17, - local.shard, - local.channel, local.is_real, ); // w[i-2] rightrotate 19. @@ -170,8 +150,6 @@ where *local.w_i_minus_2.value(), 19, local.w_i_minus_2_rr_19, - local.shard, - local.channel, local.is_real, ); // w[i-2] rightshift 10. @@ -180,8 +158,6 @@ where *local.w_i_minus_2.value(), 10, local.w_i_minus_2_rs_10, - local.shard, - local.channel, local.is_real, ); // (w[i-2] rightrotate 17) xor (w[i-2] rightrotate 19) @@ -190,8 +166,6 @@ where local.w_i_minus_2_rr_17.value, local.w_i_minus_2_rr_19.value, local.s1_intermediate, - local.shard, - local.channel, local.is_real, ); // s1 := (w[i-2] rightrotate 17) xor (w[i-2] rightrotate 19) xor (w[i-2] rightshift 10) @@ -200,8 +174,6 @@ where local.s1_intermediate.value, local.w_i_minus_2_rs_10.value, local.s1, - local.shard, - local.channel, local.is_real, ); @@ -212,8 +184,6 @@ where local.s0.value, *local.w_i_minus_7.value(), local.s1.value, - local.shard, - local.channel, local.is_real, local.s2, ); @@ -221,7 +191,6 @@ where // Write `s2` to `w[i]`. 
builder.eval_memory_access( local.shard, - local.channel, local.clk + (local.i - i_start), local.w_ptr + local.i * nb_bytes_in_word, &local.w_i, @@ -233,13 +202,13 @@ where // Receive syscall event in first row of 48-cycle. builder.receive_syscall( local.shard, - local.channel, local.clk, local.nonce, AB::F::from_canonical_u32(SyscallCode::SHA_EXTEND.syscall_id()), local.w_ptr, AB::Expr::zero(), local.cycle_48_start, + InteractionScope::Local, ); // Assert that is_real is a bool. diff --git a/crates/core/machine/src/syscall/precompiles/sha256/extend/columns.rs b/crates/core/machine/src/syscall/precompiles/sha256/extend/columns.rs index 2bb4f11afc..ff7a5f5f7c 100644 --- a/crates/core/machine/src/syscall/precompiles/sha256/extend/columns.rs +++ b/crates/core/machine/src/syscall/precompiles/sha256/extend/columns.rs @@ -17,7 +17,6 @@ pub const NUM_SHA_EXTEND_COLS: usize = size_of::>(); pub struct ShaExtendCols { /// Inputs. pub shard: T, - pub channel: T, pub nonce: T, pub clk: T, pub w_ptr: T, diff --git a/crates/core/machine/src/syscall/precompiles/sha256/extend/mod.rs b/crates/core/machine/src/syscall/precompiles/sha256/extend/mod.rs index 998ef38663..cb3aea1bbb 100644 --- a/crates/core/machine/src/syscall/precompiles/sha256/extend/mod.rs +++ b/crates/core/machine/src/syscall/precompiles/sha256/extend/mod.rs @@ -66,7 +66,7 @@ pub mod extend_tests { #[test] fn generate_trace() { let mut shard = ExecutionRecord::default(); - shard.add_events = vec![AluEvent::new(0, 0, 0, Opcode::ADD, 14, 8, 6)]; + shard.add_events = vec![AluEvent::new(0, 0, Opcode::ADD, 14, 8, 6)]; let chip = ShaExtendChip::new(); let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); diff --git a/crates/core/machine/src/syscall/precompiles/sha256/extend/trace.rs b/crates/core/machine/src/syscall/precompiles/sha256/extend/trace.rs index d38b32d85b..75e1a16533 100644 --- a/crates/core/machine/src/syscall/precompiles/sha256/extend/trace.rs +++ b/crates/core/machine/src/syscall/precompiles/sha256/extend/trace.rs @@ -4,7 +4,8 @@ use p3_field::PrimeField32; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use p3_maybe_rayon::prelude::{ParallelIterator, ParallelSlice}; use sp1_core_executor::{ - events::{ByteLookupEvent, ByteRecord, ShaExtendEvent}, + events::{ByteLookupEvent, ByteRecord, PrecompileEvent, ShaExtendEvent}, + syscalls::SyscallCode, ExecutionRecord, Program, }; use sp1_stark::air::MachineAir; @@ -30,12 +31,10 @@ impl MachineAir for ShaExtendChip { let mut new_byte_lookup_events = Vec::new(); let mut wrapped_rows = Some(rows); - for i in 0..input.sha_extend_events.len() { - self.event_to_rows( - &input.sha_extend_events[i], - &mut wrapped_rows, - &mut new_byte_lookup_events, - ); + for (_, event) in input.get_precompile_events(SyscallCode::SHA_EXTEND).iter() { + let event = + if let PrecompileEvent::ShaExtend(event) = event { event } else { unreachable!() }; + self.event_to_rows(event, &mut wrapped_rows, &mut new_byte_lookup_events); } let mut rows = wrapped_rows.unwrap(); @@ -68,14 +67,19 @@ impl MachineAir for ShaExtendChip { } fn generate_dependencies(&self, input: &Self::Record, output: &mut Self::Record) { - let chunk_size = std::cmp::max(input.sha_extend_events.len() / num_cpus::get(), 1); + let events = input.get_precompile_events(SyscallCode::SHA_EXTEND); + let chunk_size = std::cmp::max(events.len() / num_cpus::get(), 1); - let blu_batches = input - .sha_extend_events + let blu_batches = events .par_chunks(chunk_size) .map(|events| { let mut blu: HashMap> = HashMap::new(); - 
events.iter().for_each(|event| { + events.iter().for_each(|(_, event)| { + let event = if let PrecompileEvent::ShaExtend(event) = event { + event + } else { + unreachable!() + }; self.event_to_rows::(event, &mut None, &mut blu); }); blu @@ -86,7 +90,11 @@ impl MachineAir for ShaExtendChip { } fn included(&self, shard: &Self::Record) -> bool { - !shard.sha_extend_events.is_empty() + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + !shard.get_precompile_events(SyscallCode::SHA_EXTEND).is_empty() + } } } @@ -104,59 +112,40 @@ impl ShaExtendChip { cols.is_real = F::one(); cols.populate_flags(j); cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); cols.clk = F::from_canonical_u32(event.clk); cols.w_ptr = F::from_canonical_u32(event.w_ptr); - cols.w_i_minus_15.populate(event.channel, event.w_i_minus_15_reads[j], blu); - cols.w_i_minus_2.populate(event.channel, event.w_i_minus_2_reads[j], blu); - cols.w_i_minus_16.populate(event.channel, event.w_i_minus_16_reads[j], blu); - cols.w_i_minus_7.populate(event.channel, event.w_i_minus_7_reads[j], blu); + cols.w_i_minus_15.populate(event.w_i_minus_15_reads[j], blu); + cols.w_i_minus_2.populate(event.w_i_minus_2_reads[j], blu); + cols.w_i_minus_16.populate(event.w_i_minus_16_reads[j], blu); + cols.w_i_minus_7.populate(event.w_i_minus_7_reads[j], blu); // `s0 := (w[i-15] rightrotate 7) xor (w[i-15] rightrotate 18) xor (w[i-15] rightshift // 3)`. let w_i_minus_15 = event.w_i_minus_15_reads[j].value; - let w_i_minus_15_rr_7 = - cols.w_i_minus_15_rr_7.populate(blu, shard, event.channel, w_i_minus_15, 7); - let w_i_minus_15_rr_18 = - cols.w_i_minus_15_rr_18.populate(blu, shard, event.channel, w_i_minus_15, 18); - let w_i_minus_15_rs_3 = - cols.w_i_minus_15_rs_3.populate(blu, shard, event.channel, w_i_minus_15, 3); - let s0_intermediate = cols.s0_intermediate.populate( - blu, - shard, - event.channel, - w_i_minus_15_rr_7, - w_i_minus_15_rr_18, - ); - let s0 = - cols.s0.populate(blu, shard, event.channel, s0_intermediate, w_i_minus_15_rs_3); + let w_i_minus_15_rr_7 = cols.w_i_minus_15_rr_7.populate(blu, shard, w_i_minus_15, 7); + let w_i_minus_15_rr_18 = cols.w_i_minus_15_rr_18.populate(blu, shard, w_i_minus_15, 18); + let w_i_minus_15_rs_3 = cols.w_i_minus_15_rs_3.populate(blu, shard, w_i_minus_15, 3); + let s0_intermediate = + cols.s0_intermediate.populate(blu, shard, w_i_minus_15_rr_7, w_i_minus_15_rr_18); + let s0 = cols.s0.populate(blu, shard, s0_intermediate, w_i_minus_15_rs_3); // `s1 := (w[i-2] rightrotate 17) xor (w[i-2] rightrotate 19) xor (w[i-2] rightshift // 10)`. 
let w_i_minus_2 = event.w_i_minus_2_reads[j].value; - let w_i_minus_2_rr_17 = - cols.w_i_minus_2_rr_17.populate(blu, shard, event.channel, w_i_minus_2, 17); - let w_i_minus_2_rr_19 = - cols.w_i_minus_2_rr_19.populate(blu, shard, event.channel, w_i_minus_2, 19); - let w_i_minus_2_rs_10 = - cols.w_i_minus_2_rs_10.populate(blu, shard, event.channel, w_i_minus_2, 10); - let s1_intermediate = cols.s1_intermediate.populate( - blu, - shard, - event.channel, - w_i_minus_2_rr_17, - w_i_minus_2_rr_19, - ); - let s1 = - cols.s1.populate(blu, shard, event.channel, s1_intermediate, w_i_minus_2_rs_10); + let w_i_minus_2_rr_17 = cols.w_i_minus_2_rr_17.populate(blu, shard, w_i_minus_2, 17); + let w_i_minus_2_rr_19 = cols.w_i_minus_2_rr_19.populate(blu, shard, w_i_minus_2, 19); + let w_i_minus_2_rs_10 = cols.w_i_minus_2_rs_10.populate(blu, shard, w_i_minus_2, 10); + let s1_intermediate = + cols.s1_intermediate.populate(blu, shard, w_i_minus_2_rr_17, w_i_minus_2_rr_19); + let s1 = cols.s1.populate(blu, shard, s1_intermediate, w_i_minus_2_rs_10); // Compute `s2`. let w_i_minus_7 = event.w_i_minus_7_reads[j].value; let w_i_minus_16 = event.w_i_minus_16_reads[j].value; - cols.s2.populate(blu, shard, event.channel, w_i_minus_16, s0, w_i_minus_7, s1); + cols.s2.populate(blu, shard, w_i_minus_16, s0, w_i_minus_7, s1); - cols.w_i.populate(event.channel, event.w_i_writes[j], blu); + cols.w_i.populate(event.w_i_writes[j], blu); if rows.as_ref().is_some() { rows.as_mut().unwrap().push(row); diff --git a/crates/core/machine/src/syscall/precompiles/uint256/air.rs b/crates/core/machine/src/syscall/precompiles/uint256/air.rs index b0e77a6e51..54e0925f9e 100644 --- a/crates/core/machine/src/syscall/precompiles/uint256/air.rs +++ b/crates/core/machine/src/syscall/precompiles/uint256/air.rs @@ -7,7 +7,7 @@ use crate::{ air::MemoryAirBuilder, operations::{field::range::FieldLtCols, IsZeroOperation}, utils::{ - limbs_from_access, limbs_from_prev_access, pad_rows, words_to_bytes_le, + limbs_from_access, limbs_from_prev_access, pad_rows_fixed, words_to_bytes_le, words_to_bytes_le_vec, }, }; @@ -18,7 +18,7 @@ use p3_air::{Air, AirBuilder, BaseAir}; use p3_field::{AbstractField, PrimeField32}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use sp1_core_executor::{ - events::{ByteRecord, FieldOperation}, + events::{ByteRecord, FieldOperation, PrecompileEvent}, syscalls::SyscallCode, ExecutionRecord, Program, }; @@ -28,7 +28,7 @@ use sp1_curves::{ }; use sp1_derive::AlignedBorrow; use sp1_stark::{ - air::{BaseAirBuilder, MachineAir, Polynomial, SP1AirBuilder}, + air::{BaseAirBuilder, InteractionScope, MachineAir, Polynomial, SP1AirBuilder}, MachineRecord, }; use std::{ @@ -59,9 +59,6 @@ pub struct Uint256MulCols { /// The shard number of the syscall. pub shard: T, - /// The byte lookup channel. - pub channel: T, - /// The clock cycle of the syscall. pub clk: T, @@ -110,7 +107,7 @@ impl MachineAir for Uint256MulChip { ) -> RowMajorMatrix { // Generate the trace rows & corresponding records for each chunk of events concurrently. 
let rows_and_records = input - .uint256_mul_events + .get_precompile_events(SyscallCode::UINT256_MUL) .chunks(1) .map(|events| { let mut records = ExecutionRecord::default(); @@ -118,7 +115,12 @@ impl MachineAir for Uint256MulChip { let rows = events .iter() - .map(|event| { + .map(|(_, event)| { + let event = if let PrecompileEvent::Uint256Mul(event) = event { + event + } else { + unreachable!() + }; let mut row: [F; NUM_COLS] = [F::zero(); NUM_COLS]; let cols: &mut Uint256MulCols = row.as_mut_slice().borrow_mut(); @@ -131,25 +133,17 @@ impl MachineAir for Uint256MulChip { // Assign basic values to the columns. cols.is_real = F::one(); cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); cols.clk = F::from_canonical_u32(event.clk); cols.x_ptr = F::from_canonical_u32(event.x_ptr); cols.y_ptr = F::from_canonical_u32(event.y_ptr); // Populate memory columns. for i in 0..WORDS_FIELD_ELEMENT { - cols.x_memory[i].populate( - event.channel, - event.x_memory_records[i], - &mut new_byte_lookup_events, - ); - cols.y_memory[i].populate( - event.channel, - event.y_memory_records[i], - &mut new_byte_lookup_events, - ); + cols.x_memory[i] + .populate(event.x_memory_records[i], &mut new_byte_lookup_events); + cols.y_memory[i] + .populate(event.y_memory_records[i], &mut new_byte_lookup_events); cols.modulus_memory[i].populate( - event.channel, event.modulus_memory_records[i], &mut new_byte_lookup_events, ); @@ -165,7 +159,6 @@ impl MachineAir for Uint256MulChip { let result = cols.output.populate_with_modulus( &mut new_byte_lookup_events, event.shard, - event.channel, &x, &y, &effective_modulus, @@ -178,7 +171,6 @@ impl MachineAir for Uint256MulChip { cols.output_range_check.populate( &mut new_byte_lookup_events, event.shard, - event.channel, &result, &effective_modulus, ); @@ -199,16 +191,20 @@ impl MachineAir for Uint256MulChip { output.append(&mut record); } - pad_rows(&mut rows, || { - let mut row: [F; NUM_COLS] = [F::zero(); NUM_COLS]; - let cols: &mut Uint256MulCols = row.as_mut_slice().borrow_mut(); + pad_rows_fixed( + &mut rows, + || { + let mut row: [F; NUM_COLS] = [F::zero(); NUM_COLS]; + let cols: &mut Uint256MulCols = row.as_mut_slice().borrow_mut(); - let x = BigUint::zero(); - let y = BigUint::zero(); - cols.output.populate(&mut vec![], 0, 0, &x, &y, FieldOperation::Mul); + let x = BigUint::zero(); + let y = BigUint::zero(); + cols.output.populate(&mut vec![], 0, &x, &y, FieldOperation::Mul); - row - }); + row + }, + input.fixed_log2_rows::(self), + ); // Convert the trace to a row major matrix. let mut trace = @@ -225,7 +221,11 @@ impl MachineAir for Uint256MulChip { } fn included(&self, shard: &Self::Record) -> bool { - !shard.uint256_mul_events.is_empty() + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + !shard.get_precompile_events(SyscallCode::UINT256_MUL).is_empty() + } } } @@ -287,8 +287,6 @@ where &y_limbs, &p_modulus, FieldOperation::Mul, - local.shard, - local.channel, local.is_real, ); @@ -298,8 +296,6 @@ where builder, &local.output.result, &modulus_limbs, - local.shard, - local.channel, local.modulus_is_not_zero, ); builder.assert_eq( @@ -315,7 +311,6 @@ where // Read and write x. builder.eval_memory_access_slice( local.shard, - local.channel, local.clk.into() + AB::Expr::one(), local.x_ptr, &local.x_memory, @@ -326,7 +321,6 @@ where // we read it contiguously from the y_ptr memory location. 
builder.eval_memory_access_slice( local.shard, - local.channel, local.clk.into(), local.y_ptr, &[local.y_memory, local.modulus_memory].concat(), @@ -336,13 +330,13 @@ where // Receive the arguments. builder.receive_syscall( local.shard, - local.channel, local.clk, local.nonce, AB::F::from_canonical_u32(SyscallCode::UINT256_MUL.syscall_id()), local.x_ptr, local.y_ptr, local.is_real, + InteractionScope::Local, ); // Assert that is_real is a boolean. diff --git a/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_add.rs b/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_add.rs index 40d82b9adb..65e03f1c17 100644 --- a/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_add.rs +++ b/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_add.rs @@ -4,14 +4,18 @@ use core::{ }; use std::{fmt::Debug, marker::PhantomData}; -use crate::air::MemoryAirBuilder; +use crate::{air::MemoryAirBuilder, utils::zeroed_f_vec}; use generic_array::GenericArray; use num::{BigUint, Zero}; use p3_air::{Air, AirBuilder, BaseAir}; use p3_field::{AbstractField, PrimeField32}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; +use p3_maybe_rayon::prelude::{ParallelBridge, ParallelIterator, ParallelSlice}; use sp1_core_executor::{ - events::{ByteLookupEvent, ByteRecord, FieldOperation}, + events::{ + ByteLookupEvent, ByteRecord, EllipticCurveAddEvent, FieldOperation, PrecompileEvent, + SyscallEvent, + }, syscalls::SyscallCode, ExecutionRecord, Program, }; @@ -21,13 +25,13 @@ use sp1_curves::{ AffinePoint, CurveType, EllipticCurve, }; use sp1_derive::AlignedBorrow; -use sp1_stark::air::{MachineAir, SP1AirBuilder}; +use sp1_stark::air::{InteractionScope, MachineAir, SP1AirBuilder}; use typenum::Unsigned; use crate::{ memory::{MemoryCols, MemoryReadCols, MemoryWriteCols}, operations::field::field_op::FieldOpCols, - utils::{limbs_from_prev_access, pad_rows}, + utils::limbs_from_prev_access, }; pub const fn num_weierstrass_add_cols() -> usize { @@ -43,7 +47,6 @@ pub const fn num_weierstrass_add_cols() -> usize pub struct WeierstrassAddAssignCols { pub is_real: T, pub shard: T, - pub channel: T, pub nonce: T, pub clk: T, pub p_ptr: T, @@ -75,7 +78,6 @@ impl WeierstrassAddAssignChip { fn populate_field_ops( blu_events: &mut Vec, shard: u32, - channel: u8, cols: &mut WeierstrassAddAssignCols, p_x: BigUint, p_y: BigUint, @@ -87,28 +89,15 @@ impl WeierstrassAddAssignChip { // slope = (q.y - p.y) / (q.x - p.x). let slope = { - let slope_numerator = cols.slope_numerator.populate( - blu_events, - shard, - channel, - &q_y, - &p_y, - FieldOperation::Sub, - ); + let slope_numerator = + cols.slope_numerator.populate(blu_events, shard, &q_y, &p_y, FieldOperation::Sub); - let slope_denominator = cols.slope_denominator.populate( - blu_events, - shard, - channel, - &q_x, - &p_x, - FieldOperation::Sub, - ); + let slope_denominator = + cols.slope_denominator.populate(blu_events, shard, &q_x, &p_x, FieldOperation::Sub); cols.slope.populate( blu_events, shard, - channel, &slope_numerator, &slope_denominator, FieldOperation::Div, @@ -117,26 +106,13 @@ impl WeierstrassAddAssignChip { // x = slope * slope - (p.x + q.x). 
let x = { - let slope_squared = cols.slope_squared.populate( - blu_events, - shard, - channel, - &slope, - &slope, - FieldOperation::Mul, - ); - let p_x_plus_q_x = cols.p_x_plus_q_x.populate( - blu_events, - shard, - channel, - &p_x, - &q_x, - FieldOperation::Add, - ); + let slope_squared = + cols.slope_squared.populate(blu_events, shard, &slope, &slope, FieldOperation::Mul); + let p_x_plus_q_x = + cols.p_x_plus_q_x.populate(blu_events, shard, &p_x, &q_x, FieldOperation::Add); cols.x3_ins.populate( blu_events, shard, - channel, &slope_squared, &p_x_plus_q_x, FieldOperation::Sub, @@ -145,18 +121,11 @@ impl WeierstrassAddAssignChip { // y = slope * (p.x - x_3n) - p.y. { - let p_x_minus_x = cols.p_x_minus_x.populate( - blu_events, - shard, - channel, - &p_x, - &x, - FieldOperation::Sub, - ); + let p_x_minus_x = + cols.p_x_minus_x.populate(blu_events, shard, &p_x, &x, FieldOperation::Sub); let slope_times_p_x_minus_x = cols.slope_times_p_x_minus_x.populate( blu_events, shard, - channel, &slope, &p_x_minus_x, FieldOperation::Mul, @@ -164,7 +133,6 @@ impl WeierstrassAddAssignChip { cols.y3_ins.populate( blu_events, shard, - channel, &slope_times_p_x_minus_x, &p_y, FieldOperation::Sub, @@ -188,98 +156,98 @@ impl MachineAir } } + fn generate_dependencies(&self, input: &Self::Record, output: &mut Self::Record) { + let events = match E::CURVE_TYPE { + CurveType::Secp256k1 => &input.get_precompile_events(SyscallCode::SECP256K1_ADD), + CurveType::Bn254 => &input.get_precompile_events(SyscallCode::BN254_ADD), + CurveType::Bls12381 => &input.get_precompile_events(SyscallCode::BLS12381_ADD), + _ => panic!("Unsupported curve"), + }; + + let num_cols = num_weierstrass_add_cols::(); + let chunk_size = std::cmp::max(events.len() / num_cpus::get(), 1); + + let blu_events: Vec> = events + .par_chunks(chunk_size) + .map(|ops: &[(SyscallEvent, PrecompileEvent)]| { + // The blu map stores shard -> map(byte lookup event -> multiplicity). + let mut blu = Vec::new(); + ops.iter().for_each(|(_, op)| match op { + PrecompileEvent::Secp256k1Add(event) + | PrecompileEvent::Bn254Add(event) + | PrecompileEvent::Bls12381Add(event) => { + let mut row = zeroed_f_vec(num_cols); + let cols: &mut WeierstrassAddAssignCols = + row.as_mut_slice().borrow_mut(); + Self::populate_row(event, cols, &mut blu); + } + _ => unreachable!(), + }); + blu + }) + .collect(); + + for blu in blu_events { + output.add_byte_lookup_events(blu); + } + } + fn generate_trace( &self, input: &ExecutionRecord, - output: &mut ExecutionRecord, + _: &mut ExecutionRecord, ) -> RowMajorMatrix { let events = match E::CURVE_TYPE { - CurveType::Secp256k1 => &input.secp256k1_add_events, - CurveType::Bn254 => &input.bn254_add_events, - CurveType::Bls12381 => &input.bls12381_add_events, + CurveType::Secp256k1 => input.get_precompile_events(SyscallCode::SECP256K1_ADD), + CurveType::Bn254 => input.get_precompile_events(SyscallCode::BN254_ADD), + CurveType::Bls12381 => input.get_precompile_events(SyscallCode::BLS12381_ADD), _ => panic!("Unsupported curve"), }; - let mut rows = Vec::new(); - - let mut new_byte_lookup_events = Vec::new(); - - for i in 0..events.len() { - let event = &events[i]; - let mut row = vec![F::zero(); num_weierstrass_add_cols::()]; - let cols: &mut WeierstrassAddAssignCols = - row.as_mut_slice().borrow_mut(); - - // Decode affine points. 
- let p = &event.p; - let q = &event.q; - let p = AffinePoint::::from_words_le(p); - let (p_x, p_y) = (p.x, p.y); - let q = AffinePoint::::from_words_le(q); - let (q_x, q_y) = (q.x, q.y); - - // Populate basic columns. - cols.is_real = F::one(); - cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); - cols.clk = F::from_canonical_u32(event.clk); - cols.p_ptr = F::from_canonical_u32(event.p_ptr); - cols.q_ptr = F::from_canonical_u32(event.q_ptr); - - Self::populate_field_ops( - &mut new_byte_lookup_events, - event.shard, - event.channel, - cols, - p_x, - p_y, - q_x, - q_y, - ); - - // Populate the memory access columns. - for i in 0..cols.q_access.len() { - cols.q_access[i].populate( - event.channel, - event.q_memory_records[i], - &mut new_byte_lookup_events, - ); - } - for i in 0..cols.p_access.len() { - cols.p_access[i].populate( - event.channel, - event.p_memory_records[i], - &mut new_byte_lookup_events, - ); - } + let num_cols = num_weierstrass_add_cols::(); + let num_rows = input + .fixed_log2_rows::(self) + .map(|x| 1 << x) + .unwrap_or(std::cmp::max(events.len().next_power_of_two(), 4)); + let mut values = zeroed_f_vec(num_rows * num_cols); + let chunk_size = 64; + + let mut dummy_row = zeroed_f_vec(num_weierstrass_add_cols::()); + let cols: &mut WeierstrassAddAssignCols = + dummy_row.as_mut_slice().borrow_mut(); + let zero = BigUint::zero(); + Self::populate_field_ops( + &mut vec![], + 0, + cols, + zero.clone(), + zero.clone(), + zero.clone(), + zero, + ); - rows.push(row); - } - output.add_byte_lookup_events(new_byte_lookup_events); - - pad_rows(&mut rows, || { - let mut row = vec![F::zero(); num_weierstrass_add_cols::()]; - let cols: &mut WeierstrassAddAssignCols = - row.as_mut_slice().borrow_mut(); - let zero = BigUint::zero(); - Self::populate_field_ops( - &mut vec![], - 0, - 0, - cols, - zero.clone(), - zero.clone(), - zero.clone(), - zero, - ); - row + values.chunks_mut(chunk_size * num_cols).enumerate().par_bridge().for_each(|(i, rows)| { + rows.chunks_mut(num_cols).enumerate().for_each(|(j, row)| { + let idx = i * chunk_size + j; + if idx < events.len() { + let mut new_byte_lookup_events = Vec::new(); + let cols: &mut WeierstrassAddAssignCols = row.borrow_mut(); + match &events[idx].1 { + PrecompileEvent::Secp256k1Add(event) + | PrecompileEvent::Bn254Add(event) + | PrecompileEvent::Bls12381Add(event) => { + Self::populate_row(event, cols, &mut new_byte_lookup_events); + } + _ => unreachable!(), + } + } else { + row.copy_from_slice(&dummy_row); + } + }); }); // Convert the trace to a row major matrix. - let mut trace = RowMajorMatrix::new( - rows.into_iter().flatten().collect::>(), - num_weierstrass_add_cols::(), - ); + let mut trace = RowMajorMatrix::new(values, num_weierstrass_add_cols::()); // Write the nonces to the trace. 
for i in 0..trace.height() { @@ -294,11 +262,19 @@ impl MachineAir } fn included(&self, shard: &Self::Record) -> bool { - match E::CURVE_TYPE { - CurveType::Secp256k1 => !shard.secp256k1_add_events.is_empty(), - CurveType::Bn254 => !shard.bn254_add_events.is_empty(), - CurveType::Bls12381 => !shard.bls12381_add_events.is_empty(), - _ => panic!("Unsupported curve"), + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + match E::CURVE_TYPE { + CurveType::Secp256k1 => { + !shard.get_precompile_events(SyscallCode::SECP256K1_ADD).is_empty() + } + CurveType::Bn254 => !shard.get_precompile_events(SyscallCode::BN254_ADD).is_empty(), + CurveType::Bls12381 => { + !shard.get_precompile_events(SyscallCode::BLS12381_ADD).is_empty() + } + _ => panic!("Unsupported curve"), + } } } } @@ -335,33 +311,15 @@ where // slope = (q.y - p.y) / (q.x - p.x). let slope = { - local.slope_numerator.eval( - builder, - &q_y, - &p_y, - FieldOperation::Sub, - local.shard, - local.channel, - local.is_real, - ); + local.slope_numerator.eval(builder, &q_y, &p_y, FieldOperation::Sub, local.is_real); - local.slope_denominator.eval( - builder, - &q_x, - &p_x, - FieldOperation::Sub, - local.shard, - local.channel, - local.is_real, - ); + local.slope_denominator.eval(builder, &q_x, &p_x, FieldOperation::Sub, local.is_real); local.slope.eval( builder, &local.slope_numerator.result, &local.slope_denominator.result, FieldOperation::Div, - local.shard, - local.channel, local.is_real, ); @@ -370,33 +328,15 @@ where // x = slope * slope - self.x - other.x. let x = { - local.slope_squared.eval( - builder, - slope, - slope, - FieldOperation::Mul, - local.shard, - local.channel, - local.is_real, - ); + local.slope_squared.eval(builder, slope, slope, FieldOperation::Mul, local.is_real); - local.p_x_plus_q_x.eval( - builder, - &p_x, - &q_x, - FieldOperation::Add, - local.shard, - local.channel, - local.is_real, - ); + local.p_x_plus_q_x.eval(builder, &p_x, &q_x, FieldOperation::Add, local.is_real); local.x3_ins.eval( builder, &local.slope_squared.result, &local.p_x_plus_q_x.result, FieldOperation::Sub, - local.shard, - local.channel, local.is_real, ); @@ -405,23 +345,13 @@ where // y = slope * (p.x - x_3n) - q.y. { - local.p_x_minus_x.eval( - builder, - &p_x, - x, - FieldOperation::Sub, - local.shard, - local.channel, - local.is_real, - ); + local.p_x_minus_x.eval(builder, &p_x, x, FieldOperation::Sub, local.is_real); local.slope_times_p_x_minus_x.eval( builder, slope, &local.p_x_minus_x.result, FieldOperation::Mul, - local.shard, - local.channel, local.is_real, ); @@ -430,8 +360,6 @@ where &local.slope_times_p_x_minus_x.result, &p_y, FieldOperation::Sub, - local.shard, - local.channel, local.is_real, ); } @@ -450,7 +378,6 @@ where builder.eval_memory_access_slice( local.shard, - local.channel, local.clk.into(), local.q_ptr, &local.q_access, @@ -458,7 +385,6 @@ where ); builder.eval_memory_access_slice( local.shard, - local.channel, local.clk + AB::F::from_canonical_u32(1), /* We read p at +1 since p, q could be the * same. */ local.p_ptr, @@ -480,17 +406,50 @@ where builder.receive_syscall( local.shard, - local.channel, local.clk, local.nonce, syscall_id_felt, local.p_ptr, local.q_ptr, local.is_real, + InteractionScope::Local, ); } } +impl WeierstrassAddAssignChip { + pub fn populate_row( + event: &EllipticCurveAddEvent, + cols: &mut WeierstrassAddAssignCols, + new_byte_lookup_events: &mut Vec, + ) { + // Decode affine points. 
+ let p = &event.p; + let q = &event.q; + let p = AffinePoint::::from_words_le(p); + let (p_x, p_y) = (p.x, p.y); + let q = AffinePoint::::from_words_le(q); + let (q_x, q_y) = (q.x, q.y); + + // Populate basic columns. + cols.is_real = F::one(); + cols.shard = F::from_canonical_u32(event.shard); + cols.clk = F::from_canonical_u32(event.clk); + cols.p_ptr = F::from_canonical_u32(event.p_ptr); + cols.q_ptr = F::from_canonical_u32(event.q_ptr); + + Self::populate_field_ops(new_byte_lookup_events, event.shard, cols, p_x, p_y, q_x, q_y); + + // Populate the memory access columns. + for i in 0..cols.q_access.len() { + cols.q_access[i].populate(event.q_memory_records[i], new_byte_lookup_events); + } + for i in 0..cols.p_access.len() { + cols.p_access[i].populate(event.p_memory_records[i], new_byte_lookup_events); + } + } +} + #[cfg(test)] mod tests { diff --git a/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_decompress.rs b/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_decompress.rs index 01fed80a01..686f71ec77 100644 --- a/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_decompress.rs +++ b/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_decompress.rs @@ -4,14 +4,14 @@ use core::{ }; use std::fmt::Debug; -use crate::air::MemoryAirBuilder; +use crate::{air::MemoryAirBuilder, utils::zeroed_f_vec}; use generic_array::GenericArray; use num::{BigUint, Zero}; use p3_air::{Air, AirBuilder, BaseAir}; use p3_field::{AbstractField, PrimeField32}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use sp1_core_executor::{ - events::{ByteRecord, FieldOperation}, + events::{ByteRecord, FieldOperation, PrecompileEvent}, syscalls::SyscallCode, ExecutionRecord, Program, }; @@ -21,14 +21,14 @@ use sp1_curves::{ CurveType, EllipticCurve, }; use sp1_derive::AlignedBorrow; -use sp1_stark::air::{BaseAirBuilder, MachineAir, SP1AirBuilder}; +use sp1_stark::air::{BaseAirBuilder, InteractionScope, MachineAir, SP1AirBuilder}; use std::marker::PhantomData; use typenum::Unsigned; use crate::{ memory::{MemoryReadCols, MemoryReadWriteCols}, operations::field::{field_op::FieldOpCols, field_sqrt::FieldSqrtCols, range::FieldLtCols}, - utils::{bytes_to_words_le_vec, limbs_from_access, limbs_from_prev_access, pad_rows}, + utils::{bytes_to_words_le_vec, limbs_from_access, limbs_from_prev_access, pad_rows_fixed}, }; pub const fn num_weierstrass_decompress_cols() -> usize { @@ -42,7 +42,6 @@ pub const fn num_weierstrass_decompress_cols() -> pub struct WeierstrassDecompressCols { pub is_real: T, pub shard: T, - pub channel: T, pub clk: T, pub nonce: T, pub ptr: T, @@ -105,28 +104,25 @@ impl WeierstrassDecompressChip { fn populate_field_ops( record: &mut impl ByteRecord, shard: u32, - channel: u8, cols: &mut WeierstrassDecompressCols, x: BigUint, ) { // Y = sqrt(x^3 + b) - cols.range_x.populate(record, shard, channel, &x, &E::BaseField::modulus()); - let x_2 = - cols.x_2.populate(record, shard, channel, &x.clone(), &x.clone(), FieldOperation::Mul); - let x_3 = cols.x_3.populate(record, shard, channel, &x_2, &x, FieldOperation::Mul); + cols.range_x.populate(record, shard, &x, &E::BaseField::modulus()); + let x_2 = cols.x_2.populate(record, shard, &x.clone(), &x.clone(), FieldOperation::Mul); + let x_3 = cols.x_3.populate(record, shard, &x_2, &x, FieldOperation::Mul); let b = E::b_int(); - let x_3_plus_b = - cols.x_3_plus_b.populate(record, shard, channel, &x_3, &b, FieldOperation::Add); + let x_3_plus_b = cols.x_3_plus_b.populate(record, shard, &x_3, &b, 
FieldOperation::Add); let sqrt_fn = match E::CURVE_TYPE { CurveType::Secp256k1 => secp256k1_sqrt, CurveType::Bls12381 => bls12381_sqrt, _ => panic!("Unsupported curve"), }; - let y = cols.y.populate(record, shard, channel, &x_3_plus_b, sqrt_fn); + let y = cols.y.populate(record, shard, &x_3_plus_b, sqrt_fn); let zero = BigUint::zero(); - cols.neg_y.populate(record, shard, channel, &zero, &y, FieldOperation::Sub); + cols.neg_y.populate(record, shard, &zero, &y, FieldOperation::Sub); } } @@ -150,8 +146,8 @@ impl MachineAir output: &mut ExecutionRecord, ) -> RowMajorMatrix { let events = match E::CURVE_TYPE { - CurveType::Secp256k1 => &input.k256_decompress_events, - CurveType::Bls12381 => &input.bls12381_decompress_events, + CurveType::Secp256k1 => input.get_precompile_events(SyscallCode::SECP256K1_DECOMPRESS), + CurveType::Bls12381 => input.get_precompile_events(SyscallCode::BLS12381_DECOMPRESS), _ => panic!("Unsupported curve"), }; @@ -163,42 +159,32 @@ impl MachineAir let modulus = E::BaseField::modulus(); - for i in 0..events.len() { - let event = events[i].clone(); - let mut row = vec![F::zero(); width]; + for (_, event) in events { + let event = match (E::CURVE_TYPE, event) { + (CurveType::Secp256k1, PrecompileEvent::Secp256k1Decompress(event)) => event, + (CurveType::Bls12381, PrecompileEvent::Bls12381Decompress(event)) => event, + _ => panic!("Unsupported curve"), + }; + + let mut row = zeroed_f_vec(width); let cols: &mut WeierstrassDecompressCols = row[0..weierstrass_width].borrow_mut(); cols.is_real = F::from_bool(true); cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); - cols.channel = F::from_canonical_u8(event.channel); cols.clk = F::from_canonical_u32(event.clk); cols.ptr = F::from_canonical_u32(event.ptr); cols.sign_bit = F::from_bool(event.sign_bit); let x = BigUint::from_bytes_le(&event.x_bytes); - Self::populate_field_ops( - &mut new_byte_lookup_events, - event.shard, - event.channel, - cols, - x, - ); + Self::populate_field_ops(&mut new_byte_lookup_events, event.shard, cols, x); for i in 0..cols.x_access.len() { - cols.x_access[i].populate( - event.channel, - event.x_memory_records[i], - &mut new_byte_lookup_events, - ); + cols.x_access[i].populate(event.x_memory_records[i], &mut new_byte_lookup_events); } for i in 0..cols.y_access.len() { - cols.y_access[i].populate_write( - event.channel, - event.y_memory_records[i], - &mut new_byte_lookup_events, - ); + cols.y_access[i] + .populate_write(event.y_memory_records[i], &mut new_byte_lookup_events); } if matches!(self.sign_rule, SignChoiceRule::Lexicographic) { @@ -217,7 +203,6 @@ impl MachineAir choice_cols.neg_y_range_check.populate( &mut new_byte_lookup_events, event.shard, - event.channel, &neg_y, &modulus, ); @@ -225,7 +210,6 @@ impl MachineAir choice_cols.neg_y_range_check.populate( &mut new_byte_lookup_events, event.shard, - event.channel, &decompressed_y, &modulus, ); @@ -237,7 +221,6 @@ impl MachineAir choice_cols.comparison_lt_cols.populate( &mut new_byte_lookup_events, event.shard, - event.channel, &neg_y, &decompressed_y, ); @@ -248,7 +231,6 @@ impl MachineAir choice_cols.comparison_lt_cols.populate( &mut new_byte_lookup_events, event.shard, - event.channel, &decompressed_y, &neg_y, ); @@ -259,22 +241,26 @@ impl MachineAir } output.add_byte_lookup_events(new_byte_lookup_events); - pad_rows(&mut rows, || { - let mut row = vec![F::zero(); width]; - let cols: &mut WeierstrassDecompressCols = - row.as_mut_slice()[0..weierstrass_width].borrow_mut(); - - // take X of 
the generator as a dummy value to make sure Y^2 = X^3 + b holds - let dummy_value = E::generator().0; - let dummy_bytes = dummy_value.to_bytes_le(); - let words = bytes_to_words_le_vec(&dummy_bytes); - for i in 0..cols.x_access.len() { - cols.x_access[i].access.value = words[i].into(); - } + pad_rows_fixed( + &mut rows, + || { + let mut row = zeroed_f_vec(width); + let cols: &mut WeierstrassDecompressCols = + row.as_mut_slice()[0..weierstrass_width].borrow_mut(); + + // take X of the generator as a dummy value to make sure Y^2 = X^3 + b holds + let dummy_value = E::generator().0; + let dummy_bytes = dummy_value.to_bytes_le(); + let words = bytes_to_words_le_vec(&dummy_bytes); + for i in 0..cols.x_access.len() { + cols.x_access[i].access.value = words[i].into(); + } - Self::populate_field_ops(&mut vec![], 0, 0, cols, dummy_value); - row - }); + Self::populate_field_ops(&mut vec![], 0, cols, dummy_value); + row + }, + input.fixed_log2_rows::(self), + ); let mut trace = RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), width); @@ -289,10 +275,18 @@ impl MachineAir } fn included(&self, shard: &Self::Record) -> bool { - match E::CURVE_TYPE { - CurveType::Secp256k1 => !shard.k256_decompress_events.is_empty(), - CurveType::Bls12381 => !shard.bls12381_decompress_events.is_empty(), - _ => panic!("Unsupported curve"), + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + match E::CURVE_TYPE { + CurveType::Secp256k1 => { + !shard.get_precompile_events(SyscallCode::SECP256K1_DECOMPRESS).is_empty() + } + CurveType::Bls12381 => { + !shard.get_precompile_events(SyscallCode::BLS12381_DECOMPRESS).is_empty() + } + _ => panic!("Unsupported curve"), + } } } } @@ -341,28 +335,10 @@ where builder, &x, &limbs_from_vec::::Limbs, AB::F>(max_num_limbs), - local.shard, - local.channel, - local.is_real, - ); - local.x_2.eval( - builder, - &x, - &x, - FieldOperation::Mul, - local.shard, - local.channel, - local.is_real, - ); - local.x_3.eval( - builder, - &local.x_2.result, - &x, - FieldOperation::Mul, - local.shard, - local.channel, local.is_real, ); + local.x_2.eval(builder, &x, &x, FieldOperation::Mul, local.is_real); + local.x_3.eval(builder, &local.x_2.result, &x, FieldOperation::Mul, local.is_real); let b = E::b_int(); let b_const = E::BaseField::to_limbs_field::(&b); local.x_3_plus_b.eval( @@ -370,8 +346,6 @@ where &local.x_3.result, &b_const, FieldOperation::Add, - local.shard, - local.channel, local.is_real, ); @@ -380,19 +354,10 @@ where &[AB::Expr::zero()].iter(), &local.y.multiplication.result, FieldOperation::Sub, - local.shard, - local.channel, local.is_real, ); - local.y.eval( - builder, - &local.x_3_plus_b.result, - local.y.lsb, - local.shard, - local.channel, - local.is_real, - ); + local.y.eval(builder, &local.x_3_plus_b.result, local.y.lsb, local.is_real); let y_limbs: Limbs::Limbs> = limbs_from_access(&local.y_access); @@ -438,8 +403,6 @@ where builder, &local.neg_y.result, &modulus_limbs, - local.shard, - local.channel, local.is_real, ); @@ -494,8 +457,6 @@ where builder, &local.y.multiplication.result, &local.neg_y.result, - local.shard, - local.channel, choice_cols.when_sqrt_y_res_is_lt, ); @@ -503,8 +464,6 @@ where builder, &local.neg_y.result, &local.y.multiplication.result, - local.shard, - local.channel, choice_cols.when_neg_y_res_is_lt, ); } @@ -513,7 +472,6 @@ where for i in 0..num_words_field_element { builder.eval_memory_access( local.shard, - local.channel, local.clk, local.ptr.into() + AB::F::from_canonical_u32((i as u32) * 4 + num_limbs as 
u32), &local.x_access[i], @@ -523,7 +481,6 @@ where for i in 0..num_words_field_element { builder.eval_memory_access( local.shard, - local.channel, local.clk, local.ptr.into() + AB::F::from_canonical_u32((i as u32) * 4), &local.y_access[i], @@ -543,13 +500,13 @@ where builder.receive_syscall( local.shard, - local.channel, local.clk, local.nonce, syscall_id, local.ptr, local.sign_bit, local.is_real, + InteractionScope::Local, ); } } diff --git a/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_double.rs b/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_double.rs index 16dfe59264..fc39b858c0 100644 --- a/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_double.rs +++ b/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_double.rs @@ -4,15 +4,18 @@ use core::{ }; use std::{fmt::Debug, marker::PhantomData}; -use crate::air::MemoryAirBuilder; +use crate::{air::MemoryAirBuilder, utils::zeroed_f_vec}; use generic_array::GenericArray; use num::{BigUint, Zero}; use p3_air::{Air, AirBuilder, BaseAir}; use p3_field::{AbstractField, PrimeField32}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; -use p3_maybe_rayon::prelude::{ParallelIterator, ParallelSlice}; +use p3_maybe_rayon::prelude::{ParallelBridge, ParallelIterator, ParallelSlice}; use sp1_core_executor::{ - events::{ByteLookupEvent, ByteRecord, FieldOperation}, + events::{ + ByteLookupEvent, ByteRecord, EllipticCurveDoubleEvent, FieldOperation, PrecompileEvent, + SyscallEvent, + }, syscalls::SyscallCode, ExecutionRecord, Program, }; @@ -22,15 +25,12 @@ use sp1_curves::{ AffinePoint, CurveType, EllipticCurve, }; use sp1_derive::AlignedBorrow; -use sp1_stark::{ - air::{MachineAir, SP1AirBuilder}, - MachineRecord, -}; +use sp1_stark::air::{InteractionScope, MachineAir, SP1AirBuilder}; use crate::{ memory::{MemoryCols, MemoryWriteCols}, operations::field::field_op::FieldOpCols, - utils::{limbs_from_prev_access, pad_rows}, + utils::limbs_from_prev_access, }; pub const fn num_weierstrass_double_cols() -> usize { @@ -46,7 +46,6 @@ pub const fn num_weierstrass_double_cols() -> usi pub struct WeierstrassDoubleAssignCols { pub is_real: T, pub shard: T, - pub channel: T, pub nonce: T, pub clk: T, pub p_ptr: T, @@ -77,7 +76,6 @@ impl WeierstrassDoubleAssignChip { fn populate_field_ops( blu_events: &mut Vec, shard: u32, - channel: u8, cols: &mut WeierstrassDoubleAssignCols, p_x: BigUint, p_y: BigUint, @@ -90,18 +88,11 @@ impl WeierstrassDoubleAssignChip { let slope = { // slope_numerator = a + (p.x * p.x) * 3. 
let slope_numerator = { - let p_x_squared = cols.p_x_squared.populate( - blu_events, - shard, - channel, - &p_x, - &p_x, - FieldOperation::Mul, - ); + let p_x_squared = + cols.p_x_squared.populate(blu_events, shard, &p_x, &p_x, FieldOperation::Mul); let p_x_squared_times_3 = cols.p_x_squared_times_3.populate( blu_events, shard, - channel, &p_x_squared, &BigUint::from(3u32), FieldOperation::Mul, @@ -109,7 +100,6 @@ impl WeierstrassDoubleAssignChip { cols.slope_numerator.populate( blu_events, shard, - channel, &a, &p_x_squared_times_3, FieldOperation::Add, @@ -120,7 +110,6 @@ impl WeierstrassDoubleAssignChip { let slope_denominator = cols.slope_denominator.populate( blu_events, shard, - channel, &BigUint::from(2u32), &p_y, FieldOperation::Mul, @@ -129,7 +118,6 @@ impl WeierstrassDoubleAssignChip { cols.slope.populate( blu_events, shard, - channel, &slope_numerator, &slope_denominator, FieldOperation::Div, @@ -138,26 +126,13 @@ impl WeierstrassDoubleAssignChip { // x = slope * slope - (p.x + p.x). let x = { - let slope_squared = cols.slope_squared.populate( - blu_events, - shard, - channel, - &slope, - &slope, - FieldOperation::Mul, - ); - let p_x_plus_p_x = cols.p_x_plus_p_x.populate( - blu_events, - shard, - channel, - &p_x, - &p_x, - FieldOperation::Add, - ); + let slope_squared = + cols.slope_squared.populate(blu_events, shard, &slope, &slope, FieldOperation::Mul); + let p_x_plus_p_x = + cols.p_x_plus_p_x.populate(blu_events, shard, &p_x, &p_x, FieldOperation::Add); cols.x3_ins.populate( blu_events, shard, - channel, &slope_squared, &p_x_plus_p_x, FieldOperation::Sub, @@ -166,18 +141,11 @@ impl WeierstrassDoubleAssignChip { // y = slope * (p.x - x) - p.y. { - let p_x_minus_x = cols.p_x_minus_x.populate( - blu_events, - shard, - channel, - &p_x, - &x, - FieldOperation::Sub, - ); + let p_x_minus_x = + cols.p_x_minus_x.populate(blu_events, shard, &p_x, &x, FieldOperation::Sub); let slope_times_p_x_minus_x = cols.slope_times_p_x_minus_x.populate( blu_events, shard, - channel, &slope, &p_x_minus_x, FieldOperation::Mul, @@ -185,7 +153,6 @@ impl WeierstrassDoubleAssignChip { cols.y3_ins.populate( blu_events, shard, - channel, &slope_times_p_x_minus_x, &p_y, FieldOperation::Sub, @@ -209,94 +176,91 @@ impl MachineAir } } - fn generate_trace( - &self, - input: &ExecutionRecord, - output: &mut ExecutionRecord, - ) -> RowMajorMatrix { - // collects the events based on the curve type. + fn generate_dependencies(&self, input: &Self::Record, output: &mut Self::Record) { let events = match E::CURVE_TYPE { - CurveType::Secp256k1 => &input.secp256k1_double_events, - CurveType::Bn254 => &input.bn254_double_events, - CurveType::Bls12381 => &input.bls12381_double_events, + CurveType::Secp256k1 => &input.get_precompile_events(SyscallCode::SECP256K1_DOUBLE), + CurveType::Bn254 => &input.get_precompile_events(SyscallCode::BN254_DOUBLE), + CurveType::Bls12381 => &input.get_precompile_events(SyscallCode::BLS12381_DOUBLE), _ => panic!("Unsupported curve"), }; + let num_cols = num_weierstrass_double_cols::(); let chunk_size = std::cmp::max(events.len() / num_cpus::get(), 1); - // Generate the trace rows & corresponding records for each chunk of events in parallel. 
- let rows_and_records = events + let blu_events: Vec> = events .par_chunks(chunk_size) - .map(|events| { - let mut record = ExecutionRecord::default(); - let mut new_byte_lookup_events = Vec::new(); - - let rows = events - .iter() - .map(|event| { - let mut row = - vec![F::zero(); num_weierstrass_double_cols::()]; + .map(|ops: &[(SyscallEvent, PrecompileEvent)]| { + // The blu map stores shard -> map(byte lookup event -> multiplicity). + let mut blu = Vec::new(); + ops.iter().for_each(|(_, op)| match op { + PrecompileEvent::Secp256k1Double(event) + | PrecompileEvent::Bn254Double(event) + | PrecompileEvent::Bls12381Double(event) => { + let mut row = zeroed_f_vec(num_cols); let cols: &mut WeierstrassDoubleAssignCols = row.as_mut_slice().borrow_mut(); - - // Decode affine points. - let p = &event.p; - let p = AffinePoint::::from_words_le(p); - let (p_x, p_y) = (p.x, p.y); - - // Populate basic columns. - cols.is_real = F::one(); - cols.shard = F::from_canonical_u32(event.shard); - cols.channel = F::from_canonical_u8(event.channel); - cols.clk = F::from_canonical_u32(event.clk); - cols.p_ptr = F::from_canonical_u32(event.p_ptr); - - Self::populate_field_ops( - &mut new_byte_lookup_events, - event.shard, - event.channel, - cols, - p_x, - p_y, - ); - - // Populate the memory access columns. - for i in 0..cols.p_access.len() { - cols.p_access[i].populate( - event.channel, - event.p_memory_records[i], - &mut new_byte_lookup_events, - ); - } - row - }) - .collect::>(); - record.add_byte_lookup_events(new_byte_lookup_events); - (rows, record) + Self::populate_row(event, cols, &mut blu); + } + _ => unreachable!(), + }); + blu }) - .collect::>(); + .collect(); - // Generate the trace rows for each event. - let mut rows = Vec::new(); - for mut row_and_record in rows_and_records { - rows.extend(row_and_record.0); - output.append(&mut row_and_record.1); + for blu in blu_events { + output.add_byte_lookup_events(blu); } + } + + fn generate_trace( + &self, + input: &ExecutionRecord, + _: &mut ExecutionRecord, + ) -> RowMajorMatrix { + // collects the events based on the curve type. 
+ let events = match E::CURVE_TYPE { + CurveType::Secp256k1 => input.get_precompile_events(SyscallCode::SECP256K1_DOUBLE), + CurveType::Bn254 => input.get_precompile_events(SyscallCode::BN254_DOUBLE), + CurveType::Bls12381 => input.get_precompile_events(SyscallCode::BLS12381_DOUBLE), + _ => panic!("Unsupported curve"), + }; - pad_rows(&mut rows, || { - let mut row = vec![F::zero(); num_weierstrass_double_cols::()]; - let cols: &mut WeierstrassDoubleAssignCols = - row.as_mut_slice().borrow_mut(); - let zero = BigUint::zero(); - Self::populate_field_ops(&mut vec![], 0, 0, cols, zero.clone(), zero.clone()); - row + let num_cols = num_weierstrass_double_cols::(); + let num_rows = input + .fixed_log2_rows::(self) + .map(|x| 1 << x) + .unwrap_or(std::cmp::max(events.len().next_power_of_two(), 4)); + let mut values = zeroed_f_vec(num_rows * num_cols); + let chunk_size = 64; + + let mut dummy_row = zeroed_f_vec(num_cols); + let cols: &mut WeierstrassDoubleAssignCols = + dummy_row.as_mut_slice().borrow_mut(); + let zero = BigUint::zero(); + Self::populate_field_ops(&mut vec![], 0, cols, zero.clone(), zero); + + values.chunks_mut(chunk_size * num_cols).enumerate().par_bridge().for_each(|(i, rows)| { + rows.chunks_mut(num_cols).enumerate().for_each(|(j, row)| { + let idx = i * chunk_size + j; + if idx < events.len() { + let mut new_byte_lookup_events = Vec::new(); + let cols: &mut WeierstrassDoubleAssignCols = row.borrow_mut(); + match &events[idx].1 { + PrecompileEvent::Secp256k1Double(event) + | PrecompileEvent::Bn254Double(event) + | PrecompileEvent::Bls12381Double(event) => { + Self::populate_row(event, cols, &mut new_byte_lookup_events); + } + _ => unreachable!(), + } + } else { + row.copy_from_slice(&dummy_row); + } + }); }); // Convert the trace to a row major matrix. - let mut trace = RowMajorMatrix::new( - rows.into_iter().flatten().collect::>(), - num_weierstrass_double_cols::(), - ); + let mut trace = RowMajorMatrix::new(values, num_weierstrass_double_cols::()); // Write the nonces to the trace. for i in 0..trace.height() { @@ -311,11 +275,47 @@ impl MachineAir } fn included(&self, shard: &Self::Record) -> bool { - match E::CURVE_TYPE { - CurveType::Secp256k1 => !shard.secp256k1_double_events.is_empty(), - CurveType::Bn254 => !shard.bn254_double_events.is_empty(), - CurveType::Bls12381 => !shard.bls12381_double_events.is_empty(), - _ => panic!("Unsupported curve"), + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + match E::CURVE_TYPE { + CurveType::Secp256k1 => { + !shard.get_precompile_events(SyscallCode::SECP256K1_DOUBLE).is_empty() + } + CurveType::Bn254 => { + !shard.get_precompile_events(SyscallCode::BN254_DOUBLE).is_empty() + } + CurveType::Bls12381 => { + !shard.get_precompile_events(SyscallCode::BLS12381_DOUBLE).is_empty() + } + _ => panic!("Unsupported curve"), + } + } + } +} + +impl WeierstrassDoubleAssignChip { + pub fn populate_row( + event: &EllipticCurveDoubleEvent, + cols: &mut WeierstrassDoubleAssignCols, + new_byte_lookup_events: &mut Vec, + ) { + // Decode affine points. + let p = &event.p; + let p = AffinePoint::::from_words_le(p); + let (p_x, p_y) = (p.x, p.y); + + // Populate basic columns. + cols.is_real = F::one(); + cols.shard = F::from_canonical_u32(event.shard); + cols.clk = F::from_canonical_u32(event.clk); + cols.p_ptr = F::from_canonical_u32(event.p_ptr); + + Self::populate_field_ops(new_byte_lookup_events, event.shard, cols, p_x, p_y); + + // Populate the memory access columns. 
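The rewritten `generate_trace` above pre-sizes a flat, zero-initialized buffer of `num_rows * num_cols`, walks it in fixed 64-row chunks with `par_bridge`, fills real rows from events, and copies a precomputed dummy row into the padding. A small sketch of that fill-and-pad pattern is shown here; `NUM_COLS` and `populate_row` are illustrative stand-ins for the chip's column count and row population.

```rust
use rayon::prelude::*;

// Illustrative stand-ins for the chip's width and per-event row fill.
const NUM_COLS: usize = 4;

fn populate_row(event: u32, row: &mut [f64]) {
    row.iter_mut().enumerate().for_each(|(j, col)| *col = event as f64 + j as f64);
}

fn main() {
    let events: Vec<u32> = (0..10).collect();
    // Pad the height to a power of two, with a small minimum, like the trace does.
    let num_rows = events.len().next_power_of_two().max(4);
    let mut values = vec![0.0f64; num_rows * NUM_COLS];

    // Precompute a single dummy row once and reuse it for every padding row.
    let mut dummy_row = vec![0.0f64; NUM_COLS];
    populate_row(0, &mut dummy_row);

    let chunk_size = 64;
    values.chunks_mut(chunk_size * NUM_COLS).enumerate().par_bridge().for_each(|(i, rows)| {
        rows.chunks_mut(NUM_COLS).enumerate().for_each(|(j, row)| {
            let idx = i * chunk_size + j;
            if idx < events.len() {
                populate_row(events[idx], row);
            } else {
                row.copy_from_slice(&dummy_row);
            }
        });
    });

    assert_eq!(values.len(), num_rows * NUM_COLS);
}
```

Back in the diff, `populate_row` finishes below by populating the memory-access columns.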
+ for i in 0..cols.p_access.len() { + cols.p_access[i].populate(event.p_memory_records[i], new_byte_lookup_events); } } } @@ -353,23 +353,13 @@ where let slope = { // slope_numerator = a + (p.x * p.x) * 3. { - local.p_x_squared.eval( - builder, - &p_x, - &p_x, - FieldOperation::Mul, - local.shard, - local.channel, - local.is_real, - ); + local.p_x_squared.eval(builder, &p_x, &p_x, FieldOperation::Mul, local.is_real); local.p_x_squared_times_3.eval( builder, &local.p_x_squared.result, &E::BaseField::to_limbs_field::(&BigUint::from(3u32)), FieldOperation::Mul, - local.shard, - local.channel, local.is_real, ); @@ -378,8 +368,6 @@ where &a, &local.p_x_squared_times_3.result, FieldOperation::Add, - local.shard, - local.channel, local.is_real, ); }; @@ -390,8 +378,6 @@ where &E::BaseField::to_limbs_field::(&BigUint::from(2u32)), &p_y, FieldOperation::Mul, - local.shard, - local.channel, local.is_real, ); @@ -400,8 +386,6 @@ where &local.slope_numerator.result, &local.slope_denominator.result, FieldOperation::Div, - local.shard, - local.channel, local.is_real, ); @@ -410,31 +394,13 @@ where // x = slope * slope - (p.x + p.x). let x = { - local.slope_squared.eval( - builder, - slope, - slope, - FieldOperation::Mul, - local.shard, - local.channel, - local.is_real, - ); - local.p_x_plus_p_x.eval( - builder, - &p_x, - &p_x, - FieldOperation::Add, - local.shard, - local.channel, - local.is_real, - ); + local.slope_squared.eval(builder, slope, slope, FieldOperation::Mul, local.is_real); + local.p_x_plus_p_x.eval(builder, &p_x, &p_x, FieldOperation::Add, local.is_real); local.x3_ins.eval( builder, &local.slope_squared.result, &local.p_x_plus_p_x.result, FieldOperation::Sub, - local.shard, - local.channel, local.is_real, ); &local.x3_ins.result @@ -442,22 +408,12 @@ where // y = slope * (p.x - x) - p.y. { - local.p_x_minus_x.eval( - builder, - &p_x, - x, - FieldOperation::Sub, - local.shard, - local.channel, - local.is_real, - ); + local.p_x_minus_x.eval(builder, &p_x, x, FieldOperation::Sub, local.is_real); local.slope_times_p_x_minus_x.eval( builder, slope, &local.p_x_minus_x.result, FieldOperation::Mul, - local.shard, - local.channel, local.is_real, ); local.y3_ins.eval( @@ -465,8 +421,6 @@ where &local.slope_times_p_x_minus_x.result, &p_y, FieldOperation::Sub, - local.shard, - local.channel, local.is_real, ); } @@ -485,7 +439,6 @@ where builder.eval_memory_access_slice( local.shard, - local.channel, local.clk.into(), local.p_ptr, &local.p_access, @@ -506,13 +459,13 @@ where builder.receive_syscall( local.shard, - local.channel, local.clk, local.nonce, syscall_id_felt, local.p_ptr, AB::Expr::zero(), local.is_real, + InteractionScope::Local, ); } } diff --git a/crates/core/machine/src/utils/mod.rs b/crates/core/machine/src/utils/mod.rs index cb54e568d5..124cd402f9 100644 --- a/crates/core/machine/src/utils/mod.rs +++ b/crates/core/machine/src/utils/mod.rs @@ -7,6 +7,7 @@ mod span; mod tracer; pub use logger::*; +use p3_field::Field; pub use prove::*; use sp1_curves::params::Limbs; pub use span::*; @@ -54,19 +55,12 @@ pub fn limbs_from_access>(cols: &[M]) Limbs(sized) } -pub fn pad_rows(rows: &mut Vec, row_fn: impl Fn() -> T) { - let nb_rows = rows.len(); - let mut padded_nb_rows = nb_rows.next_power_of_two(); - if padded_nb_rows < 16 { - padded_nb_rows = 16; - } - if padded_nb_rows == nb_rows { - return; - } - let dummy_row = row_fn(); - rows.resize(padded_nb_rows, dummy_row); -} - +/// Pad to a power of two, with an option to specify the power. 
+// +// The `rows` argument represents the rows of a matrix stored in row-major order. The function will +// pad the rows using `row_fn` to create the padded rows. The padding will be to the next power of +// of two of `size_log_2` is `None`, or to the specified `size_log_2` if it is not `None`. The +// function will panic of the number of rows is larger than the specified `size_log2` pub fn pad_rows_fixed( rows: &mut Vec, row_fn: impl Fn() -> R, @@ -84,7 +78,7 @@ pub fn next_power_of_two(n: usize, fixed_power: Option) -> usize { Some(power) => { let padded_nb_rows = 1 << power; if n * 2 < padded_nb_rows { - tracing::warn!( + tracing::debug!( "fixed log2 rows can be potentially reduced: got {}, expected {}", n, padded_nb_rows @@ -205,3 +199,14 @@ pub fn sp1_debug_mode() -> bool { let value = std::env::var("SP1_DEBUG").unwrap_or_else(|_| "false".to_string()); value == "1" || value.to_lowercase() == "true" } + +/// Returns a vector of zeros of the given length. This is faster than vec![F::zero(); len] which +/// requires copying. +/// +/// This function is safe to use only for fields that can be transmuted from 0u32. +pub fn zeroed_f_vec(len: usize) -> Vec { + debug_assert!(std::mem::size_of::() == 4); + + let vec = vec![0u32; len]; + unsafe { std::mem::transmute::, Vec>(vec) } +} diff --git a/crates/core/machine/src/utils/prove.rs b/crates/core/machine/src/utils/prove.rs index 24263b7fdb..04d06dc3f9 100644 --- a/crates/core/machine/src/utils/prove.rs +++ b/crates/core/machine/src/utils/prove.rs @@ -8,24 +8,28 @@ use std::{ }; use web_time::Instant; -use crate::riscv::RiscvAir; -use p3_challenger::CanObserve; +use crate::riscv::{CoreShapeConfig, RiscvAir}; +use p3_challenger::FieldChallenger; use p3_maybe_rayon::prelude::*; use serde::{de::DeserializeOwned, Serialize}; use size::Size; -use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, MachineVerificationError}; +use sp1_stark::{ + air::InteractionScope, baby_bear_poseidon2::BabyBearPoseidon2, MachineProvingKey, + MachineVerificationError, +}; use std::thread::ScopedJoinHandle; use thiserror::Error; use p3_baby_bear::BabyBear; use p3_field::PrimeField32; +use p3_matrix::Matrix; use crate::{ io::SP1Stdin, riscv::cost::CostEstimator, utils::{chunk_vec, concurrency::TurnBasedSync}, }; -use sp1_core_executor::events::sorted_table_lines; +use sp1_core_executor::{events::sorted_table_lines, ExecutionState}; use sp1_primitives::io::SP1PublicValues; use sp1_core_executor::{ @@ -97,6 +101,7 @@ pub fn prove>>( stdin: &SP1Stdin, config: SC, opts: SP1CoreOpts, + shape_config: Option<&CoreShapeConfig>, ) -> Result<(MachineProof, Vec, u64), SP1CoreProverError> where SC::Challenger: 'static + Clone + Send, @@ -108,16 +113,25 @@ where let machine = RiscvAir::machine(config); let prover = P::new(machine); let (pk, _) = prover.setup(&program); - prove_with_context::(&prover, &pk, program, stdin, opts, Default::default()) + prove_with_context::( + &prover, + &pk, + program, + stdin, + opts, + Default::default(), + shape_config, + ) } pub fn prove_with_context>>( prover: &P, - pk: &StarkProvingKey, + pk: &P::DeviceProvingKey, program: Program, stdin: &SP1Stdin, opts: SP1CoreOpts, context: SP1Context, + shape_config: Option<&CoreShapeConfig>, ) -> Result<(MachineProof, Vec, u64), SP1CoreProverError> where SC::Val: PrimeField32, @@ -128,9 +142,15 @@ where { // Setup the runtime. 
let mut runtime = Executor::with_context(program.clone(), opts, context); + let maximal_shapes = match shape_config.as_ref() { + Some(shape_config) => shape_config.maximal_core_shapes(), + None => vec![], + }; + runtime.maximal_shapes = Some(maximal_shapes.into_iter().map(|s| s.inner).collect()); runtime.write_vecs(&stdin.buffer); for proof in stdin.proofs.iter() { - runtime.write_proof(proof.0.clone(), proof.1.clone()); + let (proof, vk) = proof.clone(); + runtime.write_proof(proof, vk); } #[cfg(feature = "debug")] @@ -208,9 +228,6 @@ where let span = tracing::Span::current().clone(); - #[cfg(feature = "debug")] - let all_records_tx = all_records_tx.clone(); - let handle = s.spawn(move || { let _span = span.enter(); tracing::debug_span!("phase 1 trace generation").in_scope(|| { @@ -221,15 +238,19 @@ where if let Ok((index, mut checkpoint, done)) = received { // Trace the checkpoint and reconstruct the execution records. let (mut records, _) = tracing::debug_span!("trace checkpoint") - .in_scope(|| trace_checkpoint(program.clone(), &checkpoint, opts)); + .in_scope(|| { + trace_checkpoint::( + program.clone(), + &checkpoint, + opts, + shape_config, + ) + }); + log::info!("generated {} records", records.len()); reset_seek(&mut checkpoint); - // Generate the dependencies. - tracing::debug_span!("generate dependencies").in_scope(|| { - prover.machine().generate_dependencies(&mut records, &opts) - }); - // Wait for our turn to update the state. + log::info!("waiting for turn {}", index); record_gen_sync.wait_for_turn(index); // Update the public values & prover state for the shards which contain @@ -255,6 +276,7 @@ where // See if any deferred shards are ready to be commited to. let mut deferred = deferred.split(done, opts.split_opts); + log::info!("deferred {} records", deferred.len()); // Update the public values & prover state for the shards which do not // contain "cpu events" before committing to them. @@ -277,20 +299,31 @@ where records.append(&mut deferred); // Collect the checkpoints to be used again in the phase 2 prover. + log::info!("collecting checkpoints"); let mut checkpoints = checkpoints.lock().unwrap(); checkpoints.push_back((index, checkpoint, done)); // Let another worker update the state. record_gen_sync.advance_turn(); - #[cfg(feature = "debug")] - all_records_tx.send(records.clone()).unwrap(); + // Fix the shape of the records. + if let Some(shape_config) = shape_config { + for record in records.iter_mut() { + tracing::info!("fixing shape"); + shape_config.fix_shape(record).unwrap(); + } + } // Generate the traces. - let traces = records - .par_iter() - .map(|record| prover.generate_traces(record)) - .collect::>(); + let mut traces = vec![]; + tracing::debug_span!("generate traces", index).in_scope(|| { + traces = records + .par_iter() + .map(|record| { + prover.generate_traces(record, InteractionScope::Global) + }) + .collect::>(); + }); // Wait for our turn. trace_gen_sync.wait_for_turn(index); @@ -318,13 +351,10 @@ where p1_record_and_trace_gen_handles.push(handle); } drop(p1_records_and_traces_tx); - #[cfg(feature = "debug")] - drop(all_records_tx); // Create the challenger and observe the verifying key. let mut challenger = prover.config().challenger(); - challenger.observe(pk.commit.clone()); - challenger.observe(pk.pc_start); + pk.observe_into(&mut challenger); // Spawn the phase 1 prover thread. 
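Both record-generation loops coordinate through `TurnBasedSync`: a worker may finish tracing its checkpoint at any time, but it must `wait_for_turn(index)` before publishing state updates and call `advance_turn()` afterwards, so shard state advances strictly in checkpoint order. A minimal sketch of such a turn gate using a `Mutex` and `Condvar` follows; it is a stand-in, not the actual `TurnBasedSync` implementation from the utils module.

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

struct TurnSync {
    turn: Mutex<usize>,
    cv: Condvar,
}

impl TurnSync {
    fn new() -> Self {
        Self { turn: Mutex::new(0), cv: Condvar::new() }
    }

    fn wait_for_turn(&self, index: usize) {
        let mut turn = self.turn.lock().unwrap();
        while *turn != index {
            turn = self.cv.wait(turn).unwrap();
        }
    }

    fn advance_turn(&self) {
        let mut turn = self.turn.lock().unwrap();
        *turn += 1;
        self.cv.notify_all();
    }
}

fn main() {
    let sync = Arc::new(TurnSync::new());
    let output = Arc::new(Mutex::new(Vec::new()));
    let mut handles = Vec::new();

    for index in 0..4usize {
        let (sync, output) = (Arc::clone(&sync), Arc::clone(&output));
        handles.push(thread::spawn(move || {
            // Simulate per-checkpoint work finishing out of order.
            thread::sleep(Duration::from_millis((4 - index as u64) * 10));
            // Publish strictly in checkpoint order.
            sync.wait_for_turn(index);
            output.lock().unwrap().push(index);
            sync.advance_turn();
        }));
    }

    for handle in handles {
        handle.join().unwrap();
    }
    assert_eq!(*output.lock().unwrap(), vec![0, 1, 2, 3]);
}
```

The phase 1 prover thread spawned below then consumes the committed shards in that same deterministic order.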
let phase_1_prover_span = tracing::Span::current().clone(); @@ -350,18 +380,32 @@ where .zip(traces.into_par_iter()) .map(|(record, traces)| { let _span = span.enter(); - let data = prover.commit(record, traces); - let main_commit = data.main_commit.clone(); + + for (name, trace) in traces.clone() { + let trace_width = trace.width(); + let trace_height = trace.height(); + tracing::debug!( + "Phase 1 area: {:<15} | Main Cols = {:<5} | Rows = {:<5} | Cells = {:<10}", + name, + trace_width, + trace_height, + trace_width * trace_height, + ); + + } + + let data = prover.commit(&record, traces); + let phase1_main_commit = data.main_commit.clone(); drop(data); - main_commit + phase1_main_commit }) .collect::>(); - // Observe the commitments. + // the commitments. for (commit, public_values) in commitments.into_iter().zip(public_values.into_iter()) { - prover.observe(&mut challenger, commit, &public_values); + prover.observe(&mut challenger, commit.clone(), &public_values); } }); } @@ -377,15 +421,26 @@ where p1_record_and_trace_gen_handles.into_iter().for_each(|handle| handle.join().unwrap()); // Wait until the phase 1 prover has completely finished. - let challenger = phase_1_prover_handle.join().unwrap(); + let mut challenger = phase_1_prover_handle.join().unwrap(); + + // Sample for the global permutation challenges. + // Obtain the challenges used for the global permutation argument. + let mut global_permutation_challenges: Vec = Vec::new(); + for _ in 0..2 { + global_permutation_challenges.push(challenger.sample_ext_element()); + } // Spawn the phase 2 record generator thread. let p2_record_gen_sync = Arc::new(TurnBasedSync::new()); let p2_trace_gen_sync = Arc::new(TurnBasedSync::new()); let (p2_records_and_traces_tx, p2_records_and_traces_rx) = - sync_channel::<(Vec, Vec>)>>)>( - opts.records_and_traces_channel_capacity, - ); + sync_channel::<( + Vec, + ( + Vec>)>>, + Vec>)>>, + ), + )>(opts.records_and_traces_channel_capacity); let p2_records_and_traces_tx = Arc::new(Mutex::new(p2_records_and_traces_tx)); let report_aggregate = Arc::new(Mutex::new(ExecutionReport::default())); @@ -404,6 +459,10 @@ where let program = program.clone(); let span = tracing::Span::current().clone(); + + #[cfg(feature = "debug")] + let all_records_tx = all_records_tx.clone(); + let handle = s.spawn(move || { let _span = span.enter(); tracing::debug_span!("phase 2 trace generation").in_scope(|| { @@ -413,15 +472,18 @@ where if let Some((index, mut checkpoint, done)) = received { // Trace the checkpoint and reconstruct the execution records. let (mut records, report) = tracing::debug_span!("trace checkpoint") - .in_scope(|| trace_checkpoint(program.clone(), &checkpoint, opts)); + .in_scope(|| { + trace_checkpoint::( + program.clone(), + &checkpoint, + opts, + shape_config, + ) + }); + log::info!("generated {} records", records.len()); *report_aggregate.lock().unwrap() += report; reset_seek(&mut checkpoint); - // Generate the dependencies. - tracing::debug_span!("generate dependencies").in_scope(|| { - prover.machine().generate_dependencies(&mut records, &opts) - }); - // Wait for our turn to update the state. record_gen_sync.wait_for_turn(index); @@ -448,6 +510,7 @@ where // See if any deferred shards are ready to be commited to. let mut deferred = deferred.split(done, opts.split_opts); + log::info!("deferred {} records", deferred.len()); // Update the public values & prover state for the shards which do not // contain "cpu events" before committing to them. 
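That ordering matters because of the Fiat–Shamir flow: phase 1 observes the proving key and every shard's main-trace commitment into the challenger, and only after all of them have been absorbed are the two global permutation challenges sampled (the `sample_ext_element` loop after the phase 1 join). Phase 2 then reuses those challenges when opening each shard. The following toy transcript illustrates the observe-then-sample discipline using the sha2 crate (0.10.x API) purely for illustration; the prover actually uses the Poseidon2-based challenger from sp1-stark.

```rust
use sha2::{Digest, Sha256};

/// A toy transcript: absorb commitments first, then squeeze challenges.
struct Transcript {
    hasher: Sha256,
}

impl Transcript {
    fn new() -> Self {
        Self { hasher: Sha256::new() }
    }

    /// Observe a main-trace commitment (phase 1).
    fn observe(&mut self, commitment: &[u8]) {
        self.hasher.update(commitment);
    }

    /// Sample a challenge after *all* commitments were observed (phase 2).
    fn sample(&mut self) -> [u8; 32] {
        let digest: [u8; 32] = self.hasher.clone().finalize().into();
        // Re-absorb the output so successive samples differ.
        self.hasher.update(digest);
        digest
    }
}

fn main() {
    let mut transcript = Transcript::new();

    // Phase 1: observe every shard's main commitment, in shard order.
    for commitment in [b"shard-0-commit", b"shard-1-commit"] {
        transcript.observe(commitment);
    }

    // Phase 2: only now sample the global permutation challenges.
    let alpha = transcript.sample();
    let beta = transcript.sample();
    assert_ne!(alpha, beta);
}
```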
@@ -469,29 +532,64 @@ where } records.append(&mut deferred); + // Generate the dependencies. + tracing::debug_span!("generate dependencies", index).in_scope(|| { + prover.machine().generate_dependencies(&mut records, &opts, None); + }); + // Let another worker update the state. record_gen_sync.advance_turn(); + // Fix the shape of the records. + if let Some(shape_config) = shape_config { + for record in records.iter_mut() { + shape_config.fix_shape(record).unwrap(); + } + } + + #[cfg(feature = "debug")] + all_records_tx.send(records.clone()).unwrap(); + // Generate the traces. - let traces = records - .par_iter() - .map(|record| prover.generate_traces(record)) - .collect::>(); + let mut local_traces = Vec::new(); + tracing::debug_span!("generate local traces", index).in_scope(|| { + local_traces = records + .par_iter() + .map(|record| { + prover.generate_traces(record, InteractionScope::Local) + }) + .collect::>(); + }); + + let mut global_traces = Vec::new(); + tracing::debug_span!("generate global traces", index).in_scope(|| { + global_traces = records + .par_iter() + .map(|record| { + prover.generate_traces(record, InteractionScope::Global) + }) + .collect::>(); + }); trace_gen_sync.wait_for_turn(index); - // Send the records to the phase 1 prover. + // Send the records to the phase 2 prover. let chunked_records = chunk_vec(records, opts.shard_batch_size); - let chunked_traces = chunk_vec(traces, opts.shard_batch_size); - chunked_records.into_iter().zip(chunked_traces).for_each( - |(records, traces)| { + let chunked_global_traces = + chunk_vec(global_traces, opts.shard_batch_size); + let chunked_local_traces = + chunk_vec(local_traces, opts.shard_batch_size); + chunked_records + .into_iter() + .zip(chunked_global_traces.into_iter()) + .zip(chunked_local_traces.into_iter()) + .for_each(|((records, global_traces), local_traces)| { records_and_traces_tx .lock() .unwrap() - .send((records, traces)) + .send((records, (global_traces, local_traces))) .unwrap(); - }, - ); + }); trace_gen_sync.advance_turn(); } else { @@ -503,6 +601,8 @@ where p2_record_and_trace_gen_handles.push(handle); } drop(p2_records_and_traces_tx); + #[cfg(feature = "debug")] + drop(all_records_tx); // Spawn the phase 2 prover thread. let p2_prover_span = tracing::Span::current().clone(); @@ -515,10 +615,32 @@ where let span = tracing::Span::current().clone(); shard_proofs.par_extend( records.into_par_iter().zip(traces.into_par_iter()).map( - |(record, traces)| { + |(record, (global_traces, local_traces))| { let _span = span.enter(); - let data = prover.commit(record, traces); - prover.open(pk, data, &mut challenger.clone()).unwrap() + + let global_data = prover.commit(&record, global_traces); + let local_data = prover.commit(&record, local_traces); + + let proof = prover + .open( + pk, + Some(global_data), + local_data, + &mut challenger.clone(), + &global_permutation_challenges, + ) + .unwrap(); + + #[cfg(debug_assertions)] + { + if let Some(shape) = record.shape { + assert_eq!( + proof.shape(), + shape.clone().into_iter().collect(), + ); + } + } + proof }, ), ); @@ -572,7 +694,7 @@ where { let all_records = all_records_rx.iter().flatten().collect::>(); let mut challenger = prover.machine().config().challenger(); - prover.machine().debug_constraints(pk, all_records, &mut challenger); + prover.machine().debug_constraints(&pk.to_host(), all_records, &mut challenger); } Ok((proof, public_values_stream, cycles)) @@ -581,39 +703,50 @@ where /// Runs a program and returns the public values stream. 
pub fn run_test_io>>( - program: Program, + mut program: Program, inputs: SP1Stdin, ) -> Result> { + let shape_config = CoreShapeConfig::::default(); + shape_config.fix_preprocessed_shape(&mut program).unwrap(); let runtime = tracing::debug_span!("runtime.run(...)").in_scope(|| { let mut runtime = Executor::new(program, SP1CoreOpts::default()); + runtime.maximal_shapes = + Some(shape_config.maximal_core_shapes().into_iter().map(|s| s.inner).collect()); runtime.write_vecs(&inputs.buffer); runtime.run().unwrap(); runtime }); let public_values = SP1PublicValues::from(&runtime.state.public_values_stream); - let _ = run_test_core::

<P>(runtime, inputs)?; + + let _ = run_test_core::<P>

(runtime, inputs, Some(&shape_config))?; Ok(public_values) } pub fn run_test>>( - program: Program, + mut program: Program, ) -> Result, MachineVerificationError> { + let shape_config = CoreShapeConfig::default(); + shape_config.fix_preprocessed_shape(&mut program).unwrap(); let runtime = tracing::debug_span!("runtime.run(...)").in_scope(|| { let mut runtime = Executor::new(program, SP1CoreOpts::default()); + runtime.maximal_shapes = + Some(shape_config.maximal_core_shapes().into_iter().map(|s| s.inner).collect()); runtime.run().unwrap(); runtime }); - run_test_core::

<P>(runtime, SP1Stdin::new()) + run_test_core::<P>

(runtime, SP1Stdin::new(), Some(&shape_config)) } #[allow(unused_variables)] pub fn run_test_core>>( runtime: Executor, inputs: SP1Stdin, + shape_config: Option<&CoreShapeConfig>, ) -> Result, MachineVerificationError> { let config = BabyBearPoseidon2::new(); let machine = RiscvAir::machine(config); let prover = P::new(machine); + let (pk, _) = prover.setup(runtime.program.as_ref()); let (proof, output, _) = prove_with_context( &prover, @@ -622,6 +755,7 @@ pub fn run_test_core>>( &inputs, SP1CoreOpts::default(), SP1Context::default(), + shape_config, ) .unwrap(); @@ -636,9 +770,9 @@ pub fn run_test_core>>( #[allow(unused_variables)] pub fn run_test_machine_with_prover>( + prover: &P, records: Vec, - machine: StarkMachine, - pk: StarkProvingKey, + pk: P::DeviceProvingKey, vk: StarkVerifyingKey, ) -> Result, MachineVerificationError> where @@ -654,9 +788,12 @@ where PcsProverData: Send + Sync + Serialize + DeserializeOwned, OpeningProof: Send + Sync, { - let prover = P::new(machine); let mut challenger = prover.config().challenger(); let prove_span = tracing::debug_span!("prove").entered(); + + #[cfg(feature = "debug")] + prover.machine().debug_constraints(&pk.to_host(), records.clone(), &mut challenger.clone()); + let proof = prover.prove(&pk, records, &mut challenger, SP1CoreOpts::default()).unwrap(); prove_span.exit(); let nb_bytes = bincode::serialize(&proof).unwrap().len(); @@ -688,22 +825,37 @@ where PcsProverData: Send + Sync + Serialize + DeserializeOwned, OpeningProof: Send + Sync, { - run_test_machine_with_prover::>(records, machine, pk, vk) + let prover = CpuProver::new(machine); + run_test_machine_with_prover::>(&prover, records, pk, vk) } -fn trace_checkpoint( +fn trace_checkpoint( program: Program, file: &File, opts: SP1CoreOpts, -) -> (Vec, ExecutionReport) { + shape_config: Option<&CoreShapeConfig>, +) -> (Vec, ExecutionReport) +where + ::Val: PrimeField32, +{ + let maximal_shapes = match shape_config { + Some(shape_config) => shape_config.maximal_core_shapes(), + None => vec![], + }; let mut reader = std::io::BufReader::new(file); - let state = bincode::deserialize_from(&mut reader).expect("failed to deserialize state"); - let mut runtime = Executor::recover(program.clone(), state, opts); + let state: ExecutionState = + bincode::deserialize_from(&mut reader).expect("failed to deserialize state"); + let mut runtime = Executor::recover(program.clone(), state.clone(), opts); + runtime.maximal_shapes = Some(maximal_shapes.into_iter().map(|s| s.inner).collect()); + // We already passed the deferred proof verifier when creating checkpoints, so the proofs were // already verified. So here we use a noop verifier to not print any warnings. runtime.subproof_verifier = Arc::new(NoOpSubproofVerifier); - let (events, _) = runtime.execute_record().unwrap(); - (events, runtime.report) + + // Execute from the checkpoint. 
+ let (records, _) = runtime.execute_record().unwrap(); + + (records, runtime.report) } fn reset_seek(file: &mut File) { diff --git a/crates/cuda/Cargo.toml b/crates/cuda/Cargo.toml index 4409e479f2..851d6ee8c4 100644 --- a/crates/cuda/Cargo.toml +++ b/crates/cuda/Cargo.toml @@ -12,15 +12,11 @@ categories = { workspace = true } [dependencies] sp1-core-machine = { workspace = true } sp1-prover = { workspace = true } -sp1-stark = { workspace = true } prost = "0.13" -prost-types = "0.13" bincode = "1.3.3" serde = { version = "1.0.197", features = ["derive"] } -serde_json = "1.0.114" tokio = { version = "^1.38.0", features = ["full"] } tracing = "0.1.40" -tracing-subscriber = "0.3.18" twirp = { package = "twirp-rs", version = "0.13.0-succinct" } ctrlc = "3.4.4" diff --git a/crates/cuda/src/lib.rs b/crates/cuda/src/lib.rs index fa86e7364c..f299d59d52 100644 --- a/crates/cuda/src/lib.rs +++ b/crates/cuda/src/lib.rs @@ -11,17 +11,21 @@ use std::{ }; use crate::proto::api::ProverServiceClient; - +use async_trait::async_trait; use proto::api::ReadyRequest; +use reqwest::{Request, Response}; use serde::{Deserialize, Serialize}; -use sp1_core_machine::{io::SP1Stdin, utils::SP1CoreProverError}; +use sp1_core_machine::{io::SP1Stdin, reduce::SP1ReduceProof, utils::SP1CoreProverError}; use sp1_prover::{ - types::SP1ProvingKey, InnerSC, OuterSC, SP1CoreProof, SP1RecursionProverError, SP1ReduceProof, - SP1VerifyingKey, + types::SP1ProvingKey, InnerSC, OuterSC, SP1CoreProof, SP1RecursionProverError, SP1VerifyingKey, }; -use sp1_stark::ShardProof; use tokio::task::block_in_place; -use twirp::{url::Url, Client}; +use twirp::{ + async_trait, + reqwest::{self}, + url::Url, + Client, ClientError, Middleware, Next, +}; #[rustfmt::skip] pub mod proto { @@ -63,7 +67,7 @@ pub struct CompressRequestPayload { /// The core proof. pub proof: SP1CoreProof, /// The deferred proofs. - pub deferred_proofs: Vec>, + pub deferred_proofs: Vec>, } /// The payload for the [sp1_prover::SP1Prover::shrink] method. @@ -87,7 +91,7 @@ impl SP1CudaProver { /// [SP1ProverClient] that can be used to communicate with the container. pub fn new() -> Result> { let container_name = "sp1-gpu"; - let image_name = "succinctlabs/sp1-gpu:v1.2.0-rc2"; + let image_name = "public.ecr.aws/succinct-labs/sp1-gpu:445c33b"; let cleaned_up = Arc::new(AtomicBool::new(false)); let cleanup_name = container_name; @@ -176,7 +180,7 @@ impl SP1CudaProver { ) .expect("failed to create client"); - let timeout = Duration::from_secs(60); // Set a 60-second timeout + let timeout = Duration::from_secs(300); let start_time = Instant::now(); block_on(async { @@ -204,11 +208,15 @@ impl SP1CudaProver { Ok(()) })?; + let client = Client::new( + Url::parse("http://localhost:3000/twirp/").expect("failed to parse url"), + reqwest::Client::new(), + vec![Box::new(LoggingMiddleware) as Box], + ) + .expect("failed to create client"); + Ok(SP1CudaProver { - client: Client::from_base_url( - Url::parse("http://localhost:3000/twirp/").expect("failed to parse url"), - ) - .expect("failed to create client"), + client, container_name: container_name.to_string(), cleaned_up: cleaned_up.clone(), }) @@ -248,7 +256,7 @@ impl SP1CudaProver { &self, vk: &SP1VerifyingKey, proof: SP1CoreProof, - deferred_proofs: Vec>, + deferred_proofs: Vec>, ) -> Result, SP1RecursionProverError> { let payload = CompressRequestPayload { vk: vk.clone(), proof, deferred_proofs }; let request = @@ -315,7 +323,10 @@ impl Drop for SP1CudaProver { /// Cleans up the a docker container with the given name. 
fn cleanup_container(container_name: &str) { if let Err(e) = Command::new("docker").args(["rm", "-f", container_name]).output() { - eprintln!("Failed to remove container: {}. You may need to manually remove it using 'docker rm -f {}'", e, container_name); + eprintln!( + "Failed to remove container: {}. You may need to manually remove it using 'docker rm -f {}'", + e, container_name + ); } } @@ -334,13 +345,32 @@ pub fn block_on(fut: impl Future) -> T { } } +struct LoggingMiddleware; + +pub type Result = std::result::Result; + +#[async_trait] +impl Middleware for LoggingMiddleware { + async fn handle(&self, req: Request, next: Next<'_>) -> Result { + let response = next.run(req).await; + match response { + Ok(response) => { + tracing::info!("{:?}", response); + Ok(response) + } + Err(e) => Err(e), + } + } +} + #[cfg(feature = "protobuf")] #[cfg(test)] mod tests { - use sp1_core_machine::utils::{setup_logger, tests::FIBONACCI_ELF}; - use sp1_prover::{ - components::DefaultProverComponents, InnerSC, SP1CoreProof, SP1Prover, SP1ReduceProof, + use sp1_core_machine::{ + reduce::SP1ReduceProof, + utils::{setup_logger, tests::FIBONACCI_ELF}, }; + use sp1_prover::{components::DefaultProverComponents, InnerSC, SP1CoreProof, SP1Prover}; use twirp::{url::Url, Client}; use crate::{ diff --git a/crates/curves/Cargo.toml b/crates/curves/Cargo.toml index 85e468f7b7..8d7f16afcc 100644 --- a/crates/curves/Cargo.toml +++ b/crates/curves/Cargo.toml @@ -27,6 +27,12 @@ sp1-stark = { workspace = true } sp1-primitives = { workspace = true } p3-field = { workspace = true } itertools = "0.13.0" +rug = { version = "1.26.1", optional = true } +cfg-if = "1.0.0" [dev-dependencies] rand = "0.8.5" +num = { version = "0.4.3", features = ["rand"] } + +[features] +bigint-rug = ["rug"] diff --git a/crates/curves/src/utils.rs b/crates/curves/src/utils.rs index 23a19c11f9..65cc20bb8b 100644 --- a/crates/curves/src/utils.rs +++ b/crates/curves/src/utils.rs @@ -26,3 +26,20 @@ pub fn biguint_to_limbs(integer: &BigUint) -> [u8; N] { pub fn biguint_from_limbs(limbs: &[u8]) -> BigUint { BigUint::from_bytes_le(limbs) } + +cfg_if::cfg_if! { + if #[cfg(feature = "bigint-rug")] { + pub fn biguint_to_rug(integer: &BigUint) -> rug::Integer { + let mut int = rug::Integer::new(); + unsafe { + int.assign_bytes_radix_unchecked(integer.to_bytes_be().as_slice(), 256, false); + } + int + } + + pub fn rug_to_biguint(integer: &rug::Integer) -> BigUint { + let be_bytes = integer.to_digits::(rug::integer::Order::MsfBe); + BigUint::from_bytes_be(&be_bytes) + } + } +} diff --git a/crates/curves/src/weierstrass/mod.rs b/crates/curves/src/weierstrass/mod.rs index 266a93c1ca..871fdc9b67 100644 --- a/crates/curves/src/weierstrass/mod.rs +++ b/crates/curves/src/weierstrass/mod.rs @@ -9,6 +9,9 @@ use crate::{ AffinePoint, EllipticCurve, EllipticCurveParameters, }; +#[cfg(feature = "bigint-rug")] +use crate::utils::{biguint_to_rug, rug_to_biguint}; + pub mod bls12_381; pub mod bn254; pub mod secp256k1; @@ -173,44 +176,105 @@ impl AffinePoint> { panic!("Error: Points are the same. Use sw_double instead."); } - let p = biguint_to_dashu(&E::BaseField::modulus()); - let self_x = biguint_to_dashu(&self.x); - let self_y = biguint_to_dashu(&self.y); - let other_x = biguint_to_dashu(&other.x); - let other_y = biguint_to_dashu(&other.y); + cfg_if::cfg_if! 
{ + if #[cfg(feature = "bigint-rug")] { + self.sw_add_rug(other) + } else { + let p = biguint_to_dashu(&E::BaseField::modulus()); + let self_x = biguint_to_dashu(&self.x); + let self_y = biguint_to_dashu(&self.y); + let other_x = biguint_to_dashu(&other.x); + let other_y = biguint_to_dashu(&other.y); + + let slope_numerator = (&p + &other_y - &self_y) % &p; + let slope_denominator = (&p + &other_x - &self_x) % &p; + let slope_denom_inverse = + dashu_modpow(&slope_denominator, &(&p - &dashu::integer::UBig::from(2u32)), &p); + let slope = (slope_numerator * &slope_denom_inverse) % &p; + + let x_3n = (&slope * &slope + &p + &p - &self_x - &other_x) % &p; + let y_3n = (&slope * &(&p + &self_x - &x_3n) + &p - &self_y) % &p; + + AffinePoint::new(dashu_to_biguint(&x_3n), dashu_to_biguint(&y_3n)) + } + } + } + + pub fn sw_double(&self) -> AffinePoint> { + cfg_if::cfg_if! { + if #[cfg(feature = "bigint-rug")] { + self.sw_double_rug() + } else { + let p = biguint_to_dashu(&E::BaseField::modulus()); + let a = biguint_to_dashu(&E::a_int()); + + let self_x = biguint_to_dashu(&self.x); + let self_y = biguint_to_dashu(&self.y); + + let slope_numerator = (&a + &(&self_x * &self_x) * 3u32) % &p; + + let slope_denominator = (&self_y * 2u32) % &p; + let slope_denom_inverse = + dashu_modpow(&slope_denominator, &(&p - &dashu::integer::UBig::from(2u32)), &p); + // let slope_denom_inverse = slope_denominator.modpow(&(&p - 2u32), &p); + let slope = (slope_numerator * &slope_denom_inverse) % &p; - let slope_numerator = (&p + &other_y - &self_y) % &p; - let slope_denominator = (&p + &other_x - &self_x) % &p; - let slope_denom_inverse = - dashu_modpow(&slope_denominator, &(&p - &dashu::integer::UBig::from(2u32)), &p); + let x_3n = (&slope * &slope + &p + &p - &self_x - &self_x) % &p; + + let y_3n = (&slope * &(&p + &self_x - &x_3n) + &p - &self_y) % &p; + + AffinePoint::new(dashu_to_biguint(&x_3n), dashu_to_biguint(&y_3n)) + } + } + } + + #[cfg(feature = "bigint-rug")] + pub fn sw_add_rug(&self, other: &AffinePoint>) -> AffinePoint> { + use rug::Complete; + let p = biguint_to_rug(&E::BaseField::modulus()); + let self_x = biguint_to_rug(&self.x); + let self_y = biguint_to_rug(&self.y); + let other_x = biguint_to_rug(&other.x); + let other_y = biguint_to_rug(&other.y); + + let slope_numerator = ((&p + &other_y).complete() - &self_y) % &p; + let slope_denominator = ((&p + &other_x).complete() - &self_x) % &p; + let slope_denom_inverse = slope_denominator + .pow_mod_ref(&(&p - &rug::Integer::from(2u32)).complete(), &p) + .unwrap() + .complete(); let slope = (slope_numerator * &slope_denom_inverse) % &p; - let x_3n = (&slope * &slope + &p + &p - &self_x - &other_x) % &p; - let y_3n = (&slope * &(&p + &self_x - &x_3n) + &p - &self_y) % &p; + let x_3n = ((&slope * &slope + &p).complete() + &p - &self_x - &other_x) % &p; + let y_3n = ((&slope * &((&p + &self_x).complete() - &x_3n) + &p).complete() - &self_y) % &p; - AffinePoint::new(dashu_to_biguint(&x_3n), dashu_to_biguint(&y_3n)) + AffinePoint::new(rug_to_biguint(&x_3n), rug_to_biguint(&y_3n)) } - pub fn sw_double(&self) -> AffinePoint> { - let p = biguint_to_dashu(&E::BaseField::modulus()); - let a = biguint_to_dashu(&E::a_int()); + #[cfg(feature = "bigint-rug")] + pub fn sw_double_rug(&self) -> AffinePoint> { + use rug::Complete; + let p = biguint_to_rug(&E::BaseField::modulus()); + let a = biguint_to_rug(&E::a_int()); + + let self_x = biguint_to_rug(&self.x); + let self_y = biguint_to_rug(&self.y); - let self_x = biguint_to_dashu(&self.x); - let self_y = 
biguint_to_dashu(&self.y); + let slope_numerator = (&a + &(&self_x * &self_x).complete() * 3u32).complete() % &p; - let slope_numerator = (&a + &(&self_x * &self_x) * 3u32) % &p; + let slope_denominator = (&self_y * 2u32).complete() % &p; + let slope_denom_inverse = slope_denominator + .pow_mod_ref(&(&p - &rug::Integer::from(2u32)).complete(), &p) + .unwrap() + .complete(); - let slope_denominator = (&self_y * 2u32) % &p; - let slope_denom_inverse = - dashu_modpow(&slope_denominator, &(&p - &dashu::integer::UBig::from(2u32)), &p); - // let slope_denom_inverse = slope_denominator.modpow(&(&p - 2u32), &p); let slope = (slope_numerator * &slope_denom_inverse) % &p; - let x_3n = (&slope * &slope + &p + &p - &self_x - &self_x) % &p; + let x_3n = ((&slope * &slope + &p).complete() + ((&p - &self_x).complete() - &self_x)) % &p; - let y_3n = (&slope * &(&p + &self_x - &x_3n) + &p - &self_y) % &p; + let y_3n = ((&slope * &((&p + &self_x).complete() - &x_3n) + &p).complete() - &self_y) % &p; - AffinePoint::new(dashu_to_biguint(&x_3n), dashu_to_biguint(&y_3n)) + AffinePoint::new(rug_to_biguint(&x_3n), rug_to_biguint(&y_3n)) } } diff --git a/crates/derive/Cargo.toml b/crates/derive/Cargo.toml index 23f2facd54..cd53029341 100644 --- a/crates/derive/Cargo.toml +++ b/crates/derive/Cargo.toml @@ -13,6 +13,5 @@ categories = { workspace = true } proc-macro = true [dependencies] -proc-macro2 = "1.0" quote = "1.0" syn = { version = "1.0", features = ["full"] } diff --git a/crates/derive/src/lib.rs b/crates/derive/src/lib.rs index 1896c312ef..924be7d913 100644 --- a/crates/derive/src/lib.rs +++ b/crates/derive/src/lib.rs @@ -184,6 +184,13 @@ pub fn machine_air_derive(input: TokenStream) -> TokenStream { } }); + let commit_scope_arms = variants.iter().map(|(variant_name, field)| { + let field_ty = &field.ty; + quote! { + #name::#variant_name(x) => <#field_ty as sp1_stark::air::MachineAir>::commit_scope(x) + } + }); + let machine_air = quote! { impl #impl_generics sp1_stark::air::MachineAir for #name #ty_generics #where_clause { type Record = #execution_record_path; @@ -236,6 +243,12 @@ pub fn machine_air_derive(input: TokenStream) -> TokenStream { #(#included_arms,)* } } + + fn commit_scope(&self) -> InteractionScope { + match self { + #(#commit_scope_arms,)* + } + } } }; diff --git a/crates/perf/Cargo.toml b/crates/perf/Cargo.toml new file mode 100644 index 0000000000..fd4e92c8b2 --- /dev/null +++ b/crates/perf/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "sp1-perf" +description = "A performance evaluation tool for SP1 programs." 
+readme = "../../README.md" +version = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +repository = { workspace = true } +keywords = { workspace = true } +categories = { workspace = true } + +[dependencies] +sp1-prover = { workspace = true } +sp1-sdk = { workspace = true } +p3-baby-bear = { workspace = true } +sp1-stark = { workspace = true } +sp1-cuda = { workspace = true } + +anyhow = "1.0.83" +clap = { version = "4.5.9", features = ["derive"] } +csv = "1.3.0" +serde = "1.0.204" +bincode = "1.3.3" +time = "0.3.26" +slack-rust = { package = "slack-rust-rs", version = "0.0.1" } +tokio = { version = "1.39.0", features = ["full"] } +reqwest = { version = "0.12.4", features = ["json"] } +serde_json = "1.0.104" + +[features] +native-gnark = ["sp1-sdk/native-gnark"] diff --git a/crates/perf/README.md b/crates/perf/README.md new file mode 100644 index 0000000000..1ed8833542 --- /dev/null +++ b/crates/perf/README.md @@ -0,0 +1,36 @@ +# SP1 Testing Suite + +## Prerequisites + +- [GitHub CLI](https://cli.github.com/) + +## Run the testing suite + +Set the workloads you want to run in the `workflow.sh` file. The workloads are keys in the +`sp1-testing-suite` s3 bucket. + +``` +CPU_WORKLOADS=("fibonacci-17k" "ssz-withdrawals") +CUDA_WORKLOADS=() +NETWORK_WORKLOADS=() +``` + +Run the workflow. +``` +./workflow.sh +``` + +## View the results + +Visit the [actions](https://github.com/succinctlabs/sp1/actions) tab on GitHub to view the results. + +## Uploading new workloads + +Take any existing binary that uses `sp1-sdk` and run it with `SP1_DUMP=1`. This will dump the +program and stdin to the current directory. + +``` +SP1_DUMP=1 cargo run --release +aws s3 cp program.bin s3://sp1-testing-suite//program.bin +aws s3 cp stdin.bin s3://sp1-testing-suite//stdin.bin +``` diff --git a/crates/perf/run_s3.sh b/crates/perf/run_s3.sh new file mode 100755 index 0000000000..f26278ee9b --- /dev/null +++ b/crates/perf/run_s3.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +# Check if both arguments are provided +if [ $# -ne 2 ]; then + echo "Usage: $0 " + exit 1 +fi + +s3_path=$1 +stage=$2 + +# Download files from S3 +aws s3 cp s3://sp1-testing-suite/$s3_path/program.bin /tmp/program.bin +aws s3 cp s3://sp1-testing-suite/$s3_path/stdin.bin /tmp/stdin.bin + +# Set environment variables +export RUSTFLAGS="-Copt-level=3 -Ctarget-cpu=native" +export RUST_BACKTRACE=1 +export RUST_LOG=debug + +# Run moongate-perf +cargo run --release -p sp1-perf -- --program /tmp/program.bin --stdin /tmp/stdin.bin --mode $stage \ No newline at end of file diff --git a/crates/perf/src/main.rs b/crates/perf/src/main.rs new file mode 100644 index 0000000000..51f2e70f3c --- /dev/null +++ b/crates/perf/src/main.rs @@ -0,0 +1,169 @@ +use std::time::{Duration, Instant}; + +use clap::{command, Parser, ValueEnum}; +use sp1_cuda::SP1CudaProver; +use sp1_prover::components::DefaultProverComponents; +use sp1_sdk::{self, ProverClient, SP1Context, SP1Prover, SP1Stdin}; +use sp1_stark::SP1ProverOpts; + +#[derive(Parser, Clone)] +#[command(about = "Evaluate the performance of SP1 on programs.")] +struct PerfArgs { + #[arg(short, long)] + pub program: String, + #[arg(short, long)] + pub stdin: String, + #[arg(short, long)] + pub mode: ProverMode, +} + +#[derive(Debug, Clone)] +#[allow(dead_code)] +struct PerfResult { + pub cycles: u64, + pub execution_duration: Duration, + pub prove_core_duration: Duration, + pub verify_core_duration: Duration, + pub compress_duration: Duration, + pub verify_compressed_duration: Duration, + pub 
shrink_duration: Duration, + pub verify_shrink_duration: Duration, + pub wrap_duration: Duration, + pub verify_wrap_duration: Duration, +} + +#[derive(Debug, Clone, ValueEnum, PartialEq, Eq)] +enum ProverMode { + Cpu, + Cuda, + Network, +} + +pub fn time_operation T>(operation: F) -> (T, std::time::Duration) { + let start = Instant::now(); + let result = operation(); + let duration = start.elapsed(); + (result, duration) +} + +fn main() { + sp1_sdk::utils::setup_logger(); + let args = PerfArgs::parse(); + + let elf = std::fs::read(args.program).expect("failed to read program"); + let stdin = std::fs::read(args.stdin).expect("failed to read stdin"); + let stdin: SP1Stdin = bincode::deserialize(&stdin).expect("failed to deserialize stdin"); + + let prover = SP1Prover::::new(); + let (pk, vk) = prover.setup(&elf); + let cycles = sp1_prover::utils::get_cycles(&elf, &stdin); + let opts = SP1ProverOpts::default(); + + match args.mode { + ProverMode::Cpu => { + let context = SP1Context::default(); + let (_, execution_duration) = + time_operation(|| prover.execute(&elf, &stdin, context.clone())); + + let (core_proof, prove_core_duration) = + time_operation(|| prover.prove_core(&pk, &stdin, opts, context).unwrap()); + + let (_, verify_core_duration) = + time_operation(|| prover.verify(&core_proof.proof, &vk)); + + let (compress_proof, compress_duration) = + time_operation(|| prover.compress(&vk, core_proof, vec![], opts).unwrap()); + + let (_, verify_compressed_duration) = + time_operation(|| prover.verify_compressed(&compress_proof, &vk)); + + let (shrink_proof, shrink_duration) = + time_operation(|| prover.shrink(compress_proof, opts).unwrap()); + + let (_, verify_shrink_duration) = + time_operation(|| prover.verify_shrink(&shrink_proof, &vk)); + + let (wrapped_bn254_proof, wrap_duration) = + time_operation(|| prover.wrap_bn254(shrink_proof, opts).unwrap()); + + let (_, verify_wrap_duration) = + time_operation(|| prover.verify_wrap_bn254(&wrapped_bn254_proof, &vk)); + + let result = PerfResult { + cycles, + execution_duration, + prove_core_duration, + verify_core_duration, + compress_duration, + verify_compressed_duration, + shrink_duration, + verify_shrink_duration, + wrap_duration, + verify_wrap_duration, + }; + + println!("{:?}", result); + } + ProverMode::Cuda => { + let server = SP1CudaProver::new().expect("failed to initialize CUDA prover"); + + let context = SP1Context::default(); + let (_, execution_duration) = + time_operation(|| prover.execute(&elf, &stdin, context.clone())); + + let (core_proof, prove_core_duration) = + time_operation(|| server.prove_core(&pk, &stdin).unwrap()); + + let (_, verify_core_duration) = time_operation(|| { + prover.verify(&core_proof.proof, &vk).expect("Proof verification failed") + }); + + let (compress_proof, compress_duration) = + time_operation(|| server.compress(&vk, core_proof, vec![]).unwrap()); + + let (_, verify_compressed_duration) = + time_operation(|| prover.verify_compressed(&compress_proof, &vk)); + + let (shrink_proof, shrink_duration) = + time_operation(|| server.shrink(compress_proof).unwrap()); + + let (_, verify_shrink_duration) = + time_operation(|| prover.verify_shrink(&shrink_proof, &vk)); + + let (_, wrap_duration) = time_operation(|| server.wrap_bn254(shrink_proof).unwrap()); + + // TODO: Verify wrapped bn254 proofs. 
+ // let (_, verify_wrap_duration) = + // time_operation(|| prover.verify_wrap_bn254(&wrapped_bn254_proof, &vk)); + let verify_wrap_duration = Duration::from_secs(0); + + let result = PerfResult { + cycles, + execution_duration, + prove_core_duration, + verify_core_duration, + compress_duration, + verify_compressed_duration, + shrink_duration, + verify_shrink_duration, + wrap_duration, + verify_wrap_duration, + }; + + println!("{:?}", result); + } + ProverMode::Network => { + let prover = ProverClient::network(); + let (_, _) = time_operation(|| prover.execute(&elf, stdin.clone())); + + let (proof, _) = + time_operation(|| prover.prove(&pk, stdin.clone()).groth16().run().unwrap()); + + let (_, _) = time_operation(|| prover.verify(&proof, &vk)); + + let (proof, _) = time_operation(|| prover.prove(&pk, stdin).plonk().run().unwrap()); + + let (_, _) = time_operation(|| prover.verify(&proof, &vk)); + } + }; +} diff --git a/crates/perf/workflow.sh b/crates/perf/workflow.sh new file mode 100755 index 0000000000..c6b91f7268 --- /dev/null +++ b/crates/perf/workflow.sh @@ -0,0 +1,78 @@ +#! /bin/bash + +# Get the current git branch. +GIT_REF=$(git rev-parse --abbrev-ref HEAD) + +# Define the list of CPU workloads. +CPU_WORKLOADS=( + "fibonacci-17k" + "ssz-withdrawals" + "tendermint" + "rsp-20526624" + "rsa" + "regex" + "chess" + "json" + "blobstream-01j6z63fgafrc8jeh0k12gbtvw" + "blobstream-01j6z95bdme9svevmfyc974bja" + "blobstream-01j6z9ak0ke9srsppgywgke6fj" + "vector-01j6xsv35re96tkgyda115320t" + "vector-01j6xzy366ff5tbkzcrs8pma02" + "vector-01j6y06de0fdaafemr8b1t69z3" + "raiko-a7-10" +) + +# Define the list of CUDA workloads. +CUDA_WORKLOADS=( + "fibonacci-17k" + "ssz-withdrawals" + "tendermint" + "rsp-20526624" + "rsa" + "regex" + "chess" + "json" + "blobstream-01j6z63fgafrc8jeh0k12gbtvw" + "blobstream-01j6z95bdme9svevmfyc974bja" + "blobstream-01j6z9ak0ke9srsppgywgke6fj" + "vector-01j6xsv35re96tkgyda115320t" + "vector-01j6xzy366ff5tbkzcrs8pma02" + "vector-01j6y06de0fdaafemr8b1t69z3" + "raiko-a7-10" +) + +# Define the list of network workloads. +NETWORK_WORKLOADS=( + # "fibonacci-17k" + # "ssz-withdrawals" + # "tendermint" + # "rsp-20526624" + # "rsa" + # "regex" + # "chess" + # "json" + # "blobstream-01j6z63fgafrc8jeh0k12gbtvw" + # "blobstream-01j6z95bdme9svevmfyc974bja" + # "blobstream-01j6z9ak0ke9srsppgywgke6fj" + # "vector-01j6xsv35re96tkgyda115320t" + # "vector-01j6xzy366ff5tbkzcrs8pma02" + # "vector-01j6y06de0fdaafemr8b1t69z3" + # "raiko-a7-10" + # "op-succinct-op-sepolia-1818303090-18303120" + # "op-succinct-op-sepolia-18200000-18200030" + # "op-succinct-op-sepolia-18250000-18250030" + # "op-succinct-op-sepolia-18303044-18303074" + # "op-succinct-op-sepolia-range-17685896-17685897" + # "op-succinct-op-sepolia-range-17985900-17985905" + # "op-succinct-op-sepolia-range-18129400-18129401" +) + +# Create a JSON object with the list of workloads. +WORKLOADS=$(jq -n \ + --arg cpu "$(printf '%s\n' "${CPU_WORKLOADS[@]}" | jq -R . | jq -s 'map(select(length > 0))')" \ + --arg cuda "$(printf '%s\n' "${CUDA_WORKLOADS[@]}" | jq -R . | jq -s 'map(select(length > 0))')" \ + --arg network "$(printf '%s\n' "${NETWORK_WORKLOADS[@]}" | jq -R . | jq -s 'map(select(length > 0))')" \ + '{cpu_workloads: $cpu, cuda_workloads: $cuda, network_workloads: $network}') + +# Run the workflow with the list of workloads. 
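Before dispatching the whole matrix, a single workload can be smoke-tested locally with `run_s3.sh`, assuming AWS credentials with read access to the sp1-testing-suite bucket; the positional arguments are the S3 key and the prover mode, matching sp1-perf's `--mode` flag.

```bash
# Hypothetical local smoke test of one CPU workload from the repository root.
./crates/perf/run_s3.sh fibonacci-17k cpu
```

The `echo $WORKLOADS | gh workflow run ...` command below then hands the assembled JSON payload to the suite workflow.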
+echo $WORKLOADS | gh workflow run suite.yml --ref $GIT_REF --json diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index e817ed23fd..56ea6e4178 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -20,4 +20,3 @@ p3-poseidon2 = { workspace = true } p3-symmetric = { workspace = true } serde = { version = "1.0.207", features = ["derive"] } sha2 = "0.10.8" -itertools = "0.13.0" diff --git a/crates/prover/Cargo.toml b/crates/prover/Cargo.toml index a848d986d5..a4fc480c7f 100644 --- a/crates/prover/Cargo.toml +++ b/crates/prover/Cargo.toml @@ -11,13 +11,13 @@ categories = { workspace = true } [dependencies] p3-matrix = { workspace = true } -sp1-recursion-program = { workspace = true } -sp1-recursion-circuit = { workspace = true } sp1-recursion-compiler = { workspace = true } sp1-recursion-core = { workspace = true } +sp1-recursion-circuit = { workspace = true } sp1-recursion-gnark-ffi = { workspace = true } sp1-core-machine = { workspace = true } sp1-stark = { workspace = true } +p3-symmetric = { workspace = true } sp1-core-executor = { workspace = true } sp1-primitives = { workspace = true } p3-field = { workspace = true } @@ -33,7 +33,6 @@ tracing = "0.1.40" tracing-subscriber = "0.3.18" serde_json = "1.0.121" clap = { version = "4.5.9", features = ["derive", "env"] } -hex = "0.4.3" anyhow = "1.0.83" dirs = "5.0.1" tempfile = "3.10.1" @@ -41,7 +40,10 @@ subtle-encoding = "0.5.1" serial_test = "3.1.1" num-bigint = "0.4.6" thiserror = "1.0.63" -oneshot = "0.1.8" +lru = "0.12.4" +eyre = "0.6.12" +reqwest = { version = "0.11", features = ["blocking"] } +lazy_static = "1.5.0" [[bin]] name = "build_plonk_bn254" @@ -51,6 +53,14 @@ path = "scripts/build_plonk_bn254.rs" name = "build_groth16_bn254" path = "scripts/build_groth16_bn254.rs" +[[bin]] +name = "build_compress_vks" +path = "scripts/build_compress_vks.rs" + +[[bin]] +name = "post_trusted_setup" +path = "scripts/post_trusted_setup.rs" + [[bin]] name = "e2e" path = "scripts/e2e.rs" @@ -59,3 +69,4 @@ path = "scripts/e2e.rs" neon = ["sp1-core-machine/neon"] native-gnark = ["sp1-recursion-gnark-ffi/native"] export-tests = [] +debug = ["sp1-core-machine/debug"] diff --git a/crates/prover/Makefile b/crates/prover/Makefile index 9365179916..4cd36df55e 100644 --- a/crates/prover/Makefile +++ b/crates/prover/Makefile @@ -5,17 +5,22 @@ all: build-circuits: rm -rf build && \ mkdir -p build && \ - RUSTFLAGS='-C target-cpu=native' \ - cargo run -p sp1-prover --release --bin build_plonk_bn254 --features native-gnark -- \ - --build-dir=./build && \ - RUSTFLAGS='-C target-cpu=native' \ + mkdir -p build/groth16 && \ + mkdir -p build/plonk && \ + RUST_LOG=debug RUSTFLAGS='-C target-cpu=native' \ cargo run -p sp1-prover --release --bin build_groth16_bn254 --features native-gnark -- \ - --build-dir=./build + --build-dir=./build/groth16 && \ + RUST_LOG=debug RUSTFLAGS='-C target-cpu=native' \ + cargo run -p sp1-prover --release --bin build_plonk_bn254 --features native-gnark -- \ + --build-dir=./build/plonk release-circuits: @read -p "Release version (ex. v1.0.0-testnet)? 
" version; \ bash release.sh $$version +release-shapes: + bash shapes.sh + test-e2e: RUSTFLAGS='-C target-cpu=native' \ - cargo test --package sp1-prover --lib --release -- tests::test_e2e --exact --show-output + cargo test --package sp1-prover --lib --release -- tests::test_e2e --exact --show-output \ No newline at end of file diff --git a/crates/prover/TRUSTED_SETUP.md b/crates/prover/TRUSTED_SETUP.md new file mode 100644 index 0000000000..541e794fd0 --- /dev/null +++ b/crates/prover/TRUSTED_SETUP.md @@ -0,0 +1,89 @@ +# Groth16 Trusted Setup + +## Prerequisites + +Make sure you have already built the circuits to the `build/groth16` directory. + +``` +make build-circuits +``` + +The trusted setup process will overwrite the proving key, verifying key, and the relevant +contracts in the `build/groth16` directory. + +## Powers of Tau + +Download the powers of tau file for the given number of constraints. You will need to choose the +number based on the number of constraints in the circuit (nearest power of 2 above the number of constraints). + +``` +export NB_CONSTRAINTS_LOG2=23 +wget https://storage.googleapis.com/zkevm/ptau/powersOfTau28_hez_final_${NB_CONSTRAINTS_LOG2}.ptau \ + -O powersOfTau28_hez_final.ptau +``` + +## Semaphore Install + +``` +git clone https://github.com/jtguibas/semaphore-gnark-11.git +git checkout ee57a61abfc3924c61ffc0a3d033bb92dfe7bbe8 +go build +mv semaphore-mtb-setup semaphore-gnark-11 +cp semaphore-gnark-11 ../sp1/crates/prover/ +``` + +## Phase 1 Setup + +``` +mkdir -p trusted-setup +./semaphore-gnark-11 p1i powersOfTau28_hez_final.ptau trusted-setup/phase1 +``` + +## Phase 2 Setup + +``` +./semaphore-gnark-11 p2n trusted-setup/phase1 build/groth16/groth16_circuit.bin trusted-setup/phase2 trusted-setup/evals +``` + +## Phase 2 Contributions + +``` +./semaphore-gnark-11 p2c trusted-setup/phase2 trusted-setup/phase2-1-jtguibas +./semaphore-gnark-11 p2c trusted-setup/phase2-1-jtguibas trusted-setup/phase2-2-pumatheuma +cp trusted-setup/phase2-2-pumatheuma trusted-setup/phase2-final +``` + +## Export Keys + +``` +./semaphore-gnark-11 key trusted-setup/phase1 trusted-setup/phase2-final trusted-setup/evals build/groth16/groth16_circuit.bin +cp pk trusted-setup/groth16_pk.bin +cp vk trusted-setup/groth16_vk.bin +``` + +## Export Verifier + +``` +./semaphore-gnark-11 sol vk +cp Groth16Verifier.sol trusted-setup/Groth16Verifier.sol +``` + +## Override Existing Build + +``` +cp trusted-setup/groth16_pk.bin build/groth16/groth16_pk.bin +cp trusted-setup/groth16_vk.bin build/groth16/groth16_vk.bin +cp trusted-setup/Groth16Verifier.sol build/groth16/Groth16Verifier.sol +``` + +## Post Trusted Setup + +``` +cargo run --bin post_trusted_setup --release -- --build-dir build/groth16 +``` + +## Release + +``` +make release-circuits +``` \ No newline at end of file diff --git a/crates/prover/dummy_vk_map.bin b/crates/prover/dummy_vk_map.bin new file mode 100644 index 0000000000..0e1a2eaaa0 Binary files /dev/null and b/crates/prover/dummy_vk_map.bin differ diff --git a/crates/prover/release.sh b/crates/prover/release.sh index 61ffc55022..005a2b6456 100644 --- a/crates/prover/release.sh +++ b/crates/prover/release.sh @@ -4,8 +4,7 @@ set -e # Get the version from the command line. 
VERSION=$1 -# Specify the file to upload and the S3 bucket name -FILE_TO_UPLOAD="./build" +# Specify the S3 bucket name S3_BUCKET="sp1-circuits" # Check for unstaged changes in the Git repository @@ -24,21 +23,63 @@ fi # Put the version in the build directory echo "$COMMIT_HASH $VERSION" > ./build/SP1_COMMIT -# Create archive named after the commit hash -ARCHIVE_NAME="${VERSION}.tar.gz" -cd $FILE_TO_UPLOAD -tar --exclude='srs.bin' --exclude='srs_lagrange.bin' -czvf "../$ARCHIVE_NAME" . -cd - +# Create archives for Groth16, Plonk, and Trusted Setup +GROTH16_ARCHIVE="${VERSION}-groth16.tar.gz" +PLONK_ARCHIVE="${VERSION}-plonk.tar.gz" +TRUSTED_SETUP_ARCHIVE="${VERSION}-trusted-setup.tar.gz" + +# Create Groth16 archive +cd ./build/groth16 +tar --exclude='srs.bin' --exclude='srs_lagrange.bin' -czvf "../../$GROTH16_ARCHIVE" . +cd ../.. +if [ $? -ne 0 ]; then + echo "Failed to create Groth16 archive." + exit 1 +fi + +# Create Plonk archive +cd ./build/plonk +tar --exclude='srs.bin' --exclude='srs_lagrange.bin' -czvf "../../$PLONK_ARCHIVE" . +cd ../.. +if [ $? -ne 0 ]; then + echo "Failed to create Plonk archive." + exit 1 +fi + +# Create Trusted Setup archive +cd ./trusted-setup +tar -czvf "../$TRUSTED_SETUP_ARCHIVE" . +cd .. if [ $? -ne 0 ]; then - echo "Failed to create archive." + echo "Failed to create Trusted Setup archive." exit 1 fi -# Upload the file to S3, naming it after the current commit hash -aws s3 cp "$ARCHIVE_NAME" "s3://$S3_BUCKET/$ARCHIVE_NAME" +# Upload Groth16 archive to S3 +aws s3 cp "$GROTH16_ARCHIVE" "s3://$S3_BUCKET/$GROTH16_ARCHIVE" if [ $? -ne 0 ]; then - echo "Failed to upload file to S3." + echo "Failed to upload Groth16 archive to S3." exit 1 fi -echo "succesfully uploaded build artifacts to s3://$S3_BUCKET/$ARCHIVE_NAME" +# Upload Plonk archive to S3 +aws s3 cp "$PLONK_ARCHIVE" "s3://$S3_BUCKET/$PLONK_ARCHIVE" +if [ $? -ne 0 ]; then + echo "Failed to upload Plonk archive to S3." + exit 1 +fi + +# Upload Trusted Setup archive to S3 +aws s3 cp "$TRUSTED_SETUP_ARCHIVE" "s3://$S3_BUCKET/$TRUSTED_SETUP_ARCHIVE" +if [ $? -ne 0 ]; then + echo "Failed to upload Trusted Setup archive to S3." 
+ exit 1 +fi + +echo "Successfully uploaded build artifacts to S3:" +echo "- s3://$S3_BUCKET/$GROTH16_ARCHIVE" +echo "- s3://$S3_BUCKET/$PLONK_ARCHIVE" +echo "- s3://$S3_BUCKET/$TRUSTED_SETUP_ARCHIVE" + +# Clean up local archive files +rm "$GROTH16_ARCHIVE" "$PLONK_ARCHIVE" "$TRUSTED_SETUP_ARCHIVE" diff --git a/crates/prover/scripts/build_compress_vks.rs b/crates/prover/scripts/build_compress_vks.rs new file mode 100644 index 0000000000..0f8c5a0d08 --- /dev/null +++ b/crates/prover/scripts/build_compress_vks.rs @@ -0,0 +1,50 @@ +use std::path::PathBuf; + +use clap::Parser; +use sp1_core_machine::utils::setup_logger; +use sp1_prover::{ + components::DefaultProverComponents, shapes::build_vk_map_to_file, REDUCE_BATCH_SIZE, +}; + +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +struct Args { + #[clap(short, long)] + build_dir: PathBuf, + #[clap(short, long, default_value_t = false)] + dummy: bool, + #[clap(short, long, default_value_t = REDUCE_BATCH_SIZE)] + reduce_batch_size: usize, + #[clap(short, long, default_value_t = 1)] + num_compiler_workers: usize, + #[clap(short, long, default_value_t = 1)] + num_setup_workers: usize, + #[clap(short, long)] + start: Option, + #[clap(short, long)] + end: Option, +} + +fn main() { + setup_logger(); + let args = Args::parse(); + + let reduce_batch_size = args.reduce_batch_size; + let build_dir = args.build_dir; + let dummy = args.dummy; + let num_compiler_workers = args.num_compiler_workers; + let num_setup_workers = args.num_setup_workers; + let range_start = args.start; + let range_end = args.end; + + build_vk_map_to_file::( + build_dir, + reduce_batch_size, + dummy, + num_compiler_workers, + num_setup_workers, + range_start, + range_end, + ) + .unwrap(); +} diff --git a/crates/prover/scripts/e2e.rs b/crates/prover/scripts/e2e.rs index 3476ce89e7..30415f432c 100644 --- a/crates/prover/scripts/e2e.rs +++ b/crates/prover/scripts/e2e.rs @@ -1,117 +1,119 @@ -use std::{borrow::Borrow, path::PathBuf}; - -use clap::Parser; -use p3_baby_bear::BabyBear; -use p3_field::PrimeField; -use sp1_core_executor::SP1Context; -use sp1_core_machine::io::SP1Stdin; -use sp1_prover::{ - utils::{babybear_bytes_to_bn254, babybears_to_bn254, words_to_bytes}, - SP1Prover, -}; -use sp1_recursion_circuit::{stark::build_wrap_circuit, witness::Witnessable}; -use sp1_recursion_compiler::ir::Witness; -use sp1_recursion_core::air::RecursionPublicValues; -use sp1_recursion_gnark_ffi::{Groth16Bn254Prover, PlonkBn254Prover}; -use sp1_stark::SP1ProverOpts; -use subtle_encoding::hex; - -#[derive(Parser, Debug)] -#[clap(author, version, about, long_about = None)] -struct Args { - #[clap(short, long)] - build_dir: String, - #[arg(short, long)] - system: String, -} - -pub fn main() { - sp1_core_machine::utils::setup_logger(); - std::env::set_var("RECONSTRUCT_COMMITMENTS", "false"); - - let args = Args::parse(); - let build_dir: PathBuf = args.build_dir.into(); - - let elf = include_bytes!("../elf/riscv32im-succinct-zkvm-elf"); - - tracing::info!("initializing prover"); - let prover: SP1Prover = SP1Prover::new(); - let opts = SP1ProverOpts::default(); - let context = SP1Context::default(); - - tracing::info!("setup elf"); - let (pk, vk) = prover.setup(elf); - - tracing::info!("prove core"); - let stdin = SP1Stdin::new(); - let core_proof = prover.prove_core(&pk, &stdin, opts, context).unwrap(); - - tracing::info!("Compress"); - let reduced_proof = prover.compress(&vk, core_proof, vec![], opts).unwrap(); - - tracing::info!("Shrink"); - let compressed_proof = 
prover.shrink(reduced_proof, opts).unwrap(); - - tracing::info!("wrap"); - let wrapped_proof = prover.wrap_bn254(compressed_proof, opts).unwrap(); - - tracing::info!("building verifier constraints"); - let constraints = tracing::info_span!("wrap circuit") - .in_scope(|| build_wrap_circuit(prover.wrap_vk(), wrapped_proof.proof.clone())); - - tracing::info!("building template witness"); - let pv: &RecursionPublicValues<_> = wrapped_proof.proof.public_values.as_slice().borrow(); - let vkey_hash = babybears_to_bn254(&pv.sp1_vk_digest); - let committed_values_digest_bytes: [BabyBear; 32] = - words_to_bytes(&pv.committed_value_digest).try_into().unwrap(); - let committed_values_digest = babybear_bytes_to_bn254(&committed_values_digest_bytes); - - let mut witness = Witness::default(); - wrapped_proof.proof.write(&mut witness); - witness.write_commited_values_digest(committed_values_digest); - witness.write_vkey_hash(vkey_hash); - - tracing::info!("sanity check plonk test"); - PlonkBn254Prover::test(constraints.clone(), witness.clone()); - - tracing::info!("sanity check plonk build"); - PlonkBn254Prover::build(constraints.clone(), witness.clone(), build_dir.clone()); - - tracing::info!("sanity check plonk prove"); - let plonk_bn254_prover = PlonkBn254Prover::new(); - - tracing::info!("plonk prove"); - let proof = plonk_bn254_prover.prove(witness.clone(), build_dir.clone()); - - tracing::info!("verify plonk proof"); - plonk_bn254_prover.verify( - &proof, - &vkey_hash.as_canonical_biguint(), - &committed_values_digest.as_canonical_biguint(), - &build_dir, - ); - - println!("plonk proof: {:?}", String::from_utf8(hex::encode(proof.encoded_proof)).unwrap()); - - tracing::info!("sanity check groth16 test"); - Groth16Bn254Prover::test(constraints.clone(), witness.clone()); - - tracing::info!("sanity check groth16 build"); - Groth16Bn254Prover::build(constraints.clone(), witness.clone(), build_dir.clone()); - - tracing::info!("sanity check groth16 prove"); - let groth16_bn254_prover = Groth16Bn254Prover::new(); - - tracing::info!("groth16 prove"); - let proof = groth16_bn254_prover.prove(witness.clone(), build_dir.clone()); - - tracing::info!("verify groth16 proof"); - groth16_bn254_prover.verify( - &proof, - &vkey_hash.as_canonical_biguint(), - &committed_values_digest.as_canonical_biguint(), - &build_dir, - ); +// use std::{borrow::Borrow, path::PathBuf}; + +// use clap::Parser; +// use p3_baby_bear::BabyBear; +// use p3_field::PrimeField; +// use sp1_core_executor::SP1Context; +// use sp1_core_machine::io::SP1Stdin; +// use sp1_prover::{ +// utils::{babybear_bytes_to_bn254, babybears_to_bn254, words_to_bytes}, +// SP1Prover, +// }; +// use sp1_recursion_circuit::{stark::build_wrap_circuit, witness::Witnessable}; +// use sp1_recursion_compiler::ir::Witness; +// use sp1_recursion_core::air::RecursionPublicValues; +// use sp1_recursion_gnark_ffi::{Groth16Bn254Prover, PlonkBn254Prover}; +// use sp1_stark::SP1ProverOpts; +// use subtle_encoding::hex; + +// #[derive(Parser, Debug)] +// #[clap(author, version, about, long_about = None)] +// struct Args { +// #[clap(short, long)] +// build_dir: String, +// #[arg(short, long)] +// system: String, +// } + +// pub fn main() { +// sp1_core_machine::utils::setup_logger(); +// std::env::set_var("RECONSTRUCT_COMMITMENTS", "false"); + +// let args = Args::parse(); +// let build_dir: PathBuf = args.build_dir.into(); + +// let elf = include_bytes!("../elf/riscv32im-succinct-zkvm-elf"); + +// tracing::info!("initializing prover"); +// let prover: SP1Prover = 
SP1Prover::new(); +// let opts = SP1ProverOpts::default(); +// let context = SP1Context::default(); + +// tracing::info!("setup elf"); +// let (pk, vk) = prover.setup(elf); + +// tracing::info!("prove core"); +// let stdin = SP1Stdin::new(); +// let core_proof = prover.prove_core(&pk, &stdin, opts, context).unwrap(); + +// tracing::info!("Compress"); +// let reduced_proof = prover.compress(&vk, core_proof, vec![], opts).unwrap(); + +// tracing::info!("Shrink"); +// let compressed_proof = prover.shrink(reduced_proof, opts).unwrap(); + +// tracing::info!("wrap"); +// let wrapped_proof = prover.wrap_bn254(compressed_proof, opts).unwrap(); + +// tracing::info!("building verifier constraints"); +// let constraints = tracing::info_span!("wrap circuit") +// .in_scope(|| build_wrap_circuit(prover.wrap_vk(), wrapped_proof.proof.clone())); + +// tracing::info!("building template witness"); +// let pv: &RecursionPublicValues<_> = wrapped_proof.proof.public_values.as_slice().borrow(); +// let vkey_hash = babybears_to_bn254(&pv.sp1_vk_digest); +// let committed_values_digest_bytes: [BabyBear; 32] = +// words_to_bytes(&pv.committed_value_digest).try_into().unwrap(); +// let committed_values_digest = babybear_bytes_to_bn254(&committed_values_digest_bytes); + +// let mut witness = Witness::default(); +// wrapped_proof.proof.write(&mut witness); +// witness.write_commited_values_digest(committed_values_digest); +// witness.write_vkey_hash(vkey_hash); + +// tracing::info!("sanity check plonk test"); +// PlonkBn254Prover::test(constraints.clone(), witness.clone()); + +// tracing::info!("sanity check plonk build"); +// PlonkBn254Prover::build(constraints.clone(), witness.clone(), build_dir.clone()); + +// tracing::info!("sanity check plonk prove"); +// let plonk_bn254_prover = PlonkBn254Prover::new(); + +// tracing::info!("plonk prove"); +// let proof = plonk_bn254_prover.prove(witness.clone(), build_dir.clone()); + +// tracing::info!("verify plonk proof"); +// plonk_bn254_prover.verify( +// &proof, +// &vkey_hash.as_canonical_biguint(), +// &committed_values_digest.as_canonical_biguint(), +// &build_dir, +// ); + +// println!("plonk proof: {:?}", String::from_utf8(hex::encode(proof.encoded_proof)).unwrap()); + +// tracing::info!("sanity check groth16 test"); +// Groth16Bn254Prover::test(constraints.clone(), witness.clone()); + +// tracing::info!("sanity check groth16 build"); +// Groth16Bn254Prover::build(constraints.clone(), witness.clone(), build_dir.clone()); + +// tracing::info!("sanity check groth16 prove"); +// let groth16_bn254_prover = Groth16Bn254Prover::new(); + +// tracing::info!("groth16 prove"); +// let proof = groth16_bn254_prover.prove(witness.clone(), build_dir.clone()); + +// tracing::info!("verify groth16 proof"); +// groth16_bn254_prover.verify( +// &proof, +// &vkey_hash.as_canonical_biguint(), +// &committed_values_digest.as_canonical_biguint(), +// &build_dir, +// ); - println!("groth16 proof: {:?}", String::from_utf8(hex::encode(proof.encoded_proof)).unwrap()); -} +// println!("groth16 proof: {:?}", +// String::from_utf8(hex::encode(proof.encoded_proof)).unwrap()); } + +pub fn main() {} diff --git a/crates/prover/scripts/post_trusted_setup.rs b/crates/prover/scripts/post_trusted_setup.rs new file mode 100644 index 0000000000..303d8fb762 --- /dev/null +++ b/crates/prover/scripts/post_trusted_setup.rs @@ -0,0 +1,18 @@ +use std::path::PathBuf; + +use clap::Parser; +use sp1_core_machine::utils::setup_logger; +use sp1_recursion_gnark_ffi::Groth16Bn254Prover; + +#[derive(Parser, Debug)] 
+#[clap(author, version, about, long_about = None)] +struct Args { + #[clap(short, long)] + build_dir: PathBuf, +} + +pub fn main() { + setup_logger(); + let args = Args::parse(); + Groth16Bn254Prover::build_contracts(args.build_dir); +} diff --git a/crates/prover/shapes.sh b/crates/prover/shapes.sh new file mode 100644 index 0000000000..f889227667 --- /dev/null +++ b/crates/prover/shapes.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +# S3 bucket name. +S3_BUCKET="sp1-circuits" + +# Check for unstaged changes in the Git repository +if ! git diff --quiet; then + echo "Error: There are unstaged changes. Please commit or stash them before running this script." + exit 1 +fi + +# Get the current short Git reference. +GIT_REF=$(git rev-parse --short HEAD) + +# Upload allowed_vk_map.bin. +aws s3 cp allowed_vk_map.bin "s3://${S3_BUCKET}/shapes-${GIT_REF}/allowed_vk_map.bin" + +# Upload dummy_vk_map.bin. +aws s3 cp dummy_vk_map.bin "s3://${S3_BUCKET}/shapes-${GIT_REF}/dummy_vk_map.bin" + +# Print the uploaded shapes. +echo "" +echo "Successfully uploaded shapes to S3:" +echo "- https://${S3_BUCKET}.s3.us-east-2.amazonaws.com/shapes-${GIT_REF}/allowed_vk_map.bin" +echo "- https://${S3_BUCKET}.s3.us-east-2.amazonaws.com/shapes-${GIT_REF}/dummy_vk_map.bin" +echo "Shape Version: ${GIT_REF}" \ No newline at end of file diff --git a/crates/prover/src/build.rs b/crates/prover/src/build.rs index 39512346b7..d3e845ba95 100644 --- a/crates/prover/src/build.rs +++ b/crates/prover/src/build.rs @@ -3,17 +3,27 @@ use std::{borrow::Borrow, path::PathBuf}; use p3_baby_bear::BabyBear; use sp1_core_executor::SP1Context; use sp1_core_machine::io::SP1Stdin; -pub use sp1_recursion_circuit::{stark::build_wrap_circuit, witness::Witnessable}; -pub use sp1_recursion_compiler::ir::Witness; -use sp1_recursion_compiler::{config::OuterConfig, constraints::Constraint}; +use sp1_recursion_circuit::{ + hash::FieldHasherVariable, + machine::{SP1CompressWitnessValues, SP1WrapVerifier}, +}; +use sp1_recursion_compiler::{ + config::OuterConfig, + constraints::{Constraint, ConstraintCompiler}, + ir::Builder, +}; + use sp1_recursion_core::air::RecursionPublicValues; -pub use sp1_recursion_core::stark::utils::sp1_dev_mode; +pub use sp1_recursion_core::stark::sp1_dev_mode; + +pub use sp1_recursion_circuit::witness::{OuterWitness, Witnessable}; + use sp1_recursion_gnark_ffi::{Groth16Bn254Prover, PlonkBn254Prover}; use sp1_stark::{SP1ProverOpts, ShardProof, StarkVerifyingKey}; use crate::{ utils::{babybear_bytes_to_bn254, babybears_to_bn254, words_to_bytes}, - OuterSC, SP1Prover, + OuterSC, SP1Prover, WrapAir, }; /// Tries to build the PLONK artifacts inside the development directory. @@ -80,6 +90,14 @@ pub fn build_groth16_bn254_artifacts( /// the circuit.
pub fn build_plonk_bn254_artifacts_with_dummy(build_dir: impl Into) { let (wrap_vk, wrapped_proof) = dummy_proof(); + let wrap_vk_bytes = bincode::serialize(&wrap_vk).unwrap(); + let wrapped_proof_bytes = bincode::serialize(&wrapped_proof).unwrap(); + std::fs::write("wrap_vk.bin", wrap_vk_bytes).unwrap(); + std::fs::write("wraped_proof.bin", wrapped_proof_bytes).unwrap(); + let wrap_vk_bytes = std::fs::read("wrap_vk.bin").unwrap(); + let wrapped_proof_bytes = std::fs::read("wraped_proof.bin").unwrap(); + let wrap_vk = bincode::deserialize(&wrap_vk_bytes).unwrap(); + let wrapped_proof = bincode::deserialize(&wrapped_proof_bytes).unwrap(); crate::build::build_plonk_bn254_artifacts(&wrap_vk, &wrapped_proof, build_dir.into()); } @@ -89,6 +107,14 @@ pub fn build_plonk_bn254_artifacts_with_dummy(build_dir: impl Into) { /// the circuit. pub fn build_groth16_bn254_artifacts_with_dummy(build_dir: impl Into) { let (wrap_vk, wrapped_proof) = dummy_proof(); + let wrap_vk_bytes = bincode::serialize(&wrap_vk).unwrap(); + let wrapped_proof_bytes = bincode::serialize(&wrapped_proof).unwrap(); + std::fs::write("wrap_vk.bin", wrap_vk_bytes).unwrap(); + std::fs::write("wraped_proof.bin", wrapped_proof_bytes).unwrap(); + let wrap_vk_bytes = std::fs::read("wrap_vk.bin").unwrap(); + let wrapped_proof_bytes = std::fs::read("wraped_proof.bin").unwrap(); + let wrap_vk = bincode::deserialize(&wrap_vk_bytes).unwrap(); + let wrapped_proof = bincode::deserialize(&wrapped_proof_bytes).unwrap(); crate::build::build_groth16_bn254_artifacts(&wrap_vk, &wrapped_proof, build_dir.into()); } @@ -96,10 +122,14 @@ pub fn build_groth16_bn254_artifacts_with_dummy(build_dir: impl Into) { pub fn build_constraints_and_witness( template_vk: &StarkVerifyingKey, template_proof: &ShardProof, -) -> (Vec, Witness) { +) -> (Vec, OuterWitness) { tracing::info!("building verifier constraints"); - let constraints = tracing::info_span!("wrap circuit") - .in_scope(|| build_wrap_circuit(template_vk, template_proof.clone())); + let template_input = SP1CompressWitnessValues { + vks_and_proofs: vec![(template_vk.clone(), template_proof.clone())], + is_complete: true, + }; + let constraints = + tracing::info_span!("wrap circuit").in_scope(|| build_outer_circuit(&template_input)); let pv: &RecursionPublicValues = template_proof.public_values.as_slice().borrow(); let vkey_hash = babybears_to_bn254(&pv.sp1_vk_digest); @@ -108,8 +138,8 @@ pub fn build_constraints_and_witness( let committed_values_digest = babybear_bytes_to_bn254(&committed_values_digest_bytes); tracing::info!("building template witness"); - let mut witness = Witness::default(); - template_proof.write(&mut witness); + let mut witness = OuterWitness::default(); + template_input.write(&mut witness); witness.write_commited_values_digest(committed_values_digest); witness.write_vkey_hash(vkey_hash); @@ -143,5 +173,38 @@ pub fn dummy_proof() -> (StarkVerifyingKey, ShardProof) { tracing::info!("wrap"); let wrapped_proof = prover.wrap_bn254(shrink_proof, opts).unwrap(); - (prover.wrap_keys.into_inner().unwrap().1, wrapped_proof.proof) + (wrapped_proof.vk, wrapped_proof.proof) +} + +fn build_outer_circuit(template_input: &SP1CompressWitnessValues) -> Vec { + let wrap_machine = WrapAir::wrap_machine(OuterSC::default()); + + let wrap_span = tracing::debug_span!("build wrap circuit").entered(); + let mut builder = Builder::::default(); + + // Get the value of the vk. + let template_vk = template_input.vks_and_proofs.first().unwrap().0.clone(); + // Get an input variable. 
+ let input = template_input.read(&mut builder); + // Fix the `wrap_vk` value to be the same as the template `vk`. Since the chip information and + // the ordering is already a constant, we just need to constrain the commitment and pc_start. + + // Get the vk variable from the input. + let vk = input.vks_and_proofs.first().unwrap().0.clone(); + // Get the expected commitment. + let expected_commitment: [_; 1] = template_vk.commit.into(); + let expected_commitment = expected_commitment.map(|x| builder.eval(x)); + // Constrain `commit` to be the same as the template `vk`. + OuterSC::assert_digest_eq(&mut builder, expected_commitment, vk.commitment); + // Constrain `pc_start` to be the same as the template `vk`. + builder.assert_felt_eq(vk.pc_start, template_vk.pc_start); + + // Verify the proof. + SP1WrapVerifier::verify(&mut builder, &wrap_machine, input); + + let mut backend = ConstraintCompiler::::default(); + let operations = backend.emit(builder.into_operations()); + wrap_span.exit(); + + operations } diff --git a/crates/prover/src/init.rs b/crates/prover/src/init.rs deleted file mode 100644 index 933a899ba4..0000000000 --- a/crates/prover/src/init.rs +++ /dev/null @@ -1,169 +0,0 @@ -use crate::components::SP1ProverComponents; -use p3_baby_bear::BabyBear; -pub use sp1_core_machine::io::SP1Stdin; -pub use sp1_primitives::io::SP1PublicValues; -use sp1_primitives::types::RecursionProgramType; -use sp1_recursion_compiler::config::InnerConfig; -use sp1_recursion_core::runtime::RecursionProgram; -pub use sp1_recursion_gnark_ffi::{ - groth16_bn254::Groth16Bn254Prover, plonk_bn254::PlonkBn254Prover, -}; -pub use sp1_recursion_program::machine::{ - ReduceProgramType, SP1CompressMemoryLayout, SP1DeferredMemoryLayout, SP1RecursionMemoryLayout, - SP1RootMemoryLayout, -}; -use sp1_recursion_program::machine::{ - SP1CompressVerifier, SP1DeferredVerifier, SP1RecursiveVerifier, SP1RootVerifier, -}; -use sp1_stark::{MachineProver, StarkProvingKey, StarkVerifyingKey}; -use tracing::debug_span; - -use crate::{InnerSC, OuterSC, SP1Prover}; - -impl SP1Prover { - /// The program that can recursively verify a set of proofs into a single proof. - pub fn recursion_program(&self) -> &RecursionProgram { - self.recursion_program.get_or_init(|| { - debug_span!("init recursion program").in_scope(|| { - SP1RecursiveVerifier::::build(self.core_prover.machine()) - }) - }) - } - - /// The program that recursively verifies deferred proofs and accumulates the digests. - pub fn deferred_program(&self) -> &RecursionProgram { - self.deferred_program.get_or_init(|| { - debug_span!("init deferred program").in_scope(|| { - SP1DeferredVerifier::::build(self.compress_prover.machine()) - }) - }) - } - - /// The program that reduces a set of recursive proofs into a single proof. - pub fn compress_program(&self) -> &RecursionProgram { - self.compress_program.get_or_init(|| { - debug_span!("init compress program").in_scope(|| { - SP1CompressVerifier::::build( - self.compress_prover.machine(), - self.recursion_vk(), - self.deferred_vk(), - ) - }) - }) - } - - /// The shrink program that compresses a proof into a succinct proof. - pub fn shrink_program(&self) -> &RecursionProgram { - self.shrink_program.get_or_init(|| { - debug_span!("init shrink program").in_scope(|| { - SP1RootVerifier::::build( - self.compress_prover.machine(), - self.compress_vk(), - RecursionProgramType::Shrink, - ) - }) - }) - } - - /// The wrap program that wraps a proof into a SNARK-friendly field. 
- pub fn wrap_program(&self) -> &RecursionProgram { - self.wrap_program.get_or_init(|| { - debug_span!("init wrap program").in_scope(|| { - SP1RootVerifier::::build( - self.shrink_prover.machine(), - self.shrink_vk(), - RecursionProgramType::Wrap, - ) - }) - }) - } - - /// The proving and verifying keys for the recursion step. - pub fn recursion_keys(&self) -> &(StarkProvingKey, StarkVerifyingKey) { - self.recursion_keys.get_or_init(|| { - debug_span!("init recursion keys") - .in_scope(|| self.compress_prover.setup(self.recursion_program())) - }) - } - - /// The proving key for the recursion step. - pub fn recursion_pk(&self) -> &StarkProvingKey { - &self.recursion_keys().0 - } - - /// The verifying key for the recursion step. - pub fn recursion_vk(&self) -> &StarkVerifyingKey { - &self.recursion_keys().1 - } - - /// The proving and verifying keys for the deferred step. - pub fn deferred_keys(&self) -> &(StarkProvingKey, StarkVerifyingKey) { - self.deferred_keys.get_or_init(|| { - debug_span!("init deferred keys") - .in_scope(|| self.compress_prover.setup(self.deferred_program())) - }) - } - - /// The proving key for the deferred step. - pub fn deferred_pk(&self) -> &StarkProvingKey { - &self.deferred_keys().0 - } - - /// The verifying key for the deferred step. - pub fn deferred_vk(&self) -> &StarkVerifyingKey { - &self.deferred_keys().1 - } - - /// The proving and verifying keys for the compress step. - pub fn compress_keys(&self) -> &(StarkProvingKey, StarkVerifyingKey) { - self.compress_keys.get_or_init(|| { - debug_span!("init compress keys") - .in_scope(|| self.compress_prover.setup(self.compress_program())) - }) - } - - /// The proving key for the compress step. - pub fn compress_pk(&self) -> &StarkProvingKey { - &self.compress_keys().0 - } - - /// The verifying key for the compress step. - pub fn compress_vk(&self) -> &StarkVerifyingKey { - &self.compress_keys().1 - } - - /// The proving and verifying keys for the shrink step. - pub fn shrink_keys(&self) -> &(StarkProvingKey, StarkVerifyingKey) { - self.shrink_keys.get_or_init(|| { - debug_span!("init shrink keys") - .in_scope(|| self.shrink_prover.setup(self.shrink_program())) - }) - } - - /// The proving key for the shrink step. - pub fn shrink_pk(&self) -> &StarkProvingKey { - &self.shrink_keys().0 - } - - /// The verifying key for the shrink step. - pub fn shrink_vk(&self) -> &StarkVerifyingKey { - &self.shrink_keys().1 - } - - /// The proving and verifying keys for the wrap step. - pub fn wrap_keys(&self) -> &(StarkProvingKey, StarkVerifyingKey) { - self.wrap_keys.get_or_init(|| { - debug_span!("init wrap keys").in_scope(|| self.wrap_prover.setup(self.wrap_program())) - }) - } - - /// The proving key for the wrap step. - pub fn wrap_pk(&self) -> &StarkProvingKey { - &self.wrap_keys().0 - } - - /// The verifying key for the wrap step. 
- pub fn wrap_vk(&self) -> &StarkVerifyingKey { - &self.wrap_keys().1 - } -} diff --git a/crates/prover/src/lib.rs b/crates/prover/src/lib.rs index 91485cb60a..704c94f39e 100644 --- a/crates/prover/src/lib.rs +++ b/crates/prover/src/lib.rs @@ -13,54 +13,78 @@ pub mod build; pub mod components; -pub mod init; +pub mod shapes; pub mod types; pub mod utils; pub mod verify; use std::{ borrow::Borrow, + collections::BTreeMap, + env, + num::NonZeroUsize, path::Path, - sync::{mpsc::sync_channel, Arc, Mutex, OnceLock}, + sync::{ + atomic::{AtomicUsize, Ordering}, + mpsc::sync_channel, + Arc, Mutex, OnceLock, + }, thread, }; -use crate::init::SP1PublicValues; -use components::{DefaultProverComponents, SP1ProverComponents}; +use lru::LruCache; + +use tracing::instrument; + use p3_baby_bear::BabyBear; + use p3_challenger::CanObserve; -use p3_field::{AbstractField, PrimeField}; +use p3_field::{AbstractField, PrimeField, PrimeField32}; use p3_matrix::dense::RowMajorMatrix; use sp1_core_executor::{ExecutionError, ExecutionReport, Executor, Program, SP1Context}; -pub use sp1_core_machine::io::SP1Stdin; use sp1_core_machine::{ - riscv::RiscvAir, + io::SP1Stdin, + reduce::SP1ReduceProof, + riscv::{CoreShapeConfig, RiscvAir}, utils::{concurrency::TurnBasedSync, SP1CoreProverError}, }; -use sp1_primitives::hash_deferred_proof; -use sp1_recursion_circuit::witness::Witnessable; -use sp1_recursion_compiler::{config::InnerConfig, ir::Witness}; +use sp1_primitives::{hash_deferred_proof, io::SP1PublicValues}; +use sp1_recursion_circuit::{ + hash::FieldHasher, + machine::{ + PublicValuesOutputDigest, SP1CompressRootVerifierWithVKey, SP1CompressShape, + SP1CompressWithVKeyVerifier, SP1CompressWithVKeyWitnessValues, SP1CompressWithVkeyShape, + SP1CompressWitnessValues, SP1DeferredVerifier, SP1DeferredWitnessValues, + SP1MerkleProofWitnessValues, SP1RecursionShape, SP1RecursionWitnessValues, + SP1RecursiveVerifier, + }, + merkle_tree::MerkleTree, + witness::Witnessable, + WrapConfig, +}; +use sp1_recursion_compiler::{ + circuit::AsmCompiler, + config::InnerConfig, + ir::{Builder, Witness}, +}; use sp1_recursion_core::{ - air::RecursionPublicValues, - runtime::{ExecutionRecord, RecursionProgram, Runtime as RecursionRuntime}, - stark::{config::BabyBearPoseidon2Outer, RecursionAir}, + air::RecursionPublicValues, machine::RecursionAir, runtime::ExecutionRecord, + shape::RecursionShapeConfig, stark::BabyBearPoseidon2Outer, RecursionProgram, + Runtime as RecursionRuntime, }; pub use sp1_recursion_gnark_ffi::proof::{Groth16Bn254Proof, PlonkBn254Proof}; use sp1_recursion_gnark_ffi::{groth16_bn254::Groth16Bn254Prover, plonk_bn254::PlonkBn254Prover}; -use sp1_recursion_program::hints::Hintable; -pub use sp1_recursion_program::machine::{ - ReduceProgramType, SP1CompressMemoryLayout, SP1DeferredMemoryLayout, SP1RecursionMemoryLayout, - SP1RootMemoryLayout, -}; +use sp1_stark::{air::InteractionScope, MachineProvingKey, ProofShape}; use sp1_stark::{ air::PublicValues, baby_bear_poseidon2::BabyBearPoseidon2, Challenge, Challenger, - MachineProver, MachineVerificationError, SP1CoreOpts, SP1ProverOpts, ShardProof, - StarkGenericConfig, StarkProvingKey, StarkVerifyingKey, Val, Word, DIGEST_SIZE, + MachineProver, SP1CoreOpts, SP1ProverOpts, ShardProof, StarkGenericConfig, StarkVerifyingKey, + Val, Word, DIGEST_SIZE, }; -use tracing::instrument; pub use types::*; -use utils::words_to_bytes; +use utils::{sp1_commited_values_digest_bn254, sp1_vkey_digest_bn254, words_to_bytes}; + +use components::{DefaultProverComponents, 
SP1ProverComponents}; pub use sp1_core_machine::SP1_CIRCUIT_VERSION; @@ -74,8 +98,20 @@ pub type InnerSC = BabyBearPoseidon2; pub type OuterSC = BabyBearPoseidon2Outer; const COMPRESS_DEGREE: usize = 3; -const SHRINK_DEGREE: usize = 9; -const WRAP_DEGREE: usize = 17; +const SHRINK_DEGREE: usize = 3; +const WRAP_DEGREE: usize = 9; + +const CORE_CACHE_SIZE: usize = 5; +const COMPRESS_CACHE_SIZE: usize = 3; +pub const REDUCE_BATCH_SIZE: usize = 2; + +// TODO: FIX +// +// const SHAPES_URL_PREFIX: &str = "https://sp1-circuits.s3.us-east-2.amazonaws.com/shapes"; +// const SHAPES_VERSION: &str = "146079e0e"; +// lazy_static! { +// static ref SHAPES_INIT: Once = Once::new(); +// } pub type CompressAir = RecursionAir; pub type ShrinkAir = RecursionAir; @@ -83,65 +119,49 @@ pub type WrapAir = RecursionAir; /// A end-to-end prover implementation for the SP1 RISC-V zkVM. pub struct SP1Prover { - /// The program that can recursively verify a set of proofs into a single proof. - pub recursion_program: OnceLock>, + /// The machine used for proving the core step. + pub core_prover: C::CoreProver, - /// The proving key and verifying key for the recursion step. - pub recursion_keys: OnceLock<(StarkProvingKey, StarkVerifyingKey)>, + /// The machine used for proving the recursive and reduction steps. + pub compress_prover: C::CompressProver, - /// The program that recursively verifies deferred proofs and accumulates the digests. - pub deferred_program: OnceLock>, + /// The machine used for proving the shrink step. + pub shrink_prover: C::ShrinkProver, - /// The proving key and verifying key for the reduce step. - pub deferred_keys: OnceLock<(StarkProvingKey, StarkVerifyingKey)>, + /// The machine used for proving the wrapping step. + pub wrap_prover: C::WrapProver, - /// The program that reduces a set of recursive proofs into a single proof. - pub compress_program: OnceLock>, + pub recursion_programs: Mutex>>>, - /// The proving key and verifying key for the reduce step. - pub compress_keys: OnceLock<(StarkProvingKey, StarkVerifyingKey)>, + pub recursion_cache_misses: AtomicUsize, - /// The shrink program that compresses a proof into a succinct proof. - pub shrink_program: OnceLock>, + pub compress_programs: + Mutex>>>, - /// The proving key and verifying key for the compress step. - pub shrink_keys: OnceLock<(StarkProvingKey, StarkVerifyingKey)>, + pub compress_cache_misses: AtomicUsize, - /// The wrap program that wraps a proof into a SNARK-friendly field. - pub wrap_program: OnceLock>, + pub vk_root: >::Digest, - /// The proving key and verifying key for the wrap step. - pub wrap_keys: OnceLock<(StarkProvingKey, StarkVerifyingKey)>, + pub allowed_vk_map: BTreeMap<>::Digest, usize>, - /// The machine used for proving the core step. - pub core_prover: C::CoreProver, + pub vk_merkle_tree: MerkleTree, - /// The machine used for proving the recursive and reduction steps. - pub compress_prover: C::CompressProver, + pub core_shape_config: Option>, - /// The machine used for proving the shrink step. - pub shrink_prover: C::ShrinkProver, + pub recursion_shape_config: Option>>, - /// The machine used for proving the wrapping step. - pub wrap_prover: C::WrapProver, + pub wrap_program: OnceLock>>, + + pub wrap_vk: OnceLock>, + + pub vk_verification: bool, } impl SP1Prover { /// Initializes a new [SP1Prover]. #[instrument(name = "initialize prover", level = "debug", skip_all)] pub fn new() -> Self { - let prover = Self::uninitialized(); - // Initialize everything except wrap key which is a bit slow. 
- prover.recursion_program(); - prover.deferred_program(); - prover.compress_program(); - prover.shrink_program(); - prover.wrap_program(); - prover.recursion_keys(); - prover.deferred_keys(); - prover.compress_keys(); - prover.shrink_keys(); - prover + Self::uninitialized() } /// Creates a new [SP1Prover] with lazily initialized components. @@ -150,58 +170,99 @@ impl SP1Prover { let core_machine = RiscvAir::machine(CoreSC::default()); let core_prover = C::CoreProver::new(core_machine); - let compress_machine = CompressAir::machine(InnerSC::default()); + let compress_machine = CompressAir::compress_machine(InnerSC::default()); let compress_prover = C::CompressProver::new(compress_machine); - let shrink_machine = ShrinkAir::wrap_machine_dyn(InnerSC::compressed()); + // TODO: Put the correct shrink and wrap machines here. + let shrink_machine = ShrinkAir::shrink_machine(InnerSC::compressed()); let shrink_prover = C::ShrinkProver::new(shrink_machine); let wrap_machine = WrapAir::wrap_machine(OuterSC::default()); let wrap_prover = C::WrapProver::new(wrap_machine); + let core_cache_size = NonZeroUsize::new( + env::var("PROVER_CORE_CACHE_SIZE") + .unwrap_or_else(|_| CORE_CACHE_SIZE.to_string()) + .parse() + .unwrap_or(CORE_CACHE_SIZE), + ) + .expect("PROVER_CORE_CACHE_SIZE must be a non-zero usize"); + + let compress_cache_size = NonZeroUsize::new( + env::var("PROVER_COMPRESS_CACHE_SIZE") + .unwrap_or_else(|_| COMPRESS_CACHE_SIZE.to_string()) + .parse() + .unwrap_or(COMPRESS_CACHE_SIZE), + ) + .expect("PROVER_COMPRESS_CACHE_SIZE must be a non-zero usize"); + + let core_shape_config = env::var("FIX_CORE_SHAPES") + .map(|v| v.eq_ignore_ascii_case("true")) + .unwrap_or(true) + .then_some(CoreShapeConfig::default()); + + let recursion_shape_config = env::var("FIX_RECURSION_SHAPES") + .map(|v| v.eq_ignore_ascii_case("true")) + .unwrap_or(true) + .then_some(RecursionShapeConfig::default()); + + let vk_verification = + env::var("VERIFY_VK").map(|v| v.eq_ignore_ascii_case("true")).unwrap_or(true); + + tracing::info!("vk verification: {}", vk_verification); + + // Read the shapes from the shapes directory and deserialize them into memory. + let allowed_vk_map: BTreeMap<[BabyBear; DIGEST_SIZE], usize> = if vk_verification { + bincode::deserialize(include_bytes!("../vk_map.bin")).unwrap() + } else { + bincode::deserialize(include_bytes!("../dummy_vk_map.bin")).unwrap() + }; + + let (root, merkle_tree) = MerkleTree::commit(allowed_vk_map.keys().copied().collect()); + Self { - recursion_program: OnceLock::new(), - recursion_keys: OnceLock::new(), - deferred_program: OnceLock::new(), - deferred_keys: OnceLock::new(), - compress_program: OnceLock::new(), - compress_keys: OnceLock::new(), - shrink_program: OnceLock::new(), - shrink_keys: OnceLock::new(), - wrap_program: OnceLock::new(), - wrap_keys: OnceLock::new(), core_prover, compress_prover, shrink_prover, wrap_prover, + recursion_programs: Mutex::new(LruCache::new(core_cache_size)), + recursion_cache_misses: AtomicUsize::new(0), + compress_programs: Mutex::new(LruCache::new(compress_cache_size)), + compress_cache_misses: AtomicUsize::new(0), + vk_root: root, + vk_merkle_tree: merkle_tree, + allowed_vk_map, + core_shape_config, + recursion_shape_config, + vk_verification, + wrap_program: OnceLock::new(), + wrap_vk: OnceLock::new(), } } /// Fully initializes the programs, proving keys, and verifying keys that are normally
- pub fn initialize(&mut self) { - self.recursion_program(); - self.deferred_program(); - self.compress_program(); - self.shrink_program(); - self.wrap_program(); - self.recursion_keys(); - self.deferred_keys(); - self.compress_keys(); - self.shrink_keys(); - self.wrap_keys(); - } + /// lazily initialized. TODO: remove this. + pub fn initialize(&mut self) {} /// Creates a proving key and a verifying key for a given RISC-V ELF. #[instrument(name = "setup", level = "debug", skip_all)] pub fn setup(&self, elf: &[u8]) -> (SP1ProvingKey, SP1VerifyingKey) { - let program = Program::from(elf).unwrap(); + let program = self.get_program(elf).unwrap(); let (pk, vk) = self.core_prover.setup(&program); let vk = SP1VerifyingKey { vk }; - let pk = SP1ProvingKey { pk, elf: elf.to_vec(), vk: vk.clone() }; + let pk = SP1ProvingKey { pk: pk.to_host(), elf: elf.to_vec(), vk: vk.clone() }; (pk, vk) } + /// Get a program with an allowed preprocessed shape. + pub fn get_program(&self, elf: &[u8]) -> eyre::Result { + let mut program = Program::from(elf)?; + if let Some(core_shape_config) = &self.core_shape_config { + core_shape_config.fix_preprocessed_shape(&mut program)?; + } + Ok(program) + } + /// Generate a proof of an SP1 program with the specified inputs. #[instrument(name = "execute", level = "info", skip_all)] pub fn execute<'a>( @@ -211,7 +272,7 @@ impl SP1Prover { mut context: SP1Context<'a>, ) -> Result<(SP1PublicValues, ExecutionReport), ExecutionError> { context.subproof_verifier.replace(Arc::new(self)); - let program = Program::from(elf).unwrap(); + let program = self.get_program(elf).unwrap(); let opts = SP1CoreOpts::default(); let mut runtime = Executor::with_context(program, opts, context); runtime.write_vecs(&stdin.buffer); @@ -233,16 +294,21 @@ impl SP1Prover { mut context: SP1Context<'a>, ) -> Result { context.subproof_verifier.replace(Arc::new(self)); - let program = Program::from(&pk.elf).unwrap(); - let (proof, public_values_stream, cycles) = - sp1_core_machine::utils::prove_with_context::<_, C::CoreProver>( - &self.core_prover, + let program = self.get_program(&pk.elf).unwrap(); + let (proof, public_values_stream, cycles) = sp1_core_machine::utils::prove_with_context::< + _, + C::CoreProver, + >( + &self.core_prover, + &>>::DeviceProvingKey::from_host( &pk.pk, - program, - stdin, - opts.core_opts, - context, - )?; + ), + program, + stdin, + opts.core_opts, + context, + self.core_shape_config.as_ref(), + )?; Self::check_for_high_cycles(cycles); let public_values = SP1PublicValues::from(&public_values_stream); Ok(SP1CoreProof { @@ -253,33 +319,216 @@ impl SP1Prover { }) } - pub fn get_recursion_core_inputs<'a>( - &'a self, - vk: &'a StarkVerifyingKey, - leaf_challenger: &'a Challenger, + pub fn recursion_program( + &self, + input: &SP1RecursionWitnessValues, + ) -> Arc> { + let mut cache = self.recursion_programs.lock().unwrap_or_else(|e| e.into_inner()); + cache + .get_or_insert(input.shape(), || { + let misses = self.recursion_cache_misses.fetch_add(1, Ordering::Relaxed); + tracing::debug!("core cache miss, misses: {}", misses); + // Get the operations. + let builder_span = tracing::debug_span!("build recursion program").entered(); + let mut builder = Builder::::default(); + + let input = input.read(&mut builder); + SP1RecursiveVerifier::verify(&mut builder, self.core_prover.machine(), input); + let operations = builder.into_operations(); + builder_span.exit(); + + // Compile the program. 
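+                // Note: the compiled program is fixed to an allowed recursion shape (when a shape config is set) and cached by the input's shape in the LruCache above, so repeated shapes skip this work.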
+ let compiler_span = tracing::debug_span!("compile recursion program").entered(); + let mut compiler = AsmCompiler::::default(); + let mut program = compiler.compile(operations); + if let Some(recursion_shape_config) = &self.recursion_shape_config { + recursion_shape_config.fix_shape(&mut program); + } + let program = Arc::new(program); + compiler_span.exit(); + program + }) + .clone() + } + + pub fn compress_program( + &self, + input: &SP1CompressWithVKeyWitnessValues, + ) -> Arc> { + let mut cache = self.compress_programs.lock().unwrap_or_else(|e| e.into_inner()); + cache + .get_or_insert(input.shape(), || { + let misses = self.compress_cache_misses.fetch_add(1, Ordering::Relaxed); + tracing::debug!("compress cache miss, misses: {}", misses); + // Get the operations. + let builder_span = tracing::debug_span!("build compress program").entered(); + let mut builder = Builder::::default(); + + // read the input. + let input = input.read(&mut builder); + // Verify the proof. + SP1CompressWithVKeyVerifier::verify( + &mut builder, + self.compress_prover.machine(), + input, + self.vk_verification, + PublicValuesOutputDigest::Reduce, + ); + let operations = builder.into_operations(); + builder_span.exit(); + + // Compile the program. + let compiler_span = tracing::debug_span!("compile compress program").entered(); + let mut compiler = AsmCompiler::::default(); + let mut program = compiler.compile(operations); + if let Some(recursion_shape_config) = &self.recursion_shape_config { + recursion_shape_config.fix_shape(&mut program); + } + let program = Arc::new(program); + compiler_span.exit(); + program + }) + .clone() + } + + pub fn shrink_program( + &self, + input: &SP1CompressWithVKeyWitnessValues, + ) -> Arc> { + // Get the operations. + let builder_span = tracing::debug_span!("build shrink program").entered(); + let mut builder = Builder::::default(); + let input = input.read(&mut builder); + // Verify the proof. + SP1CompressRootVerifierWithVKey::verify( + &mut builder, + self.compress_prover.machine(), + input, + self.vk_verification, + PublicValuesOutputDigest::Reduce, + ); + let operations = builder.into_operations(); + builder_span.exit(); + + // Compile the program. + let compiler_span = tracing::debug_span!("compile shrink program").entered(); + let mut compiler = AsmCompiler::::default(); + let mut program = compiler.compile(operations); + program.shape = Some(ShrinkAir::::shrink_shape()); + let program = Arc::new(program); + compiler_span.exit(); + program + } + + pub fn wrap_program(&self) -> Arc> { + self.wrap_program + .get_or_init(|| { + // Get the operations. + let builder_span = tracing::debug_span!("build compress program").entered(); + let mut builder = Builder::::default(); + + let shrink_shape: ProofShape = ShrinkAir::::shrink_shape().into(); + let input_shape = SP1CompressShape::from(vec![shrink_shape]); + let shape = SP1CompressWithVkeyShape { + compress_shape: input_shape, + merkle_tree_height: self.vk_merkle_tree.height, + }; + let dummy_input = + SP1CompressWithVKeyWitnessValues::dummy(self.shrink_prover.machine(), &shape); + + let input = dummy_input.read(&mut builder); + + // Attest that the merkle tree root is correct. + let root = input.merkle_var.root; + for (val, expected) in root.iter().zip(self.vk_root.iter()) { + builder.assert_felt_eq(*val, *expected); + } + // Verify the proof. 
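+        // When vk_verification is enabled, this should also check that the proof's vk belongs to the allowed set committed to by the Merkle root.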
+ SP1CompressRootVerifierWithVKey::verify( + &mut builder, + self.shrink_prover.machine(), + input, + self.vk_verification, + PublicValuesOutputDigest::Root, + ); + + let operations = builder.into_operations(); + builder_span.exit(); + + // Compile the program. + let compiler_span = tracing::debug_span!("compile compress program").entered(); + let mut compiler = AsmCompiler::::default(); + let program = Arc::new(compiler.compile(operations)); + compiler_span.exit(); + program + }) + .clone() + } + + pub fn deferred_program( + &self, + input: &SP1DeferredWitnessValues, + ) -> Arc> { + // Compile the program. + + // Get the operations. + let operations_span = + tracing::debug_span!("get operations for the deferred program").entered(); + let mut builder = Builder::::default(); + let input_read_span = tracing::debug_span!("Read input values").entered(); + let input = input.read(&mut builder); + input_read_span.exit(); + let verify_span = tracing::debug_span!("Verify deferred program").entered(); + + // Verify the proof. + SP1DeferredVerifier::verify( + &mut builder, + self.compress_prover.machine(), + input, + self.vk_verification, + ); + verify_span.exit(); + let operations = builder.into_operations(); + operations_span.exit(); + + // Compile the program. + tracing::debug_span!("compile compress program").in_scope(|| { + let mut compiler = AsmCompiler::::default(); + Arc::new(compiler.compile(operations)) + }) + } + + pub fn get_recursion_core_inputs( + &self, + vk: &StarkVerifyingKey, + leaf_challenger: &Challenger, shard_proofs: &[ShardProof], batch_size: usize, is_complete: bool, - ) -> Vec>> { + ) -> Vec> { let mut core_inputs = Vec::new(); let mut reconstruct_challenger = self.core_prover.config().challenger(); vk.observe_into(&mut reconstruct_challenger); // Prepare the inputs for the recursion programs. - for batch in shard_proofs.chunks(batch_size) { + for (batch_idx, batch) in shard_proofs.chunks(batch_size).enumerate() { let proofs = batch.to_vec(); - core_inputs.push(SP1RecursionMemoryLayout { - vk, - machine: self.core_prover.machine(), + core_inputs.push(SP1RecursionWitnessValues { + vk: vk.clone(), shard_proofs: proofs.clone(), - leaf_challenger, + leaf_challenger: leaf_challenger.clone(), initial_reconstruct_challenger: reconstruct_challenger.clone(), is_complete, + is_first_shard: batch_idx == 0, + vk_root: self.vk_root, }); + assert_eq!(reconstruct_challenger.input_buffer.len(), 0); + assert_eq!(reconstruct_challenger.sponge_state.len(), 16); + assert_eq!(reconstruct_challenger.output_buffer.len(), 16); for proof in batch.iter() { - reconstruct_challenger.observe(proof.commitment.main_commit); + reconstruct_challenger.observe(proof.commitment.global_main_commit); reconstruct_challenger .observe_slice(&proof.public_values[0..self.core_prover.num_pv_elts()]); } @@ -297,32 +546,35 @@ impl SP1Prover { vk: &'a StarkVerifyingKey, leaf_challenger: &'a Challenger, last_proof_pv: &PublicValues, BabyBear>, - deferred_proofs: &[ShardProof], + deferred_proofs: &[SP1ReduceProof], batch_size: usize, - ) -> Vec>> { + ) -> Vec> { // Prepare the inputs for the deferred proofs recursive verification. 
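+        // Deferred proofs are verified in batches: each batch's (vk, proof) pairs become one witness, and the running digest chains the batches together.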
let mut deferred_digest = [Val::::zero(); DIGEST_SIZE]; let mut deferred_inputs = Vec::new(); for batch in deferred_proofs.chunks(batch_size) { - let proofs = batch.to_vec(); + let vks_and_proofs = + batch.iter().cloned().map(|proof| (proof.vk, proof.proof)).collect::>(); - deferred_inputs.push(SP1DeferredMemoryLayout { - compress_vk: self.compress_vk(), - machine: self.compress_prover.machine(), - proofs, - start_reconstruct_deferred_digest: deferred_digest.to_vec(), + let input = SP1CompressWitnessValues { vks_and_proofs, is_complete: true }; + let input = self.make_merkle_proofs(input); + let SP1CompressWithVKeyWitnessValues { compress_val, merkle_val } = input; + + deferred_inputs.push(SP1DeferredWitnessValues { + vks_and_proofs: compress_val.vks_and_proofs, + vk_merkle_data: merkle_val, + start_reconstruct_deferred_digest: deferred_digest, is_complete: false, - sp1_vk: vk, - sp1_machine: self.core_prover.machine(), + sp1_vk_digest: vk.hash_babybear(), end_pc: Val::::zero(), end_shard: last_proof_pv.shard + BabyBear::one(), end_execution_shard: last_proof_pv.execution_shard, init_addr_bits: last_proof_pv.last_init_addr_bits, finalize_addr_bits: last_proof_pv.last_finalize_addr_bits, leaf_challenger: leaf_challenger.clone(), - committed_value_digest: last_proof_pv.committed_value_digest.to_vec(), - deferred_proofs_digest: last_proof_pv.deferred_proofs_digest.to_vec(), + committed_value_digest: last_proof_pv.committed_value_digest, + deferred_proofs_digest: last_proof_pv.deferred_proofs_digest, }); deferred_digest = Self::hash_deferred_proofs(deferred_digest, batch); @@ -337,9 +589,9 @@ impl SP1Prover { vk: &'a SP1VerifyingKey, leaf_challenger: &'a Challenger, shard_proofs: &[ShardProof], - deferred_proofs: &[ShardProof], + deferred_proofs: &[SP1ReduceProof], batch_size: usize, - ) -> Vec> { + ) -> Vec { let is_complete = shard_proofs.len() == 1 && deferred_proofs.is_empty(); let core_inputs = self.get_recursion_core_inputs( &vk.vk, @@ -358,8 +610,8 @@ impl SP1Prover { ); let mut inputs = Vec::new(); - inputs.extend(core_inputs.into_iter().map(SP1CompressMemoryLayouts::Core)); - inputs.extend(deferred_inputs.into_iter().map(SP1CompressMemoryLayouts::Deferred)); + inputs.extend(core_inputs.into_iter().map(SP1CircuitWitness::Core)); + inputs.extend(deferred_inputs.into_iter().map(SP1CircuitWitness::Deferred)); inputs } @@ -369,18 +621,21 @@ impl SP1Prover { &self, vk: &SP1VerifyingKey, proof: SP1CoreProof, - deferred_proofs: Vec>, + deferred_proofs: Vec>, opts: SP1ProverOpts, ) -> Result, SP1RecursionProverError> { - // Set the batch size for the reduction tree. - let batch_size = 2; + // The batch size for reducing two layers of recursion. + let batch_size = REDUCE_BATCH_SIZE; + // The batch size for reducing the first layer of recursion. + let first_layer_batch_size = 1; + let shard_proofs = &proof.proof.0; // Get the leaf challenger. let mut leaf_challenger = self.core_prover.config().challenger(); vk.vk.observe_into(&mut leaf_challenger); shard_proofs.iter().for_each(|proof| { - leaf_challenger.observe(proof.commitment.main_commit); + leaf_challenger.observe(proof.commitment.global_main_commit); leaf_challenger.observe_slice(&proof.public_values[0..self.core_prover.num_pv_elts()]); }); @@ -390,26 +645,26 @@ impl SP1Prover { &leaf_challenger, shard_proofs, &deferred_proofs, - batch_size, + first_layer_batch_size, ); // Calculate the expected height of the tree. 
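+        // With a reduce batch size of 2, this comes out to ceil(log2(n)) layers for n > 1 first-layer inputs.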
- let mut expected_height = 1; + let mut expected_height = if first_layer_inputs.len() == 1 { 0 } else { 1 }; let num_first_layer_inputs = first_layer_inputs.len(); let mut num_layer_inputs = num_first_layer_inputs; while num_layer_inputs > batch_size { - num_layer_inputs = (num_layer_inputs + 1) / 2; + num_layer_inputs = num_layer_inputs.div_ceil(2); expected_height += 1; } // Generate the proofs. let span = tracing::Span::current().clone(); - let proof = thread::scope(|s| { + let (vk, proof) = thread::scope(|s| { let _span = span.enter(); // Spawn a worker that sends the first layer inputs to a bounded channel. let input_sync = Arc::new(TurnBasedSync::new()); - let (input_tx, input_rx) = sync_channel::<(usize, usize, SP1CompressMemoryLayouts)>( + let (input_tx, input_rx) = sync_channel::<(usize, usize, SP1CircuitWitness)>( opts.recursion_opts.checkpoints_channel_capacity, ); let input_tx = Arc::new(Mutex::new(input_tx)); @@ -431,9 +686,9 @@ impl SP1Prover { sync_channel::<( usize, usize, + Arc>, ExecutionRecord, Vec<(String, RowMajorMatrix)>, - ReduceProgramType, )>(opts.recursion_opts.records_and_traces_channel_capacity); let record_and_trace_tx = Arc::new(Mutex::new(record_and_trace_tx)); let record_and_trace_rx = Arc::new(Mutex::new(record_and_trace_rx)); @@ -449,36 +704,31 @@ impl SP1Prover { let received = { input_rx.lock().unwrap().recv() }; if let Ok((index, height, input)) = received { // Get the program and witness stream. - let (program, witness_stream, program_type) = tracing::debug_span!( - "write witness stream" + let (program, witness_stream) = tracing::debug_span!( + "get program and witness stream" ) .in_scope(|| match input { - SP1CompressMemoryLayouts::Core(input) => { + SP1CircuitWitness::Core(input) => { let mut witness_stream = Vec::new(); - witness_stream.extend(input.write()); - ( - self.recursion_program(), - witness_stream, - ReduceProgramType::Core, - ) + Witnessable::::write(&input, &mut witness_stream); + (self.recursion_program(&input), witness_stream) } - SP1CompressMemoryLayouts::Deferred(input) => { + SP1CircuitWitness::Deferred(input) => { let mut witness_stream = Vec::new(); - witness_stream.extend(input.write()); - ( - self.deferred_program(), - witness_stream, - ReduceProgramType::Deferred, - ) + Witnessable::::write(&input, &mut witness_stream); + (self.deferred_program(&input), witness_stream) } - SP1CompressMemoryLayouts::Compress(input) => { + SP1CircuitWitness::Compress(input) => { let mut witness_stream = Vec::new(); - witness_stream.extend(input.write()); - ( - self.compress_program(), - witness_stream, - ReduceProgramType::Reduce, - ) + + let input_with_merkle = self.make_merkle_proofs(input); + + Witnessable::::write( + &input_with_merkle, + &mut witness_stream, + ); + + (self.compress_program(&input_with_merkle), witness_stream) } }); @@ -486,7 +736,7 @@ impl SP1Prover { let record = tracing::debug_span!("execute runtime").in_scope(|| { let mut runtime = RecursionRuntime::, Challenge, _>::new( - program, + program.clone(), self.compress_prover.config().perm.clone(), ); runtime.witness_stream = witness_stream.into(); @@ -502,15 +752,19 @@ impl SP1Prover { // Generate the dependencies. let mut records = vec![record]; tracing::debug_span!("generate dependencies").in_scope(|| { - self.compress_prover - .machine() - .generate_dependencies(&mut records, &opts.recursion_opts) + self.compress_prover.machine().generate_dependencies( + &mut records, + &opts.recursion_opts, + None, + ) }); // Generate the traces. 
let record = records.into_iter().next().unwrap(); - let traces = tracing::debug_span!("generate traces") - .in_scope(|| self.compress_prover.generate_traces(&record)); + let traces = tracing::debug_span!("generate traces").in_scope(|| { + self.compress_prover + .generate_traces(&record, InteractionScope::Local) + }); // Wait for our turn to update the state. record_and_trace_sync.wait_for_turn(index); @@ -519,7 +773,7 @@ impl SP1Prover { record_and_trace_tx .lock() .unwrap() - .send((index, height, record, traces, program_type)) + .send((index, height, program, record, traces)) .unwrap(); // Advance the turn. @@ -534,7 +788,7 @@ impl SP1Prover { // Spawn workers who generate the compress proofs. let proofs_sync = Arc::new(TurnBasedSync::new()); let (proofs_tx, proofs_rx) = - sync_channel::<(usize, usize, ShardProof, ReduceProgramType)>( + sync_channel::<(usize, usize, StarkVerifyingKey, ShardProof)>( num_first_layer_inputs * 2, ); let proofs_tx = Arc::new(Mutex::new(proofs_tx)); @@ -549,16 +803,11 @@ impl SP1Prover { let _span = span.enter(); loop { let received = { record_and_trace_rx.lock().unwrap().recv() }; - if let Ok((index, height, record, traces, program_type)) = received { + if let Ok((index, height, program, record, traces)) = received { tracing::debug_span!("batch").in_scope(|| { - // Get the proving key. - let pk = if program_type == ReduceProgramType::Core { - self.recursion_pk() - } else if program_type == ReduceProgramType::Deferred { - self.deferred_pk() - } else { - self.compress_pk() - }; + // Get the keys. + let (pk, vk) = tracing::debug_span!("Setup compress program") + .in_scope(|| self.compress_prover.setup(&program)); // Observe the proving key. let mut challenger = self.compress_prover.config().challenger(); @@ -566,32 +815,58 @@ impl SP1Prover { pk.observe_into(&mut challenger); }); + #[cfg(feature = "debug")] + self.compress_prover.debug_constraints( + &pk.to_host(), + vec![record.clone()], + &mut challenger.clone(), + ); + // Commit to the record and traces. - let data = tracing::debug_span!("commit") - .in_scope(|| self.compress_prover.commit(record, traces)); + let local_data = tracing::debug_span!("commit") + .in_scope(|| self.compress_prover.commit(&record, traces)); // Observe the commitment. - tracing::debug_span!("observe commitment").in_scope(|| { - challenger.observe(data.main_commit); + tracing::debug_span!("observe public values").in_scope(|| { challenger.observe_slice( - &data.public_values[0..self.compress_prover.num_pv_elts()], + &local_data.public_values[0..self.compress_prover.num_pv_elts()], ); }); // Generate the proof. let proof = tracing::debug_span!("open").in_scope(|| { - self.compress_prover.open(pk, data, &mut challenger).unwrap() + self.compress_prover + .open( + &pk, + None, + local_data, + &mut challenger, + &[ + ::Challenge::zero(), + ::Challenge::zero(), + ], + ) + .unwrap() }); + // Verify the proof. + #[cfg(feature = "debug")] + self.compress_prover + .machine() + .verify( + &vk, + &sp1_stark::MachineProof { + shard_proofs: vec![proof.clone()], + }, + &mut self.compress_prover.config().challenger(), + ) + .unwrap(); + // Wait for our turn to update the state. prover_sync.wait_for_turn(index); // Send the proof. - proofs_tx - .lock() - .unwrap() - .send((index, height, proof, program_type)) - .unwrap(); + proofs_tx.lock().unwrap().send((index, height, vk, proof)).unwrap(); // Advance the turn. 
prover_sync.advance_turn(); @@ -615,13 +890,13 @@ impl SP1Prover { let mut batch: Vec<( usize, usize, - ShardProof, - ReduceProgramType, + StarkVerifyingKey, + ShardProof, )> = Vec::new(); loop { let received = { proofs_rx.lock().unwrap().recv() }; - if let Ok((index, height, proof, program_type)) = received { - batch.push((index, height, proof, program_type)); + if let Ok((index, height, vk, proof)) = received { + batch.push((index, height, vk, proof)); // Compute whether we've reached the root of the tree. let is_complete = height == expected_height; @@ -642,23 +917,23 @@ impl SP1Prover { // first input, otherwise we include all inputs. let inputs = if is_last { vec![batch[0].clone()] } else { batch.clone() }; - let shard_proofs = - inputs.iter().map(|(_, _, proof, _)| proof.clone()).collect(); - let kinds = inputs - .iter() - .map(|(_, _, _, program_type)| *program_type) - .collect(); - let input = - SP1CompressMemoryLayouts::Compress(SP1CompressMemoryLayout { - compress_vk: self.compress_vk(), - recursive_machine: self.compress_prover.machine(), - shard_proofs, - kinds, - is_complete, - }); + + let next_input_index = inputs[0].1 + 1; + let vks_and_proofs = inputs + .into_iter() + .map(|(_, _, vk, proof)| (vk, proof)) + .collect::>(); + let input = SP1CircuitWitness::Compress(SP1CompressWitnessValues { + vks_and_proofs, + is_complete, + }); input_sync.wait_for_turn(count); - input_tx.lock().unwrap().send((count, inputs[0].1 + 1, input)).unwrap(); + input_tx + .lock() + .unwrap() + .send((count, next_input_index, input)) + .unwrap(); input_sync.advance_turn(); count += 1; @@ -690,48 +965,11 @@ impl SP1Prover { } handle.join().unwrap(); - let output = proofs_rx.lock().unwrap().recv().unwrap(); - output.2 + let (_, _, vk, proof) = proofs_rx.lock().unwrap().recv().unwrap(); + (vk, proof) }); - Ok(SP1ReduceProof { proof }) - } - - /// Generate a proof with the compress machine. - pub fn compress_machine_proof( - &self, - input: impl Hintable, - program: &RecursionProgram, - pk: &StarkProvingKey, - opts: SP1ProverOpts, - ) -> Result, SP1RecursionProverError> { - let mut runtime = RecursionRuntime::, Challenge, _>::new( - program, - self.compress_prover.config().perm.clone(), - ); - - let span = tracing::debug_span!("execute runtime"); - let guard = span.enter(); - - let mut witness_stream = Vec::new(); - witness_stream.extend(input.write()); - - runtime.witness_stream = witness_stream.into(); - runtime.run().map_err(|e| SP1RecursionProverError::RuntimeError(e.to_string()))?; - runtime.print_stats(); - - drop(guard); - - let mut recursive_challenger = self.compress_prover.config().challenger(); - let proof = self - .compress_prover - .prove(pk, vec![runtime.record], &mut recursive_challenger, opts.recursion_opts) - .unwrap() - .shard_proofs - .pop() - .unwrap(); - - Ok(proof) + Ok(SP1ReduceProof { vk, proof }) } /// Wrap a reduce proof into a STARK proven over a SNARK-friendly field. @@ -742,41 +980,43 @@ impl SP1Prover { opts: SP1ProverOpts, ) -> Result, SP1RecursionProverError> { // Make the compress proof. - let input = SP1RootMemoryLayout { - machine: self.compress_prover.machine(), - proof: reduced_proof.proof, - is_reduce: true, + let SP1ReduceProof { vk: compressed_vk, proof: compressed_proof } = reduced_proof; + let input = SP1CompressWitnessValues { + vks_and_proofs: vec![(compressed_vk, compressed_proof)], + is_complete: true, }; + let input_with_merkle = self.make_merkle_proofs(input); + + let program = self.shrink_program(&input_with_merkle); + // Run the compress program. 
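+        // (This executes the shrink program built above; it is then proven with the shrink prover below.)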
let mut runtime = RecursionRuntime::, Challenge, _>::new( - self.shrink_program(), + program.clone(), self.shrink_prover.config().perm.clone(), ); let mut witness_stream = Vec::new(); - witness_stream.extend(input.write()); + Witnessable::::write(&input_with_merkle, &mut witness_stream); runtime.witness_stream = witness_stream.into(); runtime.run().map_err(|e| SP1RecursionProverError::RuntimeError(e.to_string()))?; runtime.print_stats(); - tracing::debug!("Compress program executed successfully"); + tracing::debug!("Shrink program executed successfully"); + + let (shrink_pk, shrink_vk) = + tracing::debug_span!("setup shrink").in_scope(|| self.shrink_prover.setup(&program)); // Prove the compress program. let mut compress_challenger = self.shrink_prover.config().challenger(); let mut compress_proof = self .shrink_prover - .prove( - self.shrink_pk(), - vec![runtime.record], - &mut compress_challenger, - opts.recursion_opts, - ) + .prove(&shrink_pk, vec![runtime.record], &mut compress_challenger, opts.recursion_opts) .unwrap(); - Ok(SP1ReduceProof { proof: compress_proof.shard_proofs.pop().unwrap() }) + Ok(SP1ReduceProof { vk: shrink_vk, proof: compress_proof.shard_proofs.pop().unwrap() }) } /// Wrap a reduce proof into a STARK proven over a SNARK-friendly field. @@ -786,50 +1026,53 @@ impl SP1Prover { compressed_proof: SP1ReduceProof, opts: SP1ProverOpts, ) -> Result, SP1RecursionProverError> { - let input = SP1RootMemoryLayout { - machine: self.shrink_prover.machine(), - proof: compressed_proof.proof, - is_reduce: false, + let SP1ReduceProof { vk: compressed_vk, proof: compressed_proof } = compressed_proof; + let input = SP1CompressWitnessValues { + vks_and_proofs: vec![(compressed_vk, compressed_proof)], + is_complete: true, }; + let input_with_vk = self.make_merkle_proofs(input); + + let program = self.wrap_program(); // Run the compress program. let mut runtime = RecursionRuntime::, Challenge, _>::new( - self.wrap_program(), + program.clone(), self.shrink_prover.config().perm.clone(), ); let mut witness_stream = Vec::new(); - witness_stream.extend(input.write()); + Witnessable::::write(&input_with_vk, &mut witness_stream); runtime.witness_stream = witness_stream.into(); runtime.run().map_err(|e| SP1RecursionProverError::RuntimeError(e.to_string()))?; runtime.print_stats(); - tracing::debug!("Wrap program executed successfully"); + tracing::debug!("wrap program executed successfully"); + + // Setup the wrap program. + let (wrap_pk, wrap_vk) = + tracing::debug_span!("setup wrap").in_scope(|| self.wrap_prover.setup(&program)); + + if self.wrap_vk.set(wrap_vk.clone()).is_ok() { + tracing::debug!("wrap verifier key set"); + } // Prove the wrap program. 
let mut wrap_challenger = self.wrap_prover.config().challenger(); let time = std::time::Instant::now(); let mut wrap_proof = self .wrap_prover - .prove(self.wrap_pk(), vec![runtime.record], &mut wrap_challenger, opts.recursion_opts) + .prove(&wrap_pk, vec![runtime.record], &mut wrap_challenger, opts.recursion_opts) .unwrap(); let elapsed = time.elapsed(); - tracing::debug!("Wrap proving time: {:?}", elapsed); + tracing::debug!("wrap proving time: {:?}", elapsed); let mut wrap_challenger = self.wrap_prover.config().challenger(); - let result = - self.wrap_prover.machine().verify(self.wrap_vk(), &wrap_proof, &mut wrap_challenger); - match result { - Ok(_) => tracing::info!("Proof verified successfully"), - Err(MachineVerificationError::NonZeroCumulativeSum) => { - tracing::info!("Proof verification failed: NonZeroCumulativeSum") - } - e => panic!("Proof verification failed: {:?}", e), - } - tracing::info!("Wrapping successful"); + self.wrap_prover.machine().verify(&wrap_vk, &wrap_proof, &mut wrap_challenger).unwrap(); + tracing::info!("wrapping successful"); - Ok(SP1ReduceProof { proof: wrap_proof.shard_proofs.pop().unwrap() }) + Ok(SP1ReduceProof { vk: wrap_vk, proof: wrap_proof.shard_proofs.pop().unwrap() }) } /// Wrap the STARK proven over a SNARK-friendly field into a PLONK proof. @@ -839,13 +1082,17 @@ impl SP1Prover { proof: SP1ReduceProof, build_dir: &Path, ) -> PlonkBn254Proof { - let vkey_digest = proof.sp1_vkey_digest_bn254(); - let commited_values_digest = proof.sp1_commited_values_digest_bn254(); + let input = SP1CompressWitnessValues { + vks_and_proofs: vec![(proof.vk.clone(), proof.proof.clone())], + is_complete: true, + }; + let vkey_hash = sp1_vkey_digest_bn254(&proof); + let committed_values_digest = sp1_commited_values_digest_bn254(&proof); let mut witness = Witness::default(); - proof.proof.write(&mut witness); - witness.write_commited_values_digest(commited_values_digest); - witness.write_vkey_hash(vkey_digest); + input.write(&mut witness); + witness.write_commited_values_digest(committed_values_digest); + witness.write_vkey_hash(vkey_hash); let prover = PlonkBn254Prover::new(); let proof = prover.prove(witness, build_dir.to_path_buf()); @@ -853,8 +1100,8 @@ impl SP1Prover { // Verify the proof. prover.verify( &proof, - &vkey_digest.as_canonical_biguint(), - &commited_values_digest.as_canonical_biguint(), + &vkey_hash.as_canonical_biguint(), + &committed_values_digest.as_canonical_biguint(), build_dir, ); @@ -868,13 +1115,17 @@ impl SP1Prover { proof: SP1ReduceProof, build_dir: &Path, ) -> Groth16Bn254Proof { - let vkey_digest = proof.sp1_vkey_digest_bn254(); - let commited_values_digest = proof.sp1_commited_values_digest_bn254(); + let input = SP1CompressWitnessValues { + vks_and_proofs: vec![(proof.vk.clone(), proof.proof.clone())], + is_complete: true, + }; + let vkey_hash = sp1_vkey_digest_bn254(&proof); + let committed_values_digest = sp1_commited_values_digest_bn254(&proof); let mut witness = Witness::default(); - proof.proof.write(&mut witness); - witness.write_commited_values_digest(commited_values_digest); - witness.write_vkey_hash(vkey_digest); + input.write(&mut witness); + witness.write_commited_values_digest(committed_values_digest); + witness.write_vkey_hash(vkey_hash); let prover = Groth16Bn254Prover::new(); let proof = prover.prove(witness, build_dir.to_path_buf()); @@ -882,8 +1133,8 @@ impl SP1Prover { // Verify the proof. 
prover.verify( &proof, - &vkey_digest.as_canonical_biguint(), - &commited_values_digest.as_canonical_biguint(), + &vkey_hash.as_canonical_biguint(), + &committed_values_digest.as_canonical_biguint(), build_dir, ); @@ -893,11 +1144,12 @@ impl SP1Prover { /// Accumulate deferred proofs into a single digest. pub fn hash_deferred_proofs( prev_digest: [Val; DIGEST_SIZE], - deferred_proofs: &[ShardProof], + deferred_proofs: &[SP1ReduceProof], ) -> [Val; 8] { let mut digest = prev_digest; for proof in deferred_proofs.iter() { - let pv: &RecursionPublicValues> = proof.public_values.as_slice().borrow(); + let pv: &RecursionPublicValues> = + proof.proof.public_values.as_slice().borrow(); let committed_values_digest = words_to_bytes(&pv.committed_value_digest); digest = hash_deferred_proof( &digest, @@ -908,6 +1160,50 @@ impl SP1Prover { digest } + pub fn make_merkle_proofs( + &self, + input: SP1CompressWitnessValues, + ) -> SP1CompressWithVKeyWitnessValues { + let num_vks = self.allowed_vk_map.len(); + let (vk_indices, vk_digest_values): (Vec<_>, Vec<_>) = if self.vk_verification { + input + .vks_and_proofs + .iter() + .map(|(vk, _)| { + let vk_digest = vk.hash_babybear(); + let index = self.allowed_vk_map.get(&vk_digest).expect("vk not allowed"); + (index, vk_digest) + }) + .unzip() + } else { + input + .vks_and_proofs + .iter() + .map(|(vk, _)| { + let vk_digest = vk.hash_babybear(); + let index = (vk_digest[0].as_canonical_u32() as usize) % num_vks; + (index, [BabyBear::from_canonical_usize(index); 8]) + }) + .unzip() + }; + + let proofs = vk_indices + .iter() + .map(|index| { + let (_, proof) = MerkleTree::open(&self.vk_merkle_tree, *index); + proof + }) + .collect(); + + let merkle_val = SP1MerkleProofWitnessValues { + root: self.vk_root, + values: vk_digest_values, + vk_merkle_proofs: proofs, + }; + + SP1CompressWithVKeyWitnessValues { compress_val: input, merkle_val } + } + fn check_for_high_cycles(cycles: u64) { if cycles > 100_000_000 { tracing::warn!( @@ -921,21 +1217,26 @@ impl SP1Prover { pub mod tests { use std::{ + collections::BTreeSet, fs::File, io::{Read, Write}, }; use super::*; + use crate::build::try_build_plonk_bn254_artifacts_dev; use anyhow::Result; - use build::{try_build_groth16_bn254_artifacts_dev, try_build_plonk_bn254_artifacts_dev}; + use build::{build_constraints_and_witness, try_build_groth16_bn254_artifacts_dev}; use p3_field::PrimeField32; - use sp1_core_machine::io::SP1Stdin; + + use shapes::SP1ProofShape; + use sp1_recursion_core::air::RecursionPublicValues; #[cfg(test)] use serial_test::serial; #[cfg(test)] use sp1_core_machine::utils::setup_logger; + use utils::sp1_vkey_digest_babybear; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Test { @@ -943,38 +1244,78 @@ pub mod tests { Compress, Shrink, Wrap, - Plonk, + CircuitTest, + All, } pub fn test_e2e_prover( + prover: &SP1Prover, + elf: &[u8], + stdin: SP1Stdin, + opts: SP1ProverOpts, + test_kind: Test, + ) -> Result<()> { + run_e2e_prover_with_options(prover, elf, stdin, opts, test_kind, true) + } + + pub fn bench_e2e_prover( + prover: &SP1Prover, elf: &[u8], + stdin: SP1Stdin, opts: SP1ProverOpts, test_kind: Test, + ) -> Result<()> { + run_e2e_prover_with_options(prover, elf, stdin, opts, test_kind, false) + } + + pub fn run_e2e_prover_with_options( + prover: &SP1Prover, + elf: &[u8], + stdin: SP1Stdin, + opts: SP1ProverOpts, + test_kind: Test, + verify: bool, ) -> Result<()> { tracing::info!("initializing prover"); - let prover: SP1Prover = SP1Prover::::new(); let context = SP1Context::default(); 
tracing::info!("setup elf"); let (pk, vk) = prover.setup(elf); tracing::info!("prove core"); - let stdin = SP1Stdin::new(); let core_proof = prover.prove_core(&pk, &stdin, opts, context)?; let public_values = core_proof.public_values.clone(); - tracing::info!("verify core"); - prover.verify(&core_proof.proof, &vk)?; + if env::var("COLLECT_SHAPES").is_ok() { + let mut shapes = BTreeSet::new(); + for proof in core_proof.proof.0.iter() { + let shape = SP1ProofShape::Recursion(proof.shape()); + tracing::info!("shape: {:?}", shape); + shapes.insert(shape); + } + + let mut file = File::create("../shapes.bin").unwrap(); + bincode::serialize_into(&mut file, &shapes).unwrap(); + } + + if verify { + tracing::info!("verify core"); + prover.verify(&core_proof.proof, &vk)?; + } if test_kind == Test::Core { return Ok(()); } tracing::info!("compress"); + let compress_span = tracing::debug_span!("compress").entered(); let compressed_proof = prover.compress(&vk, core_proof, vec![], opts)?; + compress_span.exit(); - tracing::info!("verify compressed"); - prover.verify_compressed(&compressed_proof, &vk)?; + if verify { + tracing::info!("verify compressed"); + prover.verify_compressed(&compressed_proof, &vk)?; + } if test_kind == Test::Compress { return Ok(()); @@ -983,8 +1324,10 @@ pub mod tests { tracing::info!("shrink"); let shrink_proof = prover.shrink(compressed_proof, opts)?; - tracing::info!("verify shrink"); - prover.verify_shrink(&shrink_proof, &vk)?; + if verify { + tracing::info!("verify shrink"); + prover.verify_shrink(&shrink_proof, &vk)?; + } if test_kind == Test::Shrink { return Ok(()); @@ -992,37 +1335,51 @@ pub mod tests { tracing::info!("wrap bn254"); let wrapped_bn254_proof = prover.wrap_bn254(shrink_proof, opts)?; - let bytes = bincode::serialize(&wrapped_bn254_proof)?; + let bytes = bincode::serialize(&wrapped_bn254_proof).unwrap(); // Save the proof. - let mut file = File::create("proof-with-pis.bin")?; - file.write_all(bytes.as_slice())?; + let mut file = File::create("proof-with-pis.bin").unwrap(); + file.write_all(bytes.as_slice()).unwrap(); // Load the proof. 
- let mut file = File::open("proof-with-pis.bin")?; + let mut file = File::open("proof-with-pis.bin").unwrap(); let mut bytes = Vec::new(); - file.read_to_end(&mut bytes)?; + file.read_to_end(&mut bytes).unwrap(); - let wrapped_bn254_proof = bincode::deserialize(&bytes)?; + let wrapped_bn254_proof = bincode::deserialize(&bytes).unwrap(); - tracing::info!("verify wrap bn254"); - prover.verify_wrap_bn254(&wrapped_bn254_proof, &vk)?; + if verify { + tracing::info!("verify wrap bn254"); + prover.verify_wrap_bn254(&wrapped_bn254_proof, &vk).unwrap(); + } if test_kind == Test::Wrap { return Ok(()); } tracing::info!("checking vkey hash babybear"); - let vk_digest_babybear = wrapped_bn254_proof.sp1_vkey_digest_babybear(); + let vk_digest_babybear = sp1_vkey_digest_babybear(&wrapped_bn254_proof); assert_eq!(vk_digest_babybear, vk.hash_babybear()); tracing::info!("checking vkey hash bn254"); - let vk_digest_bn254 = wrapped_bn254_proof.sp1_vkey_digest_bn254(); + let vk_digest_bn254 = sp1_vkey_digest_bn254(&wrapped_bn254_proof); assert_eq!(vk_digest_bn254, vk.hash_bn254()); + tracing::info!("Test the outer Plonk circuit"); + let (constraints, witness) = + build_constraints_and_witness(&wrapped_bn254_proof.vk, &wrapped_bn254_proof.proof); + PlonkBn254Prover::test(constraints, witness); + tracing::info!("Circuit test succeeded"); + + if test_kind == Test::CircuitTest { + return Ok(()); + } + tracing::info!("generate plonk bn254 proof"); - let artifacts_dir = - try_build_plonk_bn254_artifacts_dev(prover.wrap_vk(), &wrapped_bn254_proof.proof); + let artifacts_dir = try_build_plonk_bn254_artifacts_dev( + &wrapped_bn254_proof.vk, + &wrapped_bn254_proof.proof, + ); let plonk_bn254_proof = prover.wrap_plonk_bn254(wrapped_bn254_proof.clone(), &artifacts_dir); println!("{:?}", plonk_bn254_proof); @@ -1030,17 +1387,28 @@ pub mod tests { prover.verify_plonk_bn254(&plonk_bn254_proof, &vk, &public_values, &artifacts_dir)?; tracing::info!("generate groth16 bn254 proof"); - let artifacts_dir = - try_build_groth16_bn254_artifacts_dev(prover.wrap_vk(), &wrapped_bn254_proof.proof); + let artifacts_dir = try_build_groth16_bn254_artifacts_dev( + &wrapped_bn254_proof.vk, + &wrapped_bn254_proof.proof, + ); let groth16_bn254_proof = prover.wrap_groth16_bn254(wrapped_bn254_proof, &artifacts_dir); println!("{:?}", groth16_bn254_proof); - prover.verify_groth16_bn254(&groth16_bn254_proof, &vk, &public_values, &artifacts_dir)?; + if verify { + prover.verify_groth16_bn254( + &groth16_bn254_proof, + &vk, + &public_values, + &artifacts_dir, + )?; + } Ok(()) } - pub fn test_e2e_with_deferred_proofs_prover() -> Result<()> { + pub fn test_e2e_with_deferred_proofs_prover( + opts: SP1ProverOpts, + ) -> Result<()> { // Test program which proves the Keccak-256 hash of various inputs.
let keccak_elf = include_bytes!("../../../tests/keccak256/elf/riscv32im-succinct-zkvm-elf"); @@ -1049,8 +1417,7 @@ pub mod tests { include_bytes!("../../../tests/verify-proof/elf/riscv32im-succinct-zkvm-elf"); tracing::info!("initializing prover"); - let prover: SP1Prover = SP1Prover::new(); - let opts = SP1ProverOpts::default(); + let prover = SP1Prover::::new(); tracing::info!("setup keccak elf"); let (keccak_pk, keccak_vk) = prover.setup(keccak_elf); @@ -1094,19 +1461,20 @@ pub mod tests { .unwrap(); stdin.write(&vkey_digest); stdin.write(&vec![pv_1.clone(), pv_2.clone(), pv_2.clone()]); - stdin.write_proof(deferred_reduce_1.proof.clone(), keccak_vk.vk.clone()); - stdin.write_proof(deferred_reduce_2.proof.clone(), keccak_vk.vk.clone()); - stdin.write_proof(deferred_reduce_2.proof.clone(), keccak_vk.vk.clone()); + stdin.write_proof(deferred_reduce_1.clone(), keccak_vk.vk.clone()); + stdin.write_proof(deferred_reduce_2.clone(), keccak_vk.vk.clone()); + stdin.write_proof(deferred_reduce_2.clone(), keccak_vk.vk.clone()); tracing::info!("proving verify program (core)"); let verify_proof = prover.prove_core(&verify_pk, &stdin, opts, Default::default())?; + // let public_values = verify_proof.public_values.clone(); // Generate recursive proof of verify program tracing::info!("compress verify program"); let verify_reduce = prover.compress( &verify_vk, verify_proof, - vec![deferred_reduce_1.proof, deferred_reduce_2.proof.clone(), deferred_reduce_2.proof], + vec![deferred_reduce_1, deferred_reduce_2.clone(), deferred_reduce_2], opts, )?; let reduce_pv: &RecursionPublicValues<_> = @@ -1117,6 +1485,18 @@ pub mod tests { tracing::info!("verify verify program"); prover.verify_compressed(&verify_reduce, &verify_vk)?; + let shrink_proof = prover.shrink(verify_reduce, opts)?; + + tracing::info!("verify shrink"); + prover.verify_shrink(&shrink_proof, &verify_vk)?; + + tracing::info!("wrap bn254"); + let wrapped_bn254_proof = prover.wrap_bn254(shrink_proof, opts)?; + + tracing::info!("verify wrap bn254"); + println!("verify wrap bn254 {:#?}", wrapped_bn254_proof.vk.commit); + prover.verify_wrap_bn254(&wrapped_bn254_proof, &verify_vk).unwrap(); + Ok(()) } @@ -1135,7 +1515,14 @@ pub mod tests { // TODO(mattstam): We should Test::Plonk here, but this uses the existing // docker image which has a different API than the current. So we need to wait until the // next release (v1.2.0+), and then switch it back. 
- test_e2e_prover::(elf, opts, Test::Wrap) + let prover = SP1Prover::::new(); + test_e2e_prover::( + &prover, + elf, + SP1Stdin::default(), + opts, + Test::All, + ) } /// Tests an end-to-end workflow of proving a program across the entire proof generation @@ -1144,6 +1531,6 @@ pub mod tests { #[serial] fn test_e2e_with_deferred_proofs() -> Result<()> { setup_logger(); - test_e2e_with_deferred_proofs_prover::() + test_e2e_with_deferred_proofs_prover::(SP1ProverOpts::default()) } } diff --git a/crates/prover/src/shapes.rs b/crates/prover/src/shapes.rs new file mode 100644 index 0000000000..b7adddc0e5 --- /dev/null +++ b/crates/prover/src/shapes.rs @@ -0,0 +1,309 @@ +use std::{ + collections::{BTreeMap, BTreeSet, HashSet}, + fs::File, + panic::{catch_unwind, AssertUnwindSafe}, + path::PathBuf, + sync::{Arc, Mutex}, +}; + +use thiserror::Error; + +use p3_baby_bear::BabyBear; +use p3_field::AbstractField; +use serde::{Deserialize, Serialize}; +use sp1_core_machine::riscv::CoreShapeConfig; +use sp1_recursion_circuit::machine::{ + SP1CompressWithVKeyWitnessValues, SP1CompressWithVkeyShape, SP1DeferredShape, + SP1DeferredWitnessValues, SP1RecursionShape, SP1RecursionWitnessValues, +}; +use sp1_recursion_core::{shape::RecursionShapeConfig, RecursionProgram}; +use sp1_stark::{MachineProver, ProofShape, DIGEST_SIZE}; + +use crate::{components::SP1ProverComponents, CompressAir, HashableKey, SP1Prover}; + +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +pub enum SP1ProofShape { + Recursion(ProofShape), + Compress(Vec), + Deferred(ProofShape), + Shrink(ProofShape), +} + +#[derive(Debug, Clone)] +pub enum SP1CompressProgramShape { + Recursion(SP1RecursionShape), + Compress(SP1CompressWithVkeyShape), + Deferred(SP1DeferredShape), + Shrink(SP1CompressWithVkeyShape), +} + +#[derive(Debug, Error)] +pub enum VkBuildError { + #[error("IO error: {0}")] + IO(#[from] std::io::Error), + #[error("Serialization error: {0}")] + Bincode(#[from] bincode::Error), +} + +pub fn build_vk_map( + reduce_batch_size: usize, + dummy: bool, + num_compiler_workers: usize, + num_setup_workers: usize, + indices: Option>, +) -> (BTreeSet<[BabyBear; DIGEST_SIZE]>, Vec, usize) { + let mut prover = SP1Prover::::new(); + prover.vk_verification = !dummy; + let core_shape_config = prover.core_shape_config.as_ref().expect("core shape config not found"); + let recursion_shape_config = + prover.recursion_shape_config.as_ref().expect("recursion shape config not found"); + + tracing::info!("building compress vk map"); + let (vk_set, panic_indices, height) = if dummy { + tracing::warn!("Making a dummy vk map"); + let dummy_set = SP1ProofShape::dummy_vk_map( + core_shape_config, + recursion_shape_config, + reduce_batch_size, + ) + .into_keys() + .collect::>(); + let height = dummy_set.len().next_power_of_two().ilog2() as usize; + (dummy_set, vec![], height) + } else { + let (vk_tx, vk_rx) = std::sync::mpsc::channel(); + let (shape_tx, shape_rx) = + std::sync::mpsc::sync_channel::<(usize, SP1CompressProgramShape)>(num_compiler_workers); + let (program_tx, program_rx) = std::sync::mpsc::sync_channel(num_setup_workers); + let (panic_tx, panic_rx) = std::sync::mpsc::channel(); + + let shape_rx = Mutex::new(shape_rx); + let program_rx = Mutex::new(program_rx); + + let indices_set = indices.map(|indices| indices.into_iter().collect::>()); + let all_shapes = + SP1ProofShape::generate(core_shape_config, recursion_shape_config, reduce_batch_size) + .collect::>(); + let num_shapes = all_shapes.len(); + 
tracing::info!("number of shapes: {}", num_shapes); + + let height = num_shapes.next_power_of_two().ilog2() as usize; + let chunk_size = indices_set.as_ref().map(|indices| indices.len()).unwrap_or(num_shapes); + + std::thread::scope(|s| { + // Initialize compiler workers. + for _ in 0..num_compiler_workers { + let program_tx = program_tx.clone(); + let shape_rx = &shape_rx; + let prover = &prover; + let panic_tx = panic_tx.clone(); + s.spawn(move || { + while let Ok((i, shape)) = shape_rx.lock().unwrap().recv() { + println!("shape {} is {:?}", i, shape); + let program = catch_unwind(AssertUnwindSafe(|| { + prover.program_from_shape(shape.clone()) + })); + let is_shrink = matches!(shape, SP1CompressProgramShape::Shrink(_)); + match program { + Ok(program) => program_tx.send((i, program, is_shrink)).unwrap(), + Err(e) => { + tracing::warn!( + "Program generation failed for shape {} {:?}, with error: {:?}", + i, + shape, + e + ); + panic_tx.send(i).unwrap(); + } + } + } + }); + } + + // Initialize setup workers. + for _ in 0..num_setup_workers { + let vk_tx = vk_tx.clone(); + let program_rx = &program_rx; + let prover = &prover; + s.spawn(move || { + let mut done = 0; + while let Ok((i, program, is_shrink)) = program_rx.lock().unwrap().recv() { + let vk = tracing::debug_span!("setup for program {}", i).in_scope(|| { + if is_shrink { + prover.shrink_prover.setup(&program).1 + } else { + prover.compress_prover.setup(&program).1 + } + }); + done += 1; + + let vk_digest = vk.hash_babybear(); + tracing::info!( + "program {} = {:?}, {}% done", + i, + vk_digest, + done * 100 / chunk_size + ); + vk_tx.send(vk_digest).unwrap(); + } + }); + } + + // Generate shapes and send them to the compiler workers. + all_shapes + .into_iter() + .enumerate() + .filter(|(i, _)| indices_set.as_ref().map(|set| set.contains(i)).unwrap_or(true)) + .map(|(i, shape)| (i, SP1CompressProgramShape::from_proof_shape(shape, height))) + .for_each(|(i, program_shape)| { + shape_tx.send((i, program_shape)).unwrap(); + }); + + drop(shape_tx); + drop(program_tx); + drop(vk_tx); + drop(panic_tx); + + let vk_set = vk_rx.iter().collect::>(); + + let panic_indices = panic_rx.iter().collect::>(); + + (vk_set, panic_indices, height) + }) + }; + tracing::info!("compress vks generated, number of keys: {}", vk_set.len()); + (vk_set, panic_indices, height) +} + +pub fn build_vk_map_to_file( + build_dir: PathBuf, + reduce_batch_size: usize, + dummy: bool, + num_compiler_workers: usize, + num_setup_workers: usize, + range_start: Option, + range_end: Option, +) -> Result<(), VkBuildError> { + std::fs::create_dir_all(&build_dir)?; + + tracing::info!("Building vk set"); + + let (vk_set, _, _) = build_vk_map::( + reduce_batch_size, + dummy, + num_compiler_workers, + num_setup_workers, + range_start.and_then(|start| range_end.map(|end| (start..end).collect())), + ); + + let vk_map = vk_set.into_iter().enumerate().map(|(i, vk)| (vk, i)).collect::>(); + + tracing::info!("Save the vk set to file"); + let mut file = if dummy { + File::create(build_dir.join("dummy_vk_map.bin"))? + } else { + File::create(build_dir.join("vk_map.bin"))? + }; + Ok(bincode::serialize_into(&mut file, &vk_map)?) 
+} + +impl SP1ProofShape { + pub fn generate<'a>( + core_shape_config: &'a CoreShapeConfig, + recursion_shape_config: &'a RecursionShapeConfig>, + reduce_batch_size: usize, + ) -> impl Iterator + 'a { + core_shape_config + .generate_all_allowed_shapes() + .map(Self::Recursion) + .chain((1..=reduce_batch_size).flat_map(|batch_size| { + recursion_shape_config.get_all_shape_combinations(batch_size).map(Self::Compress) + })) + .chain( + recursion_shape_config + .get_all_shape_combinations(1) + .map(|mut x| Self::Deferred(x.pop().unwrap())), + ) + .chain( + recursion_shape_config + .get_all_shape_combinations(1) + .map(|mut x| Self::Shrink(x.pop().unwrap())), + ) + } + + pub fn dummy_vk_map<'a>( + core_shape_config: &'a CoreShapeConfig, + recursion_shape_config: &'a RecursionShapeConfig>, + reduce_batch_size: usize, + ) -> BTreeMap<[BabyBear; DIGEST_SIZE], usize> { + Self::generate(core_shape_config, recursion_shape_config, reduce_batch_size) + .enumerate() + .map(|(i, _)| ([BabyBear::from_canonical_usize(i); DIGEST_SIZE], i)) + .collect() + } +} + +impl SP1CompressProgramShape { + pub fn from_proof_shape(shape: SP1ProofShape, height: usize) -> Self { + match shape { + SP1ProofShape::Recursion(proof_shape) => Self::Recursion(proof_shape.into()), + SP1ProofShape::Deferred(proof_shape) => { + Self::Deferred(SP1DeferredShape::new(vec![proof_shape].into(), height)) + } + SP1ProofShape::Compress(proof_shapes) => Self::Compress(SP1CompressWithVkeyShape { + compress_shape: proof_shapes.into(), + merkle_tree_height: height, + }), + SP1ProofShape::Shrink(proof_shape) => Self::Shrink(SP1CompressWithVkeyShape { + compress_shape: vec![proof_shape].into(), + merkle_tree_height: height, + }), + } + } +} + +impl SP1Prover { + pub fn program_from_shape( + &self, + shape: SP1CompressProgramShape, + ) -> Arc> { + match shape { + SP1CompressProgramShape::Recursion(shape) => { + let input = SP1RecursionWitnessValues::dummy(self.core_prover.machine(), &shape); + self.recursion_program(&input) + } + SP1CompressProgramShape::Deferred(shape) => { + let input = SP1DeferredWitnessValues::dummy(self.compress_prover.machine(), &shape); + self.deferred_program(&input) + } + SP1CompressProgramShape::Compress(shape) => { + let input = + SP1CompressWithVKeyWitnessValues::dummy(self.compress_prover.machine(), &shape); + self.compress_program(&input) + } + SP1CompressProgramShape::Shrink(shape) => { + let input = + SP1CompressWithVKeyWitnessValues::dummy(self.compress_prover.machine(), &shape); + self.shrink_program(&input) + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + #[ignore] + fn test_generate_all_shapes() { + let core_shape_config = CoreShapeConfig::default(); + let recursion_shape_config = RecursionShapeConfig::default(); + let reduce_batch_size = 2; + let all_shapes = + SP1ProofShape::generate(&core_shape_config, &recursion_shape_config, reduce_batch_size) + .collect::>(); + + println!("Number of compress shapes: {}", all_shapes.len()); + } +} diff --git a/crates/prover/src/types.rs b/crates/prover/src/types.rs index a2cd780286..dfebfb6c83 100644 --- a/crates/prover/src/types.rs +++ b/crates/prover/src/types.rs @@ -1,4 +1,4 @@ -use std::{borrow::Borrow, fs::File, path::Path}; +use std::{fs::File, path::Path}; use anyhow::Result; use p3_baby_bear::BabyBear; @@ -6,19 +6,21 @@ use p3_bn254_fr::Bn254Fr; use p3_commit::{Pcs, TwoAdicMultiplicativeCoset}; use p3_field::{AbstractField, PrimeField, PrimeField32, TwoAdicField}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use 
sp1_core_machine::{io::SP1Stdin, riscv::RiscvAir}; +use sp1_core_machine::{io::SP1Stdin, reduce::SP1ReduceProof}; use sp1_primitives::{io::SP1PublicValues, poseidon2_hash}; -use sp1_recursion_core::{air::RecursionPublicValues, stark::config::BabyBearPoseidon2Outer}; -use sp1_recursion_gnark_ffi::proof::{Groth16Bn254Proof, PlonkBn254Proof}; -use sp1_recursion_program::machine::{ - SP1CompressMemoryLayout, SP1DeferredMemoryLayout, SP1RecursionMemoryLayout, + +use sp1_recursion_circuit::machine::{ + SP1CompressWitnessValues, SP1DeferredWitnessValues, SP1RecursionWitnessValues, }; + +use sp1_recursion_gnark_ffi::proof::{Groth16Bn254Proof, PlonkBn254Proof}; + use sp1_stark::{ShardProof, StarkGenericConfig, StarkProvingKey, StarkVerifyingKey, DIGEST_SIZE}; use thiserror::Error; use crate::{ - utils::{babybear_bytes_to_bn254, babybears_to_bn254, words_to_bytes_be}, - words_to_bytes, CompressAir, CoreSC, InnerSC, + utils::{babybears_to_bn254, words_to_bytes_be}, + CoreSC, InnerSC, }; /// The information necessary to generate a proof for a given RISC-V program. @@ -196,34 +198,6 @@ impl ProofSystem { } } -/// An intermediate proof which proves the execution over a range of shards. -#[derive(Serialize, Deserialize, Clone)] -#[serde(bound(serialize = "ShardProof: Serialize"))] -#[serde(bound(deserialize = "ShardProof: Deserialize<'de>"))] -pub struct SP1ReduceProof { - pub proof: ShardProof, -} - -impl SP1ReduceProof { - pub fn sp1_vkey_digest_babybear(&self) -> [BabyBear; 8] { - let proof = &self.proof; - let pv: &RecursionPublicValues = proof.public_values.as_slice().borrow(); - pv.sp1_vk_digest - } - - pub fn sp1_vkey_digest_bn254(&self) -> Bn254Fr { - babybears_to_bn254(&self.sp1_vkey_digest_babybear()) - } - - pub fn sp1_commited_values_digest_bn254(&self) -> Bn254Fr { - let proof = &self.proof; - let pv: &RecursionPublicValues = proof.public_values.as_slice().borrow(); - let committed_values_digest_bytes: [BabyBear; 32] = - words_to_bytes(&pv.committed_value_digest).try_into().unwrap(); - babybear_bytes_to_bn254(&committed_values_digest_bytes) - } -} - /// A proof that can be reduced along with other proofs into one proof. 
#[derive(Serialize, Deserialize, Clone)] pub enum SP1ReduceProofWrapper { @@ -238,8 +212,8 @@ pub enum SP1RecursionProverError { } #[allow(clippy::large_enum_variant)] -pub enum SP1CompressMemoryLayouts<'a> { - Core(SP1RecursionMemoryLayout<'a, InnerSC, RiscvAir>), - Deferred(SP1DeferredMemoryLayout<'a, InnerSC, CompressAir>), - Compress(SP1CompressMemoryLayout<'a, InnerSC, CompressAir>), +pub enum SP1CircuitWitness { + Core(SP1RecursionWitnessValues), + Deferred(SP1DeferredWitnessValues), + Compress(SP1CompressWitnessValues), } diff --git a/crates/prover/src/utils.rs b/crates/prover/src/utils.rs index 9bc0643efd..da9253b722 100644 --- a/crates/prover/src/utils.rs +++ b/crates/prover/src/utils.rs @@ -1,16 +1,93 @@ use std::{ + borrow::Borrow, fs::{self, File}, io::Read, + iter::{Skip, Take}, }; +use itertools::Itertools; use p3_baby_bear::BabyBear; use p3_bn254_fr::Bn254Fr; use p3_field::{AbstractField, PrimeField32}; +use p3_symmetric::CryptographicHasher; use sp1_core_executor::{Executor, Program}; -use sp1_core_machine::io::SP1Stdin; -use sp1_stark::{SP1CoreOpts, Word}; +use sp1_core_machine::{io::SP1Stdin, reduce::SP1ReduceProof}; +use sp1_recursion_circuit::machine::RootPublicValues; +use sp1_recursion_core::{ + air::{RecursionPublicValues, NUM_PV_ELMS_TO_HASH}, + stark::BabyBearPoseidon2Outer, +}; +use sp1_stark::{baby_bear_poseidon2::MyHash as InnerHash, SP1CoreOpts, Word}; + +use crate::{InnerSC, SP1CoreProofData}; + +/// Get the SP1 vkey BabyBear Poseidon2 digest this reduce proof is representing. +pub fn sp1_vkey_digest_babybear(proof: &SP1ReduceProof) -> [BabyBear; 8] { + let proof = &proof.proof; + let pv: &RecursionPublicValues = proof.public_values.as_slice().borrow(); + pv.sp1_vk_digest +} + +/// Get the SP1 vkey Bn Poseidon2 digest this reduce proof is representing. +pub fn sp1_vkey_digest_bn254(proof: &SP1ReduceProof) -> Bn254Fr { + babybears_to_bn254(&sp1_vkey_digest_babybear(proof)) +} + +/// Compute the digest of the public values. +pub fn recursion_public_values_digest( + config: &InnerSC, + public_values: &RecursionPublicValues, +) -> [BabyBear; 8] { + let hash = InnerHash::new(config.perm.clone()); + let pv_array = public_values.as_array(); + hash.hash_slice(&pv_array[0..NUM_PV_ELMS_TO_HASH]) +} + +pub fn root_public_values_digest( + config: &InnerSC, + public_values: &RootPublicValues, +) -> [BabyBear; 8] { + let hash = InnerHash::new(config.perm.clone()); + let input = (*public_values.sp1_vk_digest()) + .into_iter() + .chain( + (*public_values.committed_value_digest()) + .into_iter() + .flat_map(|word| word.0.into_iter()), + ) + .collect::>(); + hash.hash_slice(&input) +} + +pub fn assert_root_public_values_valid( + config: &InnerSC, + public_values: &RootPublicValues, +) { + let expected_digest = root_public_values_digest(config, public_values); + for (value, expected) in public_values.digest().iter().copied().zip_eq(expected_digest) { + assert_eq!(value, expected); + } +} + +/// Assert that the digest of the public values is correct. +pub fn assert_recursion_public_values_valid( + config: &InnerSC, + public_values: &RecursionPublicValues, +) { + let expected_digest = recursion_public_values_digest(config, public_values); + for (value, expected) in public_values.digest.iter().copied().zip_eq(expected_digest) { + assert_eq!(value, expected); + } +} -use crate::SP1CoreProofData; +/// Get the committed values Bn Poseidon2 digest this reduce proof is representing. 
+pub fn sp1_commited_values_digest_bn254(proof: &SP1ReduceProof) -> Bn254Fr { + let proof = &proof.proof; + let pv: &RecursionPublicValues = proof.public_values.as_slice().borrow(); + let committed_values_digest_bytes: [BabyBear; 32] = + words_to_bytes(&pv.committed_value_digest).try_into().unwrap(); + babybear_bytes_to_bn254(&committed_values_digest_bytes) +} impl SP1CoreProofData { pub fn save(&self, path: &str) -> Result<(), std::io::Error> { @@ -79,3 +156,47 @@ pub fn words_to_bytes_be(words: &[u32; 8]) -> [u8; 32] { } bytes } + +pub trait MaybeTakeIterator: Iterator { + fn maybe_skip(self, bound: Option) -> RangedIterator + where + Self: Sized, + { + match bound { + Some(bound) => RangedIterator::Skip(self.skip(bound)), + None => RangedIterator::Unbounded(self), + } + } + + fn maybe_take(self, bound: Option) -> RangedIterator + where + Self: Sized, + { + match bound { + Some(bound) => RangedIterator::Take(self.take(bound)), + None => RangedIterator::Unbounded(self), + } + } +} + +impl MaybeTakeIterator for I {} + +pub enum RangedIterator { + Unbounded(I), + Skip(Skip), + Take(Take), + Range(Take>), +} + +impl Iterator for RangedIterator { + type Item = I::Item; + + fn next(&mut self) -> Option { + match self { + RangedIterator::Unbounded(unbounded) => unbounded.next(), + RangedIterator::Skip(skip) => skip.next(), + RangedIterator::Take(take) => take.next(), + RangedIterator::Range(range) => range.next(), + } + } +} diff --git a/crates/prover/src/verify.rs b/crates/prover/src/verify.rs index a1e180f002..9d4ba6901a 100644 --- a/crates/prover/src/verify.rs +++ b/crates/prover/src/verify.rs @@ -4,10 +4,12 @@ use anyhow::Result; use num_bigint::BigUint; use p3_baby_bear::BabyBear; use p3_field::{AbstractField, PrimeField}; -use sp1_core_executor::subproof::SubproofVerifier; +use sp1_core_executor::{subproof::SubproofVerifier, SP1ReduceProof}; use sp1_core_machine::cpu::MAX_CPU_LOG_DEGREE; use sp1_primitives::{consts::WORD_SIZE, io::SP1PublicValues}; -use sp1_recursion_core::{air::RecursionPublicValues, stark::config::BabyBearPoseidon2Outer}; + +use sp1_recursion_circuit::machine::RootPublicValues; +use sp1_recursion_core::{air::RecursionPublicValues, stark::BabyBearPoseidon2Outer}; use sp1_recursion_gnark_ffi::{ Groth16Bn254Proof, Groth16Bn254Prover, PlonkBn254Proof, PlonkBn254Prover, }; @@ -19,8 +21,9 @@ use sp1_stark::{ use thiserror::Error; use crate::{ - components::SP1ProverComponents, CoreSC, HashableKey, OuterSC, SP1CoreProofData, SP1Prover, - SP1ReduceProof, SP1VerifyingKey, + components::SP1ProverComponents, + utils::{assert_recursion_public_values_valid, assert_root_public_values_valid}, + CoreSC, HashableKey, OuterSC, SP1CoreProofData, SP1Prover, SP1VerifyingKey, }; #[derive(Error, Debug)] @@ -200,13 +203,13 @@ impl SP1Prover { return Err(MachineVerificationError::InvalidPublicValues( "last_init_addr_bits != last_finalize_addr_bits_prev", )); - } else if !shard_proof.contains_memory_init() + } else if !shard_proof.contains_global_memory_init() && public_values.previous_init_addr_bits != public_values.last_init_addr_bits { return Err(MachineVerificationError::InvalidPublicValues( "previous_init_addr_bits != last_init_addr_bits", )); - } else if !shard_proof.contains_memory_finalize() + } else if !shard_proof.contains_global_memory_finalize() && public_values.previous_finalize_addr_bits != public_values.last_finalize_addr_bits { @@ -290,17 +293,21 @@ impl SP1Prover { proof: &SP1ReduceProof, vk: &SP1VerifyingKey, ) -> Result<(), MachineVerificationError> { + let SP1ReduceProof { 
vk: compress_vk, proof } = proof; let mut challenger = self.compress_prover.config().challenger(); - let machine_proof = MachineProof { shard_proofs: vec![proof.proof.clone()] }; - self.compress_prover.machine().verify( - self.compress_vk(), - &machine_proof, - &mut challenger, - )?; + let machine_proof = MachineProof { shard_proofs: vec![proof.clone()] }; + self.compress_prover.machine().verify(compress_vk, &machine_proof, &mut challenger)?; // Validate public values - let public_values: &RecursionPublicValues<_> = - proof.proof.public_values.as_slice().borrow(); + let public_values: &RecursionPublicValues<_> = proof.public_values.as_slice().borrow(); + assert_recursion_public_values_valid( + self.compress_prover.machine().config(), + public_values, + ); + + if self.vk_verification && !self.allowed_vk_map.contains_key(&compress_vk.hash_babybear()) { + return Err(MachineVerificationError::InvalidVerificationKey); + } // `is_complete` should be 1. In the reduce program, this ensures that the proof is fully // reduced. @@ -314,14 +321,6 @@ impl SP1Prover { return Err(MachineVerificationError::InvalidPublicValues("sp1 vk hash mismatch")); } - // Verify that the reduce program is the one we are expecting. - let recursion_vkey_hash = self.compress_vk().hash_babybear(); - if public_values.compress_vk_digest != recursion_vkey_hash { - return Err(MachineVerificationError::InvalidPublicValues( - "recursion vk hash mismatch", - )); - } - Ok(()) } @@ -333,16 +332,18 @@ impl SP1Prover { ) -> Result<(), MachineVerificationError> { let mut challenger = self.shrink_prover.config().challenger(); let machine_proof = MachineProof { shard_proofs: vec![proof.proof.clone()] }; - self.shrink_prover.machine().verify(self.shrink_vk(), &machine_proof, &mut challenger)?; + self.shrink_prover.machine().verify(&proof.vk, &machine_proof, &mut challenger)?; // Validate public values let public_values: &RecursionPublicValues<_> = proof.proof.public_values.as_slice().borrow(); + assert_recursion_public_values_valid( + self.compress_prover.machine().config(), + public_values, + ); - // `is_complete` should be 1. In the reduce program, this ensures that the proof is fully - // reduced. - if public_values.is_complete != BabyBear::one() { - return Err(MachineVerificationError::InvalidPublicValues("is_complete is not 1")); + if self.vk_verification && !self.allowed_vk_map.contains_key(&proof.vk.hash_babybear()) { + return Err(MachineVerificationError::InvalidVerificationKey); } // Verify that the proof is for the sp1 vkey we are expecting. @@ -362,21 +363,17 @@ impl SP1Prover { ) -> Result<(), MachineVerificationError> { let mut challenger = self.wrap_prover.config().challenger(); let machine_proof = MachineProof { shard_proofs: vec![proof.proof.clone()] }; - self.wrap_prover.machine().verify(self.wrap_vk(), &machine_proof, &mut challenger)?; - // Validate public values - let public_values: &RecursionPublicValues<_> = - proof.proof.public_values.as_slice().borrow(); + let wrap_vk = self.wrap_vk.get().expect("Wrap verifier key not set"); + self.wrap_prover.machine().verify(wrap_vk, &machine_proof, &mut challenger)?; - // `is_complete` should be 1. In the reduce program, this ensures that the proof is fully - // reduced. 
- if public_values.is_complete != BabyBear::one() { - return Err(MachineVerificationError::InvalidPublicValues("is_complete is not 1")); - } + // Validate public values + let public_values: &RootPublicValues<_> = proof.proof.public_values.as_slice().borrow(); + assert_root_public_values_valid(self.shrink_prover.machine().config(), public_values); // Verify that the proof is for the sp1 vkey we are expecting. let vkey_hash = vk.hash_babybear(); - if public_values.sp1_vk_digest != vkey_hash { + if *public_values.sp1_vk_digest() != vkey_hash { return Err(MachineVerificationError::InvalidPublicValues("sp1 vk hash mismatch")); } @@ -475,7 +472,7 @@ pub fn verify_groth16_bn254_public_inputs( impl SubproofVerifier for &SP1Prover { fn verify_deferred_proof( &self, - proof: &sp1_stark::ShardProof, + proof: &sp1_core_machine::reduce::SP1ReduceProof, vk: &sp1_stark::StarkVerifyingKey, vk_hash: [u32; 8], committed_value_digest: [u32; 8], @@ -488,11 +485,12 @@ impl SubproofVerifier for &SP1Prover { } // Check that proof is valid. self.verify_compressed( - &SP1ReduceProof { proof: proof.clone() }, + &SP1ReduceProof { vk: proof.vk.clone(), proof: proof.proof.clone() }, &SP1VerifyingKey { vk: vk.clone() }, )?; // Check that the committed value digest matches the one from syscall - let public_values: &RecursionPublicValues<_> = proof.public_values.as_slice().borrow(); + let public_values: &RecursionPublicValues<_> = + proof.proof.public_values.as_slice().borrow(); for (i, word) in public_values.committed_value_digest.iter().enumerate() { if *word != committed_value_digest[i].into() { return Err(MachineVerificationError::InvalidPublicValues( diff --git a/crates/prover/vk_map.bin b/crates/prover/vk_map.bin new file mode 100644 index 0000000000..02f271f835 Binary files /dev/null and b/crates/prover/vk_map.bin differ diff --git a/crates/recursion/circuit-v2/CHANGELOG.md b/crates/recursion/circuit-v2/CHANGELOG.md deleted file mode 100644 index fe22843ca1..0000000000 --- a/crates/recursion/circuit-v2/CHANGELOG.md +++ /dev/null @@ -1,22 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - -## [1.2.0-rc1](https://github.com/succinctlabs/sp1/releases/tag/sp1-recursion-circuit-v2-v1.2.0-rc1) - 2024-08-23 - -### Added - -- unify inner and outer witnesses in recursion circuit ([#1374](https://github.com/succinctlabs/sp1/pull/1374)) -- plonk in new circuit ([#1364](https://github.com/succinctlabs/sp1/pull/1364)) - -### Other - -- use crate `vec_map`, box large `Instruction` variants ([#1360](https://github.com/succinctlabs/sp1/pull/1360)) -- merge dev into experimental pt 2 ([#1341](https://github.com/succinctlabs/sp1/pull/1341)) -- add circuit v2 -- resolve merge conflicts between dev and experimental diff --git a/crates/recursion/circuit-v2/Cargo.toml b/crates/recursion/circuit-v2/Cargo.toml deleted file mode 100644 index a2be9881b7..0000000000 --- a/crates/recursion/circuit-v2/Cargo.toml +++ /dev/null @@ -1,63 +0,0 @@ -[package] -name = "sp1-recursion-circuit-v2" -description = "SP1 is a performant, 100% open-source, contributor-friendly zkVM." 
-readme = "../../../README.md" -version = { workspace = true } -edition = { workspace = true } -license = { workspace = true } -repository = { workspace = true } -keywords = { workspace = true } -categories = { workspace = true } - -[dependencies] -p3-air = { workspace = true } -p3-field = { workspace = true } -p3-commit = { workspace = true } -p3-fri = { workspace = true } -p3-matrix = { workspace = true } -p3-util = { workspace = true } -p3-maybe-rayon = { workspace = true } -p3-symmetric = { workspace = true } -p3-challenger = { workspace = true } -p3-dft = { workspace = true } -p3-merkle-tree = { workspace = true } -p3-poseidon2 = { workspace = true } -p3-bn254-fr = { workspace = true } -p3-baby-bear = { workspace = true } - -sp1-core-machine = { workspace = true } -sp1-core-executor = { workspace = true } -sp1-stark = { workspace = true } - -# todo: remove this dependency once everything is migrated. -sp1-recursion-program = { workspace = true } - -sp1-recursion-core-v2 = { workspace = true } -sp1-recursion-derive = { workspace = true } -sp1-recursion-compiler = { workspace = true } -sp1-primitives = { workspace = true } -sp1-recursion-gnark-ffi = { workspace = true } -sp1-recursion-circuit = { workspace = true } - -itertools = "0.13.0" -serde = { version = "1.0.204", features = ["derive"] } -bincode = "1.3.3" -rand = "0.8.5" -tracing = "0.1.40" -hashbrown = { version = "0.14.5", features = ["serde", "inline-more"] } -stacker = "0.1" - -[dev-dependencies] -sp1-core-executor = { workspace = true, features = ["programs"] } -ff = { version = "0.13", features = ["derive", "derive_bits"] } -p3-challenger = { workspace = true } -p3-symmetric = { workspace = true } -p3-dft = { workspace = true } -p3-merkle-tree = { workspace = true } -p3-poseidon2 = { workspace = true } -zkhash = "0.2.0" -rand = "0.8.5" - -[features] -native-gnark = ["sp1-recursion-gnark-ffi/native"] -export-tests = [] diff --git a/crates/recursion/circuit-v2/src/build_wrap_v2.rs b/crates/recursion/circuit-v2/src/build_wrap_v2.rs deleted file mode 100644 index f5cc231baa..0000000000 --- a/crates/recursion/circuit-v2/src/build_wrap_v2.rs +++ /dev/null @@ -1,517 +0,0 @@ -use std::borrow::Borrow; - -use itertools::Itertools; -use p3_baby_bear::BabyBear; -use p3_bn254_fr::Bn254Fr; -use p3_field::AbstractField; -use p3_fri::TwoAdicFriPcsProof; -use sp1_recursion_compiler::{ - config::OuterConfig, - constraints::{Constraint, ConstraintCompiler}, - ir::{Builder, Config, Ext, Felt, SymbolicExt, Var}, -}; -use sp1_recursion_core_v2::{ - air::RecursionPublicValues, - machine::RecursionAir, - stark::config::{ - BabyBearPoseidon2Outer, OuterChallenge, OuterChallengeMmcs, OuterFriProof, OuterVal, - OuterValMmcs, - }, -}; -use sp1_stark::{ - AirOpenedValues, ChipOpenedValues, ShardCommitment, ShardOpenedValues, ShardProof, - StarkMachine, StarkVerifyingKey, -}; - -use crate::{ - challenger::{CanObserveVariable, MultiField32ChallengerVariable}, - stark::{ShardProofVariable, StarkVerifier}, - utils::{felt_bytes_to_bn254_var, felts_to_bn254_var, words_to_bytes}, - witness::Witnessable, - BatchOpeningVariable, FriCommitPhaseProofStepVariable, FriProofVariable, FriQueryProofVariable, - TwoAdicPcsProofVariable, VerifyingKeyVariable, -}; - -pub const DIGEST_SIZE: usize = 1; - -type OuterSC = BabyBearPoseidon2Outer; -type OuterC = OuterConfig; -type OuterDigestVariable = [Var<::N>; DIGEST_SIZE]; - -/// A function to build the circuit for the final wrap layer using the architecture of core-v2. 
-/// -/// For now, the witnessing logic is not implemented and we just witness via constant proof variables. -pub fn build_wrap_circuit_v2( - wrap_vk: &StarkVerifyingKey, - template_proof: ShardProof, - outer_machine: StarkMachine>, -) -> Vec -where -{ - let mut builder = Builder::::default(); - let mut challenger = MultiField32ChallengerVariable::new(&mut builder); - - let preprocessed_commit_val: [Bn254Fr; 1] = wrap_vk.commit.into(); - let preprocessed_commit: OuterDigestVariable = [builder.eval(preprocessed_commit_val[0])]; - challenger.observe_commitment(&mut builder, preprocessed_commit); - let pc_start = builder.eval(wrap_vk.pc_start); - challenger.observe(&mut builder, pc_start); - - // let mut witness = OuterWitness::default(); - // template_proof.write(&mut witness); - - let proof = template_proof.read(&mut builder); - // let proof = const_shard_proof(&mut builder, &template_proof); - - let commited_values_digest = builder.constant(::N::zero()); - builder.commit_commited_values_digest_circuit(commited_values_digest); - let vkey_hash = builder.constant(::N::zero()); - builder.commit_vkey_hash_circuit(vkey_hash); - - // Validate public values - // let mut pv_elements = Vec::new(); - // for i in 0..PROOF_MAX_NUM_PVS { - // let element = builder.get(&proof.public_values, i); - // pv_elements.push(element); - // } - - let pv: &RecursionPublicValues<_> = proof.public_values.as_slice().borrow(); - - // TODO: Add back. - // let one_felt: Felt<_> = builder.constant(BabyBear::one()); - // // Proof must be complete. In the reduce program, this will ensure that the SP1 proof has - // // been fully accumulated. - // builder.assert_felt_eq(pv.is_complete, one_felt); - - // Convert pv.sp1_vk_digest into Bn254 - let pv_vkey_hash = felts_to_bn254_var(&mut builder, &pv.sp1_vk_digest); - // Vkey hash must match the witnessed commited_values_digest that we are committing to. - builder.assert_var_eq(pv_vkey_hash, vkey_hash); - - // Convert pv.committed_value_digest into Bn254 - let pv_committed_values_digest_bytes: [Felt<_>; 32] = - words_to_bytes(&pv.committed_value_digest).try_into().unwrap(); - let pv_committed_values_digest: Var<_> = - felt_bytes_to_bn254_var(&mut builder, &pv_committed_values_digest_bytes); - - // // Committed values digest must match the witnessed one that we are committing to. - builder.assert_var_eq(pv_committed_values_digest, commited_values_digest); - - let ShardCommitment { main_commit, .. } = &proof.commitment; - challenger.observe_commitment(&mut builder, *main_commit); - - challenger.observe_slice(&mut builder, proof.clone().public_values); - - let StarkVerifyingKey { commit, pc_start, chip_information, chip_ordering } = wrap_vk; - - let wrap_vk = VerifyingKeyVariable { - commitment: commit - .into_iter() - .map(|elem| builder.eval(elem)) - .collect_vec() - .try_into() - .unwrap(), - pc_start: builder.eval(*pc_start), - chip_information: chip_information.clone(), - chip_ordering: chip_ordering.clone(), - }; - - StarkVerifier::::verify_shard( - &mut builder, - &wrap_vk, - &outer_machine, - &mut challenger.clone(), - &proof, - ); - - let zero_ext: Ext<_, _> = builder.constant(::EF::zero()); - let cumulative_sum: Ext<_, _> = builder.eval(zero_ext); - for chip in proof.opened_values.chips { - builder.assign(cumulative_sum, cumulative_sum + chip.cumulative_sum); - } - builder.assert_ext_eq(cumulative_sum, zero_ext); - - // TODO: Add back. - // Verify the public values digest. 
- // let calculated_digest = - // builder.p2_circuit_babybear_hash(&proof.public_values[0..NUM_PV_ELMS_TO_HASH]); - // let expected_digest = pv.digest; - // for (calculated_elm, expected_elm) in calculated_digest.iter().zip(expected_digest.iter()) { - // builder.assert_felt_eq(*expected_elm, *calculated_elm); - // } - - let mut backend = ConstraintCompiler::::default(); - backend.emit(builder.operations) -} - -/// A utility function to convert a `ShardProof` into a `ShardProofVariable`. Should be replaced by -/// more refined witness generation. -pub fn const_shard_proof( - builder: &mut Builder, - proof: &ShardProof, -) -> ShardProofVariable { - let opening_proof = const_two_adic_pcs_proof(builder, proof.opening_proof.clone()); - let opened_values = proof - .opened_values - .chips - .iter() - .map(|chip| { - let ChipOpenedValues { - preprocessed, - main, - permutation, - quotient, - cumulative_sum, - log_degree, - } = chip; - let AirOpenedValues { local: prepr_local, next: prepr_next } = preprocessed; - let AirOpenedValues { local: main_local, next: main_next } = main; - let AirOpenedValues { local: perm_local, next: perm_next } = permutation; - - let quotient = - quotient.iter().map(|q| q.iter().map(|x| builder.constant(*x)).collect()).collect(); - let cumulative_sum = builder.constant(*cumulative_sum); - - let preprocessed = AirOpenedValues { - local: prepr_local.iter().map(|x| builder.constant(*x)).collect(), - next: prepr_next.iter().map(|x| builder.constant(*x)).collect(), - }; - - let main = AirOpenedValues { - local: main_local.iter().map(|x| builder.constant(*x)).collect(), - next: main_next.iter().map(|x| builder.constant(*x)).collect(), - }; - - let permutation = AirOpenedValues { - local: perm_local.iter().map(|x| builder.constant(*x)).collect(), - next: perm_next.iter().map(|x| builder.constant(*x)).collect(), - }; - - ChipOpenedValues { - preprocessed, - main, - permutation, - quotient, - cumulative_sum, - log_degree: *log_degree, - } - }) - .collect(); - let opened_values = ShardOpenedValues { chips: opened_values }; - let ShardCommitment { main_commit, permutation_commit, quotient_commit } = proof.commitment; - let main_commit: [Bn254Fr; 1] = main_commit.into(); - let permutation_commit: [Bn254Fr; 1] = permutation_commit.into(); - let quotient_commit: [Bn254Fr; 1] = quotient_commit.into(); - - let main_commit = core::array::from_fn(|i| builder.eval(main_commit[i])); - let permutation_commit = core::array::from_fn(|i| builder.eval(permutation_commit[i])); - let quotient_commit = core::array::from_fn(|i| builder.eval(quotient_commit[i])); - - let commitment = ShardCommitment { main_commit, permutation_commit, quotient_commit }; - ShardProofVariable { - commitment, - public_values: proof.public_values.iter().map(|x| builder.constant(*x)).collect(), - opened_values, - opening_proof, - chip_ordering: proof.chip_ordering.clone(), - } -} - -type C = OuterConfig; -type SC = BabyBearPoseidon2Outer; -type N = ::N; - -// Copy-paste from InnerCircuit implementation, changing generic parameters. -fn const_fri_proof( - builder: &mut Builder, - fri_proof: OuterFriProof, -) -> FriProofVariable { - // Set the commit phase commits. - let commit_phase_commits = fri_proof - .commit_phase_commits - .iter() - .map(|commit| { - let commit: [N; DIGEST_SIZE] = (*commit).into(); - commit.map(|x| builder.eval(x)) - }) - .collect::>(); - - // Set the query proofs. 
- let query_proofs = fri_proof - .query_proofs - .iter() - .map(|query_proof| { - let commit_phase_openings = query_proof - .commit_phase_openings - .iter() - .map(|commit_phase_opening| { - let sibling_value = - builder.eval(SymbolicExt::from_f(commit_phase_opening.sibling_value)); - let opening_proof = commit_phase_opening - .opening_proof - .iter() - .map(|sibling| sibling.map(|x| builder.eval(x))) - .collect::>(); - FriCommitPhaseProofStepVariable { sibling_value, opening_proof } - }) - .collect::>(); - FriQueryProofVariable { commit_phase_openings } - }) - .collect::>(); - - // Initialize the FRI proof variable. - FriProofVariable { - commit_phase_commits, - query_proofs, - final_poly: builder.eval(SymbolicExt::from_f(fri_proof.final_poly)), - pow_witness: builder.eval(fri_proof.pow_witness), - } -} - -pub fn const_two_adic_pcs_proof( - builder: &mut Builder, - proof: TwoAdicFriPcsProof, -) -> TwoAdicPcsProofVariable { - let fri_proof = const_fri_proof(builder, proof.fri_proof); - let query_openings = proof - .query_openings - .iter() - .map(|query_opening| { - query_opening - .iter() - .map(|opening| BatchOpeningVariable { - opened_values: opening - .opened_values - .iter() - .map(|opened_value| { - opened_value - .iter() - .map(|value| vec![builder.eval::, _>(*value)]) - .collect::>() - }) - .collect::>(), - opening_proof: opening - .opening_proof - .iter() - .map(|opening_proof| opening_proof.map(|x| builder.eval(x))) - .collect::>(), - }) - .collect::>() - }) - .collect::>(); - TwoAdicPcsProofVariable { fri_proof, query_openings } -} -#[cfg(test)] -pub mod tests { - - use std::{borrow::Borrow, iter::once, sync::Arc}; - - use p3_baby_bear::{BabyBear, DiffusionMatrixBabyBear}; - use p3_challenger::{CanObserve, FieldChallenger}; - use p3_commit::{Pcs, TwoAdicMultiplicativeCoset}; - use p3_field::{extension::BinomialExtensionField, AbstractField}; - use p3_matrix::dense::RowMajorMatrix; - use rand::{rngs::StdRng, SeedableRng}; - use sp1_core_machine::utils::{log2_strict_usize, run_test_machine, setup_logger}; - use sp1_recursion_compiler::{ - config::OuterConfig, - constraints::ConstraintCompiler, - ir::{Builder, Config, Ext, SymbolicExt}, - }; - use sp1_recursion_core_v2::{ - air::RecursionPublicValues, - instruction as instr, - machine::RecursionAir, - stark::config::{ - outer_fri_config, outer_perm, BabyBearPoseidon2Outer, OuterChallenge, OuterChallenger, - OuterCompress, OuterDft, OuterHash, OuterPcs, OuterVal, OuterValMmcs, - }, - BaseAluOpcode, MemAccessKind, RecursionProgram, Runtime, - }; - use sp1_recursion_gnark_ffi::{Groth16Bn254Prover, PlonkBn254Prover}; - use sp1_stark::{BabyBearPoseidon2Inner, StarkMachine}; - - use crate::{ - challenger::CanObserveVariable, - fri::verify_two_adic_pcs, - hash::BN254_DIGEST_SIZE, - utils::{babybear_bytes_to_bn254, babybears_to_bn254, words_to_bytes}, - witness::{OuterWitness, Witnessable}, - Digest, TwoAdicPcsMatsVariable, TwoAdicPcsRoundVariable, - }; - - use super::{build_wrap_circuit_v2, const_two_adic_pcs_proof}; - - fn test_machine(machine_maker: F) - where - F: Fn() -> StarkMachine>, - { - setup_logger(); - let n = 10; - // Fibonacci(n) - let instructions = once(instr::mem(MemAccessKind::Write, 1, 0, 0)) - .chain(once(instr::mem(MemAccessKind::Write, 2, 1, 1))) - .chain((2..=n).map(|i| instr::base_alu(BaseAluOpcode::AddF, 2, i, i - 2, i - 1))) - .chain(once(instr::mem(MemAccessKind::Read, 1, n - 1, 34))) - .chain(once(instr::mem(MemAccessKind::Read, 2, n, 55))) - .collect::>(); - - let machine = machine_maker(); - let program = 
RecursionProgram { instructions, ..Default::default() }; - let mut runtime = - Runtime::, DiffusionMatrixBabyBear>::new( - Arc::new(program.clone()), - BabyBearPoseidon2Inner::new().perm, - ); - runtime.run().unwrap(); - - let (pk, vk) = machine.setup(&program); - let result = run_test_machine(vec![runtime.record], machine, pk, vk.clone()).unwrap(); - - let machine = machine_maker(); - let constraints = - build_wrap_circuit_v2::(&vk, result.shard_proofs[0].clone(), machine); - - let pv: &RecursionPublicValues<_> = - result.shard_proofs[0].public_values.as_slice().borrow(); - let vkey_hash = babybears_to_bn254(&pv.sp1_vk_digest); - let committed_values_digest_bytes: [BabyBear; 32] = - words_to_bytes(&pv.committed_value_digest).try_into().unwrap(); - let committed_values_digest = babybear_bytes_to_bn254(&committed_values_digest_bytes); - - // Build the witness. - let mut witness = OuterWitness::default(); - result.shard_proofs[0].write(&mut witness); - witness.write_commited_values_digest(committed_values_digest); - witness.write_vkey_hash(vkey_hash); - - PlonkBn254Prover::test::(constraints.clone(), witness.clone()); - Groth16Bn254Prover::test::(constraints, witness); - } - - pub fn machine_with_all_chips( - log_erbl_rows: usize, - log_p2_rows: usize, - log_frifold_rows: usize, - ) -> StarkMachine> { - let config = SC::new_with_log_blowup(log2_strict_usize(DEGREE - 1)); - RecursionAir::::machine_with_padding( - config, - log_frifold_rows, - log_p2_rows, - log_erbl_rows, - ) - } - - #[test] - fn test_build_wrap() { - let machine_maker = || machine_with_all_chips::<17>(3, 3, 3); - test_machine(machine_maker); - } - type C = OuterConfig; - type SC = BabyBearPoseidon2Outer; - - #[allow(clippy::type_complexity)] - pub fn const_two_adic_pcs_rounds( - builder: &mut Builder, - commit: [::N; BN254_DIGEST_SIZE], - os: Vec<(TwoAdicMultiplicativeCoset, Vec<(OuterChallenge, Vec)>)>, - ) -> (Digest, Vec>) { - let commit: Digest = commit.map(|x| builder.eval(x)); - - let mut domains_points_and_opens = Vec::new(); - for (domain, poly) in os.into_iter() { - let points: Vec> = - poly.iter().map(|(p, _)| builder.eval(SymbolicExt::from_f(*p))).collect::>(); - let values: Vec>> = poly - .iter() - .map(|(_, v)| { - v.clone() - .iter() - .map(|t| builder.eval(SymbolicExt::from_f(*t))) - .collect::>() - }) - .collect::>(); - let domain_points_and_values = TwoAdicPcsMatsVariable { domain, points, values }; - domains_points_and_opens.push(domain_points_and_values); - } - - (commit, vec![TwoAdicPcsRoundVariable { batch_commit: commit, domains_points_and_opens }]) - } - - #[test] - fn test_verify_two_adic_pcs_outer() { - let mut rng = StdRng::seed_from_u64(0xDEADBEEF); - let log_degrees = &[19, 19]; - let perm = outer_perm(); - let mut fri_config = outer_fri_config(); - - // Lower blowup factor for testing. - fri_config.log_blowup = 2; - let hash = OuterHash::new(perm.clone()).unwrap(); - let compress = OuterCompress::new(perm.clone()); - let val_mmcs = OuterValMmcs::new(hash, compress); - let dft = OuterDft {}; - let pcs: OuterPcs = - OuterPcs::new(log_degrees.iter().copied().max().unwrap(), dft, val_mmcs, fri_config); - - // Generate proof. 
- let domains_and_polys = log_degrees - .iter() - .map(|&d| { - ( - >::natural_domain_for_degree( - &pcs, - 1 << d, - ), - RowMajorMatrix::::rand(&mut rng, 1 << d, 100), - ) - }) - .collect::>(); - let (commit, data) = >::commit( - &pcs, - domains_and_polys.clone(), - ); - let mut challenger = OuterChallenger::new(perm.clone()).unwrap(); - challenger.observe(commit); - let zeta = challenger.sample_ext_element::(); - let points = domains_and_polys.iter().map(|_| vec![zeta]).collect::>(); - let (opening, proof) = pcs.open(vec![(&data, points)], &mut challenger); - - // Verify proof. - let mut challenger = OuterChallenger::new(perm.clone()).unwrap(); - challenger.observe(commit); - let x1 = challenger.sample_ext_element::(); - let os = domains_and_polys - .iter() - .zip(&opening[0]) - .map(|((domain, _), mat_openings)| (*domain, vec![(zeta, mat_openings[0].clone())])) - .collect::>(); - pcs.verify(vec![(commit, os.clone())], &proof, &mut challenger).unwrap(); - - // Define circuit. - let mut builder = Builder::::default(); - let mut config = outer_fri_config(); - - // Lower blowup factor for testing. - config.log_blowup = 2; - let proof = const_two_adic_pcs_proof(&mut builder, proof); - let (commit, rounds) = const_two_adic_pcs_rounds(&mut builder, commit.into(), os); - let mut challenger = crate::challenger::MultiField32ChallengerVariable::new(&mut builder); - challenger.observe_slice(&mut builder, commit); - let x2 = challenger.sample_ext(&mut builder); - let x1: Ext<_, _> = builder.constant(x1); - builder.assert_ext_eq(x1, x2); - verify_two_adic_pcs::<_, BabyBearPoseidon2Outer>( - &mut builder, - &config, - &proof, - &mut challenger, - rounds, - ); - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - let witness = OuterWitness::default(); - PlonkBn254Prover::test::(constraints, witness); - } -} diff --git a/crates/recursion/circuit-v2/src/challenger.rs b/crates/recursion/circuit-v2/src/challenger.rs deleted file mode 100644 index 2ea066920d..0000000000 --- a/crates/recursion/circuit-v2/src/challenger.rs +++ /dev/null @@ -1,650 +0,0 @@ -use p3_field::{AbstractField, Field}; -use sp1_recursion_compiler::{ - circuit::CircuitV2Builder, - ir::{DslIr, Var}, - prelude::{Builder, Config, Ext, Felt}, -}; -use sp1_recursion_core_v2::{ - air::ChallengerPublicValues, - runtime::{HASH_RATE, PERMUTATION_WIDTH}, - NUM_BITS, -}; - -// Constants for the Multifield challenger. -pub const SPONGE_SIZE: usize = 3; -pub const DIGEST_SIZE: usize = 1; -pub const RATE: usize = 16; - -// use crate::{DigestVariable, VerifyingKeyVariable}; - -pub trait CanCopyChallenger { - fn copy(&self, builder: &mut Builder) -> Self; -} -/// Reference: [p3_challenger::CanObserve]. -pub trait CanObserveVariable { - fn observe(&mut self, builder: &mut Builder, value: V); - - fn observe_slice(&mut self, builder: &mut Builder, values: impl IntoIterator) { - for value in values { - self.observe(builder, value); - } - } -} - -pub trait CanSampleVariable { - fn sample(&mut self, builder: &mut Builder) -> V; -} - -/// Reference: [p3_challenger::FieldChallenger]. 
-pub trait FieldChallengerVariable: - CanObserveVariable> + CanSampleVariable> + CanSampleBitsVariable -{ - fn sample_ext(&mut self, builder: &mut Builder) -> Ext; - - fn check_witness(&mut self, builder: &mut Builder, nb_bits: usize, witness: Felt); - - fn duplexing(&mut self, builder: &mut Builder); -} - -pub trait CanSampleBitsVariable { - fn sample_bits(&mut self, builder: &mut Builder, nb_bits: usize) -> Vec; -} - -/// Reference: [p3_challenger::DuplexChallenger] -#[derive(Clone)] -pub struct DuplexChallengerVariable { - pub sponge_state: [Felt; PERMUTATION_WIDTH], - pub input_buffer: Vec>, - pub output_buffer: Vec>, -} - -impl DuplexChallengerVariable { - /// Creates a new duplex challenger with the default state. - pub fn new(builder: &mut Builder) -> Self { - DuplexChallengerVariable:: { - sponge_state: core::array::from_fn(|_| builder.eval(C::F::zero())), - input_buffer: vec![], - output_buffer: vec![], - } - } - - /// Creates a new challenger with the same state as an existing challenger. - pub fn copy(&self, builder: &mut Builder) -> Self { - let DuplexChallengerVariable { sponge_state, input_buffer, output_buffer } = self; - let sponge_state = sponge_state.map(|x| builder.eval(x)); - let mut copy_vec = |v: &Vec>| v.iter().map(|x| builder.eval(*x)).collect(); - DuplexChallengerVariable:: { - sponge_state, - input_buffer: copy_vec(input_buffer), - output_buffer: copy_vec(output_buffer), - } - } - - // /// Asserts that the state of this challenger is equal to the state of another challenger. - // fn assert_eq(&self, builder: &mut Builder, other: &Self) { - // zip(&self.sponge_state, &other.sponge_state) - // .chain(zip(&self.input_buffer, &other.input_buffer)) - // .chain(zip(&self.output_buffer, &other.output_buffer)) - // .for_each(|(&element, &other_element)| { - // builder.assert_felt_eq(element, other_element); - // }); - // } - - // fn reset(&mut self, builder: &mut Builder) { - // self.sponge_state.fill(builder.eval(C::F::zero())); - // self.input_buffer.clear(); - // self.output_buffer.clear(); - // } - - fn observe(&mut self, builder: &mut Builder, value: Felt) { - self.output_buffer.clear(); - - self.input_buffer.push(value); - - if self.input_buffer.len() == HASH_RATE { - self.duplexing(builder); - } - } - - // fn observe_commitment(&mut self, builder: &mut Builder, commitment: DigestVariable) { - // for element in commitment { - // self.observe(builder, element); - // } - // } - - fn sample(&mut self, builder: &mut Builder) -> Felt { - if !self.input_buffer.is_empty() || self.output_buffer.is_empty() { - self.duplexing(builder); - } - - self.output_buffer.pop().expect("output buffer should be non-empty") - } - - fn sample_bits(&mut self, builder: &mut Builder, nb_bits: usize) -> Vec> { - assert!(nb_bits <= NUM_BITS); - let rand_f = self.sample(builder); - let mut rand_f_bits = builder.num2bits_v2_f(rand_f, NUM_BITS); - rand_f_bits.truncate(nb_bits); - rand_f_bits - } - - pub fn public_values(&self, builder: &mut Builder) -> ChallengerPublicValues> { - assert!(self.input_buffer.len() <= PERMUTATION_WIDTH); - assert!(self.output_buffer.len() <= PERMUTATION_WIDTH); - - let sponge_state = self.sponge_state; - let num_inputs = builder.eval(C::F::from_canonical_usize(self.input_buffer.len())); - let num_outputs = builder.eval(C::F::from_canonical_usize(self.output_buffer.len())); - - let input_buffer: [_; PERMUTATION_WIDTH] = self - .input_buffer - .iter() - .copied() - .chain((self.input_buffer.len()..PERMUTATION_WIDTH).map(|_| builder.eval(C::F::zero()))) - .collect::>() - 
.try_into() - .unwrap(); - - let output_buffer: [_; PERMUTATION_WIDTH] = self - .output_buffer - .iter() - .copied() - .chain( - (self.output_buffer.len()..PERMUTATION_WIDTH).map(|_| builder.eval(C::F::zero())), - ) - .collect::>() - .try_into() - .unwrap(); - - ChallengerPublicValues { - sponge_state, - num_inputs, - input_buffer, - num_outputs, - output_buffer, - } - } -} - -impl CanCopyChallenger for DuplexChallengerVariable { - fn copy(&self, builder: &mut Builder) -> Self { - DuplexChallengerVariable::copy(self, builder) - } -} - -impl CanObserveVariable> for DuplexChallengerVariable { - fn observe(&mut self, builder: &mut Builder, value: Felt) { - DuplexChallengerVariable::observe(self, builder, value); - } - - fn observe_slice( - &mut self, - builder: &mut Builder, - values: impl IntoIterator>, - ) { - for value in values { - self.observe(builder, value); - } - } -} - -impl CanObserveVariable; N]> - for DuplexChallengerVariable -{ - fn observe(&mut self, builder: &mut Builder, values: [Felt; N]) { - for value in values { - self.observe(builder, value); - } - } -} - -impl CanSampleVariable> for DuplexChallengerVariable { - fn sample(&mut self, builder: &mut Builder) -> Felt { - DuplexChallengerVariable::sample(self, builder) - } -} - -impl CanSampleBitsVariable> for DuplexChallengerVariable { - fn sample_bits(&mut self, builder: &mut Builder, nb_bits: usize) -> Vec> { - DuplexChallengerVariable::sample_bits(self, builder, nb_bits) - } -} - -impl FieldChallengerVariable> for DuplexChallengerVariable { - fn sample_ext(&mut self, builder: &mut Builder) -> Ext { - let a = self.sample(builder); - let b = self.sample(builder); - let c = self.sample(builder); - let d = self.sample(builder); - builder.ext_from_base_slice(&[a, b, c, d]) - } - - fn check_witness( - &mut self, - builder: &mut Builder, - nb_bits: usize, - witness: Felt<::F>, - ) { - self.observe(builder, witness); - let element_bits = self.sample_bits(builder, nb_bits); - for bit in element_bits { - builder.assert_felt_eq(bit, C::F::zero()); - } - } - - fn duplexing(&mut self, builder: &mut Builder) { - assert!(self.input_buffer.len() <= HASH_RATE); - - self.sponge_state[0..self.input_buffer.len()].copy_from_slice(self.input_buffer.as_slice()); - self.input_buffer.clear(); - - self.sponge_state = builder.poseidon2_permute_v2(self.sponge_state); - - self.output_buffer.clear(); - self.output_buffer.extend_from_slice(&self.sponge_state); - } -} - -#[derive(Clone)] -pub struct MultiField32ChallengerVariable { - sponge_state: [Var; 3], - input_buffer: Vec>, - output_buffer: Vec>, - num_f_elms: usize, -} - -impl MultiField32ChallengerVariable { - pub fn new(builder: &mut Builder) -> Self { - MultiField32ChallengerVariable:: { - sponge_state: [ - builder.eval(C::N::zero()), - builder.eval(C::N::zero()), - builder.eval(C::N::zero()), - ], - input_buffer: vec![], - output_buffer: vec![], - num_f_elms: C::N::bits() / 64, - } - } - - pub fn duplexing(&mut self, builder: &mut Builder) { - assert!(self.input_buffer.len() <= self.num_f_elms * SPONGE_SIZE); - - for (i, f_chunk) in self.input_buffer.chunks(self.num_f_elms).enumerate() { - self.sponge_state[i] = reduce_32(builder, f_chunk); - } - self.input_buffer.clear(); - - // TODO make this a method for the builder. 
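An aside on the packing used in `duplexing` above (not part of the original file): `reduce_32` treats each 32-bit BabyBear element as a base-2^32 limb of a single larger sponge element, and `split_32` cuts the permuted element back into 64-bit chunks. A plain-integer sketch of the same arithmetic, using `u128` in place of the 254-bit Bn254 field and assuming the limbs fit:

fn pack_base_2_32(limbs: &[u32]) -> u128 {
    debug_assert!(limbs.len() * 32 <= 128);
    // limbs[0] is the least-significant limb, matching reduce_32's power loop.
    limbs.iter().rev().fold(0u128, |acc, &limb| (acc << 32) | u128::from(limb))
}

fn split_base_2_64(packed: u128, n: usize) -> Vec<u64> {
    debug_assert!(n * 64 <= 128);
    // Mirror of split_32: take n 64-bit chunks, least-significant first.
    (0..n).map(|i| (packed >> (64 * i)) as u64).collect()
}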
- builder.push(DslIr::CircuitPoseidon2Permute(self.sponge_state)); - - self.output_buffer.clear(); - for &pf_val in self.sponge_state.iter() { - let f_vals = split_32(builder, pf_val, self.num_f_elms); - for f_val in f_vals { - self.output_buffer.push(f_val); - } - } - } - - pub fn observe(&mut self, builder: &mut Builder, value: Felt) { - self.output_buffer.clear(); - - self.input_buffer.push(value); - if self.input_buffer.len() == self.num_f_elms * SPONGE_SIZE { - self.duplexing(builder); - } - } - - pub fn observe_commitment( - &mut self, - builder: &mut Builder, - value: [Var; DIGEST_SIZE], - ) { - for val in value { - let f_vals: Vec> = split_32(builder, val, self.num_f_elms); - for f_val in f_vals { - self.observe(builder, f_val); - } - } - } - - pub fn sample(&mut self, builder: &mut Builder) -> Felt { - if !self.input_buffer.is_empty() || self.output_buffer.is_empty() { - self.duplexing(builder); - } - - self.output_buffer.pop().expect("output buffer should be non-empty") - } - - pub fn sample_ext(&mut self, builder: &mut Builder) -> Ext { - let a = self.sample(builder); - let b = self.sample(builder); - let c = self.sample(builder); - let d = self.sample(builder); - builder.felts2ext(&[a, b, c, d]) - } - - pub fn sample_bits(&mut self, builder: &mut Builder, bits: usize) -> Vec> { - let rand_f = self.sample(builder); - builder.num2bits_f_circuit(rand_f)[0..bits].to_vec() - } - - pub fn check_witness(&mut self, builder: &mut Builder, bits: usize, witness: Felt) { - self.observe(builder, witness); - let element = self.sample_bits(builder, bits); - for bit in element { - builder.assert_var_eq(bit, C::N::from_canonical_usize(0)); - } - } -} - -impl CanCopyChallenger for MultiField32ChallengerVariable { - /// Creates a new challenger with the same state as an existing challenger. 
- fn copy(&self, builder: &mut Builder) -> Self { - let MultiField32ChallengerVariable { - sponge_state, - input_buffer, - output_buffer, - num_f_elms, - } = self; - let sponge_state = sponge_state.map(|x| builder.eval(x)); - let mut copy_vec = |v: &Vec>| v.iter().map(|x| builder.eval(*x)).collect(); - MultiField32ChallengerVariable:: { - sponge_state, - num_f_elms: *num_f_elms, - input_buffer: copy_vec(input_buffer), - output_buffer: copy_vec(output_buffer), - } - } -} - -impl CanObserveVariable> for MultiField32ChallengerVariable { - fn observe(&mut self, builder: &mut Builder, value: Felt) { - MultiField32ChallengerVariable::observe(self, builder, value); - } -} - -impl CanObserveVariable; DIGEST_SIZE]> - for MultiField32ChallengerVariable -{ - fn observe(&mut self, builder: &mut Builder, value: [Var; DIGEST_SIZE]) { - self.observe_commitment(builder, value) - } -} - -impl CanObserveVariable> for MultiField32ChallengerVariable { - fn observe(&mut self, builder: &mut Builder, value: Var) { - self.observe_commitment(builder, [value]) - } -} - -impl CanSampleVariable> for MultiField32ChallengerVariable { - fn sample(&mut self, builder: &mut Builder) -> Felt { - MultiField32ChallengerVariable::sample(self, builder) - } -} - -impl CanSampleBitsVariable> for MultiField32ChallengerVariable { - fn sample_bits(&mut self, builder: &mut Builder, bits: usize) -> Vec> { - MultiField32ChallengerVariable::sample_bits(self, builder, bits) - } -} - -impl FieldChallengerVariable> for MultiField32ChallengerVariable { - fn sample_ext(&mut self, builder: &mut Builder) -> Ext { - MultiField32ChallengerVariable::sample_ext(self, builder) - } - - fn check_witness(&mut self, builder: &mut Builder, bits: usize, witness: Felt) { - MultiField32ChallengerVariable::check_witness(self, builder, bits, witness); - } - - fn duplexing(&mut self, builder: &mut Builder) { - MultiField32ChallengerVariable::duplexing(self, builder); - } -} - -pub fn reduce_32(builder: &mut Builder, vals: &[Felt]) -> Var { - let mut power = C::N::one(); - let result: Var = builder.eval(C::N::zero()); - for val in vals.iter() { - let val = builder.felt2var_circuit(*val); - builder.assign(result, result + val * power); - power *= C::N::from_canonical_u64(1u64 << 32); - } - result -} - -pub fn split_32(builder: &mut Builder, val: Var, n: usize) -> Vec> { - let bits = builder.num2bits_v_circuit(val, 256); - let mut results = Vec::new(); - for i in 0..n { - let result: Felt = builder.eval(C::F::zero()); - for j in 0..64 { - let bit = bits[i * 64 + j]; - let t = builder.eval(result + C::F::from_wrapped_u64(1 << j)); - let z = builder.select_f(bit, t, result); - builder.assign(result, z); - } - results.push(result); - } - results -} - -#[cfg(test)] -pub(crate) mod tests { - use std::iter::zip; - - use crate::{ - challenger::{CanCopyChallenger, MultiField32ChallengerVariable}, - hash::{FieldHasherVariable, BN254_DIGEST_SIZE}, - utils::tests::run_test_recursion, - }; - use p3_baby_bear::BabyBear; - use p3_bn254_fr::Bn254Fr; - use p3_challenger::{CanObserve, CanSample, CanSampleBits, FieldChallenger}; - use p3_field::AbstractField; - use p3_symmetric::{CryptographicHasher, Hash, PseudoCompressionFunction}; - use sp1_recursion_compiler::{ - asm::{AsmBuilder, AsmConfig}, - config::OuterConfig, - constraints::ConstraintCompiler, - ir::{Builder, Config, Ext, ExtConst, Felt, Var}, - }; - use sp1_recursion_core_v2::stark::config::{ - outer_perm, BabyBearPoseidon2Outer, OuterCompress, OuterHash, - }; - use sp1_recursion_gnark_ffi::PlonkBn254Prover; - use 
sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; - - use crate::{ - challenger::{DuplexChallengerVariable, FieldChallengerVariable}, - witness::OuterWitness, - }; - - type SC = BabyBearPoseidon2; - type C = OuterConfig; - type F = ::Val; - type EF = ::Challenge; - - #[test] - fn test_compiler_challenger() { - let config = SC::default(); - let mut challenger = config.challenger(); - challenger.observe(F::one()); - challenger.observe(F::two()); - challenger.observe(F::two()); - challenger.observe(F::two()); - let result: F = challenger.sample(); - println!("expected result: {}", result); - let result_ef: EF = challenger.sample_ext_element(); - println!("expected result_ef: {}", result_ef); - - let mut builder = AsmBuilder::::default(); - - let mut challenger = DuplexChallengerVariable::> { - sponge_state: core::array::from_fn(|_| builder.eval(F::zero())), - input_buffer: vec![], - output_buffer: vec![], - }; - let one: Felt<_> = builder.eval(F::one()); - let two: Felt<_> = builder.eval(F::two()); - - challenger.observe(&mut builder, one); - challenger.observe(&mut builder, two); - challenger.observe(&mut builder, two); - challenger.observe(&mut builder, two); - let element = challenger.sample(&mut builder); - let element_ef = challenger.sample_ext(&mut builder); - - let expected_result: Felt<_> = builder.eval(result); - let expected_result_ef: Ext<_, _> = builder.eval(result_ef.cons()); - builder.print_f(element); - builder.assert_felt_eq(expected_result, element); - builder.print_e(element_ef); - builder.assert_ext_eq(expected_result_ef, element_ef); - - run_test_recursion(builder.operations, None); - } - - #[test] - fn test_challenger_outer() { - type SC = BabyBearPoseidon2Outer; - type F = ::Val; - type EF = ::Challenge; - type N = ::N; - - let config = SC::default(); - let mut challenger = config.challenger(); - challenger.observe(F::one()); - challenger.observe(F::two()); - challenger.observe(F::two()); - challenger.observe(F::two()); - let commit = Hash::from([N::two()]); - challenger.observe(commit); - let result: F = challenger.sample(); - println!("expected result: {}", result); - let result_ef: EF = challenger.sample_ext_element(); - println!("expected result_ef: {}", result_ef); - let mut bits = challenger.sample_bits(30); - let mut bits_vec = vec![]; - for _ in 0..30 { - bits_vec.push(bits % 2); - bits >>= 1; - } - println!("expected bits: {:?}", bits_vec); - - let mut builder = Builder::::default(); - - // let width: Var<_> = builder.eval(F::from_canonical_usize(PERMUTATION_WIDTH)); - let mut challenger = MultiField32ChallengerVariable::::new(&mut builder); - let one: Felt<_> = builder.eval(F::one()); - let two: Felt<_> = builder.eval(F::two()); - let two_var: Var<_> = builder.eval(N::two()); - // builder.halt(); - challenger.observe(&mut builder, one); - challenger.observe(&mut builder, two); - challenger.observe(&mut builder, two); - challenger.observe(&mut builder, two); - challenger.observe_commitment(&mut builder, [two_var]); - - // Check to make sure the copying works. 
- challenger = challenger.copy(&mut builder); - let element = challenger.sample(&mut builder); - let element_ef = challenger.sample_ext(&mut builder); - let bits = challenger.sample_bits(&mut builder, 31); - - let expected_result: Felt<_> = builder.eval(result); - let expected_result_ef: Ext<_, _> = builder.eval(result_ef.cons()); - builder.print_f(element); - builder.assert_felt_eq(expected_result, element); - builder.print_e(element_ef); - builder.assert_ext_eq(expected_result_ef, element_ef); - for (expected_bit, bit) in zip(bits_vec.iter(), bits.iter()) { - let expected_bit: Var<_> = builder.eval(N::from_canonical_usize(*expected_bit)); - builder.print_v(*bit); - builder.assert_var_eq(expected_bit, *bit); - } - - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - let witness = OuterWitness::default(); - PlonkBn254Prover::test::(constraints, witness); - } - - #[test] - fn test_select_chain_digest() { - type N = ::N; - - let mut builder = Builder::::default(); - - let one: Var<_> = builder.eval(N::one()); - let two: Var<_> = builder.eval(N::two()); - - let to_swap = [[one], [two]]; - let result = BabyBearPoseidon2Outer::select_chain_digest(&mut builder, one, to_swap); - - builder.assert_var_eq(result[0][0], two); - builder.assert_var_eq(result[1][0], one); - - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - let witness = OuterWitness::default(); - PlonkBn254Prover::test::(constraints, witness); - } - - #[test] - fn test_p2_hash() { - let perm = outer_perm(); - let hasher = OuterHash::new(perm.clone()).unwrap(); - - let input: [BabyBear; 7] = [ - BabyBear::from_canonical_u32(0), - BabyBear::from_canonical_u32(1), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - ]; - let output = hasher.hash_iter(input); - - let mut builder = Builder::::default(); - let a: Felt<_> = builder.eval(input[0]); - let b: Felt<_> = builder.eval(input[1]); - let c: Felt<_> = builder.eval(input[2]); - let d: Felt<_> = builder.eval(input[3]); - let e: Felt<_> = builder.eval(input[4]); - let f: Felt<_> = builder.eval(input[5]); - let g: Felt<_> = builder.eval(input[6]); - let result = BabyBearPoseidon2Outer::hash(&mut builder, &[a, b, c, d, e, f, g]); - - builder.assert_var_eq(result[0], output[0]); - - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - PlonkBn254Prover::test::(constraints.clone(), OuterWitness::default()); - } - - #[test] - fn test_p2_compress() { - type OuterDigestVariable = [Var<::N>; BN254_DIGEST_SIZE]; - let perm = outer_perm(); - let compressor = OuterCompress::new(perm.clone()); - - let a: [Bn254Fr; 1] = [Bn254Fr::two()]; - let b: [Bn254Fr; 1] = [Bn254Fr::two()]; - let gt = compressor.compress([a, b]); - - let mut builder = Builder::::default(); - let a: OuterDigestVariable = [builder.eval(a[0])]; - let b: OuterDigestVariable = [builder.eval(b[0])]; - let result = BabyBearPoseidon2Outer::compress(&mut builder, [a, b]); - builder.assert_var_eq(result[0], gt[0]); - - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - PlonkBn254Prover::test::(constraints.clone(), OuterWitness::default()); - } -} diff --git a/crates/recursion/circuit-v2/src/constraints.rs b/crates/recursion/circuit-v2/src/constraints.rs deleted file mode 100644 index 
1512d144ae..0000000000 --- a/crates/recursion/circuit-v2/src/constraints.rs +++ /dev/null @@ -1,237 +0,0 @@ -use p3_air::{Air, BaseAir}; -use p3_baby_bear::BabyBear; -use p3_commit::{LagrangeSelectors, Mmcs, PolynomialSpace, TwoAdicMultiplicativeCoset}; -use p3_field::{AbstractExtensionField, AbstractField, TwoAdicField}; -use p3_matrix::dense::RowMajorMatrix; - -use sp1_recursion_compiler::ir::{ - Builder, Config, Ext, ExtensionOperand, Felt, SymbolicExt, SymbolicFelt, -}; -use sp1_stark::{ - air::MachineAir, AirOpenedValues, ChipOpenedValues, GenericVerifierConstraintFolder, - MachineChip, OpeningShapeError, -}; - -use crate::{ - domain::PolynomialSpaceVariable, stark::StarkVerifier, BabyBearFriConfigVariable, CircuitConfig, -}; - -pub type RecursiveVerifierConstraintFolder<'a, C> = GenericVerifierConstraintFolder< - 'a, - ::F, - ::EF, - Felt<::F>, - Ext<::F, ::EF>, - SymbolicExt<::F, ::EF>, ->; - -impl StarkVerifier -where - C::F: TwoAdicField, - SC: BabyBearFriConfigVariable, - C: CircuitConfig, - >::ProverData>: Clone, - A: MachineAir + for<'a> Air>, -{ - #[allow(clippy::too_many_arguments)] - pub fn verify_constraints( - builder: &mut Builder, - chip: &MachineChip, - opening: &ChipOpenedValues>, - trace_domain: TwoAdicMultiplicativeCoset, - qc_domains: Vec>, - zeta: Ext, - alpha: Ext, - permutation_challenges: &[Ext], - public_values: &[Felt], - ) { - let sels = trace_domain.selectors_at_point_variable(builder, zeta); - - // Recompute the quotient at zeta from the chunks. - let quotient = Self::recompute_quotient(builder, opening, &qc_domains, zeta); - - // Calculate the evaluations of the constraints at zeta. - let folded_constraints = Self::eval_constraints( - builder, - chip, - opening, - &sels, - alpha, - permutation_challenges, - public_values, - ); - - // Assert that the quotient times the zerofier is equal to the folded constraints. 
- builder.assert_ext_eq(folded_constraints * sels.inv_zeroifier, quotient); - } - - pub fn eval_constraints( - builder: &mut Builder, - chip: &MachineChip, - opening: &ChipOpenedValues>, - selectors: &LagrangeSelectors>, - alpha: Ext, - permutation_challenges: &[Ext], - public_values: &[Felt], - ) -> Ext { - let mut unflatten = |v: &[Ext]| { - v.chunks_exact(>::D) - .map(|chunk| { - builder.eval( - chunk - .iter() - .enumerate() - .map( - |(e_i, x): (usize, &Ext)| -> SymbolicExt { - SymbolicExt::from(*x) * C::EF::monomial(e_i) - }, - ) - .sum::>(), - ) - }) - .collect::>>() - }; - let perm_opening = AirOpenedValues { - local: unflatten(&opening.permutation.local), - next: unflatten(&opening.permutation.next), - }; - - let mut folder = RecursiveVerifierConstraintFolder:: { - preprocessed: opening.preprocessed.view(), - main: opening.main.view(), - perm: perm_opening.view(), - perm_challenges: permutation_challenges, - cumulative_sum: opening.cumulative_sum, - public_values, - is_first_row: selectors.is_first_row, - is_last_row: selectors.is_last_row, - is_transition: selectors.is_transition, - alpha, - accumulator: SymbolicExt::zero(), - _marker: std::marker::PhantomData, - }; - - chip.eval(&mut folder); - builder.eval(folder.accumulator) - } - - pub fn recompute_quotient( - builder: &mut Builder, - opening: &ChipOpenedValues>, - qc_domains: &[TwoAdicMultiplicativeCoset], - zeta: Ext, - ) -> Ext { - let zps = qc_domains - .iter() - .enumerate() - .map(|(i, domain)| { - let (zs, zinvs) = qc_domains - .iter() - .enumerate() - .filter(|(j, _)| *j != i) - .map(|(_, other_domain)| { - let first_point = builder.eval(domain.first_point()); - ( - other_domain - .zp_at_point_variable(builder, zeta) - .to_operand() - .symbolic(), - other_domain.zp_at_point_f(builder, first_point).inverse(), - ) - }) - .unzip::<_, _, Vec<_>, Vec<_>>(); - zs.into_iter().product::>() - * zinvs.into_iter().product::>() - }) - .collect::>>() - .into_iter() - .map(|x| builder.eval(x)) - .collect::>>(); - - builder.eval( - opening - .quotient - .iter() - .enumerate() - .map(|(ch_i, ch)| { - assert_eq!(ch.len(), C::EF::D); - ch.iter() - .enumerate() - .map(|(e_i, &c)| zps[ch_i] * C::EF::monomial(e_i) * c) - .sum::>() - }) - .sum::>(), - ) - } - - pub fn verify_opening_shape( - chip: &MachineChip, - opening: &ChipOpenedValues>, - ) -> Result<(), OpeningShapeError> { - // Verify that the preprocessed width matches the expected value for the chip. - if opening.preprocessed.local.len() != chip.preprocessed_width() { - return Err(OpeningShapeError::PreprocessedWidthMismatch( - chip.preprocessed_width(), - opening.preprocessed.local.len(), - )); - } - if opening.preprocessed.next.len() != chip.preprocessed_width() { - return Err(OpeningShapeError::PreprocessedWidthMismatch( - chip.preprocessed_width(), - opening.preprocessed.next.len(), - )); - } - - // Verify that the main width matches the expected value for the chip. - if opening.main.local.len() != chip.width() { - return Err(OpeningShapeError::MainWidthMismatch( - chip.width(), - opening.main.local.len(), - )); - } - if opening.main.next.len() != chip.width() { - return Err(OpeningShapeError::MainWidthMismatch( - chip.width(), - opening.main.next.len(), - )); - } - - // Verify that the permutation width matches the expected value for the chip. 
- if opening.permutation.local.len() - != chip.permutation_width() * >::D - { - return Err(OpeningShapeError::PermutationWidthMismatch( - chip.permutation_width(), - opening.permutation.local.len(), - )); - } - if opening.permutation.next.len() - != chip.permutation_width() * >::D - { - return Err(OpeningShapeError::PermutationWidthMismatch( - chip.permutation_width(), - opening.permutation.next.len(), - )); - } - - // Verift that the number of quotient chunks matches the expected value for the chip. - if opening.quotient.len() != chip.quotient_width() { - return Err(OpeningShapeError::QuotientWidthMismatch( - chip.quotient_width(), - opening.quotient.len(), - )); - } - // For each quotient chunk, verify that the number of elements is equal to the degree of the - // challenge extension field over the value field. - for slice in &opening.quotient { - if slice.len() != >::D { - return Err(OpeningShapeError::QuotientChunkSizeMismatch( - >::D, - slice.len(), - )); - } - } - - Ok(()) - } -} diff --git a/crates/recursion/circuit-v2/src/domain.rs b/crates/recursion/circuit-v2/src/domain.rs deleted file mode 100644 index 7c16673615..0000000000 --- a/crates/recursion/circuit-v2/src/domain.rs +++ /dev/null @@ -1,89 +0,0 @@ -use p3_commit::{LagrangeSelectors, PolynomialSpace, TwoAdicMultiplicativeCoset}; -use p3_field::{AbstractExtensionField, AbstractField, Field, TwoAdicField}; -use sp1_recursion_compiler::prelude::*; - -/// Reference: [p3_commit::PolynomialSpace] -pub trait PolynomialSpaceVariable: Sized + PolynomialSpace { - fn selectors_at_point_variable( - &self, - builder: &mut Builder, - point: Ext, - ) -> LagrangeSelectors>; - - fn zp_at_point_variable( - &self, - builder: &mut Builder, - point: Ext, - ) -> Ext; - - fn next_point_variable( - &self, - builder: &mut Builder, - point: Ext<::F, ::EF>, - ) -> Ext<::F, ::EF>; - - fn zp_at_point_f( - &self, - builder: &mut Builder, - point: Felt<::F>, - ) -> Felt<::F>; -} - -impl PolynomialSpaceVariable for TwoAdicMultiplicativeCoset -where - C::F: TwoAdicField, -{ - fn next_point_variable( - &self, - builder: &mut Builder, - point: Ext<::F, ::EF>, - ) -> Ext<::F, ::EF> { - let g = C::F::two_adic_generator(self.log_n); - // let g: Felt<_> = builder.eval(g); - builder.eval(point * g) - } - - fn selectors_at_point_variable( - &self, - builder: &mut Builder, - point: Ext<::F, ::EF>, - ) -> LagrangeSelectors::F, ::EF>> { - let unshifted_point: Ext<_, _> = builder.eval(point * self.shift.inverse()); - let z_h_expr = builder - .exp_power_of_2_v::>(unshifted_point, Usize::Const(self.log_n)) - - C::EF::one(); - let z_h: Ext<_, _> = builder.eval(z_h_expr); - let g = C::F::two_adic_generator(self.log_n); - let ginv = g.inverse(); - LagrangeSelectors { - is_first_row: builder.eval(z_h / (unshifted_point - C::EF::one())), - is_last_row: builder.eval(z_h / (unshifted_point - ginv)), - is_transition: builder.eval(unshifted_point - ginv), - inv_zeroifier: builder.eval(z_h.inverse()), - } - } - - fn zp_at_point_variable( - &self, - builder: &mut Builder, - point: Ext<::F, ::EF>, - ) -> Ext<::F, ::EF> { - let unshifted_power = builder.exp_power_of_2_v::>( - point - * C::EF::from_base_slice(&[self.shift, C::F::zero(), C::F::zero(), C::F::zero()]) - .inverse() - .cons(), - Usize::Const(self.log_n), - ); - builder.eval(unshifted_power - C::EF::one()) - } - fn zp_at_point_f( - &self, - builder: &mut Builder, - point: Felt<::F>, - ) -> Felt<::F> { - let unshifted_power = builder - .exp_power_of_2_v::>(point * self.shift.inverse(), Usize::Const(self.log_n)); - 
builder.eval(unshifted_power - C::F::one()) - } -} diff --git a/crates/recursion/circuit-v2/src/fri.rs b/crates/recursion/circuit-v2/src/fri.rs deleted file mode 100644 index 9ad2b73693..0000000000 --- a/crates/recursion/circuit-v2/src/fri.rs +++ /dev/null @@ -1,637 +0,0 @@ -use itertools::{izip, Itertools}; -use p3_baby_bear::BabyBear; -use p3_commit::PolynomialSpace; -use p3_field::{AbstractField, TwoAdicField}; -use p3_fri::FriConfig; -use p3_matrix::Dimensions; -use p3_util::log2_strict_usize; -use sp1_recursion_compiler::ir::{Builder, Felt, SymbolicExt, SymbolicFelt}; -use std::{ - cmp::Reverse, - iter::{once, repeat_with, zip}, -}; - -use crate::{ - challenger::{CanSampleBitsVariable, FieldChallengerVariable}, - BabyBearFriConfigVariable, CanObserveVariable, CircuitConfig, Ext, FriChallenges, FriMmcs, - FriProofVariable, FriQueryProofVariable, TwoAdicPcsProofVariable, TwoAdicPcsRoundVariable, -}; - -pub fn verify_shape_and_sample_challenges< - C: CircuitConfig, - SC: BabyBearFriConfigVariable, ->( - builder: &mut Builder, - config: &FriConfig>, - proof: &FriProofVariable, - challenger: &mut SC::FriChallengerVariable, -) -> FriChallenges { - let betas = proof - .commit_phase_commits - .iter() - .map(|commitment| { - challenger.observe(builder, *commitment); - challenger.sample_ext(builder) - }) - .collect(); - - // Observe the final polynomial. - let final_poly_felts = C::ext2felt(builder, proof.final_poly); - final_poly_felts.iter().for_each(|felt| { - challenger.observe(builder, *felt); - }); - - assert_eq!(proof.query_proofs.len(), config.num_queries); - challenger.check_witness(builder, config.proof_of_work_bits, proof.pow_witness); - - let log_max_height = proof.commit_phase_commits.len() + config.log_blowup; - let query_indices: Vec> = - repeat_with(|| challenger.sample_bits(builder, log_max_height)) - .take(config.num_queries) - .collect(); - - FriChallenges { query_indices, betas } -} - -pub fn verify_two_adic_pcs, SC: BabyBearFriConfigVariable>( - builder: &mut Builder, - config: &FriConfig>, - proof: &TwoAdicPcsProofVariable, - challenger: &mut SC::FriChallengerVariable, - rounds: Vec>, -) { - let alpha = challenger.sample_ext(builder); - - let fri_challenges = - verify_shape_and_sample_challenges::(builder, config, &proof.fri_proof, challenger); - - let log_global_max_height = proof.fri_proof.commit_phase_commits.len() + config.log_blowup; - - // The powers of alpha, where the ith element is alpha^i. - let mut alpha_pows: Vec> = - vec![builder.eval(SymbolicExt::from_f(C::EF::one()))]; - - let reduced_openings = proof - .query_openings - .iter() - .zip(&fri_challenges.query_indices) - .map(|(query_opening, index_bits)| { - // The powers of alpha, where the ith element is alpha^i. 
- let mut log_height_pow = [0usize; 32]; - let mut ro: [Ext; 32] = - [builder.eval(SymbolicExt::from_f(C::EF::zero())); 32]; - - for (batch_opening, round) in zip(query_opening, rounds.iter().cloned()) { - let batch_commit = round.batch_commit; - let mats = round.domains_points_and_opens; - let batch_heights = - mats.iter().map(|mat| mat.domain.size() << config.log_blowup).collect_vec(); - let batch_dims = batch_heights - .iter() - .map(|&height| Dimensions { width: 0, height }) - .collect_vec(); - - let batch_max_height = batch_heights.iter().max().expect("Empty batch?"); - let log_batch_max_height = log2_strict_usize(*batch_max_height); - let bits_reduced = log_global_max_height - log_batch_max_height; - - let reduced_index_bits = index_bits[bits_reduced..].to_vec(); - - verify_batch::( - builder, - batch_commit, - batch_dims, - reduced_index_bits, - batch_opening.opened_values.clone(), - batch_opening.opening_proof.clone(), - ); - - for (mat_opening, mat) in izip!(&batch_opening.opened_values, mats) { - let mat_domain = mat.domain; - let mat_points = mat.points; - let mat_values = mat.values; - let log_height = log2_strict_usize(mat_domain.size()) + config.log_blowup; - - let bits_reduced = log_global_max_height - log_height; - let reduced_index_bits_trunc = - index_bits[bits_reduced..(bits_reduced + log_height)].to_vec(); - - let g = builder.generator(); - let two_adic_generator: Felt<_> = - builder.eval(C::F::two_adic_generator(log_height)); - let two_adic_generator_exp = - C::exp_reverse_bits(builder, two_adic_generator, reduced_index_bits_trunc); - let x: Felt<_> = builder.eval(g * two_adic_generator_exp); - - for (z, ps_at_z) in izip!(mat_points, mat_values) { - // builder.cycle_tracker("2adic-hotloop"); - let mut acc: Ext = - builder.eval(SymbolicExt::from_f(C::EF::zero())); - for (p_at_x, p_at_z) in izip!(mat_opening.clone(), ps_at_z) { - let pow = log_height_pow[log_height]; - // Fill in any missing powers of alpha. 
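An aside on this accumulation (not part of the original file): per matrix and opening point, the loop folds the opened columns as sum_j alpha^pow_j * (p_j(z) - p_j(x)) and then adds that sum divided by (z - x) into the reduced opening for the matrix's log-height. A simplified sketch over the BabyBear base field (the real verifier works in the extension field and reuses alpha powers per log-height across batches):

const P: u64 = 2013265921; // BabyBear modulus, 2^31 - 2^27 + 1.

fn add(a: u64, b: u64) -> u64 { (a + b) % P }
fn sub(a: u64, b: u64) -> u64 { (a + P - b) % P }
fn mul(a: u64, b: u64) -> u64 { a * b % P }

fn inv(a: u64) -> u64 {
    // Fermat inverse: a^(P - 2) mod P.
    let (mut base, mut exp, mut acc) = (a, P - 2, 1u64);
    while exp > 0 {
        if exp & 1 == 1 { acc = mul(acc, base); }
        base = mul(base, base);
        exp >>= 1;
    }
    acc
}

// Reduced opening of one matrix at z: sum_j alpha^j * (p_j(z) - p_j(x)) / (z - x).
fn reduced_opening(alpha: u64, z: u64, x: u64, p_at_z: &[u64], p_at_x: &[u64]) -> u64 {
    assert_ne!(z, x);
    let (mut acc, mut alpha_pow) = (0u64, 1u64);
    for (&pz, &px) in p_at_z.iter().zip(p_at_x) {
        acc = add(acc, mul(alpha_pow, sub(pz, px)));
        alpha_pow = mul(alpha_pow, alpha);
    }
    mul(acc, inv(sub(z, x)))
}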
- (alpha_pows.len()..pow + 1).for_each(|_| { - let new_alpha = builder.eval(*alpha_pows.last().unwrap() * alpha); - builder.reduce_e(new_alpha); - alpha_pows.push(new_alpha); - }); - acc = builder.eval(acc + (alpha_pows[pow] * (p_at_z - p_at_x[0]))); - log_height_pow[log_height] += 1; - } - ro[log_height] = builder.eval(ro[log_height] + acc / (z - x)); - // builder.cycle_tracker("2adic-hotloop"); - } - } - } - ro - }) - .collect::>(); - - verify_challenges::( - builder, - config, - &proof.fri_proof, - &fri_challenges, - reduced_openings, - ); -} - -pub fn verify_challenges, SC: BabyBearFriConfigVariable>( - builder: &mut Builder, - config: &FriConfig>, - proof: &FriProofVariable, - challenges: &FriChallenges, - reduced_openings: Vec<[Ext; 32]>, -) { - let log_max_height = proof.commit_phase_commits.len() + config.log_blowup; - for ((index_bits, query_proof), ro) in - challenges.query_indices.iter().zip(&proof.query_proofs).zip(reduced_openings) - { - let folded_eval = verify_query::( - builder, - proof.commit_phase_commits.clone(), - index_bits, - query_proof.clone(), - challenges.betas.clone(), - ro, - log_max_height, - ); - - builder.assert_ext_eq(folded_eval, proof.final_poly); - } -} - -pub fn verify_query, SC: BabyBearFriConfigVariable>( - builder: &mut Builder, - commit_phase_commits: Vec, - index_bits: &[C::Bit], - proof: FriQueryProofVariable, - betas: Vec>, - reduced_openings: [Ext; 32], - log_max_height: usize, -) -> Ext { - let mut folded_eval: Ext<_, _> = builder.constant(C::EF::zero()); - let two_adic_generator: Felt<_> = builder.constant(C::F::two_adic_generator(log_max_height)); - - let x_felt = - C::exp_reverse_bits(builder, two_adic_generator, index_bits[..log_max_height].to_vec()); - let mut x: Ext<_, _> = builder.eval(SymbolicExt::one() * SymbolicFelt::from(x_felt)); - - for (offset, log_folded_height, commit, step, beta) in izip!( - 0.., - (0..log_max_height).rev(), - commit_phase_commits, - &proof.commit_phase_openings, - betas, - ) { - folded_eval = builder.eval(folded_eval + reduced_openings[log_folded_height + 1]); - - let index_sibling_complement: C::Bit = index_bits[offset].clone(); - // let index_sibling_complement: Felt<_> = builder.constant(C::F::one()); - let index_pair = &index_bits[(offset + 1)..]; - - builder.reduce_e(folded_eval); - - let evals_ext = C::select_chain_ef( - builder, - index_sibling_complement.clone(), - once(folded_eval), - once(step.sibling_value), - ); - let evals_felt = vec![ - C::ext2felt(builder, evals_ext[0]).to_vec(), - C::ext2felt(builder, evals_ext[1]).to_vec(), - ]; - - let dims = &[Dimensions { width: 2, height: (1 << log_folded_height) }]; - verify_batch::( - builder, - commit, - dims.to_vec(), - index_pair.to_vec(), - [evals_felt].to_vec(), - step.opening_proof.clone(), - ); - - let xs_new: Ext<_, _> = builder.eval(x * C::EF::two_adic_generator(1)); - let xs = C::select_chain_ef(builder, index_sibling_complement, once(x), once(xs_new)); - folded_eval = builder - .eval(evals_ext[0] + (beta - xs[0]) * (evals_ext[1] - evals_ext[0]) / (xs[1] - xs[0])); - x = builder.eval(x * x); - } - - folded_eval -} - -pub fn verify_batch, SC: BabyBearFriConfigVariable>( - builder: &mut Builder, - commit: SC::Digest, - dimensions: Vec, - index_bits: Vec, - opened_values: Vec>>>, - proof: Vec, -) { - let mut heights_tallest_first = - dimensions.iter().enumerate().sorted_by_key(|(_, dims)| Reverse(dims.height)).peekable(); - - let mut curr_height_padded = heights_tallest_first.peek().unwrap().1.height.next_power_of_two(); - - let ext_slice: Vec>> = 
heights_tallest_first - .peeking_take_while(|(_, dims)| dims.height.next_power_of_two() == curr_height_padded) - .flat_map(|(i, _)| opened_values[i].as_slice()) - .cloned() - .collect::>(); - let felt_slice: Vec> = - ext_slice.iter().flat_map(|ext| ext.as_slice()).cloned().collect::>(); - let mut root: SC::Digest = SC::hash(builder, &felt_slice[..]); - - zip(index_bits, proof).for_each(|(bit, sibling): (C::Bit, SC::Digest)| { - let compress_args = SC::select_chain_digest(builder, bit, [root, sibling]); - - root = SC::compress(builder, compress_args); - curr_height_padded >>= 1; - - let next_height = heights_tallest_first - .peek() - .map(|(_, dims)| dims.height) - .filter(|h| h.next_power_of_two() == curr_height_padded); - - if let Some(next_height) = next_height { - let ext_slice: Vec>> = heights_tallest_first - .peeking_take_while(|(_, dims)| dims.height == next_height) - .flat_map(|(i, _)| opened_values[i].as_slice()) - .cloned() - .collect::>(); - let felt_slice: Vec> = - ext_slice.iter().flat_map(|ext| ext.as_slice()).cloned().collect::>(); - let next_height_openings_digest = SC::hash(builder, &felt_slice); - root = SC::compress(builder, [root, next_height_openings_digest]); - } - }); - - SC::assert_digest_eq(builder, root, commit); -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - challenger::DuplexChallengerVariable, utils::tests::run_test_recursion, - BatchOpeningVariable, FriCommitPhaseProofStepVariable, FriProofVariable, - FriQueryProofVariable, TwoAdicPcsMatsVariable, TwoAdicPcsProofVariable, - }; - use p3_challenger::{CanObserve, CanSample, FieldChallenger}; - use p3_commit::{Pcs, TwoAdicMultiplicativeCoset}; - use p3_field::AbstractField; - use p3_fri::{verifier, TwoAdicFriPcsProof}; - use p3_matrix::dense::RowMajorMatrix; - use rand::{ - rngs::{OsRng, StdRng}, - SeedableRng, - }; - use sp1_recursion_compiler::{ - asm::AsmBuilder, - config::InnerConfig, - ir::{Builder, Ext, SymbolicExt}, - }; - use sp1_stark::{ - baby_bear_poseidon2::BabyBearPoseidon2, inner_fri_config, inner_perm, InnerChallenge, - InnerChallengeMmcs, InnerChallenger, InnerCompress, InnerDft, InnerFriProof, InnerHash, - InnerPcs, InnerVal, InnerValMmcs, StarkGenericConfig, - }; - - use sp1_recursion_core_v2::DIGEST_SIZE; - - use crate::Digest; - - type C = InnerConfig; - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - - pub fn const_fri_proof( - builder: &mut AsmBuilder, - fri_proof: InnerFriProof, - ) -> FriProofVariable { - // Set the commit phase commits. - let commit_phase_commits = fri_proof - .commit_phase_commits - .iter() - .map(|commit| { - let commit: [F; DIGEST_SIZE] = (*commit).into(); - commit.map(|x| builder.eval(x)) - }) - .collect::>(); - - // Set the query proofs. - let query_proofs = fri_proof - .query_proofs - .iter() - .map(|query_proof| { - let commit_phase_openings = query_proof - .commit_phase_openings - .iter() - .map(|commit_phase_opening| { - let sibling_value = - builder.eval(SymbolicExt::from_f(commit_phase_opening.sibling_value)); - let opening_proof = commit_phase_opening - .opening_proof - .iter() - .map(|sibling| sibling.map(|x| builder.eval(x))) - .collect::>(); - FriCommitPhaseProofStepVariable { sibling_value, opening_proof } - }) - .collect::>(); - FriQueryProofVariable { commit_phase_openings } - }) - .collect::>(); - - // Initialize the FRI proof variable. 
- FriProofVariable { - commit_phase_commits, - query_proofs, - final_poly: builder.eval(SymbolicExt::from_f(fri_proof.final_poly)), - pow_witness: builder.eval(fri_proof.pow_witness), - } - } - - pub fn const_two_adic_pcs_proof( - builder: &mut Builder, - proof: TwoAdicFriPcsProof, - ) -> TwoAdicPcsProofVariable { - let fri_proof = const_fri_proof(builder, proof.fri_proof); - let query_openings = proof - .query_openings - .iter() - .map(|query_opening| { - query_opening - .iter() - .map(|opening| BatchOpeningVariable { - opened_values: opening - .opened_values - .iter() - .map(|opened_value| { - opened_value - .iter() - .map(|value| vec![builder.eval::, _>(*value)]) - .collect::>() - }) - .collect::>(), - opening_proof: opening - .opening_proof - .iter() - .map(|opening_proof| opening_proof.map(|x| builder.eval(x))) - .collect::>(), - }) - .collect::>() - }) - .collect::>(); - TwoAdicPcsProofVariable { fri_proof, query_openings } - } - - #[allow(clippy::type_complexity)] - pub fn const_two_adic_pcs_rounds( - builder: &mut Builder, - commit: [F; DIGEST_SIZE], - os: Vec<(TwoAdicMultiplicativeCoset, Vec<(InnerChallenge, Vec)>)>, - ) -> (Digest, Vec>) { - let commit: Digest = commit.map(|x| builder.eval(x)); - - let mut domains_points_and_opens = Vec::new(); - for (domain, poly) in os.into_iter() { - let points: Vec> = - poly.iter().map(|(p, _)| builder.eval(SymbolicExt::from_f(*p))).collect::>(); - let values: Vec>> = poly - .iter() - .map(|(_, v)| { - v.clone() - .iter() - .map(|t| builder.eval(SymbolicExt::from_f(*t))) - .collect::>() - }) - .collect::>(); - let domain_points_and_values = TwoAdicPcsMatsVariable { domain, points, values }; - domains_points_and_opens.push(domain_points_and_values); - } - - (commit, vec![TwoAdicPcsRoundVariable { batch_commit: commit, domains_points_and_opens }]) - } - - /// Reference: https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/merkle-tree/src/mmcs.rs#L421 - #[test] - fn size_gaps() { - use p3_commit::Mmcs; - let perm = inner_perm(); - let hash = InnerHash::new(perm.clone()); - let compress = InnerCompress::new(perm); - let mmcs = InnerValMmcs::new(hash, compress); - - let mut builder = Builder::::default(); - - // 4 mats with 1000 rows, 8 columns - let large_mats = (0..4).map(|_| RowMajorMatrix::::rand(&mut OsRng, 1000, 8)); - let large_mat_dims = (0..4).map(|_| Dimensions { height: 1000, width: 8 }); - - // 5 mats with 70 rows, 8 columns - let medium_mats = (0..5).map(|_| RowMajorMatrix::::rand(&mut OsRng, 70, 8)); - let medium_mat_dims = (0..5).map(|_| Dimensions { height: 70, width: 8 }); - - // 6 mats with 8 rows, 8 columns - let small_mats = (0..6).map(|_| RowMajorMatrix::::rand(&mut OsRng, 8, 8)); - let small_mat_dims = (0..6).map(|_| Dimensions { height: 8, width: 8 }); - - let (commit, prover_data) = - mmcs.commit(large_mats.chain(medium_mats).chain(small_mats).collect_vec()); - - let commit: [_; DIGEST_SIZE] = commit.into(); - let commit = commit.map(|x| builder.eval(x)); - // open the 6th row of each matrix and verify - let (opened_values, proof) = mmcs.open_batch(6, &prover_data); - let opened_values = opened_values - .into_iter() - .map(|x| x.into_iter().map(|y| vec![builder.eval::, _>(y)]).collect()) - .collect(); - let index = builder.eval(F::from_canonical_u32(6)); - let index_bits = C::num2bits(&mut builder, index, 32); - let proof = proof.into_iter().map(|p| p.map(|x| builder.eval(x))).collect(); - verify_batch::<_, SC>( - &mut builder, - commit, - 
large_mat_dims.chain(medium_mat_dims).chain(small_mat_dims).collect_vec(), - index_bits, - opened_values, - proof, - ); - } - - #[test] - fn test_fri_verify_shape_and_sample_challenges() { - let mut rng = &mut OsRng; - let log_degrees = &[16, 9, 7, 4, 2]; - let perm = inner_perm(); - let fri_config = inner_fri_config(); - let hash = InnerHash::new(perm.clone()); - let compress = InnerCompress::new(perm.clone()); - let val_mmcs = InnerValMmcs::new(hash, compress); - let dft = InnerDft {}; - let pcs: InnerPcs = - InnerPcs::new(log_degrees.iter().copied().max().unwrap(), dft, val_mmcs, fri_config); - - // Generate proof. - let domains_and_polys = log_degrees - .iter() - .map(|&d| { - ( - >::natural_domain_for_degree( - &pcs, - 1 << d, - ), - RowMajorMatrix::::rand(&mut rng, 1 << d, 10), - ) - }) - .collect::>(); - let (commit, data) = >::commit( - &pcs, - domains_and_polys.clone(), - ); - let mut challenger = InnerChallenger::new(perm.clone()); - challenger.observe(commit); - let zeta = challenger.sample_ext_element::(); - let points = repeat_with(|| vec![zeta]).take(domains_and_polys.len()).collect::>(); - let (_, proof) = pcs.open(vec![(&data, points)], &mut challenger); - - // Verify proof. - let mut challenger = InnerChallenger::new(perm.clone()); - challenger.observe(commit); - let _: InnerChallenge = challenger.sample(); - let fri_challenges_gt = verifier::verify_shape_and_sample_challenges( - &inner_fri_config(), - &proof.fri_proof, - &mut challenger, - ) - .unwrap(); - - // Define circuit. - let mut builder = Builder::::default(); - let config = inner_fri_config(); - let fri_proof = const_fri_proof(&mut builder, proof.fri_proof); - - let mut challenger = DuplexChallengerVariable::new(&mut builder); - let commit: [_; DIGEST_SIZE] = commit.into(); - let commit: [Felt; DIGEST_SIZE] = commit.map(|x| builder.eval(x)); - challenger.observe_slice(&mut builder, commit); - let _ = challenger.sample_ext(&mut builder); - let fri_challenges = verify_shape_and_sample_challenges::( - &mut builder, - &config, - &fri_proof, - &mut challenger, - ); - - for i in 0..fri_challenges_gt.betas.len() { - builder.assert_ext_eq( - SymbolicExt::from_f(fri_challenges_gt.betas[i]), - fri_challenges.betas[i], - ); - } - - for i in 0..fri_challenges_gt.query_indices.len() { - let query_indices = - C::bits2num(&mut builder, fri_challenges.query_indices[i].iter().cloned()); - builder.assert_felt_eq( - F::from_canonical_usize(fri_challenges_gt.query_indices[i]), - query_indices, - ); - } - - run_test_recursion(builder.operations, None); - } - - #[test] - fn test_verify_two_adic_pcs_inner() { - let mut rng = StdRng::seed_from_u64(0xDEADBEEF); - let log_degrees = &[19, 19]; - let perm = inner_perm(); - let fri_config = inner_fri_config(); - let hash = InnerHash::new(perm.clone()); - let compress = InnerCompress::new(perm.clone()); - let val_mmcs = InnerValMmcs::new(hash, compress); - let dft = InnerDft {}; - let pcs: InnerPcs = - InnerPcs::new(log_degrees.iter().copied().max().unwrap(), dft, val_mmcs, fri_config); - - // Generate proof. 
- let domains_and_polys = log_degrees - .iter() - .map(|&d| { - ( - >::natural_domain_for_degree( - &pcs, - 1 << d, - ), - RowMajorMatrix::::rand(&mut rng, 1 << d, 100), - ) - }) - .collect::>(); - let (commit, data) = >::commit( - &pcs, - domains_and_polys.clone(), - ); - let mut challenger = InnerChallenger::new(perm.clone()); - challenger.observe(commit); - let zeta = challenger.sample_ext_element::(); - let points = domains_and_polys.iter().map(|_| vec![zeta]).collect::>(); - let (opening, proof) = pcs.open(vec![(&data, points)], &mut challenger); - - // Verify proof. - let mut challenger = InnerChallenger::new(perm.clone()); - challenger.observe(commit); - let x1 = challenger.sample_ext_element::(); - let os = domains_and_polys - .iter() - .zip(&opening[0]) - .map(|((domain, _), mat_openings)| (*domain, vec![(zeta, mat_openings[0].clone())])) - .collect::>(); - pcs.verify(vec![(commit, os.clone())], &proof, &mut challenger).unwrap(); - - // Define circuit. - let mut builder = Builder::::default(); - let config = inner_fri_config(); - let proof = const_two_adic_pcs_proof(&mut builder, proof); - let (commit, rounds) = const_two_adic_pcs_rounds(&mut builder, commit.into(), os); - let mut challenger = DuplexChallengerVariable::new(&mut builder); - challenger.observe_slice(&mut builder, commit); - let x2 = challenger.sample_ext(&mut builder); - let x1: Ext<_, _> = builder.constant(x1); - builder.assert_ext_eq(x1, x2); - verify_two_adic_pcs::<_, BabyBearPoseidon2>( - &mut builder, - &config, - &proof, - &mut challenger, - rounds, - ); - - run_test_recursion(builder.operations, std::iter::empty()); - } -} diff --git a/crates/recursion/circuit-v2/src/hash.rs b/crates/recursion/circuit-v2/src/hash.rs deleted file mode 100644 index 4e99a220bc..0000000000 --- a/crates/recursion/circuit-v2/src/hash.rs +++ /dev/null @@ -1,126 +0,0 @@ -use std::iter::zip; - -use itertools::Itertools; -use p3_baby_bear::BabyBear; -use p3_field::{AbstractField, Field}; - -use p3_bn254_fr::Bn254Fr; -use sp1_recursion_compiler::{ - circuit::CircuitV2Builder, - ir::{Builder, Config, DslIr, Felt, Var}, -}; -use sp1_recursion_core_v2::{stark::config::BabyBearPoseidon2Outer, DIGEST_SIZE}; -use sp1_stark::baby_bear_poseidon2::BabyBearPoseidon2; - -use crate::{ - challenger::{reduce_32, RATE, SPONGE_SIZE}, - select_chain, CircuitConfig, -}; - -pub trait FieldHasherVariable { - type Digest: Clone + Copy; - - fn hash(builder: &mut Builder, input: &[Felt]) -> Self::Digest; - - fn compress(builder: &mut Builder, input: [Self::Digest; 2]) -> Self::Digest; - - fn assert_digest_eq(builder: &mut Builder, a: Self::Digest, b: Self::Digest); - - // Encountered many issues trying to make the following two parametrically polymorphic. 
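An aside on digest selection (not part of the original file): `select_chain_digest` conditionally swaps the (current, sibling) digest pair according to an index bit before each compression, which is the step `verify_batch` repeats while walking a Merkle path earlier in this diff. A plain-Rust analogue with hypothetical toy types:

fn select_pair<T: Copy>(should_swap: bool, a: T, b: T) -> [T; 2] {
    if should_swap { [b, a] } else { [a, b] }
}

fn verify_merkle_path<T: Copy, F: Fn([T; 2]) -> T>(
    compress: F,
    mut node: T,
    index_bits: &[bool], // little-endian bits of the leaf index
    siblings: &[T],
) -> T {
    for (&bit, &sibling) in index_bits.iter().zip(siblings) {
        node = compress(select_pair(bit, node, sibling));
    }
    node // caller compares this against the committed root
}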
- fn select_chain_digest( - builder: &mut Builder, - should_swap: C::Bit, - input: [Self::Digest; 2], - ) -> [Self::Digest; 2]; -} - -impl>> FieldHasherVariable - for BabyBearPoseidon2 -{ - type Digest = [Felt; DIGEST_SIZE]; - - fn hash(builder: &mut Builder, input: &[Felt<::F>]) -> Self::Digest { - builder.poseidon2_hash_v2(input) - } - - fn compress(builder: &mut Builder, input: [Self::Digest; 2]) -> Self::Digest { - builder.poseidon2_compress_v2(input.into_iter().flatten()) - } - - fn assert_digest_eq(builder: &mut Builder, a: Self::Digest, b: Self::Digest) { - zip(a, b).for_each(|(e1, e2)| builder.assert_felt_eq(e1, e2)); - } - - fn select_chain_digest( - builder: &mut Builder, - should_swap: ::Bit, - input: [Self::Digest; 2], - ) -> [Self::Digest; 2] { - let err_msg = "select_chain's return value should have length the sum of its inputs"; - let mut selected = select_chain(builder, should_swap, input[0], input[1]); - let ret = [ - core::array::from_fn(|_| selected.next().expect(err_msg)), - core::array::from_fn(|_| selected.next().expect(err_msg)), - ]; - assert_eq!(selected.next(), None, "{}", err_msg); - ret - } -} - -pub const BN254_DIGEST_SIZE: usize = 1; -impl>> FieldHasherVariable - for BabyBearPoseidon2Outer -{ - type Digest = [Var; BN254_DIGEST_SIZE]; - - fn hash(builder: &mut Builder, input: &[Felt<::F>]) -> Self::Digest { - assert!(C::N::bits() == p3_bn254_fr::Bn254Fr::bits()); - assert!(C::F::bits() == p3_baby_bear::BabyBear::bits()); - let num_f_elms = C::N::bits() / C::F::bits(); - let mut state: [Var; SPONGE_SIZE] = - [builder.eval(C::N::zero()), builder.eval(C::N::zero()), builder.eval(C::N::zero())]; - for block_chunk in &input.iter().chunks(RATE) { - for (chunk_id, chunk) in (&block_chunk.chunks(num_f_elms)).into_iter().enumerate() { - let chunk = chunk.collect_vec().into_iter().copied().collect::>(); - state[chunk_id] = reduce_32(builder, chunk.as_slice()); - } - builder.push(DslIr::CircuitPoseidon2Permute(state)) - } - - [state[0]; BN254_DIGEST_SIZE] - } - - fn compress(builder: &mut Builder, input: [Self::Digest; 2]) -> Self::Digest { - let state: [Var; SPONGE_SIZE] = - [builder.eval(input[0][0]), builder.eval(input[1][0]), builder.eval(C::N::zero())]; - builder.push(DslIr::CircuitPoseidon2Permute(state)); - [state[0]; BN254_DIGEST_SIZE] - } - - fn assert_digest_eq(builder: &mut Builder, a: Self::Digest, b: Self::Digest) { - zip(a, b).for_each(|(e1, e2)| builder.assert_var_eq(e1, e2)); - } - - fn select_chain_digest( - builder: &mut Builder, - should_swap: ::Bit, - input: [Self::Digest; 2], - ) -> [Self::Digest; 2] { - let result0: [Var<_>; 1] = core::array::from_fn(|j| { - let result = builder.uninit(); - builder.push(DslIr::CircuitSelectV(should_swap, input[1][j], input[0][j], result)); - result - }); - let result1: [Var<_>; 1] = core::array::from_fn(|j| { - let result = builder.uninit(); - builder.push(DslIr::CircuitSelectV(should_swap, input[0][j], input[1][j], result)); - result - }); - - [result0, result1] - } -} - -// impl> FieldHasherVariable for OuterHash { - -// } diff --git a/crates/recursion/circuit-v2/src/lib.rs b/crates/recursion/circuit-v2/src/lib.rs deleted file mode 100644 index c3aedb819f..0000000000 --- a/crates/recursion/circuit-v2/src/lib.rs +++ /dev/null @@ -1,353 +0,0 @@ -//! Copied from [`sp1_recursion_program`]. 
- -use std::{ - iter::{repeat, zip}, - ops::{Add, Mul}, -}; - -use challenger::{ - CanCopyChallenger, CanObserveVariable, DuplexChallengerVariable, FieldChallengerVariable, - MultiField32ChallengerVariable, -}; -use hash::FieldHasherVariable; -use p3_bn254_fr::Bn254Fr; -use p3_field::AbstractField; -use p3_matrix::dense::RowMajorMatrix; -use sp1_recursion_compiler::{ - circuit::CircuitV2Builder, - config::{InnerConfig, OuterConfig}, - ir::{Builder, Config, DslIr, Ext, Felt, Var, Variable}, -}; - -mod types; - -pub mod build_wrap_v2; -pub mod challenger; -pub mod constraints; -pub mod domain; -pub mod fri; -pub mod hash; -pub mod machine; -pub mod stark; -pub(crate) mod utils; -pub mod witness; - -use sp1_stark::{ - baby_bear_poseidon2::{BabyBearPoseidon2, ValMmcs}, - StarkGenericConfig, -}; -pub use types::*; - -use p3_challenger::{CanObserve, CanSample, FieldChallenger, GrindingChallenger}; -use p3_commit::{ExtensionMmcs, Mmcs}; -use p3_dft::Radix2DitParallel; -use p3_fri::{FriConfig, TwoAdicFriPcs}; -use sp1_recursion_core_v2::{ - stark::config::{BabyBearPoseidon2Outer, OuterValMmcs}, - D, -}; - -use p3_baby_bear::BabyBear; - -type EF = ::Challenge; - -pub type PcsConfig = FriConfig< - ExtensionMmcs< - ::Val, - ::Challenge, - ::ValMmcs, - >, ->; - -pub type Digest = >::Digest; - -pub type FriMmcs = ExtensionMmcs::ValMmcs>; - -pub trait BabyBearFriConfig: - StarkGenericConfig< - Val = BabyBear, - Challenge = EF, - Challenger = Self::FriChallenger, - Pcs = TwoAdicFriPcs< - BabyBear, - Radix2DitParallel, - Self::ValMmcs, - ExtensionMmcs, - >, -> -{ - type ValMmcs: Mmcs> = Self::RowMajorProverData> - + Send - + Sync; - type RowMajorProverData: Clone + Send + Sync; - type FriChallenger: CanObserve<>::Commitment> - + CanSample - + GrindingChallenger - + FieldChallenger; - - fn fri_config(&self) -> &FriConfig>; -} - -pub trait BabyBearFriConfigVariable>: - BabyBearFriConfig + FieldHasherVariable -{ - type FriChallengerVariable: FieldChallengerVariable::Bit> - + CanObserveVariable>::Digest> - + CanCopyChallenger; - - /// Get a new challenger corresponding to the given config. 
- fn challenger_variable(&self, builder: &mut Builder) -> Self::FriChallengerVariable; -} - -pub trait CircuitConfig: Config { - type Bit: Clone + Variable; - - fn read_bit(builder: &mut Builder) -> Self::Bit; - - fn read_felt(builder: &mut Builder) -> Felt; - - fn read_ext(builder: &mut Builder) -> Ext; - - fn ext2felt( - builder: &mut Builder, - ext: Ext<::F, ::EF>, - ) -> [Felt<::F>; D]; - - fn exp_reverse_bits( - builder: &mut Builder, - input: Felt<::F>, - power_bits: Vec, - ) -> Felt<::F>; - - fn num2bits( - builder: &mut Builder, - num: Felt<::F>, - num_bits: usize, - ) -> Vec; - - fn bits2num( - builder: &mut Builder, - bits: impl IntoIterator, - ) -> Felt<::F>; - - #[allow(clippy::type_complexity)] - fn select_chain_ef( - builder: &mut Builder, - should_swap: Self::Bit, - first: impl IntoIterator::F, ::EF>> + Clone, - second: impl IntoIterator::F, ::EF>> + Clone, - ) -> Vec::F, ::EF>>; -} - -impl CircuitConfig for InnerConfig { - type Bit = Felt<::F>; - - fn read_bit(builder: &mut Builder) -> Self::Bit { - builder.hint_felt_v2() - } - - fn read_felt(builder: &mut Builder) -> Felt { - builder.hint_felt_v2() - } - - fn read_ext(builder: &mut Builder) -> Ext { - builder.hint_ext_v2() - } - - fn ext2felt( - builder: &mut Builder, - ext: Ext<::F, ::EF>, - ) -> [Felt<::F>; D] { - builder.ext2felt_v2(ext) - } - - fn exp_reverse_bits( - builder: &mut Builder, - input: Felt<::F>, - power_bits: Vec::F>>, - ) -> Felt<::F> { - builder.exp_reverse_bits_v2(input, power_bits) - } - - fn num2bits( - builder: &mut Builder, - num: Felt<::F>, - num_bits: usize, - ) -> Vec::F>> { - builder.num2bits_v2_f(num, num_bits) - } - - fn bits2num( - builder: &mut Builder, - bits: impl IntoIterator::F>>, - ) -> Felt<::F> { - builder.bits2num_v2_f(bits) - } - - fn select_chain_ef( - builder: &mut Builder, - should_swap: Self::Bit, - first: impl IntoIterator::F, ::EF>> + Clone, - second: impl IntoIterator::F, ::EF>> + Clone, - ) -> Vec::F, ::EF>> { - let one: Felt<_> = builder.constant(Self::F::one()); - let shouldnt_swap: Felt<_> = builder.eval(one - should_swap); - - let id_branch = first.clone().into_iter().chain(second.clone()); - let swap_branch = second.into_iter().chain(first); - zip(zip(id_branch, swap_branch), zip(repeat(shouldnt_swap), repeat(should_swap))) - .map(|((id_v, sw_v), (id_c, sw_c))| builder.eval(id_v * id_c + sw_v * sw_c)) - .collect() - } -} - -impl CircuitConfig for OuterConfig { - type Bit = Var<::N>; - - fn read_bit(builder: &mut Builder) -> Self::Bit { - builder.witness_var() - } - - fn read_felt(builder: &mut Builder) -> Felt { - builder.witness_felt() - } - - fn read_ext(builder: &mut Builder) -> Ext { - builder.witness_ext() - } - - fn ext2felt( - builder: &mut Builder, - ext: Ext<::F, ::EF>, - ) -> [Felt<::F>; D] { - let felts = core::array::from_fn(|_| builder.uninit()); - builder.operations.push(DslIr::CircuitExt2Felt(felts, ext)); - felts - } - - fn exp_reverse_bits( - builder: &mut Builder, - input: Felt<::F>, - power_bits: Vec::N>>, - ) -> Felt<::F> { - let mut result = builder.constant(Self::F::one()); - let power_f = input; - let bit_len = power_bits.len(); - - for i in 1..=bit_len { - let index = bit_len - i; - let bit = power_bits[index]; - let prod = builder.eval(result * power_f); - result = builder.select_f(bit, prod, result); - builder.assign(power_f, power_f * power_f); - } - result - } - - fn num2bits( - builder: &mut Builder, - num: Felt<::F>, - num_bits: usize, - ) -> Vec::N>> { - builder.num2bits_f_circuit(num)[..num_bits].to_vec() - } - - fn bits2num( - builder: 
&mut Builder, - bits: impl IntoIterator::N>>, - ) -> Felt<::F> { - let result = builder.eval(Self::F::zero()); - for (i, bit) in bits.into_iter().enumerate() { - let to_add: Felt<_> = builder.uninit(); - let pow2 = builder.constant(Self::F::from_canonical_u32(1 << i)); - let zero = builder.constant(Self::F::zero()); - builder.operations.push(DslIr::CircuitSelectF(bit, pow2, zero, to_add)); - builder.assign(result, result + to_add); - } - result - } - - fn select_chain_ef( - builder: &mut Builder, - should_swap: Self::Bit, - first: impl IntoIterator::F, ::EF>> + Clone, - second: impl IntoIterator::F, ::EF>> + Clone, - ) -> Vec::F, ::EF>> { - let id_branch = first.clone().into_iter().chain(second.clone()); - let swap_branch = second.into_iter().chain(first); - zip(id_branch, swap_branch) - .map(|(id_v, sw_v): (Ext<_, _>, Ext<_, _>)| -> Ext<_, _> { - let result: Ext<_, _> = builder.uninit(); - builder.operations.push(DslIr::CircuitSelectE(should_swap, sw_v, id_v, result)); - result - }) - .collect() - } -} - -impl BabyBearFriConfig for BabyBearPoseidon2 { - type ValMmcs = ValMmcs; - type FriChallenger = ::Challenger; - type RowMajorProverData = >::ProverData>; - - fn fri_config(&self) -> &FriConfig> { - self.pcs().fri_config() - } -} - -impl BabyBearFriConfig for BabyBearPoseidon2Outer { - type ValMmcs = OuterValMmcs; - type FriChallenger = ::Challenger; - - type RowMajorProverData = - >::ProverData>; - - fn fri_config(&self) -> &FriConfig> { - self.pcs().fri_config() - } -} - -impl>> BabyBearFriConfigVariable - for BabyBearPoseidon2 -{ - type FriChallengerVariable = DuplexChallengerVariable; - - fn challenger_variable(&self, builder: &mut Builder) -> Self::FriChallengerVariable { - DuplexChallengerVariable::new(builder) - } -} - -impl>> BabyBearFriConfigVariable - for BabyBearPoseidon2Outer -{ - type FriChallengerVariable = MultiField32ChallengerVariable; - - fn challenger_variable(&self, builder: &mut Builder) -> Self::FriChallengerVariable { - MultiField32ChallengerVariable::new(builder) - } -} - -pub fn select_chain<'a, C, R, S>( - builder: &'a mut Builder, - should_swap: R, - first: impl IntoIterator + Clone + 'a, - second: impl IntoIterator + Clone + 'a, -) -> impl Iterator + 'a -where - C: Config, - R: Variable + 'a, - S: Variable + 'a, - >::Expression: AbstractField - + Mul<>::Expression, Output = >::Expression>, - >::Expression: Add>::Expression>, -{ - let should_swap: >::Expression = should_swap.into(); - let one = >::Expression::one(); - let shouldnt_swap = one - should_swap.clone(); - - let id_branch = - first.clone().into_iter().chain(second.clone()).map(>::Expression::from); - let swap_branch = second.into_iter().chain(first).map(>::Expression::from); - zip(zip(id_branch, swap_branch), zip(repeat(shouldnt_swap), repeat(should_swap))) - .map(|((id_v, sw_v), (id_c, sw_c))| builder.eval(id_c * id_v + sw_c * sw_v)) -} diff --git a/crates/recursion/circuit-v2/src/machine/compress.rs b/crates/recursion/circuit-v2/src/machine/compress.rs deleted file mode 100644 index 8b13789179..0000000000 --- a/crates/recursion/circuit-v2/src/machine/compress.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/crates/recursion/circuit-v2/src/machine/core.rs b/crates/recursion/circuit-v2/src/machine/core.rs deleted file mode 100644 index 32d62cc2ca..0000000000 --- a/crates/recursion/circuit-v2/src/machine/core.rs +++ /dev/null @@ -1,558 +0,0 @@ -use std::{ - array, - borrow::{Borrow, BorrowMut}, - marker::PhantomData, -}; - -use itertools::Itertools; -use p3_baby_bear::BabyBear; -use p3_commit::Mmcs; -use 
p3_field::AbstractField; -use p3_matrix::dense::RowMajorMatrix; -use sp1_core_machine::riscv::RiscvAir; -use sp1_primitives::consts::WORD_SIZE; -use sp1_recursion_core_v2::air::PV_DIGEST_NUM_WORDS; -use sp1_stark::{ - air::{PublicValues, POSEIDON_NUM_WORDS}, - StarkMachine, Word, -}; - -use crate::{ - utils::commit_recursion_public_values, BabyBearFriConfig, BabyBearFriConfigVariable, - CircuitConfig, -}; - -use sp1_recursion_compiler::{ - circuit::CircuitV2Builder, - ir::{Builder, Config, Ext, ExtConst, Felt}, -}; - -use sp1_recursion_core_v2::{ - air::{RecursionPublicValues, RECURSIVE_PROOF_NUM_PV_ELTS}, - DIGEST_SIZE, -}; - -use crate::{ - challenger::{CanObserveVariable, DuplexChallengerVariable}, - stark::{ShardProofVariable, StarkVerifier}, - VerifyingKeyVariable, -}; - -pub struct SP1RecursionWitnessVariable< - C: CircuitConfig, - SC: BabyBearFriConfigVariable, -> { - pub vk: VerifyingKeyVariable, - pub shard_proofs: Vec>, - pub leaf_challenger: SC::FriChallengerVariable, - pub initial_reconstruct_challenger: DuplexChallengerVariable, - pub is_complete: Felt, -} - -/// A program for recursively verifying a batch of SP1 proofs. -#[derive(Debug, Clone, Copy)] -pub struct SP1RecursiveVerifier { - _phantom: PhantomData<(C, SC)>, -} - -impl SP1RecursiveVerifier -where - SC: BabyBearFriConfigVariable< - C, - FriChallengerVariable = DuplexChallengerVariable, - Digest = [Felt; DIGEST_SIZE], - >, - C: CircuitConfig>, - >::ProverData>: Clone, -{ - /// Verify a batch of SP1 shard proofs and aggregate their public values. - /// - /// This program represents a first recursive step in the verification of an SP1 proof - /// consisting of one or more shards. Each shard proof is verified and its public values are - /// aggregated into a single set representing the start and end state of the program execution - /// across all shards. - /// - /// # Constraints - /// - /// ## Verifying the STARK proofs. - /// For each shard, the verifier asserts the correctness of the STARK proof which is composed - /// of verifying the FRI proof for openings and verifying the constraints. - /// - /// ## Aggregating the shard public values. - /// See [SP1Prover::verify] for the verification algorithm of a complete SP1 proof. In this - /// function, we are aggregating several shard proofs and attesting to an aggregated state which - /// represents all the shards. - /// - /// ## The leaf challenger. - /// A key difference between the recursive tree verification and the complete one in - /// [SP1Prover::verify] is that the recursive verifier has no way of reconstructing the - /// chanllenger only from a part of the shard proof. Therefore, the value of the leaf challenger - /// is witnessed in the program and the verifier asserts correctness given this challenger. - /// In the course of the recursive verification, the challenger is reconstructed by observing - /// the commitments one by one, and in the final step, the challenger is asserted to be the same - /// as the one witnessed here. - pub fn verify( - builder: &mut Builder, - machine: &StarkMachine>, - input: SP1RecursionWitnessVariable, - ) { - // Read input. - let SP1RecursionWitnessVariable { - vk, - shard_proofs, - leaf_challenger, - initial_reconstruct_challenger, - is_complete, - } = input; - - // Initialize shard variables. - let initial_shard: Felt<_> = builder.uninit(); - let current_shard: Felt<_> = builder.uninit(); - - // Initialize execution shard variables. 
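The leaf-challenger remark in the doc comment above is the crux of this verifier, so here is a minimal non-circuit sketch of the same idea, using a stand-in `Sponge` type rather than the real Poseidon2 challenger (the names below are illustrative, not the SP1 API): the claimed transcript state is witnessed, a second one is rebuilt by observing every shard commitment and its public values, and the two must agree at the end.

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    /// Stand-in transcript; the real verifier uses a Poseidon2 duplex challenger.
    #[derive(Clone, PartialEq, Eq, Debug, Default)]
    struct Sponge(u64);

    impl Sponge {
        fn observe(&mut self, bytes: &[u8]) {
            let mut h = DefaultHasher::new();
            (self.0, bytes).hash(&mut h);
            self.0 = h.finish();
        }
    }

    /// Rebuild the challenger from (commitment, public values) pairs and compare it with
    /// the witnessed one, mirroring the "reconstruct then assert equal" step described above.
    fn leaf_challenger_is_consistent(witnessed: &Sponge, shards: &[(Vec<u8>, Vec<u8>)]) -> bool {
        let mut rebuilt = Sponge::default();
        for (commitment, public_values) in shards {
            rebuilt.observe(commitment);
            rebuilt.observe(public_values);
        }
        rebuilt == *witnessed
    }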
- let initial_execution_shard: Felt<_> = builder.uninit(); - let current_execution_shard: Felt<_> = builder.uninit(); - - // Initialize program counter variables. - let start_pc: Felt<_> = builder.uninit(); - let current_pc: Felt<_> = builder.uninit(); - - // Initialize memory initialization and finalization variables. - let initial_previous_init_addr_bits: [Felt<_>; 32] = array::from_fn(|_| builder.uninit()); - let initial_previous_finalize_addr_bits: [Felt<_>; 32] = - array::from_fn(|_| builder.uninit()); - let current_init_addr_bits: [Felt<_>; 32] = array::from_fn(|_| builder.uninit()); - let current_finalize_addr_bits: [Felt<_>; 32] = array::from_fn(|_| builder.uninit()); - - // Initialize the exit code variable. - let exit_code: Felt<_> = builder.uninit(); - - // Initialize the public values digest. - let committed_value_digest: [Word>; PV_DIGEST_NUM_WORDS] = - array::from_fn(|_| Word(array::from_fn(|_| builder.uninit()))); - - // Initialize the deferred proofs digest. - let deferred_proofs_digest: [Felt<_>; POSEIDON_NUM_WORDS] = - array::from_fn(|_| builder.uninit()); - - // Initialize the challenger variables. - let leaf_challenger_public_values = leaf_challenger.public_values(builder); - let mut reconstruct_challenger: DuplexChallengerVariable<_> = - initial_reconstruct_challenger.copy(builder); - - // Initialize the cumulative sum. - let cumulative_sum: Ext<_, _> = builder.eval(C::EF::zero().cons()); - - // Assert that the number of proofs is not zero. - assert!(!shard_proofs.is_empty()); - - // Verify proofs. - for (i, shard_proof) in shard_proofs.into_iter().enumerate() { - let contains_cpu = shard_proof.contains_cpu(); - let _contains_memory_init = shard_proof.contains_memory_init(); - let _contains_memory_finalize = shard_proof.contains_memory_finalize(); - - // Get the public values. - let public_values: &PublicValues>, Felt<_>> = - shard_proof.public_values.as_slice().borrow(); - - let _shard = public_values.shard; - - // If this is the first proof in the batch, initialize the variables. - if i == 0 { - // Shard. - builder.assign(initial_shard, public_values.shard); - builder.assign(current_shard, public_values.shard); - - // Execution shard. - builder.assign(initial_execution_shard, public_values.execution_shard); - builder.assign(current_execution_shard, public_values.execution_shard); - - // Program counter. - builder.assign(start_pc, public_values.start_pc); - builder.assign(current_pc, public_values.start_pc); - - // Memory initialization & finalization. - for ((bit, pub_bit), first_bit) in current_init_addr_bits - .iter() - .zip(public_values.previous_init_addr_bits.iter()) - .zip(initial_previous_init_addr_bits.iter()) - { - builder.assign(*bit, *pub_bit); - builder.assign(*first_bit, *pub_bit); - } - for ((bit, pub_bit), first_bit) in current_finalize_addr_bits - .iter() - .zip(public_values.previous_finalize_addr_bits.iter()) - .zip(initial_previous_finalize_addr_bits.iter()) - { - builder.assign(*bit, *pub_bit); - builder.assign(*first_bit, *pub_bit); - } - - // Exit code. - builder.assign(exit_code, public_values.exit_code); - - // Commited public values digests. - for (word, first_word) in committed_value_digest - .iter() - .zip_eq(public_values.committed_value_digest.iter()) - { - for (byte, first_byte) in word.0.iter().zip_eq(first_word.0.iter()) { - builder.assign(*byte, *first_byte); - } - } - - // Deferred proofs digests. 
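As a plain-Rust picture of what the `i == 0` branch above and the per-shard constraints below amount to (several of those asserts are commented out in this deleted snapshot), the batch is folded into one aggregated view: the first shard seeds the accumulator and every later shard must chain onto it. The field names here are simplified stand-ins, not the real `PublicValues` layout.

    #[derive(Clone, Copy)]
    struct ShardPv { shard: u32, start_pc: u32, next_pc: u32, exit_code: u32 }

    struct Aggregated { start_shard: u32, next_shard: u32, start_pc: u32, next_pc: u32 }

    fn aggregate(shards: &[ShardPv]) -> Result<Aggregated, &'static str> {
        let first = shards.first().ok_or("the batch must be non-empty")?;
        let mut acc = Aggregated {
            start_shard: first.shard,
            next_shard: first.shard,
            start_pc: first.start_pc,
            next_pc: first.start_pc,
        };
        for pv in shards {
            if pv.shard != acc.next_shard { return Err("shard index must increase by one"); }
            if pv.start_pc != acc.next_pc { return Err("start_pc must continue from the previous shard"); }
            if pv.exit_code != 0 { return Err("every shard must report exit code 0"); }
            acc.next_shard += 1;
            acc.next_pc = pv.next_pc;
        }
        Ok(acc)
    }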
- for (digest, first_digest) in deferred_proofs_digest - .iter() - .zip_eq(public_values.deferred_proofs_digest.iter()) - { - builder.assign(*digest, *first_digest); - } - } - - // // If the shard is the first shard, assert that the initial challenger is equal to a - // // fresh challenger observing the verifier key and the initial pc. - // let shard = felt2var(builder, public_values.shard); - // builder.if_eq(shard, C::N::one()).then(|builder| { - // let mut first_initial_challenger = DuplexChallengerVariable::new(builder); - // first_initial_challenger.observe(builder, vk.commitment.clone()); - // first_initial_challenger.observe(builder, vk.pc_start); - // initial_reconstruct_challenger.assert_eq(builder, &first_initial_challenger); - // }); - - // Verify the shard. - // - // Do not verify the cumulative sum here, since the permutation challenge is shared - // between all shards. - let mut challenger = leaf_challenger.copy(builder); - StarkVerifier::::verify_shard( - builder, - &vk, - machine, - &mut challenger, - &shard_proof, - ); - - // // First shard has a "CPU" constraint. - // { - // builder.if_eq(shard, C::N::one()).then(|builder| { - // builder.assert_var_eq(contains_cpu, C::N::one()); - // }); - // } - - // // CPU log degree bound check constraints. - // { - // for (i, chip) in machine.chips().iter().enumerate() { - // if chip.name() == "CPU" { - // builder.if_eq(contains_cpu, C::N::one()).then(|builder| { - // let index = builder.get(&proof.sorted_idxs, i); - // let cpu_log_degree = - // builder.get(&proof.opened_values.chips, index).log_degree; - // let cpu_log_degree_lt_max: Var<_> = builder.eval(C::N::zero()); - // builder - // .range(0, MAX_CPU_LOG_DEGREE + 1) - // .for_each(|j, builder| { - // builder.if_eq(j, cpu_log_degree).then(|builder| { - // builder.assign(cpu_log_degree_lt_max, C::N::one()); - // }); - // }); - // builder.assert_var_eq(cpu_log_degree_lt_max, C::N::one()); - // }); - // } - // } - // } - - // Shard constraints. - { - // Assert that the shard of the proof is equal to the current shard. - builder.assert_felt_eq(current_shard, public_values.shard); - - // Increment the current shard by one. - builder.assign(current_shard, current_shard + C::F::one()); - } - - // Execution shard constraints. - // let execution_shard = felt2var(builder, public_values.execution_shard); - { - // If the shard has a "CPU" chip, then the execution shard should be incremented by - // 1. - if contains_cpu { - // Assert that the shard of the proof is equal to the current shard. - // builder.assert_felt_eq(current_execution_shard, - // public_values.execution_shard); - - builder.assign(current_execution_shard, current_execution_shard + C::F::one()); - } - } - - // Program counter constraints. - { - // // If it's the first shard (which is the first execution shard), then the - // start_pc // should be vk.pc_start. - // builder.if_eq(shard, C::N::one()).then(|builder| { - // builder.assert_felt_eq(public_values.start_pc, vk.pc_start); - // }); - - // // Assert that the start_pc of the proof is equal to the current pc. - // builder.assert_felt_eq(current_pc, public_values.start_pc); - - // // If it's not a shard with "CPU", then assert that the start_pc equals the - // next_pc. builder.if_ne(contains_cpu, C::N::one()).then(|builder| - // { builder.assert_felt_eq(public_values.start_pc, - // public_values.next_pc); }); - - // // If it's a shard with "CPU", then assert that the start_pc is not zero. 
- // builder.if_eq(contains_cpu, C::N::one()).then(|builder| { - // builder.assert_felt_ne(public_values.start_pc, C::F::zero()); - // }); - - // Update current_pc to be the end_pc of the current proof. - builder.assign(current_pc, public_values.next_pc); - } - - // Exit code constraints. - { - // Assert that the exit code is zero (success) for all proofs. - builder.assert_felt_eq(exit_code, C::F::zero()); - } - - // Memory initialization & finalization constraints. - { - // // Assert that `init_addr_bits` and `finalize_addr_bits` are zero for the first - // execution shard. builder.if_eq(execution_shard, - // C::N::one()).then(|builder| { // Assert that the - // MemoryInitialize address bits are zero. for bit in - // current_init_addr_bits.iter() { builder.assert_felt_eq(* - // bit, C::F::zero()); } - - // // Assert that the MemoryFinalize address bits are zero. - // for bit in current_finalize_addr_bits.iter() { - // builder.assert_felt_eq(*bit, C::F::zero()); - // } - // }); - - // // Assert that the MemoryInitialize address bits match the current loop variable. - // for (bit, current_bit) in current_init_addr_bits - // .iter() - // .zip_eq(public_values.previous_init_addr_bits.iter()) - // { - // builder.assert_felt_eq(*bit, *current_bit); - // } - - // // Assert that the MemoryFinalize address bits match the current loop variable. - // for (bit, current_bit) in current_finalize_addr_bits - // .iter() - // .zip_eq(public_values.previous_finalize_addr_bits.iter()) - // { - // builder.assert_felt_eq(*bit, *current_bit); - // } - - // // Assert that if MemoryInit is not present, then the address bits are the same. - // builder - // .if_ne(contains_memory_init, C::N::one()) - // .then(|builder| { - // for (prev_bit, last_bit) in public_values - // .previous_init_addr_bits - // .iter() - // .zip_eq(public_values.last_init_addr_bits.iter()) - // { - // builder.assert_felt_eq(*prev_bit, *last_bit); - // } - // }); - - // // Assert that if MemoryFinalize is not present, then the address bits are the - // same. builder - // .if_ne(contains_memory_finalize, C::N::one()) - // .then(|builder| { - // for (prev_bit, last_bit) in public_values - // .previous_finalize_addr_bits - // .iter() - // .zip_eq(public_values.last_finalize_addr_bits.iter()) - // { - // builder.assert_felt_eq(*prev_bit, *last_bit); - // } - // }); - - // Update the MemoryInitialize address bits. - for (bit, pub_bit) in - current_init_addr_bits.iter().zip(public_values.last_init_addr_bits.iter()) - { - builder.assign(*bit, *pub_bit); - } - - // Update the MemoryFinalize address bits. - for (bit, pub_bit) in current_finalize_addr_bits - .iter() - .zip(public_values.last_finalize_addr_bits.iter()) - { - builder.assign(*bit, *pub_bit); - } - } - - // Digest constraints. - { - // // If `commited_value_digest` is not zero, then - // `public_values.commited_value_digest // should be the current - // value. 
let is_zero: Var<_> = builder.eval(C::N::one()); - // #[allow(clippy::needless_range_loop)] - // for i in 0..committed_value_digest.len() { - // for j in 0..WORD_SIZE { - // let d = felt2var(builder, committed_value_digest[i][j]); - // builder.if_ne(d, C::N::zero()).then(|builder| { - // builder.assign(is_zero, C::N::zero()); - // }); - // } - // } - // builder.if_eq(is_zero, C::N::zero()).then(|builder| { - // #[allow(clippy::needless_range_loop)] - // for i in 0..committed_value_digest.len() { - // for j in 0..WORD_SIZE { - // builder.assert_felt_eq( - // committed_value_digest[i][j], - // public_values.committed_value_digest[i][j], - // ); - // } - // } - // }); - - // // If it's not a shard with "CPU", then the committed value digest should not - // change. builder.if_ne(contains_cpu, C::N::one()).then(|builder| { - // #[allow(clippy::needless_range_loop)] - // for i in 0..committed_value_digest.len() { - // for j in 0..WORD_SIZE { - // builder.assert_felt_eq( - // committed_value_digest[i][j], - // public_values.committed_value_digest[i][j], - // ); - // } - // } - // }); - - // Update the committed value digest. - #[allow(clippy::needless_range_loop)] - for i in 0..committed_value_digest.len() { - for j in 0..WORD_SIZE { - builder.assign( - committed_value_digest[i][j], - public_values.committed_value_digest[i][j], - ); - } - } - - // // If `deferred_proofs_digest` is not zero, then - // `public_values.deferred_proofs_digest // should be the current - // value. let is_zero: Var<_> = builder.eval(C::N::one()); - // #[allow(clippy::needless_range_loop)] - // for i in 0..deferred_proofs_digest.len() { - // let d = felt2var(builder, deferred_proofs_digest[i]); - // builder.if_ne(d, C::N::zero()).then(|builder| { - // builder.assign(is_zero, C::N::zero()); - // }); - // } - // builder.if_eq(is_zero, C::N::zero()).then(|builder| { - // #[allow(clippy::needless_range_loop)] - // for i in 0..deferred_proofs_digest.len() { - // builder.assert_felt_eq( - // deferred_proofs_digest[i], - // public_values.deferred_proofs_digest[i], - // ); - // } - // }); - - // // If it's not a shard with "CPU", then the deferred proofs digest should not - // change. builder.if_ne(contains_cpu, C::N::one()).then(|builder| { - // #[allow(clippy::needless_range_loop)] - // for i in 0..deferred_proofs_digest.len() { - // builder.assert_felt_eq( - // deferred_proofs_digest[i], - // public_values.deferred_proofs_digest[i], - // ); - // } - // }); - - // Update the deferred proofs digest. - #[allow(clippy::needless_range_loop)] - for i in 0..deferred_proofs_digest.len() { - builder - .assign(deferred_proofs_digest[i], public_values.deferred_proofs_digest[i]); - } - } - - // // Verify that the number of shards is not too large. - // builder.range_check_f(public_values.shard, 16); - - // Update the reconstruct challenger. - reconstruct_challenger.observe(builder, shard_proof.commitment.main_commit); - for element in shard_proof.public_values.iter() { - reconstruct_challenger.observe(builder, *element); - } - - // Cumulative sum is updated by sums of all chips. - for values in shard_proof.opened_values.chips.iter() { - builder.assign(cumulative_sum, cumulative_sum + values.cumulative_sum); - } - } - - // Write all values to the public values struct and commit to them. - { - // Compute the vk digest. - let vk_digest = vk.hash(builder); - - // Collect the public values for challengers. 
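The `cumulative_sum` accumulation in the loop just above is the verifier's half of the permutation (lookup) argument: each chip in each shard exposes a running sum, this program only adds them together, and a complete proof is expected to have a grand total of zero (in SP1 that check belongs to the completeness assertions, which are commented out in this snapshot). A sketch over plain integers, offered as an illustration rather than the actual extension-field arithmetic:

    /// Sum the per-chip cumulative sums of every shard; a complete proof needs the total
    /// to be zero.
    fn lookup_argument_balances(shard_chip_sums: &[Vec<i64>]) -> bool {
        shard_chip_sums.iter().flatten().sum::<i64>() == 0
    }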
- let initial_challenger_public_values = - initial_reconstruct_challenger.public_values(builder); - let final_challenger_public_values = reconstruct_challenger.public_values(builder); - - // Collect the cumulative sum. - let cumulative_sum_array = builder.ext2felt_v2(cumulative_sum); - - // Collect the deferred proof digests. - let zero: Felt<_> = builder.eval(C::F::zero()); - let start_deferred_digest = [zero; POSEIDON_NUM_WORDS]; - let end_deferred_digest = [zero; POSEIDON_NUM_WORDS]; - - // Collect the is_complete flag. - // let is_complete_felt = var2felt(builder, is_complete); - - // Initialize the public values we will commit to. - let mut recursion_public_values_stream = [zero; RECURSIVE_PROOF_NUM_PV_ELTS]; - let recursion_public_values: &mut RecursionPublicValues<_> = - recursion_public_values_stream.as_mut_slice().borrow_mut(); - recursion_public_values.committed_value_digest = committed_value_digest; - recursion_public_values.deferred_proofs_digest = deferred_proofs_digest; - recursion_public_values.start_pc = start_pc; - recursion_public_values.next_pc = current_pc; - recursion_public_values.start_shard = initial_shard; - recursion_public_values.next_shard = current_shard; - recursion_public_values.start_execution_shard = initial_execution_shard; - recursion_public_values.next_execution_shard = current_execution_shard; - recursion_public_values.previous_init_addr_bits = initial_previous_init_addr_bits; - recursion_public_values.last_init_addr_bits = current_init_addr_bits; - recursion_public_values.previous_finalize_addr_bits = - initial_previous_finalize_addr_bits; - recursion_public_values.last_finalize_addr_bits = current_finalize_addr_bits; - recursion_public_values.sp1_vk_digest = vk_digest; - recursion_public_values.leaf_challenger = leaf_challenger_public_values; - recursion_public_values.start_reconstruct_challenger = initial_challenger_public_values; - recursion_public_values.end_reconstruct_challenger = final_challenger_public_values; - recursion_public_values.cumulative_sum = cumulative_sum_array; - recursion_public_values.start_reconstruct_deferred_digest = start_deferred_digest; - recursion_public_values.end_reconstruct_deferred_digest = end_deferred_digest; - recursion_public_values.exit_code = exit_code; - recursion_public_values.is_complete = is_complete; - - // // If the proof represents a complete proof, make completeness assertions. - // // - // // *Remark*: In this program, this only happends if there is one shard and the - // program has // no deferred proofs to verify. However, the completeness - // check is independent of these // facts. 
- // builder.if_eq(is_complete, C::N::one()).then(|builder| { - // assert_complete(builder, recursion_public_values, &reconstruct_challenger) - // }); - - commit_recursion_public_values(builder, recursion_public_values); - } - } -} diff --git a/crates/recursion/circuit-v2/src/machine/mod.rs b/crates/recursion/circuit-v2/src/machine/mod.rs deleted file mode 100644 index 8de8ec4a0f..0000000000 --- a/crates/recursion/circuit-v2/src/machine/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -mod compress; -mod core; -mod witness; - -#[allow(unused_imports)] -pub use compress::*; -pub use core::*; - -#[allow(unused_imports)] -pub use witness::*; - -pub use sp1_recursion_program::machine::{ - SP1CompressMemoryLayout, SP1DeferredMemoryLayout, SP1RecursionMemoryLayout, -}; diff --git a/crates/recursion/circuit-v2/src/machine/witness.rs b/crates/recursion/circuit-v2/src/machine/witness.rs deleted file mode 100644 index 44f8ab54c7..0000000000 --- a/crates/recursion/circuit-v2/src/machine/witness.rs +++ /dev/null @@ -1,111 +0,0 @@ -use std::borrow::Borrow; - -use p3_baby_bear::BabyBear; -use p3_challenger::DuplexChallenger; -use p3_symmetric::Hash; - -use p3_field::AbstractField; -use sp1_recursion_compiler::ir::Builder; -use sp1_stark::{ - air::MachineAir, baby_bear_poseidon2::BabyBearPoseidon2, InnerChallenge, InnerPerm, InnerVal, - StarkVerifyingKey, -}; - -use sp1_recursion_compiler::ir::Felt; - -use crate::{ - challenger::DuplexChallengerVariable, - witness::{WitnessWriter, Witnessable}, - CircuitConfig, VerifyingKeyVariable, -}; - -use super::{SP1RecursionMemoryLayout, SP1RecursionWitnessVariable}; - -impl Witnessable for DuplexChallenger -where - C: CircuitConfig, -{ - type WitnessVariable = DuplexChallengerVariable; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - let sponge_state = self.sponge_state.read(builder); - let input_buffer = self.input_buffer.read(builder); - let output_buffer = self.output_buffer.read(builder); - DuplexChallengerVariable { sponge_state, input_buffer, output_buffer } - } - - fn write(&self, witness: &mut impl WitnessWriter) { - self.sponge_state.write(witness); - self.input_buffer.write(witness); - self.output_buffer.write(witness); - } -} - -impl Witnessable for Hash -where - C: CircuitConfig, - W: Witnessable, -{ - type WitnessVariable = [W::WitnessVariable; DIGEST_ELEMENTS]; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - let array: &[W; DIGEST_ELEMENTS] = self.borrow(); - array.read(builder) - } - - fn write(&self, witness: &mut impl WitnessWriter) { - let array: &[W; DIGEST_ELEMENTS] = self.borrow(); - array.write(witness); - } -} - -impl Witnessable for StarkVerifyingKey -where - C: CircuitConfig>, -{ - type WitnessVariable = VerifyingKeyVariable; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - let commitment = self.commit.read(builder); - let pc_start = self.pc_start.read(builder); - let chip_information = self.chip_information.clone(); - let chip_ordering = self.chip_ordering.clone(); - VerifyingKeyVariable { commitment, pc_start, chip_information, chip_ordering } - } - - fn write(&self, witness: &mut impl WitnessWriter) { - self.commit.write(witness); - self.pc_start.write(witness); - } -} - -impl<'a, C, A> Witnessable for SP1RecursionMemoryLayout<'a, BabyBearPoseidon2, A> -where - C: CircuitConfig>, - A: MachineAir, -{ - type WitnessVariable = SP1RecursionWitnessVariable; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - let vk = self.vk.read(builder); - let shard_proofs = 
self.shard_proofs.read(builder); - let leaf_challenger = self.leaf_challenger.read(builder); - let initial_reconstruct_challenger = self.initial_reconstruct_challenger.read(builder); - let is_complete = InnerVal::from_bool(self.is_complete).read(builder); - SP1RecursionWitnessVariable { - vk, - shard_proofs, - leaf_challenger, - initial_reconstruct_challenger, - is_complete, - } - } - - fn write(&self, witness: &mut impl WitnessWriter) { - self.vk.write(witness); - self.shard_proofs.write(witness); - self.leaf_challenger.write(witness); - self.initial_reconstruct_challenger.write(witness); - self.is_complete.write(witness); - } -} diff --git a/crates/recursion/circuit-v2/src/stark.rs b/crates/recursion/circuit-v2/src/stark.rs deleted file mode 100644 index 9f0a07b107..0000000000 --- a/crates/recursion/circuit-v2/src/stark.rs +++ /dev/null @@ -1,361 +0,0 @@ -use hashbrown::HashMap; -use itertools::{izip, Itertools}; -use p3_commit::Mmcs; -use p3_matrix::dense::RowMajorMatrix; - -use p3_air::Air; -use p3_baby_bear::BabyBear; -use p3_commit::{Pcs, TwoAdicMultiplicativeCoset}; -use p3_field::TwoAdicField; -use sp1_stark::{ShardCommitment, ShardOpenedValues, Val}; - -use p3_commit::PolynomialSpace; - -use sp1_recursion_compiler::{ - circuit::CircuitV2Builder, - ir::{Builder, Config, Ext}, - prelude::Felt, -}; -use sp1_stark::{air::MachineAir, StarkGenericConfig, StarkMachine, StarkVerifyingKey}; - -use crate::{ - challenger::CanObserveVariable, CircuitConfig, TwoAdicPcsMatsVariable, TwoAdicPcsProofVariable, -}; - -use crate::{ - challenger::FieldChallengerVariable, constraints::RecursiveVerifierConstraintFolder, - domain::PolynomialSpaceVariable, fri::verify_two_adic_pcs, BabyBearFriConfigVariable, - TwoAdicPcsRoundVariable, VerifyingKeyVariable, -}; - -/// Reference: [sp1_core::stark::ShardProof] -#[derive(Clone)] -pub struct ShardProofVariable, SC: BabyBearFriConfigVariable> { - pub commitment: ShardCommitment, - pub opened_values: ShardOpenedValues>, - pub opening_proof: TwoAdicPcsProofVariable, - pub chip_ordering: HashMap, - pub public_values: Vec>, -} - -pub const EMPTY: usize = 0x_1111_1111; - -#[derive(Debug, Clone, Copy)] -pub struct StarkVerifier { - _phantom: std::marker::PhantomData<(C, SC, A)>, -} - -pub struct VerifyingKeyHint<'a, SC: StarkGenericConfig, A> { - pub machine: &'a StarkMachine, - pub vk: &'a StarkVerifyingKey, -} - -impl<'a, SC: StarkGenericConfig, A: MachineAir> VerifyingKeyHint<'a, SC, A> { - pub const fn new(machine: &'a StarkMachine, vk: &'a StarkVerifyingKey) -> Self { - Self { machine, vk } - } -} - -impl StarkVerifier -where - C::F: TwoAdicField, - C: CircuitConfig, - SC: BabyBearFriConfigVariable, - >::ProverData>: Clone, - A: MachineAir>, -{ - pub fn natural_domain_for_degree( - config: &SC, - degree: usize, - ) -> TwoAdicMultiplicativeCoset { - >::natural_domain_for_degree( - config.pcs(), - degree, - ) - } - - pub fn verify_shard( - builder: &mut Builder, - vk: &VerifyingKeyVariable, - machine: &StarkMachine, - challenger: &mut SC::FriChallengerVariable, - proof: &ShardProofVariable, - ) where - A: for<'a> Air>, - { - let chips = machine.shard_chips_ordered(&proof.chip_ordering).collect::>(); - - let ShardProofVariable { - commitment, - opened_values, - opening_proof, - chip_ordering, - public_values, - } = proof; - - let log_degrees = opened_values.chips.iter().map(|val| val.log_degree).collect::>(); - - let log_quotient_degrees = - chips.iter().map(|chip| chip.log_quotient_degree()).collect::>(); - - let trace_domains = log_degrees - .iter() - 
.map(|log_degree| Self::natural_domain_for_degree(machine.config(), 1 << log_degree)) - .collect::>(); - - let ShardCommitment { main_commit, permutation_commit, quotient_commit } = *commitment; - - let permutation_challenges = - (0..2).map(|_| challenger.sample_ext(builder)).collect::>(); - - challenger.observe(builder, permutation_commit); - - let alpha = challenger.sample_ext(builder); - - challenger.observe(builder, quotient_commit); - - let zeta = challenger.sample_ext(builder); - - let preprocessed_domains_points_and_opens = vk - .chip_information - .iter() - .map(|(name, domain, _)| { - let i = chip_ordering[name]; - let values = opened_values.chips[i].preprocessed.clone(); - TwoAdicPcsMatsVariable:: { - domain: *domain, - points: vec![zeta, domain.next_point_variable(builder, zeta)], - values: vec![values.local, values.next], - } - }) - .collect::>(); - - let main_domains_points_and_opens = trace_domains - .iter() - .zip_eq(opened_values.chips.iter()) - .map(|(domain, values)| TwoAdicPcsMatsVariable:: { - domain: *domain, - points: vec![zeta, domain.next_point_variable(builder, zeta)], - values: vec![values.main.local.clone(), values.main.next.clone()], - }) - .collect::>(); - - let perm_domains_points_and_opens = trace_domains - .iter() - .zip_eq(opened_values.chips.iter()) - .map(|(domain, values)| TwoAdicPcsMatsVariable:: { - domain: *domain, - points: vec![zeta, domain.next_point_variable(builder, zeta)], - values: vec![values.permutation.local.clone(), values.permutation.next.clone()], - }) - .collect::>(); - - let quotient_chunk_domains = trace_domains - .iter() - .zip_eq(log_degrees) - .zip_eq(log_quotient_degrees) - .map(|((domain, log_degree), log_quotient_degree)| { - let quotient_degree = 1 << log_quotient_degree; - let quotient_domain = - domain.create_disjoint_domain(1 << (log_degree + log_quotient_degree)); - quotient_domain.split_domains(quotient_degree) - }) - .collect::>(); - - let quotient_domains_points_and_opens = proof - .opened_values - .chips - .iter() - .zip_eq(quotient_chunk_domains.iter()) - .flat_map(|(values, qc_domains)| { - values.quotient.iter().zip_eq(qc_domains).map(move |(values, q_domain)| { - TwoAdicPcsMatsVariable:: { - domain: *q_domain, - points: vec![zeta], - values: vec![values.clone()], - } - }) - }) - .collect::>(); - - // Create the pcs rounds. - let prep_commit = vk.commitment; - let prep_round = TwoAdicPcsRoundVariable { - batch_commit: prep_commit, - domains_points_and_opens: preprocessed_domains_points_and_opens, - }; - let main_round = TwoAdicPcsRoundVariable { - batch_commit: main_commit, - domains_points_and_opens: main_domains_points_and_opens, - }; - let perm_round = TwoAdicPcsRoundVariable { - batch_commit: permutation_commit, - domains_points_and_opens: perm_domains_points_and_opens, - }; - let quotient_round = TwoAdicPcsRoundVariable { - batch_commit: quotient_commit, - domains_points_and_opens: quotient_domains_points_and_opens, - }; - let rounds = vec![prep_round, main_round, perm_round, quotient_round]; - - // Verify the pcs proof - builder.cycle_tracker_v2_enter("stage-d-verify-pcs".to_string()); - let config = machine.config().fri_config(); - verify_two_adic_pcs::(builder, config, opening_proof, challenger, rounds); - builder.cycle_tracker_v2_exit(); - - // Verify the constrtaint evaluations. 
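For orientation, the four PCS rounds assembled above have roughly the following shape (illustrative types only, not the real `TwoAdicPcsRoundVariable`/`TwoAdicPcsMatsVariable` API, and the per-chip matrices inside each round are collapsed to one): preprocessed, main and permutation matrices are opened at zeta and at a "next" point, while each quotient chunk is opened at zeta only. Using a single `next_zeta` is a simplification; the real code derives the next point from each matrix's own domain.

    #[derive(Debug)]
    struct MatOpening { label: &'static str, points: Vec<u64> }

    fn opening_rounds(zeta: u64, next_zeta: u64, num_quotient_chunks: usize) -> Vec<Vec<MatOpening>> {
        let opened_at_both = |label| MatOpening { label, points: vec![zeta, next_zeta] };
        vec![
            vec![opened_at_both("preprocessed")],
            vec![opened_at_both("main")],
            vec![opened_at_both("permutation")],
            (0..num_quotient_chunks)
                .map(|_| MatOpening { label: "quotient chunk", points: vec![zeta] })
                .collect(),
        ]
    }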
- builder.cycle_tracker_v2_enter("stage-e-verify-constraints".to_string()); - for (chip, trace_domain, qc_domains, values) in - izip!(chips.iter(), trace_domains, quotient_chunk_domains, opened_values.chips.iter(),) - { - // Verify the shape of the opening arguments matches the expected values. - Self::verify_opening_shape(chip, values).unwrap(); - // Verify the constraint evaluation. - Self::verify_constraints( - builder, - chip, - values, - trace_domain, - qc_domains, - zeta, - alpha, - &permutation_challenges, - public_values, - ); - } - builder.cycle_tracker_v2_exit(); - } -} - -impl, SC: BabyBearFriConfigVariable> ShardProofVariable { - pub fn contains_cpu(&self) -> bool { - self.chip_ordering.contains_key("CPU") - } - - pub fn contains_memory_init(&self) -> bool { - self.chip_ordering.contains_key("MemoryInit") - } - - pub fn contains_memory_finalize(&self) -> bool { - self.chip_ordering.contains_key("MemoryFinalize") - } -} - -#[allow(unused_imports)] -#[cfg(any(test, feature = "export-tests"))] -pub mod tests { - use std::collections::VecDeque; - - use crate::{ - challenger::{CanCopyChallenger, CanObserveVariable, DuplexChallengerVariable}, - utils::tests::run_test_recursion_with_prover, - BabyBearFriConfig, - }; - - use sp1_core_executor::{programs::tests::FIBONACCI_ELF, Program}; - use sp1_core_machine::{ - io::SP1Stdin, - riscv::RiscvAir, - utils::{prove, setup_logger}, - }; - use sp1_recursion_compiler::{ - config::{InnerConfig, OuterConfig}, - ir::{Builder, DslIr, TracedVec}, - }; - - use sp1_recursion_core_v2::{ - air::Block, machine::RecursionAir, stark::config::BabyBearPoseidon2Outer, - }; - use sp1_stark::{ - baby_bear_poseidon2::BabyBearPoseidon2, CpuProver, InnerVal, MachineProver, SP1CoreOpts, - ShardProof, - }; - - use super::*; - use crate::witness::*; - - type F = InnerVal; - type A = RiscvAir; - - pub fn build_verify_shard_with_provers< - C: CircuitConfig>, - SC: BabyBearFriConfigVariable + Default + Sync + Send, - CoreP: MachineProver, - RecP: MachineProver>, - >( - config: SC, - elf: &[u8], - opts: SP1CoreOpts, - num_shards_in_batch: Option, - ) -> (TracedVec>, Vec>) - where - SC::Challenger: Send, - <::ValMmcs as Mmcs>::ProverData< - RowMajorMatrix, - >: Send + Sync, - <::ValMmcs as Mmcs>::Commitment: Send + Sync, - <::ValMmcs as Mmcs>::Proof: Send, - StarkVerifyingKey: Witnessable>, - ShardProof: Witnessable>, - { - // Generate a dummy proof. - setup_logger(); - - let machine = RiscvAir::::machine(SC::default()); - let (_, vk) = machine.setup(&Program::from(elf).unwrap()); - let (proof, _, _) = - prove::<_, CoreP>(Program::from(elf).unwrap(), &SP1Stdin::new(), SC::default(), opts) - .unwrap(); - let mut challenger = machine.config().challenger(); - machine.verify(&vk, &proof, &mut challenger).unwrap(); - println!("Proof generated successfully"); - - // Observe all the commitments. - let mut builder = Builder::::default(); - - let mut witness_stream = Vec::>::new(); - - // Add a hash invocation, since the poseidon2 table expects that it's in the first row. 
- let mut challenger = config.challenger_variable(&mut builder); - // let vk = VerifyingKeyVariable::from_constant_key_babybear(&mut builder, &vk); - vk.write(&mut witness_stream); - let vk: VerifyingKeyVariable<_, _> = vk.read(&mut builder); - vk.observe_into(&mut builder, &mut challenger); - - let proofs = proof - .shard_proofs - .into_iter() - .map(|proof| { - proof.write(&mut witness_stream); - proof.read(&mut builder) - }) - .collect::>(); - // Observe all the commitments, and put the proofs into the witness stream. - for proof in proofs.iter() { - let ShardCommitment { main_commit, .. } = proof.commitment; - challenger.observe(&mut builder, main_commit); - let pv_slice = &proof.public_values[..machine.num_pv_elts()]; - challenger.observe_slice(&mut builder, pv_slice.iter().cloned()); - } - // Verify the first proof. - let num_shards = num_shards_in_batch.unwrap_or(proofs.len()); - for proof in proofs.into_iter().take(num_shards) { - let mut challenger = challenger.copy(&mut builder); - StarkVerifier::verify_shard(&mut builder, &vk, &machine, &mut challenger, &proof); - } - (builder.operations, witness_stream) - } - - #[test] - fn test_verify_shard_inner() { - let (operations, stream) = - build_verify_shard_with_provers::< - InnerConfig, - BabyBearPoseidon2, - CpuProver<_, _>, - CpuProver<_, _>, - >(BabyBearPoseidon2::new(), FIBONACCI_ELF, SP1CoreOpts::default(), Some(2)); - run_test_recursion_with_prover::>(operations, stream); - } -} diff --git a/crates/recursion/circuit-v2/src/types.rs b/crates/recursion/circuit-v2/src/types.rs deleted file mode 100644 index 33166668fd..0000000000 --- a/crates/recursion/circuit-v2/src/types.rs +++ /dev/null @@ -1,111 +0,0 @@ -use hashbrown::HashMap; -use p3_commit::TwoAdicMultiplicativeCoset; -use p3_field::{AbstractField, TwoAdicField}; -use p3_matrix::Dimensions; - -use sp1_recursion_compiler::ir::{Builder, Ext, Felt}; - -use sp1_recursion_core_v2::DIGEST_SIZE; - -use crate::{ - challenger::CanObserveVariable, hash::FieldHasherVariable, BabyBearFriConfigVariable, - CircuitConfig, -}; - -/// Reference: [sp1_core::stark::StarkVerifyingKey] -#[derive(Clone)] -pub struct VerifyingKeyVariable, SC: BabyBearFriConfigVariable> { - pub commitment: SC::Digest, - pub pc_start: Felt, - pub chip_information: Vec<(String, TwoAdicMultiplicativeCoset, Dimensions)>, - pub chip_ordering: HashMap, -} - -#[derive(Clone)] -pub struct FriProofVariable> { - pub commit_phase_commits: Vec, - pub query_proofs: Vec>, - pub final_poly: Ext, - pub pow_witness: Felt, -} - -/// Reference: https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/fri/src/proof.rs#L32 -#[derive(Clone)] -pub struct FriCommitPhaseProofStepVariable> { - pub sibling_value: Ext, - pub opening_proof: Vec, -} - -/// Reference: https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/fri/src/proof.rs#L23 -#[derive(Clone)] -pub struct FriQueryProofVariable> { - pub commit_phase_openings: Vec>, -} - -/// Reference: https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/fri/src/verifier.rs#L22 -#[derive(Clone)] -pub struct FriChallenges { - pub query_indices: Vec>, - pub betas: Vec>, -} - -#[derive(Clone)] -pub struct TwoAdicPcsProofVariable> { - pub fri_proof: FriProofVariable, - pub query_openings: Vec>>, -} - -#[derive(Clone)] -pub struct BatchOpeningVariable> { - pub opened_values: Vec>>>, - pub opening_proof: Vec, -} - -#[derive(Clone)] -pub struct TwoAdicPcsRoundVariable> { - pub batch_commit: H::Digest, - pub 
domains_points_and_opens: Vec>, -} - -#[derive(Clone)] -pub struct TwoAdicPcsMatsVariable { - pub domain: TwoAdicMultiplicativeCoset, - pub points: Vec>, - pub values: Vec>>, -} - -impl, SC: BabyBearFriConfigVariable> VerifyingKeyVariable { - pub fn observe_into(&self, builder: &mut Builder, challenger: &mut Challenger) - where - Challenger: CanObserveVariable> + CanObserveVariable, - { - // Observe the commitment. - challenger.observe(builder, self.commitment); - // Observe the pc_start. - challenger.observe(builder, self.pc_start); - } - - /// Hash the verifying key + prep domains into a single digest. - /// poseidon2( commit[0..8] || pc_start || prep_domains[N].{log_n, .size, .shift, .g}) - pub fn hash(&self, builder: &mut Builder) -> SC::Digest - where - C::F: TwoAdicField, - SC::Digest: IntoIterator>, - { - let prep_domains = self.chip_information.iter().map(|(_, domain, _)| domain); - let num_inputs = DIGEST_SIZE + 1 + (4 * prep_domains.len()); - let mut inputs = Vec::with_capacity(num_inputs); - inputs.extend(self.commitment); - inputs.push(self.pc_start); - for domain in prep_domains { - inputs.push(builder.eval(C::F::from_canonical_usize(domain.log_n))); - let size = 1 << domain.log_n; - inputs.push(builder.eval(C::F::from_canonical_usize(size))); - let g = C::F::two_adic_generator(domain.log_n); - inputs.push(builder.eval(domain.shift)); - inputs.push(builder.eval(g)); - } - - SC::hash(builder, &inputs) - } -} diff --git a/crates/recursion/circuit-v2/src/utils.rs b/crates/recursion/circuit-v2/src/utils.rs deleted file mode 100644 index 71a630f2f7..0000000000 --- a/crates/recursion/circuit-v2/src/utils.rs +++ /dev/null @@ -1,181 +0,0 @@ -use std::borrow::BorrowMut; - -use p3_baby_bear::BabyBear; -use p3_bn254_fr::Bn254Fr; -use p3_field::AbstractField; -use p3_field::PrimeField32; - -use sp1_recursion_compiler::{ - circuit::CircuitV2Builder, - ir::{Builder, Config, Felt, Var}, -}; -use sp1_recursion_core_v2::{ - air::{RecursionPublicValues, NUM_PV_ELMS_TO_HASH, RECURSIVE_PROOF_NUM_PV_ELTS}, - DIGEST_SIZE, -}; -use sp1_stark::Word; - -/// Register and commits the recursion public values. -pub fn commit_recursion_public_values( - builder: &mut Builder, - public_values: &RecursionPublicValues>, -) { - let mut pv_elements: [Felt<_>; RECURSIVE_PROOF_NUM_PV_ELTS] = - core::array::from_fn(|_| builder.uninit()); - *pv_elements.as_mut_slice().borrow_mut() = *public_values; - let pv_elms_no_digest = &pv_elements[0..NUM_PV_ELMS_TO_HASH]; - - for value in pv_elms_no_digest.iter() { - builder.register_public_value(*value); - } - - // Hash the public values. - let pv_digest = builder.poseidon2_hash_v2(&pv_elements[0..NUM_PV_ELMS_TO_HASH]); - for element in pv_digest { - builder.commit_public_value(element); - } -} - -/// Convert 8 BabyBear words into a Bn254Fr field element by shifting by 31 bits each time. The last -/// word becomes the least significant bits. -#[allow(dead_code)] -pub fn babybears_to_bn254(digest: &[BabyBear; 8]) -> Bn254Fr { - let mut result = Bn254Fr::zero(); - for word in digest.iter() { - // Since BabyBear prime is less than 2^31, we can shift by 31 bits each time and still be - // within the Bn254Fr field, so we don't have to truncate the top 3 bits. - result *= Bn254Fr::from_canonical_u64(1 << 31); - result += Bn254Fr::from_canonical_u32(word.as_canonical_u32()); - } - result -} - -/// Convert 32 BabyBear bytes into a Bn254Fr field element. The first byte's most significant 3 bits -/// (which would become the 3 most significant bits) are truncated. 
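A worked plain-integer version of the `babybears_to_bn254` packing described above, assuming only that BabyBear elements are below 2^31: the digest words are appended 31 bits at a time, most significant word first. It is shown with four words into a `u128` (4 * 31 = 124 bits) so it stays a native integer; the real function applies the same step eight times inside `Bn254Fr`, where 8 * 31 = 248 bits still fits under the BN254 scalar modulus.

    fn pack_31_bit_words(words: [u32; 4]) -> u128 {
        words.iter().fold(0u128, |acc, &w| {
            assert!(w < (1 << 31), "BabyBear elements fit in 31 bits");
            // Multiplying by 2^31 and adding is the same as shifting and OR-ing here.
            (acc << 31) | u128::from(w)
        })
    }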
-#[allow(dead_code)] -pub fn babybear_bytes_to_bn254(bytes: &[BabyBear; 32]) -> Bn254Fr { - let mut result = Bn254Fr::zero(); - for (i, byte) in bytes.iter().enumerate() { - debug_assert!(byte < &BabyBear::from_canonical_u32(256)); - if i == 0 { - // 32 bytes is more than Bn254 prime, so we need to truncate the top 3 bits. - result = Bn254Fr::from_canonical_u32(byte.as_canonical_u32() & 0x1f); - } else { - result *= Bn254Fr::from_canonical_u32(256); - result += Bn254Fr::from_canonical_u32(byte.as_canonical_u32()); - } - } - result -} - -pub fn felts_to_bn254_var( - builder: &mut Builder, - digest: &[Felt; DIGEST_SIZE], -) -> Var { - let var_2_31: Var<_> = builder.constant(C::N::from_canonical_u32(1 << 31)); - let result = builder.constant(C::N::zero()); - for (i, word) in digest.iter().enumerate() { - let word_bits = builder.num2bits_f_circuit(*word); - let word_var = builder.bits2num_v_circuit(&word_bits); - if i == 0 { - builder.assign(result, word_var); - } else { - builder.assign(result, result * var_2_31 + word_var); - } - } - result -} - -pub fn felt_bytes_to_bn254_var( - builder: &mut Builder, - bytes: &[Felt; 32], -) -> Var { - let var_256: Var<_> = builder.constant(C::N::from_canonical_u32(256)); - let zero_var: Var<_> = builder.constant(C::N::zero()); - let result = builder.constant(C::N::zero()); - for (i, byte) in bytes.iter().enumerate() { - let byte_bits = builder.num2bits_f_circuit(*byte); - if i == 0 { - // Since 32 bytes doesn't fit into Bn254, we need to truncate the top 3 bits. - // For first byte, zero out 3 most significant bits. - for i in 0..3 { - builder.assign(byte_bits[8 - i - 1], zero_var); - } - let byte_var = builder.bits2num_v_circuit(&byte_bits); - builder.assign(result, byte_var); - } else { - let byte_var = builder.bits2num_v_circuit(&byte_bits); - builder.assign(result, result * var_256 + byte_var); - } - } - result -} - -pub fn words_to_bytes(words: &[Word]) -> Vec { - words.iter().flat_map(|w| w.0).collect::>() -} - -#[cfg(any(test, feature = "export-tests"))] -pub(crate) mod tests { - use std::sync::Arc; - - use sp1_core_machine::utils::{run_test_machine_with_prover, setup_logger}; - use sp1_recursion_compiler::{asm::AsmConfig, circuit::AsmCompiler, ir::DslIr}; - - use sp1_recursion_compiler::ir::TracedVec; - use sp1_recursion_core_v2::{machine::RecursionAir, Runtime}; - use sp1_stark::{ - baby_bear_poseidon2::BabyBearPoseidon2, CpuProver, InnerChallenge, InnerVal, MachineProver, - }; - - use crate::witness::WitnessBlock; - - type SC = BabyBearPoseidon2; - type F = InnerVal; - type EF = InnerChallenge; - - /// A simplified version of some code from `recursion/core/src/stark/mod.rs`. - /// Takes in a program and runs it with the given witness and generates a proof with a variety - /// of machines depending on the provided test_config. 
- pub(crate) fn run_test_recursion_with_prover>>( - operations: TracedVec>>, - witness_stream: impl IntoIterator>>, - ) { - setup_logger(); - - let compile_span = tracing::debug_span!("compile").entered(); - let mut compiler = AsmCompiler::>::default(); - let program = Arc::new(compiler.compile(operations)); - compile_span.exit(); - - let config = SC::default(); - - let run_span = tracing::debug_span!("run the recursive program").entered(); - let mut runtime = Runtime::::new(program.clone(), config.perm.clone()); - runtime.witness_stream.extend(witness_stream); - tracing::debug_span!("run").in_scope(|| runtime.run().unwrap()); - assert!(runtime.witness_stream.is_empty()); - run_span.exit(); - - let records = vec![runtime.record]; - - // Run with the poseidon2 wide chip. - let proof_wide_span = tracing::debug_span!("Run test with wide machine").entered(); - let wide_machine = RecursionAir::<_, 3, 0>::machine_wide(SC::default()); - let (pk, vk) = wide_machine.setup(&program); - let result = run_test_machine_with_prover::<_, _, P>(records.clone(), wide_machine, pk, vk); - proof_wide_span.exit(); - - if let Err(e) = result { - panic!("Verification failed: {:?}", e); - } - } - - #[allow(dead_code)] - pub(crate) fn run_test_recursion( - operations: TracedVec>>, - witness_stream: impl IntoIterator>>, - ) { - run_test_recursion_with_prover::>(operations, witness_stream) - } -} diff --git a/crates/recursion/circuit/CHANGELOG.md b/crates/recursion/circuit/CHANGELOG.md index c2e0a0af20..194010c0f4 100644 --- a/crates/recursion/circuit/CHANGELOG.md +++ b/crates/recursion/circuit/CHANGELOG.md @@ -7,109 +7,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] -## [1.1.0](https://github.com/succinctlabs/sp1/compare/sp1-recursion-circuit-v1.0.1...sp1-recursion-circuit-v1.1.0) - 2024-08-02 +## [1.2.0-rc1](https://github.com/succinctlabs/sp1/releases/tag/sp1-recursion-circuit-v1.2.0-rc1) - 2024-08-23 ### Added -- update tg ([#1214](https://github.com/succinctlabs/sp1/pull/1214)) -### Fixed -- BabyBear range check Gnark ([#1225](https://github.com/succinctlabs/sp1/pull/1225)) +- unify inner and outer witnesses in recursion circuit ([#1374](https://github.com/succinctlabs/sp1/pull/1374)) +- plonk in new circuit ([#1364](https://github.com/succinctlabs/sp1/pull/1364)) ### Other -- Merge branch 'main' into dev -- prover trait cleanup ([#1170](https://github.com/succinctlabs/sp1/pull/1170)) -- add audit reports ([#1142](https://github.com/succinctlabs/sp1/pull/1142)) -## [1.0.0-rc1](https://github.com/succinctlabs/sp1/compare/sp1-recursion-circuit-v1.0.0-rc1...sp1-recursion-circuit-v1.0.0-rc1) - 2024-07-19 - -### Added - -- result instead of exit(1) on trap in recursion ([#1089](https://github.com/succinctlabs/sp1/pull/1089)) -- publish sp1 to crates.io ([#1052](https://github.com/succinctlabs/sp1/pull/1052)) -- critical constraint changes ([#1046](https://github.com/succinctlabs/sp1/pull/1046)) -- plonk circuit optimizations ([#972](https://github.com/succinctlabs/sp1/pull/972)) -- poseidon2 hash ([#885](https://github.com/succinctlabs/sp1/pull/885)) -- use docker by default for gnark ([#890](https://github.com/succinctlabs/sp1/pull/890)) -- sp1 core prover opts -- exit code ([#750](https://github.com/succinctlabs/sp1/pull/750)) -- program refactor ([#651](https://github.com/succinctlabs/sp1/pull/651)) -- e2e groth16 with contract verifier ([#671](https://github.com/succinctlabs/sp1/pull/671)) -- improve circuit by 3-4x 
([#648](https://github.com/succinctlabs/sp1/pull/648)) -- regularize proof shape ([#641](https://github.com/succinctlabs/sp1/pull/641)) -- _(sdk)_ auto setup circuit ([#635](https://github.com/succinctlabs/sp1/pull/635)) -- arbitrary degree in recursion ([#605](https://github.com/succinctlabs/sp1/pull/605)) -- prover tweaks ([#603](https://github.com/succinctlabs/sp1/pull/603)) -- enable arbitrary constraint degree ([#593](https://github.com/succinctlabs/sp1/pull/593)) -- recursion compress layer + RecursionAirWideDeg3 + RecursionAirSkinnyDeg7 + optimized groth16 ([#590](https://github.com/succinctlabs/sp1/pull/590)) -- _(Recursion)_ evaluate constraints in a single expression ([#592](https://github.com/succinctlabs/sp1/pull/592)) -- expression caching ([#586](https://github.com/succinctlabs/sp1/pull/586)) -- plonk e2e prover ([#582](https://github.com/succinctlabs/sp1/pull/582)) -- public inputs in gnark circuit ([#576](https://github.com/succinctlabs/sp1/pull/576)) -- e2e groth16 flow ([#549](https://github.com/succinctlabs/sp1/pull/549)) -- stark cleanup and verification ([#556](https://github.com/succinctlabs/sp1/pull/556)) -- recursion experiments ([#522](https://github.com/succinctlabs/sp1/pull/522)) -- groth16 circuit build script ([#541](https://github.com/succinctlabs/sp1/pull/541)) -- verify shard transitions + fixes ([#482](https://github.com/succinctlabs/sp1/pull/482)) -- recursion profiling ([#521](https://github.com/succinctlabs/sp1/pull/521)) -- gnark wrap test + cleanup ([#511](https://github.com/succinctlabs/sp1/pull/511)) -- reduce with different configs ([#508](https://github.com/succinctlabs/sp1/pull/508)) -- groth16 recursion e2e ([#502](https://github.com/succinctlabs/sp1/pull/502)) -- logup batching ([#487](https://github.com/succinctlabs/sp1/pull/487)) -- recursion optimizations + compiler cleanup ([#499](https://github.com/succinctlabs/sp1/pull/499)) -- recursion vm public values ([#495](https://github.com/succinctlabs/sp1/pull/495)) -- cleanup compiler ir ([#496](https://github.com/succinctlabs/sp1/pull/496)) -- shard transition public values ([#466](https://github.com/succinctlabs/sp1/pull/466)) -- recursion permutation challenges as variables ([#486](https://github.com/succinctlabs/sp1/pull/486)) -- add support for witness in programs ([#476](https://github.com/succinctlabs/sp1/pull/476)) -- gnark recursive verifier ([#457](https://github.com/succinctlabs/sp1/pull/457)) -- Preprocessing + recursion ([#450](https://github.com/succinctlabs/sp1/pull/450)) -- working two adic pcs verifier in recursive zkvm ([#434](https://github.com/succinctlabs/sp1/pull/434)) -- new README img ([#226](https://github.com/succinctlabs/sp1/pull/226)) -- readme updates ([#205](https://github.com/succinctlabs/sp1/pull/205)) -- more final touches ([#194](https://github.com/succinctlabs/sp1/pull/194)) -- curtaup + release system + cargo prove CLI updates ([#178](https://github.com/succinctlabs/sp1/pull/178)) -- (perf) updates from Plonky3 and verifier refactor ([#156](https://github.com/succinctlabs/sp1/pull/156)) -- developer experience improvements ([#145](https://github.com/succinctlabs/sp1/pull/145)) -- toolchain build from source & install ([#113](https://github.com/succinctlabs/sp1/pull/113)) -- io::read io::write ([#126](https://github.com/succinctlabs/sp1/pull/126)) -- tracing, profiling, benchmarking ([#99](https://github.com/succinctlabs/sp1/pull/99)) - -### Fixed - -- fix overflow when compile to wasm32 ([#812](https://github.com/succinctlabs/sp1/pull/812)) -- p3 audit change 
([#964](https://github.com/succinctlabs/sp1/pull/964)) -- _(recursion)_ assert curve bit length in circuit p2_hash ([#736](https://github.com/succinctlabs/sp1/pull/736)) -- fri fold mem access ([#660](https://github.com/succinctlabs/sp1/pull/660)) -- verify reduced proofs ([#655](https://github.com/succinctlabs/sp1/pull/655)) -- high degree constraints in recursion ([#619](https://github.com/succinctlabs/sp1/pull/619)) -- circuit sponge absorb rate ([#618](https://github.com/succinctlabs/sp1/pull/618)) -- groth16 prover issues ([#571](https://github.com/succinctlabs/sp1/pull/571)) -- observe only non-padded public values ([#523](https://github.com/succinctlabs/sp1/pull/523)) -- broken e2e recursion -- don't observe padded public values ([#520](https://github.com/succinctlabs/sp1/pull/520)) - -### Other - -- use global workspace version ([#1102](https://github.com/succinctlabs/sp1/pull/1102)) -- fix release-plz ([#1088](https://github.com/succinctlabs/sp1/pull/1088)) -- add release-plz ([#1086](https://github.com/succinctlabs/sp1/pull/1086)) -- _(deps)_ bump serde from 1.0.203 to 1.0.204 ([#1063](https://github.com/succinctlabs/sp1/pull/1063)) -- _(deps)_ bump itertools from 0.12.1 to 0.13.0 ([#817](https://github.com/succinctlabs/sp1/pull/817)) -- circuit poseidon2 babybear ([#870](https://github.com/succinctlabs/sp1/pull/870)) -- remove unecessary todos in recursion -- permutation argument in circuit ([#804](https://github.com/succinctlabs/sp1/pull/804)) -- remove unecessary todo in bb31 to bn254 ([#805](https://github.com/succinctlabs/sp1/pull/805)) -- remove unecessary todo -- Clean up TOML files ([#796](https://github.com/succinctlabs/sp1/pull/796)) -- update all dependencies ([#689](https://github.com/succinctlabs/sp1/pull/689)) -- cleanup prover ([#551](https://github.com/succinctlabs/sp1/pull/551)) -- make ci faster ([#536](https://github.com/succinctlabs/sp1/pull/536)) -- cleanup for allen ([#518](https://github.com/succinctlabs/sp1/pull/518)) -- final touches for public release ([#239](https://github.com/succinctlabs/sp1/pull/239)) -- update docs with slight nits ([#224](https://github.com/succinctlabs/sp1/pull/224)) -- sp1 rename ([#212](https://github.com/succinctlabs/sp1/pull/212)) -- enshrine AlignedBorrow macro ([#209](https://github.com/succinctlabs/sp1/pull/209)) -- readme cleanup ([#196](https://github.com/succinctlabs/sp1/pull/196)) -- rename succinct to curta ([#192](https://github.com/succinctlabs/sp1/pull/192)) -- better curta graphic ([#184](https://github.com/succinctlabs/sp1/pull/184)) -- Initial commit +- use crate `vec_map`, box large `Instruction` variants ([#1360](https://github.com/succinctlabs/sp1/pull/1360)) +- merge dev into experimental pt 2 ([#1341](https://github.com/succinctlabs/sp1/pull/1341)) +- add circuit v2 +- resolve merge conflicts between dev and experimental diff --git a/crates/recursion/circuit/Cargo.toml b/crates/recursion/circuit/Cargo.toml index 9bcf439798..7299445d5a 100644 --- a/crates/recursion/circuit/Cargo.toml +++ b/crates/recursion/circuit/Cargo.toml @@ -16,19 +16,31 @@ p3-commit = { workspace = true } p3-fri = { workspace = true } p3-matrix = { workspace = true } p3-util = { workspace = true } -sp1-recursion-core = { workspace = true } +p3-symmetric = { workspace = true } +p3-challenger = { workspace = true } +p3-dft = { workspace = true } +p3-bn254-fr = { workspace = true } +p3-baby-bear = { workspace = true } + sp1-core-machine = { workspace = true } +sp1-core-executor = { workspace = true } sp1-stark = { workspace = true } 
-itertools = "0.13.0" -serde = { version = "1.0.204", features = ["derive"] } -sp1-recursion-derive = { workspace = true } +sp1-derive = { workspace = true } +sp1-recursion-core = { workspace = true } sp1-recursion-compiler = { workspace = true } -sp1-recursion-program = { workspace = true } -p3-bn254-fr = { workspace = true } -p3-baby-bear = { workspace = true } -bincode = "1.3.3" +sp1-primitives = { workspace = true } +sp1-recursion-gnark-ffi = { workspace = true } + +itertools = "0.13.0" +serde = { version = "1.0", features = ["derive"] } +rand = "0.8.5" +tracing = "0.1.40" +hashbrown = { version = "0.14.5", features = ["serde", "inline-more"] } +num-traits = "0.2.19" +rayon = "1.10.0" [dev-dependencies] +sp1-core-executor = { workspace = true, features = ["programs"] } ff = { version = "0.13", features = ["derive", "derive_bits"] } p3-challenger = { workspace = true } p3-symmetric = { workspace = true } @@ -37,7 +49,8 @@ p3-merkle-tree = { workspace = true } p3-poseidon2 = { workspace = true } zkhash = "0.2.0" rand = "0.8.5" -sp1-recursion-gnark-ffi = { workspace = true } [features] native-gnark = ["sp1-recursion-gnark-ffi/native"] +export-tests = [] +debug = ["sp1-core-machine/debug"] diff --git a/crates/recursion/circuit/build/verifier.go b/crates/recursion/circuit/build/verifier.go deleted file mode 100644 index 1bb14130b2..0000000000 --- a/crates/recursion/circuit/build/verifier.go +++ /dev/null @@ -1,36 +0,0 @@ -// This file is auto-generated by sp1-recursion-compiler. -package verifier - -import ( - "github.com/consensys/gnark/frontend" - "github.com/succinctlabs/sp1-recursion-gnark/poseidon2" -) - -type Circuit struct { - X frontend.Variable - Y frontend.Variable -} - -func (circuit *Circuit) Define(api frontend.API) error { - var state [3]frontend.Variable - p2 := poseidon2.NewPoseidon2Chip(api) - - // Variables. - var var1 frontend.Variable - var var0 frontend.Variable - var var2 frontend.Variable - - // Operations. 
- var0 = frontend.Variable("0") - var1 = frontend.Variable("1") - var2 = frontend.Variable("2") - state = [3]frontend.Variable{var0,var1,var2} - p2.PermuteMut(&state) - var0 = state[0] - var1 = state[1] - var2 = state[2] - api.AssertIsEqual(var0, frontend.Variable("5297208644449048816064511434384511824916970985131888684874823260532015509555")) - api.AssertIsEqual(var1, frontend.Variable("21816030159894113985964609355246484851575571273661473159848781012394295965040")) - api.AssertIsEqual(var2, frontend.Variable("13940986381491601233448981668101586453321811870310341844570924906201623195336")) - return nil -} diff --git a/crates/recursion/circuit-v2/scripts/circuit_architecture_sweep.rs b/crates/recursion/circuit/scripts/circuit_architecture_sweep.rs similarity index 95% rename from crates/recursion/circuit-v2/scripts/circuit_architecture_sweep.rs rename to crates/recursion/circuit/scripts/circuit_architecture_sweep.rs index 0e76784618..3d4a04dd37 100644 --- a/crates/recursion/circuit-v2/scripts/circuit_architecture_sweep.rs +++ b/crates/recursion/circuit/scripts/circuit_architecture_sweep.rs @@ -3,9 +3,9 @@ use p3_baby_bear::BabyBear; use sp1_core::{stark::StarkMachine, utils::log2_strict_usize}; -use sp1_recursion_circuit_v2::build_wrap_v2::{machine_with_all_chips, test_machine}; +use sp1_recursion_circuit::build_wrap_v2::{machine_with_all_chips, test_machine}; +use sp1_recursion_core::machine::RecursionAir; use sp1_recursion_core::stark::config::BabyBearPoseidon2Outer; -use sp1_recursion_core_v2::machine::RecursionAir; type SC = BabyBearPoseidon2Outer; diff --git a/crates/recursion/circuit/src/challenger.rs b/crates/recursion/circuit/src/challenger.rs index 13602835a7..857da9c6a3 100644 --- a/crates/recursion/circuit/src/challenger.rs +++ b/crates/recursion/circuit/src/challenger.rs @@ -1,7 +1,236 @@ +use p3_baby_bear::BabyBear; use p3_field::{AbstractField, Field}; -use sp1_recursion_compiler::ir::{Array, Builder, Config, Ext, Felt, Var}; +use sp1_recursion_compiler::{ + circuit::CircuitV2Builder, + ir::{DslIr, Var}, + prelude::{Builder, Config, Ext, Felt}, +}; +use sp1_recursion_core::{ + air::ChallengerPublicValues, + runtime::{HASH_RATE, PERMUTATION_WIDTH}, + stark::{OUTER_MULTI_FIELD_CHALLENGER_DIGEST_SIZE, OUTER_MULTI_FIELD_CHALLENGER_RATE}, + NUM_BITS, +}; + +// Constants for the Multifield challenger. +pub const POSEIDON_2_BB_RATE: usize = 16; + +// use crate::{DigestVariable, VerifyingKeyVariable}; + +pub trait CanCopyChallenger { + fn copy(&self, builder: &mut Builder) -> Self; +} + +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct SpongeChallengerShape { + pub input_buffer_len: usize, + pub output_buffer_len: usize, +} + +/// Reference: [p3_challenger::CanObserve]. +pub trait CanObserveVariable { + fn observe(&mut self, builder: &mut Builder, value: V); + + fn observe_slice(&mut self, builder: &mut Builder, values: impl IntoIterator) { + for value in values { + self.observe(builder, value); + } + } +} + +pub trait CanSampleVariable { + fn sample(&mut self, builder: &mut Builder) -> V; +} + +/// Reference: [p3_challenger::FieldChallenger]. 
+pub trait FieldChallengerVariable: + CanObserveVariable> + CanSampleVariable> + CanSampleBitsVariable +{ + fn sample_ext(&mut self, builder: &mut Builder) -> Ext; + + fn check_witness(&mut self, builder: &mut Builder, nb_bits: usize, witness: Felt); + + fn duplexing(&mut self, builder: &mut Builder); +} + +pub trait CanSampleBitsVariable { + fn sample_bits(&mut self, builder: &mut Builder, nb_bits: usize) -> Vec; +} + +/// Reference: [p3_challenger::DuplexChallenger] +#[derive(Clone, Debug)] +pub struct DuplexChallengerVariable { + pub sponge_state: [Felt; PERMUTATION_WIDTH], + pub input_buffer: Vec>, + pub output_buffer: Vec>, +} + +impl> DuplexChallengerVariable { + /// Creates a new duplex challenger with the default state. + pub fn new(builder: &mut Builder) -> Self { + DuplexChallengerVariable:: { + sponge_state: core::array::from_fn(|_| builder.eval(C::F::zero())), + input_buffer: vec![], + output_buffer: vec![], + } + } + + /// Creates a new challenger with the same state as an existing challenger. + pub fn copy(&self, builder: &mut Builder) -> Self { + let DuplexChallengerVariable { sponge_state, input_buffer, output_buffer } = self; + let sponge_state = sponge_state.map(|x| builder.eval(x)); + let mut copy_vec = |v: &Vec>| v.iter().map(|x| builder.eval(*x)).collect(); + DuplexChallengerVariable:: { + sponge_state, + input_buffer: copy_vec(input_buffer), + output_buffer: copy_vec(output_buffer), + } + } + + fn observe(&mut self, builder: &mut Builder, value: Felt) { + self.output_buffer.clear(); + + self.input_buffer.push(value); + + if self.input_buffer.len() == HASH_RATE { + self.duplexing(builder); + } + } + + fn sample(&mut self, builder: &mut Builder) -> Felt { + if !self.input_buffer.is_empty() || self.output_buffer.is_empty() { + self.duplexing(builder); + } + + self.output_buffer.pop().expect("output buffer should be non-empty") + } + + fn sample_bits(&mut self, builder: &mut Builder, nb_bits: usize) -> Vec> { + assert!(nb_bits <= NUM_BITS); + let rand_f = self.sample(builder); + let mut rand_f_bits = builder.num2bits_v2_f(rand_f, NUM_BITS); + rand_f_bits.truncate(nb_bits); + rand_f_bits + } + + pub fn public_values(&self, builder: &mut Builder) -> ChallengerPublicValues> { + assert!(self.input_buffer.len() <= PERMUTATION_WIDTH); + assert!(self.output_buffer.len() <= PERMUTATION_WIDTH); + + let sponge_state = self.sponge_state; + let num_inputs = builder.eval(C::F::from_canonical_usize(self.input_buffer.len())); + let num_outputs = builder.eval(C::F::from_canonical_usize(self.output_buffer.len())); + + let input_buffer: [_; PERMUTATION_WIDTH] = self + .input_buffer + .iter() + .copied() + .chain((self.input_buffer.len()..PERMUTATION_WIDTH).map(|_| builder.eval(C::F::zero()))) + .collect::>() + .try_into() + .unwrap(); + + let output_buffer: [_; PERMUTATION_WIDTH] = self + .output_buffer + .iter() + .copied() + .chain( + (self.output_buffer.len()..PERMUTATION_WIDTH).map(|_| builder.eval(C::F::zero())), + ) + .collect::>() + .try_into() + .unwrap(); + + ChallengerPublicValues { + sponge_state, + num_inputs, + input_buffer, + num_outputs, + output_buffer, + } + } +} + +impl> CanCopyChallenger for DuplexChallengerVariable { + fn copy(&self, builder: &mut Builder) -> Self { + DuplexChallengerVariable::copy(self, builder) + } +} + +impl> CanObserveVariable> for DuplexChallengerVariable { + fn observe(&mut self, builder: &mut Builder, value: Felt) { + DuplexChallengerVariable::observe(self, builder, value); + } + + fn observe_slice( + &mut self, + builder: &mut Builder, + values: 
impl IntoIterator>, + ) { + for value in values { + self.observe(builder, value); + } + } +} + +impl, const N: usize> CanObserveVariable; N]> + for DuplexChallengerVariable +{ + fn observe(&mut self, builder: &mut Builder, values: [Felt; N]) { + for value in values { + self.observe(builder, value); + } + } +} + +impl> CanSampleVariable> for DuplexChallengerVariable { + fn sample(&mut self, builder: &mut Builder) -> Felt { + DuplexChallengerVariable::sample(self, builder) + } +} + +impl> CanSampleBitsVariable> for DuplexChallengerVariable { + fn sample_bits(&mut self, builder: &mut Builder, nb_bits: usize) -> Vec> { + DuplexChallengerVariable::sample_bits(self, builder, nb_bits) + } +} + +impl> FieldChallengerVariable> + for DuplexChallengerVariable +{ + fn sample_ext(&mut self, builder: &mut Builder) -> Ext { + let a = self.sample(builder); + let b = self.sample(builder); + let c = self.sample(builder); + let d = self.sample(builder); + builder.ext_from_base_slice(&[a, b, c, d]) + } + + fn check_witness( + &mut self, + builder: &mut Builder, + nb_bits: usize, + witness: Felt<::F>, + ) { + self.observe(builder, witness); + let element_bits = self.sample_bits(builder, nb_bits); + for bit in element_bits { + builder.assert_felt_eq(bit, C::F::zero()); + } + } -use crate::{poseidon2::Poseidon2CircuitBuilder, DIGEST_SIZE, SPONGE_SIZE}; + fn duplexing(&mut self, builder: &mut Builder) { + assert!(self.input_buffer.len() <= HASH_RATE); + + self.sponge_state[0..self.input_buffer.len()].copy_from_slice(self.input_buffer.as_slice()); + self.input_buffer.clear(); + + self.sponge_state = builder.poseidon2_permute_v2(self.sponge_state); + + self.output_buffer.clear(); + self.output_buffer.extend_from_slice(&self.sponge_state); + } +} #[derive(Clone)] pub struct MultiField32ChallengerVariable { @@ -26,14 +255,15 @@ impl MultiField32ChallengerVariable { } pub fn duplexing(&mut self, builder: &mut Builder) { - assert!(self.input_buffer.len() <= self.num_f_elms * SPONGE_SIZE); + assert!(self.input_buffer.len() <= self.num_f_elms * OUTER_MULTI_FIELD_CHALLENGER_RATE); for (i, f_chunk) in self.input_buffer.chunks(self.num_f_elms).enumerate() { self.sponge_state[i] = reduce_32(builder, f_chunk); } self.input_buffer.clear(); - builder.p2_permute_mut(self.sponge_state); + // TODO make this a method for the builder. 
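+ // Duplexing for the outer (multi-field) challenger: the BabyBear inputs were packed into
+ // Bn254 sponge elements via `reduce_32` above; the Poseidon2 permutation below runs over the
+ // outer field, and each state element is then unpacked back into `num_f_elms` BabyBear limbs
+ // for the output buffer.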
+ builder.push_op(DslIr::CircuitPoseidon2Permute(self.sponge_state)); self.output_buffer.clear(); for &pf_val in self.sponge_state.iter() { @@ -48,34 +278,18 @@ impl MultiField32ChallengerVariable { self.output_buffer.clear(); self.input_buffer.push(value); - if self.input_buffer.len() == self.num_f_elms * SPONGE_SIZE { + if self.input_buffer.len() == self.num_f_elms * OUTER_MULTI_FIELD_CHALLENGER_RATE { self.duplexing(builder); } } - pub fn observe_slice(&mut self, builder: &mut Builder, values: Array>) { - match values { - Array::Dyn(_, len) => { - builder.range(0, len).for_each(|i, builder| { - let element = builder.get(&values, i); - self.observe(builder, element); - }); - } - Array::Fixed(values) => { - values.iter().for_each(|value| { - self.observe(builder, *value); - }); - } - } - } - pub fn observe_commitment( &mut self, builder: &mut Builder, - value: [Var; DIGEST_SIZE], + value: [Var; OUTER_MULTI_FIELD_CHALLENGER_DIGEST_SIZE], ) { - for i in 0..DIGEST_SIZE { - let f_vals: Vec> = split_32(builder, value[i], self.num_f_elms); + for val in value { + let f_vals: Vec> = split_32(builder, val, self.num_f_elms); for f_val in f_vals { self.observe(builder, f_val); } @@ -98,16 +312,87 @@ impl MultiField32ChallengerVariable { builder.felts2ext(&[a, b, c, d]) } - pub fn sample_bits(&mut self, builder: &mut Builder, bits: usize) -> Var { + pub fn sample_bits(&mut self, builder: &mut Builder, bits: usize) -> Vec> { let rand_f = self.sample(builder); - let rand_f_bits = builder.num2bits_f_circuit(rand_f); - builder.bits2num_v_circuit(&rand_f_bits[0..bits]) + builder.num2bits_f_circuit(rand_f)[0..bits].to_vec() } pub fn check_witness(&mut self, builder: &mut Builder, bits: usize, witness: Felt) { self.observe(builder, witness); let element = self.sample_bits(builder, bits); - builder.assert_var_eq(element, C::N::from_canonical_usize(0)); + for bit in element { + builder.assert_var_eq(bit, C::N::from_canonical_usize(0)); + } + } +} + +impl CanCopyChallenger for MultiField32ChallengerVariable { + /// Creates a new challenger with the same state as an existing challenger. 
+ fn copy(&self, builder: &mut Builder) -> Self { + let MultiField32ChallengerVariable { + sponge_state, + input_buffer, + output_buffer, + num_f_elms, + } = self; + let sponge_state = sponge_state.map(|x| builder.eval(x)); + let mut copy_vec = |v: &Vec>| v.iter().map(|x| builder.eval(*x)).collect(); + MultiField32ChallengerVariable:: { + sponge_state, + num_f_elms: *num_f_elms, + input_buffer: copy_vec(input_buffer), + output_buffer: copy_vec(output_buffer), + } + } +} + +impl CanObserveVariable> for MultiField32ChallengerVariable { + fn observe(&mut self, builder: &mut Builder, value: Felt) { + MultiField32ChallengerVariable::observe(self, builder, value); + } +} + +impl CanObserveVariable; OUTER_MULTI_FIELD_CHALLENGER_DIGEST_SIZE]> + for MultiField32ChallengerVariable +{ + fn observe( + &mut self, + builder: &mut Builder, + value: [Var; OUTER_MULTI_FIELD_CHALLENGER_DIGEST_SIZE], + ) { + self.observe_commitment(builder, value) + } +} + +impl CanObserveVariable> for MultiField32ChallengerVariable { + fn observe(&mut self, builder: &mut Builder, value: Var) { + self.observe_commitment(builder, [value]) + } +} + +impl CanSampleVariable> for MultiField32ChallengerVariable { + fn sample(&mut self, builder: &mut Builder) -> Felt { + MultiField32ChallengerVariable::sample(self, builder) + } +} + +impl CanSampleBitsVariable> for MultiField32ChallengerVariable { + fn sample_bits(&mut self, builder: &mut Builder, bits: usize) -> Vec> { + MultiField32ChallengerVariable::sample_bits(self, builder, bits) + } +} + +impl FieldChallengerVariable> for MultiField32ChallengerVariable { + fn sample_ext(&mut self, builder: &mut Builder) -> Ext { + MultiField32ChallengerVariable::sample_ext(self, builder) + } + + fn check_witness(&mut self, builder: &mut Builder, bits: usize, witness: Felt) { + MultiField32ChallengerVariable::check_witness(self, builder, bits, witness); + } + + fn duplexing(&mut self, builder: &mut Builder) { + MultiField32ChallengerVariable::duplexing(self, builder); } } @@ -139,153 +424,216 @@ pub fn split_32(builder: &mut Builder, val: Var, n: usize) - } #[cfg(test)] -mod tests { +pub(crate) mod tests { + use std::iter::zip; + + use crate::{ + challenger::{CanCopyChallenger, MultiField32ChallengerVariable}, + hash::{FieldHasherVariable, BN254_DIGEST_SIZE}, + utils::tests::run_test_recursion, + }; use p3_baby_bear::BabyBear; use p3_bn254_fr::Bn254Fr; - use p3_challenger::{CanObserve, CanSample, FieldChallenger}; - use p3_field::{ - extension::BinomialExtensionField, reduce_32 as reduce_32_gt, split_32 as split_32_gt, - AbstractField, - }; - use p3_symmetric::Hash; + use p3_challenger::{CanObserve, CanSample, CanSampleBits, FieldChallenger}; + use p3_field::AbstractField; + use p3_symmetric::{CryptographicHasher, Hash, PseudoCompressionFunction}; use sp1_recursion_compiler::{ + circuit::{AsmBuilder, AsmConfig}, config::OuterConfig, constraints::ConstraintCompiler, - ir::{Builder, SymbolicExt, Witness}, + ir::{Builder, Config, Ext, ExtConst, Felt, Var}, }; - use sp1_recursion_core::stark::config::{outer_perm, OuterChallenger}; + use sp1_recursion_core::stark::{outer_perm, BabyBearPoseidon2Outer, OuterCompress, OuterHash}; use sp1_recursion_gnark_ffi::PlonkBn254Prover; + use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; - use super::{reduce_32, split_32}; - use crate::{challenger::MultiField32ChallengerVariable, DIGEST_SIZE}; + use crate::{ + challenger::{DuplexChallengerVariable, FieldChallengerVariable}, + witness::OuterWitness, + }; - #[test] - fn 
test_num2bits_v() { - let mut builder = Builder::::default(); - let mut value_u32 = 1345237507; - let value = builder.eval(Bn254Fr::from_canonical_u32(value_u32)); - let result = builder.num2bits_v_circuit(value, 32); - for i in 0..result.len() { - builder.assert_var_eq(result[i], Bn254Fr::from_canonical_u32(value_u32 & 1)); - value_u32 >>= 1; - } + type SC = BabyBearPoseidon2; + type C = OuterConfig; + type F = ::Val; + type EF = ::Challenge; - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - PlonkBn254Prover::test::(constraints.clone(), Witness::default()); + #[test] + fn test_compiler_challenger() { + let config = SC::default(); + let mut challenger = config.challenger(); + challenger.observe(F::one()); + challenger.observe(F::two()); + challenger.observe(F::two()); + challenger.observe(F::two()); + let result: F = challenger.sample(); + println!("expected result: {}", result); + let result_ef: EF = challenger.sample_ext_element(); + println!("expected result_ef: {}", result_ef); + + let mut builder = AsmBuilder::::default(); + + let mut challenger = DuplexChallengerVariable::> { + sponge_state: core::array::from_fn(|_| builder.eval(F::zero())), + input_buffer: vec![], + output_buffer: vec![], + }; + let one: Felt<_> = builder.eval(F::one()); + let two: Felt<_> = builder.eval(F::two()); + + challenger.observe(&mut builder, one); + challenger.observe(&mut builder, two); + challenger.observe(&mut builder, two); + challenger.observe(&mut builder, two); + let element = challenger.sample(&mut builder); + let element_ef = challenger.sample_ext(&mut builder); + + let expected_result: Felt<_> = builder.eval(result); + let expected_result_ef: Ext<_, _> = builder.eval(result_ef.cons()); + builder.print_f(element); + builder.assert_felt_eq(expected_result, element); + builder.print_e(element_ef); + builder.assert_ext_eq(expected_result_ef, element_ef); + + run_test_recursion(builder.into_operations(), None); } #[test] - fn test_reduce_32() { - let value_1 = BabyBear::from_canonical_u32(1345237507); - let value_2 = BabyBear::from_canonical_u32(1000001); - let gt: Bn254Fr = reduce_32_gt(&[value_1, value_2]); - - let mut builder = Builder::::default(); - let value_1 = builder.eval(value_1); - let value_2 = builder.eval(value_2); - let result = reduce_32(&mut builder, &[value_1, value_2]); - builder.assert_var_eq(result, gt); + fn test_challenger_outer() { + type SC = BabyBearPoseidon2Outer; + type F = ::Val; + type EF = ::Challenge; + type N = ::N; + + let config = SC::default(); + let mut challenger = config.challenger(); + challenger.observe(F::one()); + challenger.observe(F::two()); + challenger.observe(F::two()); + challenger.observe(F::two()); + let commit = Hash::from([N::two()]); + challenger.observe(commit); + let result: F = challenger.sample(); + println!("expected result: {}", result); + let result_ef: EF = challenger.sample_ext_element(); + println!("expected result_ef: {}", result_ef); + let mut bits = challenger.sample_bits(30); + let mut bits_vec = vec![]; + for _ in 0..30 { + bits_vec.push(bits % 2); + bits >>= 1; + } + println!("expected bits: {:?}", bits_vec); + + let mut builder = Builder::::default(); + + // let width: Var<_> = builder.eval(F::from_canonical_usize(PERMUTATION_WIDTH)); + let mut challenger = MultiField32ChallengerVariable::::new(&mut builder); + let one: Felt<_> = builder.eval(F::one()); + let two: Felt<_> = builder.eval(F::two()); + let two_var: Var<_> = builder.eval(N::two()); + // builder.halt(); + 
challenger.observe(&mut builder, one); + challenger.observe(&mut builder, two); + challenger.observe(&mut builder, two); + challenger.observe(&mut builder, two); + challenger.observe_commitment(&mut builder, [two_var]); + + // Check to make sure the copying works. + challenger = challenger.copy(&mut builder); + let element = challenger.sample(&mut builder); + let element_ef = challenger.sample_ext(&mut builder); + let bits = challenger.sample_bits(&mut builder, 31); + + let expected_result: Felt<_> = builder.eval(result); + let expected_result_ef: Ext<_, _> = builder.eval(result_ef.cons()); + builder.print_f(element); + builder.assert_felt_eq(expected_result, element); + builder.print_e(element_ef); + builder.assert_ext_eq(expected_result_ef, element_ef); + for (expected_bit, bit) in zip(bits_vec.iter(), bits.iter()) { + let expected_bit: Var<_> = builder.eval(N::from_canonical_usize(*expected_bit)); + builder.print_v(*bit); + builder.assert_var_eq(expected_bit, *bit); + } - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - PlonkBn254Prover::test::(constraints.clone(), Witness::default()); + let mut backend = ConstraintCompiler::::default(); + let constraints = backend.emit(builder.into_operations()); + let witness = OuterWitness::default(); + PlonkBn254Prover::test::(constraints, witness); } #[test] - fn test_split_32() { - let value = Bn254Fr::from_canonical_u32(1345237507); - let gt: Vec = split_32_gt(value, 3); + fn test_select_chain_digest() { + type N = ::N; + + let mut builder = Builder::::default(); - let mut builder = Builder::::default(); - let value = builder.eval(value); - let result = split_32(&mut builder, value, 3); + let one: Var<_> = builder.eval(N::one()); + let two: Var<_> = builder.eval(N::two()); - builder.assert_felt_eq(result[0], gt[0]); - builder.assert_felt_eq(result[1], gt[1]); - builder.assert_felt_eq(result[2], gt[2]); + let to_swap = [[one], [two]]; + let result = BabyBearPoseidon2Outer::select_chain_digest(&mut builder, one, to_swap); - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - PlonkBn254Prover::test::(constraints.clone(), Witness::default()); + builder.assert_var_eq(result[0][0], two); + builder.assert_var_eq(result[1][0], one); + + let mut backend = ConstraintCompiler::::default(); + let constraints = backend.emit(builder.into_operations()); + let witness = OuterWitness::default(); + PlonkBn254Prover::test::(constraints, witness); } #[test] - fn test_challenger() { + fn test_p2_hash() { let perm = outer_perm(); - let mut challenger = OuterChallenger::new(perm).unwrap(); - let a = BabyBear::from_canonical_usize(1); - let b = BabyBear::from_canonical_usize(2); - let c = BabyBear::from_canonical_usize(3); - challenger.observe(a); - challenger.observe(b); - challenger.observe(c); - let gt1: BabyBear = challenger.sample(); - challenger.observe(a); - challenger.observe(b); - challenger.observe(c); - let gt2: BabyBear = challenger.sample(); - - let mut builder = Builder::::default(); - let mut challenger = MultiField32ChallengerVariable::new(&mut builder); - let a = builder.eval(a); - let b = builder.eval(b); - let c = builder.eval(c); - challenger.observe(&mut builder, a); - challenger.observe(&mut builder, b); - challenger.observe(&mut builder, c); - let result1 = challenger.sample(&mut builder); - builder.assert_felt_eq(gt1, result1); - challenger.observe(&mut builder, a); - challenger.observe(&mut builder, b); - challenger.observe(&mut 
builder, c); - let result2 = challenger.sample(&mut builder); - builder.assert_felt_eq(gt2, result2); - - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - PlonkBn254Prover::test::(constraints.clone(), Witness::default()); + let hasher = OuterHash::new(perm.clone()).unwrap(); + + let input: [BabyBear; 7] = [ + BabyBear::from_canonical_u32(0), + BabyBear::from_canonical_u32(1), + BabyBear::from_canonical_u32(2), + BabyBear::from_canonical_u32(2), + BabyBear::from_canonical_u32(2), + BabyBear::from_canonical_u32(2), + BabyBear::from_canonical_u32(2), + ]; + let output = hasher.hash_iter(input); + + let mut builder = Builder::::default(); + let a: Felt<_> = builder.eval(input[0]); + let b: Felt<_> = builder.eval(input[1]); + let c: Felt<_> = builder.eval(input[2]); + let d: Felt<_> = builder.eval(input[3]); + let e: Felt<_> = builder.eval(input[4]); + let f: Felt<_> = builder.eval(input[5]); + let g: Felt<_> = builder.eval(input[6]); + let result = BabyBearPoseidon2Outer::hash(&mut builder, &[a, b, c, d, e, f, g]); + + builder.assert_var_eq(result[0], output[0]); + + let mut backend = ConstraintCompiler::::default(); + let constraints = backend.emit(builder.into_operations()); + PlonkBn254Prover::test::(constraints.clone(), OuterWitness::default()); } #[test] - fn test_challenger_sample_ext() { + fn test_p2_compress() { + type OuterDigestVariable = [Var<::N>; BN254_DIGEST_SIZE]; let perm = outer_perm(); - let mut challenger = OuterChallenger::new(perm).unwrap(); - let a = BabyBear::from_canonical_usize(1); - let b = BabyBear::from_canonical_usize(2); - let c = BabyBear::from_canonical_usize(3); - let hash = Hash::from([Bn254Fr::two(); DIGEST_SIZE]); - challenger.observe(hash); - challenger.observe(a); - challenger.observe(b); - challenger.observe(c); - let gt1: BinomialExtensionField = challenger.sample_ext_element(); - challenger.observe(a); - challenger.observe(b); - challenger.observe(c); - let gt2: BinomialExtensionField = challenger.sample_ext_element(); - - let mut builder = Builder::::default(); - let mut challenger = MultiField32ChallengerVariable::new(&mut builder); - let a = builder.eval(a); - let b = builder.eval(b); - let c = builder.eval(c); - let hash = builder.eval(Bn254Fr::two()); - challenger.observe_commitment(&mut builder, [hash]); - challenger.observe(&mut builder, a); - challenger.observe(&mut builder, b); - challenger.observe(&mut builder, c); - let result1 = challenger.sample_ext(&mut builder); - challenger.observe(&mut builder, a); - challenger.observe(&mut builder, b); - challenger.observe(&mut builder, c); - let result2 = challenger.sample_ext(&mut builder); - - builder.assert_ext_eq(SymbolicExt::from_f(gt1), result1); - builder.assert_ext_eq(SymbolicExt::from_f(gt2), result2); - - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - PlonkBn254Prover::test::(constraints.clone(), Witness::default()); + let compressor = OuterCompress::new(perm.clone()); + + let a: [Bn254Fr; 1] = [Bn254Fr::two()]; + let b: [Bn254Fr; 1] = [Bn254Fr::two()]; + let gt = compressor.compress([a, b]); + + let mut builder = Builder::::default(); + let a: OuterDigestVariable = [builder.eval(a[0])]; + let b: OuterDigestVariable = [builder.eval(b[0])]; + let result = BabyBearPoseidon2Outer::compress(&mut builder, [a, b]); + builder.assert_var_eq(result[0], gt[0]); + + let mut backend = ConstraintCompiler::::default(); + let constraints = backend.emit(builder.into_operations()); + 
PlonkBn254Prover::test::(constraints.clone(), OuterWitness::default()); } } diff --git a/crates/recursion/circuit/src/constraints.rs b/crates/recursion/circuit/src/constraints.rs index f2df585d09..fa3c506764 100644 --- a/crates/recursion/circuit/src/constraints.rs +++ b/crates/recursion/circuit/src/constraints.rs @@ -1,50 +1,91 @@ -use p3_air::Air; -use p3_commit::LagrangeSelectors; -use p3_field::{AbstractExtensionField, AbstractField, TwoAdicField}; -use sp1_recursion_compiler::{ - ir::{Array, Builder, Config, Ext, ExtensionOperand, Felt, SymbolicFelt}, - prelude::SymbolicExt, +use p3_air::{Air, BaseAir}; +use p3_baby_bear::BabyBear; +use p3_commit::{LagrangeSelectors, Mmcs, PolynomialSpace, TwoAdicMultiplicativeCoset}; +use p3_field::{AbstractExtensionField, AbstractField, Field, TwoAdicField}; +use p3_matrix::dense::RowMajorMatrix; + +use sp1_recursion_compiler::ir::{ + Builder, Config, Ext, ExtConst, ExtensionOperand, Felt, SymbolicExt, SymbolicFelt, }; -use sp1_recursion_program::commit::PolynomialSpaceVariable; - -use sp1_recursion_program::stark::RecursiveVerifierConstraintFolder; use sp1_stark::{ - air::MachineAir, AirOpenedValues, MachineChip, StarkGenericConfig, PROOF_MAX_NUM_PVS, + air::MachineAir, AirOpenedValues, ChipOpenedValues, GenericVerifierConstraintFolder, + MachineChip, OpeningShapeError, }; use crate::{ - domain::TwoAdicMultiplicativeCosetVariable, - stark::StarkVerifierCircuit, - types::{ChipOpenedValuesVariable, ChipOpening}, + domain::PolynomialSpaceVariable, stark::StarkVerifier, BabyBearFriConfigVariable, CircuitConfig, }; -impl StarkVerifierCircuit +pub type RecursiveVerifierConstraintFolder<'a, C> = GenericVerifierConstraintFolder< + 'a, + ::F, + ::EF, + Felt<::F>, + Ext<::F, ::EF>, + SymbolicExt<::F, ::EF>, +>; + +impl StarkVerifier where - SC: StarkGenericConfig, C::F: TwoAdicField, + SC: BabyBearFriConfigVariable, + C: CircuitConfig, + >::ProverData>: Clone, + A: MachineAir + for<'a> Air>, { - fn eval_constraints( + #[allow(clippy::too_many_arguments)] + pub fn verify_constraints( + builder: &mut Builder, + chip: &MachineChip, + opening: &ChipOpenedValues>, + trace_domain: TwoAdicMultiplicativeCoset, + qc_domains: Vec>, + zeta: Ext, + alpha: Ext, + permutation_challenges: &[Ext], + public_values: &[Felt], + ) { + let sels = trace_domain.selectors_at_point_variable(builder, zeta); + + // Recompute the quotient at zeta from the chunks. + let quotient = Self::recompute_quotient(builder, opening, &qc_domains, zeta); + + // Calculate the evaluations of the constraints at zeta. + let folded_constraints = Self::eval_constraints( + builder, + chip, + opening, + &sels, + alpha, + permutation_challenges, + public_values, + ); + + // Assert that the quotient times the zerofier is equal to the folded constraints. 
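+ // The quotient argument behind this check: the alpha-folded constraint polynomial C(X)
+ // equals q(X) * Z_H(X), where Z_H is the zerofier (vanishing polynomial) of the trace domain.
+ // Evaluated at the out-of-domain point zeta, this becomes C(zeta) * Z_H(zeta)^{-1} == q(zeta),
+ // with `sels.inv_zeroifier` holding Z_H(zeta)^{-1}.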
+ builder.assert_ext_eq(folded_constraints * sels.inv_zeroifier, quotient); + } + + pub fn eval_constraints( builder: &mut Builder, chip: &MachineChip, - opening: &ChipOpening, - public_values: Array>, + opening: &ChipOpenedValues>, selectors: &LagrangeSelectors>, alpha: Ext, permutation_challenges: &[Ext], - ) -> Ext - where - A: for<'b> Air>, - { + public_values: &[Felt], + ) -> Ext { let mut unflatten = |v: &[Ext]| { - v.chunks_exact(SC::Challenge::D) + v.chunks_exact(>::D) .map(|chunk| { builder.eval( chunk .iter() .enumerate() - .map(|(e_i, &x)| { - x * SymbolicExt::::from_f(C::EF::monomial(e_i)) - }) + .map( + |(e_i, x): (usize, &Ext)| -> SymbolicExt { + SymbolicExt::from(*x) * C::EF::monomial(e_i) + }, + ) .sum::>(), ) }) @@ -55,18 +96,13 @@ where next: unflatten(&opening.permutation.next), }; - let mut folder_pv = Vec::new(); - for i in 0..PROOF_MAX_NUM_PVS { - folder_pv.push(builder.get(&public_values, i)); - } - let mut folder = RecursiveVerifierConstraintFolder:: { preprocessed: opening.preprocessed.view(), main: opening.main.view(), perm: perm_opening.view(), perm_challenges: permutation_challenges, - cumulative_sum: opening.cumulative_sum, - public_values: &folder_pv, + cumulative_sums: &[opening.global_cumulative_sum, opening.local_cumulative_sum], + public_values, is_first_row: selectors.is_first_row, is_last_row: selectors.is_last_row, is_transition: selectors.is_transition, @@ -79,12 +115,23 @@ where builder.eval(folder.accumulator) } - fn recompute_quotient( + pub fn recompute_quotient( builder: &mut Builder, - opening: &ChipOpening, - qc_domains: Vec>, + opening: &ChipOpenedValues>, + qc_domains: &[TwoAdicMultiplicativeCoset], zeta: Ext, ) -> Ext { + // Compute the maximum power of zeta we will need. + let max_domain_log_n = qc_domains.iter().map(|d| d.log_n).max().unwrap(); + + // Compute all powers of zeta of the form zeta^(2^i) up to `zeta^(2^max_domain_log_n)`. + let mut zetas: Vec> = vec![zeta]; + for _ in 1..max_domain_log_n + 1 { + let last_zeta = zetas.last().unwrap(); + let new_zeta = builder.eval(*last_zeta * *last_zeta); + builder.reduce_e(new_zeta); + zetas.push(new_zeta); + } let zps = qc_domains .iter() .enumerate() @@ -94,24 +141,37 @@ where .enumerate() .filter(|(j, _)| *j != i) .map(|(_, other_domain)| { - // Calculate: other_domain.zp_at_point(zeta) - // * other_domain.zp_at_point(domain.first_point()).inverse() - let first_point = domain.first_point(builder); - let z = other_domain.zp_at_point_f(builder, first_point); + // `shift_power` is used in the computation of + let shift_power = + other_domain.shift.exp_power_of_2(other_domain.log_n).inverse(); + // This is `other_domain.zp_at_point_f(builder, domain.first_point())`. + // We compute it as a constant here. + let z_f = domain.first_point().exp_power_of_2(other_domain.log_n) + * shift_power + - C::F::one(); ( - other_domain.zp_at_point(builder, zeta).to_operand().symbolic(), - z.inverse(), + { + // We use the precomputed powers of zeta to compute (inline) the value of + // `other_domain.zp_at_point_variable(builder, zeta)`. 
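+ // For a coset of size 2^log_n with shift s, the zerofier is Z(X) = (X / s)^(2^log_n) - 1
+ // = X^(2^log_n) * shift_power - 1, where shift_power = s^(-2^log_n). Reusing
+ // `zetas[other_domain.log_n]` (= zeta^(2^log_n)) therefore yields Z(zeta) without a fresh
+ // exponentiation for every pair of domains.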
+ let z: Ext<_, _> = builder.eval( + zetas[other_domain.log_n] * SymbolicFelt::from_f(shift_power) + - SymbolicExt::from_f(C::EF::one()), + ); + z.to_operand().symbolic() + }, + builder.constant::>(z_f), ) }) - .unzip::<_, _, Vec<_>, Vec<_>>(); - zs.into_iter().product::>() - * zinvs.into_iter().product::>() + .unzip::<_, _, Vec>, Vec>>(); + let symbolic_prod: SymbolicFelt<_> = + zinvs.into_iter().map(|x| x.into()).product::>(); + (zs.into_iter().product::>(), symbolic_prod) }) - .collect::>>() + .collect::, SymbolicFelt<_>)>>() .into_iter() - .map(|x| builder.eval(x)) + .map(|(x, y)| builder.eval(x / y)) .collect::>>(); - + zps.iter().for_each(|zp| builder.reduce_e(*zp)); builder.eval( opening .quotient @@ -119,223 +179,84 @@ where .enumerate() .map(|(ch_i, ch)| { assert_eq!(ch.len(), C::EF::D); - ch.iter() - .enumerate() - .map(|(e_i, &c)| zps[ch_i] * C::EF::monomial(e_i) * c) - .sum::>() + zps[ch_i].to_operand().symbolic() + * ch.iter() + .enumerate() + .map(|(e_i, &c)| C::EF::monomial(e_i).cons() * SymbolicExt::from(c)) + .sum::>() }) .sum::>(), ) } - pub fn verify_constraints( - builder: &mut Builder, + pub fn verify_opening_shape( chip: &MachineChip, - opening: &ChipOpenedValuesVariable, - public_values: Array>, - trace_domain: TwoAdicMultiplicativeCosetVariable, - qc_domains: Vec>, - zeta: Ext, - alpha: Ext, - permutation_challenges: &[Ext], - ) where - A: MachineAir + for<'a> Air>, - { - builder.cycle_tracker("verify constraints"); - - let opening = ChipOpening::from_variable(builder, chip, opening); - let sels = trace_domain.selectors_at_point(builder, zeta); - - let folded_constraints = Self::eval_constraints( - builder, - chip, - &opening, - public_values, - &sels, - alpha, - permutation_challenges, - ); - - let quotient: Ext<_, _> = Self::recompute_quotient(builder, &opening, qc_domains, zeta); - - builder.assert_ext_eq(folded_constraints * sels.inv_zeroifier, quotient); - - builder.cycle_tracker("verify constraints"); - } -} - -#[cfg(test)] -mod tests { - - use itertools::{izip, Itertools}; - use p3_baby_bear::DiffusionMatrixBabyBear; - use p3_challenger::{CanObserve, FieldChallenger}; - use p3_commit::{Pcs, PolynomialSpace}; - use sp1_recursion_compiler::{ - config::OuterConfig, - constraints::ConstraintCompiler, - ir::{Builder, Witness}, - prelude::ExtConst, - }; - use sp1_recursion_core::{ - runtime::Runtime, - stark::{config::BabyBearPoseidon2Outer, RecursionAirWideDeg3}, - }; - use sp1_recursion_gnark_ffi::PlonkBn254Prover; - use sp1_stark::{ - Chip, Com, CpuProver, Dom, MachineProver, OpeningProof, PcsProverData, SP1CoreOpts, - ShardCommitment, ShardProof, StarkGenericConfig, StarkMachine, - }; - - use crate::stark::{tests::basic_program, StarkVerifierCircuit}; - - #[allow(clippy::type_complexity)] - fn get_shard_data<'a, SC>( - machine: &'a StarkMachine>, - proof: &'a ShardProof, - challenger: &mut SC::Challenger, - ) -> ( - Vec<&'a Chip>>, - Vec>, - Vec>>, - Vec, - SC::Challenge, - SC::Challenge, - ) - where - SC: StarkGenericConfig + Default, - SC::Challenger: Clone, - OpeningProof: Send + Sync, - Com: Send + Sync, - PcsProverData: Send + Sync, - SC::Val: p3_field::PrimeField32, - ::Val: p3_field::extension::BinomiallyExtendable<4>, - { - let ShardProof { commitment, opened_values, .. } = proof; - - let ShardCommitment { permutation_commit, quotient_commit, .. } = commitment; - - // Extract verification metadata. 
- let pcs = machine.config().pcs(); - - let permutation_challenges = - (0..2).map(|_| challenger.sample_ext_element::()).collect::>(); - - challenger.observe(permutation_commit.clone()); - - let alpha = challenger.sample_ext_element::(); - - // Observe the quotient commitments. - challenger.observe(quotient_commit.clone()); - - let zeta = challenger.sample_ext_element::(); - - let chips = machine.shard_chips_ordered(&proof.chip_ordering).collect::>(); - - let log_degrees = opened_values.chips.iter().map(|val| val.log_degree).collect::>(); - - let log_quotient_degrees = - chips.iter().map(|chip| chip.log_quotient_degree()).collect::>(); - - let trace_domains = log_degrees - .iter() - .map(|log_degree| pcs.natural_domain_for_degree(1 << log_degree)) - .collect::>(); - - let quotient_chunk_domains = trace_domains - .iter() - .zip_eq(log_degrees) - .zip_eq(log_quotient_degrees) - .map(|((domain, log_degree), log_quotient_degree)| { - let quotient_degree = 1 << log_quotient_degree; - let quotient_domain = - domain.create_disjoint_domain(1 << (log_degree + log_quotient_degree)); - quotient_domain.split_domains(quotient_degree) - }) - .collect::>(); - - (chips, trace_domains, quotient_chunk_domains, permutation_challenges, alpha, zeta) - } - - #[test] - fn test_verify_constraints_whole() { - type SC = BabyBearPoseidon2Outer; - type F = ::Val; - type EF = ::Challenge; - type A = RecursionAirWideDeg3; - - sp1_core_machine::utils::setup_logger(); - let program = basic_program::(); - let config = SC::new(); - let mut runtime = Runtime::::new_no_perm(&program); - runtime.run().unwrap(); - let machine = A::machine(config); - let prover = CpuProver::new(machine); - let (pk, vk) = prover.setup(&program); - let mut challenger = prover.config().challenger(); - let proof = prover - .prove(&pk, vec![runtime.record], &mut challenger, SP1CoreOpts::recursion()) - .unwrap(); - - let mut challenger = prover.config().challenger(); - vk.observe_into(&mut challenger); - proof.shard_proofs.iter().for_each(|proof| { - challenger.observe(proof.commitment.main_commit); - challenger.observe_slice(&proof.public_values[0..prover.num_pv_elts()]); - }); - - // Run the verify inside the DSL and compare it to the calculated value. - let mut builder = Builder::::default(); - - for proof in proof.shard_proofs.into_iter().take(1) { - let ( - chips, - trace_domains_vals, - quotient_chunk_domains_vals, - permutation_challenges, - alpha_val, - zeta_val, - ) = get_shard_data(prover.machine(), &proof, &mut challenger); + opening: &ChipOpenedValues>, + ) -> Result<(), OpeningShapeError> { + // Verify that the preprocessed width matches the expected value for the chip. 
+ if opening.preprocessed.local.len() != chip.preprocessed_width() { + return Err(OpeningShapeError::PreprocessedWidthMismatch( + chip.preprocessed_width(), + opening.preprocessed.local.len(), + )); + } + if opening.preprocessed.next.len() != chip.preprocessed_width() { + return Err(OpeningShapeError::PreprocessedWidthMismatch( + chip.preprocessed_width(), + opening.preprocessed.next.len(), + )); + } - for (chip, trace_domain_val, qc_domains_vals, values_vals) in izip!( - chips.iter(), - trace_domains_vals, - quotient_chunk_domains_vals, - proof.opened_values.chips.iter(), - ) { - let opening = builder.constant(values_vals.clone()); - let alpha = builder.eval(alpha_val.cons()); - let zeta = builder.eval(zeta_val.cons()); - let trace_domain = builder.constant(trace_domain_val); - let pv_felts = - proof.public_values.iter().map(|v| builder.constant(*v)).collect_vec(); - let public_values = builder.vec(pv_felts); - let qc_domains = qc_domains_vals - .iter() - .map(|domain| builder.constant(*domain)) - .collect::>(); + // Verify that the main width matches the expected value for the chip. + if opening.main.local.len() != chip.width() { + return Err(OpeningShapeError::MainWidthMismatch( + chip.width(), + opening.main.local.len(), + )); + } + if opening.main.next.len() != chip.width() { + return Err(OpeningShapeError::MainWidthMismatch( + chip.width(), + opening.main.next.len(), + )); + } - let permutation_challenges = permutation_challenges - .iter() - .map(|c| builder.eval(c.cons())) - .collect::>(); + // Verify that the permutation width matches the expected value for the chip. + if opening.permutation.local.len() + != chip.permutation_width() * >::D + { + return Err(OpeningShapeError::PermutationWidthMismatch( + chip.permutation_width(), + opening.permutation.local.len(), + )); + } + if opening.permutation.next.len() + != chip.permutation_width() * >::D + { + return Err(OpeningShapeError::PermutationWidthMismatch( + chip.permutation_width(), + opening.permutation.next.len(), + )); + } - StarkVerifierCircuit::<_, SC>::verify_constraints::( - &mut builder, - chip, - &opening, - public_values, - trace_domain, - qc_domains, - zeta, - alpha, - &permutation_challenges, - ) + // Verify that the number of quotient chunks matches the expected value for the chip. + if opening.quotient.len() != chip.quotient_width() { + return Err(OpeningShapeError::QuotientWidthMismatch( + chip.quotient_width(), + opening.quotient.len(), + )); + } + // For each quotient chunk, verify that the number of elements is equal to the degree of the + // challenge extension field over the value field.
+ for slice in &opening.quotient { + if slice.len() != >::D { + return Err(OpeningShapeError::QuotientChunkSizeMismatch( + >::D, + slice.len(), + )); } } - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - PlonkBn254Prover::test::(constraints.clone(), Witness::default()); + Ok(()) } } diff --git a/crates/recursion/circuit/src/domain.rs b/crates/recursion/circuit/src/domain.rs index 52b96536f2..7c16673615 100644 --- a/crates/recursion/circuit/src/domain.rs +++ b/crates/recursion/circuit/src/domain.rs @@ -1,87 +1,49 @@ -use p3_commit::{LagrangeSelectors, TwoAdicMultiplicativeCoset}; +use p3_commit::{LagrangeSelectors, PolynomialSpace, TwoAdicMultiplicativeCoset}; use p3_field::{AbstractExtensionField, AbstractField, Field, TwoAdicField}; use sp1_recursion_compiler::prelude::*; -use sp1_recursion_program::{commit::PolynomialSpaceVariable, fri::types::FriConfigVariable}; -#[derive(Clone, Copy)] -pub struct TwoAdicMultiplicativeCosetVariable { - pub log_n: usize, - pub size: usize, - pub shift: C::F, - pub g: C::F, -} +/// Reference: [p3_commit::PolynomialSpace] +pub trait PolynomialSpaceVariable: Sized + PolynomialSpace { + fn selectors_at_point_variable( + &self, + builder: &mut Builder, + point: Ext, + ) -> LagrangeSelectors>; -impl TwoAdicMultiplicativeCosetVariable { - pub fn gen(&self, builder: &mut Builder) -> Felt { - builder.eval(self.g) - } + fn zp_at_point_variable( + &self, + builder: &mut Builder, + point: Ext, + ) -> Ext; - pub fn geninv(&self, builder: &mut Builder) -> Felt { - builder.eval(self.g.inverse()) - } + fn next_point_variable( + &self, + builder: &mut Builder, + point: Ext<::F, ::EF>, + ) -> Ext<::F, ::EF>; - pub fn first_point(&self, builder: &mut Builder) -> Felt { - builder.eval(self.shift) - } - pub fn zp_at_point_f( + fn zp_at_point_f( &self, builder: &mut Builder, point: Felt<::F>, - ) -> Felt<::F> { - let unshifted_power = builder - .exp_power_of_2_v::>(point * self.shift.inverse(), Usize::Const(self.log_n)); - builder.eval(unshifted_power - C::F::one()) - } -} - -impl FromConstant for TwoAdicMultiplicativeCosetVariable -where - C::F: TwoAdicField, -{ - type Constant = TwoAdicMultiplicativeCoset; - - fn constant(value: Self::Constant, _: &mut Builder) -> Self { - let g_val = C::F::two_adic_generator(value.log_n); - TwoAdicMultiplicativeCosetVariable:: { - log_n: value.log_n, - size: 1 << value.log_n, - shift: value.shift, - g: g_val, - } - } -} - -pub fn new_coset( - _: &mut Builder, - log_degree: usize, -) -> TwoAdicMultiplicativeCosetVariable -where - C::F: TwoAdicField, -{ - TwoAdicMultiplicativeCosetVariable:: { - log_n: log_degree, - size: 1 << log_degree, - shift: C::F::one(), - g: C::F::two_adic_generator(log_degree), - } + ) -> Felt<::F>; } -impl PolynomialSpaceVariable for TwoAdicMultiplicativeCosetVariable +impl PolynomialSpaceVariable for TwoAdicMultiplicativeCoset where C::F: TwoAdicField, { - type Constant = p3_commit::TwoAdicMultiplicativeCoset; - - fn next_point( + fn next_point_variable( &self, builder: &mut Builder, point: Ext<::F, ::EF>, ) -> Ext<::F, ::EF> { - let g: Felt<_> = builder.eval(self.g); + let g = C::F::two_adic_generator(self.log_n); + // let g: Felt<_> = builder.eval(g); builder.eval(point * g) } - fn selectors_at_point( + fn selectors_at_point_variable( &self, builder: &mut Builder, point: Ext<::F, ::EF>, @@ -91,7 +53,8 @@ where .exp_power_of_2_v::>(unshifted_point, Usize::Const(self.log_n)) - C::EF::one(); let z_h: Ext<_, _> = builder.eval(z_h_expr); - let ginv = 
self.geninv(builder); + let g = C::F::two_adic_generator(self.log_n); + let ginv = g.inverse(); LagrangeSelectors { is_first_row: builder.eval(z_h / (unshifted_point - C::EF::one())), is_last_row: builder.eval(z_h / (unshifted_point - ginv)), @@ -100,7 +63,7 @@ where } } - fn zp_at_point( + fn zp_at_point_variable( &self, builder: &mut Builder, point: Ext<::F, ::EF>, @@ -114,49 +77,13 @@ where ); builder.eval(unshifted_power - C::EF::one()) } - - fn split_domains( - &self, - _builder: &mut Builder, - _log_num_chunks: impl Into::N>>, - _num_chunks: impl Into::N>>, - ) -> Array { - unimplemented!("Not implemented for a circuit variable") - } - - fn split_domains_const(&self, _: &mut Builder, log_num_chunks: usize) -> Vec { - let num_chunks = 1 << log_num_chunks; - let log_n = self.log_n - log_num_chunks; - let size = 1 << log_n; - - let g = self.g; - - let mut domain_power = C::F::one(); - let mut domains = vec![]; - - for _ in 0..num_chunks { - domains.push(TwoAdicMultiplicativeCosetVariable { - log_n, - size, - shift: self.shift * domain_power, - g, - }); - domain_power *= g; - } - domains - } - - fn create_disjoint_domain( + fn zp_at_point_f( &self, builder: &mut Builder, - log_degree: Usize<::N>, - _: Option>, - ) -> Self { - let mut domain = match log_degree { - Usize::Const(log_degree) => new_coset(builder, log_degree), - _ => unreachable!(), - }; - domain.shift = self.shift * C::F::generator(); - domain + point: Felt<::F>, + ) -> Felt<::F> { + let unshifted_power = builder + .exp_power_of_2_v::>(point * self.shift.inverse(), Usize::Const(self.log_n)); + builder.eval(unshifted_power - C::F::one()) } } diff --git a/crates/recursion/circuit/src/fri.rs b/crates/recursion/circuit/src/fri.rs index e847d5a3b4..099f8bcfe4 100644 --- a/crates/recursion/circuit/src/fri.rs +++ b/crates/recursion/circuit/src/fri.rs @@ -1,47 +1,58 @@ use itertools::{izip, Itertools}; -use p3_bn254_fr::Bn254Fr; +use p3_baby_bear::BabyBear; use p3_commit::PolynomialSpace; use p3_field::{AbstractField, TwoAdicField}; -use p3_fri::{FriConfig, TwoAdicFriPcsProof}; -use p3_matrix::Dimensions; -use p3_util::log2_strict_usize; -use sp1_recursion_compiler::{ - config::OuterConfig, - ir::{Builder, Config, Felt}, - prelude::*, +use p3_fri::{ + BatchOpening, CommitPhaseProofStep, FriConfig, FriProof, QueryProof, TwoAdicFriPcsProof, }; -use sp1_recursion_core::stark::config::{ - OuterChallenge, OuterChallengeMmcs, OuterFriProof, OuterVal, OuterValMmcs, +use p3_symmetric::Hash; +use p3_util::log2_strict_usize; +use sp1_recursion_compiler::ir::{Builder, DslIr, Felt, SymbolicExt}; +use sp1_recursion_core::DIGEST_SIZE; +use sp1_stark::{InnerChallenge, InnerChallengeMmcs, InnerPcsProof, InnerVal}; +use std::{ + cmp::Reverse, + iter::{once, repeat_with, zip}, }; use crate::{ - challenger::MultiField32ChallengerVariable, - mmcs::verify_batch, - types::{ - BatchOpeningVariable, FriChallenges, FriCommitPhaseProofStepVariable, FriProofVariable, - FriQueryProofVariable, OuterDigestVariable, TwoAdicPcsProofVariable, - TwoAdicPcsRoundVariable, - }, - DIGEST_SIZE, + challenger::{CanSampleBitsVariable, FieldChallengerVariable}, + BabyBearFriConfigVariable, CanObserveVariable, CircuitConfig, Ext, FriChallenges, FriMmcs, + FriProofVariable, FriQueryProofVariable, TwoAdicPcsProofVariable, TwoAdicPcsRoundVariable, }; -pub fn verify_shape_and_sample_challenges( +#[derive(Debug, Clone, Copy)] +pub struct PolynomialShape { + pub width: usize, + pub log_degree: usize, +} + +#[derive(Debug, Clone)] + +pub struct PolynomialBatchShape { + pub shapes: 
Vec, +} + +pub fn verify_shape_and_sample_challenges< + C: CircuitConfig, + SC: BabyBearFriConfigVariable, +>( builder: &mut Builder, - config: &FriConfig, - proof: &FriProofVariable, - challenger: &mut MultiField32ChallengerVariable, + config: &FriConfig>, + proof: &FriProofVariable, + challenger: &mut SC::FriChallengerVariable, ) -> FriChallenges { - let mut betas = vec![]; - - for i in 0..proof.commit_phase_commits.len() { - let commitment: [Var; DIGEST_SIZE] = proof.commit_phase_commits[i]; - challenger.observe_commitment(builder, commitment); - let sample = challenger.sample_ext(builder); - betas.push(sample); - } + let betas = proof + .commit_phase_commits + .iter() + .map(|commitment| { + challenger.observe(builder, *commitment); + challenger.sample_ext(builder) + }) + .collect(); // Observe the final polynomial. - let final_poly_felts = builder.ext2felt_circuit(proof.final_poly); + let final_poly_felts = C::ext2felt(builder, proof.final_poly); final_poly_felts.iter().for_each(|felt| { challenger.observe(builder, *felt); }); @@ -50,27 +61,36 @@ pub fn verify_shape_and_sample_challenges( challenger.check_witness(builder, config.proof_of_work_bits, proof.pow_witness); let log_max_height = proof.commit_phase_commits.len() + config.log_blowup; - let query_indices: Vec> = - (0..config.num_queries).map(|_| challenger.sample_bits(builder, log_max_height)).collect(); + let query_indices: Vec> = + repeat_with(|| challenger.sample_bits(builder, log_max_height)) + .take(config.num_queries) + .collect(); FriChallenges { query_indices, betas } } -pub fn verify_two_adic_pcs( +pub fn verify_two_adic_pcs, SC: BabyBearFriConfigVariable>( builder: &mut Builder, - config: &FriConfig, - proof: &TwoAdicPcsProofVariable, - challenger: &mut MultiField32ChallengerVariable, - rounds: Vec>, + config: &FriConfig>, + proof: &TwoAdicPcsProofVariable, + challenger: &mut SC::FriChallengerVariable, + rounds: Vec>, ) { - builder.cycle_tracker("2adic"); let alpha = challenger.sample_ext(builder); let fri_challenges = - verify_shape_and_sample_challenges(builder, config, &proof.fri_proof, challenger); + verify_shape_and_sample_challenges::(builder, config, &proof.fri_proof, challenger); let log_global_max_height = proof.fri_proof.commit_phase_commits.len() + config.log_blowup; + // Precompute the two-adic powers of the two-adic generator. They can be loaded in as constants. + // The ith element has order 2^(log_global_max_height - i). + let mut precomputed_generator_powers: Vec> = vec![]; + for i in 0..log_global_max_height + 1 { + precomputed_generator_powers + .push(builder.constant(C::F::two_adic_generator(log_global_max_height - i))); + } + // The powers of alpha, where the ith element is alpha^i. let mut alpha_pows: Vec> = vec![builder.eval(SymbolicExt::from_f(C::EF::one()))]; @@ -79,101 +99,145 @@ pub fn verify_two_adic_pcs( .query_openings .iter() .zip(&fri_challenges.query_indices) - .map(|(query_opening, &index)| { + .map(|(query_opening, index_bits)| { + // The powers of alpha, where the ith element is alpha^i. + let mut log_height_pow = [0usize; 32]; let mut ro: [Ext; 32] = [builder.eval(SymbolicExt::from_f(C::EF::zero())); 32]; - // An array of the current power for each log_height. 
- let mut log_height_pow = [0usize; 32]; - for (batch_opening, round) in izip!(query_opening.clone(), &rounds) { + for (batch_opening, round) in zip(query_opening, rounds.iter().cloned()) { let batch_commit = round.batch_commit; - let mats = &round.mats; + let mats = round.domains_points_and_opens; let batch_heights = mats.iter().map(|mat| mat.domain.size() << config.log_blowup).collect_vec(); - let batch_dims = batch_heights - .iter() - .map(|&height| Dimensions { width: 0, height }) - .collect_vec(); let batch_max_height = batch_heights.iter().max().expect("Empty batch?"); let log_batch_max_height = log2_strict_usize(*batch_max_height); let bits_reduced = log_global_max_height - log_batch_max_height; - let index_bits = builder.num2bits_v_circuit(index, 32); - let reduced_index_bits = index_bits[bits_reduced..].to_vec(); + let reduced_index_bits = &index_bits[bits_reduced..]; - verify_batch::( + verify_batch::( builder, batch_commit, - batch_dims, + &batch_heights, reduced_index_bits, batch_opening.opened_values.clone(), batch_opening.opening_proof.clone(), ); - for (mat_opening, mat) in izip!(batch_opening.opened_values.clone(), mats) { + + for (mat_opening, mat) in izip!(&batch_opening.opened_values, mats) { let mat_domain = mat.domain; - let mat_points = &mat.points; - let mat_values = &mat.values; + let mat_points = mat.points; + let mat_values = mat.values; let log_height = log2_strict_usize(mat_domain.size()) + config.log_blowup; let bits_reduced = log_global_max_height - log_height; - let rev_reduced_index = builder - .reverse_bits_len_circuit(index_bits[bits_reduced..].to_vec(), log_height); + let reduced_index_bits_trunc = + index_bits[bits_reduced..(bits_reduced + log_height)].to_vec(); let g = builder.generator(); - let two_adic_generator: Felt<_> = - builder.eval(C::F::two_adic_generator(log_height)); - let two_adic_generator_exp = - builder.exp_f_bits(two_adic_generator, rev_reduced_index); - let x: Felt<_> = builder.eval(g * two_adic_generator_exp); + let two_adic_generator_exp = C::exp_f_bits_precomputed( + builder, + &reduced_index_bits_trunc.into_iter().rev().collect_vec(), + &precomputed_generator_powers[bits_reduced..], + ); + + // Unroll the following to avoid symbolic expression overhead + // let x: Felt<_> = builder.eval(g * two_adic_generator_exp); + let x: Felt<_> = builder.uninit(); + builder.push_op(DslIr::MulF(x, g, two_adic_generator_exp)); for (z, ps_at_z) in izip!(mat_points, mat_values) { - builder.cycle_tracker("2adic-hotloop"); - let mut acc: Ext = - builder.eval(SymbolicExt::from_f(C::EF::zero())); - for (p_at_x, &p_at_z) in izip!(mat_opening.clone(), ps_at_z) { + // Unroll the loop calculation to avoid symbolic expression overhead + + // let mut acc: Ext = builder.constant(C::EF::zero()); + let mut acc: Ext<_, _> = builder.uninit(); + + builder.push_op(DslIr::ImmE(acc, C::EF::zero())); + for (p_at_x, p_at_z) in izip!(mat_opening.clone(), ps_at_z) { let pow = log_height_pow[log_height]; // Fill in any missing powers of alpha. 
- (alpha_pows.len()..pow + 1).for_each(|_| { - let new_alpha = builder.eval(*alpha_pows.last().unwrap() * alpha); + for _ in alpha_pows.len()..pow + 1 { + // let new_alpha = builder.eval(*alpha_pows.last().unwrap() * + // alpha); + let new_alpha: Ext<_, _> = builder.uninit(); + builder.push_op(DslIr::MulE( + new_alpha, + *alpha_pows.last().unwrap(), + alpha, + )); builder.reduce_e(new_alpha); alpha_pows.push(new_alpha); - }); - acc = builder.eval(acc + (alpha_pows[pow] * (p_at_z - p_at_x[0]))); + } + // Unroll: + // + // acc = builder.eval(acc + (alpha_pows[pow] * (p_at_z - p_at_x[0]))); + + // let temp_1 = p_at_z - p_at_x[0]; + let temp_1: Ext<_, _> = builder.uninit(); + builder.push_op(DslIr::SubEF(temp_1, p_at_z, p_at_x[0])); + // let temp_2 = alpha_pows[pow] * temp_1; + let temp_2: Ext<_, _> = builder.uninit(); + builder.push_op(DslIr::MulE(temp_2, alpha_pows[pow], temp_1)); + // let temp_3 = acc + temp_2; + let temp_3: Ext<_, _> = builder.uninit(); + builder.push_op(DslIr::AddE(temp_3, acc, temp_2)); + // acc = temp_3; + acc = temp_3; + log_height_pow[log_height] += 1; } - ro[log_height] = builder.eval(ro[log_height] + acc / (*z - x)); - builder.cycle_tracker("2adic-hotloop"); + // Unroll this calculation to avoid symbolic expression overhead + // ro[log_height] = builder.eval(ro[log_height] + acc / (z - x)); + + // let temp_1 = z - x; + let temp_1: Ext<_, _> = builder.uninit(); + builder.push_op(DslIr::SubEF(temp_1, z, x)); + + // let temp_2 = acc / (temp_1); + let temp_2: Ext<_, _> = builder.uninit(); + builder.push_op(DslIr::DivE(temp_2, acc, temp_1)); + + // let temp_3 = rp[log_height] + temp_2; + let temp_3: Ext<_, _> = builder.uninit(); + builder.push_op(DslIr::AddE(temp_3, ro[log_height], temp_2)); + + // ro[log_height] = temp_3; + ro[log_height] = temp_3; } } } ro }) .collect::>(); - builder.cycle_tracker("2adic"); - builder.cycle_tracker("challenges"); - verify_challenges(builder, config, &proof.fri_proof, &fri_challenges, reduced_openings); - builder.cycle_tracker("challenges"); + verify_challenges::( + builder, + config, + proof.fri_proof.clone(), + &fri_challenges, + reduced_openings, + ); } -pub fn verify_challenges( +pub fn verify_challenges, SC: BabyBearFriConfigVariable>( builder: &mut Builder, - config: &FriConfig, - proof: &FriProofVariable, + config: &FriConfig>, + proof: FriProofVariable, challenges: &FriChallenges, reduced_openings: Vec<[Ext; 32]>, ) { let log_max_height = proof.commit_phase_commits.len() + config.log_blowup; - for (&index, query_proof, ro) in - izip!(&challenges.query_indices, &proof.query_proofs, reduced_openings) + for ((index_bits, query_proof), ro) in + challenges.query_indices.iter().zip(proof.query_proofs).zip(reduced_openings) { - let folded_eval = verify_query( + let folded_eval = verify_query::( builder, - proof.commit_phase_commits.clone(), - index, - query_proof.clone(), - challenges.betas.clone(), + &proof.commit_phase_commits, + index_bits, + query_proof, + &challenges.betas, ro, log_max_height, ); @@ -182,267 +246,408 @@ pub fn verify_challenges( } } -pub fn verify_query( +pub fn verify_query, SC: BabyBearFriConfigVariable>( builder: &mut Builder, - commit_phase_commits: Vec>, - index: Var, - proof: FriQueryProofVariable, - betas: Vec>, + commit_phase_commits: &[SC::DigestVariable], + index_bits: &[C::Bit], + proof: FriQueryProofVariable, + betas: &[Ext], reduced_openings: [Ext; 32], log_max_height: usize, ) -> Ext { - let mut folded_eval: Ext = builder.eval(SymbolicExt::from_f(C::EF::zero())); - let two_adic_generator = - 
builder.eval(SymbolicExt::from_f(C::EF::two_adic_generator(log_max_height))); - let index_bits = builder.num2bits_v_circuit(index, 32); - let rev_reduced_index = builder.reverse_bits_len_circuit(index_bits.clone(), log_max_height); - let mut x = builder.exp_e_bits(two_adic_generator, rev_reduced_index); - builder.reduce_e(x); - - let mut offset = 0; - for (log_folded_height, commit, step, beta) in - izip!((0..log_max_height).rev(), commit_phase_commits, &proof.commit_phase_openings, betas,) - { + let mut folded_eval: Ext<_, _> = builder.constant(C::EF::zero()); + let two_adic_generator: Felt<_> = builder.constant(C::F::two_adic_generator(log_max_height)); + + // TODO: fix expreversebits address bug to avoid needing to allocate a new variable. + let mut x = + C::exp_reverse_bits(builder, two_adic_generator, index_bits[..log_max_height].to_vec()); + // let mut x = builder.uninit(); + // builder.push(DslIr::AddFI(x, x_f, C::F::zero())); + + // let mut x = builder.eval(x + C::F::zero()); + // let mut x: Ext<_, _> = builder.eval(SymbolicExt::one() * SymbolicFelt::from(x_felt)); + + for (offset, log_folded_height, commit, step, beta) in izip!( + 0.., + (0..log_max_height).rev(), + commit_phase_commits, + &proof.commit_phase_openings, + betas, + ) { folded_eval = builder.eval(folded_eval + reduced_openings[log_folded_height + 1]); - let one: Var<_> = builder.eval(C::N::one()); - let index_sibling: Var<_> = builder.eval(one - index_bits.clone()[offset]); + let index_sibling_complement: C::Bit = index_bits[offset]; let index_pair = &index_bits[(offset + 1)..]; - // Reduce folded_eval (mod the BabyBear prime) since it gets used multiple times below and - // the reductions will be repeated. builder.reduce_e(folded_eval); - let evals_ext = [ - builder.select_ef(index_sibling, folded_eval, step.sibling_value), - builder.select_ef(index_sibling, step.sibling_value, folded_eval), - ]; + let evals_ext = C::select_chain_ef( + builder, + index_sibling_complement, + once(folded_eval), + once(step.sibling_value), + ); let evals_felt = vec![ - builder.ext2felt_circuit(evals_ext[0]).to_vec(), - builder.ext2felt_circuit(evals_ext[1]).to_vec(), + C::ext2felt(builder, evals_ext[0]).to_vec(), + C::ext2felt(builder, evals_ext[1]).to_vec(), ]; - let dims = &[Dimensions { width: 2, height: (1 << log_folded_height) }]; - verify_batch::( + let heights = &[1 << log_folded_height]; + verify_batch::( builder, - commit, - dims.to_vec(), - index_pair.to_vec(), + *commit, + heights, + index_pair, [evals_felt].to_vec(), step.opening_proof.clone(), ); - let xs_new = builder.eval(x * C::EF::two_adic_generator(1)); - let xs = [ - builder.select_ef(index_sibling, x, xs_new), - builder.select_ef(index_sibling, xs_new, x), - ]; - folded_eval = builder - .eval(evals_ext[0] + (beta - xs[0]) * (evals_ext[1] - evals_ext[0]) / (xs[1] - xs[0])); - x = builder.eval(x * x); - builder.reduce_e(x); - offset += 1; + let xs_new: Felt<_> = builder.eval(x * C::F::two_adic_generator(1)); + let xs = C::select_chain_f(builder, index_sibling_complement, once(x), once(xs_new)); + + // Unroll the `folded_eval` calculation to avoid symbolic expression overhead. 
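+ // The fold below is the usual FRI step: interpolate the line through (xs[0], evals_ext[0])
+ // and (xs[1], evals_ext[1]) and evaluate it at the challenge beta, i.e.
+ // e0 + (beta - x0) * (e1 - e0) / (x1 - x0); squaring x afterwards moves to the half-size
+ // domain for the next round.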
+ // folded_eval = builder + // .eval(evals_ext[0] + (beta - xs[0]) * (evals_ext[1] - evals_ext[0]) / (xs[1] - + // xs[0])); x = builder.eval(x * x); + + // let temp_1 = xs[1] - xs[0]; + let temp_1: Felt<_> = builder.uninit(); + builder.push_op(DslIr::SubF(temp_1, xs[1], xs[0])); + + // let temp_2 = evals_ext[1] - evals_ext[0]; + let temp_2: Ext<_, _> = builder.uninit(); + builder.push_op(DslIr::SubE(temp_2, evals_ext[1], evals_ext[0])); + + // let temp_3 = temp_2 / temp_1; + let temp_3: Ext<_, _> = builder.uninit(); + builder.push_op(DslIr::DivEF(temp_3, temp_2, temp_1)); + + // let temp_4 = beta - xs[0]; + let temp_4: Ext<_, _> = builder.uninit(); + builder.push_op(DslIr::SubEF(temp_4, *beta, xs[0])); + + // let temp_5 = temp_4 * temp_3; + let temp_5: Ext<_, _> = builder.uninit(); + builder.push_op(DslIr::MulE(temp_5, temp_4, temp_3)); + + // let temp65 = evals_ext[0] + temp_5; + let temp_6: Ext<_, _> = builder.uninit(); + builder.push_op(DslIr::AddE(temp_6, evals_ext[0], temp_5)); + // folded_eval = temp_6; + folded_eval = temp_6; + + // let temp_7 = x * x; + let temp_7: Felt<_> = builder.uninit(); + builder.push_op(DslIr::MulF(temp_7, x, x)); + // x = temp_7; + x = temp_7; } folded_eval } -pub fn const_fri_proof( - builder: &mut Builder, - fri_proof: OuterFriProof, -) -> FriProofVariable { - // Set the commit phase commits. - let commit_phase_commits = fri_proof - .commit_phase_commits - .iter() - .map(|commit| { - let commit: [Bn254Fr; DIGEST_SIZE] = (*commit).into(); - let commit: Var<_> = builder.eval(commit[0]); - [commit; DIGEST_SIZE] - }) + +pub fn verify_batch, SC: BabyBearFriConfigVariable>( + builder: &mut Builder, + commit: SC::DigestVariable, + heights: &[usize], + index_bits: &[C::Bit], + opened_values: Vec>>>, + proof: Vec, +) { + let mut heights_tallest_first = + heights.iter().enumerate().sorted_by_key(|(_, height)| Reverse(*height)).peekable(); + + let mut curr_height_padded = heights_tallest_first.peek().unwrap().1.next_power_of_two(); + + let ext_slice: Vec>> = heights_tallest_first + .peeking_take_while(|(_, height)| height.next_power_of_two() == curr_height_padded) + .flat_map(|(i, _)| opened_values[i].as_slice()) + .cloned() .collect::>(); + let felt_slice: Vec> = ext_slice.into_iter().flatten().collect::>(); + let mut root: SC::DigestVariable = SC::hash(builder, &felt_slice[..]); - // Set the query proofs. 
- let query_proofs = fri_proof - .query_proofs - .iter() - .map(|query_proof| { - let commit_phase_openings = query_proof - .commit_phase_openings - .iter() - .map(|commit_phase_opening| { - let sibling_value = - builder.eval(SymbolicExt::from_f(commit_phase_opening.sibling_value)); - let opening_proof = commit_phase_opening - .opening_proof - .iter() - .map(|sibling| { - let commit: Var<_> = builder.eval(sibling[0]); - [commit; DIGEST_SIZE] - }) - .collect::>(); - FriCommitPhaseProofStepVariable { sibling_value, opening_proof } - }) + zip(index_bits.iter(), proof).for_each(|(&bit, sibling): (&C::Bit, SC::DigestVariable)| { + let compress_args = SC::select_chain_digest(builder, bit, [root, sibling]); + + root = SC::compress(builder, compress_args); + curr_height_padded >>= 1; + + let next_height = heights_tallest_first + .peek() + .map(|(_, height)| *height) + .filter(|h| h.next_power_of_two() == curr_height_padded); + + if let Some(next_height) = next_height { + let ext_slice: Vec>> = heights_tallest_first + .peeking_take_while(|(_, height)| *height == next_height) + .flat_map(|(i, _)| opened_values[i].clone()) .collect::>(); - FriQueryProofVariable { commit_phase_openings } - }) - .collect::>(); + let felt_slice: Vec> = ext_slice.into_iter().flatten().collect::>(); + let next_height_openings_digest = SC::hash(builder, &felt_slice); + root = SC::compress(builder, [root, next_height_openings_digest]); + } + }); - // Initialize the FRI proof variable. - FriProofVariable { - commit_phase_commits, - query_proofs, - final_poly: builder.eval(SymbolicExt::from_f(fri_proof.final_poly)), - pow_witness: builder.eval(fri_proof.pow_witness), + SC::assert_digest_eq(builder, root, commit); +} + +pub fn dummy_hash() -> Hash { + [BabyBear::zero(); DIGEST_SIZE].into() +} + +pub fn dummy_query_proof( + height: usize, + log_blowup: usize, +) -> QueryProof { + QueryProof { + commit_phase_openings: (0..height) + .map(|i| CommitPhaseProofStep { + sibling_value: InnerChallenge::zero(), + opening_proof: vec![dummy_hash().into(); height - i + log_blowup - 1], + }) + .collect(), } } -pub fn const_two_adic_pcs_proof( - builder: &mut Builder, - proof: TwoAdicFriPcsProof, -) -> TwoAdicPcsProofVariable { - let fri_proof = const_fri_proof(builder, proof.fri_proof); - let query_openings = proof - .query_openings +/// Make a dummy PCS proof for a given proof shape. Used to generate vkey information for fixed proof +/// shapes. +/// +/// The parameter `batch_shapes` contains (width, height) data for each matrix in each batch. +pub fn dummy_pcs_proof( + fri_queries: usize, + batch_shapes: &[PolynomialBatchShape], + log_blowup: usize, +) -> InnerPcsProof { + let max_height = batch_shapes .iter() - .map(|query_opening| { - query_opening + .map(|shape| shape.shapes.iter().map(|shape| shape.log_degree).max().unwrap()) + .max() + .unwrap(); + let fri_proof = FriProof { + commit_phase_commits: vec![dummy_hash(); max_height], + query_proofs: vec![dummy_query_proof(max_height, log_blowup); fri_queries], + final_poly: InnerChallenge::zero(), + pow_witness: InnerVal::zero(), + }; + + // For each query, create a dummy batch opening for each matrix in the batch. `batch_shapes` + // determines the sizes of each dummy batch opening. 
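+    // For example (an illustrative shape, not taken from the surrounding code): a single batch
+    // holding two matrices of widths 4 and 8 with log-degrees 10 and 12 would be described as
+    //
+    //     let batch_shapes = vec![PolynomialBatchShape {
+    //         shapes: vec![
+    //             PolynomialShape { width: 4, log_degree: 10 },
+    //             PolynomialShape { width: 8, log_degree: 12 },
+    //         ],
+    //     }];
+    //
+    // giving `max_height = 12`, so the dummy FRI proof above has 12 commit phase commits, and with
+    // `log_blowup = 1` the batch opening below gets an opening proof of length 12 + 1 = 13.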
+ let query_openings = (0..fri_queries) + .map(|_| { + batch_shapes .iter() - .map(|opening| BatchOpeningVariable { - opened_values: opening - .opened_values - .iter() - .map(|opened_value| { - opened_value - .iter() - .map(|value| vec![builder.eval::, _>(*value)]) - .collect::>() - }) - .collect::>(), - opening_proof: opening - .opening_proof - .iter() - .map(|opening_proof| [builder.eval(opening_proof[0])]) - .collect::>(), + .map(|shapes| { + let batch_max_height = + shapes.shapes.iter().map(|shape| shape.log_degree).max().unwrap(); + BatchOpening { + opened_values: shapes + .shapes + .iter() + .map(|shape| vec![BabyBear::zero(); shape.width]) + .collect(), + opening_proof: vec![dummy_hash().into(); batch_max_height + log_blowup], + } }) .collect::>() }) .collect::>(); - TwoAdicPcsProofVariable { fri_proof, query_openings } + TwoAdicFriPcsProof { fri_proof, query_openings } } -#[cfg(test)] -pub mod tests { - use p3_bn254_fr::Bn254Fr; +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + challenger::DuplexChallengerVariable, + utils::tests::run_test_recursion, + witness::{WitnessBlock, Witnessable}, + FriCommitPhaseProofStepVariable, FriProofVariable, FriQueryProofVariable, + TwoAdicPcsMatsVariable, + }; use p3_challenger::{CanObserve, CanSample, FieldChallenger}; - use p3_commit::{Pcs, TwoAdicMultiplicativeCoset}; + use p3_commit::Pcs; use p3_field::AbstractField; use p3_fri::verifier; use p3_matrix::dense::RowMajorMatrix; - use rand::rngs::OsRng; + use rand::{ + rngs::{OsRng, StdRng}, + SeedableRng, + }; use sp1_recursion_compiler::{ - config::OuterConfig, - constraints::ConstraintCompiler, - ir::{Builder, Ext, SymbolicExt, Var, Witness}, + circuit::AsmBuilder, + config::InnerConfig, + ir::{Builder, Ext, SymbolicExt}, }; - use sp1_recursion_core::stark::config::{ - outer_perm, test_fri_config, OuterChallenge, OuterChallenger, OuterCompress, OuterDft, - OuterHash, OuterPcs, OuterVal, OuterValMmcs, + use sp1_stark::{ + baby_bear_poseidon2::BabyBearPoseidon2, inner_fri_config, inner_perm, InnerChallenge, + InnerChallenger, InnerCompress, InnerDft, InnerFriProof, InnerHash, InnerPcs, InnerVal, + InnerValMmcs, StarkGenericConfig, }; - use sp1_recursion_gnark_ffi::PlonkBn254Prover; - use super::{verify_shape_and_sample_challenges, verify_two_adic_pcs, TwoAdicPcsRoundVariable}; - use crate::{ - challenger::MultiField32ChallengerVariable, - types::{OuterDigestVariable, TwoAdicPcsMatsVariable}, - DIGEST_SIZE, - }; + use sp1_recursion_core::DIGEST_SIZE; - pub fn const_two_adic_pcs_rounds( - builder: &mut Builder, - commit: [Bn254Fr; DIGEST_SIZE], - os: Vec<(TwoAdicMultiplicativeCoset, Vec<(OuterChallenge, Vec)>)>, - ) -> (OuterDigestVariable, Vec>) { - let commit: OuterDigestVariable = [builder.eval(commit[0])]; - - let mut mats = Vec::new(); - for (domain, poly) in os.into_iter() { - let points: Vec> = - poly.iter().map(|(p, _)| builder.eval(SymbolicExt::from_f(*p))).collect::>(); - let values: Vec>> = poly - .iter() - .map(|(_, v)| { - v.clone() - .iter() - .map(|t| builder.eval(SymbolicExt::from_f(*t))) - .collect::>() - }) - .collect::>(); - let mat = TwoAdicPcsMatsVariable { domain, points, values }; - mats.push(mat); + type C = InnerConfig; + type SC = BabyBearPoseidon2; + type F = ::Val; + type EF = ::Challenge; + + pub fn const_fri_proof( + builder: &mut AsmBuilder, + fri_proof: InnerFriProof, + ) -> FriProofVariable { + // Set the commit phase commits. 
+ let commit_phase_commits = fri_proof + .commit_phase_commits + .iter() + .map(|commit| { + let commit: [F; DIGEST_SIZE] = (*commit).into(); + commit.map(|x| builder.eval(x)) + }) + .collect::>(); + + // Set the query proofs. + let query_proofs = fri_proof + .query_proofs + .iter() + .map(|query_proof| { + let commit_phase_openings = query_proof + .commit_phase_openings + .iter() + .map(|commit_phase_opening| { + let sibling_value = + builder.eval(SymbolicExt::from_f(commit_phase_opening.sibling_value)); + let opening_proof = commit_phase_opening + .opening_proof + .iter() + .map(|sibling| sibling.map(|x| builder.eval(x))) + .collect::>(); + FriCommitPhaseProofStepVariable { sibling_value, opening_proof } + }) + .collect::>(); + FriQueryProofVariable { commit_phase_openings } + }) + .collect::>(); + + // Initialize the FRI proof variable. + FriProofVariable { + commit_phase_commits, + query_proofs, + final_poly: builder.eval(SymbolicExt::from_f(fri_proof.final_poly)), + pow_witness: builder.eval(fri_proof.pow_witness), } + } - (commit, vec![TwoAdicPcsRoundVariable { batch_commit: commit, mats }]) + /// Reference: https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/merkle-tree/src/mmcs.rs#L421 + #[test] + fn size_gaps() { + use p3_commit::Mmcs; + let perm = inner_perm(); + let hash = InnerHash::new(perm.clone()); + let compress = InnerCompress::new(perm); + let mmcs = InnerValMmcs::new(hash, compress); + + let mut builder = Builder::::default(); + + // 4 mats with 1000 rows, 8 columns + let large_mats = (0..4).map(|_| RowMajorMatrix::::rand(&mut OsRng, 1000, 8)); + let large_mat_heights = (0..4).map(|_| 1000); + + // 5 mats with 70 rows, 8 columns + let medium_mats = (0..5).map(|_| RowMajorMatrix::::rand(&mut OsRng, 70, 8)); + let medium_mat_heights = (0..5).map(|_| 70); + + // 6 mats with 8 rows, 8 columns + let small_mats = (0..6).map(|_| RowMajorMatrix::::rand(&mut OsRng, 8, 8)); + let small_mat_heights = (0..6).map(|_| 8); + + let (commit, prover_data) = + mmcs.commit(large_mats.chain(medium_mats).chain(small_mats).collect_vec()); + + let commit: [_; DIGEST_SIZE] = commit.into(); + let commit = commit.map(|x| builder.eval(x)); + // open the 6th row of each matrix and verify + let (opened_values, proof) = mmcs.open_batch(6, &prover_data); + let opened_values = opened_values + .into_iter() + .map(|x| x.into_iter().map(|y| vec![builder.eval::, _>(y)]).collect()) + .collect(); + let index = builder.eval(F::from_canonical_u32(6)); + let index_bits = C::num2bits(&mut builder, index, 31); + let proof = proof.into_iter().map(|p| p.map(|x| builder.eval(x))).collect(); + verify_batch::<_, SC>( + &mut builder, + commit, + &large_mat_heights.chain(medium_mat_heights).chain(small_mat_heights).collect_vec(), + &index_bits, + opened_values, + proof, + ); } #[test] fn test_fri_verify_shape_and_sample_challenges() { let mut rng = &mut OsRng; let log_degrees = &[16, 9, 7, 4, 2]; - let perm = outer_perm(); - let fri_config = test_fri_config(); - let hash = OuterHash::new(perm.clone()).unwrap(); - let compress = OuterCompress::new(perm.clone()); - let val_mmcs = OuterValMmcs::new(hash, compress); - let dft = OuterDft {}; - let pcs: OuterPcs = - OuterPcs::new(log_degrees.iter().copied().max().unwrap(), dft, val_mmcs, fri_config); + let perm = inner_perm(); + let fri_config = inner_fri_config(); + let hash = InnerHash::new(perm.clone()); + let compress = InnerCompress::new(perm.clone()); + let val_mmcs = InnerValMmcs::new(hash, compress); + let dft = InnerDft {}; + let pcs: 
InnerPcs = + InnerPcs::new(log_degrees.iter().copied().max().unwrap(), dft, val_mmcs, fri_config); // Generate proof. let domains_and_polys = log_degrees .iter() .map(|&d| { ( - >::natural_domain_for_degree( + >::natural_domain_for_degree( &pcs, 1 << d, ), - RowMajorMatrix::::rand(&mut rng, 1 << d, 10), + RowMajorMatrix::::rand(&mut rng, 1 << d, 10), ) }) .collect::>(); - let (commit, data) = >::commit( + let (commit, data) = >::commit( &pcs, domains_and_polys.clone(), ); - let mut challenger = OuterChallenger::new(perm.clone()).unwrap(); + let mut challenger = InnerChallenger::new(perm.clone()); challenger.observe(commit); - let zeta = challenger.sample_ext_element::(); - let points = domains_and_polys.iter().map(|_| vec![zeta]).collect::>(); + let zeta = challenger.sample_ext_element::(); + let points = repeat_with(|| vec![zeta]).take(domains_and_polys.len()).collect::>(); let (_, proof) = pcs.open(vec![(&data, points)], &mut challenger); // Verify proof. - let mut challenger = OuterChallenger::new(perm.clone()).unwrap(); + let mut challenger = InnerChallenger::new(perm.clone()); challenger.observe(commit); - let _: OuterChallenge = challenger.sample(); + let _: InnerChallenge = challenger.sample(); let fri_challenges_gt = verifier::verify_shape_and_sample_challenges( - &test_fri_config(), + &inner_fri_config(), &proof.fri_proof, &mut challenger, ) .unwrap(); // Define circuit. - let mut builder = Builder::::default(); - let config = test_fri_config(); - let fri_proof = super::const_fri_proof(&mut builder, proof.fri_proof); - - let mut challenger = MultiField32ChallengerVariable::new(&mut builder); - let commit: [Bn254Fr; DIGEST_SIZE] = commit.into(); - let commit: Var<_> = builder.eval(commit[0]); - challenger.observe_commitment(&mut builder, [commit]); + let mut builder = Builder::::default(); + let config = inner_fri_config(); + let fri_proof = const_fri_proof(&mut builder, proof.fri_proof); + + let mut challenger = DuplexChallengerVariable::new(&mut builder); + let commit: [_; DIGEST_SIZE] = commit.into(); + let commit: [Felt; DIGEST_SIZE] = commit.map(|x| builder.eval(x)); + challenger.observe_slice(&mut builder, commit); let _ = challenger.sample_ext(&mut builder); - let fri_challenges = - verify_shape_and_sample_challenges(&mut builder, &config, &fri_proof, &mut challenger); + let fri_challenges = verify_shape_and_sample_challenges::( + &mut builder, + &config, + &fri_proof, + &mut challenger, + ); for i in 0..fri_challenges_gt.betas.len() { builder.assert_ext_eq( @@ -452,79 +657,147 @@ pub mod tests { } for i in 0..fri_challenges_gt.query_indices.len() { - builder.assert_var_eq( - Bn254Fr::from_canonical_usize(fri_challenges_gt.query_indices[i]), - fri_challenges.query_indices[i], + let query_indices = + C::bits2num(&mut builder, fri_challenges.query_indices[i].iter().cloned()); + builder.assert_felt_eq( + F::from_canonical_usize(fri_challenges_gt.query_indices[i]), + query_indices, ); } - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - PlonkBn254Prover::test::(constraints.clone(), Witness::default()); + run_test_recursion(builder.into_operations(), None); } #[test] - fn test_verify_two_adic_pcs() { - let mut rng = &mut OsRng; + fn test_verify_two_adic_pcs_inner() { + let mut rng = StdRng::seed_from_u64(0xDEADBEEF); let log_degrees = &[19, 19]; - let perm = outer_perm(); - let fri_config = test_fri_config(); - let hash = OuterHash::new(perm.clone()).unwrap(); - let compress = OuterCompress::new(perm.clone()); - let 
val_mmcs = OuterValMmcs::new(hash, compress); - let dft = OuterDft {}; - let pcs: OuterPcs = - OuterPcs::new(log_degrees.iter().copied().max().unwrap(), dft, val_mmcs, fri_config); + let perm = inner_perm(); + let fri_config = inner_fri_config(); + let hash = InnerHash::new(perm.clone()); + let compress = InnerCompress::new(perm.clone()); + let val_mmcs = InnerValMmcs::new(hash, compress); + let dft = InnerDft {}; + let pcs: InnerPcs = + InnerPcs::new(log_degrees.iter().copied().max().unwrap(), dft, val_mmcs, fri_config); // Generate proof. let domains_and_polys = log_degrees .iter() .map(|&d| { ( - >::natural_domain_for_degree( + >::natural_domain_for_degree( &pcs, 1 << d, ), - RowMajorMatrix::::rand(&mut rng, 1 << d, 100), + RowMajorMatrix::::rand(&mut rng, 1 << d, 100), ) }) .collect::>(); - let (commit, data) = >::commit( + let (commit, data) = >::commit( &pcs, domains_and_polys.clone(), ); - let mut challenger = OuterChallenger::new(perm.clone()).unwrap(); + let mut challenger = InnerChallenger::new(perm.clone()); challenger.observe(commit); - let zeta = challenger.sample_ext_element::(); + let zeta = challenger.sample_ext_element::(); let points = domains_and_polys.iter().map(|_| vec![zeta]).collect::>(); let (opening, proof) = pcs.open(vec![(&data, points)], &mut challenger); // Verify proof. - let mut challenger = OuterChallenger::new(perm.clone()).unwrap(); + let mut challenger = InnerChallenger::new(perm.clone()); challenger.observe(commit); - challenger.sample_ext_element::(); - let os: Vec<( - TwoAdicMultiplicativeCoset, - Vec<(OuterChallenge, Vec)>, - )> = domains_and_polys + let x1 = challenger.sample_ext_element::(); + let os = domains_and_polys .iter() .zip(&opening[0]) .map(|((domain, _), mat_openings)| (*domain, vec![(zeta, mat_openings[0].clone())])) - .collect(); + .collect::>(); pcs.verify(vec![(commit, os.clone())], &proof, &mut challenger).unwrap(); + let batch_shapes = vec![PolynomialBatchShape { + shapes: log_degrees + .iter() + .copied() + .map(|d| PolynomialShape { width: 100, log_degree: d }) + .collect(), + }]; + + let dummy_proof = dummy_pcs_proof( + inner_fri_config().num_queries, + &batch_shapes, + inner_fri_config().log_blowup, + ); + + let dummy_commit = dummy_hash(); + let dummy_openings = os + .iter() + .map(|(domain, points_and_openings)| { + ( + *domain, + points_and_openings + .iter() + .map(|(_, row)| { + ( + InnerChallenge::zero(), + row.iter().map(|_| InnerChallenge::zero()).collect_vec(), + ) + }) + .collect_vec(), + ) + }) + .collect::>(); + // Define circuit. 
- let mut builder = Builder::::default(); - let config = test_fri_config(); - let proof = super::const_two_adic_pcs_proof(&mut builder, proof); - let (commit, rounds) = const_two_adic_pcs_rounds(&mut builder, commit.into(), os); - let mut challenger = MultiField32ChallengerVariable::new(&mut builder); - challenger.observe_commitment(&mut builder, commit); - challenger.sample_ext(&mut builder); - verify_two_adic_pcs(&mut builder, &config, &proof, &mut challenger, rounds); - - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - PlonkBn254Prover::test::(constraints.clone(), Witness::default()); + let mut builder = Builder::::default(); + let config = inner_fri_config(); + + let proof_variable = dummy_proof.read(&mut builder); + let commit_variable = dummy_commit.read(&mut builder); + + let domains_points_and_opens = dummy_openings + .into_iter() + .map(|(domain, points_and_opens)| { + let mut points = vec![]; + let mut opens = vec![]; + for (point, opening_for_point) in points_and_opens { + points.push(InnerChallenge::read(&point, &mut builder)); + opens.push(Vec::::read(&opening_for_point, &mut builder)); + } + TwoAdicPcsMatsVariable { domain, points, values: opens } + }) + .collect::>(); + + let rounds = vec![TwoAdicPcsRoundVariable { + batch_commit: commit_variable, + domains_points_and_opens, + }]; + // let proof = const_two_adic_pcs_proof(&mut builder, proof); + // let (commit, rounds) = const_two_adic_pcs_rounds(&mut builder, commit.into(), os); + let mut challenger = DuplexChallengerVariable::new(&mut builder); + challenger.observe_slice(&mut builder, commit_variable); + let x2 = challenger.sample_ext(&mut builder); + let x1: Ext<_, _> = builder.constant(x1); + builder.assert_ext_eq(x1, x2); + verify_two_adic_pcs::<_, BabyBearPoseidon2>( + &mut builder, + &config, + &proof_variable, + &mut challenger, + rounds, + ); + + let mut witness_stream = Vec::>::new(); + Witnessable::::write(&proof, &mut witness_stream); + Witnessable::::write(&commit, &mut witness_stream); + for opening in os { + let (_, points_and_opens) = opening; + for (point, opening_for_point) in points_and_opens { + Witnessable::::write(&point, &mut witness_stream); + Witnessable::::write(&opening_for_point, &mut witness_stream); + } + } + + run_test_recursion(builder.into_operations(), witness_stream); } } diff --git a/crates/recursion/circuit/src/hash.rs b/crates/recursion/circuit/src/hash.rs new file mode 100644 index 0000000000..8089f81a47 --- /dev/null +++ b/crates/recursion/circuit/src/hash.rs @@ -0,0 +1,225 @@ +use std::fmt::Debug; +use std::iter::{repeat, zip}; + +use itertools::Itertools; +use p3_baby_bear::BabyBear; +use p3_field::{AbstractField, Field}; + +use p3_bn254_fr::Bn254Fr; +use p3_symmetric::Permutation; +use sp1_recursion_compiler::{ + circuit::CircuitV2Builder, + ir::{Builder, Config, DslIr, Felt, Var}, +}; +use sp1_recursion_core::stark::{outer_perm, OUTER_MULTI_FIELD_CHALLENGER_WIDTH}; +use sp1_recursion_core::{stark::BabyBearPoseidon2Outer, DIGEST_SIZE}; +use sp1_recursion_core::{HASH_RATE, PERMUTATION_WIDTH}; +use sp1_stark::baby_bear_poseidon2::BabyBearPoseidon2; +use sp1_stark::inner_perm; + +use crate::{ + challenger::{reduce_32, POSEIDON_2_BB_RATE}, + select_chain, CircuitConfig, +}; + +pub trait FieldHasher { + type Digest: Copy + Default + Eq + Ord + Copy + Debug + Send + Sync; + + fn constant_compress(input: [Self::Digest; 2]) -> Self::Digest; +} + +pub trait Posedion2BabyBearHasherVariable { + fn poseidon2_permute( + builder: &mut 
Builder, + state: [Felt; PERMUTATION_WIDTH], + ) -> [Felt; PERMUTATION_WIDTH]; + + /// Applies the Poseidon2 hash function to the given array. + /// + /// Reference: [p3_symmetric::PaddingFreeSponge] + fn poseidon2_hash(builder: &mut Builder, input: &[Felt]) -> [Felt; DIGEST_SIZE] { + // static_assert(RATE < WIDTH) + let mut state = core::array::from_fn(|_| builder.eval(C::F::zero())); + for input_chunk in input.chunks(HASH_RATE) { + state[..input_chunk.len()].copy_from_slice(input_chunk); + state = Self::poseidon2_permute(builder, state); + } + let digest: [Felt; DIGEST_SIZE] = state[..DIGEST_SIZE].try_into().unwrap(); + digest + } +} + +pub trait FieldHasherVariable: FieldHasher { + type DigestVariable: Clone + Copy; + + fn hash(builder: &mut Builder, input: &[Felt]) -> Self::DigestVariable; + + fn compress(builder: &mut Builder, input: [Self::DigestVariable; 2]) + -> Self::DigestVariable; + + fn assert_digest_eq(builder: &mut Builder, a: Self::DigestVariable, b: Self::DigestVariable); + + // Encountered many issues trying to make the following two parametrically polymorphic. + fn select_chain_digest( + builder: &mut Builder, + should_swap: C::Bit, + input: [Self::DigestVariable; 2], + ) -> [Self::DigestVariable; 2]; + + fn print_digest(builder: &mut Builder, digest: Self::DigestVariable); +} + +impl FieldHasher for BabyBearPoseidon2 { + type Digest = [BabyBear; DIGEST_SIZE]; + + fn constant_compress(input: [Self::Digest; 2]) -> Self::Digest { + let mut pre_iter = input.into_iter().flatten().chain(repeat(BabyBear::zero())); + let mut pre = core::array::from_fn(move |_| pre_iter.next().unwrap()); + (inner_perm()).permute_mut(&mut pre); + pre[..DIGEST_SIZE].try_into().unwrap() + } +} + +impl> Posedion2BabyBearHasherVariable for BabyBearPoseidon2 { + fn poseidon2_permute( + builder: &mut Builder, + input: [Felt<::F>; PERMUTATION_WIDTH], + ) -> [Felt<::F>; PERMUTATION_WIDTH] { + builder.poseidon2_permute_v2(input) + } +} + +impl Posedion2BabyBearHasherVariable for BabyBearPoseidon2Outer { + fn poseidon2_permute( + builder: &mut Builder, + state: [Felt<::F>; PERMUTATION_WIDTH], + ) -> [Felt<::F>; PERMUTATION_WIDTH] { + let state: [Felt<_>; PERMUTATION_WIDTH] = state.map(|x| builder.eval(x)); + builder.push_op(DslIr::CircuitPoseidon2PermuteBabyBear(Box::new(state))); + state + } +} + +impl>> FieldHasherVariable + for BabyBearPoseidon2 +{ + type DigestVariable = [Felt; DIGEST_SIZE]; + + fn hash(builder: &mut Builder, input: &[Felt<::F>]) -> Self::DigestVariable { + >::poseidon2_hash(builder, input) + } + + fn compress( + builder: &mut Builder, + input: [Self::DigestVariable; 2], + ) -> Self::DigestVariable { + builder.poseidon2_compress_v2(input.into_iter().flatten()) + } + + fn assert_digest_eq( + builder: &mut Builder, + a: Self::DigestVariable, + b: Self::DigestVariable, + ) { + zip(a, b).for_each(|(e1, e2)| builder.assert_felt_eq(e1, e2)); + } + + fn select_chain_digest( + builder: &mut Builder, + should_swap: ::Bit, + input: [Self::DigestVariable; 2], + ) -> [Self::DigestVariable; 2] { + let err_msg = "select_chain's return value should have length the sum of its inputs"; + let mut selected = select_chain(builder, should_swap, input[0], input[1]); + let ret = [ + core::array::from_fn(|_| selected.next().expect(err_msg)), + core::array::from_fn(|_| selected.next().expect(err_msg)), + ]; + assert_eq!(selected.next(), None, "{}", err_msg); + ret + } + + fn print_digest(builder: &mut Builder, digest: Self::DigestVariable) { + for d in digest.iter() { + builder.print_f(*d); + } + } +} + +pub 
const BN254_DIGEST_SIZE: usize = 1; + +impl FieldHasher for BabyBearPoseidon2Outer { + type Digest = [Bn254Fr; BN254_DIGEST_SIZE]; + + fn constant_compress(input: [Self::Digest; 2]) -> Self::Digest { + let mut state = [input[0][0], input[1][0], Bn254Fr::zero()]; + outer_perm().permute_mut(&mut state); + [state[0]; BN254_DIGEST_SIZE] + } +} + +impl>> FieldHasherVariable + for BabyBearPoseidon2Outer +{ + type DigestVariable = [Var; BN254_DIGEST_SIZE]; + + fn hash(builder: &mut Builder, input: &[Felt<::F>]) -> Self::DigestVariable { + assert!(C::N::bits() == p3_bn254_fr::Bn254Fr::bits()); + assert!(C::F::bits() == p3_baby_bear::BabyBear::bits()); + let num_f_elms = C::N::bits() / C::F::bits(); + let mut state: [Var; OUTER_MULTI_FIELD_CHALLENGER_WIDTH] = + [builder.eval(C::N::zero()), builder.eval(C::N::zero()), builder.eval(C::N::zero())]; + for block_chunk in &input.iter().chunks(POSEIDON_2_BB_RATE) { + for (chunk_id, chunk) in (&block_chunk.chunks(num_f_elms)).into_iter().enumerate() { + let chunk = chunk.copied().collect::>(); + state[chunk_id] = reduce_32(builder, chunk.as_slice()); + } + builder.push_op(DslIr::CircuitPoseidon2Permute(state)) + } + + [state[0]; BN254_DIGEST_SIZE] + } + + fn compress( + builder: &mut Builder, + input: [Self::DigestVariable; 2], + ) -> Self::DigestVariable { + let state: [Var; OUTER_MULTI_FIELD_CHALLENGER_WIDTH] = + [builder.eval(input[0][0]), builder.eval(input[1][0]), builder.eval(C::N::zero())]; + builder.push_op(DslIr::CircuitPoseidon2Permute(state)); + [state[0]; BN254_DIGEST_SIZE] + } + + fn assert_digest_eq( + builder: &mut Builder, + a: Self::DigestVariable, + b: Self::DigestVariable, + ) { + zip(a, b).for_each(|(e1, e2)| builder.assert_var_eq(e1, e2)); + } + + fn select_chain_digest( + builder: &mut Builder, + should_swap: ::Bit, + input: [Self::DigestVariable; 2], + ) -> [Self::DigestVariable; 2] { + let result0: [Var<_>; BN254_DIGEST_SIZE] = core::array::from_fn(|j| { + let result = builder.uninit(); + builder.push_op(DslIr::CircuitSelectV(should_swap, input[1][j], input[0][j], result)); + result + }); + let result1: [Var<_>; BN254_DIGEST_SIZE] = core::array::from_fn(|j| { + let result = builder.uninit(); + builder.push_op(DslIr::CircuitSelectV(should_swap, input[0][j], input[1][j], result)); + result + }); + + [result0, result1] + } + + fn print_digest(builder: &mut Builder, digest: Self::DigestVariable) { + for d in digest.iter() { + builder.print_v(*d); + } + } +} diff --git a/crates/recursion/circuit/src/lib.rs b/crates/recursion/circuit/src/lib.rs index bfc01386a5..dc6c59cda1 100644 --- a/crates/recursion/circuit/src/lib.rs +++ b/crates/recursion/circuit/src/lib.rs @@ -1,181 +1,615 @@ -#![allow(clippy::type_complexity)] -#![allow(clippy::too_many_arguments)] -#![allow(clippy::needless_range_loop)] -#![allow(clippy::explicit_counter_loop)] -#![allow(type_alias_bounds)] +//! Copied from [`sp1_recursion_program`]. 
+ +use std::{ + iter::{repeat, zip}, + ops::{Add, Mul}, +}; + +use challenger::{ + CanCopyChallenger, CanObserveVariable, DuplexChallengerVariable, FieldChallengerVariable, + MultiField32ChallengerVariable, SpongeChallengerShape, +}; +use hash::{FieldHasherVariable, Posedion2BabyBearHasherVariable}; +use p3_bn254_fr::Bn254Fr; +use p3_field::AbstractField; +use p3_matrix::dense::RowMajorMatrix; +use sp1_recursion_compiler::{ + circuit::CircuitV2Builder, + config::{InnerConfig, OuterConfig}, + ir::{Builder, Config, DslIr, Ext, Felt, SymbolicFelt, Var, Variable}, +}; + +mod types; pub mod challenger; pub mod constraints; pub mod domain; pub mod fri; -pub mod mmcs; -pub mod poseidon2; +pub mod hash; +pub mod machine; +pub mod merkle_tree; pub mod stark; -pub mod types; -pub mod utils; +pub(crate) mod utils; pub mod witness; -pub const SPONGE_SIZE: usize = 3; -pub const DIGEST_SIZE: usize = 1; -pub const RATE: usize = 16; - -#[cfg(test)] -mod tests { - use p3_baby_bear::BabyBear; - use p3_bn254_fr::Bn254Fr; - use p3_field::AbstractField; - use sp1_recursion_compiler::{ - config::OuterConfig, - constraints::ConstraintCompiler, - ir::{Builder, Config, Ext, ExtConst, Felt, Witness}, - }; - use sp1_recursion_gnark_ffi::PlonkBn254Prover; - - #[test] - fn test_base_babybear() { - let mut builder = Builder::::default(); - let a_val = BabyBear::from_wrapped_u32(3124235823); - let b_val = BabyBear::from_wrapped_u32(3252375321); - let a: Felt<_> = builder.eval(a_val); - let b: Felt<_> = builder.eval(b_val); - - // Testing base addition. - let a_plus_b: Felt<_> = builder.eval(a + b); - builder.assert_felt_eq(a_plus_b, a_val + b_val); - - // Testing base subtraction. - let a_minus_b: Felt<_> = builder.eval(a - b); - builder.assert_felt_eq(a_minus_b, a_val - b_val); - - // Testing base multiplication. - let a_times_b: Felt<_> = builder.eval(a * b); - builder.assert_felt_eq(a_times_b, a_val * b_val); - - // Testing large linear combination. - let dot_product: Felt<_> = builder.eval(a * a + b * b + a * b); - builder.assert_felt_eq(dot_product, a_val * a_val + b_val * b_val + a_val * b_val); - - // Testing high degree multiplication. - let a_times_b_times_c: Felt<_> = - builder.eval(a_val * b_val * a_val * b_val * a_val * b_val); - builder.assert_felt_eq(a_times_b_times_c, a_val * b_val * a_val * b_val * a_val * b_val); - - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - - let witness = Witness::default(); - PlonkBn254Prover::test::(constraints.clone(), witness); - } - - #[test] - fn test_extension_babybear() { - let mut builder = Builder::::default(); - let one_val = ::EF::from_wrapped_u32(1); - let a_val = ::EF::from_wrapped_u32(3124235823); - let b_val = ::EF::from_wrapped_u32(3252375321); - let one: Ext<_, _> = builder.eval(BabyBear::one()); - let a: Ext<_, _> = builder.eval(a_val.cons()); - let b: Ext<_, _> = builder.eval(b_val.cons()); - - // Testing extension addition. - let a_plus_b: Ext<_, _> = builder.eval(a + b); - builder.assert_ext_eq(a_plus_b, (a_val + b_val).cons()); - - // // Testing negation. - // let neg_a: Ext<_, _> = builder.eval(-a); - // builder.assert_ext_eq(neg_a, (-a_val).cons()); - - // Testing extension subtraction. - let a_minus_b: Ext<_, _> = builder.eval(a - b); - builder.assert_ext_eq(a_minus_b, (a_val - b_val).cons()); - - // Testing base multiplication. - let a_times_b: Ext<_, _> = builder.eval(a * b); - builder.assert_ext_eq(a_times_b, (a_val * b_val).cons()); - - // Testing base division. 
- let a_div_b: Ext<_, _> = builder.eval(a / b); - builder.assert_ext_eq(a_div_b, (a_val / b_val).cons()); - - // Testing base inversion. - let a_inv: Ext<_, _> = builder.eval(one / a); - builder.assert_ext_eq(a_inv, (one_val / a_val).cons()); - - // Testing large linear combination. - let dot_product: Ext<_, _> = builder.eval(a * a + b * b + a * b); - builder.assert_ext_eq(dot_product, (a_val * a_val + b_val * b_val + a_val * b_val).cons()); - - // Testing high degree multiplication. - let a_times_b_times_c: Ext<_, _> = builder.eval(a * b * a * b * a * b); - builder.assert_ext_eq( - a_times_b_times_c, - (a_val * b_val * a_val * b_val * a_val * b_val).cons(), - ); - - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - - let witness = Witness::default(); - PlonkBn254Prover::test::(constraints.clone(), witness); - } - - #[test] - fn test_commit() { - let mut builder = Builder::::default(); - let vkey_hash_bn254 = Bn254Fr::from_canonical_u32(1345237507); - let commited_values_digest_bn254 = Bn254Fr::from_canonical_u32(102); - let vkey_hash = builder.eval(vkey_hash_bn254); - let commited_values_digest = builder.eval(commited_values_digest_bn254); - builder.commit_vkey_hash_circuit(vkey_hash); - builder.commit_commited_values_digest_circuit(commited_values_digest); +use sp1_stark::{ + baby_bear_poseidon2::{BabyBearPoseidon2, ValMmcs}, + StarkGenericConfig, +}; +pub use types::*; + +use p3_challenger::{CanObserve, CanSample, FieldChallenger, GrindingChallenger}; +use p3_commit::{ExtensionMmcs, Mmcs}; +use p3_dft::Radix2DitParallel; +use p3_fri::{FriConfig, TwoAdicFriPcs}; +use sp1_recursion_core::{ + air::RecursionPublicValues, + stark::{BabyBearPoseidon2Outer, OuterValMmcs}, + D, +}; + +use p3_baby_bear::BabyBear; +use utils::{felt_bytes_to_bn254_var, felts_to_bn254_var, words_to_bytes}; + +type EF = ::Challenge; + +pub type PcsConfig = FriConfig< + ExtensionMmcs< + ::Val, + ::Challenge, + ::ValMmcs, + >, +>; + +pub type Digest = >::DigestVariable; + +pub type FriMmcs = ExtensionMmcs::ValMmcs>; + +pub trait BabyBearFriConfig: + StarkGenericConfig< + Val = BabyBear, + Challenge = EF, + Challenger = Self::FriChallenger, + Pcs = TwoAdicFriPcs< + BabyBear, + Radix2DitParallel, + Self::ValMmcs, + ExtensionMmcs, + >, +> +{ + type ValMmcs: Mmcs> = Self::RowMajorProverData> + + Send + + Sync; + type RowMajorProverData: Clone + Send + Sync; + type FriChallenger: CanObserve<>::Commitment> + + CanSample + + GrindingChallenger + + FieldChallenger; + + fn fri_config(&self) -> &FriConfig>; + + fn challenger_shape(challenger: &Self::FriChallenger) -> SpongeChallengerShape; +} - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); +pub trait BabyBearFriConfigVariable>: + BabyBearFriConfig + FieldHasherVariable + Posedion2BabyBearHasherVariable +{ + type FriChallengerVariable: FieldChallengerVariable::Bit> + + CanObserveVariable>::DigestVariable> + + CanCopyChallenger; - let mut witness = Witness::default(); - witness.write_vkey_hash(vkey_hash_bn254); - witness.write_commited_values_digest(commited_values_digest_bn254); + /// Get a new challenger corresponding to the given config. 
+ fn challenger_variable(&self, builder: &mut Builder) -> Self::FriChallengerVariable; + + fn commit_recursion_public_values( + builder: &mut Builder, + public_values: RecursionPublicValues>, + ); +} - PlonkBn254Prover::test::(constraints.clone(), witness); +pub trait CircuitConfig: Config { + type Bit: Copy + Variable; + + fn read_bit(builder: &mut Builder) -> Self::Bit; + + fn read_felt(builder: &mut Builder) -> Felt; + + fn read_ext(builder: &mut Builder) -> Ext; + + fn assert_bit_zero(builder: &mut Builder, bit: Self::Bit); + + fn assert_bit_one(builder: &mut Builder, bit: Self::Bit); + + fn ext2felt( + builder: &mut Builder, + ext: Ext<::F, ::EF>, + ) -> [Felt<::F>; D]; + + fn exp_reverse_bits( + builder: &mut Builder, + input: Felt<::F>, + power_bits: Vec, + ) -> Felt<::F>; + + /// Exponentiates a felt x to a list of bits in little endian. Uses precomputed powers + /// of x. + fn exp_f_bits_precomputed( + builder: &mut Builder, + power_bits: &[Self::Bit], + two_adic_powers_of_x: &[Felt], + ) -> Felt; + + fn num2bits( + builder: &mut Builder, + num: Felt<::F>, + num_bits: usize, + ) -> Vec; + + fn bits2num( + builder: &mut Builder, + bits: impl IntoIterator, + ) -> Felt<::F>; + + #[allow(clippy::type_complexity)] + fn select_chain_f( + builder: &mut Builder, + should_swap: Self::Bit, + first: impl IntoIterator::F>> + Clone, + second: impl IntoIterator::F>> + Clone, + ) -> Vec::F>>; + + #[allow(clippy::type_complexity)] + fn select_chain_ef( + builder: &mut Builder, + should_swap: Self::Bit, + first: impl IntoIterator::F, ::EF>> + Clone, + second: impl IntoIterator::F, ::EF>> + Clone, + ) -> Vec::F, ::EF>>; + + fn range_check_felt(builder: &mut Builder, value: Felt, num_bits: usize) { + let bits = Self::num2bits(builder, value, 31); + for bit in bits.into_iter().skip(num_bits) { + Self::assert_bit_zero(builder, bit); + } } +} - #[test] - #[should_panic] - fn test_commit_vkey_fail() { - let mut builder = Builder::::default(); - let vkey_hash_bn254 = Bn254Fr::from_canonical_u32(1345237507); - let commited_values_digest_bn254 = Bn254Fr::from_canonical_u32(102); - let vkey_hash = builder.eval(vkey_hash_bn254); - let commited_values_digest = builder.eval(commited_values_digest_bn254); - builder.commit_vkey_hash_circuit(vkey_hash); - builder.commit_commited_values_digest_circuit(commited_values_digest); +impl CircuitConfig for InnerConfig { + type Bit = Felt<::F>; + + fn assert_bit_zero(builder: &mut Builder, bit: Self::Bit) { + builder.assert_felt_eq(bit, Self::F::zero()); + } - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); + fn assert_bit_one(builder: &mut Builder, bit: Self::Bit) { + builder.assert_felt_eq(bit, Self::F::one()); + } - let mut witness = Witness::default(); - witness.write_commited_values_digest(commited_values_digest_bn254); + fn read_bit(builder: &mut Builder) -> Self::Bit { + builder.hint_felt_v2() + } - PlonkBn254Prover::test::(constraints.clone(), witness); + fn read_felt(builder: &mut Builder) -> Felt { + builder.hint_felt_v2() } - #[test] - #[should_panic] - fn test_commit_commited_values_digest_fail() { - let mut builder = Builder::::default(); - let vkey_hash_bn254 = Bn254Fr::from_canonical_u32(1345237507); - let commited_values_digest_bn254 = Bn254Fr::from_canonical_u32(102); - let vkey_hash = builder.eval(vkey_hash_bn254); - let commited_values_digest = builder.eval(commited_values_digest_bn254); - builder.commit_vkey_hash_circuit(vkey_hash); - 
builder.commit_commited_values_digest_circuit(commited_values_digest); + fn read_ext(builder: &mut Builder) -> Ext { + builder.hint_ext_v2() + } - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); + fn ext2felt( + builder: &mut Builder, + ext: Ext<::F, ::EF>, + ) -> [Felt<::F>; D] { + builder.ext2felt_v2(ext) + } - let mut witness = Witness::default(); - witness.write_vkey_hash(vkey_hash_bn254); + fn exp_reverse_bits( + builder: &mut Builder, + input: Felt<::F>, + power_bits: Vec::F>>, + ) -> Felt<::F> { + builder.exp_reverse_bits_v2(input, power_bits) + } - PlonkBn254Prover::test::(constraints.clone(), witness); + fn num2bits( + builder: &mut Builder, + num: Felt<::F>, + num_bits: usize, + ) -> Vec::F>> { + builder.num2bits_v2_f(num, num_bits) } + + fn bits2num( + builder: &mut Builder, + bits: impl IntoIterator::F>>, + ) -> Felt<::F> { + builder.bits2num_v2_f(bits) + } + + fn select_chain_f( + builder: &mut Builder, + should_swap: Self::Bit, + first: impl IntoIterator::F>> + Clone, + second: impl IntoIterator::F>> + Clone, + ) -> Vec::F>> { + let one: Felt<_> = builder.constant(Self::F::one()); + let shouldnt_swap: Felt<_> = builder.eval(one - should_swap); + + let id_branch = first.clone().into_iter().chain(second.clone()); + let swap_branch = second.into_iter().chain(first); + zip(zip(id_branch, swap_branch), zip(repeat(shouldnt_swap), repeat(should_swap))) + .map(|((id_v, sw_v), (id_c, sw_c))| builder.eval(id_v * id_c + sw_v * sw_c)) + .collect() + } + + fn select_chain_ef( + builder: &mut Builder, + should_swap: Self::Bit, + first: impl IntoIterator::F, ::EF>> + Clone, + second: impl IntoIterator::F, ::EF>> + Clone, + ) -> Vec::F, ::EF>> { + let one: Felt<_> = builder.constant(Self::F::one()); + let shouldnt_swap: Felt<_> = builder.eval(one - should_swap); + + let id_branch = first.clone().into_iter().chain(second.clone()); + let swap_branch = second.into_iter().chain(first); + zip(zip(id_branch, swap_branch), zip(repeat(shouldnt_swap), repeat(should_swap))) + .map(|((id_v, sw_v), (id_c, sw_c))| builder.eval(id_v * id_c + sw_v * sw_c)) + .collect() + } + + fn exp_f_bits_precomputed( + builder: &mut Builder, + power_bits: &[Self::Bit], + two_adic_powers_of_x: &[Felt], + ) -> Felt { + Self::exp_reverse_bits( + builder, + two_adic_powers_of_x[0], + power_bits.iter().rev().copied().collect(), + ) + } +} + +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct WrapConfig; + +impl Config for WrapConfig { + type F = ::F; + type EF = ::EF; + type N = ::N; +} + +impl CircuitConfig for WrapConfig { + type Bit = ::Bit; + + fn assert_bit_zero(builder: &mut Builder, bit: Self::Bit) { + builder.assert_felt_eq(bit, Self::F::zero()); + } + + fn assert_bit_one(builder: &mut Builder, bit: Self::Bit) { + builder.assert_felt_eq(bit, Self::F::one()); + } + + fn read_bit(builder: &mut Builder) -> Self::Bit { + builder.hint_felt_v2() + } + + fn read_felt(builder: &mut Builder) -> Felt { + builder.hint_felt_v2() + } + + fn read_ext(builder: &mut Builder) -> Ext { + builder.hint_ext_v2() + } + + fn ext2felt( + builder: &mut Builder, + ext: Ext<::F, ::EF>, + ) -> [Felt<::F>; D] { + builder.ext2felt_v2(ext) + } + + fn exp_reverse_bits( + builder: &mut Builder, + input: Felt<::F>, + power_bits: Vec::F>>, + ) -> Felt<::F> { + // builder.exp_reverse_bits_v2(input, power_bits) + let mut result = builder.constant(Self::F::one()); + let mut power_f = input; + let bit_len = power_bits.len(); + + for i in 1..=bit_len { + let 
index = bit_len - i; + let bit = power_bits[index]; + let prod: Felt<_> = builder.eval(result * power_f); + result = builder.eval(bit * prod + (SymbolicFelt::one() - bit) * result); + power_f = builder.eval(power_f * power_f); + } + result + } + + fn num2bits( + builder: &mut Builder, + num: Felt<::F>, + num_bits: usize, + ) -> Vec::F>> { + builder.num2bits_v2_f(num, num_bits) + } + + fn bits2num( + builder: &mut Builder, + bits: impl IntoIterator::F>>, + ) -> Felt<::F> { + builder.bits2num_v2_f(bits) + } + + fn select_chain_f( + builder: &mut Builder, + should_swap: Self::Bit, + first: impl IntoIterator::F>> + Clone, + second: impl IntoIterator::F>> + Clone, + ) -> Vec::F>> { + let one: Felt<_> = builder.constant(Self::F::one()); + let shouldnt_swap: Felt<_> = builder.eval(one - should_swap); + + let id_branch = first.clone().into_iter().chain(second.clone()); + let swap_branch = second.into_iter().chain(first); + zip(zip(id_branch, swap_branch), zip(repeat(shouldnt_swap), repeat(should_swap))) + .map(|((id_v, sw_v), (id_c, sw_c))| builder.eval(id_v * id_c + sw_v * sw_c)) + .collect() + } + + fn select_chain_ef( + builder: &mut Builder, + should_swap: Self::Bit, + first: impl IntoIterator::F, ::EF>> + Clone, + second: impl IntoIterator::F, ::EF>> + Clone, + ) -> Vec::F, ::EF>> { + let one: Felt<_> = builder.constant(Self::F::one()); + let shouldnt_swap: Felt<_> = builder.eval(one - should_swap); + + let id_branch = first.clone().into_iter().chain(second.clone()); + let swap_branch = second.into_iter().chain(first); + zip(zip(id_branch, swap_branch), zip(repeat(shouldnt_swap), repeat(should_swap))) + .map(|((id_v, sw_v), (id_c, sw_c))| builder.eval(id_v * id_c + sw_v * sw_c)) + .collect() + } + + fn exp_f_bits_precomputed( + builder: &mut Builder, + power_bits: &[Self::Bit], + two_adic_powers_of_x: &[Felt], + ) -> Felt { + Self::exp_reverse_bits( + builder, + two_adic_powers_of_x[0], + power_bits.iter().rev().copied().collect(), + ) + } +} + +impl CircuitConfig for OuterConfig { + type Bit = Var<::N>; + + fn assert_bit_zero(builder: &mut Builder, bit: Self::Bit) { + builder.assert_var_eq(bit, Self::N::zero()); + } + + fn assert_bit_one(builder: &mut Builder, bit: Self::Bit) { + builder.assert_var_eq(bit, Self::N::one()); + } + + fn read_bit(builder: &mut Builder) -> Self::Bit { + builder.witness_var() + } + + fn read_felt(builder: &mut Builder) -> Felt { + builder.witness_felt() + } + + fn read_ext(builder: &mut Builder) -> Ext { + builder.witness_ext() + } + + fn ext2felt( + builder: &mut Builder, + ext: Ext<::F, ::EF>, + ) -> [Felt<::F>; D] { + let felts = core::array::from_fn(|_| builder.uninit()); + builder.push_op(DslIr::CircuitExt2Felt(felts, ext)); + felts + } + + fn exp_reverse_bits( + builder: &mut Builder, + input: Felt<::F>, + power_bits: Vec::N>>, + ) -> Felt<::F> { + let mut result = builder.constant(Self::F::one()); + let power_f = input; + let bit_len = power_bits.len(); + + for i in 1..=bit_len { + let index = bit_len - i; + let bit = power_bits[index]; + let prod = builder.eval(result * power_f); + result = builder.select_f(bit, prod, result); + builder.assign(power_f, power_f * power_f); + } + result + } + + fn num2bits( + builder: &mut Builder, + num: Felt<::F>, + num_bits: usize, + ) -> Vec::N>> { + builder.num2bits_f_circuit(num)[..num_bits].to_vec() + } + + fn bits2num( + builder: &mut Builder, + bits: impl IntoIterator::N>>, + ) -> Felt<::F> { + let result = builder.eval(Self::F::zero()); + for (i, bit) in bits.into_iter().enumerate() { + let to_add: Felt<_> = 
builder.uninit(); + let pow2 = builder.constant(Self::F::from_canonical_u32(1 << i)); + let zero = builder.constant(Self::F::zero()); + builder.push_op(DslIr::CircuitSelectF(bit, pow2, zero, to_add)); + builder.assign(result, result + to_add); + } + result + } + + fn select_chain_f( + builder: &mut Builder, + should_swap: Self::Bit, + first: impl IntoIterator::F>> + Clone, + second: impl IntoIterator::F>> + Clone, + ) -> Vec::F>> { + let id_branch = first.clone().into_iter().chain(second.clone()); + let swap_branch = second.into_iter().chain(first); + zip(id_branch, swap_branch) + .map(|(id_v, sw_v): (Felt<_>, Felt<_>)| -> Felt<_> { + let result: Felt<_> = builder.uninit(); + builder.push_op(DslIr::CircuitSelectF(should_swap, sw_v, id_v, result)); + result + }) + .collect() + } + + fn select_chain_ef( + builder: &mut Builder, + should_swap: Self::Bit, + first: impl IntoIterator::F, ::EF>> + Clone, + second: impl IntoIterator::F, ::EF>> + Clone, + ) -> Vec::F, ::EF>> { + let id_branch = first.clone().into_iter().chain(second.clone()); + let swap_branch = second.into_iter().chain(first); + zip(id_branch, swap_branch) + .map(|(id_v, sw_v): (Ext<_, _>, Ext<_, _>)| -> Ext<_, _> { + let result: Ext<_, _> = builder.uninit(); + builder.push_op(DslIr::CircuitSelectE(should_swap, sw_v, id_v, result)); + result + }) + .collect() + } + + fn exp_f_bits_precomputed( + builder: &mut Builder, + power_bits: &[Self::Bit], + two_adic_powers_of_x: &[Felt], + ) -> Felt { + let mut result: Felt<_> = builder.eval(Self::F::one()); + let one = builder.constant(Self::F::one()); + for (&bit, &power) in power_bits.iter().zip(two_adic_powers_of_x) { + let multiplier = builder.select_f(bit, power, one); + result = builder.eval(multiplier * result); + } + result + } +} + +impl BabyBearFriConfig for BabyBearPoseidon2 { + type ValMmcs = ValMmcs; + type FriChallenger = ::Challenger; + type RowMajorProverData = >::ProverData>; + + fn fri_config(&self) -> &FriConfig> { + self.pcs().fri_config() + } + + fn challenger_shape(challenger: &Self::FriChallenger) -> SpongeChallengerShape { + SpongeChallengerShape { + input_buffer_len: challenger.input_buffer.len(), + output_buffer_len: challenger.output_buffer.len(), + } + } +} + +impl BabyBearFriConfig for BabyBearPoseidon2Outer { + type ValMmcs = OuterValMmcs; + type FriChallenger = ::Challenger; + + type RowMajorProverData = + >::ProverData>; + + fn fri_config(&self) -> &FriConfig> { + self.pcs().fri_config() + } + + fn challenger_shape(_challenger: &Self::FriChallenger) -> SpongeChallengerShape { + unimplemented!("Shape not supported for outer fri challenger"); + } +} + +impl>> BabyBearFriConfigVariable + for BabyBearPoseidon2 +{ + type FriChallengerVariable = DuplexChallengerVariable; + + fn challenger_variable(&self, builder: &mut Builder) -> Self::FriChallengerVariable { + DuplexChallengerVariable::new(builder) + } + + fn commit_recursion_public_values( + builder: &mut Builder, + public_values: RecursionPublicValues::F>>, + ) { + builder.commit_public_values_v2(public_values); + } +} + +impl>> BabyBearFriConfigVariable + for BabyBearPoseidon2Outer +{ + type FriChallengerVariable = MultiField32ChallengerVariable; + + fn challenger_variable(&self, builder: &mut Builder) -> Self::FriChallengerVariable { + MultiField32ChallengerVariable::new(builder) + } + + fn commit_recursion_public_values( + builder: &mut Builder, + public_values: RecursionPublicValues::F>>, + ) { + let committed_values_digest_bytes_felts: [Felt<_>; 32] = + 
words_to_bytes(&public_values.committed_value_digest).try_into().unwrap(); + let committed_values_digest_bytes: Var<_> = + felt_bytes_to_bn254_var(builder, &committed_values_digest_bytes_felts); + builder.commit_commited_values_digest_circuit(committed_values_digest_bytes); + + let vkey_hash = felts_to_bn254_var(builder, &public_values.sp1_vk_digest); + builder.commit_vkey_hash_circuit(vkey_hash); + } +} + +pub fn select_chain<'a, C, R, S>( + builder: &'a mut Builder, + should_swap: R, + first: impl IntoIterator + Clone + 'a, + second: impl IntoIterator + Clone + 'a, +) -> impl Iterator + 'a +where + C: Config, + R: Variable + 'a, + S: Variable + 'a, + >::Expression: AbstractField + + Mul<>::Expression, Output = >::Expression>, + >::Expression: Add>::Expression>, +{ + let should_swap: >::Expression = should_swap.into(); + let one = >::Expression::one(); + let shouldnt_swap = one - should_swap.clone(); + + let id_branch = + first.clone().into_iter().chain(second.clone()).map(>::Expression::from); + let swap_branch = second.into_iter().chain(first).map(>::Expression::from); + zip(zip(id_branch, swap_branch), zip(repeat(shouldnt_swap), repeat(should_swap))) + .map(|((id_v, sw_v), (id_c, sw_c))| builder.eval(id_c * id_v + sw_c * sw_v)) } diff --git a/crates/recursion/circuit/src/machine/complete.rs b/crates/recursion/circuit/src/machine/complete.rs new file mode 100644 index 0000000000..8446dd95b9 --- /dev/null +++ b/crates/recursion/circuit/src/machine/complete.rs @@ -0,0 +1,75 @@ +use itertools::Itertools; +use p3_field::AbstractField; + +use sp1_recursion_compiler::ir::{Builder, Config, Felt}; +use sp1_recursion_core::air::RecursionPublicValues; + +/// Assertions on recursion public values which represent a complete proof. +/// +/// The assertions consist of checking all the expected boundary conditions from a compress proof +/// that represents the end of the recursion tower. +pub(crate) fn assert_complete( + builder: &mut Builder, + public_values: &RecursionPublicValues>, + is_complete: Felt, +) { + let RecursionPublicValues { + deferred_proofs_digest, + next_pc, + start_shard, + next_shard, + start_execution_shard, + cumulative_sum, + start_reconstruct_deferred_digest, + end_reconstruct_deferred_digest, + leaf_challenger, + end_reconstruct_challenger, + contains_execution_shard, + .. + } = public_values; + + // Assert that the `is_complete` flag is boolean. + builder.assert_felt_eq(is_complete * (is_complete - C::F::one()), C::F::zero()); + + // Assert that `next_pc` is equal to zero (so program execution has completed) + builder.assert_felt_eq(is_complete * *next_pc, C::F::zero()); + + // Assert that start shard is equal to 1. + builder.assert_felt_eq(is_complete * (*start_shard - C::F::one()), C::F::zero()); + + // Assert that the next shard is not equal to one. This guarantees that there is at least one + // shard that contains CPU. + // + // TODO: figure out if this is needed. + builder.assert_felt_ne(is_complete * *next_shard, C::F::one()); + + // Assert that that an execution shard is present. + builder.assert_felt_eq(is_complete * (*contains_execution_shard - C::F::one()), C::F::zero()); + // Assert that the start execution shard is equal to 1. + builder.assert_felt_eq(is_complete * (*start_execution_shard - C::F::one()), C::F::zero()); + + // Assert that the end reconstruct challenger is equal to the leaf challenger. 
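+    // Note: as with the assertions above, each equality below is multiplied by `is_complete`,
+    // so these constraints are only binding when `is_complete == 1`; for an incomplete proof the
+    // products are identically zero and the assertions are vacuous.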
+    for (end_challenger_d, leaf_challenger_d) in
+        end_reconstruct_challenger.into_iter().zip(*leaf_challenger)
+    {
+        builder.assert_felt_eq(is_complete * (end_challenger_d - leaf_challenger_d), C::F::zero());
+    }
+
+    // The start reconstruct deferred digest should be zero.
+    for start_digest_word in start_reconstruct_deferred_digest {
+        builder.assert_felt_eq(is_complete * *start_digest_word, C::F::zero());
+    }
+
+    // The end reconstruct deferred digest should be equal to the deferred proofs digest.
+    for (end_digest_word, deferred_digest_word) in
+        end_reconstruct_deferred_digest.iter().zip_eq(deferred_proofs_digest.iter())
+    {
+        builder
+            .assert_felt_eq(is_complete * (*end_digest_word - *deferred_digest_word), C::F::zero());
+    }
+
+    // Assert that the cumulative sum is zero.
+    for b in cumulative_sum.iter() {
+        builder.assert_felt_eq(is_complete * *b, C::F::zero());
+    }
+}
diff --git a/crates/recursion/circuit/src/machine/compress.rs b/crates/recursion/circuit/src/machine/compress.rs
new file mode 100644
index 0000000000..c92d105788
--- /dev/null
+++ b/crates/recursion/circuit/src/machine/compress.rs
@@ -0,0 +1,584 @@
+use std::{
+    array,
+    borrow::{Borrow, BorrowMut},
+    marker::PhantomData,
+    mem::MaybeUninit,
+};
+
+use itertools::{izip, Itertools};
+
+use p3_air::Air;
+use p3_baby_bear::BabyBear;
+
+use p3_commit::Mmcs;
+use p3_field::AbstractField;
+use p3_matrix::dense::RowMajorMatrix;
+
+use serde::{de::DeserializeOwned, Deserialize, Serialize};
+use sp1_recursion_compiler::ir::{Builder, Ext, Felt, SymbolicFelt};
+
+use sp1_recursion_core::{
+    air::{ChallengerPublicValues, RecursionPublicValues, RECURSIVE_PROOF_NUM_PV_ELTS},
+    D,
+};
+
+use sp1_stark::{
+    air::{MachineAir, POSEIDON_NUM_WORDS, PV_DIGEST_NUM_WORDS},
+    baby_bear_poseidon2::BabyBearPoseidon2,
+    Dom, ProofShape, ShardProof, StarkGenericConfig, StarkMachine, StarkVerifyingKey, Word,
+    DIGEST_SIZE,
+};
+
+use crate::{
+    challenger::CanObserveVariable,
+    constraints::RecursiveVerifierConstraintFolder,
+    machine::{
+        assert_complete, assert_recursion_public_values_valid, recursion_public_values_digest,
+        root_public_values_digest,
+    },
+    stark::{dummy_vk_and_shard_proof, ShardProofVariable, StarkVerifier},
+    utils::uninit_challenger_pv,
+    BabyBearFriConfig, BabyBearFriConfigVariable, CircuitConfig, VerifyingKeyVariable,
+};
+
+/// A program to verify a batch of recursive proofs and aggregate their public values.
+#[derive(Debug, Clone, Copy)]
+pub struct SP1CompressVerifier {
+    _phantom: PhantomData<(C, SC, A)>,
+}
+
+pub enum PublicValuesOutputDigest {
+    Reduce,
+    Root,
+}
+
+/// Witness layout for the compress stage verifier.
+pub struct SP1CompressWitnessVariable<
+    C: CircuitConfig,
+    SC: BabyBearFriConfigVariable,
+> {
+    /// The shard proofs to verify.
+    pub vks_and_proofs: Vec<(VerifyingKeyVariable, ShardProofVariable)>,
+    pub is_complete: Felt,
+}
+
+/// An input layout for the reduce verifier.
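+/// (This is the concrete counterpart of [`SP1CompressWitnessVariable`]: it holds actual verifying
+/// keys and shard proofs rather than circuit variables, which is why it is serializable.)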
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(bound(serialize = "ShardProof: Serialize, Dom: Serialize"))]
+#[serde(bound(deserialize = "ShardProof: Deserialize<'de>, Dom: DeserializeOwned"))]
+pub struct SP1CompressWitnessValues {
+    pub vks_and_proofs: Vec<(StarkVerifyingKey, ShardProof)>,
+    pub is_complete: bool,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct SP1CompressShape {
+    proof_shapes: Vec,
+}
+
+impl SP1CompressVerifier
+where
+    SC: BabyBearFriConfigVariable,
+    C: CircuitConfig,
+    >::ProverData>: Clone,
+    A: MachineAir + for<'a> Air>,
+{
+    /// Verify a batch of recursive proofs and aggregate their public values.
+    ///
+    /// The compression verifier can aggregate proofs of different kinds:
+    /// - Core proofs: proofs which are recursive proofs of a batch of SP1 shard proofs. The
+    /// implementation in this function assumes a fixed recursive verifier specified by
+    /// `recursive_vk`.
+    /// - Deferred proofs: proofs which are recursive proofs of a batch of deferred proofs. The
+    /// implementation in this function assumes a fixed deferred verification program specified by
+    /// `deferred_vk`.
+    /// - Compress proofs: these are proofs which refer to a proof of this program. The key for it
+    /// is part of the public values and will be propagated across all levels of recursion, and
+    /// will be checked against itself as in [sp1_prover::Prover] or as in [super::SP1RootVerifier].
+    pub fn verify(
+        builder: &mut Builder,
+        machine: &StarkMachine,
+        input: SP1CompressWitnessVariable,
+        vk_root: [Felt; DIGEST_SIZE],
+        kind: PublicValuesOutputDigest,
+    ) {
+        // Read input.
+        let SP1CompressWitnessVariable { vks_and_proofs, is_complete } = input;
+
+        // Initialize the values for the aggregated public output.
+
+        let mut reduce_public_values_stream: Vec> = (0..RECURSIVE_PROOF_NUM_PV_ELTS)
+            .map(|_| unsafe { MaybeUninit::zeroed().assume_init() })
+            .collect();
+        let compress_public_values: &mut RecursionPublicValues<_> =
+            reduce_public_values_stream.as_mut_slice().borrow_mut();
+
+        // TODO: add vk correctness check.
+
+        // Make sure there is at least one proof.
+        assert!(!vks_and_proofs.is_empty());
+
+        // Initialize the consistency check variables.
+ let mut sp1_vk_digest: [Felt<_>; DIGEST_SIZE] = + array::from_fn(|_| unsafe { MaybeUninit::zeroed().assume_init() }); + let mut pc: Felt<_> = unsafe { MaybeUninit::zeroed().assume_init() }; + let mut shard: Felt<_> = unsafe { MaybeUninit::zeroed().assume_init() }; + + let mut exit_code: Felt<_> = builder.uninit(); + + let mut execution_shard: Felt<_> = unsafe { MaybeUninit::zeroed().assume_init() }; + let mut initial_reconstruct_challenger_values: ChallengerPublicValues> = + unsafe { uninit_challenger_pv(builder) }; + let mut reconstruct_challenger_values: ChallengerPublicValues> = + unsafe { uninit_challenger_pv(builder) }; + let mut leaf_challenger_values: ChallengerPublicValues> = + unsafe { uninit_challenger_pv(builder) }; + let mut committed_value_digest: [Word>; PV_DIGEST_NUM_WORDS] = + array::from_fn(|_| { + Word(array::from_fn(|_| unsafe { MaybeUninit::zeroed().assume_init() })) + }); + let mut deferred_proofs_digest: [Felt<_>; POSEIDON_NUM_WORDS] = + array::from_fn(|_| unsafe { MaybeUninit::zeroed().assume_init() }); + let mut reconstruct_deferred_digest: [Felt<_>; POSEIDON_NUM_WORDS] = + core::array::from_fn(|_| unsafe { MaybeUninit::zeroed().assume_init() }); + let mut global_cumulative_sum: [Felt<_>; D] = + core::array::from_fn(|_| builder.eval(C::F::zero())); + let mut init_addr_bits: [Felt<_>; 32] = + core::array::from_fn(|_| unsafe { MaybeUninit::zeroed().assume_init() }); + let mut finalize_addr_bits: [Felt<_>; 32] = + core::array::from_fn(|_| unsafe { MaybeUninit::zeroed().assume_init() }); + + // Initialize a flag to denote if the any of the recursive proofs represents a shard range + // where at least once of the shards is an execution shard (i.e. contains cpu). + let mut contains_execution_shard: Felt<_> = builder.eval(C::F::zero()); + + // Verify proofs, check consistency, and aggregate public values. + for (i, (vk, shard_proof)) in vks_and_proofs.into_iter().enumerate() { + // Verify the shard proof. + + // Prepare a challenger. + let mut challenger = machine.config().challenger_variable(builder); + + // Observe the vk and start pc. + challenger.observe(builder, vk.commitment); + challenger.observe(builder, vk.pc_start); + let zero: Felt<_> = builder.eval(C::F::zero()); + for _ in 0..7 { + challenger.observe(builder, zero); + } + + // Observe the main commitment and public values. + challenger.observe_slice( + builder, + shard_proof.public_values[0..machine.num_pv_elts()].iter().copied(), + ); + + let zero_ext: Ext = builder.eval(C::F::zero()); + StarkVerifier::verify_shard( + builder, + &vk, + machine, + &mut challenger, + &shard_proof, + &[zero_ext, zero_ext], + ); + + // Get the current public values. + let current_public_values: &RecursionPublicValues> = + shard_proof.public_values.as_slice().borrow(); + // Assert that the public values are valid. + assert_recursion_public_values_valid::(builder, current_public_values); + // Assert that the vk root is the same as the witnessed one. + for (expected, actual) in vk_root.iter().zip(current_public_values.vk_root.iter()) { + builder.assert_felt_eq(*expected, *actual); + } + + // Set the exit code, it is already constrained to be zero in the previous proof. + exit_code = current_public_values.exit_code; + + if i == 0 { + // Initialize global and accumulated values. + + // Initialize the start of deferred digests. 
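+ // Both the running deferred digest and the aggregated start digest are seeded from the
+ // first proof's `start_reconstruct_deferred_digest`.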
+ for (digest, current_digest, global_digest) in izip!( + reconstruct_deferred_digest.iter_mut(), + current_public_values.start_reconstruct_deferred_digest.iter(), + compress_public_values.start_reconstruct_deferred_digest.iter_mut() + ) { + *digest = *current_digest; + *global_digest = *current_digest; + } + + // Initialize the sp1_vk digest + for (digest, first_digest) in + sp1_vk_digest.iter_mut().zip(current_public_values.sp1_vk_digest) + { + *digest = first_digest; + } + + // Initiallize start pc. + compress_public_values.start_pc = current_public_values.start_pc; + pc = current_public_values.start_pc; + + // Initialize start shard. + compress_public_values.start_shard = current_public_values.start_shard; + shard = current_public_values.start_shard; + + // Initialize start execution shard. + compress_public_values.start_execution_shard = + current_public_values.start_execution_shard; + execution_shard = current_public_values.start_execution_shard; + + // Initialize the MemoryInitialize address bits. + for (bit, (first_bit, current_bit)) in init_addr_bits.iter_mut().zip( + compress_public_values + .previous_init_addr_bits + .iter_mut() + .zip(current_public_values.previous_init_addr_bits.iter()), + ) { + *bit = *current_bit; + *first_bit = *current_bit; + } + + // Initialize the MemoryFinalize address bits. + for (bit, (first_bit, current_bit)) in finalize_addr_bits.iter_mut().zip( + compress_public_values + .previous_finalize_addr_bits + .iter_mut() + .zip(current_public_values.previous_finalize_addr_bits.iter()), + ) { + *bit = *current_bit; + *first_bit = *current_bit; + } + + // Initialize the leaf challenger public values. + leaf_challenger_values = current_public_values.leaf_challenger; + + // Initialize the initial reconstruct challenger public values. + initial_reconstruct_challenger_values = + current_public_values.start_reconstruct_challenger; + reconstruct_challenger_values = current_public_values.start_reconstruct_challenger; + + // Assign the commited values and deferred proof digests. + for (word, current_word) in committed_value_digest + .iter_mut() + .zip_eq(current_public_values.committed_value_digest.iter()) + { + for (byte, current_byte) in word.0.iter_mut().zip_eq(current_word.0.iter()) { + *byte = *current_byte; + } + } + + for (digest, current_digest) in deferred_proofs_digest + .iter_mut() + .zip_eq(current_public_values.deferred_proofs_digest.iter()) + { + *digest = *current_digest; + } + } + + // Assert that the current values match the accumulated values. + + // Assert that the start deferred digest is equal to the current deferred digest. + for (digest, current_digest) in reconstruct_deferred_digest + .iter() + .zip_eq(current_public_values.start_reconstruct_deferred_digest.iter()) + { + builder.assert_felt_eq(*digest, *current_digest); + } + + // // Consistency checks for all accumulated values. + + // Assert that the sp1_vk digest is always the same. + for (digest, current) in sp1_vk_digest.iter().zip(current_public_values.sp1_vk_digest) { + builder.assert_felt_eq(*digest, current); + } + + // Assert that the start pc is equal to the current pc. + builder.assert_felt_eq(pc, current_public_values.start_pc); + + // Verify that the shard is equal to the current shard. + builder.assert_felt_eq(shard, current_public_values.start_shard); + + // Execution shard constraints. + { + // Assert that `contains_execution_shard` is boolean. 
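+ // The booleanness check is the usual `x * (1 - x) == 0` constraint, which holds over the
+ // field exactly when `x` is 0 or 1.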
+ builder.assert_felt_eq( + current_public_values.contains_execution_shard + * (SymbolicFelt::one() - current_public_values.contains_execution_shard), + C::F::zero(), + ); + // A flag to indicate whether the first execution shard has been seen. We have: + // - `is_first_execution_shard_seen` = current_contains_execution_shard && + // !execution_shard_seen_before. + // Since `contains_execution_shard` is the boolean flag used to denote if we have + // seen an execution shard, we can use it to denote if we have seen an execution + // shard before. + let is_first_execution_shard_seen: Felt<_> = builder.eval( + current_public_values.contains_execution_shard + * (SymbolicFelt::one() - contains_execution_shard), + ); + + // If this is the first execution shard, then we update the start execution shard + // and the `execution_shard` values. + compress_public_values.start_execution_shard = builder.eval( + current_public_values.start_execution_shard * is_first_execution_shard_seen + + compress_public_values.start_execution_shard + * (SymbolicFelt::one() - is_first_execution_shard_seen), + ); + execution_shard = builder.eval( + current_public_values.start_execution_shard * is_first_execution_shard_seen + + execution_shard * (SymbolicFelt::one() - is_first_execution_shard_seen), + ); + + // If this is an execution shard, make the assertion that the value is consistent. + builder.assert_felt_eq( + current_public_values.contains_execution_shard + * (execution_shard - current_public_values.start_execution_shard), + C::F::zero(), + ); + } + + // Assert that the MemoryInitialize address bits are the same. + for (bit, current_bit) in + init_addr_bits.iter().zip(current_public_values.previous_init_addr_bits.iter()) + { + builder.assert_felt_eq(*bit, *current_bit); + } + + // Assert that the MemoryFinalize address bits are the same. + for (bit, current_bit) in finalize_addr_bits + .iter() + .zip(current_public_values.previous_finalize_addr_bits.iter()) + { + builder.assert_felt_eq(*bit, *current_bit); + } + + // Assert that the leaf challenger is always the same. + for (current, expected) in + leaf_challenger_values.into_iter().zip(current_public_values.leaf_challenger) + { + builder.assert_felt_eq(current, expected); + } + + // Assert that the current challenger matches the start reconstruct challenger. + for (current, expected) in reconstruct_challenger_values + .into_iter() + .zip(current_public_values.start_reconstruct_challenger) + { + builder.assert_felt_eq(current, expected); + } + + // Digest constraints. + { + // If `commited_value_digest` is not zero, then `public_values.commited_value_digest + // should be the current. + + // Set a flags to indicate whether `commited_value_digest` is non-zero. The flags + // are given by the elements of the array, and they will be used as filters to + // constrain the equality. + let mut is_non_zero_flags = vec![]; + for word in committed_value_digest { + for byte in word { + is_non_zero_flags.push(byte); + } + } + + // Using the flags, we can constrain the equality. + for is_non_zero in is_non_zero_flags { + for (word_current, word_public) in committed_value_digest + .into_iter() + .zip(current_public_values.committed_value_digest) + { + for (byte_current, byte_public) in word_current.into_iter().zip(word_public) + { + builder.assert_felt_eq( + is_non_zero * (byte_current - byte_public), + C::F::zero(), + ); + } + } + } + + // Update the committed value digest. 
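+ // The running `committed_value_digest` is overwritten with the current proof's value, so
+ // later proofs are compared against the most recently seen digest.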
+ for (word, current_word) in committed_value_digest + .iter_mut() + .zip_eq(current_public_values.committed_value_digest.iter()) + { + for (byte, current_byte) in word.0.iter_mut().zip_eq(current_word.0.iter()) { + *byte = *current_byte; + } + } + + // If `deferred_proofs_digest` is not zero, then the current value should be + // `public_values.deferred_proofs_digest`. We will use a similar approach as above. + let mut is_non_zero_flags = vec![]; + for element in deferred_proofs_digest { + is_non_zero_flags.push(element); + } + + for is_non_zero in is_non_zero_flags { + for (digest_current, digest_public) in deferred_proofs_digest + .into_iter() + .zip(current_public_values.deferred_proofs_digest) + { + builder.assert_felt_eq( + is_non_zero * (digest_current - digest_public), + C::F::zero(), + ); + } + } + + // Update the deferred proofs digest. + for (digest, current_digest) in deferred_proofs_digest + .iter_mut() + .zip_eq(current_public_values.deferred_proofs_digest.iter()) + { + *digest = *current_digest; + } + } + + // Update the accumulated values. + + // If the current shard has an execution shard, then we update the flag in case it was + // not already set. That is: + // - If the current shard has an execution shard and the flag is set to zero, it will + // be set to one. + // - If the current shard has an execution shard and the flag is set to one, it will + // remain set to one. + contains_execution_shard = builder.eval( + contains_execution_shard + + current_public_values.contains_execution_shard + * (SymbolicFelt::one() - contains_execution_shard), + ); + + // If this proof contains an execution shard, we update the execution shard value. + execution_shard = builder.eval( + current_public_values.next_execution_shard + * current_public_values.contains_execution_shard + + execution_shard + * (SymbolicFelt::one() - current_public_values.contains_execution_shard), + ); + + // Update the reconstruct deferred proof digest. + for (digest, current_digest) in reconstruct_deferred_digest + .iter_mut() + .zip_eq(current_public_values.end_reconstruct_deferred_digest.iter()) + { + *digest = *current_digest; + } + + // Update pc to be the next pc. + pc = current_public_values.next_pc; + + // Update the shard to be the next shard. + shard = current_public_values.next_shard; + + // Update the MemoryInitialize address bits. + for (bit, next_bit) in + init_addr_bits.iter_mut().zip(current_public_values.last_init_addr_bits.iter()) + { + *bit = *next_bit; + } + + // Update the MemoryFinalize address bits. + for (bit, next_bit) in finalize_addr_bits + .iter_mut() + .zip(current_public_values.last_finalize_addr_bits.iter()) + { + *bit = *next_bit; + } + + // Update the reconstruct challenger. + reconstruct_challenger_values = current_public_values.end_reconstruct_challenger; + + // Update the cumulative sum. + for (sum_element, current_sum_element) in + global_cumulative_sum.iter_mut().zip_eq(current_public_values.cumulative_sum.iter()) + { + *sum_element = builder.eval(*sum_element + *current_sum_element); + } + } + + // Update the global values from the last accumulated values. + // Set sp1_vk digest to the one from the proof values. 
+ compress_public_values.sp1_vk_digest = sp1_vk_digest; + // Set next_pc to be the last pc (which is the same as the accumulated pc). + compress_public_values.next_pc = pc; + // Set next shard to be the last shard. + compress_public_values.next_shard = shard; + // Set next execution shard to be the last execution shard. + compress_public_values.next_execution_shard = execution_shard; + // Set the MemoryInitialize address bits to be the last MemoryInitialize address bits. + compress_public_values.last_init_addr_bits = init_addr_bits; + // Set the MemoryFinalize address bits to be the last MemoryFinalize address bits. + compress_public_values.last_finalize_addr_bits = finalize_addr_bits; + // Set the leaf challenger to its value. + compress_public_values.leaf_challenger = leaf_challenger_values; + // Set the start reconstruct challenger to be the initial reconstruct challenger. + compress_public_values.start_reconstruct_challenger = initial_reconstruct_challenger_values; + // Set the end reconstruct challenger to be the last reconstruct challenger. + compress_public_values.end_reconstruct_challenger = reconstruct_challenger_values; + // Set the end reconstruct deferred digest to be the last reconstruct deferred digest. + compress_public_values.end_reconstruct_deferred_digest = reconstruct_deferred_digest; + // Assign the deferred proof digests. + compress_public_values.deferred_proofs_digest = deferred_proofs_digest; + // Assign the committed value digests. + compress_public_values.committed_value_digest = committed_value_digest; + // Assign the cumulative sum. + compress_public_values.cumulative_sum = global_cumulative_sum; + // Assign the `is_complete` flag. + compress_public_values.is_complete = is_complete; + // Set the `contains_execution_shard` flag. + compress_public_values.contains_execution_shard = contains_execution_shard; + // Set the exit code. + compress_public_values.exit_code = exit_code; + // Reflect the vk root. + compress_public_values.vk_root = vk_root; + // Set the digest according to the previous values. + compress_public_values.digest = match kind { + PublicValuesOutputDigest::Reduce => { + recursion_public_values_digest::(builder, compress_public_values) + } + PublicValuesOutputDigest::Root => { + root_public_values_digest::(builder, compress_public_values) + } + }; + + // If the proof is complete, make completeness assertions.
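+ // Every check in `assert_complete` is multiplied by `is_complete`, so the assertions are
+ // vacuous unless the completeness flag is set.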
+ assert_complete(builder, compress_public_values, is_complete); + + SC::commit_recursion_public_values(builder, *compress_public_values); + } +} + +impl SP1CompressWitnessValues { + pub fn shape(&self) -> SP1CompressShape { + let proof_shapes = self.vks_and_proofs.iter().map(|(_, proof)| proof.shape()).collect(); + SP1CompressShape { proof_shapes } + } +} + +impl SP1CompressWitnessValues { + pub fn dummy>( + machine: &StarkMachine, + shape: &SP1CompressShape, + ) -> Self { + let vks_and_proofs = shape + .proof_shapes + .iter() + .map(|proof_shape| { + let (vk, proof) = dummy_vk_and_shard_proof(machine, proof_shape); + (vk, proof) + }) + .collect(); + + Self { vks_and_proofs, is_complete: false } + } +} + +impl From> for SP1CompressShape { + fn from(proof_shapes: Vec) -> Self { + Self { proof_shapes } + } +} diff --git a/crates/recursion/circuit/src/machine/core.rs b/crates/recursion/circuit/src/machine/core.rs new file mode 100644 index 0000000000..568463cca0 --- /dev/null +++ b/crates/recursion/circuit/src/machine/core.rs @@ -0,0 +1,631 @@ +use std::{ + array, + borrow::{Borrow, BorrowMut}, + marker::PhantomData, + mem::MaybeUninit, +}; + +use itertools::Itertools; +use p3_baby_bear::BabyBear; +use p3_commit::Mmcs; +use p3_field::AbstractField; +use p3_matrix::dense::RowMajorMatrix; + +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use sp1_core_machine::{ + cpu::MAX_CPU_LOG_DEGREE, + riscv::{RiscvAir, MAX_LOG_NUMBER_OF_SHARDS}, +}; + +use sp1_recursion_core::air::PV_DIGEST_NUM_WORDS; +use sp1_stark::{ + air::{PublicValues, POSEIDON_NUM_WORDS}, + baby_bear_poseidon2::BabyBearPoseidon2, + Dom, ProofShape, StarkMachine, Word, +}; + +use sp1_stark::{ShardProof, StarkGenericConfig, StarkVerifyingKey}; + +use sp1_recursion_compiler::{ + circuit::CircuitV2Builder, + ir::{Builder, Config, Ext, ExtConst, Felt, SymbolicFelt}, +}; + +use sp1_recursion_core::{ + air::{RecursionPublicValues, RECURSIVE_PROOF_NUM_PV_ELTS}, + DIGEST_SIZE, +}; + +use crate::{ + challenger::{CanObserveVariable, DuplexChallengerVariable, FieldChallengerVariable}, + machine::recursion_public_values_digest, + stark::{dummy_challenger, dummy_vk_and_shard_proof, ShardProofVariable, StarkVerifier}, + BabyBearFriConfig, BabyBearFriConfigVariable, CircuitConfig, VerifyingKeyVariable, +}; + +pub struct SP1RecursionWitnessVariable< + C: CircuitConfig, + SC: BabyBearFriConfigVariable, +> { + pub vk: VerifyingKeyVariable, + pub shard_proofs: Vec>, + pub leaf_challenger: SC::FriChallengerVariable, + pub initial_reconstruct_challenger: DuplexChallengerVariable, + pub is_complete: Felt, + pub is_first_shard: Felt, + pub vk_root: [Felt; DIGEST_SIZE], +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(bound(serialize = "ShardProof: Serialize, Dom: Serialize"))] +#[serde(bound(deserialize = "ShardProof: Deserialize<'de>, Dom: DeserializeOwned"))] +pub struct SP1RecursionWitnessValues { + pub vk: StarkVerifyingKey, + pub shard_proofs: Vec>, + pub leaf_challenger: SC::Challenger, + pub initial_reconstruct_challenger: SC::Challenger, + pub is_complete: bool, + pub is_first_shard: bool, + pub vk_root: [SC::Val; DIGEST_SIZE], +} + +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct SP1RecursionShape { + pub proof_shapes: Vec, + pub is_complete: bool, +} + +/// A program for recursively verifying a batch of SP1 proofs. 
+#[derive(Debug, Clone, Copy)] +pub struct SP1RecursiveVerifier { + _phantom: PhantomData<(C, SC)>, +} + +impl SP1RecursiveVerifier +where + SC: BabyBearFriConfigVariable< + C, + FriChallengerVariable = DuplexChallengerVariable, + DigestVariable = [Felt; DIGEST_SIZE], + >, + C: CircuitConfig>, + >::ProverData>: Clone, +{ + /// Verify a batch of SP1 shard proofs and aggregate their public values. + /// + /// This program represents a first recursive step in the verification of an SP1 proof + /// consisting of one or more shards. Each shard proof is verified and its public values are + /// aggregated into a single set representing the start and end state of the program execution + /// across all shards. + /// + /// # Constraints + /// + /// ## Verifying the STARK proofs. + /// For each shard, the verifier asserts the correctness of the STARK proof which is composed + /// of verifying the FRI proof for openings and verifying the constraints. + /// + /// ## Aggregating the shard public values. + /// See [SP1Prover::verify] for the verification algorithm of a complete SP1 proof. In this + /// function, we are aggregating several shard proofs and attesting to an aggregated state which + /// represents all the shards. + /// + /// ## The leaf challenger. + /// A key difference between the recursive tree verification and the complete one in + /// [SP1Prover::verify] is that the recursive verifier has no way of reconstructing the + /// chanllenger only from a part of the shard proof. Therefore, the value of the leaf challenger + /// is witnessed in the program and the verifier asserts correctness given this challenger. + /// In the course of the recursive verification, the challenger is reconstructed by observing + /// the commitments one by one, and in the final step, the challenger is asserted to be the same + /// as the one witnessed here. + pub fn verify( + builder: &mut Builder, + machine: &StarkMachine>, + input: SP1RecursionWitnessVariable, + ) { + // Read input. + let SP1RecursionWitnessVariable { + vk, + shard_proofs, + leaf_challenger, + initial_reconstruct_challenger, + is_complete, + is_first_shard, + vk_root, + } = input; + + // Initialize shard variables. + let mut initial_shard: Felt<_> = unsafe { MaybeUninit::zeroed().assume_init() }; + let mut current_shard: Felt<_> = unsafe { MaybeUninit::zeroed().assume_init() }; + + // Initialize execution shard variables. + let mut initial_execution_shard: Felt<_> = unsafe { MaybeUninit::zeroed().assume_init() }; + let mut current_execution_shard: Felt<_> = unsafe { MaybeUninit::zeroed().assume_init() }; + + // Initialize program counter variables. + let mut start_pc: Felt<_> = unsafe { MaybeUninit::zeroed().assume_init() }; + let mut current_pc: Felt<_> = unsafe { MaybeUninit::zeroed().assume_init() }; + + // Initialize memory initialization and finalization variables. + let mut initial_previous_init_addr_bits: [Felt<_>; 32] = + unsafe { MaybeUninit::zeroed().assume_init() }; + let mut initial_previous_finalize_addr_bits: [Felt<_>; 32] = + unsafe { MaybeUninit::zeroed().assume_init() }; + let mut current_init_addr_bits: [Felt<_>; 32] = + unsafe { MaybeUninit::zeroed().assume_init() }; + let mut current_finalize_addr_bits: [Felt<_>; 32] = + unsafe { MaybeUninit::zeroed().assume_init() }; + + // Initialize the exit code variable. + let mut exit_code: Felt<_> = unsafe { MaybeUninit::zeroed().assume_init() }; + + // Initialize the public values digest. 
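+ // These digests are placeholders; they receive their real values from the first shard's
+ // public values in the `i == 0` branch of the verification loop below.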
+ let mut committed_value_digest: [Word>; PV_DIGEST_NUM_WORDS] = + array::from_fn(|_| Word(array::from_fn(|_| builder.uninit()))); + + // Initialize the deferred proofs digest. + let mut deferred_proofs_digest: [Felt<_>; POSEIDON_NUM_WORDS] = + array::from_fn(|_| builder.uninit()); + + // Initialize the challenger variables. + let leaf_challenger_public_values = leaf_challenger.public_values(builder); + let mut reconstruct_challenger: DuplexChallengerVariable<_> = + initial_reconstruct_challenger.copy(builder); + + // Initialize the cumulative sum. + let mut global_cumulative_sum: Ext<_, _> = builder.eval(C::EF::zero().cons()); + + // Assert that the number of proofs is not zero. + assert!(!shard_proofs.is_empty()); + + // Initialize a flag to denote the first (if any) CPU shard. + let mut cpu_shard_seen = false; + + // Verify proofs. + for (i, shard_proof) in shard_proofs.into_iter().enumerate() { + let contains_cpu = shard_proof.contains_cpu(); + let contains_memory_init = shard_proof.contains_memory_init(); + let contains_memory_finalize = shard_proof.contains_memory_finalize(); + + // Get the public values. + let public_values: &PublicValues>, Felt<_>> = + shard_proof.public_values.as_slice().borrow(); + + // If this is the first proof in the batch, initialize the variables. + if i == 0 { + // Shard. + initial_shard = public_values.shard; + current_shard = public_values.shard; + + // Execution shard. + initial_execution_shard = public_values.execution_shard; + current_execution_shard = public_values.execution_shard; + + // Program counter. + start_pc = public_values.start_pc; + current_pc = public_values.start_pc; + + // Memory initialization & finalization. + for ((bit, pub_bit), first_bit) in current_init_addr_bits + .iter_mut() + .zip(public_values.previous_init_addr_bits.iter()) + .zip(initial_previous_init_addr_bits.iter_mut()) + { + *bit = *pub_bit; + *first_bit = *pub_bit; + } + for ((bit, pub_bit), first_bit) in current_finalize_addr_bits + .iter_mut() + .zip(public_values.previous_finalize_addr_bits.iter()) + .zip(initial_previous_finalize_addr_bits.iter_mut()) + { + *bit = *pub_bit; + *first_bit = *pub_bit; + } + + // Exit code. + exit_code = public_values.exit_code; + + // Commited public values digests. + for (word, first_word) in committed_value_digest + .iter_mut() + .zip_eq(public_values.committed_value_digest.iter()) + { + for (byte, first_byte) in word.0.iter_mut().zip_eq(first_word.0.iter()) { + *byte = *first_byte; + } + } + + // Deferred proofs digests. + for (digest, first_digest) in deferred_proofs_digest + .iter_mut() + .zip_eq(public_values.deferred_proofs_digest.iter()) + { + *digest = *first_digest; + } + + // First shard constraints. We verify the validity of the `is_first_shard` boolean + // flag, and make assertions for that are specific to the first shard using that + // flag. + + // Assert that the shard is boolean. + builder + .assert_felt_eq(is_first_shard * (is_first_shard - C::F::one()), C::F::zero()); + // Assert that if the flag is set to `1`, then the shard idex is `1`. + builder + .assert_felt_eq(is_first_shard * (initial_shard - C::F::one()), C::F::zero()); + // Assert that if the flag is set to `0`, then the shard index is not `1`. + builder.assert_felt_ne( + (SymbolicFelt::one() - is_first_shard) * initial_shard, + C::F::one(), + ); + + // If the initial shard is the first shard, we assert that the initial challenger + // is the same as a fresh challenger that absorbed the verifying key. 
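+ // Concretely: build a fresh challenger, observe the vk into it, and require (gated by
+ // `is_first_shard`) that its public values match the witnessed initial challenger.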
+ let mut first_shard_challenger = machine.config().challenger_variable(builder); + vk.observe_into(builder, &mut first_shard_challenger); + let first_challenger_public_values = first_shard_challenger.public_values(builder); + let initial_challenger_public_values = + initial_reconstruct_challenger.public_values(builder); + for (first, initial) in + first_challenger_public_values.into_iter().zip(initial_challenger_public_values) + { + builder.assert_felt_eq(is_first_shard * (first - initial), C::F::zero()); + } + + // If it's the first shard (which is the first execution shard), then the `start_pc` + // should be vk.pc_start. + builder.assert_felt_eq(is_first_shard * (start_pc - vk.pc_start), C::F::zero()); + + // Assert that `init_addr_bits` and `finalize_addr_bits` are zero for the first + for bit in current_init_addr_bits.iter() { + builder.assert_felt_eq(is_first_shard * *bit, C::F::zero()); + } + for bit in current_finalize_addr_bits.iter() { + builder.assert_felt_eq(is_first_shard * *bit, C::F::zero()); + } + } + + // Verify the shard. + // + // Do not verify the cumulative sum here, since the permutation challenge is shared + // between all shards. + let mut challenger = leaf_challenger.copy(builder); + + let global_permutation_challenges = + (0..2).map(|_| challenger.sample_ext(builder)).collect::>(); + + StarkVerifier::verify_shard( + builder, + &vk, + machine, + &mut challenger, + &shard_proof, + &global_permutation_challenges, + ); + + // Assert that first shard has a "CPU". Equivalently, assert that if the shard does + // not have a "CPU", then the current shard is not 1. + if !contains_cpu { + builder.assert_felt_ne(current_shard, C::F::one()); + } + + // CPU log degree bound check constraints (this assertion is made in compile time). + if shard_proof.contains_cpu() { + let log_degree_cpu = shard_proof.log_degree_cpu(); + assert!(log_degree_cpu <= MAX_CPU_LOG_DEGREE); + } + + // Shard constraints. + { + // Assert that the shard of the proof is equal to the current shard. + builder.assert_felt_eq(current_shard, public_values.shard); + + // Increment the current shard by one. + current_shard = builder.eval(current_shard + C::F::one()); + } + + // Execution shard constraints. + { + // If the shard has a "CPU" chip, then the execution shard should be incremented by + // 1. + if contains_cpu { + // If this is the first time we've seen the CPU, we initialize the initial and + // current execution shards. + if !cpu_shard_seen { + initial_execution_shard = public_values.execution_shard; + current_execution_shard = initial_execution_shard; + cpu_shard_seen = true; + } + + builder.assert_felt_eq(current_execution_shard, public_values.execution_shard); + + current_execution_shard = builder.eval(current_execution_shard + C::F::one()); + } + } + + // Program counter constraints. + { + // Assert that the start_pc of the proof is equal to the current pc. + builder.assert_felt_eq(current_pc, public_values.start_pc); + + // If it's not a shard with "CPU", then assert that the start_pc equals the + // next_pc. + if !contains_cpu { + builder.assert_felt_eq(public_values.start_pc, public_values.next_pc); + } else { + // If it's a shard with "CPU", then assert that the start_pc is not zero. + builder.assert_felt_ne(public_values.start_pc, C::F::zero()); + } + + // Update current_pc to be the end_pc of the current proof. + current_pc = public_values.next_pc; + } + + // Exit code constraints. + { + // Assert that the exit code is zero (success) for all proofs. 
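+ // In other words, the recursive verifier only accepts executions that exited successfully.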
+ builder.assert_felt_eq(exit_code, C::F::zero()); + } + + // Memory initialization & finalization constraints. + { + // Assert that the MemoryInitialize address bits match the current loop variable. + for (bit, current_bit) in current_init_addr_bits + .iter() + .zip_eq(public_values.previous_init_addr_bits.iter()) + { + builder.assert_felt_eq(*bit, *current_bit); + } + + // Assert that the MemoryFinalize address bits match the current loop variable. + for (bit, current_bit) in current_finalize_addr_bits + .iter() + .zip_eq(public_values.previous_finalize_addr_bits.iter()) + { + builder.assert_felt_eq(*bit, *current_bit); + } + + // Assert that if MemoryInit is not present, then the address bits are the same. + if !contains_memory_init { + for (prev_bit, last_bit) in public_values + .previous_init_addr_bits + .iter() + .zip_eq(public_values.last_init_addr_bits.iter()) + { + builder.assert_felt_eq(*prev_bit, *last_bit); + } + } + + // Assert that if MemoryFinalize is not present, then the address bits are the + // same. + if !contains_memory_finalize { + for (prev_bit, last_bit) in public_values + .previous_finalize_addr_bits + .iter() + .zip_eq(public_values.last_finalize_addr_bits.iter()) + { + builder.assert_felt_eq(*prev_bit, *last_bit); + } + } + + // Update the MemoryInitialize address bits. + for (bit, pub_bit) in + current_init_addr_bits.iter_mut().zip(public_values.last_init_addr_bits.iter()) + { + *bit = *pub_bit; + } + + // Update the MemoryFinalize address bits. + for (bit, pub_bit) in current_finalize_addr_bits + .iter_mut() + .zip(public_values.last_finalize_addr_bits.iter()) + { + *bit = *pub_bit; + } + } + + // Digest constraints. + { + // // If `commited_value_digest` is not zero, then the current value should be equal + // to `public_values.commited_value_digest`. + + // Set flags to indicate whether `commited_value_digest` is non-zero. The flags are + // given by the elements of the array, and they will be used as filters to constrain + // the equality. + let mut is_non_zero_flags = vec![]; + for word in committed_value_digest { + for byte in word { + is_non_zero_flags.push(byte); + } + } + + // Using the flags, we can constrain the equality. + for is_non_zero in is_non_zero_flags { + for (word_current, word_public) in + committed_value_digest.into_iter().zip(public_values.committed_value_digest) + { + for (byte_current, byte_public) in word_current.into_iter().zip(word_public) + { + builder.assert_felt_eq( + is_non_zero * (byte_current - byte_public), + C::F::zero(), + ); + } + } + } + + // If it's not a shard with "CPU", then the committed value digest shouldn't change. + if !contains_cpu { + for (word_d, pub_word_d) in committed_value_digest + .iter() + .zip(public_values.committed_value_digest.iter()) + { + for (d, pub_d) in word_d.0.iter().zip(pub_word_d.0.iter()) { + builder.assert_felt_eq(*d, *pub_d); + } + } + } + + // Update the committed value digest. + for (word_d, pub_word_d) in committed_value_digest + .iter_mut() + .zip(public_values.committed_value_digest.iter()) + { + for (d, pub_d) in word_d.0.iter_mut().zip(pub_word_d.0.iter()) { + *d = *pub_d; + } + } + + // Update the exit code. + exit_code = public_values.exit_code; + + // If `deferred_proofs_digest` is not zero, then the current value should be equal + // to `public_values.deferred_proofs_digest. + + // Set a flag to indicate whether `deferred_proofs_digest` is non-zero. The flags + // are given by the elements of the array, and they will be used as filters to + // constrain the equality. 
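+ // The filter trick: for each flag `f`, constrain `f * (current - public) == 0`, which only
+ // binds the equality when `f` is non-zero.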
+ let mut is_non_zero_flags = vec![]; + for element in deferred_proofs_digest { + is_non_zero_flags.push(element); + } + + // Using the flags, we can constrain the equality. + for is_non_zero in is_non_zero_flags { + for (deferred_current, deferred_public) in deferred_proofs_digest + .iter() + .zip(public_values.deferred_proofs_digest.iter()) + { + builder.assert_felt_eq( + is_non_zero * (*deferred_current - *deferred_public), + C::F::zero(), + ); + } + } + + // If it's not a shard with "CPU", then the deferred proofs digest should not + // change. + if !contains_cpu { + for (d, pub_d) in deferred_proofs_digest + .iter() + .zip(public_values.deferred_proofs_digest.iter()) + { + builder.assert_felt_eq(*d, *pub_d); + } + } + + // Update the deferred proofs digest. + deferred_proofs_digest.copy_from_slice(&public_values.deferred_proofs_digest); + } + + // Verify that the number of shards is not too large, i.e. that for every shard, we + // have shard < 2^{MAX_LOG_NUMBER_OF_SHARDS}. + C::range_check_felt(builder, public_values.shard, MAX_LOG_NUMBER_OF_SHARDS); + + // Update the reconstruct challenger. + reconstruct_challenger.observe(builder, shard_proof.commitment.global_main_commit); + for element in shard_proof.public_values.iter().take(machine.num_pv_elts()) { + reconstruct_challenger.observe(builder, *element); + } + + // Cumulative sum is updated by sums of all chips. + for values in shard_proof.opened_values.chips.iter() { + global_cumulative_sum = + builder.eval(global_cumulative_sum + values.global_cumulative_sum); + } + } + + // Assert that the last exit code is zero. + builder.assert_felt_eq(exit_code, C::F::zero()); + + // Write all values to the public values struct and commit to them. + { + // Compute the vk digest. + let vk_digest = vk.hash(builder); + + // Collect the public values for challengers. + let initial_challenger_public_values = + initial_reconstruct_challenger.public_values(builder); + let final_challenger_public_values = reconstruct_challenger.public_values(builder); + + // Collect the cumulative sum. + let global_cumulative_sum_array = builder.ext2felt_v2(global_cumulative_sum); + + // Collect the deferred proof digests. + let zero: Felt<_> = builder.eval(C::F::zero()); + let start_deferred_digest = [zero; POSEIDON_NUM_WORDS]; + let end_deferred_digest = [zero; POSEIDON_NUM_WORDS]; + + // Initialize the public values we will commit to. 
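+ // `RecursionPublicValues` is a field-wise view over a flat array of
+ // `RECURSIVE_PROOF_NUM_PV_ELTS` felts, so the assignments below populate the values that
+ // ultimately get committed.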
+ let mut recursion_public_values_stream = [zero; RECURSIVE_PROOF_NUM_PV_ELTS]; + let recursion_public_values: &mut RecursionPublicValues<_> = + recursion_public_values_stream.as_mut_slice().borrow_mut(); + recursion_public_values.committed_value_digest = committed_value_digest; + recursion_public_values.deferred_proofs_digest = deferred_proofs_digest; + recursion_public_values.start_pc = start_pc; + recursion_public_values.next_pc = current_pc; + recursion_public_values.start_shard = initial_shard; + recursion_public_values.next_shard = current_shard; + recursion_public_values.start_execution_shard = initial_execution_shard; + recursion_public_values.next_execution_shard = current_execution_shard; + recursion_public_values.previous_init_addr_bits = initial_previous_init_addr_bits; + recursion_public_values.last_init_addr_bits = current_init_addr_bits; + recursion_public_values.previous_finalize_addr_bits = + initial_previous_finalize_addr_bits; + recursion_public_values.last_finalize_addr_bits = current_finalize_addr_bits; + recursion_public_values.sp1_vk_digest = vk_digest; + recursion_public_values.leaf_challenger = leaf_challenger_public_values; + recursion_public_values.start_reconstruct_challenger = initial_challenger_public_values; + recursion_public_values.end_reconstruct_challenger = final_challenger_public_values; + recursion_public_values.cumulative_sum = global_cumulative_sum_array; + recursion_public_values.start_reconstruct_deferred_digest = start_deferred_digest; + recursion_public_values.end_reconstruct_deferred_digest = end_deferred_digest; + recursion_public_values.exit_code = exit_code; + recursion_public_values.is_complete = is_complete; + // Set the contains an execution shard flag. + recursion_public_values.contains_execution_shard = + builder.eval(C::F::from_bool(cpu_shard_seen)); + recursion_public_values.vk_root = vk_root; + + // Calculate the digest and set it in the public values. 
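+ // The digest is a Poseidon2 hash of the first `NUM_PV_ELMS_TO_HASH` public value elements,
+ // as computed by `recursion_public_values_digest`.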
+ recursion_public_values.digest = + recursion_public_values_digest::(builder, recursion_public_values); + + SC::commit_recursion_public_values(builder, *recursion_public_values); + } + } +} + +impl SP1RecursionWitnessValues { + pub fn shape(&self) -> SP1RecursionShape { + let proof_shapes = self.shard_proofs.iter().map(|proof| proof.shape()).collect(); + + SP1RecursionShape { proof_shapes, is_complete: self.is_complete } + } +} + +impl SP1RecursionWitnessValues { + pub fn dummy( + machine: &StarkMachine>, + shape: &SP1RecursionShape, + ) -> Self { + let (mut vks, shard_proofs): (Vec<_>, Vec<_>) = + shape.proof_shapes.iter().map(|shape| dummy_vk_and_shard_proof(machine, shape)).unzip(); + let vk = vks.pop().unwrap(); + Self { + vk, + shard_proofs, + leaf_challenger: dummy_challenger(machine.config()), + initial_reconstruct_challenger: dummy_challenger(machine.config()), + is_complete: shape.is_complete, + is_first_shard: false, + vk_root: [BabyBear::zero(); DIGEST_SIZE], + } + } +} + +impl From for SP1RecursionShape { + fn from(proof_shape: ProofShape) -> Self { + Self { proof_shapes: vec![proof_shape], is_complete: false } + } +} diff --git a/crates/recursion/circuit/src/machine/deferred.rs b/crates/recursion/circuit/src/machine/deferred.rs new file mode 100644 index 0000000000..793b79adbc --- /dev/null +++ b/crates/recursion/circuit/src/machine/deferred.rs @@ -0,0 +1,289 @@ +use std::{ + array, + borrow::{Borrow, BorrowMut}, +}; + +use serde::{de::DeserializeOwned, Deserialize, Serialize}; + +use p3_air::Air; +use p3_baby_bear::BabyBear; +use p3_commit::Mmcs; +use p3_field::AbstractField; +use p3_matrix::dense::RowMajorMatrix; + +use sp1_primitives::consts::WORD_SIZE; +use sp1_recursion_compiler::ir::{Builder, Ext, Felt}; + +use sp1_stark::{ + air::{MachineAir, POSEIDON_NUM_WORDS}, + baby_bear_poseidon2::BabyBearPoseidon2, + Dom, ShardProof, StarkMachine, StarkVerifyingKey, Word, +}; + +use sp1_recursion_core::{ + air::{RecursionPublicValues, PV_DIGEST_NUM_WORDS, RECURSIVE_PROOF_NUM_PV_ELTS}, + DIGEST_SIZE, +}; + +use crate::{ + challenger::{CanObserveVariable, DuplexChallengerVariable}, + constraints::RecursiveVerifierConstraintFolder, + hash::{FieldHasher, FieldHasherVariable}, + machine::assert_recursion_public_values_valid, + stark::{dummy_challenger, ShardProofVariable, StarkVerifier}, + BabyBearFriConfig, BabyBearFriConfigVariable, CircuitConfig, VerifyingKeyVariable, +}; + +use super::{ + recursion_public_values_digest, SP1CompressShape, SP1CompressWitnessValues, + SP1MerkleProofVerifier, SP1MerkleProofWitnessValues, SP1MerkleProofWitnessVariable, +}; + +pub struct SP1DeferredVerifier { + _phantom: std::marker::PhantomData<(C, SC, A)>, +} + +#[derive(Debug, Clone)] +pub struct SP1DeferredShape { + inner: SP1CompressShape, + height: usize, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(bound( + serialize = "SC::Challenger: Serialize, ShardProof: Serialize, Dom: Serialize, [SC::Val; DIGEST_SIZE]: Serialize, SC::Digest: Serialize" +))] +#[serde(bound( + deserialize = "SC::Challenger: Deserialize<'de>, ShardProof: Deserialize<'de>, Dom: DeserializeOwned, [SC::Val; DIGEST_SIZE]: Deserialize<'de>, SC::Digest: Deserialize<'de>" +))] +pub struct SP1DeferredWitnessValues> { + pub vks_and_proofs: Vec<(StarkVerifyingKey, ShardProof)>, + pub vk_merkle_data: SP1MerkleProofWitnessValues, + pub start_reconstruct_deferred_digest: [SC::Val; POSEIDON_NUM_WORDS], + pub sp1_vk_digest: [SC::Val; DIGEST_SIZE], + pub leaf_challenger: SC::Challenger, + pub committed_value_digest: [Word; 
PV_DIGEST_NUM_WORDS], + pub deferred_proofs_digest: [SC::Val; POSEIDON_NUM_WORDS], + pub end_pc: SC::Val, + pub end_shard: SC::Val, + pub end_execution_shard: SC::Val, + pub init_addr_bits: [SC::Val; 32], + pub finalize_addr_bits: [SC::Val; 32], + pub is_complete: bool, +} + +pub struct SP1DeferredWitnessVariable< + C: CircuitConfig, + SC: FieldHasherVariable + BabyBearFriConfigVariable, +> { + pub vks_and_proofs: Vec<(VerifyingKeyVariable, ShardProofVariable)>, + pub vk_merkle_data: SP1MerkleProofWitnessVariable, + pub start_reconstruct_deferred_digest: [Felt; POSEIDON_NUM_WORDS], + pub sp1_vk_digest: [Felt; DIGEST_SIZE], + pub leaf_challenger: SC::FriChallengerVariable, + pub committed_value_digest: [Word>; PV_DIGEST_NUM_WORDS], + pub deferred_proofs_digest: [Felt; POSEIDON_NUM_WORDS], + pub end_pc: Felt, + pub end_shard: Felt, + pub end_execution_shard: Felt, + pub init_addr_bits: [Felt; 32], + pub finalize_addr_bits: [Felt; 32], + pub is_complete: Felt, +} + +impl SP1DeferredVerifier +where + SC: BabyBearFriConfigVariable< + C, + FriChallengerVariable = DuplexChallengerVariable, + DigestVariable = [Felt; DIGEST_SIZE], + >, + C: CircuitConfig>, + >::ProverData>: Clone, + A: MachineAir + for<'a> Air>, +{ + /// Verify a batch of deferred proofs. + /// + /// Each deferred proof is a recursive proof representing some computation. Namely, every such + /// proof represents a recursively verified program. + /// verifier: + /// - Asserts that each of these proofs is valid as a `compress` proof. + /// - Asserts that each of these proofs is complete by checking the `is_complete` flag in the + /// proof's public values. + /// - Aggregates the proof information into the accumulated deferred digest. + pub fn verify( + builder: &mut Builder, + machine: &StarkMachine, + input: SP1DeferredWitnessVariable, + value_assertions: bool, + ) { + let SP1DeferredWitnessVariable { + vks_and_proofs, + vk_merkle_data, + start_reconstruct_deferred_digest, + sp1_vk_digest, + leaf_challenger, + committed_value_digest, + deferred_proofs_digest, + end_pc, + end_shard, + end_execution_shard, + init_addr_bits, + finalize_addr_bits, + is_complete, + } = input; + + // First, verify the merkle tree proofs. + let vk_root = vk_merkle_data.root; + let values = vks_and_proofs.iter().map(|(vk, _)| vk.hash(builder)).collect::>(); + SP1MerkleProofVerifier::verify(builder, values, vk_merkle_data, value_assertions); + + let mut deferred_public_values_stream: Vec> = + (0..RECURSIVE_PROOF_NUM_PV_ELTS).map(|_| builder.uninit()).collect(); + let deferred_public_values: &mut RecursionPublicValues<_> = + deferred_public_values_stream.as_mut_slice().borrow_mut(); + + // Initialize the start of deferred digests. + deferred_public_values.start_reconstruct_deferred_digest = + start_reconstruct_deferred_digest; + + // Initialize the consistency check variable. + let mut reconstruct_deferred_digest: [Felt; POSEIDON_NUM_WORDS] = + start_reconstruct_deferred_digest; + + for (vk, shard_proof) in vks_and_proofs { + // Initialize a challenger. + let mut challenger = machine.config().challenger_variable(builder); + // Observe the vk and start pc. + challenger.observe(builder, vk.commitment); + challenger.observe(builder, vk.pc_start); + let zero: Felt<_> = builder.eval(C::F::zero()); + for _ in 0..7 { + challenger.observe(builder, zero); + } + + // Observe the and public values. 
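+ // As in the compress verifier, the shard is verified with zeroed global permutation
+ // challenges after observing the public values.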
+ challenger.observe_slice( + builder, + shard_proof.public_values[0..machine.num_pv_elts()].iter().copied(), + ); + + let zero_ext: Ext = builder.eval(C::F::zero()); + StarkVerifier::verify_shard( + builder, + &vk, + machine, + &mut challenger, + &shard_proof, + &[zero_ext, zero_ext], + ); + + // Get the current public values. + let current_public_values: &RecursionPublicValues> = + shard_proof.public_values.as_slice().borrow(); + // Assert that the public values are valid. + assert_recursion_public_values_valid::(builder, current_public_values); + + // Assert that the proof is complete. + builder.assert_felt_eq(current_public_values.is_complete, C::F::one()); + + // Update deferred proof digest + // poseidon2( current_digest[..8] || pv.sp1_vk_digest[..8] || + // pv.committed_value_digest[..32] ) + let mut inputs: [Felt; 48] = array::from_fn(|_| builder.uninit()); + inputs[0..DIGEST_SIZE].copy_from_slice(&reconstruct_deferred_digest); + + inputs[DIGEST_SIZE..DIGEST_SIZE + DIGEST_SIZE] + .copy_from_slice(¤t_public_values.sp1_vk_digest); + + for j in 0..PV_DIGEST_NUM_WORDS { + for k in 0..WORD_SIZE { + let element = current_public_values.committed_value_digest[j][k]; + inputs[j * WORD_SIZE + k + 16] = element; + } + } + reconstruct_deferred_digest = SC::hash(builder, &inputs); + } + + // Set the public values. + + // Set initial_pc, end_pc, initial_shard, and end_shard to be the hitned values. + deferred_public_values.start_pc = end_pc; + deferred_public_values.next_pc = end_pc; + deferred_public_values.start_shard = end_shard; + deferred_public_values.next_shard = end_shard; + deferred_public_values.start_execution_shard = end_execution_shard; + deferred_public_values.next_execution_shard = end_execution_shard; + // Set the init and finalize address bits to be the hinted values. + deferred_public_values.previous_init_addr_bits = init_addr_bits; + deferred_public_values.last_init_addr_bits = init_addr_bits; + deferred_public_values.previous_finalize_addr_bits = finalize_addr_bits; + deferred_public_values.last_finalize_addr_bits = finalize_addr_bits; + + // Set the sp1_vk_digest to be the hitned value. + deferred_public_values.sp1_vk_digest = sp1_vk_digest; + + // Set the committed value digest to be the hitned value. + deferred_public_values.committed_value_digest = committed_value_digest; + // Set the deferred proof digest to be the hitned value. + deferred_public_values.deferred_proofs_digest = deferred_proofs_digest; + + // Set the initial, end, and leaf challenger to be the hitned values. + let values = leaf_challenger.public_values(builder); + deferred_public_values.leaf_challenger = values; + deferred_public_values.start_reconstruct_challenger = values; + deferred_public_values.end_reconstruct_challenger = values; + // Set the exit code to be zero for now. + deferred_public_values.exit_code = builder.eval(C::F::zero()); + // Assign the deffered proof digests. + deferred_public_values.end_reconstruct_deferred_digest = reconstruct_deferred_digest; + // Set the is_complete flag. + deferred_public_values.is_complete = is_complete; + // Set the `contains_execution_shard` flag. + deferred_public_values.contains_execution_shard = builder.eval(C::F::zero()); + // Set the cumulative sum to zero. + deferred_public_values.cumulative_sum = array::from_fn(|_| builder.eval(C::F::zero())); + // Set the vk root from the witness. + deferred_public_values.vk_root = vk_root; + // Set the digest according to the previous values. 
+ deferred_public_values.digest = + recursion_public_values_digest::(builder, deferred_public_values); + + SC::commit_recursion_public_values(builder, *deferred_public_values); + } +} + +impl SP1DeferredWitnessValues { + pub fn dummy>( + machine: &StarkMachine, + shape: &SP1DeferredShape, + ) -> Self { + let inner_witness = + SP1CompressWitnessValues::::dummy(machine, &shape.inner); + let vks_and_proofs = inner_witness.vks_and_proofs; + + let vk_merkle_data = SP1MerkleProofWitnessValues::dummy(vks_and_proofs.len(), shape.height); + + Self { + vks_and_proofs, + vk_merkle_data, + leaf_challenger: dummy_challenger(machine.config()), + is_complete: true, + sp1_vk_digest: [BabyBear::zero(); DIGEST_SIZE], + start_reconstruct_deferred_digest: [BabyBear::zero(); POSEIDON_NUM_WORDS], + committed_value_digest: [Word::default(); PV_DIGEST_NUM_WORDS], + deferred_proofs_digest: [BabyBear::zero(); POSEIDON_NUM_WORDS], + end_pc: BabyBear::zero(), + end_shard: BabyBear::zero(), + end_execution_shard: BabyBear::zero(), + init_addr_bits: [BabyBear::zero(); 32], + finalize_addr_bits: [BabyBear::zero(); 32], + } + } +} + +impl SP1DeferredShape { + pub const fn new(inner: SP1CompressShape, height: usize) -> Self { + Self { inner, height } + } +} diff --git a/crates/recursion/circuit/src/machine/mod.rs b/crates/recursion/circuit/src/machine/mod.rs new file mode 100644 index 0000000000..6b8b73b4ce --- /dev/null +++ b/crates/recursion/circuit/src/machine/mod.rs @@ -0,0 +1,21 @@ +mod complete; +mod compress; +mod core; +mod deferred; +mod public_values; +mod root; +mod vkey_proof; +mod witness; +mod wrap; + +pub(crate) use complete::*; +pub use compress::*; +pub use core::*; +pub use deferred::*; +pub use public_values::*; +pub use root::*; +pub use vkey_proof::*; +pub use wrap::*; + +#[allow(unused_imports)] +pub use witness::*; diff --git a/crates/recursion/circuit/src/machine/public_values.rs b/crates/recursion/circuit/src/machine/public_values.rs new file mode 100644 index 0000000000..eeed3d1b3d --- /dev/null +++ b/crates/recursion/circuit/src/machine/public_values.rs @@ -0,0 +1,95 @@ +use itertools::Itertools; +use sp1_derive::AlignedBorrow; +use sp1_recursion_compiler::ir::{Builder, Felt}; +use sp1_recursion_core::{ + air::{RecursionPublicValues, NUM_PV_ELMS_TO_HASH}, + DIGEST_SIZE, +}; +use sp1_stark::{air::PV_DIGEST_NUM_WORDS, Word}; + +use crate::{hash::Posedion2BabyBearHasherVariable, CircuitConfig}; + +#[derive(Debug, Clone, Copy, Default, AlignedBorrow)] +#[repr(C)] +pub struct RootPublicValues { + pub(crate) inner: RecursionPublicValues, +} + +/// Verifies the digest of a recursive public values struct. +pub(crate) fn assert_recursion_public_values_valid( + builder: &mut Builder, + public_values: &RecursionPublicValues>, +) where + C: CircuitConfig, + H: Posedion2BabyBearHasherVariable, +{ + let digest = recursion_public_values_digest::(builder, public_values); + for (value, expected) in public_values.digest.iter().copied().zip_eq(digest) { + builder.assert_felt_eq(value, expected); + } +} + +/// Verifies the digest of a recursive public values struct. +pub(crate) fn recursion_public_values_digest( + builder: &mut Builder, + public_values: &RecursionPublicValues>, +) -> [Felt; DIGEST_SIZE] +where + C: CircuitConfig, + H: Posedion2BabyBearHasherVariable, +{ + let pv_slice = public_values.as_array(); + H::poseidon2_hash(builder, &pv_slice[..NUM_PV_ELMS_TO_HASH]) +} + +/// Assert that the digest of the root public values is correct. 
+pub(crate) fn assert_root_public_values_valid( + builder: &mut Builder, + public_values: &RootPublicValues>, +) where + C: CircuitConfig, + H: Posedion2BabyBearHasherVariable, +{ + let expected_digest = root_public_values_digest::(builder, &public_values.inner); + for (value, expected) in public_values.inner.digest.iter().copied().zip_eq(expected_digest) { + builder.assert_felt_eq(value, expected); + } +} + +/// Compute the digest of the root public values. +pub(crate) fn root_public_values_digest( + builder: &mut Builder, + public_values: &RecursionPublicValues>, +) -> [Felt; DIGEST_SIZE] +where + C: CircuitConfig, + H: Posedion2BabyBearHasherVariable, +{ + let input = public_values + .sp1_vk_digest + .into_iter() + .chain(public_values.committed_value_digest.into_iter().flat_map(|word| word.0.into_iter())) + .collect::>(); + H::poseidon2_hash(builder, &input) +} + +impl RootPublicValues { + pub const fn new(inner: RecursionPublicValues) -> Self { + Self { inner } + } + + #[inline] + pub const fn sp1_vk_digest(&self) -> &[T; DIGEST_SIZE] { + &self.inner.sp1_vk_digest + } + + #[inline] + pub const fn committed_value_digest(&self) -> &[Word; PV_DIGEST_NUM_WORDS] { + &self.inner.committed_value_digest + } + + #[inline] + pub const fn digest(&self) -> &[T; DIGEST_SIZE] { + &self.inner.digest + } +} diff --git a/crates/recursion/circuit/src/machine/root.rs b/crates/recursion/circuit/src/machine/root.rs new file mode 100644 index 0000000000..a2137a7fa3 --- /dev/null +++ b/crates/recursion/circuit/src/machine/root.rs @@ -0,0 +1,88 @@ +use std::marker::PhantomData; + +use p3_air::Air; +use p3_baby_bear::BabyBear; +use p3_commit::Mmcs; +use p3_field::AbstractField; +use p3_matrix::dense::RowMajorMatrix; + +use super::{ + PublicValuesOutputDigest, SP1CompressVerifier, SP1CompressWithVKeyVerifier, + SP1CompressWithVKeyWitnessVariable, SP1CompressWitnessVariable, +}; +use crate::{ + challenger::DuplexChallengerVariable, constraints::RecursiveVerifierConstraintFolder, + BabyBearFriConfigVariable, CircuitConfig, +}; +use sp1_recursion_compiler::ir::{Builder, Felt}; +use sp1_recursion_core::DIGEST_SIZE; +use sp1_stark::{air::MachineAir, StarkMachine}; + +/// A program to verify a single recursive proof representing a complete proof of program execution. +/// +/// The root verifier is simply a `SP1CompressVerifier` with an assertion that the `is_complete` +/// flag is set to true. +#[derive(Debug, Clone, Copy)] +pub struct SP1CompressRootVerifier { + _phantom: PhantomData<(C, SC, A)>, +} + +/// A program to verify a single recursive proof representing a complete proof of program execution. +/// +/// The root verifier is simply a `SP1CompressVerifier` with an assertion that the `is_complete` +/// flag is set to true. +#[derive(Debug, Clone, Copy)] +pub struct SP1CompressRootVerifierWithVKey { + _phantom: PhantomData<(C, SC, A)>, +} + +impl SP1CompressRootVerifier +where + SC: BabyBearFriConfigVariable, + C: CircuitConfig, + >::ProverData>: Clone, + A: MachineAir + for<'a> Air>, +{ + pub fn verify( + builder: &mut Builder, + machine: &StarkMachine, + input: SP1CompressWitnessVariable, + vk_root: [Felt; DIGEST_SIZE], + ) { + // Assert that the program is complete. + builder.assert_felt_eq(input.is_complete, C::F::one()); + // Verify the proof, as a compress proof. 
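+ // `PublicValuesOutputDigest::Root` makes the committed digest the root digest (a hash of
+ // `sp1_vk_digest` and `committed_value_digest`) instead of the reduce digest.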
+ SP1CompressVerifier::verify( + builder, + machine, + input, + vk_root, + PublicValuesOutputDigest::Root, + ); + } +} + +impl SP1CompressRootVerifierWithVKey +where + SC: BabyBearFriConfigVariable< + C, + FriChallengerVariable = DuplexChallengerVariable, + DigestVariable = [Felt; DIGEST_SIZE], + >, + C: CircuitConfig>, + >::ProverData>: Clone, + A: MachineAir + for<'a> Air>, +{ + pub fn verify( + builder: &mut Builder, + machine: &StarkMachine, + input: SP1CompressWithVKeyWitnessVariable, + value_assertions: bool, + kind: PublicValuesOutputDigest, + ) { + // Assert that the program is complete. + builder.assert_felt_eq(input.compress_var.is_complete, C::F::one()); + // Verify the proof, as a compress proof. + SP1CompressWithVKeyVerifier::verify(builder, machine, input, value_assertions, kind); + } +} diff --git a/crates/recursion/circuit/src/machine/vkey_proof.rs b/crates/recursion/circuit/src/machine/vkey_proof.rs new file mode 100644 index 0000000000..eeefe314b4 --- /dev/null +++ b/crates/recursion/circuit/src/machine/vkey_proof.rs @@ -0,0 +1,201 @@ +use std::marker::PhantomData; + +use p3_air::Air; +use p3_baby_bear::BabyBear; +use p3_commit::Mmcs; +use p3_field::AbstractField; +use p3_matrix::dense::RowMajorMatrix; +use serde::{Deserialize, Serialize}; +use sp1_recursion_compiler::ir::{Builder, Felt}; +use sp1_recursion_core::DIGEST_SIZE; +use sp1_stark::{ + air::MachineAir, baby_bear_poseidon2::BabyBearPoseidon2, Com, InnerChallenge, OpeningProof, + StarkGenericConfig, StarkMachine, +}; + +use crate::{ + challenger::DuplexChallengerVariable, + constraints::RecursiveVerifierConstraintFolder, + hash::{FieldHasher, FieldHasherVariable}, + merkle_tree::{verify, MerkleProof}, + stark::MerkleProofVariable, + witness::{WitnessWriter, Witnessable}, + BabyBearFriConfig, BabyBearFriConfigVariable, CircuitConfig, TwoAdicPcsProofVariable, +}; + +use super::{ + PublicValuesOutputDigest, SP1CompressShape, SP1CompressVerifier, SP1CompressWitnessValues, + SP1CompressWitnessVariable, +}; + +/// A program to verify a batch of recursive proofs and aggregate their public values. +#[derive(Debug, Clone, Copy)] +pub struct SP1MerkleProofVerifier { + _phantom: PhantomData<(C, SC)>, +} + +/// The shape of the compress proof with vk validation proofs. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct SP1CompressWithVkeyShape { + pub compress_shape: SP1CompressShape, + pub merkle_tree_height: usize, +} + +/// Witness layout for the compress stage verifier. +pub struct SP1MerkleProofWitnessVariable< + C: CircuitConfig, + SC: FieldHasherVariable + BabyBearFriConfigVariable, +> { + /// The shard proofs to verify. + pub vk_merkle_proofs: Vec>, + /// Hinted values to enable dummy digests. + pub values: Vec, + /// The root of the merkle tree. + pub root: SC::DigestVariable, +} + +/// An input layout for the reduce verifier. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(bound(serialize = "SC::Digest: Serialize"))] +#[serde(bound(deserialize = "SC::Digest: Deserialize<'de>"))] +pub struct SP1MerkleProofWitnessValues> { + pub vk_merkle_proofs: Vec>, + pub values: Vec, + pub root: SC::Digest, +} + +impl SP1MerkleProofVerifier +where + SC: BabyBearFriConfigVariable, + C: CircuitConfig, +{ + /// Verify (via Merkle tree) that the vkey digests of a proof belong to a specified set (encoded + /// the Merkle tree proofs in input). 
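+ /// When `value_assertions` is false, each Merkle proof is still verified against the root,
+ /// but the hinted values are not constrained to equal the provided digests.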
+ pub fn verify( + builder: &mut Builder, + digests: Vec, + input: SP1MerkleProofWitnessVariable, + value_assertions: bool, + ) { + let SP1MerkleProofWitnessVariable { vk_merkle_proofs, values, root } = input; + for ((proof, value), expected_value) in + vk_merkle_proofs.into_iter().zip(values).zip(digests) + { + verify(builder, proof, value, root); + if value_assertions { + SC::assert_digest_eq(builder, expected_value, value); + } else { + SC::assert_digest_eq(builder, value, value); + } + } + } +} + +#[derive(Debug, Clone, Copy)] +pub struct SP1CompressWithVKeyVerifier { + _phantom: PhantomData<(C, SC, A)>, +} + +/// Witness layout for the verifier of the proof shape phase of the compress stage. +pub struct SP1CompressWithVKeyWitnessVariable< + C: CircuitConfig, + SC: BabyBearFriConfigVariable, +> { + pub compress_var: SP1CompressWitnessVariable, + pub merkle_var: SP1MerkleProofWitnessVariable, +} + +/// An input layout for the verifier of the proof shape phase of the compress stage. +pub struct SP1CompressWithVKeyWitnessValues> { + pub compress_val: SP1CompressWitnessValues, + pub merkle_val: SP1MerkleProofWitnessValues, +} + +impl SP1CompressWithVKeyVerifier +where + SC: BabyBearFriConfigVariable< + C, + FriChallengerVariable = DuplexChallengerVariable, + DigestVariable = [Felt; DIGEST_SIZE], + >, + C: CircuitConfig>, + >::ProverData>: Clone, + A: MachineAir + for<'a> Air>, +{ + /// Verify the proof shape phase of the compress stage. + pub fn verify( + builder: &mut Builder, + machine: &StarkMachine, + input: SP1CompressWithVKeyWitnessVariable, + value_assertions: bool, + kind: PublicValuesOutputDigest, + ) { + let values = input + .compress_var + .vks_and_proofs + .iter() + .map(|(vk, _)| vk.hash(builder)) + .collect::>(); + let vk_root = input.merkle_var.root.map(|x| builder.eval(x)); + SP1MerkleProofVerifier::verify(builder, values, input.merkle_var, value_assertions); + SP1CompressVerifier::verify(builder, machine, input.compress_var, vk_root, kind); + } +} + +impl> SP1CompressWithVKeyWitnessValues { + pub fn shape(&self) -> SP1CompressWithVkeyShape { + let merkle_tree_height = self.merkle_val.vk_merkle_proofs.first().unwrap().path.len(); + SP1CompressWithVkeyShape { compress_shape: self.compress_val.shape(), merkle_tree_height } + } +} + +impl SP1MerkleProofWitnessValues { + pub fn dummy(num_proofs: usize, height: usize) -> Self { + let dummy_digest = [BabyBear::zero(); DIGEST_SIZE]; + let vk_merkle_proofs = + vec![MerkleProof { index: 0, path: vec![dummy_digest; height] }; num_proofs]; + let values = vec![dummy_digest; num_proofs]; + + Self { vk_merkle_proofs, values, root: dummy_digest } + } +} + +impl SP1CompressWithVKeyWitnessValues { + pub fn dummy>( + machine: &StarkMachine, + shape: &SP1CompressWithVkeyShape, + ) -> Self { + let compress_val = + SP1CompressWitnessValues::::dummy(machine, &shape.compress_shape); + let num_proofs = compress_val.vks_and_proofs.len(); + let merkle_val = SP1MerkleProofWitnessValues::::dummy( + num_proofs, + shape.merkle_tree_height, + ); + Self { compress_val, merkle_val } + } +} + +impl, SC: BabyBearFriConfigVariable> + Witnessable for SP1CompressWithVKeyWitnessValues +where + Com: Witnessable>::DigestVariable>, + // This trait bound is redundant, but Rust-Analyzer is not able to infer it. 
+ SC: FieldHasher, + >::Digest: Witnessable, + OpeningProof: Witnessable>, +{ + type WitnessVariable = SP1CompressWithVKeyWitnessVariable; + + fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { + SP1CompressWithVKeyWitnessVariable { + compress_var: self.compress_val.read(builder), + merkle_var: self.merkle_val.read(builder), + } + } + + fn write(&self, witness: &mut impl WitnessWriter) { + self.compress_val.write(witness); + self.merkle_val.write(witness); + } +} diff --git a/crates/recursion/circuit/src/machine/witness.rs b/crates/recursion/circuit/src/machine/witness.rs new file mode 100644 index 0000000000..11b79831e1 --- /dev/null +++ b/crates/recursion/circuit/src/machine/witness.rs @@ -0,0 +1,267 @@ +use std::borrow::Borrow; + +use p3_baby_bear::BabyBear; +use p3_challenger::DuplexChallenger; +use p3_symmetric::Hash; + +use p3_field::AbstractField; +use sp1_recursion_compiler::ir::Builder; +use sp1_stark::{ + baby_bear_poseidon2::BabyBearPoseidon2, Com, InnerChallenge, InnerPerm, InnerVal, OpeningProof, + StarkVerifyingKey, Word, +}; + +use sp1_recursion_compiler::ir::Felt; + +use crate::{ + challenger::DuplexChallengerVariable, + hash::{FieldHasher, FieldHasherVariable}, + merkle_tree::MerkleProof, + stark::MerkleProofVariable, + witness::{WitnessWriter, Witnessable}, + BabyBearFriConfigVariable, CircuitConfig, TwoAdicPcsProofVariable, VerifyingKeyVariable, +}; + +use super::{ + SP1CompressWitnessValues, SP1CompressWitnessVariable, SP1DeferredWitnessValues, + SP1DeferredWitnessVariable, SP1MerkleProofWitnessValues, SP1MerkleProofWitnessVariable, + SP1RecursionWitnessValues, SP1RecursionWitnessVariable, +}; + +impl> Witnessable for Word { + type WitnessVariable = Word; + + fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { + Word(self.0.read(builder)) + } + + fn write(&self, witness: &mut impl WitnessWriter) { + self.0.write(witness); + } +} + +impl Witnessable for DuplexChallenger +where + C: CircuitConfig, +{ + type WitnessVariable = DuplexChallengerVariable; + + fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { + let sponge_state = self.sponge_state.read(builder); + let input_buffer = self.input_buffer.read(builder); + let output_buffer = self.output_buffer.read(builder); + DuplexChallengerVariable { sponge_state, input_buffer, output_buffer } + } + + fn write(&self, witness: &mut impl WitnessWriter) { + self.sponge_state.write(witness); + self.input_buffer.write(witness); + self.output_buffer.write(witness); + } +} + +impl Witnessable for Hash +where + C: CircuitConfig, + W: Witnessable, +{ + type WitnessVariable = [W::WitnessVariable; DIGEST_ELEMENTS]; + + fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { + let array: &[W; DIGEST_ELEMENTS] = self.borrow(); + array.read(builder) + } + + fn write(&self, witness: &mut impl WitnessWriter) { + let array: &[W; DIGEST_ELEMENTS] = self.borrow(); + array.write(witness); + } +} + +impl, SC: BabyBearFriConfigVariable> + Witnessable for StarkVerifyingKey +where + Com: Witnessable>::DigestVariable>, + OpeningProof: Witnessable>, +{ + type WitnessVariable = VerifyingKeyVariable; + + fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { + let commitment = self.commit.read(builder); + let pc_start = self.pc_start.read(builder); + let chip_information = self.chip_information.clone(); + let chip_ordering = self.chip_ordering.clone(); + VerifyingKeyVariable { commitment, pc_start, chip_information, chip_ordering } + } + + fn write(&self, witness: &mut impl WitnessWriter) { + 
self.commit.write(witness); + self.pc_start.write(witness); + } +} + +impl Witnessable for SP1RecursionWitnessValues +where + C: CircuitConfig>, +{ + type WitnessVariable = SP1RecursionWitnessVariable; + + fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { + let vk = self.vk.read(builder); + let shard_proofs = self.shard_proofs.read(builder); + let leaf_challenger = self.leaf_challenger.read(builder); + let initial_reconstruct_challenger = self.initial_reconstruct_challenger.read(builder); + let is_complete = InnerVal::from_bool(self.is_complete).read(builder); + let is_first_shard = InnerVal::from_bool(self.is_first_shard).read(builder); + let vk_root = self.vk_root.read(builder); + SP1RecursionWitnessVariable { + vk, + shard_proofs, + leaf_challenger, + initial_reconstruct_challenger, + is_complete, + is_first_shard, + vk_root, + } + } + + fn write(&self, witness: &mut impl WitnessWriter) { + self.vk.write(witness); + self.shard_proofs.write(witness); + self.leaf_challenger.write(witness); + self.initial_reconstruct_challenger.write(witness); + self.is_complete.write(witness); + self.is_first_shard.write(witness); + self.vk_root.write(witness); + } +} + +impl, SC: BabyBearFriConfigVariable> + Witnessable for SP1CompressWitnessValues +where + Com: Witnessable>::DigestVariable>, + OpeningProof: Witnessable>, +{ + type WitnessVariable = SP1CompressWitnessVariable; + + fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { + let vks_and_proofs = self.vks_and_proofs.read(builder); + let is_complete = InnerVal::from_bool(self.is_complete).read(builder); + + SP1CompressWitnessVariable { vks_and_proofs, is_complete } + } + + fn write(&self, witness: &mut impl WitnessWriter) { + self.vks_and_proofs.write(witness); + InnerVal::from_bool(self.is_complete).write(witness); + } +} + +impl Witnessable for SP1DeferredWitnessValues +where + C: CircuitConfig>, +{ + type WitnessVariable = SP1DeferredWitnessVariable; + + fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { + let vks_and_proofs = self.vks_and_proofs.read(builder); + let vk_merkle_data = self.vk_merkle_data.read(builder); + let start_reconstruct_deferred_digest = + self.start_reconstruct_deferred_digest.read(builder); + let sp1_vk_digest = self.sp1_vk_digest.read(builder); + let leaf_challenger = self.leaf_challenger.read(builder); + let committed_value_digest = self.committed_value_digest.read(builder); + let deferred_proofs_digest = self.deferred_proofs_digest.read(builder); + let end_pc = self.end_pc.read(builder); + let end_shard = self.end_shard.read(builder); + let end_execution_shard = self.end_execution_shard.read(builder); + let init_addr_bits = self.init_addr_bits.read(builder); + let finalize_addr_bits = self.finalize_addr_bits.read(builder); + let is_complete = InnerVal::from_bool(self.is_complete).read(builder); + + SP1DeferredWitnessVariable { + vks_and_proofs, + vk_merkle_data, + start_reconstruct_deferred_digest, + sp1_vk_digest, + leaf_challenger, + committed_value_digest, + deferred_proofs_digest, + end_pc, + end_shard, + end_execution_shard, + init_addr_bits, + finalize_addr_bits, + is_complete, + } + } + + fn write(&self, witness: &mut impl WitnessWriter) { + self.vks_and_proofs.write(witness); + self.vk_merkle_data.write(witness); + self.start_reconstruct_deferred_digest.write(witness); + self.sp1_vk_digest.write(witness); + self.leaf_challenger.write(witness); + self.committed_value_digest.write(witness); + self.deferred_proofs_digest.write(witness); + self.end_pc.write(witness); + 
self.end_shard.write(witness); + self.end_execution_shard.write(witness); + self.init_addr_bits.write(witness); + self.finalize_addr_bits.write(witness); + self.is_complete.write(witness); + } +} + +impl> Witnessable for MerkleProof +where + HV::Digest: Witnessable, +{ + type WitnessVariable = MerkleProofVariable; + + fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { + let mut bits = vec![]; + let mut index = self.index; + for _ in 0..self.path.len() { + bits.push(index % 2 == 1); + index >>= 1; + } + let index_bits = bits.read(builder); + let path = self.path.read(builder); + + MerkleProofVariable { index: index_bits, path } + } + + fn write(&self, witness: &mut impl WitnessWriter) { + let mut index = self.index; + for _ in 0..self.path.len() { + (index % 2 == 1).write(witness); + index >>= 1; + } + self.path.write(witness); + } +} + +impl, SC: BabyBearFriConfigVariable> Witnessable + for SP1MerkleProofWitnessValues +where + // This trait bound is redundant, but Rust-Analyzer is not able to infer it. + SC: FieldHasher, + >::Digest: Witnessable, +{ + type WitnessVariable = SP1MerkleProofWitnessVariable; + + fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { + SP1MerkleProofWitnessVariable { + vk_merkle_proofs: self.vk_merkle_proofs.read(builder), + values: self.values.read(builder), + root: self.root.read(builder), + } + } + + fn write(&self, witness: &mut impl WitnessWriter) { + self.vk_merkle_proofs.write(witness); + self.values.write(witness); + self.root.write(witness); + } +} diff --git a/crates/recursion/circuit/src/machine/wrap.rs b/crates/recursion/circuit/src/machine/wrap.rs new file mode 100644 index 0000000000..1a3eaf8da2 --- /dev/null +++ b/crates/recursion/circuit/src/machine/wrap.rs @@ -0,0 +1,91 @@ +use std::{borrow::Borrow, marker::PhantomData}; + +use p3_air::Air; +use p3_baby_bear::BabyBear; +use p3_commit::Mmcs; +use p3_field::AbstractField; +use p3_matrix::dense::RowMajorMatrix; +use sp1_recursion_compiler::ir::{Builder, Ext, Felt}; +use sp1_stark::{air::MachineAir, StarkMachine}; + +use crate::{ + challenger::CanObserveVariable, + constraints::RecursiveVerifierConstraintFolder, + machine::{assert_root_public_values_valid, RootPublicValues}, + stark::StarkVerifier, + BabyBearFriConfigVariable, CircuitConfig, +}; + +use super::SP1CompressWitnessVariable; + +/// A program that recursively verifies a proof made by [super::SP1RootVerifier]. +#[derive(Debug, Clone, Copy)] +pub struct SP1WrapVerifier { + _phantom: PhantomData<(C, SC, A)>, +} + +impl SP1WrapVerifier +where + SC: BabyBearFriConfigVariable, + C: CircuitConfig, + >::ProverData>: Clone, + A: MachineAir + for<'a> Air>, +{ + /// Verify a batch of recursive proofs and aggregate their public values. + /// + /// The compression verifier can aggregate proofs of different kinds: + /// - Core proofs: proofs which are recursive proof of a batch of SP1 shard proofs. The + /// implementation in this function assumes a fixed recursive verifier speicified by + /// `recursive_vk`. + /// - Deferred proofs: proofs which are recursive proof of a batch of deferred proofs. The + /// implementation in this function assumes a fixed deferred verification program specified by + /// `deferred_vk`. + /// - Compress proofs: these are proofs which refer to a prove of this program. The key for it + /// is part of public values will be propagated accross all levels of recursion and will be + /// checked against itself as in [sp1_prover::Prover] or as in [super::SP1RootVerifier]. 
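// In outline, `verify` below: (1) destructures the single (vk, proof) pair from the
// witness, (2) sets up a fresh challenger and observes the vk commitment, the start
// pc, and seven zero felts of padding, followed by the proof's public values,
// (3) verifies the shard proof with zero global permutation challenges, and
// (4) checks the root public values digest before reflecting the public values to the
// next level.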
+ pub fn verify( + builder: &mut Builder, + machine: &StarkMachine, + input: SP1CompressWitnessVariable, + ) { + // Read input. + let SP1CompressWitnessVariable { vks_and_proofs, .. } = input; + + // Assert that there is only one proof, and get the verification key and proof. + let [(vk, proof)] = vks_and_proofs.try_into().ok().unwrap(); + + // Verify the stark proof. + + // Prepare a challenger. + let mut challenger = machine.config().challenger_variable(builder); + + // Observe the vk and start pc. + challenger.observe(builder, vk.commitment); + challenger.observe(builder, vk.pc_start); + let zero: Felt<_> = builder.eval(C::F::zero()); + for _ in 0..7 { + challenger.observe(builder, zero); + } + + // Observe the main commitment and public values. + challenger + .observe_slice(builder, proof.public_values[0..machine.num_pv_elts()].iter().copied()); + + let zero_ext: Ext = builder.eval(C::F::zero()); + StarkVerifier::verify_shard( + builder, + &vk, + machine, + &mut challenger, + &proof, + &[zero_ext, zero_ext], + ); + + // Get the public values, and assert that they are valid. + let public_values: &RootPublicValues> = proof.public_values.as_slice().borrow(); + assert_root_public_values_valid::(builder, public_values); + + // Reflect the public values to the next level. + SC::commit_recursion_public_values(builder, public_values.inner); + } +} diff --git a/crates/recursion/circuit/src/merkle_tree.rs b/crates/recursion/circuit/src/merkle_tree.rs new file mode 100644 index 0000000000..a3524c0567 --- /dev/null +++ b/crates/recursion/circuit/src/merkle_tree.rs @@ -0,0 +1,222 @@ +use std::fmt::Debug; + +use rayon::prelude::*; + +use p3_field::Field; +use p3_util::{reverse_bits_len, reverse_slice_index_bits}; +use serde::{Deserialize, Serialize}; +use sp1_core_machine::utils::log2_strict_usize; +use sp1_recursion_compiler::ir::Builder; + +use crate::{ + hash::{FieldHasher, FieldHasherVariable}, + stark::MerkleProofVariable, + CircuitConfig, +}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(bound(serialize = "HV::Digest: Serialize"))] +#[serde(bound(deserialize = "HV::Digest: Deserialize<'de>"))] +pub struct MerkleTree> { + /// The height of the tree, not counting the root layer. This is the same as the logarithm of the + /// number of leaves. + pub height: usize, + + /// All the layers but the root. If there are `n` leaves where `n` is a power of 2, there are + /// `2n - 2` elements in this vector. The leaves are at the beginning of the vector. + pub digest_layers: Vec, +} +pub struct VcsError; + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(bound(serialize = "HV::Digest: Serialize"))] +#[serde(bound(deserialize = "HV::Digest: Deserialize<'de>"))] +pub struct MerkleProof> { + pub index: usize, + pub path: Vec, +} + +impl Debug for VcsError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "VcsError") + } +} + +impl> MerkleTree { + pub fn commit(leaves: Vec) -> (HV::Digest, Self) { + assert!(!leaves.is_empty()); + let new_len = leaves.len().next_power_of_two(); + let height = log2_strict_usize(new_len); + + // Pre-allocate the vector. + let mut digest_layers = Vec::with_capacity(2 * new_len - 2); + + // If `leaves.len()` is not a power of 2, we pad the leaves with default values. + let mut last_layer = leaves; + let old_len = last_layer.len(); + for _ in old_len..new_len { + last_layer.push(HV::Digest::default()); + } + + // Store the leaves in bit-reversed order. 
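// `reverse_slice_index_bits` (imported above from p3_util) permutes the layer so that
// the element at position `i` ends up at the position whose binary representation is
// `i` reversed over log2(n) bits. A host-side sketch of that index map:
fn reverse_bits_sketch(index: usize, bits: usize) -> usize {
    let mut reversed = 0;
    for i in 0..bits {
        reversed |= ((index >> i) & 1) << (bits - 1 - i);
    }
    reversed
}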
+ reverse_slice_index_bits(&mut last_layer); + + digest_layers.extend(last_layer.iter()); + + // Compute the rest of the layers. + for _ in 0..height - 1 { + let mut next_layer = Vec::with_capacity(last_layer.len() / 2); + last_layer + .par_chunks_exact(2) + .map(|chunk| { + let [left, right] = chunk.try_into().unwrap(); + HV::constant_compress([left, right]) + }) + .collect_into_vec(&mut next_layer); + digest_layers.extend(next_layer.iter()); + + last_layer = next_layer; + } + + debug_assert_eq!(digest_layers.len(), 2 * new_len - 2); + + let root = HV::constant_compress([last_layer[0], last_layer[1]]); + (root, Self { height, digest_layers }) + } + + pub fn open(&self, index: usize) -> (HV::Digest, MerkleProof) { + let mut path = Vec::with_capacity(self.height); + let mut bit_rev_index = reverse_bits_len(index, self.height); + let value = self.digest_layers[bit_rev_index]; + + // Variable to keep track index of the first element in the current layer. + let mut offset = 0; + for i in 0..self.height { + let sibling = if bit_rev_index % 2 == 0 { + self.digest_layers[offset + bit_rev_index + 1] + } else { + self.digest_layers[offset + bit_rev_index - 1] + }; + path.push(sibling); + bit_rev_index >>= 1; + + // The current layer has 1 << (height - i) elements, so we shift offset by that amount. + offset += 1 << (self.height - i); + } + debug_assert_eq!(path.len(), self.height); + (value, MerkleProof { index, path }) + } + + pub fn verify( + proof: MerkleProof, + value: HV::Digest, + commitment: HV::Digest, + ) -> Result<(), VcsError> { + let MerkleProof { index, path } = proof; + + let mut value = value; + + let mut index = reverse_bits_len(index, path.len()); + + for sibling in path { + // If the index is odd, swap the order of [value, sibling]. + let new_pair = if index % 2 == 0 { [value, sibling] } else { [sibling, value] }; + value = HV::constant_compress(new_pair); + index >>= 1; + } + if value == commitment { + Ok(()) + } else { + Err(VcsError) + } + } +} + +pub fn verify>( + builder: &mut Builder, + proof: MerkleProofVariable, + value: HV::DigestVariable, + commitment: HV::DigestVariable, +) { + let mut value = value; + for (sibling, bit) in proof.path.iter().zip(proof.index.iter().rev()) { + let sibling = *sibling; + + // If the index is odd, swap the order of [value, sibling]. + let new_pair = HV::select_chain_digest(builder, *bit, [value, sibling]); + value = HV::compress(builder, new_pair); + } + HV::assert_digest_eq(builder, value, commitment); +} + +#[cfg(test)] +mod tests { + use itertools::Itertools; + use p3_baby_bear::BabyBear; + use p3_field::AbstractField; + use p3_util::log2_ceil_usize; + use rand::rngs::OsRng; + use sp1_recursion_compiler::{ + config::InnerConfig, + ir::{Builder, Felt}, + }; + use sp1_recursion_core::DIGEST_SIZE; + use sp1_stark::baby_bear_poseidon2::BabyBearPoseidon2; + use zkhash::ark_ff::UniformRand; + + use crate::{ + merkle_tree::{verify, MerkleTree}, + stark::MerkleProofVariable, + utils::tests::run_test_recursion, + CircuitConfig, + }; + type C = InnerConfig; + type F = BabyBear; + type HV = BabyBearPoseidon2; + + #[test] + fn test_merkle_tree_inner() { + let mut rng = OsRng; + let mut builder = Builder::::default(); + // Run five times with different randomness. + for _ in 0..5 { + // Test with different number of leaves. 
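// The trickiest part of `open` above is the indexing into the flat `digest_layers`
// vector. A host-side sketch of that arithmetic, assuming layer 0 is the
// (bit-reversed) leaf layer and the root layer is not stored:
fn layer_offset(height: usize, layer: usize) -> usize {
    // Layer i holds 1 << (height - i) digests, so the start of `layer` is the sum of
    // the sizes of all layers below it.
    (0..layer).map(|i| 1usize << (height - i)).sum()
}

fn sibling_position(height: usize, layer: usize, pos_in_layer: usize) -> usize {
    // Within a layer, an even position's sibling sits to its right, an odd one's to its left.
    let offset = layer_offset(height, layer);
    if pos_in_layer % 2 == 0 { offset + pos_in_layer + 1 } else { offset + pos_in_layer - 1 }
}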
+ for j in 2..20 { + let leaves: Vec<[F; DIGEST_SIZE]> = + (0..j).map(|_| std::array::from_fn(|_| F::rand(&mut rng))).collect(); + let (root, tree) = MerkleTree::::commit(leaves.to_vec()); + for (i, leaf) in leaves.iter().enumerate() { + let (_, proof) = MerkleTree::::open(&tree, i); + MerkleTree::::verify(proof.clone(), *leaf, root).unwrap(); + let (value_variable, path_variable): ([Felt<_>; 8], Vec<[Felt<_>; 8]>) = ( + std::array::from_fn(|i| builder.constant(leaf[i])), + proof + .path + .iter() + .map(|x| std::array::from_fn(|i| builder.constant(x[i]))) + .collect_vec(), + ); + + let index_var = builder.constant(BabyBear::from_canonical_usize(i)); + let index_bits = C::num2bits(&mut builder, index_var, log2_ceil_usize(j)); + let root_variable: [Felt<_>; 8] = + root.iter().map(|x| builder.constant(*x)).collect_vec().try_into().unwrap(); + + let proof_variable = MerkleProofVariable:: { + index: index_bits, + path: path_variable, + }; + + verify::( + &mut builder, + proof_variable, + value_variable, + root_variable, + ); + } + } + } + + run_test_recursion(builder.into_operations(), std::iter::empty()); + } +} diff --git a/crates/recursion/circuit/src/mmcs.rs b/crates/recursion/circuit/src/mmcs.rs deleted file mode 100644 index 0847e02c5f..0000000000 --- a/crates/recursion/circuit/src/mmcs.rs +++ /dev/null @@ -1,57 +0,0 @@ -use itertools::Itertools; -use p3_matrix::Dimensions; -use sp1_recursion_compiler::ir::{Builder, Config, Felt, Var}; -use std::cmp::Reverse; - -use crate::{poseidon2::Poseidon2CircuitBuilder, types::OuterDigestVariable}; - -pub fn verify_batch( - builder: &mut Builder, - commit: OuterDigestVariable, - dimensions: Vec, - index_bits: Vec>, - opened_values: Vec>>>, - proof: Vec>, -) { - let mut heights_tallest_first = - dimensions.iter().enumerate().sorted_by_key(|(_, dims)| Reverse(dims.height)).peekable(); - - let mut curr_height_padded = heights_tallest_first.peek().unwrap().1.height.next_power_of_two(); - - let ext_slice: Vec>> = heights_tallest_first - .peeking_take_while(|(_, dims)| dims.height.next_power_of_two() == curr_height_padded) - .flat_map(|(i, _)| opened_values[i].as_slice()) - .cloned() - .collect::>(); - let felt_slice: Vec> = - ext_slice.iter().flat_map(|ext| ext.as_slice()).cloned().collect::>(); - let mut root = builder.p2_hash(&felt_slice); - - for (i, sibling) in proof.iter().enumerate() { - let bit = index_bits[i]; - let left = [builder.select_v(bit, sibling[0], root[0])]; - let right = [builder.select_v(bit, root[0], sibling[0])]; - - root = builder.p2_compress([left, right]); - curr_height_padded >>= 1; - - let next_height = heights_tallest_first - .peek() - .map(|(_, dims)| dims.height) - .filter(|h| h.next_power_of_two() == curr_height_padded); - - if let Some(next_height) = next_height { - let ext_slice: Vec>> = heights_tallest_first - .peeking_take_while(|(_, dims)| dims.height == next_height) - .flat_map(|(i, _)| opened_values[i].as_slice()) - .cloned() - .collect::>(); - let felt_slice: Vec> = - ext_slice.iter().flat_map(|ext| ext.as_slice()).cloned().collect::>(); - let next_height_openings_digest = builder.p2_hash(&felt_slice); - root = builder.p2_compress([root, next_height_openings_digest]); - } - } - - builder.assert_var_eq(root[0], commit[0]); -} diff --git a/crates/recursion/circuit/src/poseidon2.rs b/crates/recursion/circuit/src/poseidon2.rs deleted file mode 100644 index ffa482057f..0000000000 --- a/crates/recursion/circuit/src/poseidon2.rs +++ /dev/null @@ -1,228 +0,0 @@ -//! An implementation of Poseidon2 over BN254. 
- -use std::array; - -use itertools::Itertools; -use p3_field::{AbstractField, Field}; -use sp1_recursion_compiler::ir::{Builder, Config, DslIr, Felt, Var}; - -use crate::{challenger::reduce_32, types::OuterDigestVariable, DIGEST_SIZE, RATE, SPONGE_SIZE}; - -pub trait Poseidon2CircuitBuilder { - fn p2_permute_mut(&mut self, state: [Var; SPONGE_SIZE]); - fn p2_hash(&mut self, input: &[Felt]) -> OuterDigestVariable; - fn p2_compress(&mut self, input: [OuterDigestVariable; 2]) -> OuterDigestVariable; - fn p2_babybear_permute_mut(&mut self, state: [Felt; 16]); - fn p2_babybear_hash(&mut self, input: &[Felt]) -> [Felt; 8]; -} - -impl Poseidon2CircuitBuilder for Builder { - fn p2_permute_mut(&mut self, state: [Var; SPONGE_SIZE]) { - self.push(DslIr::CircuitPoseidon2Permute(state)) - } - - fn p2_hash(&mut self, input: &[Felt]) -> OuterDigestVariable { - assert!(C::N::bits() == p3_bn254_fr::Bn254Fr::bits()); - assert!(C::F::bits() == p3_baby_bear::BabyBear::bits()); - let num_f_elms = C::N::bits() / C::F::bits(); - let mut state: [Var; SPONGE_SIZE] = - [self.eval(C::N::zero()), self.eval(C::N::zero()), self.eval(C::N::zero())]; - for block_chunk in &input.iter().chunks(RATE) { - for (chunk_id, chunk) in (&block_chunk.chunks(num_f_elms)).into_iter().enumerate() { - let chunk = chunk.collect_vec().into_iter().copied().collect::>(); - state[chunk_id] = reduce_32(self, chunk.as_slice()); - } - self.p2_permute_mut(state); - } - - [state[0]] - } - - fn p2_compress(&mut self, input: [OuterDigestVariable; 2]) -> OuterDigestVariable { - let state: [Var; SPONGE_SIZE] = - [self.eval(input[0][0]), self.eval(input[1][0]), self.eval(C::N::zero())]; - self.p2_permute_mut(state); - [state[0]; DIGEST_SIZE] - } - - fn p2_babybear_permute_mut(&mut self, state: [Felt; 16]) { - self.push(DslIr::CircuitPoseidon2PermuteBabyBear(Box::new(state))); - } - - fn p2_babybear_hash(&mut self, input: &[Felt]) -> [Felt; 8] { - let mut state: [Felt; 16] = array::from_fn(|_| self.eval(C::F::zero())); - - for block_chunk in &input.iter().chunks(8) { - state.iter_mut().zip(block_chunk).for_each(|(s, i)| *s = self.eval(*i)); - self.p2_babybear_permute_mut(state); - } - - array::from_fn(|i| state[i]) - } -} - -#[cfg(test)] -pub mod tests { - use p3_baby_bear::BabyBear; - use p3_bn254_fr::Bn254Fr; - use p3_field::AbstractField; - use p3_symmetric::{CryptographicHasher, Permutation, PseudoCompressionFunction}; - use rand::{thread_rng, Rng}; - use sp1_recursion_compiler::{ - config::OuterConfig, - constraints::ConstraintCompiler, - ir::{Builder, Felt, Var, Witness}, - }; - use sp1_recursion_core::stark::config::{outer_perm, OuterCompress, OuterHash}; - use sp1_recursion_gnark_ffi::PlonkBn254Prover; - use sp1_stark::{inner_perm, InnerHash}; - - use crate::{poseidon2::Poseidon2CircuitBuilder, types::OuterDigestVariable}; - - #[test] - fn test_p2_permute_mut() { - let poseidon2 = outer_perm(); - let input: [Bn254Fr; 3] = [ - Bn254Fr::from_canonical_u32(0), - Bn254Fr::from_canonical_u32(1), - Bn254Fr::from_canonical_u32(2), - ]; - let mut output = input; - poseidon2.permute_mut(&mut output); - - let mut builder = Builder::::default(); - let a: Var<_> = builder.eval(input[0]); - let b: Var<_> = builder.eval(input[1]); - let c: Var<_> = builder.eval(input[2]); - builder.p2_permute_mut([a, b, c]); - - builder.assert_var_eq(a, output[0]); - builder.assert_var_eq(b, output[1]); - builder.assert_var_eq(c, output[2]); - - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - 
PlonkBn254Prover::test::(constraints.clone(), Witness::default()); - } - - #[test] - fn test_p2_babybear_permute_mut() { - let mut rng = thread_rng(); - let mut builder = Builder::::default(); - let input: [BabyBear; 16] = [rng.gen(); 16]; - let input_vars: [Felt<_>; 16] = input.map(|x| builder.eval(x)); - builder.p2_babybear_permute_mut(input_vars); - - let perm = inner_perm(); - let result = perm.permute(input); - for i in 0..16 { - builder.assert_felt_eq(input_vars[i], result[i]); - } - - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - PlonkBn254Prover::test::(constraints.clone(), Witness::default()); - } - - #[test] - fn test_p2_hash() { - let perm = outer_perm(); - let hasher = OuterHash::new(perm.clone()).unwrap(); - - let input: [BabyBear; 7] = [ - BabyBear::from_canonical_u32(0), - BabyBear::from_canonical_u32(1), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - ]; - let output = hasher.hash_iter(input); - - let mut builder = Builder::::default(); - let a: Felt<_> = builder.eval(input[0]); - let b: Felt<_> = builder.eval(input[1]); - let c: Felt<_> = builder.eval(input[2]); - let d: Felt<_> = builder.eval(input[3]); - let e: Felt<_> = builder.eval(input[4]); - let f: Felt<_> = builder.eval(input[5]); - let g: Felt<_> = builder.eval(input[6]); - let result = builder.p2_hash(&[a, b, c, d, e, f, g]); - - builder.assert_var_eq(result[0], output[0]); - - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - PlonkBn254Prover::test::(constraints.clone(), Witness::default()); - } - - #[test] - fn test_p2_compress() { - let perm = outer_perm(); - let compressor = OuterCompress::new(perm.clone()); - - let a: [Bn254Fr; 1] = [Bn254Fr::two()]; - let b: [Bn254Fr; 1] = [Bn254Fr::two()]; - let gt = compressor.compress([a, b]); - - let mut builder = Builder::::default(); - let a: OuterDigestVariable = [builder.eval(a[0])]; - let b: OuterDigestVariable = [builder.eval(b[0])]; - let result = builder.p2_compress([a, b]); - builder.assert_var_eq(result[0], gt[0]); - - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - PlonkBn254Prover::test::(constraints.clone(), Witness::default()); - } - - #[test] - fn test_p2_babybear_hash() { - let perm = inner_perm(); - let hasher = InnerHash::new(perm.clone()); - - let input: [BabyBear; 26] = [ - BabyBear::from_canonical_u32(0), - BabyBear::from_canonical_u32(1), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(2), - BabyBear::from_canonical_u32(3), - BabyBear::from_canonical_u32(3), - BabyBear::from_canonical_u32(3), - BabyBear::from_canonical_u32(3), - BabyBear::from_canonical_u32(3), - BabyBear::from_canonical_u32(3), - BabyBear::from_canonical_u32(3), - BabyBear::from_canonical_u32(3), - BabyBear::from_canonical_u32(3), - BabyBear::from_canonical_u32(3), - BabyBear::from_canonical_u32(3), - ]; - let output = hasher.hash_iter(input); - println!("{:?}", output); - - let mut 
builder = Builder::::default(); - let input_felts: [Felt<_>; 26] = input.map(|x| builder.eval(x)); - let result = builder.p2_babybear_hash(input_felts.as_slice()); - - for i in 0..8 { - builder.assert_felt_eq(result[i], output[i]); - } - - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - PlonkBn254Prover::test::(constraints.clone(), Witness::default()); - } -} diff --git a/crates/recursion/circuit/src/stark.rs b/crates/recursion/circuit/src/stark.rs index 6c30bdb468..26e83c76d3 100644 --- a/crates/recursion/circuit/src/stark.rs +++ b/crates/recursion/circuit/src/stark.rs @@ -1,413 +1,646 @@ -use std::{borrow::Borrow, fmt::Debug, marker::PhantomData}; +use hashbrown::HashMap; +use itertools::{izip, Itertools}; -use crate::{ - fri::verify_two_adic_pcs, - poseidon2::Poseidon2CircuitBuilder, - types::OuterDigestVariable, - utils::{babybear_bytes_to_bn254, babybears_to_bn254, words_to_bytes}, - witness::Witnessable, -}; -use p3_air::Air; +use num_traits::cast::ToPrimitive; + +use p3_air::{Air, BaseAir}; use p3_baby_bear::BabyBear; -use p3_bn254_fr::Bn254Fr; -use p3_commit::TwoAdicMultiplicativeCoset; -use p3_field::{AbstractField, TwoAdicField}; -use p3_util::log2_strict_usize; -use sp1_core_machine::utils::{Span, SpanBuilder, SpanBuilderError}; +use p3_commit::{Mmcs, Pcs, PolynomialSpace, TwoAdicMultiplicativeCoset}; +use p3_field::{AbstractField, ExtensionField, Field, TwoAdicField}; +use p3_matrix::{dense::RowMajorMatrix, Dimensions}; + use sp1_recursion_compiler::{ - config::OuterConfig, - constraints::{Constraint, ConstraintCompiler}, - ir::{Builder, Config, DslIr, Ext, Felt, Usize, Var, Witness}, - prelude::SymbolicVar, + circuit::CircuitV2Builder, + ir::{Builder, Config, Ext, ExtConst}, + prelude::Felt, }; -use sp1_recursion_core::{ - air::{RecursionPublicValues, NUM_PV_ELMS_TO_HASH}, - stark::{ - config::{outer_fri_config_with_blowup, BabyBearPoseidon2Outer}, - utils::sp1_dev_mode, - RecursionAirWideDeg17, - }, -}; -use sp1_recursion_program::{ - commit::PolynomialSpaceVariable, stark::RecursiveVerifierConstraintFolder, - types::QuotientDataValues, +use sp1_stark::{ + air::InteractionScope, baby_bear_poseidon2::BabyBearPoseidon2, AirOpenedValues, Challenger, + Chip, ChipOpenedValues, InnerChallenge, ProofShape, ShardCommitment, ShardOpenedValues, + ShardProof, Val, PROOF_MAX_NUM_PVS, }; +use sp1_stark::{air::MachineAir, StarkGenericConfig, StarkMachine, StarkVerifyingKey}; -use sp1_stark::{ - air::MachineAir, Com, ShardCommitment, ShardProof, StarkGenericConfig, StarkMachine, - StarkVerifyingKey, PROOF_MAX_NUM_PVS, +use crate::{ + challenger::CanObserveVariable, + fri::{dummy_hash, dummy_pcs_proof, PolynomialBatchShape, PolynomialShape}, + hash::FieldHasherVariable, + BabyBearFriConfig, CircuitConfig, TwoAdicPcsMatsVariable, TwoAdicPcsProofVariable, }; use crate::{ - challenger::MultiField32ChallengerVariable, - domain::{new_coset, TwoAdicMultiplicativeCosetVariable}, - types::{RecursionShardProofVariable, TwoAdicPcsMatsVariable, TwoAdicPcsRoundVariable}, + challenger::FieldChallengerVariable, constraints::RecursiveVerifierConstraintFolder, + domain::PolynomialSpaceVariable, fri::verify_two_adic_pcs, BabyBearFriConfigVariable, + TwoAdicPcsRoundVariable, VerifyingKeyVariable, }; +/// Reference: [sp1_core::stark::ShardProof] +#[derive(Clone)] +pub struct ShardProofVariable, SC: BabyBearFriConfigVariable> { + pub commitment: ShardCommitment, + pub opened_values: ShardOpenedValues>, + pub opening_proof: TwoAdicPcsProofVariable, + pub 
chip_ordering: HashMap, + pub public_values: Vec>, +} + +/// Get a dummy duplex challenger for use in dummy proofs. +pub fn dummy_challenger(config: &BabyBearPoseidon2) -> Challenger { + let mut challenger = config.challenger(); + challenger.input_buffer = vec![]; + challenger.output_buffer = vec![BabyBear::zero(); challenger.sponge_state.len()]; + challenger +} + +/// Make a dummy shard proof for a given proof shape. +pub fn dummy_vk_and_shard_proof>( + machine: &StarkMachine, + shape: &ProofShape, +) -> (StarkVerifyingKey, ShardProof) { + // Make a dummy commitment. + let commitment = ShardCommitment { + global_main_commit: dummy_hash(), + local_main_commit: dummy_hash(), + permutation_commit: dummy_hash(), + quotient_commit: dummy_hash(), + }; + + // Get dummy opened values by reading the chip ordering from the shape. + let chip_ordering = shape + .chip_information + .iter() + .enumerate() + .map(|(i, (name, _))| (name.clone(), i)) + .collect::>(); + let shard_chips = machine.shard_chips_ordered(&chip_ordering).collect::>(); + let chip_scopes = shard_chips.iter().map(|chip| chip.commit_scope()).collect::>(); + let has_global_main_commit = chip_scopes.contains(&InteractionScope::Global); + let opened_values = ShardOpenedValues { + chips: shard_chips + .iter() + .zip_eq(shape.chip_information.iter()) + .map(|(chip, (_, log_degree))| { + dummy_opened_values::<_, InnerChallenge, _>(chip, *log_degree) + }) + .collect(), + }; + + let mut preprocessed_names_and_dimensions = vec![]; + let mut preprocessed_batch_shape = vec![]; + let mut global_main_batch_shape = vec![]; + let mut local_main_batch_shape = vec![]; + let mut permutation_batch_shape = vec![]; + let mut quotient_batch_shape = vec![]; + + for ((chip, chip_opening), scope) in + shard_chips.iter().zip_eq(opened_values.chips.iter()).zip_eq(chip_scopes.iter()) + { + if !chip_opening.preprocessed.local.is_empty() { + let prep_shape = PolynomialShape { + width: chip_opening.preprocessed.local.len(), + log_degree: chip_opening.log_degree, + }; + preprocessed_names_and_dimensions.push(( + chip.name(), + prep_shape.width, + prep_shape.log_degree, + )); + preprocessed_batch_shape.push(prep_shape); + } + let main_shape = PolynomialShape { + width: chip_opening.main.local.len(), + log_degree: chip_opening.log_degree, + }; + match scope { + InteractionScope::Global => global_main_batch_shape.push(main_shape), + InteractionScope::Local => local_main_batch_shape.push(main_shape), + } + let permutation_shape = PolynomialShape { + width: chip_opening.permutation.local.len(), + log_degree: chip_opening.log_degree, + }; + permutation_batch_shape.push(permutation_shape); + for quot_chunk in chip_opening.quotient.iter() { + assert_eq!(quot_chunk.len(), 4); + quotient_batch_shape.push(PolynomialShape { + width: quot_chunk.len(), + log_degree: chip_opening.log_degree, + }); + } + } + + let batch_shapes = if has_global_main_commit { + vec![ + PolynomialBatchShape { shapes: preprocessed_batch_shape }, + PolynomialBatchShape { shapes: global_main_batch_shape }, + PolynomialBatchShape { shapes: local_main_batch_shape }, + PolynomialBatchShape { shapes: permutation_batch_shape }, + PolynomialBatchShape { shapes: quotient_batch_shape }, + ] + } else { + vec![ + PolynomialBatchShape { shapes: preprocessed_batch_shape }, + PolynomialBatchShape { shapes: local_main_batch_shape }, + PolynomialBatchShape { shapes: permutation_batch_shape }, + PolynomialBatchShape { shapes: quotient_batch_shape }, + ] + }; + + let fri_queries = 
machine.config().fri_config().num_queries; + let log_blowup = machine.config().fri_config().log_blowup; + let opening_proof = dummy_pcs_proof(fri_queries, &batch_shapes, log_blowup); + + let public_values = (0..PROOF_MAX_NUM_PVS).map(|_| BabyBear::zero()).collect::>(); + + // Get the preprocessed chip information. + let pcs = machine.config().pcs(); + let preprocessed_chip_information: Vec<_> = preprocessed_names_and_dimensions + .iter() + .map(|(name, width, log_height)| { + let domain = <::Pcs as Pcs< + ::Challenge, + ::Challenger, + >>::natural_domain_for_degree(pcs, 1 << log_height); + (name.to_owned(), domain, Dimensions { width: *width, height: 1 << log_height }) + }) + .collect(); + + // Get the chip ordering. + let preprocessed_chip_ordering = preprocessed_names_and_dimensions + .iter() + .enumerate() + .map(|(i, (name, _, _))| (name.to_owned(), i)) + .collect::>(); + + let vk = StarkVerifyingKey { + commit: dummy_hash(), + pc_start: BabyBear::zero(), + chip_information: preprocessed_chip_information, + chip_ordering: preprocessed_chip_ordering, + }; + + let shard_proof = + ShardProof { commitment, opened_values, opening_proof, chip_ordering, public_values }; + + (vk, shard_proof) +} + +fn dummy_opened_values, A: MachineAir>( + chip: &Chip, + log_degree: usize, +) -> ChipOpenedValues { + let preprocessed_width = chip.preprocessed_width(); + let preprocessed = AirOpenedValues { + local: vec![EF::zero(); preprocessed_width], + next: vec![EF::zero(); preprocessed_width], + }; + let main_width = chip.width(); + let main = + AirOpenedValues { local: vec![EF::zero(); main_width], next: vec![EF::zero(); main_width] }; + + let permutation_width = chip.permutation_width(); + let permutation = AirOpenedValues { + local: vec![EF::zero(); permutation_width * EF::D], + next: vec![EF::zero(); permutation_width * EF::D], + }; + let quotient_width = chip.quotient_width(); + let quotient = (0..quotient_width).map(|_| vec![EF::zero(); EF::D]).collect::>(); + + ChipOpenedValues { + preprocessed, + main, + permutation, + quotient, + global_cumulative_sum: EF::zero(), + local_cumulative_sum: EF::zero(), + log_degree, + } +} + +#[derive(Clone)] +pub struct MerkleProofVariable> { + pub index: Vec, + pub path: Vec, +} + +pub const EMPTY: usize = 0x_1111_1111; + #[derive(Debug, Clone, Copy)] -pub struct StarkVerifierCircuit { - _phantom: PhantomData<(C, SC)>, +pub struct StarkVerifier { + _phantom: std::marker::PhantomData<(C, SC, A)>, } -impl StarkVerifierCircuit +pub struct VerifyingKeyHint<'a, SC: StarkGenericConfig, A> { + pub machine: &'a StarkMachine, + pub vk: &'a StarkVerifyingKey, +} + +impl<'a, SC: StarkGenericConfig, A: MachineAir> VerifyingKeyHint<'a, SC, A> { + pub const fn new(machine: &'a StarkMachine, vk: &'a StarkVerifyingKey) -> Self { + Self { machine, vk } + } +} + +impl StarkVerifier where - SC: StarkGenericConfig< - Val = C::F, - Challenge = C::EF, - Domain = TwoAdicMultiplicativeCoset, - >, + C::F: TwoAdicField, + C: CircuitConfig, + SC: BabyBearFriConfigVariable, + >::ProverData>: Clone, + A: MachineAir>, { - pub fn verify_shard( + pub fn natural_domain_for_degree( + config: &SC, + degree: usize, + ) -> TwoAdicMultiplicativeCoset { + >::natural_domain_for_degree( + config.pcs(), + degree, + ) + } + + pub fn verify_shard( builder: &mut Builder, - vk: &StarkVerifyingKey, + vk: &VerifyingKeyVariable, machine: &StarkMachine, - challenger: &mut MultiField32ChallengerVariable, - proof: &RecursionShardProofVariable, - chip_quotient_data: Vec, - sorted_chips: Vec, - sorted_indices: Vec, + 
challenger: &mut SC::FriChallengerVariable, + proof: &ShardProofVariable, + global_permutation_challenges: &[Ext], ) where - A: MachineAir + for<'a> Air>, - C::F: TwoAdicField, - C::EF: TwoAdicField, - Com: Into<[Bn254Fr; 1]>, - SymbolicVar<::N>: From, + A: for<'a> Air>, { - let RecursionShardProofVariable { commitment, opened_values, .. } = proof; + let chips = machine.shard_chips_ordered(&proof.chip_ordering).collect::>(); + let chip_scopes = chips.iter().map(|chip| chip.commit_scope()).collect::>(); + + let has_global_main_commit = chip_scopes.contains(&InteractionScope::Global); + + let ShardProofVariable { + commitment, + opened_values, + opening_proof, + chip_ordering, + public_values, + } = proof; + + // Assert that the byte multiplicities don't overflow. + let mut max_byte_lookup_mult = 0u64; + chips.iter().zip(opened_values.chips.iter()).for_each(|(chip, val)| { + max_byte_lookup_mult = max_byte_lookup_mult + .checked_add( + (chip.num_sent_byte_lookups() as u64) + .checked_mul(1u64.checked_shl(val.log_degree as u32).unwrap()) + .unwrap(), + ) + .unwrap(); + }); - let ShardCommitment { main_commit, permutation_commit, quotient_commit } = commitment; + assert!( + max_byte_lookup_mult <= SC::Val::order().to_u64().unwrap(), + "Byte multiplicities overflow" + ); - let permutation_challenges = - (0..2).map(|_| challenger.sample_ext(builder)).collect::>(); + let log_degrees = opened_values.chips.iter().map(|val| val.log_degree).collect::>(); - challenger.observe_commitment(builder, *permutation_commit); + let log_quotient_degrees = + chips.iter().map(|chip| chip.log_quotient_degree()).collect::>(); - let alpha = challenger.sample_ext(builder); + let trace_domains = log_degrees + .iter() + .map(|log_degree| Self::natural_domain_for_degree(machine.config(), 1 << log_degree)) + .collect::>(); - challenger.observe_commitment(builder, *quotient_commit); + let ShardCommitment { + global_main_commit, + local_main_commit, + permutation_commit, + quotient_commit, + } = *commitment; - let zeta = challenger.sample_ext(builder); + challenger.observe(builder, local_main_commit); + + let local_permutation_challenges = + (0..2).map(|_| challenger.sample_ext(builder)).collect::>(); - let num_shard_chips = opened_values.chips.len(); - let mut trace_domains = Vec::new(); - let mut quotient_domains = Vec::new(); + challenger.observe(builder, permutation_commit); + for (opening, chip) in opened_values.chips.iter().zip_eq(chips.iter()) { + let global_sum = C::ext2felt(builder, opening.global_cumulative_sum); + let local_sum = C::ext2felt(builder, opening.local_cumulative_sum); + challenger.observe_slice(builder, global_sum); + challenger.observe_slice(builder, local_sum); + + let has_global_interactions = chip + .sends() + .iter() + .chain(chip.receives()) + .any(|i| i.scope == InteractionScope::Global); + if !has_global_interactions { + builder.assert_ext_eq(opening.global_cumulative_sum, C::EF::zero().cons()); + } + let has_local_interactions = chip + .sends() + .iter() + .chain(chip.receives()) + .any(|i| i.scope == InteractionScope::Local); + if !has_local_interactions { + builder.assert_ext_eq(opening.local_cumulative_sum, C::EF::zero().cons()); + } + } - let mut main_mats: Vec> = Vec::new(); - let mut perm_mats: Vec> = Vec::new(); + let alpha = challenger.sample_ext(builder); - let mut quotient_mats = Vec::new(); + challenger.observe(builder, quotient_commit); - let qc_points = vec![zeta]; + let zeta = challenger.sample_ext(builder); - let prep_mats: Vec> = vk + let 
preprocessed_domains_points_and_opens = vk .chip_information .iter() .map(|(name, domain, _)| { - let chip_idx = - machine.chips().iter().rposition(|chip| &chip.name() == name).unwrap(); - let index = sorted_indices[chip_idx]; - let opening = &opened_values.chips[index]; - - let domain_var: TwoAdicMultiplicativeCosetVariable<_> = builder.constant(*domain); - - let mut trace_points = Vec::new(); - let zeta_next = domain_var.next_point(builder, zeta); - - trace_points.push(zeta); - trace_points.push(zeta_next); - - let prep_values = - vec![opening.preprocessed.local.clone(), opening.preprocessed.next.clone()]; + let i = chip_ordering[name]; + let values = opened_values.chips[i].preprocessed.clone(); TwoAdicPcsMatsVariable:: { domain: *domain, - points: trace_points.clone(), - values: prep_values, + points: vec![zeta, domain.next_point_variable(builder, zeta)], + values: vec![values.local, values.next], } }) .collect::>(); - (0..num_shard_chips).for_each(|i| { - let opening = &opened_values.chips[i]; - let log_quotient_degree = chip_quotient_data[i].log_quotient_degree; - let domain = new_coset(builder, opening.log_degree); - - let log_quotient_size = opening.log_degree + log_quotient_degree; - let quotient_domain = - domain.create_disjoint_domain(builder, Usize::Const(log_quotient_size), None); - - let mut trace_points = Vec::new(); - let zeta_next = domain.next_point(builder, zeta); - trace_points.push(zeta); - trace_points.push(zeta_next); - - let main_values = vec![opening.main.local.clone(), opening.main.next.clone()]; - let main_mat = TwoAdicPcsMatsVariable:: { - domain: TwoAdicMultiplicativeCoset { log_n: domain.log_n, shift: domain.shift }, - values: main_values, - points: trace_points.clone(), - }; + let main_domains_points_and_opens = trace_domains + .iter() + .zip_eq(opened_values.chips.iter()) + .map(|(domain, values)| TwoAdicPcsMatsVariable:: { + domain: *domain, + points: vec![zeta, domain.next_point_variable(builder, zeta)], + values: vec![values.main.local.clone(), values.main.next.clone()], + }) + .collect::>(); - let perm_values = - vec![opening.permutation.local.clone(), opening.permutation.next.clone()]; - let perm_mat = TwoAdicPcsMatsVariable:: { - domain: TwoAdicMultiplicativeCoset { - log_n: domain.clone().log_n, - shift: domain.clone().shift, - }, - values: perm_values, - points: trace_points, - }; + let perm_domains_points_and_opens = trace_domains + .iter() + .zip_eq(opened_values.chips.iter()) + .map(|(domain, values)| TwoAdicPcsMatsVariable:: { + domain: *domain, + points: vec![zeta, domain.next_point_variable(builder, zeta)], + values: vec![values.permutation.local.clone(), values.permutation.next.clone()], + }) + .collect::>(); - let qc_mats = quotient_domain - .split_domains_const(builder, log_quotient_degree) - .into_iter() - .enumerate() - .map(|(j, qc_dom)| TwoAdicPcsMatsVariable:: { - domain: TwoAdicMultiplicativeCoset { - log_n: qc_dom.clone().log_n, - shift: qc_dom.clone().shift, - }, - values: vec![opening.quotient[j].clone()], - points: qc_points.clone(), - }); - - trace_domains.push(domain.clone()); - quotient_domains.push(quotient_domain.clone()); - main_mats.push(main_mat); - perm_mats.push(perm_mat); - quotient_mats.extend(qc_mats); - }); + let quotient_chunk_domains = trace_domains + .iter() + .zip_eq(log_degrees) + .zip_eq(log_quotient_degrees) + .map(|((domain, log_degree), log_quotient_degree)| { + let quotient_degree = 1 << log_quotient_degree; + let quotient_domain = + domain.create_disjoint_domain(1 << (log_degree + log_quotient_degree)); + 
quotient_domain.split_domains(quotient_degree) + }) + .collect::>(); - let mut rounds = Vec::new(); - let prep_commit_val: [Bn254Fr; 1] = vk.commit.clone().into(); - let prep_commit: OuterDigestVariable = [builder.eval(prep_commit_val[0])]; - let prep_round = TwoAdicPcsRoundVariable { batch_commit: prep_commit, mats: prep_mats }; - let main_round = TwoAdicPcsRoundVariable { batch_commit: *main_commit, mats: main_mats }; - let perm_round = - TwoAdicPcsRoundVariable { batch_commit: *permutation_commit, mats: perm_mats }; - let quotient_round = - TwoAdicPcsRoundVariable { batch_commit: *quotient_commit, mats: quotient_mats }; - rounds.push(prep_round); - rounds.push(main_round); - rounds.push(perm_round); - rounds.push(quotient_round); - let config = outer_fri_config_with_blowup(log2_strict_usize(DEGREE - 1)); - verify_two_adic_pcs(builder, &config, &proof.opening_proof, challenger, rounds); - - if !sp1_dev_mode() { - for (i, sorted_chip) in sorted_chips.iter().enumerate() { - for chip in machine.chips() { - if chip.name() == *sorted_chip { - let values = &opened_values.chips[i]; - let trace_domain = &trace_domains[i]; - let quotient_domain = "ient_domains[i]; - let qc_domains = quotient_domain - .split_domains_const(builder, chip.log_quotient_degree()); - Self::verify_constraints( - builder, - chip, - values, - proof.public_values.clone(), - trace_domain.clone(), - qc_domains, - zeta, - alpha, - &permutation_challenges, - ); + let quotient_domains_points_and_opens = proof + .opened_values + .chips + .iter() + .zip_eq(quotient_chunk_domains.iter()) + .flat_map(|(values, qc_domains)| { + values.quotient.iter().zip_eq(qc_domains).map(move |(values, q_domain)| { + TwoAdicPcsMatsVariable:: { + domain: *q_domain, + points: vec![zeta], + values: vec![values.clone()], } - } + }) + }) + .collect::>(); + + // Split the main_domains_points_and_opens to the global and local chips. 
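// A sketch of the partition performed below: each chip's main-trace opening is routed
// to the global or local batch according to its commit scope, preserving the original
// chip order. This assumes exactly the two scopes used in this file.
fn split_by_scope<T>(items: Vec<T>, scopes: &[InteractionScope]) -> (Vec<T>, Vec<T>) {
    let (mut global, mut local) = (Vec::new(), Vec::new());
    for (item, scope) in items.into_iter().zip(scopes) {
        match scope {
            InteractionScope::Global => global.push(item),
            InteractionScope::Local => local.push(item),
        }
    }
    (global, local)
}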
+ let mut global_trace_points_and_openings = Vec::new(); + let mut local_trace_points_and_openings = Vec::new(); + for (i, points_and_openings) in + main_domains_points_and_opens.clone().into_iter().enumerate() + { + let scope = chip_scopes[i]; + if scope == InteractionScope::Global { + global_trace_points_and_openings.push(points_and_openings); + } else { + local_trace_points_and_openings.push(points_and_openings); } } - } -} - -type OuterSC = BabyBearPoseidon2Outer; -type OuterF = ::Val; -type OuterC = OuterConfig; - -pub fn build_wrap_circuit( - wrap_vk: &StarkVerifyingKey, - template_proof: ShardProof, -) -> Vec { - let outer_config = OuterSC::new(); - let outer_machine = RecursionAirWideDeg17::::wrap_machine(outer_config); - - let mut builder = Builder::::default(); - let mut challenger = MultiField32ChallengerVariable::new(&mut builder); - - let preprocessed_commit_val: [Bn254Fr; 1] = wrap_vk.commit.into(); - let preprocessed_commit: OuterDigestVariable = - [builder.eval(preprocessed_commit_val[0])]; - challenger.observe_commitment(&mut builder, preprocessed_commit); - let pc_start = builder.eval(wrap_vk.pc_start); - challenger.observe(&mut builder, pc_start); - - let mut witness = Witness::default(); - template_proof.write(&mut witness); - let proof = template_proof.read(&mut builder); - - let commited_values_digest = Bn254Fr::zero().read(&mut builder); - builder.commit_commited_values_digest_circuit(commited_values_digest); - let vkey_hash = Bn254Fr::zero().read(&mut builder); - builder.commit_vkey_hash_circuit(vkey_hash); - - // Validate public values - let mut pv_elements = Vec::new(); - for i in 0..PROOF_MAX_NUM_PVS { - let element = builder.get(&proof.public_values, i); - pv_elements.push(element); - } - - let pv: &RecursionPublicValues<_> = pv_elements.as_slice().borrow(); - - let one_felt: Felt<_> = builder.constant(BabyBear::one()); - // Proof must be complete. In the reduce program, this will ensure that the SP1 proof has been - // fully accumulated. - builder.assert_felt_eq(pv.is_complete, one_felt); - - // Convert pv.sp1_vk_digest into Bn254 - let pv_vkey_hash = babybears_to_bn254(&mut builder, &pv.sp1_vk_digest); - // Vkey hash must match the witnessed commited_values_digest that we are committing to. - builder.assert_var_eq(pv_vkey_hash, vkey_hash); - // Convert pv.committed_value_digest into Bn254 - let pv_committed_values_digest_bytes: [Felt<_>; 32] = - words_to_bytes(&pv.committed_value_digest).try_into().unwrap(); - let pv_committed_values_digest: Var<_> = - babybear_bytes_to_bn254(&mut builder, &pv_committed_values_digest_bytes); + // Create the pcs rounds. 
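// The rounds assembled below are ordered: preprocessed, global main (included only
// when some chip commits in the global scope), local main, permutation, quotient.
// A small sketch of that selection, with string labels standing in for the actual
// round variables:
fn pcs_round_labels(has_global_main_commit: bool) -> Vec<&'static str> {
    if has_global_main_commit {
        vec!["preprocessed", "global_main", "local_main", "permutation", "quotient"]
    } else {
        vec!["preprocessed", "local_main", "permutation", "quotient"]
    }
}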
+ let prep_commit = vk.commitment; + let prep_round = TwoAdicPcsRoundVariable { + batch_commit: prep_commit, + domains_points_and_opens: preprocessed_domains_points_and_opens, + }; + let global_main_round = TwoAdicPcsRoundVariable { + batch_commit: global_main_commit, + domains_points_and_opens: global_trace_points_and_openings, + }; + let local_main_round = TwoAdicPcsRoundVariable { + batch_commit: local_main_commit, + domains_points_and_opens: local_trace_points_and_openings, + }; + let perm_round = TwoAdicPcsRoundVariable { + batch_commit: permutation_commit, + domains_points_and_opens: perm_domains_points_and_opens, + }; + let quotient_round = TwoAdicPcsRoundVariable { + batch_commit: quotient_commit, + domains_points_and_opens: quotient_domains_points_and_opens, + }; + + let rounds = if has_global_main_commit { + vec![prep_round, global_main_round, local_main_round, perm_round, quotient_round] + } else { + vec![prep_round, local_main_round, perm_round, quotient_round] + }; + + // Verify the pcs proof + builder.cycle_tracker_v2_enter("stage-d-verify-pcs".to_string()); + let config = machine.config().fri_config(); + verify_two_adic_pcs::(builder, config, opening_proof, challenger, rounds); + builder.cycle_tracker_v2_exit(); + + // Verify the constrtaint evaluations. + builder.cycle_tracker_v2_enter("stage-e-verify-constraints".to_string()); + let permutation_challenges = global_permutation_challenges + .iter() + .chain(local_permutation_challenges.iter()) + .copied() + .collect::>(); - // Committed values digest must match the witnessed one that we are committing to. - builder.assert_var_eq(pv_committed_values_digest, commited_values_digest); + for (chip, trace_domain, qc_domains, values) in + izip!(chips.iter(), trace_domains, quotient_chunk_domains, opened_values.chips.iter(),) + { + // Verify the shape of the opening arguments matches the expected values. + Self::verify_opening_shape(chip, values).unwrap(); + // Verify the constraint evaluation. + Self::verify_constraints( + builder, + chip, + values, + trace_domain, + qc_domains, + zeta, + alpha, + &permutation_challenges, + public_values, + ); + } - let chips = outer_machine - .shard_chips_ordered(&template_proof.chip_ordering) - .map(|chip| chip.name()) - .collect::>(); + // Verify that the chips' local_cumulative_sum sum to 0. + let local_cumulative_sum: Ext = opened_values + .chips + .iter() + .map(|val| val.local_cumulative_sum) + .fold(builder.constant(C::EF::zero()), |acc, x| builder.eval(acc + x)); + let zero_ext: Ext<_, _> = builder.constant(C::EF::zero()); + builder.assert_ext_eq(local_cumulative_sum, zero_ext); - let sorted_indices = outer_machine - .chips() - .iter() - .map(|chip| template_proof.chip_ordering.get(&chip.name()).copied().unwrap_or(usize::MAX)) - .collect::>(); - - let chip_quotient_data = outer_machine - .shard_chips_ordered(&template_proof.chip_ordering) - .map(|chip| { - let log_quotient_degree = chip.log_quotient_degree(); - QuotientDataValues { log_quotient_degree, quotient_size: 1 << log_quotient_degree } - }) - .collect(); + builder.cycle_tracker_v2_exit(); + } +} - let ShardCommitment { main_commit, .. 
} = &proof.commitment; - challenger.observe_commitment(&mut builder, *main_commit); - let pv_slice = proof.public_values.slice( - &mut builder, - Usize::Const(0), - Usize::Const(outer_machine.num_pv_elts()), - ); - challenger.observe_slice(&mut builder, pv_slice); - - StarkVerifierCircuit::::verify_shard::<_, 17>( - &mut builder, - wrap_vk, - &outer_machine, - &mut challenger.clone(), - &proof, - chip_quotient_data, - chips, - sorted_indices, - ); - - let zero_ext: Ext<_, _> = builder.constant(::EF::zero()); - let cumulative_sum: Ext<_, _> = builder.eval(zero_ext); - for chip in proof.opened_values.chips { - builder.assign(cumulative_sum, cumulative_sum + chip.cumulative_sum); +impl, SC: BabyBearFriConfigVariable> ShardProofVariable { + pub fn contains_cpu(&self) -> bool { + self.chip_ordering.contains_key("CPU") } - builder.assert_ext_eq(cumulative_sum, zero_ext); - // Verify the public values digest. - let calculated_digest = builder.p2_babybear_hash(&pv_elements[0..NUM_PV_ELMS_TO_HASH]); - let expected_digest = pv.digest; - for (calculated_elm, expected_elm) in calculated_digest.iter().zip(expected_digest.iter()) { - builder.assert_felt_eq(*expected_elm, *calculated_elm); + pub fn log_degree_cpu(&self) -> usize { + let idx = self.chip_ordering.get("CPU").expect("CPU chip not found"); + self.opened_values.chips[*idx].log_degree } - // Print out cycle tracking info. - for line in cycle_tracker(&builder.operations.vec).unwrap().lines() { - println!("{}", line); + pub fn contains_memory_init(&self) -> bool { + self.chip_ordering.contains_key("MemoryGlobalInit") } - let mut backend = ConstraintCompiler::::default(); - backend.emit(builder.operations) + pub fn contains_memory_finalize(&self) -> bool { + self.chip_ordering.contains_key("MemoryGlobalFinalize") + } } -pub fn cycle_tracker<'a, C: Config + Debug + 'a>( - operations: impl IntoIterator>, -) -> Result, SpanBuilderError> { - let mut span_builder = SpanBuilder::new("cycle_tracker".to_string()); - for op in operations.into_iter() { - if let DslIr::CycleTracker(name) = op { - if span_builder.current_span.name != *name { - span_builder.enter(name.to_owned()); - } else { - span_builder.exit().map_err(SpanBuilderError::from)?; - } - } else { - let op_dbg_str = format!("{op:?}"); - let op_name = op_dbg_str - [..op_dbg_str.chars().take_while(|x| x.is_alphanumeric()).count()] - .to_owned(); - span_builder.item(op_name); +#[allow(unused_imports)] +#[cfg(any(test, feature = "export-tests"))] +pub mod tests { + use std::collections::VecDeque; + use std::fmt::Debug; + + use crate::{ + challenger::{CanCopyChallenger, CanObserveVariable, DuplexChallengerVariable}, + utils::tests::run_test_recursion_with_prover, + BabyBearFriConfig, + }; + + use sp1_core_executor::{programs::tests::FIBONACCI_ELF, Program}; + use sp1_core_machine::{ + io::SP1Stdin, + riscv::RiscvAir, + utils::{prove, setup_logger}, + }; + use sp1_recursion_compiler::{ + config::{InnerConfig, OuterConfig}, + ir::{Builder, DslIr, TracedVec}, + }; + + use sp1_recursion_core::{air::Block, machine::RecursionAir, stark::BabyBearPoseidon2Outer}; + use sp1_stark::{ + baby_bear_poseidon2::BabyBearPoseidon2, CpuProver, InnerVal, MachineProver, SP1CoreOpts, + ShardProof, + }; + + use super::*; + use crate::witness::*; + + type F = InnerVal; + type A = RiscvAir; + type SC = BabyBearPoseidon2; + + pub fn build_verify_shard_with_provers< + C: CircuitConfig> + Debug, + CoreP: MachineProver, + RecP: MachineProver>, + >( + config: SC, + elf: &[u8], + opts: SP1CoreOpts, + num_shards_in_batch: Option, + 
) -> (TracedVec>, Vec>) { + setup_logger(); + + let machine = RiscvAir::::machine(SC::default()); + let (_, vk) = machine.setup(&Program::from(elf).unwrap()); + let (proof, _, _) = prove::<_, CoreP>( + Program::from(elf).unwrap(), + &SP1Stdin::new(), + SC::default(), + opts, + None, + ) + .unwrap(); + let mut challenger = machine.config().challenger(); + machine.verify(&vk, &proof, &mut challenger).unwrap(); + + // Observe all the commitments. + let mut builder = Builder::::default(); + + let mut witness_stream = Vec::>::new(); + + // Add a hash invocation, since the poseidon2 table expects that it's in the first row. + let mut challenger = config.challenger_variable(&mut builder); + // let vk = VerifyingKeyVariable::from_constant_key_babybear(&mut builder, &vk); + Witnessable::::write(&vk, &mut witness_stream); + let vk: VerifyingKeyVariable<_, _> = vk.read(&mut builder); + vk.observe_into(&mut builder, &mut challenger); + + let proofs = proof + .shard_proofs + .into_iter() + .map(|proof| { + let shape = proof.shape(); + let (_, dummy_proof) = dummy_vk_and_shard_proof(&machine, &shape); + Witnessable::::write(&proof, &mut witness_stream); + dummy_proof.read(&mut builder) + }) + .collect::>(); + // Observe all the commitments, and put the proofs into the witness stream. + for proof in proofs.iter() { + let ShardCommitment { global_main_commit, .. } = proof.commitment; + challenger.observe(&mut builder, global_main_commit); + let pv_slice = &proof.public_values[..machine.num_pv_elts()]; + challenger.observe_slice(&mut builder, pv_slice.iter().cloned()); + } + + let global_permutation_challenges = + (0..2).map(|_| challenger.sample_ext(&mut builder)).collect::>(); + + // Verify the first proof. + let num_shards = num_shards_in_batch.unwrap_or(proofs.len()); + for proof in proofs.into_iter().take(num_shards) { + let mut challenger = challenger.copy(&mut builder); + StarkVerifier::verify_shard( + &mut builder, + &vk, + &machine, + &mut challenger, + &proof, + &global_permutation_challenges, + ); } + (builder.into_operations(), witness_stream) } - span_builder.finish().map_err(SpanBuilderError::from) -} -#[cfg(test)] -pub(crate) mod tests { - - use sp1_recursion_core::{cpu::Instruction, runtime::Opcode}; - - pub fn basic_program( - ) -> sp1_recursion_core::runtime::RecursionProgram { - let zero = [F::zero(); 4]; - let one = [F::one(), F::zero(), F::zero(), F::zero()]; - let mut instructions = vec![Instruction::new( - Opcode::ADD, - F::from_canonical_u32(3), - zero, - one, - F::zero(), - F::zero(), - false, - true, - "".to_string(), - )]; - instructions.resize( - 31, - Instruction::new( - Opcode::ADD, - F::from_canonical_u32(3), - zero, - one, - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - ); - instructions.push(Instruction::new( - Opcode::HALT, - F::zero(), - zero, - zero, - F::zero(), - F::zero(), - true, - true, - "".to_string(), - )); - sp1_recursion_core::runtime::RecursionProgram:: { instructions, traces: vec![None] } + #[test] + fn test_verify_shard_inner() { + let (operations, stream) = + build_verify_shard_with_provers::, CpuProver<_, _>>( + BabyBearPoseidon2::new(), + FIBONACCI_ELF, + SP1CoreOpts::default(), + Some(2), + ); + run_test_recursion_with_prover::>(operations, stream); } } diff --git a/crates/recursion/circuit/src/types.rs b/crates/recursion/circuit/src/types.rs index 1c39b71a25..16eab3bc92 100644 --- a/crates/recursion/circuit/src/types.rs +++ b/crates/recursion/circuit/src/types.rs @@ -1,201 +1,116 @@ -use p3_air::BaseAir; +use hashbrown::HashMap; use 
p3_commit::TwoAdicMultiplicativeCoset; -use p3_field::AbstractExtensionField; -use p3_matrix::{dense::RowMajorMatrixView, stack::VerticalPair}; -use sp1_recursion_compiler::ir::{Array, Builder, Config, Ext, ExtConst, Felt, FromConstant, Var}; -use sp1_stark::{air::MachineAir, AirOpenedValues, Chip, ChipOpenedValues, ShardCommitment}; +use p3_field::{AbstractField, TwoAdicField}; +use p3_matrix::Dimensions; -use crate::DIGEST_SIZE; +use sp1_recursion_compiler::ir::{Builder, Ext, Felt}; -pub type OuterDigestVariable = [Var; DIGEST_SIZE]; +use sp1_recursion_core::DIGEST_SIZE; -pub struct RecursionShardProofVariable { - pub commitment: ShardCommitment>, - pub opened_values: RecursionShardOpenedValuesVariable, - pub opening_proof: TwoAdicPcsProofVariable, - pub public_values: Array>, -} +use crate::{ + challenger::CanObserveVariable, hash::FieldHasherVariable, BabyBearFriConfigVariable, + CircuitConfig, +}; +/// Reference: [sp1_core::stark::StarkVerifyingKey] #[derive(Clone)] -pub struct RecursionShardOpenedValuesVariable { - pub chips: Vec>, +pub struct VerifyingKeyVariable, SC: BabyBearFriConfigVariable> { + pub commitment: SC::DigestVariable, + pub pc_start: Felt, + pub chip_information: Vec<(String, TwoAdicMultiplicativeCoset, Dimensions)>, + pub chip_ordering: HashMap, } -/// Reference: https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/fri/src/proof.rs#L12 #[derive(Clone)] -pub struct FriProofVariable { - pub commit_phase_commits: Vec>, - pub query_proofs: Vec>, +pub struct FriProofVariable> { + pub commit_phase_commits: Vec, + pub query_proofs: Vec>, pub final_poly: Ext, pub pow_witness: Felt, } /// Reference: https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/fri/src/proof.rs#L32 #[derive(Clone)] -pub struct FriCommitPhaseProofStepVariable { +pub struct FriCommitPhaseProofStepVariable> { pub sibling_value: Ext, - pub opening_proof: Vec>, + pub opening_proof: Vec, } /// Reference: https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/fri/src/proof.rs#L23 #[derive(Clone)] -pub struct FriQueryProofVariable { - pub commit_phase_openings: Vec>, +pub struct FriQueryProofVariable> { + pub commit_phase_openings: Vec>, } /// Reference: https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/fri/src/verifier.rs#L22 #[derive(Clone)] -pub struct FriChallenges { - pub query_indices: Vec>, +pub struct FriChallenges { + pub query_indices: Vec>, pub betas: Vec>, } #[derive(Clone)] -pub struct BatchOpeningVariable { - pub opened_values: Vec>>>, - pub opening_proof: Vec>, +pub struct TwoAdicPcsProofVariable> { + pub fri_proof: FriProofVariable, + pub query_openings: Vec>>, } #[derive(Clone)] -pub struct TwoAdicPcsProofVariable { - pub fri_proof: FriProofVariable, - pub query_openings: Vec>>, +pub struct BatchOpeningVariable> { + pub opened_values: Vec>>>, + pub opening_proof: Vec, } #[derive(Clone)] -pub struct TwoAdicPcsRoundVariable { - pub batch_commit: OuterDigestVariable, - pub mats: Vec>, +pub struct TwoAdicPcsRoundVariable> { + pub batch_commit: H::DigestVariable, + pub domains_points_and_opens: Vec>, } -#[allow(clippy::type_complexity)] #[derive(Clone)] -pub struct TwoAdicPcsMatsVariable { +pub struct TwoAdicPcsMatsVariable { pub domain: TwoAdicMultiplicativeCoset, pub points: Vec>, pub values: Vec>>, } -#[derive(Debug, Clone)] -pub struct ChipOpenedValuesVariable { - pub preprocessed: AirOpenedValuesVariable, - pub main: AirOpenedValuesVariable, - pub permutation: 
AirOpenedValuesVariable, - pub quotient: Vec>>, - pub cumulative_sum: Ext, - pub log_degree: usize, -} - -#[derive(Debug, Clone)] -pub struct AirOpenedValuesVariable { - pub local: Vec>, - pub next: Vec>, -} - -impl FromConstant for AirOpenedValuesVariable { - type Constant = AirOpenedValues; - - fn constant(value: Self::Constant, builder: &mut Builder) -> Self { - AirOpenedValuesVariable { - local: value.local.iter().map(|x| builder.constant(*x)).collect(), - next: value.next.iter().map(|x| builder.constant(*x)).collect(), - } - } -} - -impl AirOpenedValuesVariable { - pub fn view( - &self, - ) -> VerticalPair< - RowMajorMatrixView<'_, Ext>, - RowMajorMatrixView<'_, Ext>, - > { - let a = RowMajorMatrixView::new_row(&self.local); - let b = RowMajorMatrixView::new_row(&self.next); - VerticalPair::new(a, b) - } -} - -impl FromConstant for ChipOpenedValuesVariable { - type Constant = ChipOpenedValues; - - fn constant(value: Self::Constant, builder: &mut Builder) -> Self { - ChipOpenedValuesVariable { - preprocessed: builder.constant(value.preprocessed), - main: builder.constant(value.main), - permutation: builder.constant(value.permutation), - quotient: value - .quotient - .iter() - .map(|x| x.iter().map(|y| builder.constant(*y)).collect()) - .collect(), - cumulative_sum: builder.eval(value.cumulative_sum.cons()), - log_degree: value.log_degree, +impl, SC: BabyBearFriConfigVariable> VerifyingKeyVariable { + pub fn observe_into(&self, builder: &mut Builder, challenger: &mut Challenger) + where + Challenger: CanObserveVariable> + CanObserveVariable, + { + // Observe the commitment. + challenger.observe(builder, self.commitment); + // Observe the pc_start. + challenger.observe(builder, self.pc_start); + // Observe the padding. + let zero: Felt<_> = builder.eval(C::F::zero()); + for _ in 0..7 { + challenger.observe(builder, zero); } } -} -#[derive(Debug, Clone)] -pub struct ChipOpening { - pub preprocessed: AirOpenedValues>, - pub main: AirOpenedValues>, - pub permutation: AirOpenedValues>, - pub quotient: Vec>>, - pub cumulative_sum: Ext, - pub log_degree: usize, -} - -impl ChipOpening { - pub fn from_variable( - _: &mut Builder, - chip: &Chip, - opening: &ChipOpenedValuesVariable, - ) -> Self + /// Hash the verifying key + prep domains into a single digest. 
+ /// poseidon2( commit[0..8] || pc_start || prep_domains[N].{log_n, .size, .shift, .g}) + pub fn hash(&self, builder: &mut Builder) -> SC::DigestVariable where - A: MachineAir, + C::F: TwoAdicField, + SC::DigestVariable: IntoIterator>, { - let mut preprocessed = AirOpenedValues { local: vec![], next: vec![] }; - let preprocess_width = chip.preprocessed_width(); - for i in 0..preprocess_width { - preprocessed.local.push(opening.preprocessed.local[i]); - preprocessed.next.push(opening.preprocessed.next[i]); - } - - let mut main = AirOpenedValues { local: vec![], next: vec![] }; - let main_width = chip.width(); - for i in 0..main_width { - main.local.push(opening.main.local[i]); - main.next.push(opening.main.next[i]); + let prep_domains = self.chip_information.iter().map(|(_, domain, _)| domain); + let num_inputs = DIGEST_SIZE + 1 + (4 * prep_domains.len()); + let mut inputs = Vec::with_capacity(num_inputs); + inputs.extend(self.commitment); + inputs.push(self.pc_start); + for domain in prep_domains { + inputs.push(builder.eval(C::F::from_canonical_usize(domain.log_n))); + let size = 1 << domain.log_n; + inputs.push(builder.eval(C::F::from_canonical_usize(size))); + let g = C::F::two_adic_generator(domain.log_n); + inputs.push(builder.eval(domain.shift)); + inputs.push(builder.eval(g)); } - let mut permutation = AirOpenedValues { local: vec![], next: vec![] }; - let permutation_width = C::EF::D * chip.permutation_width(); - - for i in 0..permutation_width { - permutation.local.push(opening.permutation.local[i]); - permutation.next.push(opening.permutation.next[i]); - } - - let num_quotient_chunks = 1 << chip.log_quotient_degree(); - - let mut quotient = vec![]; - for i in 0..num_quotient_chunks { - let chunk = &opening.quotient[i]; - let mut quotient_vals = vec![]; - for j in 0..C::EF::D { - let value = &chunk[j]; - quotient_vals.push(*value); - } - quotient.push(quotient_vals); - } - - ChipOpening { - preprocessed, - main, - permutation, - quotient, - cumulative_sum: opening.cumulative_sum, - log_degree: opening.log_degree, - } + SC::hash(builder, &inputs) } } diff --git a/crates/recursion/circuit/src/utils.rs b/crates/recursion/circuit/src/utils.rs index e86f0cdc5e..9609a5f40c 100644 --- a/crates/recursion/circuit/src/utils.rs +++ b/crates/recursion/circuit/src/utils.rs @@ -1,22 +1,61 @@ -use p3_field::AbstractField; +use std::mem::MaybeUninit; + +use p3_baby_bear::BabyBear; +use p3_bn254_fr::Bn254Fr; +use p3_field::{AbstractField, PrimeField32}; + use sp1_recursion_compiler::ir::{Builder, Config, Felt, Var}; -use sp1_recursion_core::runtime::DIGEST_SIZE; +use sp1_recursion_core::{air::ChallengerPublicValues, DIGEST_SIZE}; + use sp1_stark::Word; -pub fn felt2var(builder: &mut Builder, felt: Felt) -> Var { - let bits = builder.num2bits_f(felt); - builder.bits2num_v(&bits) +pub(crate) unsafe fn uninit_challenger_pv( + _builder: &mut Builder, +) -> ChallengerPublicValues> { + unsafe { MaybeUninit::zeroed().assume_init() } +} + +/// Convert 8 BabyBear words into a Bn254Fr field element by shifting by 31 bits each time. The last +/// word becomes the least significant bits. +#[allow(dead_code)] +pub fn babybears_to_bn254(digest: &[BabyBear; 8]) -> Bn254Fr { + let mut result = Bn254Fr::zero(); + for word in digest.iter() { + // Since BabyBear prime is less than 2^31, we can shift by 31 bits each time and still be + // within the Bn254Fr field, so we don't have to truncate the top 3 bits. 
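Aside, not part of the patch: the comment above relies on eight 31-bit limbs occupying 248 bits, comfortably below the roughly 254-bit Bn254Fr modulus, so no truncation is ever needed when folding the BabyBear digest. A minimal standalone check of that bound (constant names are illustrative only):

const LIMB_BITS: u32 = 31;
const NUM_LIMBS: u32 = 8;

fn main() {
    // Eight 31-bit limbs pack into 248 bits; the Bn254Fr modulus is just under
    // 2^254, so the folded digest always fits without dropping any bits.
    assert_eq!(LIMB_BITS * NUM_LIMBS, 248);
    assert!(LIMB_BITS * NUM_LIMBS < 254);
}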
+ result *= Bn254Fr::from_canonical_u64(1 << 31); + result += Bn254Fr::from_canonical_u32(word.as_canonical_u32()); + } + result +} + +/// Convert 32 BabyBear bytes into a Bn254Fr field element. The first byte's most significant 3 bits +/// (which would become the 3 most significant bits) are truncated. +#[allow(dead_code)] +pub fn babybear_bytes_to_bn254(bytes: &[BabyBear; 32]) -> Bn254Fr { + let mut result = Bn254Fr::zero(); + for (i, byte) in bytes.iter().enumerate() { + debug_assert!(byte < &BabyBear::from_canonical_u32(256)); + if i == 0 { + // 32 bytes is more than Bn254 prime, so we need to truncate the top 3 bits. + result = Bn254Fr::from_canonical_u32(byte.as_canonical_u32() & 0x1f); + } else { + result *= Bn254Fr::from_canonical_u32(256); + result += Bn254Fr::from_canonical_u32(byte.as_canonical_u32()); + } + } + result } -pub fn babybears_to_bn254( +#[allow(dead_code)] +pub fn felts_to_bn254_var( builder: &mut Builder, digest: &[Felt; DIGEST_SIZE], ) -> Var { let var_2_31: Var<_> = builder.constant(C::N::from_canonical_u32(1 << 31)); let result = builder.constant(C::N::zero()); for (i, word) in digest.iter().enumerate() { - let word_bits = builder.num2bits_f_circuit(*word); - let word_var = builder.bits2num_v_circuit(&word_bits); + let word_var = builder.felt2var_circuit(*word); if i == 0 { builder.assign(result, word_var); } else { @@ -26,7 +65,8 @@ pub fn babybears_to_bn254( result } -pub fn babybear_bytes_to_bn254( +#[allow(dead_code)] +pub fn felt_bytes_to_bn254_var( builder: &mut Builder, bytes: &[Felt; 32], ) -> Var { @@ -51,6 +91,75 @@ pub fn babybear_bytes_to_bn254( result } +#[allow(dead_code)] pub fn words_to_bytes(words: &[Word]) -> Vec { words.iter().flat_map(|w| w.0).collect::>() } + +#[cfg(any(test, feature = "export-tests"))] +pub(crate) mod tests { + use std::sync::Arc; + + use sp1_core_machine::utils::{run_test_machine_with_prover, setup_logger}; + use sp1_recursion_compiler::{circuit::AsmCompiler, circuit::AsmConfig, ir::DslIr}; + + use sp1_recursion_compiler::ir::TracedVec; + use sp1_recursion_core::{machine::RecursionAir, Runtime}; + use sp1_stark::{ + baby_bear_poseidon2::BabyBearPoseidon2, CpuProver, InnerChallenge, InnerVal, MachineProver, + MachineProvingKey, + }; + + use crate::witness::WitnessBlock; + + type SC = BabyBearPoseidon2; + type F = InnerVal; + type EF = InnerChallenge; + + /// A simplified version of some code from `recursion/core/src/stark/mod.rs`. + /// Takes in a program and runs it with the given witness and generates a proof with a variety + /// of machines depending on the provided test_config. + pub(crate) fn run_test_recursion_with_prover>>( + operations: TracedVec>>, + witness_stream: impl IntoIterator>>, + ) { + setup_logger(); + + let compile_span = tracing::debug_span!("compile").entered(); + let mut compiler = AsmCompiler::>::default(); + let program = Arc::new(compiler.compile(operations)); + compile_span.exit(); + + let config = SC::default(); + + let run_span = tracing::debug_span!("run the recursive program").entered(); + let mut runtime = Runtime::::new(program.clone(), config.perm.clone()); + runtime.witness_stream.extend(witness_stream); + tracing::debug_span!("run").in_scope(|| runtime.run().unwrap()); + assert!(runtime.witness_stream.is_empty()); + run_span.exit(); + + let records = vec![runtime.record]; + + // Run with the poseidon2 wide chip. 
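Aside, not part of the patch: the `babybear_bytes_to_bn254` helper above truncates the top 3 bits of the first (most significant) byte by masking with 0x1f, which is what keeps the 32-byte value inside Bn254Fr. A small illustrative check of that accounting:

fn main() {
    // The first byte keeps only its low 5 bits, the other 31 bytes keep all 8,
    // so the packed value spans 5 + 31 * 8 = 253 bits and still fits under the
    // ~254-bit Bn254Fr modulus.
    let packed_bits = 5 + 31 * 8;
    assert_eq!(packed_bits, 253);
    assert!(packed_bits < 254);
}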
+ let proof_wide_span = tracing::debug_span!("Run test with wide machine").entered(); + let wide_machine = RecursionAir::<_, 3>::compress_machine(SC::default()); + let (pk, vk) = wide_machine.setup(&program); + let pk = P::DeviceProvingKey::from_host(&pk); + let prover = P::new(wide_machine); + let result = run_test_machine_with_prover::<_, _, P>(&prover, records.clone(), pk, vk); + proof_wide_span.exit(); + + if let Err(e) = result { + panic!("Verification failed: {:?}", e); + } + } + + #[allow(dead_code)] + pub(crate) fn run_test_recursion( + operations: TracedVec>>, + witness_stream: impl IntoIterator>>, + ) { + run_test_recursion_with_prover::>(operations, witness_stream) + } +} diff --git a/crates/recursion/circuit/src/witness.rs b/crates/recursion/circuit/src/witness.rs deleted file mode 100644 index 7255f43a4e..0000000000 --- a/crates/recursion/circuit/src/witness.rs +++ /dev/null @@ -1,353 +0,0 @@ -use p3_bn254_fr::Bn254Fr; -use sp1_recursion_compiler::{ - config::OuterConfig, - ir::{Builder, Config, Ext, Felt, Var, Witness}, -}; -use sp1_recursion_core::stark::config::{ - BabyBearPoseidon2Outer, OuterBatchOpening, OuterChallenge, OuterCommitPhaseStep, OuterDigest, - OuterFriProof, OuterPcsProof, OuterQueryProof, OuterVal, -}; -use sp1_stark::{ - AirOpenedValues, ChipOpenedValues, ShardCommitment, ShardOpenedValues, ShardProof, -}; - -use crate::types::{ - AirOpenedValuesVariable, BatchOpeningVariable, ChipOpenedValuesVariable, - FriCommitPhaseProofStepVariable, FriProofVariable, FriQueryProofVariable, OuterDigestVariable, - RecursionShardOpenedValuesVariable, RecursionShardProofVariable, TwoAdicPcsProofVariable, -}; - -pub trait Witnessable { - type WitnessVariable; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable; - - fn write(&self, witness: &mut Witness); -} - -type C = OuterConfig; - -impl Witnessable for Bn254Fr { - type WitnessVariable = Var; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - builder.witness_var() - } - - fn write(&self, witness: &mut Witness) { - witness.vars.push(*self); - } -} - -impl Witnessable for OuterVal { - type WitnessVariable = Felt; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - builder.witness_felt() - } - - fn write(&self, witness: &mut Witness) { - witness.felts.push(*self); - } -} - -impl Witnessable for OuterChallenge { - type WitnessVariable = Ext; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - builder.witness_ext() - } - - fn write(&self, witness: &mut Witness) { - witness.exts.push(*self); - } -} - -trait VectorWitnessable: Witnessable {} -impl VectorWitnessable for Bn254Fr {} -impl VectorWitnessable for OuterVal {} -impl VectorWitnessable for OuterChallenge {} -impl VectorWitnessable for Vec {} -impl VectorWitnessable for Vec {} -impl VectorWitnessable for Vec> {} - -impl> Witnessable for Vec { - type WitnessVariable = Vec; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - self.iter().map(|x| x.read(builder)).collect() - } - - fn write(&self, witness: &mut Witness) { - self.iter().for_each(|x| x.write(witness)); - } -} - -impl Witnessable for OuterDigest { - type WitnessVariable = OuterDigestVariable; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - [builder.witness_var()] - } - - fn write(&self, witness: &mut Witness) { - witness.vars.push(self[0]); - } -} -impl VectorWitnessable for OuterDigest {} - -impl Witnessable for ShardCommitment { - type WitnessVariable = ShardCommitment>; - - fn read(&self, builder: 
&mut Builder) -> Self::WitnessVariable { - let main_commit = self.main_commit.read(builder); - let permutation_commit = self.permutation_commit.read(builder); - let quotient_commit = self.quotient_commit.read(builder); - ShardCommitment { main_commit, permutation_commit, quotient_commit } - } - - fn write(&self, witness: &mut Witness) { - self.main_commit.write(witness); - self.permutation_commit.write(witness); - self.quotient_commit.write(witness); - } -} - -impl Witnessable for AirOpenedValues { - type WitnessVariable = AirOpenedValuesVariable; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - let local = self.local.read(builder); - let next = self.next.read(builder); - AirOpenedValuesVariable { local, next } - } - - fn write(&self, witness: &mut Witness) { - self.local.write(witness); - self.next.write(witness); - } -} - -impl Witnessable for ChipOpenedValues { - type WitnessVariable = ChipOpenedValuesVariable; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - let preprocessed = self.preprocessed.read(builder); - let main = self.main.read(builder); - let permutation = self.permutation.read(builder); - let quotient = self.quotient.read(builder); - let cumulative_sum = self.cumulative_sum.read(builder); - let log_degree = self.log_degree; - ChipOpenedValuesVariable { - preprocessed, - main, - permutation, - quotient, - cumulative_sum, - log_degree, - } - } - - fn write(&self, witness: &mut Witness) { - self.preprocessed.write(witness); - self.main.write(witness); - self.permutation.write(witness); - self.quotient.write(witness); - self.cumulative_sum.write(witness); - } -} -impl VectorWitnessable for ChipOpenedValues {} - -impl Witnessable for ShardOpenedValues { - type WitnessVariable = RecursionShardOpenedValuesVariable; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - let chips = self.chips.read(builder); - RecursionShardOpenedValuesVariable { chips } - } - - fn write(&self, witness: &mut Witness) { - self.chips.write(witness); - } -} - -impl Witnessable for OuterBatchOpening { - type WitnessVariable = BatchOpeningVariable; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - let opened_values = self - .opened_values - .read(builder) - .into_iter() - .map(|a| a.into_iter().map(|b| vec![b]).collect()) - .collect(); - let opening_proof = self.opening_proof.read(builder); - BatchOpeningVariable { opened_values, opening_proof } - } - - fn write(&self, witness: &mut Witness) { - self.opened_values.write(witness); - self.opening_proof.write(witness); - } -} -impl VectorWitnessable for OuterBatchOpening {} -impl VectorWitnessable for Vec {} - -impl Witnessable for OuterCommitPhaseStep { - type WitnessVariable = FriCommitPhaseProofStepVariable; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - let sibling_value = self.sibling_value.read(builder); - let opening_proof = self.opening_proof.read(builder); - FriCommitPhaseProofStepVariable { sibling_value, opening_proof } - } - - fn write(&self, witness: &mut Witness) { - self.sibling_value.write(witness); - self.opening_proof.write(witness); - } -} -impl VectorWitnessable for OuterCommitPhaseStep {} - -impl Witnessable for OuterQueryProof { - type WitnessVariable = FriQueryProofVariable; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - let commit_phase_openings = self.commit_phase_openings.read(builder); - FriQueryProofVariable { commit_phase_openings } - } - - fn write(&self, witness: &mut Witness) { - 
self.commit_phase_openings.write(witness); - } -} -impl VectorWitnessable for OuterQueryProof {} - -impl Witnessable for OuterFriProof { - type WitnessVariable = FriProofVariable; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - let commit_phase_commits = self - .commit_phase_commits - .iter() - .map(|commit| { - let commit: OuterDigest = (*commit).into(); - commit.read(builder) - }) - .collect(); - let query_proofs = self.query_proofs.read(builder); - let final_poly = self.final_poly.read(builder); - let pow_witness = self.pow_witness.read(builder); - FriProofVariable { commit_phase_commits, query_proofs, final_poly, pow_witness } - } - - fn write(&self, witness: &mut Witness) { - self.commit_phase_commits.iter().for_each(|commit| { - let commit: OuterDigest = (*commit).into(); - commit.write(witness) - }); - self.query_proofs.write(witness); - self.final_poly.write(witness); - self.pow_witness.write(witness); - } -} - -impl Witnessable for OuterPcsProof { - type WitnessVariable = TwoAdicPcsProofVariable; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - let fri_proof = self.fri_proof.read(builder); - let query_openings = self.query_openings.read(builder); - TwoAdicPcsProofVariable { fri_proof, query_openings } - } - - fn write(&self, witness: &mut Witness) { - self.fri_proof.write(witness); - self.query_openings.write(witness); - } -} - -impl Witnessable for ShardProof { - type WitnessVariable = RecursionShardProofVariable; - - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - let main_commit: OuterDigest = self.commitment.main_commit.into(); - let permutation_commit: OuterDigest = self.commitment.permutation_commit.into(); - let quotient_commit: OuterDigest = self.commitment.quotient_commit.into(); - let commitment = ShardCommitment { - main_commit: main_commit.read(builder), - permutation_commit: permutation_commit.read(builder), - quotient_commit: quotient_commit.read(builder), - }; - let opened_values = self.opened_values.read(builder); - let opening_proof = self.opening_proof.read(builder); - let public_values = self.public_values.read(builder); - let public_values = builder.vec(public_values); - - RecursionShardProofVariable { commitment, opened_values, opening_proof, public_values } - } - - fn write(&self, witness: &mut Witness) { - let main_commit: OuterDigest = self.commitment.main_commit.into(); - let permutation_commit: OuterDigest = self.commitment.permutation_commit.into(); - let quotient_commit: OuterDigest = self.commitment.quotient_commit.into(); - main_commit.write(witness); - permutation_commit.write(witness); - quotient_commit.write(witness); - self.opened_values.write(witness); - self.opening_proof.write(witness); - self.public_values.write(witness); - } -} - -#[cfg(test)] -mod tests { - use p3_baby_bear::BabyBear; - use p3_bn254_fr::Bn254Fr; - use p3_field::AbstractField; - use sp1_recursion_compiler::{ - config::OuterConfig, - constraints::ConstraintCompiler, - ir::{Builder, ExtConst, Witness}, - }; - use sp1_recursion_core::stark::config::OuterChallenge; - use sp1_recursion_gnark_ffi::PlonkBn254Prover; - - #[test] - fn test_witness_simple() { - let mut builder = Builder::::default(); - let a = builder.witness_var(); - let b = builder.witness_var(); - builder.assert_var_eq(a, Bn254Fr::one()); - builder.assert_var_eq(b, Bn254Fr::two()); - builder.print_v(a); - builder.print_v(b); - - let a = builder.witness_felt(); - let b = builder.witness_felt(); - builder.assert_felt_eq(a, BabyBear::one()); - 
builder.assert_felt_eq(b, BabyBear::two()); - builder.print_f(a); - builder.print_f(b); - - let a = builder.witness_ext(); - let b = builder.witness_ext(); - builder.assert_ext_eq(a, OuterChallenge::one().cons()); - builder.assert_ext_eq(b, OuterChallenge::two().cons()); - builder.print_e(a); - builder.print_e(b); - - let mut backend = ConstraintCompiler::::default(); - let constraints = backend.emit(builder.operations); - PlonkBn254Prover::test::( - constraints, - Witness { - vars: vec![Bn254Fr::one(), Bn254Fr::two()], - felts: vec![BabyBear::one(), BabyBear::two()], - exts: vec![OuterChallenge::one(), OuterChallenge::two()], - vkey_hash: Bn254Fr::one(), - commited_values_digest: Bn254Fr::one(), - }, - ); - } -} diff --git a/crates/recursion/circuit-v2/src/witness/mod.rs b/crates/recursion/circuit/src/witness/mod.rs similarity index 84% rename from crates/recursion/circuit-v2/src/witness/mod.rs rename to crates/recursion/circuit/src/witness/mod.rs index c8c90e7c20..05b23de603 100644 --- a/crates/recursion/circuit-v2/src/witness/mod.rs +++ b/crates/recursion/circuit/src/witness/mod.rs @@ -58,6 +58,19 @@ impl<'a, C: CircuitConfig, T: Witnessable> Witnessable for &'a T { } } +impl, U: Witnessable> Witnessable for (T, U) { + type WitnessVariable = (T::WitnessVariable, U::WitnessVariable); + + fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { + (self.0.read(builder), self.1.read(builder)) + } + + fn write(&self, witness: &mut impl WitnessWriter) { + self.0.write(witness); + self.1.write(witness); + } +} + impl> Witnessable for InnerVal { type WitnessVariable = Felt; @@ -119,7 +132,7 @@ impl> Witnessable for Vec { impl, SC: BabyBearFriConfigVariable> Witnessable for ShardProof where - Com: Witnessable>::Digest>, + Com: Witnessable>::DigestVariable>, OpeningProof: Witnessable>, { type WitnessVariable = ShardProofVariable; @@ -152,14 +165,21 @@ impl> Witnessable for ShardCommitment type WitnessVariable = ShardCommitment; fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - let main_commit = self.main_commit.read(builder); + let global_main_commit = self.global_main_commit.read(builder); + let local_main_commit = self.local_main_commit.read(builder); let permutation_commit = self.permutation_commit.read(builder); let quotient_commit = self.quotient_commit.read(builder); - Self::WitnessVariable { main_commit, permutation_commit, quotient_commit } + Self::WitnessVariable { + global_main_commit, + local_main_commit, + permutation_commit, + quotient_commit, + } } fn write(&self, witness: &mut impl WitnessWriter) { - self.main_commit.write(witness); + self.global_main_commit.write(witness); + self.local_main_commit.write(witness); self.permutation_commit.write(witness); self.quotient_commit.write(witness); } @@ -190,14 +210,16 @@ impl> Witnessable let main = self.main.read(builder); let permutation = self.permutation.read(builder); let quotient = self.quotient.read(builder); - let cumulative_sum = self.cumulative_sum.read(builder); + let global_cumulative_sum = self.global_cumulative_sum.read(builder); + let local_cumulative_sum = self.local_cumulative_sum.read(builder); let log_degree = self.log_degree; Self::WitnessVariable { preprocessed, main, permutation, quotient, - cumulative_sum, + global_cumulative_sum, + local_cumulative_sum, log_degree, } } @@ -207,6 +229,7 @@ impl> Witnessable self.main.write(witness); self.permutation.write(witness); self.quotient.write(witness); - self.cumulative_sum.write(witness); + self.global_cumulative_sum.write(witness); + 
self.local_cumulative_sum.write(witness); } } diff --git a/crates/recursion/circuit-v2/src/witness/outer.rs b/crates/recursion/circuit/src/witness/outer.rs similarity index 92% rename from crates/recursion/circuit-v2/src/witness/outer.rs rename to crates/recursion/circuit/src/witness/outer.rs index a8fe1b84f7..66c49d520c 100644 --- a/crates/recursion/circuit-v2/src/witness/outer.rs +++ b/crates/recursion/circuit/src/witness/outer.rs @@ -9,14 +9,14 @@ use sp1_recursion_compiler::{ config::OuterConfig, ir::{Builder, Var}, }; -use sp1_recursion_core_v2::stark::config::{ +use sp1_recursion_core::stark::{ BabyBearPoseidon2Outer, OuterBatchOpening, OuterChallenge, OuterChallengeMmcs, OuterDigest, OuterFriProof, OuterPcsProof, OuterVal, }; use crate::{ - BatchOpeningVariable, FriCommitPhaseProofStepVariable, FriProofVariable, FriQueryProofVariable, - TwoAdicPcsProofVariable, + BatchOpeningVariable, CircuitConfig, FriCommitPhaseProofStepVariable, FriProofVariable, + FriQueryProofVariable, TwoAdicPcsProofVariable, }; use super::{WitnessWriter, Witnessable}; @@ -39,13 +39,13 @@ impl WitnessWriter for OuterWitness { } } -impl Witnessable for Bn254Fr { +impl> Witnessable for Bn254Fr { type WitnessVariable = Var; - fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { + fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { builder.witness_var() } - fn write(&self, witness: &mut impl WitnessWriter) { + fn write(&self, witness: &mut impl WitnessWriter) { witness.write_var(*self) } } diff --git a/crates/recursion/circuit-v2/src/witness/stark.rs b/crates/recursion/circuit/src/witness/stark.rs similarity index 99% rename from crates/recursion/circuit-v2/src/witness/stark.rs rename to crates/recursion/circuit/src/witness/stark.rs index 0f2308c0e7..4ecd21e416 100644 --- a/crates/recursion/circuit-v2/src/witness/stark.rs +++ b/crates/recursion/circuit/src/witness/stark.rs @@ -5,7 +5,7 @@ use p3_field::{AbstractExtensionField, AbstractField}; use p3_fri::{CommitPhaseProofStep, QueryProof}; use sp1_recursion_compiler::ir::{Builder, Config, Ext, Felt}; -use sp1_recursion_core_v2::air::Block; +use sp1_recursion_core::air::Block; use sp1_stark::{ baby_bear_poseidon2::BabyBearPoseidon2, AirOpenedValues, InnerBatchOpening, InnerChallenge, InnerChallengeMmcs, InnerDigest, InnerFriProof, InnerPcsProof, InnerVal, diff --git a/crates/recursion/compiler/Cargo.toml b/crates/recursion/compiler/Cargo.toml index 839273cab7..3cd075f91a 100644 --- a/crates/recursion/compiler/Cargo.toml +++ b/crates/recursion/compiler/Cargo.toml @@ -10,21 +10,14 @@ keywords = { workspace = true } categories = { workspace = true } [dependencies] -p3-air = { workspace = true } p3-bn254-fr = { workspace = true } p3-baby-bear = { workspace = true } -p3-commit = { workspace = true } p3-field = { workspace = true } -p3-fri = { workspace = true } -p3-matrix = { workspace = true } -p3-poseidon2 = { workspace = true } p3-symmetric = { workspace = true } -p3-util = { workspace = true } sp1-core-machine = { workspace = true } sp1-primitives = { workspace = true } sp1-recursion-core = { workspace = true } -sp1-recursion-core-v2 = { workspace = true } sp1-recursion-derive = { workspace = true } sp1-stark = { workspace = true } @@ -32,7 +25,6 @@ itertools = "0.13.0" serde = { version = "1.0.204", features = ["derive"] } backtrace = "0.3.71" tracing = "0.1.40" -rayon = "1.10.0" vec_map = "0.8.2" [dev-dependencies] @@ -41,7 +33,3 @@ p3-dft = { workspace = true } p3-merkle-tree = { workspace = true } rand = "0.8.5" criterion = { version = 
"0.5.1", features = ["html_reports"] } - -[[bench]] -name = "circuit" -harness = false diff --git a/crates/recursion/compiler/benches/circuit.rs b/crates/recursion/compiler/benches/circuit.rs deleted file mode 100644 index 42d2e94ec5..0000000000 --- a/crates/recursion/compiler/benches/circuit.rs +++ /dev/null @@ -1,85 +0,0 @@ -use std::time::Duration; - -use criterion::*; -use p3_symmetric::Permutation; -use rand::{rngs::StdRng, Rng, SeedableRng}; - -use sp1_recursion_compiler::{ - asm::{AsmBuilder, AsmConfig}, - circuit::*, - ir::{DslIr, TracedVec}, - prelude::Felt, -}; -use sp1_recursion_core_v2::chips::poseidon2_wide::WIDTH; -use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, inner_perm, StarkGenericConfig}; - -type SC = BabyBearPoseidon2; -type F = ::Val; -type EF = ::Challenge; -type C = AsmConfig; - -fn poseidon_program() -> TracedVec> { - let mut builder = AsmBuilder::::default(); - let mut rng = StdRng::seed_from_u64(0xCAFEDA7E) - .sample_iter::<[F; WIDTH], _>(rand::distributions::Standard); - for _ in 0..100 { - let input_1: [F; WIDTH] = rng.next().unwrap(); - let output_1 = inner_perm().permute(input_1); - - let input_1_felts = input_1.map(|x| builder.eval(x)); - let output_1_felts = builder.poseidon2_permute_v2(input_1_felts); - let expected: [Felt<_>; WIDTH] = output_1.map(|x| builder.eval(x)); - for (lhs, rhs) in output_1_felts.into_iter().zip(expected) { - builder.assert_felt_eq(lhs, rhs); - } - } - builder.operations -} - -#[allow(dead_code)] -fn compile_one(c: &mut Criterion) { - let input = { - let mut ops = poseidon_program().vec; - ops.truncate(100); - ops - }; - - c.bench_with_input( - BenchmarkId::new("compile_one", format!("{} instructions", input.len())), - &input, - |b, operations| { - let mut compiler = AsmCompiler::>::default(); - b.iter(|| { - for instr in operations.iter().cloned() { - compiler.compile_one(std::hint::black_box(instr), drop); - } - compiler.next_addr = Default::default(); - compiler.virtual_to_physical.clear(); - compiler.consts.clear(); - compiler.addr_to_mult.clear(); - }) - }, - ); -} - -fn compile(c: &mut Criterion) { - let input = poseidon_program(); - - c.bench_with_input( - BenchmarkId::new("compile", format!("{} instructions", input.vec.len())), - &input, - |b, operations| { - let mut compiler = AsmCompiler::>::default(); - b.iter(|| { - compiler.compile(operations.clone()); - }) - }, - ); -} - -criterion_group! 
{ - name = benches; - config = Criterion::default().measurement_time(Duration::from_secs(60)); - targets = compile -} -criterion_main!(benches); diff --git a/crates/recursion/compiler/examples/fibonacci.rs b/crates/recursion/compiler/examples/fibonacci.rs deleted file mode 100644 index e92b2fde93..0000000000 --- a/crates/recursion/compiler/examples/fibonacci.rs +++ /dev/null @@ -1,69 +0,0 @@ -use p3_field::AbstractField; -use sp1_recursion_compiler::{ - asm::AsmBuilder, - ir::{Felt, Var}, -}; -use sp1_recursion_core::runtime::Runtime; -use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; - -fn fibonacci(n: u32) -> u32 { - if n == 0 { - 0 - } else { - let mut a = 0; - let mut b = 1; - for _ in 0..n { - let temp = b; - b += a; - a = temp; - } - a - } -} - -fn main() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - - let n_val = 10; - let mut builder = AsmBuilder::::default(); - let a: Felt<_> = builder.eval(F::zero()); - let b: Felt<_> = builder.eval(F::one()); - let n: Var<_> = builder.eval(F::from_canonical_u32(n_val)); - - let start: Var<_> = builder.eval(F::zero()); - let end = n; - - builder.range(start, end).for_each(|_, builder| { - let temp: Felt<_> = builder.uninit(); - builder.assign(temp, b); - builder.assign(b, a + b); - builder.assign(a, temp); - }); - - let expected_value = F::from_canonical_u32(fibonacci(n_val)); - builder.assert_felt_eq(a, expected_value); - - let code = builder.compile_asm(); - println!("{}", code); - - let program = code.machine_code(); - println!("Program size = {}", program.instructions.len()); - - let config = SC::new(); - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.run().unwrap(); - - // let machine = RecursionAir::machine(config); - // let (pk, vk) = machine.setup(&program); - // let mut challenger = machine.config().challenger(); - - // let start = Instant::now(); - // let proof = machine.prove::>(&pk, runtime.record, &mut challenger); - // let duration = start.elapsed().as_secs(); - - // let mut challenger = machine.config().challenger(); - // machine.verify(&vk, &proof, &mut challenger).unwrap(); - // println!("proving duration = {}", duration); -} diff --git a/crates/recursion/compiler/src/asm/builder.rs b/crates/recursion/compiler/src/asm/builder.rs deleted file mode 100644 index cc511e4e08..0000000000 --- a/crates/recursion/compiler/src/asm/builder.rs +++ /dev/null @@ -1,25 +0,0 @@ -use p3_field::{ExtensionField, PrimeField32, TwoAdicField}; -use sp1_recursion_core::runtime::RecursionProgram; - -use crate::prelude::Builder; - -use super::{config::AsmConfig, AsmCompiler, AssemblyCode}; - -/// A builder that compiles assembly code. -pub type AsmBuilder = Builder>; - -impl + TwoAdicField> AsmBuilder { - /// Compile to assembly code. - pub fn compile_asm(self) -> AssemblyCode { - let mut compiler = AsmCompiler::new(); - compiler.build(self.operations); - compiler.code() - } - - /// Compile to a program that can be executed in the recursive zkVM. 
- pub fn compile_program(self) -> RecursionProgram { - let mut compiler = AsmCompiler::new(); - compiler.build(self.operations); - compiler.compile() - } -} diff --git a/crates/recursion/compiler/src/asm/code.rs b/crates/recursion/compiler/src/asm/code.rs deleted file mode 100644 index 026186ac96..0000000000 --- a/crates/recursion/compiler/src/asm/code.rs +++ /dev/null @@ -1,95 +0,0 @@ -use alloc::{collections::BTreeMap, format}; -use backtrace::Backtrace; -use core::{fmt, fmt::Display}; - -use p3_field::{ExtensionField, PrimeField32}; -use sp1_recursion_core::runtime::RecursionProgram; - -use super::AsmInstruction; - -/// A basic block of assembly instructions. -#[derive(Debug, Clone, Default)] -pub struct BasicBlock( - pub(crate) Vec>, - pub(crate) Vec>, -); - -impl> BasicBlock { - /// Creates a new basic block. - pub const fn new() -> Self { - Self(Vec::new(), Vec::new()) - } - - /// Pushes an instruction to a basic block. - pub(crate) fn push( - &mut self, - instruction: AsmInstruction, - backtrace: Option, - ) { - self.0.push(instruction); - self.1.push(backtrace); - } -} - -/// Assembly code for a program. -#[derive(Debug, Clone)] -pub struct AssemblyCode { - blocks: Vec>, - labels: BTreeMap, -} - -impl> AssemblyCode { - /// Creates a new assembly code. - pub const fn new(blocks: Vec>, labels: BTreeMap) -> Self { - Self { blocks, labels } - } - - pub fn size(&self) -> usize { - self.blocks.iter().map(|block| block.0.len()).sum() - } - - /// Convert the assembly code to a program. - pub fn machine_code(self) -> RecursionProgram { - let blocks = self.blocks; - - // Make a first pass to collect all the pc rows corresponding to the labels. - let mut label_to_pc = BTreeMap::new(); - let mut pc = 0; - for (i, block) in blocks.iter().enumerate() { - label_to_pc.insert(F::from_canonical_usize(i), pc); - pc += block.0.len(); - } - - // Make the second pass to convert the assembly code to machine code. - let mut machine_code = Vec::new(); - let mut traces = Vec::new(); - let mut pc = 0; - for block in blocks { - for (instruction, trace) in block.0.into_iter().zip(block.1) { - machine_code.push(instruction.to_machine(pc, &label_to_pc)); - traces.push(trace); - pc += 1; - } - } - - RecursionProgram { instructions: machine_code, traces } - } -} - -impl> Display for AssemblyCode { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - for (i, block) in self.blocks.iter().enumerate() { - writeln!( - f, - "{}:", - self.labels.get(&F::from_canonical_u32(i as u32)).unwrap_or(&format!(".L{}", i)) - )?; - for instruction in &block.0 { - write!(f, " ")?; - instruction.fmt(&self.labels, f)?; - writeln!(f)?; - } - } - Ok(()) - } -} diff --git a/crates/recursion/compiler/src/asm/compiler.rs b/crates/recursion/compiler/src/asm/compiler.rs deleted file mode 100644 index fb0450444e..0000000000 --- a/crates/recursion/compiler/src/asm/compiler.rs +++ /dev/null @@ -1,824 +0,0 @@ -use alloc::{collections::BTreeMap, vec}; -use backtrace::Backtrace; -use sp1_recursion_core::runtime::{HEAP_PTR, HEAP_START_ADDRESS}; -use std::collections::BTreeSet; - -use p3_field::{ExtensionField, PrimeField32, TwoAdicField}; -use sp1_recursion_core::runtime::RecursionProgram; - -use super::{config::AsmConfig, AssemblyCode, BasicBlock, IndexTriple, ValueOrConst}; -use crate::{ - asm::AsmInstruction, - ir::{Array, DslIr, Ext, Felt, Ptr, Usize, Var}, - prelude::TracedVec, -}; - -/// The zero address. -pub(crate) const ZERO: i32 = 0; - -/// The offset which the stack starts. 
-pub(crate) const STACK_START_OFFSET: i32 = 16; - -/// The address of A0. -pub(crate) const A0: i32 = -8; - -/// The assembly compiler. -#[derive(Debug, Clone, Default)] -pub struct AsmCompiler { - basic_blocks: Vec>, - break_label: Option, - break_label_map: BTreeMap, - break_counter: usize, - contains_break: BTreeSet, - function_labels: BTreeMap, -} - -impl Var { - /// Gets the frame pointer for a var. - pub const fn fp(&self) -> i32 { - -((self.0 as i32) * 3 + 1 + STACK_START_OFFSET) - } -} - -impl Felt { - /// Gets the frame pointer for a felt. - pub const fn fp(&self) -> i32 { - -((self.0 as i32) * 3 + 2 + STACK_START_OFFSET) - } -} - -impl Ext { - /// Gets the frame pointer for an extension element - pub const fn fp(&self) -> i32 { - -((self.0 as i32) * 3 + STACK_START_OFFSET) - } -} - -impl Ptr { - /// Gets the frame pointer for a pointer. - pub const fn fp(&self) -> i32 { - self.address.fp() - } -} - -impl + TwoAdicField> AsmCompiler { - /// Creates a new [AsmCompiler]. - pub fn new() -> Self { - Self { - basic_blocks: vec![BasicBlock::new()], - break_label: None, - break_label_map: BTreeMap::new(), - contains_break: BTreeSet::new(), - function_labels: BTreeMap::new(), - break_counter: 0, - } - } - - /// Creates a new break label. - pub fn new_break_label(&mut self) -> F { - let label = self.break_counter; - self.break_counter += 1; - let label = F::from_canonical_usize(label); - self.break_label = Some(label); - label - } - - /// Builds the operations into assembly instructions. - pub fn build(&mut self, operations: TracedVec>>) { - // Set the heap pointer value according to stack size. - if self.block_label().is_zero() { - let stack_size = F::from_canonical_usize(HEAP_START_ADDRESS); - self.push(AsmInstruction::AddFI(HEAP_PTR, ZERO, stack_size), None); - } - - // For each operation, generate assembly instructions. 
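Aside on the file being deleted here: the `fp()` helpers above map every IR index to a fixed trio of negative stack slots, one each for `Ext`, `Var`, and `Felt`, so values of different kinds at the same index never alias. A standalone sketch of that layout (illustrative only, mirroring the removed constants):

const STACK_START_OFFSET: i32 = 16;

fn ext_fp(i: i32) -> i32 { -(i * 3 + STACK_START_OFFSET) }
fn var_fp(i: i32) -> i32 { -(i * 3 + 1 + STACK_START_OFFSET) }
fn felt_fp(i: i32) -> i32 { -(i * 3 + 2 + STACK_START_OFFSET) }

fn main() {
    // Each index owns three consecutive slots below the stack start.
    assert_eq!((ext_fp(0), var_fp(0), felt_fp(0)), (-16, -17, -18));
    assert_eq!((ext_fp(1), var_fp(1), felt_fp(1)), (-19, -20, -21));
}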
- for (op, trace) in operations.clone() { - match op { - DslIr::ImmV(dst, src) => { - self.push(AsmInstruction::AddFI(dst.fp(), ZERO, src), trace); - } - DslIr::ImmF(dst, src) => { - self.push(AsmInstruction::AddFI(dst.fp(), ZERO, src), trace); - } - DslIr::ImmE(dst, src) => { - self.push(AsmInstruction::AddEI(dst.fp(), ZERO, src), trace); - } - DslIr::AddV(dst, lhs, rhs) => { - self.push(AsmInstruction::AddF(dst.fp(), lhs.fp(), rhs.fp()), trace); - } - DslIr::AddVI(dst, lhs, rhs) => { - self.push(AsmInstruction::AddFI(dst.fp(), lhs.fp(), rhs), trace); - } - DslIr::AddF(dst, lhs, rhs) => { - self.push(AsmInstruction::AddF(dst.fp(), lhs.fp(), rhs.fp()), trace); - } - DslIr::AddFI(dst, lhs, rhs) => { - self.push(AsmInstruction::AddFI(dst.fp(), lhs.fp(), rhs), trace); - } - DslIr::AddE(dst, lhs, rhs) => { - self.push(AsmInstruction::AddE(dst.fp(), lhs.fp(), rhs.fp()), trace); - } - DslIr::AddEI(dst, lhs, rhs) => { - self.push(AsmInstruction::AddEI(dst.fp(), lhs.fp(), rhs), trace); - } - DslIr::AddEF(dst, lhs, rhs) => { - self.push(AsmInstruction::AddE(dst.fp(), lhs.fp(), rhs.fp()), trace); - } - DslIr::AddEFFI(dst, lhs, rhs) => { - self.push(AsmInstruction::AddEI(dst.fp(), lhs.fp(), rhs), trace); - } - DslIr::AddEFI(dst, lhs, rhs) => { - self.push(AsmInstruction::AddEI(dst.fp(), lhs.fp(), EF::from_base(rhs)), trace); - } - DslIr::SubV(dst, lhs, rhs) => { - self.push(AsmInstruction::SubF(dst.fp(), lhs.fp(), rhs.fp()), trace); - } - DslIr::SubVI(dst, lhs, rhs) => { - self.push(AsmInstruction::SubFI(dst.fp(), lhs.fp(), rhs), trace); - } - DslIr::SubVIN(dst, lhs, rhs) => { - self.push(AsmInstruction::SubFIN(dst.fp(), lhs, rhs.fp()), trace); - } - DslIr::SubF(dst, lhs, rhs) => { - self.push(AsmInstruction::SubF(dst.fp(), lhs.fp(), rhs.fp()), trace); - } - DslIr::SubFI(dst, lhs, rhs) => { - self.push(AsmInstruction::SubFI(dst.fp(), lhs.fp(), rhs), trace); - } - DslIr::SubFIN(dst, lhs, rhs) => { - self.push(AsmInstruction::SubFIN(dst.fp(), lhs, rhs.fp()), trace); - } - DslIr::NegV(dst, src) => { - self.push(AsmInstruction::SubFIN(dst.fp(), F::zero(), src.fp()), trace); - } - DslIr::NegF(dst, src) => { - self.push(AsmInstruction::SubFIN(dst.fp(), F::zero(), src.fp()), trace); - } - DslIr::DivF(dst, lhs, rhs) => { - self.push(AsmInstruction::DivF(dst.fp(), lhs.fp(), rhs.fp()), trace); - } - DslIr::DivFI(dst, lhs, rhs) => { - self.push(AsmInstruction::DivFI(dst.fp(), lhs.fp(), rhs), trace); - } - DslIr::DivFIN(dst, lhs, rhs) => { - self.push(AsmInstruction::DivFIN(dst.fp(), lhs, rhs.fp()), trace); - } - DslIr::InvV(dst, src) => { - self.push(AsmInstruction::DivFIN(dst.fp(), F::one(), src.fp()), trace); - } - DslIr::InvF(dst, src) => { - self.push(AsmInstruction::DivFIN(dst.fp(), F::one(), src.fp()), trace); - } - DslIr::DivEF(dst, lhs, rhs) => { - self.push(AsmInstruction::DivE(dst.fp(), lhs.fp(), rhs.fp()), trace); - } - DslIr::DivEFI(dst, lhs, rhs) => { - self.push(AsmInstruction::DivEI(dst.fp(), lhs.fp(), EF::from_base(rhs)), trace); - } - DslIr::DivEIN(dst, lhs, rhs) => { - self.push(AsmInstruction::DivEIN(dst.fp(), lhs, rhs.fp()), trace); - } - DslIr::DivEFIN(dst, lhs, rhs) => { - self.push( - AsmInstruction::DivEIN(dst.fp(), EF::from_base(lhs), rhs.fp()), - trace, - ); - } - DslIr::DivE(dst, lhs, rhs) => { - self.push(AsmInstruction::DivE(dst.fp(), lhs.fp(), rhs.fp()), trace); - } - DslIr::DivEI(dst, lhs, rhs) => { - self.push(AsmInstruction::DivEI(dst.fp(), lhs.fp(), rhs), trace); - } - DslIr::InvE(dst, src) => { - self.push(AsmInstruction::DivEIN(dst.fp(), EF::one(), src.fp()), trace); - } - 
DslIr::SubEF(dst, lhs, rhs) => { - self.push(AsmInstruction::SubE(dst.fp(), lhs.fp(), rhs.fp()), trace); - } - DslIr::SubEFI(dst, lhs, rhs) => { - self.push(AsmInstruction::SubEI(dst.fp(), lhs.fp(), EF::from_base(rhs)), trace); - } - DslIr::SubEIN(dst, lhs, rhs) => { - self.push(AsmInstruction::SubEIN(dst.fp(), lhs, rhs.fp()), trace); - } - DslIr::SubE(dst, lhs, rhs) => { - self.push(AsmInstruction::SubE(dst.fp(), lhs.fp(), rhs.fp()), trace); - } - DslIr::SubEI(dst, lhs, rhs) => { - self.push(AsmInstruction::SubEI(dst.fp(), lhs.fp(), rhs), trace); - } - DslIr::NegE(dst, src) => { - self.push(AsmInstruction::SubEIN(dst.fp(), EF::zero(), src.fp()), trace); - } - DslIr::MulV(dst, lhs, rhs) => { - self.push(AsmInstruction::MulF(dst.fp(), lhs.fp(), rhs.fp()), trace); - } - DslIr::MulVI(dst, lhs, rhs) => { - self.push(AsmInstruction::MulFI(dst.fp(), lhs.fp(), rhs), trace); - } - DslIr::MulF(dst, lhs, rhs) => { - self.push(AsmInstruction::MulF(dst.fp(), lhs.fp(), rhs.fp()), trace); - } - DslIr::MulFI(dst, lhs, rhs) => { - self.push(AsmInstruction::MulFI(dst.fp(), lhs.fp(), rhs), trace); - } - DslIr::MulE(dst, lhs, rhs) => { - self.push(AsmInstruction::MulE(dst.fp(), lhs.fp(), rhs.fp()), trace); - } - DslIr::MulEI(dst, lhs, rhs) => { - self.push(AsmInstruction::MulEI(dst.fp(), lhs.fp(), rhs), trace); - } - DslIr::MulEF(dst, lhs, rhs) => { - self.push(AsmInstruction::MulE(dst.fp(), lhs.fp(), rhs.fp()), trace); - } - DslIr::MulEFI(dst, lhs, rhs) => { - self.push(AsmInstruction::MulEI(dst.fp(), lhs.fp(), EF::from_base(rhs)), trace); - } - DslIr::IfEq(data) => { - let (lhs, rhs, then_block, else_block) = *data; - let if_compiler = IfCompiler { - compiler: self, - lhs: lhs.fp(), - rhs: ValueOrConst::Val(rhs.fp()), - is_eq: true, - }; - if else_block.is_empty() { - if_compiler.then(|builder| builder.build(then_block)); - } else { - if_compiler.then_or_else( - |builder| builder.build(then_block), - |builder| builder.build(else_block), - ); - } - } - DslIr::IfNe(data) => { - let (lhs, rhs, then_block, else_block) = *data; - let if_compiler = IfCompiler { - compiler: self, - lhs: lhs.fp(), - rhs: ValueOrConst::Val(rhs.fp()), - is_eq: false, - }; - if else_block.is_empty() { - if_compiler.then(|builder| builder.build(then_block)); - } else { - if_compiler.then_or_else( - |builder| builder.build(then_block), - |builder| builder.build(else_block), - ); - } - } - DslIr::IfEqI(data) => { - let (lhs, rhs, then_block, else_block) = *data; - let if_compiler = IfCompiler { - compiler: self, - lhs: lhs.fp(), - rhs: ValueOrConst::Const(rhs), - is_eq: true, - }; - if else_block.is_empty() { - if_compiler.then(|builder| builder.build(then_block)); - } else { - if_compiler.then_or_else( - |builder| builder.build(then_block), - |builder| builder.build(else_block), - ); - } - } - DslIr::IfNeI(data) => { - let (lhs, rhs, then_block, else_block) = *data; - let if_compiler = IfCompiler { - compiler: self, - lhs: lhs.fp(), - rhs: ValueOrConst::Const(rhs), - is_eq: false, - }; - if else_block.is_empty() { - if_compiler.then(|builder| builder.build(then_block)); - } else { - if_compiler.then_or_else( - |builder| builder.build(then_block), - |builder| builder.build(else_block), - ); - } - } - DslIr::Break => { - let label = self.break_label.expect("No break label set"); - let current_block = self.block_label(); - self.contains_break.insert(current_block); - self.push(AsmInstruction::Break(label), trace); - } - DslIr::For(data) => { - let (start, end, step_size, loop_var, block) = *data; - let for_compiler = - ForCompiler { 
compiler: self, start, end, step_size, loop_var }; - for_compiler.for_each(move |_, builder| builder.build(block)); - } - DslIr::AssertEqV(lhs, rhs) => { - // If lhs != rhs, execute TRAP - self.assert(lhs.fp(), ValueOrConst::Val(rhs.fp()), false, trace) - } - DslIr::AssertEqVI(lhs, rhs) => { - // If lhs != rhs, execute TRAP - self.assert(lhs.fp(), ValueOrConst::Const(rhs), false, trace) - } - DslIr::AssertNeV(lhs, rhs) => { - // If lhs == rhs, execute TRAP - self.assert(lhs.fp(), ValueOrConst::Val(rhs.fp()), true, trace) - } - DslIr::AssertNeVI(lhs, rhs) => { - // If lhs == rhs, execute TRAP - self.assert(lhs.fp(), ValueOrConst::Const(rhs), true, trace) - } - DslIr::AssertEqF(lhs, rhs) => { - // If lhs != rhs, execute TRAP - self.assert(lhs.fp(), ValueOrConst::Val(rhs.fp()), false, trace) - } - DslIr::AssertEqFI(lhs, rhs) => { - // If lhs != rhs, execute TRAP - self.assert(lhs.fp(), ValueOrConst::Const(rhs), false, trace) - } - DslIr::AssertNeF(lhs, rhs) => { - // If lhs == rhs, execute TRAP - self.assert(lhs.fp(), ValueOrConst::Val(rhs.fp()), true, trace) - } - DslIr::AssertNeFI(lhs, rhs) => { - // If lhs == rhs, execute TRAP - self.assert(lhs.fp(), ValueOrConst::Const(rhs), true, trace) - } - DslIr::AssertEqE(lhs, rhs) => { - // If lhs != rhs, execute TRAP - self.assert(lhs.fp(), ValueOrConst::ExtVal(rhs.fp()), false, trace) - } - DslIr::AssertEqEI(lhs, rhs) => { - // If lhs != rhs, execute TRAP - self.assert(lhs.fp(), ValueOrConst::ExtConst(rhs), false, trace) - } - DslIr::AssertNeE(lhs, rhs) => { - // If lhs == rhs, execute TRAP - self.assert(lhs.fp(), ValueOrConst::ExtVal(rhs.fp()), true, trace) - } - DslIr::AssertNeEI(lhs, rhs) => { - // If lhs == rhs, execute TRAP - self.assert(lhs.fp(), ValueOrConst::ExtConst(rhs), true, trace) - } - DslIr::Alloc(ptr, len, size) => { - self.alloc(ptr, len, size, trace); - } - DslIr::LoadV(var, ptr, index) => match index.fp() { - IndexTriple::Const(index, offset, size) => self.push( - AsmInstruction::LoadFI(var.fp(), ptr.fp(), index, offset, size), - trace, - ), - IndexTriple::Var(index, offset, size) => self.push( - AsmInstruction::LoadF(var.fp(), ptr.fp(), index, offset, size), - trace, - ), - }, - DslIr::LoadF(var, ptr, index) => match index.fp() { - IndexTriple::Const(index, offset, size) => self.push( - AsmInstruction::LoadFI(var.fp(), ptr.fp(), index, offset, size), - trace, - ), - IndexTriple::Var(index, offset, size) => self.push( - AsmInstruction::LoadF(var.fp(), ptr.fp(), index, offset, size), - trace, - ), - }, - DslIr::LoadE(var, ptr, index) => match index.fp() { - IndexTriple::Const(index, offset, size) => self.push( - AsmInstruction::LoadEI(var.fp(), ptr.fp(), index, offset, size), - trace, - ), - IndexTriple::Var(index, offset, size) => self.push( - AsmInstruction::LoadE(var.fp(), ptr.fp(), index, offset, size), - trace, - ), - }, - DslIr::StoreV(var, ptr, index) => match index.fp() { - IndexTriple::Const(index, offset, size) => self.push( - AsmInstruction::StoreFI(var.fp(), ptr.fp(), index, offset, size), - trace, - ), - IndexTriple::Var(index, offset, size) => self.push( - AsmInstruction::StoreF(var.fp(), ptr.fp(), index, offset, size), - trace, - ), - }, - DslIr::StoreF(var, ptr, index) => match index.fp() { - IndexTriple::Const(index, offset, size) => self.push( - AsmInstruction::StoreFI(var.fp(), ptr.fp(), index, offset, size), - trace, - ), - IndexTriple::Var(index, offset, size) => self.push( - AsmInstruction::StoreF(var.fp(), ptr.fp(), index, offset, size), - trace, - ), - }, - DslIr::StoreE(var, ptr, index) => match 
index.fp() { - IndexTriple::Const(index, offset, size) => self.push( - AsmInstruction::StoreEI(var.fp(), ptr.fp(), index, offset, size), - trace, - ), - IndexTriple::Var(index, offset, size) => self.push( - AsmInstruction::StoreE(var.fp(), ptr.fp(), index, offset, size), - trace, - ), - }, - - DslIr::HintBitsU(dst, src) => match (dst, src) { - (Array::Dyn(dst, _), Usize::Var(src)) => { - self.push(AsmInstruction::HintBits(dst.fp(), src.fp()), trace); - } - _ => unimplemented!(), - }, - DslIr::HintBitsF(dst, src) => match dst { - Array::Dyn(dst, _) => { - self.push(AsmInstruction::HintBits(dst.fp(), src.fp()), trace); - } - _ => unimplemented!(), - }, - DslIr::HintBitsV(dst, src) => match dst { - Array::Dyn(dst, _) => { - self.push(AsmInstruction::HintBits(dst.fp(), src.fp()), trace); - } - _ => unimplemented!(), - }, - DslIr::Poseidon2PermuteBabyBear(data) => match *data { - (Array::Dyn(dst, _), Array::Dyn(src, _)) => { - self.push(AsmInstruction::Poseidon2Permute(dst.fp(), src.fp()), trace) - } - _ => unimplemented!(), - }, - DslIr::Error() => self.push(AsmInstruction::Trap, trace), - DslIr::PrintF(dst) => self.push(AsmInstruction::PrintF(dst.fp()), trace), - DslIr::PrintV(dst) => self.push(AsmInstruction::PrintV(dst.fp()), trace), - DslIr::PrintE(dst) => self.push(AsmInstruction::PrintE(dst.fp()), trace), - DslIr::HintExt2Felt(dst, src) => match (dst, src) { - (Array::Dyn(dst, _), src) => { - self.push(AsmInstruction::HintExt2Felt(dst.fp(), src.fp()), trace) - } - _ => unimplemented!(), - }, - DslIr::HintLen(dst) => self.push(AsmInstruction::HintLen(dst.fp()), trace), - DslIr::HintVars(dst) => match dst { - Array::Dyn(dst, _) => self.push(AsmInstruction::Hint(dst.fp()), trace), - _ => unimplemented!(), - }, - DslIr::HintFelts(dst) => match dst { - Array::Dyn(dst, _) => self.push(AsmInstruction::Hint(dst.fp()), trace), - _ => unimplemented!(), - }, - DslIr::HintExts(dst) => match dst { - Array::Dyn(dst, _) => self.push(AsmInstruction::Hint(dst.fp()), trace), - _ => unimplemented!(), - }, - DslIr::FriFold(m, input_ptr) => { - if let Array::Dyn(ptr, _) = input_ptr { - self.push(AsmInstruction::FriFold(m.fp(), ptr.fp()), trace); - } else { - unimplemented!(); - } - } - DslIr::Poseidon2CompressBabyBear(data) => match *data { - (Array::Dyn(result, _), Array::Dyn(left, _), Array::Dyn(right, _)) => self - .push( - AsmInstruction::Poseidon2Compress(result.fp(), left.fp(), right.fp()), - trace, - ), - _ => unimplemented!(), - }, - DslIr::Poseidon2AbsorbBabyBear(p2_hash_and_absorb_num, input) => match input { - Array::Dyn(input, input_size) => { - if let Usize::Var(input_size) = input_size { - self.push( - AsmInstruction::Poseidon2Absorb( - p2_hash_and_absorb_num.fp(), - input.fp(), - input_size.fp(), - ), - trace, - ); - } else { - unimplemented!(); - } - } - _ => unimplemented!(), - }, - DslIr::Poseidon2FinalizeBabyBear(p2_hash_num, output) => match output { - Array::Dyn(output, _) => { - self.push( - AsmInstruction::Poseidon2Finalize(p2_hash_num.fp(), output.fp()), - trace, - ); - } - _ => unimplemented!(), - }, - DslIr::Commit(val, index) => { - self.push(AsmInstruction::Commit(val.fp(), index.fp()), trace); - } - DslIr::RegisterPublicValue(val) => { - self.push(AsmInstruction::RegisterPublicValue(val.fp()), trace); - } - DslIr::LessThan(dst, left, right) => { - self.push(AsmInstruction::LessThan(dst.fp(), left.fp(), right.fp()), trace); - } - DslIr::CycleTracker(name) => { - self.push(AsmInstruction::CycleTracker(name.clone()), trace); - } - DslIr::Halt => { - self.push(AsmInstruction::Halt, 
trace); - } - DslIr::ExpReverseBitsLen(base, ptr, len) => { - self.push( - AsmInstruction::ExpReverseBitsLen(base.fp(), ptr.fp(), len.fp()), - trace, - ); - } - _ => unimplemented!(), - } - } - } - - pub fn alloc(&mut self, ptr: Ptr, len: Usize, size: usize, backtrace: Option) { - // Load the current heap ptr address to the stack value and advance the heap ptr. - let size = F::from_canonical_usize(size); - match len { - Usize::Const(len) => { - let len = F::from_canonical_usize(len); - self.push(AsmInstruction::AddFI(ptr.fp(), HEAP_PTR, F::zero()), backtrace.clone()); - self.push(AsmInstruction::AddFI(HEAP_PTR, HEAP_PTR, len * size), backtrace); - } - Usize::Var(len) => { - self.push(AsmInstruction::AddFI(ptr.fp(), HEAP_PTR, F::zero()), backtrace.clone()); - self.push(AsmInstruction::MulFI(A0, len.fp(), size), backtrace.clone()); - self.push(AsmInstruction::AddF(HEAP_PTR, HEAP_PTR, A0), backtrace); - } - } - } - - pub fn assert( - &mut self, - lhs: i32, - rhs: ValueOrConst, - is_eq: bool, - backtrace: Option, - ) { - let if_compiler = IfCompiler { compiler: self, lhs, rhs, is_eq }; - if_compiler.then(|builder| builder.push(AsmInstruction::Trap, backtrace)); - } - - pub fn code(self) -> AssemblyCode { - let labels = self.function_labels.into_iter().map(|(k, v)| (v, k)).collect(); - AssemblyCode::new(self.basic_blocks, labels) - } - - pub fn compile(self) -> RecursionProgram { - let code = self.code(); - tracing::debug!("recursion program size: {}", code.size()); - code.machine_code() - } - - fn basic_block(&mut self) { - self.basic_blocks.push(BasicBlock::new()); - } - - fn block_label(&mut self) -> F { - F::from_canonical_usize(self.basic_blocks.len() - 1) - } - - fn push_to_block( - &mut self, - block_label: F, - instruction: AsmInstruction, - backtrace: Option, - ) { - self.basic_blocks - .get_mut(block_label.as_canonical_u32() as usize) - .unwrap_or_else(|| panic!("Missing block at label: {:?}", block_label)) - .push(instruction, backtrace); - } - - fn push(&mut self, instruction: AsmInstruction, backtrace: Option) { - self.basic_blocks.last_mut().unwrap().push(instruction, backtrace); - } -} - -pub struct IfCompiler<'a, F, EF> { - compiler: &'a mut AsmCompiler, - lhs: i32, - rhs: ValueOrConst, - is_eq: bool, -} - -impl<'a, F: PrimeField32 + TwoAdicField, EF: ExtensionField + TwoAdicField> - IfCompiler<'a, F, EF> -{ - pub fn then(self, f: Func) - where - Func: FnOnce(&mut AsmCompiler), - { - let Self { compiler, lhs, rhs, is_eq } = self; - - // Get the label for the current block. - let current_block = compiler.block_label(); - - // Generate the blocks for the then branch. - compiler.basic_block(); - f(compiler); - - // Generate the block for returning to the main flow. - compiler.basic_block(); - let after_if_block = compiler.block_label(); - - // Get the branch instruction to push to the `current_block`. - let instr = Self::branch(lhs, rhs, is_eq, after_if_block); - compiler.push_to_block(current_block, instr, None); - } - - pub fn then_or_else(self, then_f: ThenFunc, else_f: ElseFunc) - where - ThenFunc: FnOnce(&mut AsmCompiler), - ElseFunc: FnOnce(&mut AsmCompiler), - { - let Self { compiler, lhs, rhs, is_eq } = self; - - // Get the label for the current block, so we can generate the jump instruction into it. - // conditional branc instruction to it, if the condition is not met. - let if_branching_block = compiler.block_label(); - - // Generate the block for the then branch. 
- compiler.basic_block(); - then_f(compiler); - let last_if_block = compiler.block_label(); - - // Generate the block for the else branch. - compiler.basic_block(); - let else_block = compiler.block_label(); - else_f(compiler); - - // Generate the jump instruction to the else block - let instr = Self::branch(lhs, rhs, is_eq, else_block); - compiler.push_to_block(if_branching_block, instr, None); - - // Generate the block for returning to the main flow. - compiler.basic_block(); - let main_flow_block = compiler.block_label(); - let instr = AsmInstruction::j(main_flow_block); - compiler.push_to_block(last_if_block, instr, None); - } - - const fn branch( - lhs: i32, - rhs: ValueOrConst, - is_eq: bool, - block: F, - ) -> AsmInstruction { - match (rhs, is_eq) { - (ValueOrConst::Const(rhs), true) => AsmInstruction::BneI(block, lhs, rhs), - (ValueOrConst::Const(rhs), false) => AsmInstruction::BeqI(block, lhs, rhs), - (ValueOrConst::ExtConst(rhs), true) => AsmInstruction::BneEI(block, lhs, rhs), - (ValueOrConst::ExtConst(rhs), false) => AsmInstruction::BeqEI(block, lhs, rhs), - (ValueOrConst::Val(rhs), true) => AsmInstruction::Bne(block, lhs, rhs), - (ValueOrConst::Val(rhs), false) => AsmInstruction::Beq(block, lhs, rhs), - (ValueOrConst::ExtVal(rhs), true) => AsmInstruction::BneE(block, lhs, rhs), - (ValueOrConst::ExtVal(rhs), false) => AsmInstruction::BeqE(block, lhs, rhs), - } - } -} - -/// A builder for a for loop. -/// -/// SAFETY: Starting with end < start will lead to undefined behavior. -pub struct ForCompiler<'a, F, EF> { - compiler: &'a mut AsmCompiler, - start: Usize, - end: Usize, - step_size: F, - loop_var: Var, -} - -impl<'a, F: PrimeField32 + TwoAdicField, EF: ExtensionField + TwoAdicField> - ForCompiler<'a, F, EF> -{ - pub(super) fn for_each(mut self, f: impl FnOnce(Var, &mut AsmCompiler)) { - // The function block structure: - // - Setting the loop range - // - Executing the loop body and incrementing the loop variable - // - the loop condition - // Set the loop variable to the start of the range. - - // Set the loop variable to the start of the range. - self.set_loop_var(); - - // Save the label of the for loop call. - let loop_call_label = self.compiler.block_label(); - - // Initialize a break label for this loop. - let break_label = self.compiler.new_break_label(); - self.compiler.break_label = Some(break_label); - - // A basic block for the loop body - self.compiler.basic_block(); - - // Save the loop body label for the loop condition. - let loop_label = self.compiler.block_label(); - - // The loop body. - f(self.loop_var, self.compiler); - - // If the step size is just one, compile to the optimized branch instruction. - if self.step_size == F::one() { - self.jump_to_loop_body_inc(loop_label); - } else { - // Increment the loop variable. - self.compiler.push( - AsmInstruction::AddFI(self.loop_var.fp(), self.loop_var.fp(), self.step_size), - None, - ); - } - - // Add a basic block for the loop condition. - self.compiler.basic_block(); - - // Jump to loop body if the loop condition still holds. - self.jump_to_loop_body(loop_label); - - // Add a jump instruction to the loop condition in the loop call block. - let label = self.compiler.block_label(); - let instr = AsmInstruction::j(label); - self.compiler.push_to_block(loop_call_label, instr, None); - - // Initialize the after loop block. - self.compiler.basic_block(); - - // Resolve the break label. 
- let label = self.compiler.block_label(); - self.compiler.break_label_map.insert(break_label, label); - - // Replace the break instruction with a jump to the after loop block. - for block in self.compiler.contains_break.iter() { - for instruction in - self.compiler.basic_blocks[block.as_canonical_u32() as usize].0.iter_mut() - { - if let AsmInstruction::Break(l) = instruction { - if *l == break_label { - *instruction = AsmInstruction::j(label); - } - } - } - } - - // self.compiler.contains_break.clear(); - } - - fn set_loop_var(&mut self) { - match self.start { - Usize::Const(start) => { - self.compiler.push( - AsmInstruction::AddFI(self.loop_var.fp(), ZERO, F::from_canonical_usize(start)), - None, - ); - } - Usize::Var(var) => { - self.compiler - .push(AsmInstruction::AddFI(self.loop_var.fp(), var.fp(), F::zero()), None); - } - } - } - - fn jump_to_loop_body(&mut self, loop_label: F) { - match self.end { - Usize::Const(end) => { - let instr = AsmInstruction::BneI( - loop_label, - self.loop_var.fp(), - F::from_canonical_usize(end), - ); - self.compiler.push(instr, None); - } - Usize::Var(end) => { - let instr = AsmInstruction::Bne(loop_label, self.loop_var.fp(), end.fp()); - self.compiler.push(instr, None); - } - } - } - - fn jump_to_loop_body_inc(&mut self, loop_label: F) { - match self.end { - Usize::Const(end) => { - let instr = AsmInstruction::BneIInc( - loop_label, - self.loop_var.fp(), - F::from_canonical_usize(end), - ); - self.compiler.push(instr, None); - } - Usize::Var(end) => { - let instr = AsmInstruction::BneInc(loop_label, self.loop_var.fp(), end.fp()); - self.compiler.push(instr, None); - } - } - } -} diff --git a/crates/recursion/compiler/src/asm/instruction.rs b/crates/recursion/compiler/src/asm/instruction.rs deleted file mode 100644 index c6f4578db4..0000000000 --- a/crates/recursion/compiler/src/asm/instruction.rs +++ /dev/null @@ -1,1162 +0,0 @@ -use alloc::{collections::BTreeMap, format}; -use core::fmt; - -use p3_field::{ExtensionField, PrimeField32}; -use sp1_recursion_core::{ - cpu::Instruction, - runtime::{canonical_i32_to_field, Opcode, PERMUTATION_WIDTH}, -}; - -use super::A0; - -#[derive(Debug, Clone)] -pub enum AsmInstruction { - /// Load word (dst, src, index, offset, size). - /// - /// Load a value from the address stored at src(fp) into dstfp). - LoadF(i32, i32, i32, F, F), - LoadFI(i32, i32, F, F, F), - - /// Store word (val, addr, index, offset, size) - /// - /// Store a value from val(fp) into the address stored at addr(fp) with given index and offset. - StoreF(i32, i32, i32, F, F), - StoreFI(i32, i32, F, F, F), - - /// Add, dst = lhs + rhs. - AddF(i32, i32, i32), - - /// Add immediate, dst = lhs + rhs. - AddFI(i32, i32, F), - - /// Subtract, dst = lhs - rhs. - SubF(i32, i32, i32), - - /// Subtract immediate, dst = lhs - rhs. - SubFI(i32, i32, F), - - /// Subtract value from immediate, dst = lhs - rhs. - SubFIN(i32, F, i32), - - /// Multiply, dst = lhs * rhs. - MulF(i32, i32, i32), - - /// Multiply immediate. - MulFI(i32, i32, F), - - /// Divide, dst = lhs / rhs. - DivF(i32, i32, i32), - - /// Divide immediate, dst = lhs / rhs. - DivFI(i32, i32, F), - - /// Divide value from immediate, dst = lhs / rhs. - DivFIN(i32, F, i32), - - /// Load an ext value (dst, src, index, offset, size). - /// - /// Load a value from the address stored at src(fp) into dst(fp). - LoadE(i32, i32, i32, F, F), - LoadEI(i32, i32, F, F, F), - - /// Store an ext value (val, addr, index, offset, size). 
- /// - /// Store a value from val(fp) into the address stored at addr(fp) with given index and offset. - StoreE(i32, i32, i32, F, F), - StoreEI(i32, i32, F, F, F), - - /// Add extension, dst = lhs + rhs. - AddE(i32, i32, i32), - - /// Add immediate extension, dst = lhs + rhs. - AddEI(i32, i32, EF), - - /// Subtract extension, dst = lhs - rhs. - SubE(i32, i32, i32), - - /// Subtract immediate extension, dst = lhs - rhs. - SubEI(i32, i32, EF), - - /// Subtract value from immediate extension, dst = lhs - rhs. - SubEIN(i32, EF, i32), - - /// Multiply extension, dst = lhs * rhs. - MulE(i32, i32, i32), - - /// Multiply immediate extension. - MulEI(i32, i32, EF), - - /// Divide extension, dst = lhs / rhs. - DivE(i32, i32, i32), - - /// Divide immediate extension, dst = lhs / rhs. - DivEI(i32, i32, EF), - - /// Divide value from immediate extension, dst = lhs / rhs. - DivEIN(i32, EF, i32), - - /// Jump and link. - Jal(i32, F, F), - - /// Jump and link value. - JalR(i32, i32, i32), - - /// Branch not equal. - Bne(F, i32, i32), - - /// Branch not equal increment c by 1. - BneInc(F, i32, i32), - - /// Branch not equal immediate. - BneI(F, i32, F), - - /// Branch not equal immediate and increment c by 1. - BneIInc(F, i32, F), - - /// Branch equal. - Beq(F, i32, i32), - - /// Branch equal immediate. - BeqI(F, i32, F), - - /// Branch not equal extension. - BneE(F, i32, i32), - - /// Branch not equal immediate extension. - BneEI(F, i32, EF), - - /// Branch equal extension. - BeqE(F, i32, i32), - - /// Branch equal immediate extension. - BeqEI(F, i32, EF), - - /// Trap. - Trap, - - /// Halt. - Halt, - - /// Break(label) - Break(F), - - /// HintBits(dst, src). - /// - /// Decompose the field element `src` into bits and write them to the array - /// starting at the address stored at `dst`. - HintBits(i32, i32), - - /// Perform a permutation of the Poseidon2 hash function on the array specified by the ptr. - Poseidon2Permute(i32, i32), - - /// Perform a Poseidon2 compress. - Poseidon2Compress(i32, i32, i32), - - /// Performs a Posedion2 absorb. - Poseidon2Absorb(i32, i32, i32), - - /// Performs a Poseidon2 finalize. - Poseidon2Finalize(i32, i32), - - /// Print a variable. - PrintV(i32), - - /// Print a felt. - PrintF(i32), - - /// Print an extension element. - PrintE(i32), - - /// Convert an extension element to field elements. - HintExt2Felt(i32, i32), - - /// Hint the lenght of the next vector of blocks. - HintLen(i32), - - /// Hint a vector of blocks. - Hint(i32), - - /// FRIFold(m, input). - FriFold(i32, i32), - - /// Commit(val, index). - Commit(i32, i32), - - /// RegisterPublicValue(val). - RegisterPublicValue(i32), - - LessThan(i32, i32, i32), - - CycleTracker(String), - - /// ExpReverseBitsLen instruction: (mathematical description) given `x`, `exp`, `len`, - /// bit-reverse the last `len` bits of `exp` and raise `x` to the power of the resulting - /// value. The arguments are a pointer to the addresss at which `x` is located (will be - /// written to with the result), a pointer to the address containing the bits of `exp` - /// stored as a little-endian bit array, and `len`. 
- ExpReverseBitsLen(i32, i32, i32), -} - -impl> AsmInstruction { - pub fn j(label: F) -> Self { - AsmInstruction::Jal(A0, label, F::zero()) - } - - pub fn to_machine(self, pc: usize, label_to_pc: &BTreeMap) -> Instruction { - let i32_f = canonical_i32_to_field::; - let i32_f_arr = |x: i32| [canonical_i32_to_field::(x), F::zero(), F::zero(), F::zero()]; - let f_u32 = |x: F| [x, F::zero(), F::zero(), F::zero()]; - let zero = [F::zero(), F::zero(), F::zero(), F::zero()]; - match self { - AsmInstruction::Break(_) => panic!("Unresolved break instruction"), - AsmInstruction::LoadF(dst, src, index, offset, size) => Instruction::new( - Opcode::LOAD, - i32_f(dst), - i32_f_arr(src), - i32_f_arr(index), - offset, - size, - false, - false, - "".to_string(), - ), - AsmInstruction::LoadFI(dst, src, index, offset, size) => Instruction::new( - Opcode::LOAD, - i32_f(dst), - i32_f_arr(src), - f_u32(index), - offset, - size, - false, - true, - "".to_string(), - ), - AsmInstruction::StoreF(value, addr, index, offset, size) => Instruction::new( - Opcode::STORE, - i32_f(value), - i32_f_arr(addr), - i32_f_arr(index), - offset, - size, - false, - false, - "".to_string(), - ), - AsmInstruction::StoreFI(value, addr, index, offset, size) => Instruction::new( - Opcode::STORE, - i32_f(value), - i32_f_arr(addr), - f_u32(index), - offset, - size, - false, - true, - "".to_string(), - ), - - AsmInstruction::AddF(dst, lhs, rhs) => Instruction::new( - Opcode::ADD, - i32_f(dst), - i32_f_arr(lhs), - i32_f_arr(rhs), - F::zero(), - F::zero(), - false, - false, - "".to_string(), - ), - AsmInstruction::LessThan(dst, lhs, rhs) => Instruction::new( - Opcode::LessThanF, - i32_f(dst), - i32_f_arr(lhs), - i32_f_arr(rhs), - F::zero(), - F::zero(), - false, - false, - "".to_string(), - ), - AsmInstruction::AddFI(dst, lhs, rhs) => Instruction::new( - Opcode::ADD, - i32_f(dst), - i32_f_arr(lhs), - f_u32(rhs), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::SubF(dst, lhs, rhs) => Instruction::new( - Opcode::SUB, - i32_f(dst), - i32_f_arr(lhs), - i32_f_arr(rhs), - F::zero(), - F::zero(), - false, - false, - "".to_string(), - ), - AsmInstruction::SubFI(dst, lhs, rhs) => Instruction::new( - Opcode::SUB, - i32_f(dst), - i32_f_arr(lhs), - f_u32(rhs), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::SubFIN(dst, lhs, rhs) => Instruction::new( - Opcode::SUB, - i32_f(dst), - f_u32(lhs), - i32_f_arr(rhs), - F::zero(), - F::zero(), - true, - false, - "".to_string(), - ), - AsmInstruction::MulF(dst, lhs, rhs) => Instruction::new( - Opcode::MUL, - i32_f(dst), - i32_f_arr(lhs), - i32_f_arr(rhs), - F::zero(), - F::zero(), - false, - false, - "".to_string(), - ), - AsmInstruction::MulFI(dst, lhs, rhs) => Instruction::new( - Opcode::MUL, - i32_f(dst), - i32_f_arr(lhs), - f_u32(rhs), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::DivF(dst, lhs, rhs) => Instruction::new( - Opcode::DIV, - i32_f(dst), - i32_f_arr(lhs), - i32_f_arr(rhs), - F::zero(), - F::zero(), - false, - false, - "".to_string(), - ), - AsmInstruction::DivFI(dst, lhs, rhs) => Instruction::new( - Opcode::DIV, - i32_f(dst), - i32_f_arr(lhs), - f_u32(rhs), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::DivFIN(dst, lhs, rhs) => Instruction::new( - Opcode::DIV, - i32_f(dst), - f_u32(lhs), - i32_f_arr(rhs), - F::zero(), - F::zero(), - true, - false, - "".to_string(), - ), - AsmInstruction::LoadE(dst, src, index, offset, size) => Instruction::new( - 
Opcode::LOAD, - i32_f(dst), - i32_f_arr(src), - i32_f_arr(index), - offset, - size, - false, - false, - "".to_string(), - ), - AsmInstruction::LoadEI(dst, src, index, offset, size) => Instruction::new( - Opcode::LOAD, - i32_f(dst), - i32_f_arr(src), - f_u32(index), - offset, - size, - false, - true, - "".to_string(), - ), - AsmInstruction::StoreE(value, addr, index, offset, size) => Instruction::new( - Opcode::STORE, - i32_f(value), - i32_f_arr(addr), - i32_f_arr(index), - offset, - size, - false, - false, - "".to_string(), - ), - AsmInstruction::StoreEI(value, addr, index, offset, size) => Instruction::new( - Opcode::STORE, - i32_f(value), - i32_f_arr(addr), - f_u32(index), - offset, - size, - false, - true, - "".to_string(), - ), - AsmInstruction::AddE(dst, lhs, rhs) => Instruction::new( - Opcode::EADD, - i32_f(dst), - i32_f_arr(lhs), - i32_f_arr(rhs), - F::zero(), - F::zero(), - false, - false, - "".to_string(), - ), - AsmInstruction::AddEI(dst, lhs, rhs) => Instruction::new( - Opcode::EADD, - i32_f(dst), - i32_f_arr(lhs), - rhs.as_base_slice().try_into().unwrap(), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::SubE(dst, lhs, rhs) => Instruction::new( - Opcode::ESUB, - i32_f(dst), - i32_f_arr(lhs), - i32_f_arr(rhs), - F::zero(), - F::zero(), - false, - false, - "".to_string(), - ), - AsmInstruction::SubEI(dst, lhs, rhs) => Instruction::new( - Opcode::ESUB, - i32_f(dst), - i32_f_arr(lhs), - rhs.as_base_slice().try_into().unwrap(), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::SubEIN(dst, lhs, rhs) => Instruction::new( - Opcode::ESUB, - i32_f(dst), - lhs.as_base_slice().try_into().unwrap(), - i32_f_arr(rhs), - F::zero(), - F::zero(), - true, - false, - "".to_string(), - ), - AsmInstruction::MulE(dst, lhs, rhs) => Instruction::new( - Opcode::EMUL, - i32_f(dst), - i32_f_arr(lhs), - i32_f_arr(rhs), - F::zero(), - F::zero(), - false, - false, - "".to_string(), - ), - AsmInstruction::MulEI(dst, lhs, rhs) => Instruction::new( - Opcode::EMUL, - i32_f(dst), - i32_f_arr(lhs), - rhs.as_base_slice().try_into().unwrap(), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::DivE(dst, lhs, rhs) => Instruction::new( - Opcode::EDIV, - i32_f(dst), - i32_f_arr(lhs), - i32_f_arr(rhs), - F::zero(), - F::zero(), - false, - false, - "".to_string(), - ), - AsmInstruction::DivEI(dst, lhs, rhs) => Instruction::new( - Opcode::EDIV, - i32_f(dst), - i32_f_arr(lhs), - rhs.as_base_slice().try_into().unwrap(), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::DivEIN(dst, lhs, rhs) => Instruction::new( - Opcode::EDIV, - i32_f(dst), - lhs.as_base_slice().try_into().unwrap(), - i32_f_arr(rhs), - F::zero(), - F::zero(), - true, - false, - "".to_string(), - ), - - AsmInstruction::Beq(label, lhs, rhs) => { - let offset = - F::from_canonical_usize(label_to_pc[&label]) - F::from_canonical_usize(pc); - Instruction::new( - Opcode::BEQ, - i32_f(lhs), - i32_f_arr(rhs), - f_u32(offset), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ) - } - AsmInstruction::BeqI(label, lhs, rhs) => { - let offset = - F::from_canonical_usize(label_to_pc[&label]) - F::from_canonical_usize(pc); - Instruction::new( - Opcode::BEQ, - i32_f(lhs), - f_u32(rhs), - f_u32(offset), - F::zero(), - F::zero(), - true, - true, - "".to_string(), - ) - } - AsmInstruction::Bne(label, lhs, rhs) => { - let offset = - F::from_canonical_usize(label_to_pc[&label]) - F::from_canonical_usize(pc); - Instruction::new( - 
Opcode::BNE, - i32_f(lhs), - i32_f_arr(rhs), - f_u32(offset), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ) - } - AsmInstruction::BneInc(label, lhs, rhs) => { - let offset = - F::from_canonical_usize(label_to_pc[&label]) - F::from_canonical_usize(pc); - Instruction::new( - Opcode::BNEINC, - i32_f(lhs), - i32_f_arr(rhs), - f_u32(offset), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ) - } - AsmInstruction::BneI(label, lhs, rhs) => { - let offset = - F::from_canonical_usize(label_to_pc[&label]) - F::from_canonical_usize(pc); - Instruction::new( - Opcode::BNE, - i32_f(lhs), - f_u32(rhs), - f_u32(offset), - F::zero(), - F::zero(), - true, - true, - "".to_string(), - ) - } - AsmInstruction::BneIInc(label, lhs, rhs) => { - let offset = - F::from_canonical_usize(label_to_pc[&label]) - F::from_canonical_usize(pc); - Instruction::new( - Opcode::BNEINC, - i32_f(lhs), - f_u32(rhs), - f_u32(offset), - F::zero(), - F::zero(), - true, - true, - "".to_string(), - ) - } - AsmInstruction::BneE(label, lhs, rhs) => { - let offset = - F::from_canonical_usize(label_to_pc[&label]) - F::from_canonical_usize(pc); - Instruction::new( - Opcode::BNE, - i32_f(lhs), - i32_f_arr(rhs), - f_u32(offset), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ) - } - AsmInstruction::BneEI(label, lhs, rhs) => { - let offset = - F::from_canonical_usize(label_to_pc[&label]) - F::from_canonical_usize(pc); - Instruction::new( - Opcode::BNE, - i32_f(lhs), - rhs.as_base_slice().try_into().unwrap(), - f_u32(offset), - F::zero(), - F::zero(), - true, - true, - "".to_string(), - ) - } - AsmInstruction::BeqE(label, lhs, rhs) => { - let offset = - F::from_canonical_usize(label_to_pc[&label]) - F::from_canonical_usize(pc); - Instruction::new( - Opcode::BEQ, - i32_f(lhs), - i32_f_arr(rhs), - f_u32(offset), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ) - } - AsmInstruction::BeqEI(label, lhs, rhs) => { - let offset = - F::from_canonical_usize(label_to_pc[&label]) - F::from_canonical_usize(pc); - Instruction::new( - Opcode::BEQ, - i32_f(lhs), - rhs.as_base_slice().try_into().unwrap(), - f_u32(offset), - F::zero(), - F::zero(), - true, - true, - "".to_string(), - ) - } - AsmInstruction::Jal(dst, label, offset) => { - let pc_offset = - F::from_canonical_usize(label_to_pc[&label]) - F::from_canonical_usize(pc); - Instruction::new( - Opcode::JAL, - i32_f(dst), - f_u32(pc_offset), - f_u32(offset), - F::zero(), - F::zero(), - true, - true, - "".to_string(), - ) - } - AsmInstruction::JalR(dst, label, offset) => Instruction::new( - Opcode::JALR, - i32_f(dst), - i32_f_arr(label), - i32_f_arr(offset), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::Trap => Instruction::new( - Opcode::TRAP, - F::zero(), - zero, - zero, - F::zero(), - F::zero(), - false, - false, - "".to_string(), - ), - AsmInstruction::Halt => Instruction::new( - Opcode::HALT, - F::zero(), - zero, - zero, - F::zero(), - F::zero(), - false, - false, - "".to_string(), - ), - AsmInstruction::HintBits(dst, src) => Instruction::new( - Opcode::HintBits, - i32_f(dst), - i32_f_arr(src), - f_u32(F::zero()), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::Poseidon2Permute(dst, src) => Instruction::new( - Opcode::Poseidon2Compress, - i32_f(dst), - i32_f_arr(src), - i32_f_arr(src), - F::from_canonical_usize(PERMUTATION_WIDTH / 2), - F::zero(), - false, - false, - "".to_string(), - ), - AsmInstruction::PrintF(dst) => Instruction::new( - Opcode::PrintF, - 
i32_f(dst), - f_u32(F::zero()), - f_u32(F::zero()), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::PrintV(dst) => Instruction::new( - Opcode::PrintF, - i32_f(dst), - f_u32(F::zero()), - f_u32(F::zero()), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::PrintE(dst) => Instruction::new( - Opcode::PrintE, - i32_f(dst), - f_u32(F::zero()), - f_u32(F::zero()), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::CycleTracker(name) => Instruction::new( - Opcode::CycleTracker, - i32_f(0), - f_u32(F::zero()), - f_u32(F::zero()), - F::zero(), - F::zero(), - true, - false, - name, - ), - AsmInstruction::HintExt2Felt(dst, src) => Instruction::new( - Opcode::HintExt2Felt, - i32_f(dst), - i32_f_arr(src), - f_u32(F::zero()), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::HintLen(dst) => Instruction::new( - Opcode::HintLen, - i32_f(dst), - i32_f_arr(dst), - f_u32(F::zero()), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::Hint(dst) => Instruction::new( - Opcode::Hint, - i32_f(dst), - i32_f_arr(dst), - f_u32(F::zero()), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::FriFold(m, ptr) => Instruction::new( - Opcode::FRIFold, - i32_f(m), - i32_f_arr(ptr), - f_u32(F::zero()), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::Poseidon2Compress(result, src1, src2) => Instruction::new( - Opcode::Poseidon2Compress, - i32_f(result), - i32_f_arr(src1), - i32_f_arr(src2), - F::zero(), - F::zero(), - false, - false, - "".to_string(), - ), - AsmInstruction::Poseidon2Absorb(hash_and_absorb_num, input_ptr, input_len) => { - Instruction::new( - Opcode::Poseidon2Absorb, - i32_f(hash_and_absorb_num), - i32_f_arr(input_ptr), - i32_f_arr(input_len), - F::zero(), - F::zero(), - false, - false, - "".to_string(), - ) - } - AsmInstruction::Poseidon2Finalize(hash_num, output_ptr) => Instruction::new( - Opcode::Poseidon2Finalize, - i32_f(hash_num), - i32_f_arr(output_ptr), - f_u32(F::zero()), - F::zero(), - F::zero(), - false, - false, - "".to_string(), - ), - AsmInstruction::Commit(val, index) => Instruction::new( - Opcode::Commit, - i32_f(val), - i32_f_arr(index), - f_u32(F::zero()), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::RegisterPublicValue(val) => Instruction::new( - Opcode::RegisterPublicValue, - i32_f(val), - f_u32(F::zero()), - f_u32(F::zero()), - F::zero(), - F::zero(), - false, - true, - "".to_string(), - ), - AsmInstruction::ExpReverseBitsLen(base, ptr, len) => Instruction::new( - Opcode::ExpReverseBitsLen, - i32_f(base), - i32_f_arr(ptr), - i32_f_arr(len), - F::zero(), - F::zero(), - false, - false, - "".to_string(), - ), - } - } - - pub fn fmt(&self, labels: &BTreeMap, f: &mut fmt::Formatter) -> fmt::Result { - match self { - AsmInstruction::Break(_) => panic!("Unresolved break instruction"), - AsmInstruction::LessThan(dst, left, right) => { - write!(f, "lt ({})fp, {}, {}", dst, left, right,) - } - AsmInstruction::LoadF(dst, src, index, offset, size) => { - write!(f, "lw ({})fp, ({})fp, ({})fp, {}, {}", dst, src, index, offset, size) - } - AsmInstruction::LoadFI(dst, src, index, offset, size) => { - write!(f, "lwi ({})fp, ({})fp, {}, {}, {}", dst, src, index, offset, size) - } - AsmInstruction::StoreF(dst, src, index, offset, size) => { - write!(f, "sw ({})fp, ({})fp, ({})fp, {}, {}", dst, src, index, offset, size) - } - 
AsmInstruction::StoreFI(dst, src, index, offset, size) => { - write!(f, "swi ({})fp, ({})fp, {}, {}, {}", dst, src, index, offset, size) - } - AsmInstruction::AddF(dst, lhs, rhs) => { - write!(f, "add ({})fp, ({})fp, ({})fp", dst, lhs, rhs) - } - AsmInstruction::AddFI(dst, lhs, rhs) => { - write!(f, "addi ({})fp, ({})fp, {}", dst, lhs, rhs) - } - AsmInstruction::SubF(dst, lhs, rhs) => { - write!(f, "sub ({})fp, ({})fp, ({})fp", dst, lhs, rhs) - } - AsmInstruction::SubFI(dst, lhs, rhs) => { - write!(f, "subi ({})fp, ({})fp, {}", dst, lhs, rhs) - } - AsmInstruction::SubFIN(dst, lhs, rhs) => { - write!(f, "subin ({})fp, {}, ({})fp", dst, lhs, rhs) - } - AsmInstruction::MulF(dst, lhs, rhs) => { - write!(f, "mul ({})fp, ({})fp, ({})fp", dst, lhs, rhs) - } - AsmInstruction::MulFI(dst, lhs, rhs) => { - write!(f, "muli ({})fp, ({})fp, {}", dst, lhs, rhs) - } - AsmInstruction::DivF(dst, lhs, rhs) => { - write!(f, "div ({})fp, ({})fp, ({})fp", dst, lhs, rhs) - } - AsmInstruction::DivFI(dst, lhs, rhs) => { - write!(f, "divi ({})fp, ({})fp, {}", dst, lhs, rhs) - } - AsmInstruction::DivFIN(dst, lhs, rhs) => { - write!(f, "divin ({})fp, {}, ({})fp", dst, lhs, rhs) - } - AsmInstruction::LoadE(dst, src, index, offset, size) => { - write!(f, "le ({})fp, ({})fp, ({})fp, {}, {}", dst, src, index, offset, size) - } - AsmInstruction::LoadEI(dst, src, index, offset, size) => { - write!(f, "lei ({})fp, ({})fp, {}, {}, {}", dst, src, index, offset, size) - } - AsmInstruction::StoreE(dst, src, index, offset, size) => { - write!(f, "se ({})fp, ({})fp, ({})fp, {}, {}", dst, src, index, offset, size) - } - AsmInstruction::StoreEI(dst, src, index, offset, size) => { - write!(f, "sei ({})fp, ({})fp, {}, {}, {}", dst, src, index, offset, size) - } - AsmInstruction::AddE(dst, lhs, rhs) => { - write!(f, "eadd ({})fp, ({})fp, ({})fp", dst, lhs, rhs) - } - AsmInstruction::AddEI(dst, lhs, rhs) => { - write!(f, "eaddi ({})fp, ({})fp, {}", dst, lhs, rhs) - } - AsmInstruction::SubE(dst, lhs, rhs) => { - write!(f, "esub ({})fp, ({})fp, ({})fp", dst, lhs, rhs) - } - AsmInstruction::SubEI(dst, lhs, rhs) => { - write!(f, "esubi ({})fp, ({})fp, {}", dst, lhs, rhs) - } - AsmInstruction::SubEIN(dst, lhs, rhs) => { - write!(f, "esubin ({})fp, {}, ({})fp", dst, lhs, rhs) - } - AsmInstruction::MulE(dst, lhs, rhs) => { - write!(f, "emul ({})fp, ({})fp, ({})fp", dst, lhs, rhs) - } - AsmInstruction::MulEI(dst, lhs, rhs) => { - write!(f, "emuli ({})fp, ({})fp, {}", dst, lhs, rhs) - } - AsmInstruction::DivE(dst, lhs, rhs) => { - write!(f, "ediv ({})fp, ({})fp, ({})fp", dst, lhs, rhs) - } - AsmInstruction::DivEI(dst, lhs, rhs) => { - write!(f, "edivi ({})fp, ({})fp, {}", dst, lhs, rhs) - } - AsmInstruction::DivEIN(dst, lhs, rhs) => { - write!(f, "edivin ({})fp, {}, ({})fp", dst, lhs, rhs) - } - AsmInstruction::Jal(dst, label, offset) => { - if *offset == F::zero() { - return write!( - f, - "j ({})fp, {}", - dst, - labels.get(label).unwrap_or(&format!(".L{}", label)) - ); - } - write!( - f, - "jal ({})fp, {}, {}", - dst, - labels.get(label).unwrap_or(&format!(".L{}", label)), - offset - ) - } - AsmInstruction::JalR(dst, label, offset) => { - write!(f, "jalr ({})fp, ({})fp, ({})fp", dst, label, offset) - } - AsmInstruction::Bne(label, lhs, rhs) => { - write!( - f, - "bne {}, ({})fp, ({})fp", - labels.get(label).unwrap_or(&format!(".L{}", label)), - lhs, - rhs - ) - } - AsmInstruction::BneI(label, lhs, rhs) => { - write!( - f, - "bnei {}, ({})fp, {}", - labels.get(label).unwrap_or(&format!(".L{}", label)), - lhs, - rhs - ) - } - 
AsmInstruction::BneInc(label, lhs, rhs) => { - write!( - f, - "bneinc {}, ({})fp, {}", - labels.get(label).unwrap_or(&format!(".L{}", label)), - lhs, - rhs - ) - } - AsmInstruction::BneIInc(label, lhs, rhs) => { - write!( - f, - "bneiinc {}, ({})fp, {}", - labels.get(label).unwrap_or(&format!(".L{}", label)), - lhs, - rhs - ) - } - AsmInstruction::Beq(label, lhs, rhs) => { - write!( - f, - "beq {}, ({})fp, ({})fp", - labels.get(label).unwrap_or(&format!(".L{}", label)), - lhs, - rhs - ) - } - AsmInstruction::BeqI(label, lhs, rhs) => { - write!( - f, - "beqi {}, ({})fp, {}", - labels.get(label).unwrap_or(&format!(".L{}", label)), - lhs, - rhs - ) - } - AsmInstruction::BneE(label, lhs, rhs) => { - write!( - f, - "ebne {}, ({})fp, ({})fp", - labels.get(label).unwrap_or(&format!(".L{}", label)), - lhs, - rhs - ) - } - AsmInstruction::BneEI(label, lhs, rhs) => { - write!( - f, - "ebnei {}, ({})fp, {}", - labels.get(label).unwrap_or(&format!(".L{}", label)), - lhs, - rhs - ) - } - AsmInstruction::BeqE(label, lhs, rhs) => { - write!( - f, - "ebeq {}, ({})fp, ({})fp", - labels.get(label).unwrap_or(&format!(".L{}", label)), - lhs, - rhs - ) - } - AsmInstruction::BeqEI(label, lhs, rhs) => { - write!( - f, - "ebeqi {}, ({})fp, {}", - labels.get(label).unwrap_or(&format!(".L{}", label)), - lhs, - rhs - ) - } - AsmInstruction::Trap => write!(f, "trap"), - AsmInstruction::Halt => write!(f, "halt"), - AsmInstruction::HintBits(dst, src) => write!(f, "hint_bits ({})fp, ({})fp", dst, src), - AsmInstruction::Poseidon2Permute(dst, src) => { - write!(f, "poseidon2_permute ({})fp, ({})fp", dst, src) - } - AsmInstruction::PrintF(dst) => { - write!(f, "print_f ({})fp", dst) - } - AsmInstruction::PrintV(dst) => { - write!(f, "print_v ({})fp", dst) - } - AsmInstruction::PrintE(dst) => { - write!(f, "print_e ({})fp", dst) - } - AsmInstruction::HintExt2Felt(dst, src) => { - write!(f, "hintExt2felt ({})fp, {})fp", dst, src) - } - AsmInstruction::HintLen(dst) => write!(f, "hint_len ({})fp", dst), - AsmInstruction::Hint(dst) => write!(f, "hint ({})fp", dst), - AsmInstruction::FriFold(m, input_ptr) => { - write!(f, "fri_fold ({})fp, ({})fp", m, input_ptr) - } - AsmInstruction::Poseidon2Compress(result, src1, src2) => { - write!(f, "poseidon2_compress ({})fp, {})fp, {})fp", result, src1, src2) - } - AsmInstruction::Poseidon2Absorb(hash_and_absorb_num, input_ptr, input_len) => { - write!( - f, - "poseidon2_absorb ({})fp, {})fp, ({})fp", - hash_and_absorb_num, input_ptr, input_len, - ) - } - AsmInstruction::Poseidon2Finalize(hash_num, output_ptr) => { - write!(f, "poseidon2_finalize ({})fp, ({})fp", hash_num, output_ptr,) - } - AsmInstruction::Commit(val, index) => { - write!(f, "commit ({})fp ({})fp", val, index) - } - AsmInstruction::RegisterPublicValue(val) => { - write!(f, "register_public_value ({})fp", val) - } - AsmInstruction::CycleTracker(name) => { - write!(f, "cycle-tracker {}", name) - } - AsmInstruction::ExpReverseBitsLen(base, ptr, len) => { - write!(f, "exp_reverse_bits_len ({})fp, ({})fp, ({})fp", base, ptr, len) - } - } - } -} diff --git a/crates/recursion/compiler/src/asm/mod.rs b/crates/recursion/compiler/src/asm/mod.rs deleted file mode 100644 index 3abda61194..0000000000 --- a/crates/recursion/compiler/src/asm/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -mod builder; -mod code; -mod compiler; -mod config; -mod instruction; -mod utils; - -pub use builder::*; -pub use code::*; -pub use compiler::*; -pub use config::*; -pub use instruction::*; -pub use utils::*; diff --git 
a/crates/recursion/compiler/src/asm/utils.rs b/crates/recursion/compiler/src/asm/utils.rs deleted file mode 100644 index 18fb056bbb..0000000000 --- a/crates/recursion/compiler/src/asm/utils.rs +++ /dev/null @@ -1,34 +0,0 @@ -use p3_field::PrimeField32; - -use crate::prelude::{MemIndex, Usize}; - -/// Represents a memory index triple. -pub enum IndexTriple { - Var(i32, F, F), - Const(F, F, F), -} - -impl MemIndex { - pub fn fp(&self) -> IndexTriple { - match self.index { - Usize::Const(index) => IndexTriple::Const( - F::from_canonical_usize(index), - F::from_canonical_usize(self.offset), - F::from_canonical_usize(self.size), - ), - Usize::Var(index) => IndexTriple::Var( - index.fp(), - F::from_canonical_usize(self.offset), - F::from_canonical_usize(self.size), - ), - } - } -} - -/// A value or a constant. -pub enum ValueOrConst { - Val(i32), - ExtVal(i32), - Const(F), - ExtConst(EF), -} diff --git a/crates/recursion/compiler/src/circuit/builder.rs b/crates/recursion/compiler/src/circuit/builder.rs index cca0788722..418aeb4bfb 100644 --- a/crates/recursion/compiler/src/circuit/builder.rs +++ b/crates/recursion/compiler/src/circuit/builder.rs @@ -2,11 +2,12 @@ use std::iter::repeat; +use p3_baby_bear::BabyBear; use p3_field::{AbstractExtensionField, AbstractField}; use sp1_recursion_core::air::RecursionPublicValues; use crate::prelude::*; -use sp1_recursion_core_v2::{chips::poseidon2_skinny::WIDTH, D, DIGEST_SIZE, HASH_RATE}; +use sp1_recursion_core::{chips::poseidon2_skinny::WIDTH, D, DIGEST_SIZE, HASH_RATE}; pub trait CircuitV2Builder { fn bits2num_v2_f( @@ -33,7 +34,7 @@ pub trait CircuitV2Builder { fn hint_felts_v2(&mut self, len: usize) -> Vec>; } -impl CircuitV2Builder for Builder { +impl> CircuitV2Builder for Builder { fn bits2num_v2_f( &mut self, bits: impl IntoIterator::F>>, @@ -49,7 +50,7 @@ impl CircuitV2Builder for Builder { /// Converts a felt to bits inside a circuit. fn num2bits_v2_f(&mut self, num: Felt, num_bits: usize) -> Vec> { let output = std::iter::from_fn(|| Some(self.uninit())).take(num_bits).collect::>(); - self.push(DslIr::CircuitV2HintBitsF(output.clone(), num)); + self.push_op(DslIr::CircuitV2HintBitsF(output.clone(), num)); let x: SymbolicFelt<_> = output .iter() @@ -60,6 +61,35 @@ impl CircuitV2Builder for Builder { }) .sum(); + // Range check the bits to be less than the BabyBear modulus. + + assert!(num_bits <= 31, "num_bits must be less than or equal to 31"); + + // If there are fewer than 31 bits, there is nothing to check. + if num_bits > 30 { + // Since the BabyBear modulus is 2^31 - 2^27 + 1, if any of the top `4` bits are zero, the + // number is less than the modulus, and no further check is needed. Otherwise, if all the top + // `4` bits are `1`, we need to check that all the bottom `27` are `0`. + + // Get a flag that is zero if any of the top `4` bits are zero, and one otherwise. We + // can do this by simply taking their product (which is bitwise AND). + let are_all_top_bits_one: Felt<_> = self.eval( + output + .iter() + .rev() + .take(4) + .copied() + .map(SymbolicFelt::from) + .product::>(), + ); + + // Assert that if all the top `4` bits are one, then all the bottom `27` bits are zero. + for bit in output.iter().take(27).copied() { + self.assert_felt_eq(bit * are_all_top_bits_one, C::F::zero()); + } + } + + // Check that the original number matches the bit decomposition.
self.assert_felt_eq(x, num); output @@ -72,18 +102,18 @@ impl CircuitV2Builder for Builder { power_bits: Vec>, ) -> Felt { let output: Felt<_> = self.uninit(); - self.operations.push(DslIr::CircuitV2ExpReverseBits(output, input, power_bits)); + self.push_op(DslIr::CircuitV2ExpReverseBits(output, input, power_bits)); output } /// Applies the Poseidon2 permutation to the given array. fn poseidon2_permute_v2(&mut self, array: [Felt; WIDTH]) -> [Felt; WIDTH] { let output: [Felt; WIDTH] = core::array::from_fn(|_| self.uninit()); - self.operations.push(DslIr::CircuitV2Poseidon2PermuteBabyBear(Box::new((output, array)))); + self.push_op(DslIr::CircuitV2Poseidon2PermuteBabyBear(Box::new((output, array)))); output } - /// Applies the Poseidon2 permutation to the given array. + /// Applies the Poseidon2 hash function to the given array. /// /// Reference: [p3_symmetric::PaddingFreeSponge] fn poseidon2_hash_v2(&mut self, input: &[Felt]) -> [Felt; DIGEST_SIZE] { @@ -119,14 +149,14 @@ impl CircuitV2Builder for Builder { alpha_pow_output: uninit_vec(input.alpha_pow_input.len()), ro_output: uninit_vec(input.ro_input.len()), }; - self.operations.push(DslIr::CircuitV2FriFold(Box::new((output.clone(), input)))); + self.push_op(DslIr::CircuitV2FriFold(Box::new((output.clone(), input)))); output } /// Decomposes an ext into its felt coordinates. fn ext2felt_v2(&mut self, ext: Ext) -> [Felt; D] { let felts = core::array::from_fn(|_| self.uninit()); - self.operations.push(DslIr::CircuitExt2Felt(felts, ext)); + self.push_op(DslIr::CircuitExt2Felt(felts, ext)); // Verify that the decomposed extension element is correct. let mut reconstructed_ext: Ext = self.constant(C::EF::zero()); for i in 0..4 { @@ -142,15 +172,15 @@ impl CircuitV2Builder for Builder { // Commits public values. fn commit_public_values_v2(&mut self, public_values: RecursionPublicValues>) { - self.operations.push(DslIr::CircuitV2CommitPublicValues(Box::new(public_values))); + self.push_op(DslIr::CircuitV2CommitPublicValues(Box::new(public_values))); } fn cycle_tracker_v2_enter(&mut self, name: String) { - self.operations.push(DslIr::CycleTrackerV2Enter(name)); + self.push_op(DslIr::CycleTrackerV2Enter(name)); } fn cycle_tracker_v2_exit(&mut self) { - self.operations.push(DslIr::CycleTrackerV2Exit); + self.push_op(DslIr::CycleTrackerV2Exit); } /// Hint a single felt. @@ -166,14 +196,14 @@ impl CircuitV2Builder for Builder { /// Hint a vector of felts. fn hint_felts_v2(&mut self, len: usize) -> Vec> { let arr = std::iter::from_fn(|| Some(self.uninit())).take(len).collect::>(); - self.operations.push(DslIr::CircuitV2HintFelts(arr.clone())); + self.push_op(DslIr::CircuitV2HintFelts(arr.clone())); arr } /// Hint a vector of exts. 
fn hint_exts_v2(&mut self, len: usize) -> Vec> { let arr = std::iter::from_fn(|| Some(self.uninit())).take(len).collect::>(); - self.operations.push(DslIr::CircuitV2HintExts(arr.clone())); + self.push_op(DslIr::CircuitV2HintExts(arr.clone())); arr } } diff --git a/crates/recursion/compiler/src/circuit/compiler.rs b/crates/recursion/compiler/src/circuit/compiler.rs index 377d92ab92..04b59bc6e5 100644 --- a/crates/recursion/compiler/src/circuit/compiler.rs +++ b/crates/recursion/compiler/src/circuit/compiler.rs @@ -1,22 +1,19 @@ use chips::poseidon2_skinny::WIDTH; use core::fmt::Debug; use instruction::{FieldEltType, HintBitsInstr, HintExt2FeltsInstr, HintInstr, PrintInstr}; +use itertools::Itertools; use p3_field::{ AbstractExtensionField, AbstractField, Field, PrimeField, PrimeField64, TwoAdicField, }; use sp1_core_machine::utils::{sp1_debug_mode, SpanBuilder}; -use sp1_recursion_core::air::{Block, RecursionPublicValues, RECURSIVE_PROOF_NUM_PV_ELTS}; -use sp1_recursion_core_v2::{BaseAluInstr, BaseAluOpcode}; -use std::{ - borrow::Borrow, - cmp::Ordering, - collections::BTreeMap, - iter::repeat, - mem::{take, transmute}, +use sp1_recursion_core::{ + air::{Block, RecursionPublicValues, RECURSIVE_PROOF_NUM_PV_ELTS}, + BaseAluInstr, BaseAluOpcode, }; +use std::{borrow::Borrow, collections::HashMap, iter::repeat, mem::transmute}; use vec_map::VecMap; -use sp1_recursion_core_v2::*; +use sp1_recursion_core::*; use crate::prelude::*; @@ -27,7 +24,7 @@ pub struct AsmCompiler { /// Map the frame pointers of the variables to the "physical" addresses. pub virtual_to_physical: VecMap>, /// Map base or extension field constants to "physical" addresses and mults. - pub consts: BTreeMap, Address>, + pub consts: HashMap, (Address, C::F)>, /// Map each "physical" address to its read count. pub addr_to_mult: VecMap, } @@ -38,11 +35,12 @@ where { /// Allocate a fresh address. Checks that the address space is not full. pub fn alloc(next_addr: &mut C::F) -> Address { + let id = Address(*next_addr); *next_addr += C::F::one(); if next_addr.is_zero() { panic!("out of address space"); } - Address(*next_addr) + id } /// Map `fp` to its existing address without changing its mult. @@ -107,7 +105,7 @@ where /// /// Ensures that `addr` has already been assigned a `mult`. pub fn read_ghost_addr(&mut self, addr: Address) -> &mut C::F { - self.read_addr_internal(addr, false) + self.read_addr_internal(addr, true) } fn read_addr_internal(&mut self, addr: Address, increment_mult: bool) -> &mut C::F { @@ -142,36 +140,27 @@ where /// /// Increments the mult, first creating an entry if it does not yet exist. pub fn read_const(&mut self, imm: Imm) -> Address { - use vec_map::Entry; - let addr = *self.consts.entry(imm).or_insert_with(|| Self::alloc(&mut self.next_addr)); - match self.addr_to_mult.entry(addr.as_usize()) { - Entry::Vacant(entry) => drop(entry.insert(C::F::one())), - Entry::Occupied(mut entry) => *entry.get_mut() += C::F::one(), - } - addr + self.consts + .entry(imm) + .and_modify(|(_, x)| *x += C::F::one()) + .or_insert_with(|| (Self::alloc(&mut self.next_addr), C::F::one())) + .0 } /// Read a constant (a.k.a. immediate). /// /// Does not increment the mult. Creates an entry if it does not yet exist. 
pub fn read_ghost_const(&mut self, imm: Imm) -> Address { - let addr = *self.consts.entry(imm).or_insert_with(|| Self::alloc(&mut self.next_addr)); - self.addr_to_mult.entry(addr.as_usize()).or_insert_with(C::F::zero); - addr + self.consts.entry(imm).or_insert_with(|| (Self::alloc(&mut self.next_addr), C::F::zero())).0 } - /// Turn `dst` into an alias for the constant `src`. - fn mem_write_const(&mut self, dst: impl HasVirtualAddress, src: Imm) { - use vec_map::Entry; - let src_addr = src.read_ghost(self); - match self.virtual_to_physical.entry(dst.vaddr()) { - Entry::Vacant(entry) => drop(entry.insert(src_addr)), - Entry::Occupied(entry) => panic!( - "unexpected entry: virtual_to_physical[{:?}] = {:?}", - dst.vaddr(), - entry.get() - ), - } + fn mem_write_const(&mut self, dst: impl Reg, src: Imm) -> Instruction { + Instruction::Mem(MemInstr { + addrs: MemIo { inner: dst.write(self) }, + vals: MemIo { inner: src.as_block() }, + mult: C::F::zero(), + kind: MemAccessKind::Write, + }) } fn base_alu( @@ -211,7 +200,7 @@ where use BaseAluOpcode::*; let [diff, out] = core::array::from_fn(|_| Self::alloc(&mut self.next_addr)); f(self.base_alu(SubF, diff, lhs, rhs)); - f(self.base_alu(DivF, out, diff, Imm::f(C::F::zero()))); + f(self.base_alu(DivF, out, diff, Imm::F(C::F::zero()))); } fn base_assert_ne( @@ -224,7 +213,7 @@ where let [diff, out] = core::array::from_fn(|_| Self::alloc(&mut self.next_addr)); f(self.base_alu(SubF, diff, lhs, rhs)); - f(self.base_alu(DivF, out, Imm::f(C::F::one()), diff)); + f(self.base_alu(DivF, out, Imm::F(C::F::one()), diff)); } fn ext_assert_eq( @@ -237,7 +226,7 @@ where let [diff, out] = core::array::from_fn(|_| Self::alloc(&mut self.next_addr)); f(self.ext_alu(SubE, diff, lhs, rhs)); - f(self.ext_alu(DivE, out, diff, Imm::ef(C::EF::zero()))); + f(self.ext_alu(DivE, out, diff, Imm::EF(C::EF::zero()))); } fn ext_assert_ne( @@ -250,7 +239,7 @@ where let [diff, out] = core::array::from_fn(|_| Self::alloc(&mut self.next_addr)); f(self.ext_alu(SubE, diff, lhs, rhs)); - f(self.ext_alu(DivE, out, Imm::ef(C::EF::one()), diff)); + f(self.ext_alu(DivE, out, Imm::EF(C::EF::one()), diff)); } fn poseidon2_permute( @@ -392,71 +381,71 @@ where let mut f = |instr| consumer(Ok(instr)); match ir_instr { - DslIr::ImmV(dst, src) => self.mem_write_const(dst, Imm::f(src)), - DslIr::ImmF(dst, src) => self.mem_write_const(dst, Imm::f(src)), - DslIr::ImmE(dst, src) => self.mem_write_const(dst, Imm::ef(src)), + DslIr::ImmV(dst, src) => f(self.mem_write_const(dst, Imm::F(src))), + DslIr::ImmF(dst, src) => f(self.mem_write_const(dst, Imm::F(src))), + DslIr::ImmE(dst, src) => f(self.mem_write_const(dst, Imm::EF(src))), DslIr::AddV(dst, lhs, rhs) => f(self.base_alu(AddF, dst, lhs, rhs)), - DslIr::AddVI(dst, lhs, rhs) => f(self.base_alu(AddF, dst, lhs, Imm::f(rhs))), + DslIr::AddVI(dst, lhs, rhs) => f(self.base_alu(AddF, dst, lhs, Imm::F(rhs))), DslIr::AddF(dst, lhs, rhs) => f(self.base_alu(AddF, dst, lhs, rhs)), - DslIr::AddFI(dst, lhs, rhs) => f(self.base_alu(AddF, dst, lhs, Imm::f(rhs))), + DslIr::AddFI(dst, lhs, rhs) => f(self.base_alu(AddF, dst, lhs, Imm::F(rhs))), DslIr::AddE(dst, lhs, rhs) => f(self.ext_alu(AddE, dst, lhs, rhs)), - DslIr::AddEI(dst, lhs, rhs) => f(self.ext_alu(AddE, dst, lhs, Imm::ef(rhs))), + DslIr::AddEI(dst, lhs, rhs) => f(self.ext_alu(AddE, dst, lhs, Imm::EF(rhs))), DslIr::AddEF(dst, lhs, rhs) => f(self.ext_alu(AddE, dst, lhs, rhs)), - DslIr::AddEFI(dst, lhs, rhs) => f(self.ext_alu(AddE, dst, lhs, Imm::f(rhs))), - DslIr::AddEFFI(dst, lhs, rhs) => f(self.ext_alu(AddE, 
dst, lhs, Imm::ef(rhs))), + DslIr::AddEFI(dst, lhs, rhs) => f(self.ext_alu(AddE, dst, lhs, Imm::F(rhs))), + DslIr::AddEFFI(dst, lhs, rhs) => f(self.ext_alu(AddE, dst, lhs, Imm::EF(rhs))), DslIr::SubV(dst, lhs, rhs) => f(self.base_alu(SubF, dst, lhs, rhs)), - DslIr::SubVI(dst, lhs, rhs) => f(self.base_alu(SubF, dst, lhs, Imm::f(rhs))), - DslIr::SubVIN(dst, lhs, rhs) => f(self.base_alu(SubF, dst, Imm::f(lhs), rhs)), + DslIr::SubVI(dst, lhs, rhs) => f(self.base_alu(SubF, dst, lhs, Imm::F(rhs))), + DslIr::SubVIN(dst, lhs, rhs) => f(self.base_alu(SubF, dst, Imm::F(lhs), rhs)), DslIr::SubF(dst, lhs, rhs) => f(self.base_alu(SubF, dst, lhs, rhs)), - DslIr::SubFI(dst, lhs, rhs) => f(self.base_alu(SubF, dst, lhs, Imm::f(rhs))), - DslIr::SubFIN(dst, lhs, rhs) => f(self.base_alu(SubF, dst, Imm::f(lhs), rhs)), + DslIr::SubFI(dst, lhs, rhs) => f(self.base_alu(SubF, dst, lhs, Imm::F(rhs))), + DslIr::SubFIN(dst, lhs, rhs) => f(self.base_alu(SubF, dst, Imm::F(lhs), rhs)), DslIr::SubE(dst, lhs, rhs) => f(self.ext_alu(SubE, dst, lhs, rhs)), - DslIr::SubEI(dst, lhs, rhs) => f(self.ext_alu(SubE, dst, lhs, Imm::ef(rhs))), - DslIr::SubEIN(dst, lhs, rhs) => f(self.ext_alu(SubE, dst, Imm::ef(lhs), rhs)), - DslIr::SubEFI(dst, lhs, rhs) => f(self.ext_alu(SubE, dst, lhs, Imm::f(rhs))), + DslIr::SubEI(dst, lhs, rhs) => f(self.ext_alu(SubE, dst, lhs, Imm::EF(rhs))), + DslIr::SubEIN(dst, lhs, rhs) => f(self.ext_alu(SubE, dst, Imm::EF(lhs), rhs)), + DslIr::SubEFI(dst, lhs, rhs) => f(self.ext_alu(SubE, dst, lhs, Imm::F(rhs))), DslIr::SubEF(dst, lhs, rhs) => f(self.ext_alu(SubE, dst, lhs, rhs)), DslIr::MulV(dst, lhs, rhs) => f(self.base_alu(MulF, dst, lhs, rhs)), - DslIr::MulVI(dst, lhs, rhs) => f(self.base_alu(MulF, dst, lhs, Imm::f(rhs))), + DslIr::MulVI(dst, lhs, rhs) => f(self.base_alu(MulF, dst, lhs, Imm::F(rhs))), DslIr::MulF(dst, lhs, rhs) => f(self.base_alu(MulF, dst, lhs, rhs)), - DslIr::MulFI(dst, lhs, rhs) => f(self.base_alu(MulF, dst, lhs, Imm::f(rhs))), + DslIr::MulFI(dst, lhs, rhs) => f(self.base_alu(MulF, dst, lhs, Imm::F(rhs))), DslIr::MulE(dst, lhs, rhs) => f(self.ext_alu(MulE, dst, lhs, rhs)), - DslIr::MulEI(dst, lhs, rhs) => f(self.ext_alu(MulE, dst, lhs, Imm::ef(rhs))), - DslIr::MulEFI(dst, lhs, rhs) => f(self.ext_alu(MulE, dst, lhs, Imm::f(rhs))), + DslIr::MulEI(dst, lhs, rhs) => f(self.ext_alu(MulE, dst, lhs, Imm::EF(rhs))), + DslIr::MulEFI(dst, lhs, rhs) => f(self.ext_alu(MulE, dst, lhs, Imm::F(rhs))), DslIr::MulEF(dst, lhs, rhs) => f(self.ext_alu(MulE, dst, lhs, rhs)), DslIr::DivF(dst, lhs, rhs) => f(self.base_alu(DivF, dst, lhs, rhs)), - DslIr::DivFI(dst, lhs, rhs) => f(self.base_alu(DivF, dst, lhs, Imm::f(rhs))), - DslIr::DivFIN(dst, lhs, rhs) => f(self.base_alu(DivF, dst, Imm::f(lhs), rhs)), + DslIr::DivFI(dst, lhs, rhs) => f(self.base_alu(DivF, dst, lhs, Imm::F(rhs))), + DslIr::DivFIN(dst, lhs, rhs) => f(self.base_alu(DivF, dst, Imm::F(lhs), rhs)), DslIr::DivE(dst, lhs, rhs) => f(self.ext_alu(DivE, dst, lhs, rhs)), - DslIr::DivEI(dst, lhs, rhs) => f(self.ext_alu(DivE, dst, lhs, Imm::ef(rhs))), - DslIr::DivEIN(dst, lhs, rhs) => f(self.ext_alu(DivE, dst, Imm::ef(lhs), rhs)), - DslIr::DivEFI(dst, lhs, rhs) => f(self.ext_alu(DivE, dst, lhs, Imm::f(rhs))), - DslIr::DivEFIN(dst, lhs, rhs) => f(self.ext_alu(DivE, dst, Imm::f(lhs), rhs)), + DslIr::DivEI(dst, lhs, rhs) => f(self.ext_alu(DivE, dst, lhs, Imm::EF(rhs))), + DslIr::DivEIN(dst, lhs, rhs) => f(self.ext_alu(DivE, dst, Imm::EF(lhs), rhs)), + DslIr::DivEFI(dst, lhs, rhs) => f(self.ext_alu(DivE, dst, lhs, Imm::F(rhs))), + DslIr::DivEFIN(dst, lhs, 
rhs) => f(self.ext_alu(DivE, dst, Imm::F(lhs), rhs)), DslIr::DivEF(dst, lhs, rhs) => f(self.ext_alu(DivE, dst, lhs, rhs)), - DslIr::NegV(dst, src) => f(self.base_alu(SubF, dst, Imm::f(C::F::zero()), src)), - DslIr::NegF(dst, src) => f(self.base_alu(SubF, dst, Imm::f(C::F::zero()), src)), - DslIr::NegE(dst, src) => f(self.ext_alu(SubE, dst, Imm::ef(C::EF::zero()), src)), - DslIr::InvV(dst, src) => f(self.base_alu(DivF, dst, Imm::f(C::F::one()), src)), - DslIr::InvF(dst, src) => f(self.base_alu(DivF, dst, Imm::f(C::F::one()), src)), - DslIr::InvE(dst, src) => f(self.ext_alu(DivE, dst, Imm::f(C::F::one()), src)), + DslIr::NegV(dst, src) => f(self.base_alu(SubF, dst, Imm::F(C::F::zero()), src)), + DslIr::NegF(dst, src) => f(self.base_alu(SubF, dst, Imm::F(C::F::zero()), src)), + DslIr::NegE(dst, src) => f(self.ext_alu(SubE, dst, Imm::EF(C::EF::zero()), src)), + DslIr::InvV(dst, src) => f(self.base_alu(DivF, dst, Imm::F(C::F::one()), src)), + DslIr::InvF(dst, src) => f(self.base_alu(DivF, dst, Imm::F(C::F::one()), src)), + DslIr::InvE(dst, src) => f(self.ext_alu(DivE, dst, Imm::F(C::F::one()), src)), DslIr::AssertEqV(lhs, rhs) => self.base_assert_eq(lhs, rhs, f), DslIr::AssertEqF(lhs, rhs) => self.base_assert_eq(lhs, rhs, f), DslIr::AssertEqE(lhs, rhs) => self.ext_assert_eq(lhs, rhs, f), - DslIr::AssertEqVI(lhs, rhs) => self.base_assert_eq(lhs, Imm::f(rhs), f), - DslIr::AssertEqFI(lhs, rhs) => self.base_assert_eq(lhs, Imm::f(rhs), f), - DslIr::AssertEqEI(lhs, rhs) => self.ext_assert_eq(lhs, Imm::ef(rhs), f), + DslIr::AssertEqVI(lhs, rhs) => self.base_assert_eq(lhs, Imm::F(rhs), f), + DslIr::AssertEqFI(lhs, rhs) => self.base_assert_eq(lhs, Imm::F(rhs), f), + DslIr::AssertEqEI(lhs, rhs) => self.ext_assert_eq(lhs, Imm::EF(rhs), f), DslIr::AssertNeV(lhs, rhs) => self.base_assert_ne(lhs, rhs, f), DslIr::AssertNeF(lhs, rhs) => self.base_assert_ne(lhs, rhs, f), DslIr::AssertNeE(lhs, rhs) => self.ext_assert_ne(lhs, rhs, f), - DslIr::AssertNeVI(lhs, rhs) => self.base_assert_ne(lhs, Imm::f(rhs), f), - DslIr::AssertNeFI(lhs, rhs) => self.base_assert_ne(lhs, Imm::f(rhs), f), - DslIr::AssertNeEI(lhs, rhs) => self.ext_assert_ne(lhs, Imm::ef(rhs), f), + DslIr::AssertNeVI(lhs, rhs) => self.base_assert_ne(lhs, Imm::F(rhs), f), + DslIr::AssertNeFI(lhs, rhs) => self.base_assert_ne(lhs, Imm::F(rhs), f), + DslIr::AssertNeEI(lhs, rhs) => self.ext_assert_ne(lhs, Imm::EF(rhs), f), DslIr::CircuitV2Poseidon2PermuteBabyBear(data) => { f(self.poseidon2_permute(data.0, data.1)) @@ -544,7 +533,7 @@ where // Replace the mults using the address count data gathered in this previous. // Exhaustive match for refactoring purposes. - let total_memory = self.addr_to_mult.len(); + let total_memory = self.addr_to_mult.len() + self.consts.len(); let mut backfill = |(mult, addr): (&mut F, &Address)| { *mult = self.addr_to_mult.remove(addr.as_usize()).unwrap() }; @@ -561,6 +550,12 @@ where addrs: ExtAluIo { out: ref addr, .. }, .. }) => backfill((mult, addr)), + Instruction::Mem(MemInstr { + addrs: MemIo { inner: ref addr }, + mult, + kind: MemAccessKind::Write, + .. + }) => backfill((mult, addr)), Instruction::Poseidon2(instr) => { let Poseidon2SkinnyInstr { addrs: Poseidon2Io { output: ref addrs, .. }, @@ -597,24 +592,25 @@ where .iter_mut() .for_each(|(addr, mult)| backfill((mult, addr))); } - Instruction::Mem(_) => { - panic!("mem instructions should be produced through the `consts` map") - } // Instructions that do not write to memory. 
- Instruction::CommitPublicValues(_) | Instruction::Print(_) => (), + Instruction::Mem(MemInstr { kind: MemAccessKind::Read, .. }) + | Instruction::CommitPublicValues(_) + | Instruction::Print(_) => (), } } }); + debug_assert!(self.addr_to_mult.is_empty()); // Initialize constants. let total_consts = self.consts.len(); - let instrs_consts = take(&mut self.consts).into_iter().map(|(imm, addr)| { - Instruction::Mem(MemInstr { - addrs: MemIo { inner: addr }, - vals: MemIo { inner: imm.as_block() }, - mult: self.addr_to_mult.remove(addr.as_usize()).unwrap(), - kind: MemAccessKind::Write, - }) - }); + let instrs_consts = + self.consts.drain().sorted_by_key(|x| x.1 .0 .0).map(|(imm, (addr, mult))| { + Instruction::Mem(MemInstr { + addrs: MemIo { inner: addr }, + vals: MemIo { inner: imm.as_block() }, + mult, + kind: MemAccessKind::Write, + }) + }); tracing::debug!("number of consts to initialize: {}", instrs_consts.len()); // Reset the other fields. self.next_addr = Default::default(); @@ -629,8 +625,7 @@ where (instrs_consts.chain(instrs).collect(), traces) } }); - debug_assert!(self.addr_to_mult.is_empty()); - RecursionProgram { instructions, total_memory, traces } + RecursionProgram { instructions, total_memory, traces, shape: None } } } @@ -670,57 +665,6 @@ pub enum Imm { EF(EF), } -impl Imm -where - F: Field, - EF: AbstractExtensionField, -{ - /// Wraps its argument in `Self::F`. - pub fn f(f: F) -> Self { - Self::F(f) - } - - /// If `ef` lives in the base field, then we encode it as `Self::F`. - /// Otherwise, we encode it as `Self::EF`. - pub fn ef(ef: EF) -> Self { - if ef.as_base_slice()[1..].iter().all(Field::is_zero) { - Self::F(ef.as_base_slice()[0]) - } else { - Self::EF(ef) - } - } -} - -impl PartialOrd for Imm -where - F: PartialEq + AbstractField + PartialOrd, - EF: PartialEq + AbstractExtensionField, -{ - fn partial_cmp(&self, other: &Self) -> Option { - match (self, other) { - (Imm::F(a), Imm::F(b)) => a.partial_cmp(b), - (Imm::F(_), Imm::EF(_)) => Some(Ordering::Less), - (Imm::EF(_), Imm::F(_)) => Some(Ordering::Greater), - (Imm::EF(a), Imm::EF(b)) => a.as_base_slice().partial_cmp(b.as_base_slice()), - } - } -} - -impl Ord for Imm -where - F: Eq + AbstractField + Ord, - EF: Eq + AbstractExtensionField, -{ - fn cmp(&self, other: &Self) -> Ordering { - match (self, other) { - (Imm::F(a), Imm::F(b)) => a.cmp(b), - (Imm::F(_), Imm::EF(_)) => Ordering::Less, - (Imm::EF(_), Imm::F(_)) => Ordering::Greater, - (Imm::EF(a), Imm::EF(b)) => a.as_base_slice().cmp(b.as_base_slice()), - } - } -} - impl Imm where F: AbstractField + Copy, @@ -735,25 +679,6 @@ where } } -/// Expose the "virtual address" counter of the variable types. -trait HasVirtualAddress { - fn vaddr(&self) -> usize; -} - -macro_rules! impl_has_virtual_address { - ($type:ident<$($gen:ident),*>) => { - impl<$($gen),*> HasVirtualAddress for $type<$($gen),*> { - fn vaddr(&self) -> usize { - self.0 as usize - } - } - }; -} - -impl_has_virtual_address!(Var); -impl_has_virtual_address!(Felt); -impl_has_virtual_address!(Ext); - /// Utility functions for various register types. trait Reg { /// Mark the register as to be read from, returning the "physical" address. @@ -766,20 +691,54 @@ trait Reg { fn write(&self, compiler: &mut AsmCompiler) -> Address; } -impl, T: HasVirtualAddress> Reg for T { - fn read(&self, compiler: &mut AsmCompiler) -> Address { - compiler.read_vaddr(self.vaddr()) - } +macro_rules! 
impl_reg_borrowed { + ($a:ty) => { + impl Reg for $a + where + C: Config, + T: Reg + ?Sized, + { + fn read(&self, compiler: &mut AsmCompiler) -> Address { + (**self).read(compiler) + } - fn read_ghost(&self, compiler: &mut AsmCompiler) -> Address { - compiler.read_ghost_vaddr(self.vaddr()) - } + fn read_ghost(&self, compiler: &mut AsmCompiler) -> Address { + (**self).read_ghost(compiler) + } - fn write(&self, compiler: &mut AsmCompiler) -> Address { - compiler.write_fp(self.vaddr()) - } + fn write(&self, compiler: &mut AsmCompiler) -> Address { + (**self).write(compiler) + } + } + }; +} + +// Allow for more flexibility in arguments. +impl_reg_borrowed!(&T); +impl_reg_borrowed!(&mut T); +impl_reg_borrowed!(Box); + +macro_rules! impl_reg_vaddr { + ($a:ty) => { + impl> Reg for $a { + fn read(&self, compiler: &mut AsmCompiler) -> Address { + compiler.read_vaddr(self.idx as usize) + } + fn read_ghost(&self, compiler: &mut AsmCompiler) -> Address { + compiler.read_ghost_vaddr(self.idx as usize) + } + fn write(&self, compiler: &mut AsmCompiler) -> Address { + compiler.write_fp(self.idx as usize) + } + } + }; } +// These three types wrap a `u32` but they don't share a trait. +impl_reg_vaddr!(Var); +impl_reg_vaddr!(Felt); +impl_reg_vaddr!(Ext); + impl> Reg for Imm { fn read(&self, compiler: &mut AsmCompiler) -> Address { compiler.read_const(*self) @@ -821,16 +780,13 @@ mod tests { use rand::{rngs::StdRng, Rng, SeedableRng}; use sp1_core_machine::utils::{run_test_machine, setup_logger}; - use sp1_recursion_core_v2::{machine::RecursionAir, RecursionProgram, Runtime}; + use sp1_recursion_core::{machine::RecursionAir, RecursionProgram, Runtime}; use sp1_stark::{ baby_bear_poseidon2::BabyBearPoseidon2, inner_perm, BabyBearPoseidon2Inner, InnerHash, StarkGenericConfig, }; - use crate::{ - asm::{AsmBuilder, AsmConfig}, - circuit::CircuitV2Builder, - }; + use crate::circuit::{AsmBuilder, AsmConfig, CircuitV2Builder}; use super::*; @@ -857,7 +813,8 @@ mod tests { let record = run(program.clone()); // Run with the poseidon2 wide chip. - let wide_machine = RecursionAir::<_, 3, 0>::machine_wide(BabyBearPoseidon2::default()); + let wide_machine = + RecursionAir::<_, 3>::machine_wide_with_all_chips(BabyBearPoseidon2::default()); let (pk, vk) = wide_machine.setup(&program); let result = run_test_machine(vec![record.clone()], wide_machine, pk, vk); if let Err(e) = result { @@ -865,7 +822,9 @@ mod tests { } // Run with the poseidon2 skinny chip. 
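The `Reg` rework above drops the `HasVirtualAddress` helper trait in favour of two macros: one forwards the trait through `&T`, `&mut T` and `Box<T>`, the other stamps out the same impl for `Var`, `Felt` and `Ext`, which each wrap a `u32` but share no common trait. A compact sketch of that shape, with hypothetical types in place of the real ones:

// VarIdx/FeltIdx stand in for Var/Felt/Ext: same layout, no shared trait.
struct VarIdx(u32);
struct FeltIdx(u32);

trait Reg {
    fn idx(&self) -> usize;
}

// Stamp out the same impl for every index-wrapping type.
macro_rules! impl_reg_idx {
    ($t:ty) => {
        impl Reg for $t {
            fn idx(&self) -> usize {
                self.0 as usize
            }
        }
    };
}

impl_reg_idx!(VarIdx);
impl_reg_idx!(FeltIdx);

// Forward through borrows so callers may pass `&T` (or `Box<T>`) freely.
impl<T: Reg + ?Sized> Reg for &T {
    fn idx(&self) -> usize {
        (**self).idx()
    }
}

fn main() {
    let v = VarIdx(7);
    let f = FeltIdx(3);
    assert_eq!(v.idx(), 7);
    assert_eq!((&f).idx(), 3);
}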
- let skinny_machine = RecursionAir::<_, 9, 0>::machine(BabyBearPoseidon2::compressed()); + let skinny_machine = RecursionAir::<_, 9>::machine_skinny_with_all_chips( + BabyBearPoseidon2::ultra_compressed(), + ); let (pk, vk) = skinny_machine.setup(&program); let result = run_test_machine(vec![record.clone()], skinny_machine, pk, vk); if let Err(e) = result { @@ -892,7 +851,7 @@ mod tests { } } - test_operations(builder.operations); + test_operations(builder.into_operations()); } #[test] @@ -972,7 +931,7 @@ mod tests { let expected_felt: Felt<_> = builder.eval(expected); builder.assert_felt_eq(result_felt, expected_felt); } - test_operations(builder.operations); + test_operations(builder.into_operations()); } #[test] @@ -1027,7 +986,7 @@ mod tests { } } - test_operations(builder.operations); + test_operations(builder.into_operations()); } #[test] @@ -1050,7 +1009,7 @@ mod tests { builder.assert_felt_eq(lhs, rhs); } } - test_operations(builder.operations); + test_operations(builder.into_operations()); } #[test] @@ -1091,7 +1050,7 @@ mod tests { } builder.cycle_tracker_v2_exit(); - test_operations_with_runner(builder.operations, |program| { + test_operations_with_runner(builder.into_operations(), |program| { let mut runtime = Runtime::::new( program, BabyBearPoseidon2Inner::new().perm, @@ -1130,7 +1089,7 @@ mod tests { builder.assert_felt_eq(lhs, rhs); } } - test_operations(builder.operations); + test_operations(builder.into_operations()); } macro_rules! test_assert_fixture { @@ -1140,7 +1099,7 @@ mod tests { let mut builder = AsmBuilder::::default(); test_assert_fixture!(builder, identity, F, Felt<_>, 0xDEADBEEF, $assert_felt, $should_offset); test_assert_fixture!(builder, EF::cons, EF, Ext<_, _>, 0xABADCAFE, $assert_ext, $should_offset); - test_operations(builder.operations); + test_operations(builder.into_operations()); } }; ($builder:ident, $wrap:path, $t:ty, $u:ty, $seed:expr, $assert:ident, $should_offset:expr) => { diff --git a/crates/recursion/compiler/src/asm/config.rs b/crates/recursion/compiler/src/circuit/config.rs similarity index 52% rename from crates/recursion/compiler/src/asm/config.rs rename to crates/recursion/compiler/src/circuit/config.rs index d6564fe551..cfadd1d4e9 100644 --- a/crates/recursion/compiler/src/asm/config.rs +++ b/crates/recursion/compiler/src/circuit/config.rs @@ -1,14 +1,16 @@ use std::marker::PhantomData; -use p3_field::{ExtensionField, PrimeField, TwoAdicField}; +use p3_field::{ExtensionField, PrimeField32, TwoAdicField}; -use crate::prelude::Config; +use crate::{ir::Builder, prelude::Config}; /// An assembly code configuration given a field and an extension field. 
#[derive(Debug, Clone, Default)] pub struct AsmConfig(PhantomData<(F, EF)>); -impl + TwoAdicField> Config +pub type AsmBuilder = Builder>; + +impl + TwoAdicField> Config for AsmConfig { type N = F; diff --git a/crates/recursion/compiler/src/circuit/mod.rs b/crates/recursion/compiler/src/circuit/mod.rs index 1fbe681c70..715021f62a 100644 --- a/crates/recursion/compiler/src/circuit/mod.rs +++ b/crates/recursion/compiler/src/circuit/mod.rs @@ -1,37 +1,24 @@ mod builder; mod compiler; +mod config; pub use builder::*; pub use compiler::*; +pub use config::*; #[cfg(test)] mod tests { use std::sync::Arc; use p3_baby_bear::DiffusionMatrixBabyBear; - use p3_field::{AbstractExtensionField, AbstractField}; - use rand::{rngs::StdRng, Rng, SeedableRng}; - - use sp1_core_machine::utils::{run_test_machine, setup_logger}; - use sp1_recursion_core_v2::{ - chips::{ - alu_base::BaseAluChip, - alu_ext::ExtAluChip, - exp_reverse_bits::ExpReverseBitsLenChip, - fri_fold::FriFoldChip, - mem::{MemoryConstChip, MemoryVarChip}, - poseidon2_wide::Poseidon2WideChip, - }, - machine::RecursionAir, - Runtime, RuntimeError, - }; - use sp1_stark::{ - BabyBearPoseidon2Inner, Chip, StarkGenericConfig, StarkMachine, PROOF_MAX_NUM_PVS, - }; + use p3_field::AbstractField; + + use sp1_core_machine::utils::run_test_machine; + use sp1_recursion_core::{machine::RecursionAir, Runtime, RuntimeError}; + use sp1_stark::{BabyBearPoseidon2Inner, StarkGenericConfig}; use crate::{ - asm::AsmBuilder, - circuit::{AsmCompiler, CircuitV2Builder}, + circuit::{AsmBuilder, AsmCompiler, CircuitV2Builder}, ir::*, }; @@ -40,92 +27,7 @@ mod tests { type SC = BabyBearPoseidon2Inner; type F = ::Val; type EF = ::Challenge; - type A = RecursionAir; - - /// Rough test to give an idea of how long the compress stage would take on the v2 circuit - /// relative to the recursion VM. - /// - /// The constants below were manually populated by running - /// `RUST_LOG=debug RUST_LOGGER=forest FRI_QUERIES=100 cargo test - /// --release --package sp1-prover -- --exact tests::test_e2e` - /// and writing down numbers from the first `prove_shards` section of the compress stage. - /// We use those numbers to create a dummy circuit that should roughly be the size of - /// the finished circuit, which will be equivalent to the compress program on the VM. - /// Therefore, by running `RUST_LOG=debug RUST_LOGGER=forest FRI_QUERIES=100 cargo test - /// -release --lib --features native-gnark -- test_compress_dummy_circuit` - /// and comparing the durations of the `prove_shards` sections, we can roughly estimate the - /// speed-up factor. At the time of writing, the factor is approximately 30.4s/3.5s = 8.7. - #[test] - fn test_compress_dummy_circuit() { - setup_logger(); - - // To aid in testing. 
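With `AsmConfig` moved under `circuit`, the separate assembly builder type collapses into the `AsmBuilder` alias over the generic `Builder`. A minimal, hypothetical version of that marker-config pattern follows; the real `Config` trait additionally carries field bounds such as `PrimeField32` and `TwoAdicField`.

use std::marker::PhantomData;

// A zero-sized config names the field types; the builder is generic over it.
trait Config {
    type F;
    type EF;
}

#[derive(Debug, Default)]
struct AsmConfig<F, EF>(PhantomData<(F, EF)>);

impl<F, EF> Config for AsmConfig<F, EF> {
    type F = F;
    type EF = EF;
}

#[derive(Debug, Default)]
struct Builder<C: Config> {
    ops: Vec<String>,
    _config: PhantomData<C>,
}

// One alias replaces a dedicated AsmBuilder type.
type AsmBuilder<F, EF> = Builder<AsmConfig<F, EF>>;

fn main() {
    let builder: AsmBuilder<u32, u64> = AsmBuilder::default();
    assert!(builder.ops.is_empty());
}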
- const SCALE: usize = 1; - const FIELD_OPERATIONS: usize = 451653 * SCALE; - const EXTENSION_OPERATIONS: usize = 82903 * SCALE; - const POSEIDON_OPERATIONS: usize = 34697 * SCALE; - const EXP_REVERSE_BITS_LEN_OPERATIONS: usize = 35200 * SCALE; - const FRI_FOLD_OPERATIONS: usize = 152800 * SCALE; - - let mut builder = AsmBuilder::::default(); - - let mut rng = StdRng::seed_from_u64(0xFEB29).sample_iter(rand::distributions::Standard); - let mut random_felt = move || -> F { rng.next().unwrap() }; - let mut rng = - StdRng::seed_from_u64(0x0451).sample_iter::<[F; 4], _>(rand::distributions::Standard); - let mut random_ext = move || EF::from_base_slice(&rng.next().unwrap()); - - for _ in 0..FIELD_OPERATIONS { - let a: Felt<_> = builder.eval(random_felt()); - let b: Felt<_> = builder.eval(random_felt()); - let _: Felt<_> = builder.eval(a + b); - } - for _ in 0..EXTENSION_OPERATIONS { - let a: Ext<_, _> = builder.eval(random_ext().cons()); - let b: Ext<_, _> = builder.eval(random_ext().cons()); - let _: Ext<_, _> = builder.eval(a + b); - } - - let operations = builder.operations; - let mut compiler = AsmCompiler::default(); - let program = Arc::new(compiler.compile(operations)); - let mut runtime = Runtime::::new( - program.clone(), - BabyBearPoseidon2Inner::new().perm, - ); - runtime.run().unwrap(); - - // Construct the machine ourselves so we can pad the tables, avoiding `A::machine`. - let config = SC::default(); - let chips: Vec> = vec![ - A::MemoryConst(MemoryConstChip::default()), - A::MemoryVar(MemoryVarChip::default()), - A::BaseAlu(BaseAluChip::default()), - A::ExtAlu(ExtAluChip::default()), - A::Poseidon2Wide(Poseidon2WideChip:: { - fixed_log2_rows: Some(((POSEIDON_OPERATIONS - 1).ilog2() + 1) as usize), - pad: true, - }), - A::ExpReverseBitsLen(ExpReverseBitsLenChip:: { - fixed_log2_rows: Some(((EXP_REVERSE_BITS_LEN_OPERATIONS - 1).ilog2() + 1) as usize), - pad: true, - }), - A::FriFold(FriFoldChip:: { - fixed_log2_rows: Some(((FRI_FOLD_OPERATIONS - 1).ilog2() + 1) as usize), - pad: true, - }), - ] - .into_iter() - .map(Chip::new) - .collect(); - let machine = StarkMachine::new(config, chips, PROOF_MAX_NUM_PVS); - - let (pk, vk) = machine.setup(&program); - let result = - run_test_machine(vec![runtime.record], machine, pk, vk.clone()).expect("should verify"); - - tracing::info!("num shard proofs: {}", result.shard_proofs.len()); - } + type A = RecursionAir; #[test] fn test_io() { @@ -148,7 +50,7 @@ mod tests { let zero: Felt<_> = builder.constant(F::zero()); builder.assert_felt_eq(y, zero); - let operations = builder.operations; + let operations = builder.into_operations(); let mut compiler = AsmCompiler::default(); let program = Arc::new(compiler.compile(operations)); let mut runtime = @@ -163,7 +65,7 @@ mod tests { .into(); runtime.run().unwrap(); - let machine = A::machine_wide(SC::new()); + let machine = A::compress_machine(SC::new()); let (pk, vk) = machine.setup(&program); let result = @@ -186,7 +88,7 @@ mod tests { let sum: Ext<_, _> = builder.eval(exts[0] + exts[1]); builder.assert_ext_ne(sum, exts[2]); - let operations = builder.operations; + let operations = builder.into_operations(); let mut compiler = AsmCompiler::default(); let program = Arc::new(compiler.compile(operations)); let mut runtime = diff --git a/crates/recursion/compiler/src/config.rs b/crates/recursion/compiler/src/config.rs index 2beec186b8..dbdfd40c5e 100644 --- a/crates/recursion/compiler/src/config.rs +++ b/crates/recursion/compiler/src/config.rs @@ -3,7 +3,7 @@ use p3_bn254_fr::Bn254Fr; use 
p3_field::extension::BinomialExtensionField; use sp1_stark::{InnerChallenge, InnerVal}; -use crate::{asm::AsmConfig, prelude::Config}; +use crate::{circuit::AsmConfig, prelude::Config}; pub type InnerConfig = AsmConfig; diff --git a/crates/recursion/compiler/src/constraints/mod.rs b/crates/recursion/compiler/src/constraints/mod.rs index f75d71c080..c60eee971a 100644 --- a/crates/recursion/compiler/src/constraints/mod.rs +++ b/crates/recursion/compiler/src/constraints/mod.rs @@ -159,6 +159,13 @@ impl ConstraintCompiler { args: vec![vec![a.id()], vec![b.id()], vec![tmp]], }); } + DslIr::SubFIN(a, b, c) => { + let temp = self.alloc_f(&mut constraints, b); + constraints.push(Constraint { + opcode: ConstraintOpcode::SubF, + args: vec![vec![a.id()], vec![temp], vec![c.id()]], + }); + } DslIr::SubE(a, b, c) => constraints.push(Constraint { opcode: ConstraintOpcode::SubE, args: vec![vec![a.id()], vec![b.id()], vec![c.id()]], @@ -225,6 +232,14 @@ impl ConstraintCompiler { args: vec![vec![a.id()], vec![tmp], vec![c.id()]], }); } + DslIr::DivF(a, b, c) => constraints.push(Constraint { + opcode: ConstraintOpcode::DivF, + args: vec![vec![a.id()], vec![b.id()], vec![c.id()]], + }), + DslIr::DivEF(a, b, c) => constraints.push(Constraint { + opcode: ConstraintOpcode::DivEF, + args: vec![vec![a.id()], vec![b.id()], vec![c.id()]], + }), DslIr::DivE(a, b, c) => constraints.push(Constraint { opcode: ConstraintOpcode::DivE, args: vec![vec![a.id()], vec![b.id()], vec![c.id()]], @@ -316,6 +331,17 @@ impl ConstraintCompiler { opcode: ConstraintOpcode::AssertEqE, args: vec![vec![a.id()], vec![b.id()]], }), + DslIr::AssertNeF(a, b) => constraints.push(Constraint { + opcode: ConstraintOpcode::AssertNeF, + args: vec![vec![a.id()], vec![b.id()]], + }), + DslIr::AssertNeFI(a, b) => { + let tmp = self.alloc_f(&mut constraints, b); + constraints.push(Constraint { + opcode: ConstraintOpcode::AssertNeF, + args: vec![vec![a.id()], vec![tmp]], + }); + } DslIr::AssertEqEI(a, b) => { let tmp = self.alloc_e(&mut constraints, b); constraints.push(Constraint { diff --git a/crates/recursion/compiler/src/constraints/opcodes.rs b/crates/recursion/compiler/src/constraints/opcodes.rs index edb6b1c2e0..82e8dc647f 100644 --- a/crates/recursion/compiler/src/constraints/opcodes.rs +++ b/crates/recursion/compiler/src/constraints/opcodes.rs @@ -30,6 +30,7 @@ pub enum ConstraintOpcode { AssertEqV, AssertEqF, AssertEqE, + AssertNeF, Permute, Num2BitsV, Num2BitsF, diff --git a/crates/recursion/compiler/src/ir/arithmetic.rs b/crates/recursion/compiler/src/ir/arithmetic.rs new file mode 100644 index 0000000000..c8df6d0ac2 --- /dev/null +++ b/crates/recursion/compiler/src/ir/arithmetic.rs @@ -0,0 +1,970 @@ +use std::{cell::UnsafeCell, mem::ManuallyDrop}; + +use p3_field::{AbstractExtensionField, AbstractField, Field}; + +use crate::ir::DslIr; + +use super::{Config, Ext, Felt, InnerBuilder, Var}; + +#[derive(Debug)] +pub struct VarHandle { + ptr: *mut (), + + add_var: fn(*mut (), Var, Var) -> Var, + add_var_const: fn(*mut (), Var, N) -> Var, + + sub_var: fn(*mut (), Var, Var) -> Var, + sub_var_const: fn(*mut (), Var, N) -> Var, + sub_const_var: fn(*mut (), N, Var) -> Var, + + neg_var: fn(ptr: *mut (), lhs: Var) -> Var, + + mul_var: fn(*mut (), Var, Var) -> Var, + mul_var_const: fn(*mut (), Var, N) -> Var, +} + +#[derive(Debug)] +pub struct FeltHandle { + ptr: *mut (), + + pub ext_handle_ptr: *mut (), + + add_felt: fn(*mut (), Felt, Felt) -> Felt, + add_const_felt: fn(*mut (), Felt, F) -> Felt, + + sub_felt: fn(*mut (), Felt, Felt) -> Felt, + 
sub_const_felt: fn(*mut (), F, Felt) -> Felt, + sub_felt_const: fn(*mut (), Felt, F) -> Felt, + + neg_felt: fn(ptr: *mut (), lhs: Felt) -> Felt, + + mul_felt: fn(*mut (), Felt, Felt) -> Felt, + mul_felt_const: fn(ptr: *mut (), lhs: Felt, rhs: F) -> Felt, + + div_felt: fn(*mut (), Felt, Felt) -> Felt, + div_felt_const: fn(*mut (), Felt, F) -> Felt, + div_const_felt: fn(*mut (), F, Felt) -> Felt, +} + +#[derive(Debug)] +pub struct ExtHandle { + ptr: *mut (), + + add_ext: fn(*mut (), Ext, Ext) -> Ext, + add_const_ext: fn(*mut (), Ext, EF) -> Ext, + add_ext_base: fn(*mut (), Ext, Felt) -> Ext, + add_const_base: fn(*mut (), Ext, F) -> Ext, + add_felt_const_ext: fn(*mut (), Felt, EF, *mut Self) -> Ext, + + sub_ext: fn(*mut (), Ext, Ext) -> Ext, + sub_ext_base: fn(*mut (), Ext, Felt) -> Ext, + sub_base_ext: fn(*mut (), Felt, Ext) -> Ext, + sub_felt_const_ext: fn(*mut (), Felt, EF, *mut Self) -> Ext, + sub_const_ext: fn(*mut (), Ext, EF) -> Ext, + sub_ext_const: fn(*mut (), EF, Ext) -> Ext, + + neg_ext: fn(ptr: *mut (), lhs: Ext) -> Ext, + + div_ext: fn(*mut (), Ext, Ext) -> Ext, + div_const_ext: fn(*mut (), Ext, EF) -> Ext, + div_ext_base: fn(*mut (), Ext, Felt) -> Ext, + div_base_ext: fn(*mut (), Felt, Ext) -> Ext, + div_const_base: fn(*mut (), Ext, F) -> Ext, + div_ext_const: fn(*mut (), EF, Ext) -> Ext, + div_felt_const_ext: fn(*mut (), Felt, EF, *mut Self) -> Ext, + + mul_ext: fn(*mut (), Ext, Ext) -> Ext, + mul_const_ext: fn(*mut (), Ext, EF) -> Ext, + mul_ext_base: fn(*mut (), Ext, Felt) -> Ext, + mul_felt_const_ext: fn(*mut (), Felt, EF, *mut Self) -> Ext, +} + +pub(crate) trait VarOperations { + fn add_var(ptr: *mut (), lhs: Var, rhs: Var) -> Var; + fn add_const_var(ptr: *mut (), lhs: Var, rhs: N) -> Var; + + fn sub_var(ptr: *mut (), lhs: Var, rhs: Var) -> Var; + fn sub_var_const(ptr: *mut (), lhs: Var, rhs: N) -> Var; + fn sub_const_var(ptr: *mut (), lhs: N, rhs: Var) -> Var; + + fn neg_var(ptr: *mut (), lhs: Var) -> Var; + + fn mul_var(ptr: *mut (), lhs: Var, rhs: Var) -> Var; + fn mul_const_var(ptr: *mut (), lhs: Var, rhs: N) -> Var; + + fn var_handle(element: &mut Box) -> VarHandle { + VarHandle { + ptr: element.as_mut() as *mut Self as *mut (), + add_var: Self::add_var, + sub_var: Self::sub_var, + mul_var: Self::mul_var, + neg_var: Self::neg_var, + add_var_const: Self::add_const_var, + sub_var_const: Self::sub_var_const, + sub_const_var: Self::sub_const_var, + mul_var_const: Self::mul_const_var, + } + } +} + +pub(crate) trait FeltOperations { + fn add_felt(ptr: *mut (), lhs: Felt, rhs: Felt) -> Felt; + fn sub_felt(ptr: *mut (), lhs: Felt, rhs: Felt) -> Felt; + fn mul_felt(ptr: *mut (), lhs: Felt, rhs: Felt) -> Felt; + fn add_felt_const(ptr: *mut (), lhs: Felt, rhs: F) -> Felt; + fn sub_felt_const(ptr: *mut (), lhs: Felt, rhs: F) -> Felt; + fn mul_const_felt(ptr: *mut (), lhs: Felt, rhs: F) -> Felt; + fn sub_const_felt(ptr: *mut (), lhs: F, rhs: Felt) -> Felt; + fn div_felt(ptr: *mut (), lhs: Felt, rhs: Felt) -> Felt; + fn div_felt_const(ptr: *mut (), lhs: Felt, rhs: F) -> Felt; + fn div_const_felt(ptr: *mut (), lhs: F, rhs: Felt) -> Felt; + fn neg_felt(ptr: *mut (), lhs: Felt) -> Felt; + + fn felt_handle(element: &mut Box, ext_handle_ptr: *mut ()) -> FeltHandle { + FeltHandle { + ptr: element.as_mut() as *mut Self as *mut (), + ext_handle_ptr, + add_felt: Self::add_felt, + sub_felt: Self::sub_felt, + mul_felt: Self::mul_felt, + add_const_felt: Self::add_felt_const, + mul_felt_const: Self::mul_const_felt, + sub_felt_const: Self::sub_felt_const, + sub_const_felt: Self::sub_const_felt, 
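The handle structs above store a type-erased `*mut ()` plus plain `fn` pointers, so `Var`, `Felt` and `Ext` values can record operations through their handle without dragging a generic builder parameter along. A stripped-down, hypothetical sketch of that dispatch shape (names like `Recorder` are illustrative only):

struct Recorder {
    ops: Vec<String>,
}

struct Handle {
    ptr: *mut (),
    add: fn(*mut (), u32, u32) -> u32,
}

fn add_impl(ptr: *mut (), lhs: u32, rhs: u32) -> u32 {
    // Safety (sketch only): `ptr` was produced from a live, heap-allocated Recorder.
    let rec = unsafe { &mut *(ptr as *mut Recorder) };
    rec.ops.push(format!("add {lhs} {rhs}"));
    lhs + rhs
}

impl Handle {
    fn add(&self, lhs: u32, rhs: u32) -> u32 {
        (self.add)(self.ptr, lhs, rhs)
    }
}

fn main() {
    // Heap-allocate the recorder and hand a type-erased pointer to the handle.
    let ptr = Box::into_raw(Box::new(Recorder { ops: Vec::new() })) as *mut ();
    let handle = Handle { ptr, add: add_impl };
    assert_eq!(handle.add(2, 3), 5);
    // Reclaim the recorder to inspect the recorded ops and free the allocation.
    let rec = unsafe { Box::from_raw(ptr as *mut Recorder) };
    assert_eq!(rec.ops, vec!["add 2 3".to_string()]);
}

Because the erased pointer carries no type parameter, the value types stay small and copyable while still being able to append to the operation list.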
+ div_felt: Self::div_felt, + div_felt_const: Self::div_felt_const, + div_const_felt: Self::div_const_felt, + neg_felt: Self::neg_felt, + } + } +} + +pub(crate) trait ExtOperations { + fn add_ext(ptr: *mut (), lhs: Ext, rhs: Ext) -> Ext; + fn add_felt_const_ext( + ptr: *mut (), + lhs: Felt, + rhs: EF, + handle: *mut ExtHandle, + ) -> Ext; + fn add_ext_base(ptr: *mut (), lhs: Ext, rhs: Felt) -> Ext; + fn add_const_ext(ptr: *mut (), lhs: Ext, rhs: EF) -> Ext; + fn add_const_base(ptr: *mut (), lhs: Ext, rhs: F) -> Ext; + + fn neg_ext(ptr: *mut (), lhs: Ext) -> Ext; + + fn sub_ext(ptr: *mut (), lhs: Ext, rhs: Ext) -> Ext; + fn sub_ext_base(ptr: *mut (), lhs: Ext, rhs: Felt) -> Ext; + fn sub_base_ext(ptr: *mut (), lhs: Felt, rhs: Ext) -> Ext; + fn sub_ext_const(ptr: *mut (), lhs: EF, rhs: Ext) -> Ext; + fn sub_const_ext(ptr: *mut (), lhs: Ext, rhs: EF) -> Ext; + fn sub_felt_const_ext( + ptr: *mut (), + lhs: Felt, + rhs: EF, + handle: *mut ExtHandle, + ) -> Ext; + + fn div_ext(ptr: *mut (), lhs: Ext, rhs: Ext) -> Ext; + fn div_ext_base(ptr: *mut (), lhs: Ext, rhs: Felt) -> Ext; + fn div_base_ext(ptr: *mut (), lhs: Felt, rhs: Ext) -> Ext; + fn div_const_ext(ptr: *mut (), lhs: Ext, rhs: EF) -> Ext; + fn div_ext_const(ptr: *mut (), lhs: EF, rhs: Ext) -> Ext; + fn div_const_base(ptr: *mut (), lhs: Ext, rhs: F) -> Ext; + fn div_felt_const_ext( + ptr: *mut (), + lhs: Felt, + rhs: EF, + handle: *mut ExtHandle, + ) -> Ext; + + fn mul_ext(ptr: *mut (), lhs: Ext, rhs: Ext) -> Ext; + fn mul_const_ext(ptr: *mut (), lhs: Ext, rhs: EF) -> Ext; + fn mul_felt_const_ext( + ptr: *mut (), + lhs: Felt, + rhs: EF, + handle: *mut ExtHandle, + ) -> Ext; + fn mul_ext_base(ptr: *mut (), lhs: Ext, rhs: Felt) -> Ext; + + fn ext_handle(element: &mut Box) -> ExtHandle { + ExtHandle { + ptr: element.as_mut() as *mut Self as *mut (), + add_ext: Self::add_ext, + add_ext_base: Self::add_ext_base, + add_felt_const_ext: Self::add_felt_const_ext, + sub_ext: Self::sub_ext, + sub_base_ext: Self::sub_base_ext, + add_const_base: Self::add_const_base, + add_const_ext: Self::add_const_ext, + neg_ext: Self::neg_ext, + sub_ext_base: Self::sub_ext_base, + sub_felt_const_ext: Self::sub_felt_const_ext, + sub_const_ext: Self::sub_const_ext, + div_ext: Self::div_ext, + div_const_ext: Self::div_const_ext, + div_felt_const_ext: Self::div_felt_const_ext, + div_ext_base: Self::div_ext_base, + sub_ext_const: Self::sub_ext_const, + div_base_ext: Self::div_base_ext, + div_const_base: Self::div_const_base, + mul_ext: Self::mul_ext, + mul_const_ext: Self::mul_const_ext, + mul_ext_base: Self::mul_ext_base, + mul_felt_const_ext: Self::mul_felt_const_ext, + div_ext_const: Self::div_ext_const, + } + } +} + +impl VarOperations for UnsafeCell> { + fn add_var(ptr: *mut (), lhs: Var, rhs: Var) -> Var { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Var::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::AddV(res, lhs, rhs)); + + res + } + + fn sub_var(ptr: *mut (), lhs: Var, rhs: Var) -> Var { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Var::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::SubV(res, lhs, rhs)); + + res + } + + fn mul_var(ptr: *mut (), lhs: Var, rhs: Var) -> Var { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = 
inner.get_mut(); + let idx = inner.variable_count; + let res = Var::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::MulV(res, lhs, rhs)); + + res + } + + fn add_const_var(ptr: *mut (), lhs: Var, rhs: C::N) -> Var { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Var::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::AddVI(res, lhs, rhs)); + + res + } + + fn mul_const_var(ptr: *mut (), lhs: Var, rhs: C::N) -> Var { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Var::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::MulVI(res, lhs, rhs)); + + res + } + + fn sub_const_var(ptr: *mut (), lhs: C::N, rhs: Var) -> Var { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Var::new(idx, rhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::SubVIN(res, lhs, rhs)); + + res + } + + fn sub_var_const(ptr: *mut (), lhs: Var, rhs: C::N) -> Var { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Var::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::SubVI(res, lhs, rhs)); + + res + } + + fn neg_var(ptr: *mut (), lhs: Var) -> Var { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Var::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::NegV(res, lhs)); + + res + } +} + +impl FeltOperations for UnsafeCell> { + fn add_felt(ptr: *mut (), lhs: Felt, rhs: Felt) -> Felt { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Felt::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::AddF(res, lhs, rhs)); + + res + } + + fn sub_felt(ptr: *mut (), lhs: Felt, rhs: Felt) -> Felt { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Felt::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::SubF(res, lhs, rhs)); + + res + } + + fn neg_felt(ptr: *mut (), lhs: Felt) -> Felt { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Felt::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::NegF(res, lhs)); + + res + } + + fn mul_felt(ptr: *mut (), lhs: Felt, rhs: Felt) -> Felt { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Felt::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::MulF(res, lhs, rhs)); + + res + } + + fn add_felt_const(ptr: *mut (), lhs: Felt, rhs: C::F) -> Felt { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Felt::new(idx, lhs.handle); 
+ inner.variable_count += 1; + + inner.operations.push(DslIr::AddFI(res, lhs, rhs)); + + res + } + + fn sub_felt_const(ptr: *mut (), lhs: Felt, rhs: C::F) -> Felt { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Felt::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::SubFI(res, lhs, rhs)); + + res + } + + fn sub_const_felt(ptr: *mut (), lhs: C::F, rhs: Felt) -> Felt { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Felt::new(idx, rhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::SubFIN(res, lhs, rhs)); + + res + } + + fn mul_const_felt(ptr: *mut (), lhs: Felt, rhs: C::F) -> Felt { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Felt::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::MulFI(res, lhs, rhs)); + + res + } + + fn div_felt(ptr: *mut (), lhs: Felt, rhs: Felt) -> Felt { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Felt::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::DivF(res, lhs, rhs)); + + res + } + + fn div_felt_const(ptr: *mut (), lhs: Felt, rhs: C::F) -> Felt { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Felt::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::DivFI(res, lhs, rhs)); + + res + } + + fn div_const_felt(ptr: *mut (), lhs: C::F, rhs: Felt) -> Felt { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Felt::new(idx, rhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::DivFIN(res, lhs, rhs)); + + res + } +} + +impl ExtOperations for UnsafeCell> { + fn add_ext(ptr: *mut (), lhs: Ext, rhs: Ext) -> Ext { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Ext::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::AddE(res, lhs, rhs)); + + res + } + + fn add_ext_base(ptr: *mut (), lhs: Ext, rhs: Felt) -> Ext { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Ext::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::AddEF(res, lhs, rhs)); + + res + } + + fn add_const_base(ptr: *mut (), lhs: Ext, rhs: C::F) -> Ext { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Ext::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::AddEFI(res, lhs, rhs)); + + res + } + + fn add_const_ext(ptr: *mut (), lhs: Ext, rhs: C::EF) -> Ext { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Ext::new(idx, lhs.handle); + inner.variable_count += 1; + 
+ inner.operations.push(DslIr::AddEI(res, lhs, rhs)); + + res + } + + fn add_felt_const_ext( + ptr: *mut (), + lhs: Felt, + rhs: C::EF, + handle: *mut ExtHandle, + ) -> Ext { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let idx = inner.get_mut().variable_count; + let res = Ext::new(idx, handle); + let inner = inner.get_mut(); + + inner.variable_count += 1; + inner.operations.push(DslIr::AddEFFI(res, lhs, rhs)); + + res + } + + fn sub_ext(ptr: *mut (), lhs: Ext, rhs: Ext) -> Ext { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Ext::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::SubE(res, lhs, rhs)); + + res + } + + fn sub_ext_base(ptr: *mut (), lhs: Ext, rhs: Felt) -> Ext { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Ext::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::SubEF(res, lhs, rhs)); + + res + } + + fn sub_const_ext(ptr: *mut (), lhs: Ext, rhs: C::EF) -> Ext { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Ext::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::SubEI(res, lhs, rhs)); + + res + } + + fn sub_ext_const(ptr: *mut (), lhs: C::EF, rhs: Ext) -> Ext { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Ext::new(idx, rhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::SubEIN(res, lhs, rhs)); + + res + } + + fn sub_felt_const_ext( + ptr: *mut (), + lhs: Felt, + rhs: C::EF, + handle: *mut ExtHandle, + ) -> Ext { + Self::add_felt_const_ext(ptr, lhs, -rhs, handle) + } + + fn sub_base_ext(ptr: *mut (), lhs: Felt, rhs: Ext) -> Ext { + // TODO: optimize to one opcode. 
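Each operation above temporarily rebuilds the `Box` from the raw pointer and wraps it in `ManuallyDrop`, so the inner builder can be mutated (push an op, bump the variable counter) without being freed when the temporary goes out of scope. A self-contained sketch of that borrow-through-a-raw-pointer idiom, with a hypothetical `Counter` standing in for `InnerBuilder`:

use std::mem::ManuallyDrop;

struct Counter {
    count: u64,
}

// Borrow a heap value through a raw pointer without taking ownership:
// reconstruct the Box, but wrap it in ManuallyDrop so dropping it is a no-op.
fn bump(ptr: *mut ()) -> u64 {
    let mut boxed = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Counter)) };
    boxed.count += 1;
    boxed.count
}

fn main() {
    let ptr = Box::into_raw(Box::new(Counter { count: 0 })) as *mut ();
    assert_eq!(bump(ptr), 1);
    assert_eq!(bump(ptr), 2);
    // Finally reclaim and drop the allocation exactly once.
    unsafe { drop(Box::from_raw(ptr as *mut Counter)) };
}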
+ let rhs = Self::neg_ext(ptr, rhs); + Self::add_ext_base(ptr, rhs, lhs) + } + + fn neg_ext(ptr: *mut (), lhs: Ext) -> Ext { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Ext::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::NegE(res, lhs)); + + res + } + + fn mul_ext(ptr: *mut (), lhs: Ext, rhs: Ext) -> Ext { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Ext::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::MulE(res, lhs, rhs)); + + res + } + + fn mul_ext_base(ptr: *mut (), lhs: Ext, rhs: Felt) -> Ext { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Ext::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::MulEF(res, lhs, rhs)); + + res + } + + fn mul_const_ext(ptr: *mut (), lhs: Ext, rhs: C::EF) -> Ext { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Ext::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::MulEI(res, lhs, rhs)); + + res + } + + fn mul_felt_const_ext( + ptr: *mut (), + lhs: Felt, + rhs: C::EF, + handle: *mut ExtHandle, + ) -> Ext { + // TODO: optimize to one opcode. + let lhs = Self::add_felt_const_ext(ptr, lhs, C::EF::zero(), handle); + Self::mul_const_ext(ptr, lhs, rhs) + } + + fn div_ext(ptr: *mut (), lhs: Ext, rhs: Ext) -> Ext { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Ext::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::DivE(res, lhs, rhs)); + + res + } + + fn div_const_base(ptr: *mut (), lhs: Ext, rhs: C::F) -> Ext { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Ext::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::DivEFI(res, lhs, rhs)); + + res + } + + fn div_const_ext(ptr: *mut (), lhs: Ext, rhs: C::EF) -> Ext { + Self::mul_const_ext(ptr, lhs, rhs.inverse()) + } + + fn div_base_ext(ptr: *mut (), lhs: Felt, rhs: Ext) -> Ext { + // TODO: optimize to one opcode. 
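`div_const_ext` above is simply `mul_const_ext` with the inverted constant, which avoids a dedicated division opcode for constant divisors. A small, self-contained arithmetic sketch of why that is sound in a prime field, using the BabyBear modulus purely as an example:

// Dividing by a nonzero constant c equals multiplying by its precomputed inverse.
const P: u64 = 2013265921; // BabyBear modulus, used here only as an example prime.

fn mul(a: u64, b: u64) -> u64 {
    (a as u128 * b as u128 % P as u128) as u64
}

// Fermat inverse: a^(P-2) mod P, valid for prime P and a != 0.
fn inv(a: u64) -> u64 {
    let (mut base, mut exp, mut acc) = (a, P - 2, 1u64);
    while exp > 0 {
        if exp & 1 == 1 {
            acc = mul(acc, base);
        }
        base = mul(base, base);
        exp >>= 1;
    }
    acc
}

fn main() {
    let (x, c) = (123456789u64, 97u64);
    // "x / c" in the field, computed as one multiplication once inv(c) is known.
    let q = mul(x, inv(c));
    assert_eq!(mul(q, c), x);
}

The inverse is computed once per constant at compile time, so the emitted circuit only ever sees a multiplication.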
+ let lhs = Self::add_felt_const_ext(ptr, lhs, C::EF::zero(), rhs.handle); + Self::div_ext(ptr, lhs, rhs) + } + + fn div_ext_base(ptr: *mut (), lhs: Ext, rhs: Felt) -> Ext { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Ext::new(idx, lhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::DivEF(res, lhs, rhs)); + + res + } + + fn div_ext_const(ptr: *mut (), lhs: C::EF, rhs: Ext) -> Ext { + let mut inner = unsafe { ManuallyDrop::new(Box::from_raw(ptr as *mut Self)) }; + let inner = inner.get_mut(); + let idx = inner.variable_count; + let res = Ext::new(idx, rhs.handle); + inner.variable_count += 1; + + inner.operations.push(DslIr::DivEIN(res, lhs, rhs)); + + res + } + + fn div_felt_const_ext( + ptr: *mut (), + lhs: Felt, + rhs: C::EF, + handle: *mut ExtHandle, + ) -> Ext { + Self::mul_felt_const_ext(ptr, lhs, rhs.inverse(), handle) + } +} + +impl VarHandle { + pub fn add_v(&self, lhs: Var, rhs: Var) -> Var { + (self.add_var)(self.ptr, lhs, rhs) + } + + pub fn sub_v(&self, lhs: Var, rhs: Var) -> Var { + (self.sub_var)(self.ptr, lhs, rhs) + } + + pub fn neg_v(&self, lhs: Var) -> Var { + (self.neg_var)(self.ptr, lhs) + } + + pub fn mul_v(&self, lhs: Var, rhs: Var) -> Var { + (self.mul_var)(self.ptr, lhs, rhs) + } + + pub fn add_const_v(&self, lhs: Var, rhs: N) -> Var { + (self.add_var_const)(self.ptr, lhs, rhs) + } + + pub fn add_v_const(&self, lhs: N, rhs: Var) -> Var { + self.add_const_v(rhs, lhs) + } + + pub fn mul_const_v(&self, lhs: Var, rhs: N) -> Var { + (self.mul_var_const)(self.ptr, lhs, rhs) + } + + pub fn mul_v_const(&self, lhs: N, rhs: Var) -> Var { + self.mul_const_v(rhs, lhs) + } + + pub fn sub_const_v(&self, lhs: N, rhs: Var) -> Var { + (self.sub_const_var)(self.ptr, lhs, rhs) + } + + pub fn sub_v_const(&self, lhs: Var, rhs: N) -> Var { + (self.sub_var_const)(self.ptr, lhs, rhs) + } +} + +impl FeltHandle { + pub fn add_f(&self, lhs: Felt, rhs: Felt) -> Felt { + (self.add_felt)(self.ptr, lhs, rhs) + } + + pub fn add_const_f(&self, lhs: Felt, rhs: F) -> Felt { + (self.add_const_felt)(self.ptr, lhs, rhs) + } + + pub fn add_f_const(&self, lhs: F, rhs: Felt) -> Felt { + self.add_const_f(rhs, lhs) + } + + pub fn sub_f(&self, lhs: Felt, rhs: Felt) -> Felt { + (self.sub_felt)(self.ptr, lhs, rhs) + } + + pub fn sub_f_const(&self, lhs: Felt, rhs: F) -> Felt { + (self.sub_felt_const)(self.ptr, lhs, rhs) + } + + pub fn sub_const_f(&self, lhs: F, rhs: Felt) -> Felt { + (self.sub_const_felt)(self.ptr, lhs, rhs) + } + + pub fn neg_f(&self, lhs: Felt) -> Felt { + (self.neg_felt)(self.ptr, lhs) + } + + pub fn mul_f(&self, lhs: Felt, rhs: Felt) -> Felt { + (self.mul_felt)(self.ptr, lhs, rhs) + } + + pub fn mul_const_f(&self, lhs: Felt, rhs: F) -> Felt { + (self.mul_felt_const)(self.ptr, lhs, rhs) + } + + pub fn mul_f_const(&self, lhs: F, rhs: Felt) -> Felt { + self.mul_const_f(rhs, lhs) + } + + pub fn div_f(&self, lhs: Felt, rhs: Felt) -> Felt { + (self.div_felt)(self.ptr, lhs, rhs) + } + + pub fn div_f_const(&self, lhs: Felt, rhs: F) -> Felt { + (self.div_felt_const)(self.ptr, lhs, rhs) + } + + pub fn div_const_f(&self, lhs: F, rhs: Felt) -> Felt { + (self.div_const_felt)(self.ptr, lhs, rhs) + } +} + +impl> ExtHandle { + pub fn add_e(&self, lhs: Ext, rhs: Ext) -> Ext { + (self.add_ext)(self.ptr, lhs, rhs) + } + + pub fn add_e_f(&self, lhs: Ext, rhs: Felt) -> Ext { + (self.add_ext_base)(self.ptr, lhs, rhs) + } + + pub fn add_f_e(&self, lhs: Felt, rhs: Ext) -> Ext { + 
self.add_e_f(rhs, lhs) + } + + pub fn add_e_const_f(&self, lhs: Ext, rhs: F) -> Ext { + (self.add_const_base)(self.ptr, lhs, rhs) + } + + pub fn add_f_const_e( + &self, + lhs: Felt, + rhs: EF, + handle: *mut ExtHandle, + ) -> Ext { + (self.add_felt_const_ext)(self.ptr, lhs, rhs, handle) + } + + pub fn add_const_e_f( + &self, + lhs: EF, + rhs: Felt, + handle: *mut ExtHandle, + ) -> Ext { + self.add_f_const_e(rhs, lhs, handle) + } + + pub fn add_const_e(&self, lhs: Ext, rhs: EF) -> Ext { + (self.add_const_ext)(self.ptr, lhs, rhs) + } + + pub fn add_e_const(&self, lhs: EF, rhs: Ext) -> Ext { + (self.add_const_ext)(self.ptr, rhs, lhs) + } + + pub fn sub_e(&self, lhs: Ext, rhs: Ext) -> Ext { + (self.sub_ext)(self.ptr, lhs, rhs) + } + + pub fn sub_e_f(&self, lhs: Ext, rhs: Felt) -> Ext { + (self.sub_ext_base)(self.ptr, lhs, rhs) + } + + pub fn sub_f_e(&self, lhs: Felt, rhs: Ext) -> Ext { + (self.sub_base_ext)(self.ptr, lhs, rhs) + } + + pub fn sub_e_const_f(&self, lhs: Ext, rhs: F) -> Ext { + (self.sub_const_ext)(self.ptr, lhs, EF::from_base(rhs)) + } + + pub fn sub_f_const_e( + &self, + lhs: Felt, + rhs: EF, + handle: *mut ExtHandle, + ) -> Ext { + (self.sub_felt_const_ext)(self.ptr, lhs, rhs, handle) + } + + pub fn sub_const_e_f( + &self, + lhs: EF, + rhs: Felt, + handle: *mut ExtHandle, + ) -> Ext { + // TODO: optimize to one opcode. + let rhs = self.add_f_const_e(rhs, EF::zero(), handle); + self.sub_e_const(lhs, rhs) + } + + pub fn sub_const_e(&self, lhs: Ext, rhs: EF) -> Ext { + (self.sub_const_ext)(self.ptr, lhs, rhs) + } + + pub fn sub_e_const(&self, lhs: EF, rhs: Ext) -> Ext { + (self.sub_ext_const)(self.ptr, lhs, rhs) + } + + pub fn neg_e(&self, lhs: Ext) -> Ext { + (self.neg_ext)(self.ptr, lhs) + } + + pub fn mul_e(&self, lhs: Ext, rhs: Ext) -> Ext { + (self.mul_ext)(self.ptr, lhs, rhs) + } + + pub fn mul_e_f(&self, lhs: Ext, rhs: Felt) -> Ext { + (self.mul_ext_base)(self.ptr, lhs, rhs) + } + + pub fn mul_f_e(&self, lhs: Felt, rhs: Ext) -> Ext { + self.mul_e_f(rhs, lhs) + } + + pub fn mul_e_const_f(&self, lhs: Ext, rhs: F) -> Ext { + (self.mul_const_ext)(self.ptr, lhs, EF::from_base(rhs)) + } + + pub fn mul_f_const_e( + &self, + lhs: Felt, + rhs: EF, + handle: *mut ExtHandle, + ) -> Ext { + (self.mul_felt_const_ext)(self.ptr, lhs, rhs, handle) + } + + pub fn mul_const_e_f( + &self, + lhs: EF, + rhs: Felt, + handle: *mut ExtHandle, + ) -> Ext { + self.mul_f_const_e(rhs, lhs, handle) + } + + pub fn mul_const_e(&self, lhs: Ext, rhs: EF) -> Ext { + (self.mul_const_ext)(self.ptr, lhs, rhs) + } + + pub fn mul_e_const(&self, lhs: EF, rhs: Ext) -> Ext { + (self.mul_const_ext)(self.ptr, rhs, lhs) + } + + pub fn div_e(&self, lhs: Ext, rhs: Ext) -> Ext { + (self.div_ext)(self.ptr, lhs, rhs) + } + + pub fn div_e_f(&self, lhs: Ext, rhs: Felt) -> Ext { + (self.div_ext_base)(self.ptr, lhs, rhs) + } + + pub fn div_f_e(&self, lhs: Felt, rhs: Ext) -> Ext { + (self.div_base_ext)(self.ptr, lhs, rhs) + } + + pub fn div_e_const_f(&self, lhs: Ext, rhs: F) -> Ext { + (self.div_const_base)(self.ptr, lhs, rhs) + } + + pub fn div_f_const_e( + &self, + lhs: Felt, + rhs: EF, + handle: *mut ExtHandle, + ) -> Ext { + (self.div_felt_const_ext)(self.ptr, lhs, rhs, handle) + } + + pub fn div_const_e(&self, lhs: Ext, rhs: EF) -> Ext { + (self.div_const_ext)(self.ptr, lhs, rhs) + } + + pub fn div_e_const(&self, lhs: EF, rhs: Ext) -> Ext { + (self.div_ext_const)(self.ptr, lhs, rhs) + } +} diff --git a/crates/recursion/compiler/src/ir/bits.rs b/crates/recursion/compiler/src/ir/bits.rs index 25cf97c05f..361d5acd7d 100644 
--- a/crates/recursion/compiler/src/ir/bits.rs +++ b/crates/recursion/compiler/src/ir/bits.rs @@ -10,7 +10,7 @@ impl Builder { assert!(C::N::bits() == NUM_BITS); let output = self.dyn_array::>(NUM_BITS); - self.push(DslIr::HintBitsV(output.clone(), num)); + self.push_op(DslIr::HintBitsV(output.clone(), num)); let sum: Var<_> = self.eval(C::N::zero()); for i in 0..NUM_BITS { @@ -42,7 +42,7 @@ impl Builder { output.push(self.uninit()); } - self.push(DslIr::CircuitNum2BitsV(num, bits, output.clone())); + self.push_op(DslIr::CircuitNum2BitsV(num, bits, output.clone())); output } @@ -59,7 +59,7 @@ impl Builder { /// Converts a felt to bits. pub fn num2bits_f(&mut self, num: Felt) -> Array> { let output = self.dyn_array::>(NUM_BITS); - self.push(DslIr::HintBitsF(output.clone(), num)); + self.push_op(DslIr::HintBitsF(output.clone(), num)); let sum: Felt<_> = self.eval(C::F::zero()); for i in 0..NUM_BITS { @@ -84,10 +84,7 @@ impl Builder { output.push(self.uninit()); } - self.push(DslIr::CircuitNum2BitsF(num, output.clone())); - - let output_array = self.vec(output.clone()); - self.less_than_bb_modulus(output_array); + self.push_op(DslIr::CircuitNum2BitsF(num, output.clone())); output } diff --git a/crates/recursion/compiler/src/ir/builder.rs b/crates/recursion/compiler/src/ir/builder.rs index 19cd92b6e9..e3fdba9b89 100644 --- a/crates/recursion/compiler/src/ir/builder.rs +++ b/crates/recursion/compiler/src/ir/builder.rs @@ -1,4 +1,4 @@ -use std::{iter::Zip, vec::IntoIter}; +use std::{cell::UnsafeCell, iter::Zip, ptr, vec::IntoIter}; use backtrace::Backtrace; use p3_field::AbstractField; @@ -6,8 +6,9 @@ use sp1_core_machine::utils::sp1_debug_mode; use sp1_primitives::types::RecursionProgramType; use super::{ - Array, Config, DslIr, Ext, Felt, FromConstant, SymbolicExt, SymbolicFelt, SymbolicUsize, - SymbolicVar, Usize, Var, Variable, + Array, Config, DslIr, Ext, ExtHandle, ExtOperations, Felt, FeltHandle, FeltOperations, + FromConstant, SymbolicExt, SymbolicFelt, SymbolicUsize, SymbolicVar, Usize, Var, VarHandle, + VarOperations, Variable, }; /// TracedVec is a Vec wrapper that records a trace whenever an element is pushed. When extending @@ -76,17 +77,25 @@ impl IntoIterator for TracedVec { } } +#[derive(Debug, Clone)] +pub struct InnerBuilder { + pub(crate) variable_count: u32, + pub operations: TracedVec>, +} + /// A builder for the DSL. /// /// Can compile to both assembly and a set of constraints. -#[derive(Debug, Clone)] +#[derive(Debug)] pub struct Builder { - pub(crate) variable_count: u32, - pub operations: TracedVec>, + pub(crate) inner: Box>>, pub(crate) nb_public_values: Option>, pub(crate) witness_var_count: u32, pub(crate) witness_felt_count: u32, pub(crate) witness_ext_count: u32, + pub(crate) var_handle: Box>, + pub(crate) felt_handle: Box>, + pub(crate) ext_handle: Box>, pub(crate) p2_hash_num: Var, pub(crate) debug: bool, pub(crate) is_sub_builder: bool, @@ -102,15 +111,29 @@ impl Default for Builder { impl Builder { pub fn new(program_type: RecursionProgramType) -> Self { // We need to create a temporary placeholder for the p2_hash_num variable. 
- let placeholder_p2_hash_num = Var::new(0); + let placeholder_p2_hash_num = Var::new(0, ptr::null_mut()); - let mut new_builder = Self { + let mut inner = Box::new(UnsafeCell::new(InnerBuilder { variable_count: 0, + operations: Default::default(), + })); + + let var_handle = Box::new(VarOperations::var_handle(&mut inner)); + let mut ext_handle = Box::new(ExtOperations::ext_handle(&mut inner)); + let felt_handle = Box::new(FeltOperations::felt_handle( + &mut inner, + ext_handle.as_mut() as *mut _ as *mut (), + )); + + let mut new_builder = Self { + inner, witness_var_count: 0, witness_felt_count: 0, witness_ext_count: 0, - operations: Default::default(), nb_public_values: None, + var_handle, + felt_handle, + ext_handle, p2_hash_num: placeholder_p2_hash_num, debug: false, is_sub_builder: false, @@ -129,31 +152,35 @@ impl Builder { debug: bool, program_type: RecursionProgramType, ) -> Self { - Self { - variable_count, - // Witness counts are only used when the target is a gnark circuit. And sub-builders - // are not used when the target is a gnark circuit, so it's fine to set the - // witness counts to 0. - witness_var_count: 0, - witness_felt_count: 0, - witness_ext_count: 0, - operations: Default::default(), - nb_public_values, - p2_hash_num, - debug, - is_sub_builder: true, - program_type, - } + let mut builder = Self::new(program_type); + builder.inner.get_mut().variable_count = variable_count; + builder.nb_public_values = nb_public_values; + builder.p2_hash_num = p2_hash_num; + builder.debug = debug; + + builder } /// Pushes an operation to the builder. - pub fn push(&mut self, op: DslIr) { - self.operations.push(op); + pub fn push_op(&mut self, op: DslIr) { + self.inner.get_mut().operations.push(op); + } + + pub fn extend_ops(&mut self, ops: impl IntoIterator, Option)>) { + self.inner.get_mut().operations.extend(ops); } /// Pushes an operation to the builder and records a trace if SP1_DEBUG. pub fn trace_push(&mut self, op: DslIr) { - self.operations.trace_push(op); + self.inner.get_mut().operations.trace_push(op); + } + + pub fn variable_count(&self) -> u32 { + unsafe { (*self.inner.get()).variable_count } + } + + pub fn into_operations(self) -> TracedVec> { + self.inner.into_inner().operations } /// Creates an uninitialized variable. @@ -279,7 +306,7 @@ impl Builder { pub fn lt(&mut self, lhs: Var, rhs: Var) -> Var { let result = self.uninit(); - self.operations.push(DslIr::LessThan(result, lhs, rhs)); + self.push_op(DslIr::LessThan(result, lhs, rhs)); result } @@ -312,7 +339,7 @@ impl Builder { /// Break out of a loop. pub fn break_loop(&mut self) { - self.operations.push(DslIr::Break); + self.push_op(DslIr::Break); } pub fn print_debug(&mut self, val: usize) { @@ -322,23 +349,23 @@ impl Builder { /// Print a variable. pub fn print_v(&mut self, dst: Var) { - self.operations.push(DslIr::PrintV(dst)); + self.push_op(DslIr::PrintV(dst)); } /// Print a felt. pub fn print_f(&mut self, dst: Felt) { - self.operations.push(DslIr::PrintF(dst)); + self.push_op(DslIr::PrintF(dst)); } /// Print an ext. pub fn print_e(&mut self, dst: Ext) { - self.operations.push(DslIr::PrintE(dst)); + self.push_op(DslIr::PrintE(dst)); } /// Hint the length of the next vector of variables. 
pub fn hint_len(&mut self) -> Var { let len = self.uninit(); - self.operations.push(DslIr::HintLen(len)); + self.push_op(DslIr::HintLen(len)); len } @@ -346,7 +373,7 @@ impl Builder { pub fn hint_var(&mut self) -> Var { let len = self.hint_len(); let arr = self.dyn_array(len); - self.operations.push(DslIr::HintVars(arr.clone())); + self.push_op(DslIr::HintVars(arr.clone())); self.get(&arr, 0) } @@ -354,7 +381,7 @@ impl Builder { pub fn hint_felt(&mut self) -> Felt { let len = self.hint_len(); let arr = self.dyn_array(len); - self.operations.push(DslIr::HintFelts(arr.clone())); + self.push_op(DslIr::HintFelts(arr.clone())); self.get(&arr, 0) } @@ -362,7 +389,7 @@ impl Builder { pub fn hint_ext(&mut self) -> Ext { let len = self.hint_len(); let arr = self.dyn_array(len); - self.operations.push(DslIr::HintExts(arr.clone())); + self.push_op(DslIr::HintExts(arr.clone())); self.get(&arr, 0) } @@ -370,7 +397,7 @@ impl Builder { pub fn hint_vars(&mut self) -> Array> { let len = self.hint_len(); let arr = self.dyn_array(len); - self.operations.push(DslIr::HintVars(arr.clone())); + self.push_op(DslIr::HintVars(arr.clone())); arr } @@ -378,7 +405,7 @@ impl Builder { pub fn hint_felts(&mut self) -> Array> { let len = self.hint_len(); let arr = self.dyn_array(len); - self.operations.push(DslIr::HintFelts(arr.clone())); + self.push_op(DslIr::HintFelts(arr.clone())); arr } @@ -386,14 +413,14 @@ impl Builder { pub fn hint_exts(&mut self) -> Array> { let len = self.hint_len(); let arr = self.dyn_array(len); - self.operations.push(DslIr::HintExts(arr.clone())); + self.push_op(DslIr::HintExts(arr.clone())); arr } pub fn witness_var(&mut self) -> Var { assert!(!self.is_sub_builder, "Cannot create a witness var with a sub builder"); let witness = self.uninit(); - self.operations.push(DslIr::WitnessVar(witness, self.witness_var_count)); + self.push_op(DslIr::WitnessVar(witness, self.witness_var_count)); self.witness_var_count += 1; witness } @@ -401,7 +428,7 @@ impl Builder { pub fn witness_felt(&mut self) -> Felt { assert!(!self.is_sub_builder, "Cannot create a witness felt with a sub builder"); let witness = self.uninit(); - self.operations.push(DslIr::WitnessFelt(witness, self.witness_felt_count)); + self.push_op(DslIr::WitnessFelt(witness, self.witness_felt_count)); self.witness_felt_count += 1; witness } @@ -409,14 +436,14 @@ impl Builder { pub fn witness_ext(&mut self) -> Ext { assert!(!self.is_sub_builder, "Cannot create a witness ext with a sub builder"); let witness = self.uninit(); - self.operations.push(DslIr::WitnessExt(witness, self.witness_ext_count)); + self.push_op(DslIr::WitnessExt(witness, self.witness_ext_count)); self.witness_ext_count += 1; witness } /// Throws an error. pub fn error(&mut self) { - self.operations.trace_push(DslIr::Error()); + self.trace_push(DslIr::Error()); } /// Materializes a usize into a variable. @@ -429,7 +456,7 @@ impl Builder { /// Register a felt as public value. This is append to the proof's public values buffer. pub fn register_public_value(&mut self, val: Felt) { - self.operations.push(DslIr::RegisterPublicValue(val)); + self.push_op(DslIr::RegisterPublicValue(val)); } /// Register and commits a felt as public value. This value will be constrained when verified. 
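These builder hunks route every mutation through `push_op`, `variable_count()` and `into_operations()` instead of touching `operations` directly, since the state now lives in `InnerBuilder` behind `Box<UnsafeCell<..>>`. A reduced, hypothetical model of that layout and its accessors:

use std::cell::UnsafeCell;

struct Inner {
    variable_count: u32,
    operations: Vec<String>,
}

struct Builder {
    inner: Box<UnsafeCell<Inner>>,
}

impl Builder {
    fn new() -> Self {
        let inner = Inner { variable_count: 0, operations: Vec::new() };
        Self { inner: Box::new(UnsafeCell::new(inner)) }
    }

    // All mutation goes through one entry point.
    fn push_op(&mut self, op: String) {
        self.inner.get_mut().operations.push(op);
    }

    // Read-only access without `&mut self`, analogous to `variable_count()`.
    fn variable_count(&self) -> u32 {
        unsafe { (*self.inner.get()).variable_count }
    }

    // Consuming accessor replacing direct reads of `builder.operations`.
    fn into_operations(self) -> Vec<String> {
        self.inner.into_inner().operations
    }
}

fn main() {
    let mut b = Builder::new();
    b.push_op("AddF".to_string());
    assert_eq!(b.variable_count(), 0);
    assert_eq!(b.into_operations(), vec!["AddF".to_string()]);
}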
@@ -440,7 +467,7 @@ impl Builder { } let nb_public_values = *self.nb_public_values.as_ref().unwrap(); - self.operations.push(DslIr::Commit(val, nb_public_values)); + self.push_op(DslIr::Commit(val, nb_public_values)); self.assign(nb_public_values, nb_public_values + C::N::one()); } @@ -455,33 +482,34 @@ impl Builder { } pub fn commit_vkey_hash_circuit(&mut self, var: Var) { - self.operations.push(DslIr::CircuitCommitVkeyHash(var)); + self.push_op(DslIr::CircuitCommitVkeyHash(var)); } pub fn commit_commited_values_digest_circuit(&mut self, var: Var) { - self.operations.push(DslIr::CircuitCommitCommitedValuesDigest(var)); + self.push_op(DslIr::CircuitCommitCommitedValuesDigest(var)); } pub fn reduce_e(&mut self, ext: Ext) { - self.operations.push(DslIr::ReduceE(ext)); + self.push_op(DslIr::ReduceE(ext)); } pub fn felt2var_circuit(&mut self, felt: Felt) -> Var { let var = self.uninit(); - self.operations.push(DslIr::CircuitFelt2Var(felt, var)); + self.push_op(DslIr::CircuitFelt2Var(felt, var)); var } pub fn cycle_tracker(&mut self, name: &str) { - self.operations.push(DslIr::CycleTracker(name.to_string())); + self.push_op(DslIr::CycleTracker(name.to_string())); } pub fn halt(&mut self) { - self.operations.push(DslIr::Halt); + self.push_op(DslIr::Halt); } } /// A builder for the DSL that handles if statements. +#[allow(dead_code)] pub struct IfBuilder<'a, C: Config> { lhs: SymbolicVar, rhs: SymbolicVar, @@ -490,6 +518,7 @@ pub struct IfBuilder<'a, C: Config> { } /// A set of conditions that if statements can be based on. +#[allow(dead_code)] enum IfCondition { EqConst(N, N), NeConst(N, N), @@ -506,7 +535,7 @@ impl<'a, C: Config> IfBuilder<'a, C> { // Execute the `then` block and collect the instructions. let mut f_builder = Builder::::new_sub_builder( - self.builder.variable_count, + self.builder.variable_count(), self.builder.nb_public_values, self.builder.p2_hash_num, self.builder.debug, @@ -515,35 +544,35 @@ impl<'a, C: Config> IfBuilder<'a, C> { f(&mut f_builder); self.builder.p2_hash_num = f_builder.p2_hash_num; - let then_instructions = f_builder.operations; + let then_instructions = f_builder.into_operations(); // Dispatch instructions to the correct conditional block. match condition { IfCondition::EqConst(lhs, rhs) => { if lhs == rhs { - self.builder.operations.extend(then_instructions); + self.builder.extend_ops(then_instructions); } } IfCondition::NeConst(lhs, rhs) => { if lhs != rhs { - self.builder.operations.extend(then_instructions); + self.builder.extend_ops(then_instructions); } } IfCondition::Eq(lhs, rhs) => { let op = DslIr::IfEq(Box::new((lhs, rhs, then_instructions, Default::default()))); - self.builder.operations.push(op); + self.builder.push_op(op); } IfCondition::EqI(lhs, rhs) => { let op = DslIr::IfEqI(Box::new((lhs, rhs, then_instructions, Default::default()))); - self.builder.operations.push(op); + self.builder.push_op(op); } IfCondition::Ne(lhs, rhs) => { let op = DslIr::IfNe(Box::new((lhs, rhs, then_instructions, Default::default()))); - self.builder.operations.push(op); + self.builder.push_op(op); } IfCondition::NeI(lhs, rhs) => { let op = DslIr::IfNeI(Box::new((lhs, rhs, then_instructions, Default::default()))); - self.builder.operations.push(op); + self.builder.push_op(op); } } } @@ -556,7 +585,7 @@ impl<'a, C: Config> IfBuilder<'a, C> { // Get the condition reduced from the expressions for lhs and rhs. 
let condition = self.condition(); let mut then_builder = Builder::::new_sub_builder( - self.builder.variable_count, + self.builder.variable_count(), self.builder.nb_public_values, self.builder.p2_hash_num, self.builder.debug, @@ -567,10 +596,10 @@ impl<'a, C: Config> IfBuilder<'a, C> { then_f(&mut then_builder); self.builder.p2_hash_num = then_builder.p2_hash_num; - let then_instructions = then_builder.operations; + let then_instructions = then_builder.into_operations(); let mut else_builder = Builder::::new_sub_builder( - self.builder.variable_count, + self.builder.variable_count(), self.builder.nb_public_values, self.builder.p2_hash_num, self.builder.debug, @@ -579,112 +608,113 @@ impl<'a, C: Config> IfBuilder<'a, C> { else_f(&mut else_builder); self.builder.p2_hash_num = else_builder.p2_hash_num; - let else_instructions = else_builder.operations; + let else_instructions = else_builder.into_operations(); // Dispatch instructions to the correct conditional block. match condition { IfCondition::EqConst(lhs, rhs) => { if lhs == rhs { - self.builder.operations.extend(then_instructions); + self.builder.extend_ops(then_instructions); } else { - self.builder.operations.extend(else_instructions); + self.builder.extend_ops(else_instructions); } } IfCondition::NeConst(lhs, rhs) => { if lhs != rhs { - self.builder.operations.extend(then_instructions); + self.builder.extend_ops(then_instructions); } else { - self.builder.operations.extend(else_instructions); + self.builder.extend_ops(else_instructions); } } IfCondition::Eq(lhs, rhs) => { let op = DslIr::IfEq(Box::new((lhs, rhs, then_instructions, else_instructions))); - self.builder.operations.push(op); + self.builder.push_op(op); } IfCondition::EqI(lhs, rhs) => { let op = DslIr::IfEqI(Box::new((lhs, rhs, then_instructions, else_instructions))); - self.builder.operations.push(op); + self.builder.push_op(op); } IfCondition::Ne(lhs, rhs) => { let op = DslIr::IfNe(Box::new((lhs, rhs, then_instructions, else_instructions))); - self.builder.operations.push(op); + self.builder.push_op(op); } IfCondition::NeI(lhs, rhs) => { let op = DslIr::IfNeI(Box::new((lhs, rhs, then_instructions, else_instructions))); - self.builder.operations.push(op); + self.builder.push_op(op); } } } fn condition(&mut self) -> IfCondition { - match (self.lhs.clone(), self.rhs.clone(), self.is_eq) { - (SymbolicVar::Const(lhs, _), SymbolicVar::Const(rhs, _), true) => { - IfCondition::EqConst(lhs, rhs) - } - (SymbolicVar::Const(lhs, _), SymbolicVar::Const(rhs, _), false) => { - IfCondition::NeConst(lhs, rhs) - } - (SymbolicVar::Const(lhs, _), SymbolicVar::Val(rhs, _), true) => { - IfCondition::EqI(rhs, lhs) - } - (SymbolicVar::Const(lhs, _), SymbolicVar::Val(rhs, _), false) => { - IfCondition::NeI(rhs, lhs) - } - (SymbolicVar::Const(lhs, _), rhs, true) => { - let rhs: Var = self.builder.eval(rhs); - IfCondition::EqI(rhs, lhs) - } - (SymbolicVar::Const(lhs, _), rhs, false) => { - let rhs: Var = self.builder.eval(rhs); - IfCondition::NeI(rhs, lhs) - } - (SymbolicVar::Val(lhs, _), SymbolicVar::Const(rhs, _), true) => { - let lhs: Var = self.builder.eval(lhs); - IfCondition::EqI(lhs, rhs) - } - (SymbolicVar::Val(lhs, _), SymbolicVar::Const(rhs, _), false) => { - let lhs: Var = self.builder.eval(lhs); - IfCondition::NeI(lhs, rhs) - } - (lhs, SymbolicVar::Const(rhs, _), true) => { - let lhs: Var = self.builder.eval(lhs); - IfCondition::EqI(lhs, rhs) - } - (lhs, SymbolicVar::Const(rhs, _), false) => { - let lhs: Var = self.builder.eval(lhs); - IfCondition::NeI(lhs, rhs) - } - 
(SymbolicVar::Val(lhs, _), SymbolicVar::Val(rhs, _), true) => IfCondition::Eq(lhs, rhs), - (SymbolicVar::Val(lhs, _), SymbolicVar::Val(rhs, _), false) => { - IfCondition::Ne(lhs, rhs) - } - (SymbolicVar::Val(lhs, _), rhs, true) => { - let rhs: Var = self.builder.eval(rhs); - IfCondition::Eq(lhs, rhs) - } - (SymbolicVar::Val(lhs, _), rhs, false) => { - let rhs: Var = self.builder.eval(rhs); - IfCondition::Ne(lhs, rhs) - } - (lhs, SymbolicVar::Val(rhs, _), true) => { - let lhs: Var = self.builder.eval(lhs); - IfCondition::Eq(lhs, rhs) - } - (lhs, SymbolicVar::Val(rhs, _), false) => { - let lhs: Var = self.builder.eval(lhs); - IfCondition::Ne(lhs, rhs) - } - (lhs, rhs, true) => { - let lhs: Var = self.builder.eval(lhs); - let rhs: Var = self.builder.eval(rhs); - IfCondition::Eq(lhs, rhs) - } - (lhs, rhs, false) => { - let lhs: Var = self.builder.eval(lhs); - let rhs: Var = self.builder.eval(rhs); - IfCondition::Ne(lhs, rhs) - } - } + unimplemented!("Deprecated") + // match (self.lhs.clone(), self.rhs.clone(), self.is_eq) { + // (SymbolicVar::Const(lhs, _), SymbolicVar::Const(rhs, _), true) => { + // IfCondition::EqConst(lhs, rhs) + // } + // (SymbolicVar::Const(lhs, _), SymbolicVar::Const(rhs, _), false) => { + // IfCondition::NeConst(lhs, rhs) + // } + // (SymbolicVar::Const(lhs, _), SymbolicVar::Val(rhs, _), true) => { + // IfCondition::EqI(rhs, lhs) + // } + // (SymbolicVar::Const(lhs, _), SymbolicVar::Val(rhs, _), false) => { + // IfCondition::NeI(rhs, lhs) + // } + // (SymbolicVar::Const(lhs, _), rhs, true) => { + // let rhs: Var = self.builder.eval(rhs); + // IfCondition::EqI(rhs, lhs) + // } + // (SymbolicVar::Const(lhs, _), rhs, false) => { + // let rhs: Var = self.builder.eval(rhs); + // IfCondition::NeI(rhs, lhs) + // } + // (SymbolicVar::Val(lhs, _), SymbolicVar::Const(rhs, _), true) => { + // let lhs: Var = self.builder.eval(lhs); + // IfCondition::EqI(lhs, rhs) + // } + // (SymbolicVar::Val(lhs, _), SymbolicVar::Const(rhs, _), false) => { + // let lhs: Var = self.builder.eval(lhs); + // IfCondition::NeI(lhs, rhs) + // } + // (lhs, SymbolicVar::Const(rhs, _), true) => { + // let lhs: Var = self.builder.eval(lhs); + // IfCondition::EqI(lhs, rhs) + // } + // (lhs, SymbolicVar::Const(rhs, _), false) => { + // let lhs: Var = self.builder.eval(lhs); + // IfCondition::NeI(lhs, rhs) + // } + // (SymbolicVar::Val(lhs, _), SymbolicVar::Val(rhs, _), true) => IfCondition::Eq(lhs, rhs), + // (SymbolicVar::Val(lhs, _), SymbolicVar::Val(rhs, _), false) => { + // IfCondition::Ne(lhs, rhs) + // } + // (SymbolicVar::Val(lhs, _), rhs, true) => { + // let rhs: Var = self.builder.eval(rhs); + // IfCondition::Eq(lhs, rhs) + // } + // (SymbolicVar::Val(lhs, _), rhs, false) => { + // let rhs: Var = self.builder.eval(rhs); + // IfCondition::Ne(lhs, rhs) + // } + // (lhs, SymbolicVar::Val(rhs, _), true) => { + // let lhs: Var = self.builder.eval(lhs); + // IfCondition::Eq(lhs, rhs) + // } + // (lhs, SymbolicVar::Val(rhs, _), false) => { + // let lhs: Var = self.builder.eval(lhs); + // IfCondition::Ne(lhs, rhs) + // } + // (lhs, rhs, true) => { + // let lhs: Var = self.builder.eval(lhs); + // let rhs: Var = self.builder.eval(rhs); + // IfCondition::Eq(lhs, rhs) + // } + // (lhs, rhs, false) => { + // let lhs: Var = self.builder.eval(lhs); + // let rhs: Var = self.builder.eval(rhs); + // IfCondition::Ne(lhs, rhs) + // } + // } } } @@ -706,7 +736,7 @@ impl<'a, C: Config> RangeBuilder<'a, C> { let step_size = C::N::from_canonical_usize(self.step_size); let loop_variable: Var = self.builder.uninit(); let mut 
loop_body_builder = Builder::::new_sub_builder( - self.builder.variable_count, + self.builder.variable_count(), self.builder.nb_public_values, self.builder.p2_hash_num, self.builder.debug, @@ -716,7 +746,7 @@ impl<'a, C: Config> RangeBuilder<'a, C> { f(loop_variable, &mut loop_body_builder); self.builder.p2_hash_num = loop_body_builder.p2_hash_num; - let loop_instructions = loop_body_builder.operations; + let loop_instructions = loop_body_builder.into_operations(); let op = DslIr::For(Box::new(( self.start, @@ -725,6 +755,6 @@ impl<'a, C: Config> RangeBuilder<'a, C> { loop_variable, loop_instructions, ))); - self.builder.operations.push(op); + self.builder.push_op(op); } } diff --git a/crates/recursion/compiler/src/ir/mod.rs b/crates/recursion/compiler/src/ir/mod.rs index c0098c7932..060362ddb0 100644 --- a/crates/recursion/compiler/src/ir/mod.rs +++ b/crates/recursion/compiler/src/ir/mod.rs @@ -1,5 +1,6 @@ -use p3_field::{ExtensionField, PrimeField, TwoAdicField}; +use p3_field::{ExtensionField, PrimeField, PrimeField32, TwoAdicField}; +mod arithmetic; mod bits; mod builder; mod collections; @@ -12,6 +13,7 @@ mod types; mod utils; mod var; +pub use arithmetic::*; pub use builder::*; pub use collections::*; pub use fold::*; @@ -23,6 +25,6 @@ pub use var::*; pub trait Config: Clone + Default { type N: PrimeField; - type F: PrimeField + TwoAdicField; + type F: PrimeField32 + TwoAdicField; type EF: ExtensionField + TwoAdicField; } diff --git a/crates/recursion/compiler/src/ir/poseidon.rs b/crates/recursion/compiler/src/ir/poseidon.rs index 4ef469e5fb..b06360eb6f 100644 --- a/crates/recursion/compiler/src/ir/poseidon.rs +++ b/crates/recursion/compiler/src/ir/poseidon.rs @@ -15,8 +15,7 @@ impl Builder { } Array::Dyn(_, len) => self.array::>(*len), }; - self.operations - .push(DslIr::Poseidon2PermuteBabyBear(Box::new((output.clone(), array.clone())))); + self.push_op(DslIr::Poseidon2PermuteBabyBear(Box::new((output.clone(), array.clone())))); output } @@ -24,8 +23,7 @@ impl Builder { /// /// Reference: [p3_poseidon2::Poseidon2] pub fn poseidon2_permute_mut(&mut self, array: &Array>) { - self.operations - .push(DslIr::Poseidon2PermuteBabyBear(Box::new((array.clone(), array.clone())))); + self.push_op(DslIr::Poseidon2PermuteBabyBear(Box::new((array.clone(), array.clone())))); } /// Applies the Poseidon2 absorb function to the given array. @@ -36,7 +34,7 @@ impl Builder { p2_hash_and_absorb_num: Var, input: &Array>, ) { - self.operations.push(DslIr::Poseidon2AbsorbBabyBear(p2_hash_and_absorb_num, input.clone())); + self.push_op(DslIr::Poseidon2AbsorbBabyBear(p2_hash_and_absorb_num, input.clone())); } /// Applies the Poseidon2 finalize to the given hash number. @@ -47,7 +45,7 @@ impl Builder { p2_hash_num: Var, output: &Array>, ) { - self.operations.push(DslIr::Poseidon2FinalizeBabyBear(p2_hash_num, output.clone())); + self.push_op(DslIr::Poseidon2FinalizeBabyBear(p2_hash_num, output.clone())); } /// Applies the Poseidon2 compression function to the given array. @@ -78,7 +76,7 @@ impl Builder { left: &Array>, right: &Array>, ) { - self.operations.push(DslIr::Poseidon2CompressBabyBear(Box::new(( + self.push_op(DslIr::Poseidon2CompressBabyBear(Box::new(( result.clone(), left.clone(), right.clone(), diff --git a/crates/recursion/compiler/src/ir/ptr.rs b/crates/recursion/compiler/src/ir/ptr.rs index daf658d85f..ed62bcfacc 100644 --- a/crates/recursion/compiler/src/ir/ptr.rs +++ b/crates/recursion/compiler/src/ir/ptr.rs @@ -18,7 +18,7 @@ impl Builder { /// Allocates an array on the heap. 
pub(crate) fn alloc(&mut self, len: Usize, size: usize) -> Ptr { let ptr = Ptr::uninit(self); - self.push(DslIr::Alloc(ptr, len, size)); + self.push_op(DslIr::Alloc(ptr, len, size)); ptr } diff --git a/crates/recursion/compiler/src/ir/symbolic.rs b/crates/recursion/compiler/src/ir/symbolic.rs index 1ff565c7b9..ea9955b0f5 100644 --- a/crates/recursion/compiler/src/ir/symbolic.rs +++ b/crates/recursion/compiler/src/ir/symbolic.rs @@ -1,180 +1,40 @@ -use alloc::rc::Rc; use core::{ any::Any, ops::{Add, Div, Mul, Neg, Sub}, }; use std::{ any::TypeId, - hash::Hash, iter::{Product, Sum}, - mem, + mem::{self, ManuallyDrop}, ops::{AddAssign, DivAssign, MulAssign, SubAssign}, }; -use p3_field::{AbstractField, ExtensionField, Field, FieldArray}; +use p3_field::{AbstractField, ExtensionField, Field}; -use super::{Ext, Felt, Usize, Var}; - -const NUM_RANDOM_ELEMENTS: usize = 4; - -pub type Digest = FieldArray; - -pub fn elements() -> Digest { - let powers = [1671541671, 1254988180, 442438744, 1716490559]; - let generator = F::generator(); - - Digest::from(powers.map(|p| generator.exp_u64(p))) -} - -pub fn ext_elements>() -> Digest { - let powers = [1021539871, 1430550064, 447478069, 1248903325]; - let generator = EF::generator(); - - Digest::from(powers.map(|p| generator.exp_u64(p))) -} - -fn digest_id(id: u32) -> Digest { - let elements = elements(); - Digest::from( - elements.0.map(|e: F| (e + F::from_canonical_u32(id)).try_inverse().unwrap_or(F::one())), - ) -} - -fn digest_id_ext>(id: u32) -> Digest { - let elements = ext_elements(); - Digest::from( - elements.0.map(|e: EF| (e + EF::from_canonical_u32(id)).try_inverse().unwrap_or(EF::one())), - ) -} +use crate::ir::ExtHandle; -fn div_digests(a: Digest, b: Digest) -> Digest { - Digest::from(core::array::from_fn(|i| a.0[i] / b.0[i])) -} +use super::{Ext, Felt, Usize, Var}; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy)] pub enum SymbolicVar { - Const(N, Digest), - Val(Var, Digest), - Add(Rc>, Rc>, Digest), - Mul(Rc>, Rc>, Digest), - Sub(Rc>, Rc>, Digest), - Neg(Rc>, Digest), + Const(N), + Val(Var), } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy)] pub enum SymbolicFelt { - Const(F, Digest), - Val(Felt, Digest), - Add(Rc>, Rc>, Digest), - Mul(Rc>, Rc>, Digest), - Sub(Rc>, Rc>, Digest), - Div(Rc>, Rc>, Digest), - Neg(Rc>, Digest), + Const(F), + Val(Felt), } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy)] pub enum SymbolicExt { - Const(EF, Digest), - Base(Rc>, Digest), - Val(Ext, Digest), - Add(Rc>, Rc>, Digest), - Mul(Rc>, Rc>, Digest), - Sub(Rc>, Rc>, Digest), - Div(Rc>, Rc>, Digest), - Neg(Rc>, Digest), -} - -impl Hash for SymbolicVar { - fn hash(&self, state: &mut H) { - for elem in self.digest().0.iter() { - elem.hash(state); - } - } -} - -impl PartialEq for SymbolicVar { - fn eq(&self, other: &Self) -> bool { - self.digest() == other.digest() - } -} - -impl Eq for SymbolicVar {} - -impl Hash for SymbolicFelt { - fn hash(&self, state: &mut H) { - for elem in self.digest().0.iter() { - elem.hash(state); - } - } -} - -impl PartialEq for SymbolicFelt { - fn eq(&self, other: &Self) -> bool { - self.digest() == other.digest() - } -} - -impl Eq for SymbolicFelt {} - -impl Hash for SymbolicExt { - fn hash(&self, state: &mut H) { - for elem in self.digest().0.iter() { - elem.hash(state); - } - } -} - -impl PartialEq for SymbolicExt { - fn eq(&self, other: &Self) -> bool { - self.digest() == other.digest() - } -} - -impl Eq for SymbolicExt {} - -impl SymbolicVar { - pub(crate) const fn digest(&self) -> Digest { - match self { - 
SymbolicVar::Const(_, d) => *d, - SymbolicVar::Val(_, d) => *d, - SymbolicVar::Add(_, _, d) => *d, - SymbolicVar::Mul(_, _, d) => *d, - SymbolicVar::Sub(_, _, d) => *d, - SymbolicVar::Neg(_, d) => *d, - } - } -} - -impl SymbolicFelt { - pub(crate) const fn digest(&self) -> Digest { - match self { - SymbolicFelt::Const(_, d) => *d, - SymbolicFelt::Val(_, d) => *d, - SymbolicFelt::Add(_, _, d) => *d, - SymbolicFelt::Mul(_, _, d) => *d, - SymbolicFelt::Sub(_, _, d) => *d, - SymbolicFelt::Div(_, _, d) => *d, - SymbolicFelt::Neg(_, d) => *d, - } - } -} - -impl SymbolicExt { - pub(crate) const fn digest(&self) -> Digest { - match self { - SymbolicExt::Const(_, d) => *d, - SymbolicExt::Base(_, d) => *d, - SymbolicExt::Val(_, d) => *d, - SymbolicExt::Add(_, _, d) => *d, - SymbolicExt::Mul(_, _, d) => *d, - SymbolicExt::Sub(_, _, d) => *d, - SymbolicExt::Div(_, _, d) => *d, - SymbolicExt::Neg(_, d) => *d, - } - } + Const(EF), + Base(SymbolicFelt), + Val(Ext), } -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, Copy)] pub enum SymbolicUsize { Const(usize), Var(SymbolicVar), @@ -191,25 +51,13 @@ pub enum ExtOperand> { } impl> ExtOperand { - pub fn digest(&self) -> Digest { - match self { - ExtOperand::Base(f) => SymbolicFelt::from(*f).digest().0.map(EF::from_base).into(), - ExtOperand::Const(ef) => (*ef).into(), - ExtOperand::Felt(f) => SymbolicFelt::from(*f).digest().0.map(EF::from_base).into(), - ExtOperand::Ext(e) => digest_id_ext::(e.0), - ExtOperand::SymFelt(f) => f.digest().0.map(EF::from_base).into(), - ExtOperand::Sym(e) => e.digest(), - } - } - pub fn symbolic(self) -> SymbolicExt { - let digest = self.digest(); match self { - ExtOperand::Base(f) => SymbolicExt::Base(Rc::new(SymbolicFelt::from(f)), digest), - ExtOperand::Const(ef) => SymbolicExt::Const(ef, digest), - ExtOperand::Felt(f) => SymbolicExt::Base(Rc::new(SymbolicFelt::from(f)), digest), - ExtOperand::Ext(e) => SymbolicExt::Val(e, digest), - ExtOperand::SymFelt(f) => SymbolicExt::Base(Rc::new(f), digest), + ExtOperand::Base(f) => SymbolicExt::Base(SymbolicFelt::from(f)), + ExtOperand::Const(ef) => SymbolicExt::Const(ef), + ExtOperand::Felt(f) => SymbolicExt::Base(SymbolicFelt::from(f)), + ExtOperand::Ext(e) => SymbolicExt::Val(e), + ExtOperand::SymFelt(f) => SymbolicExt::Base(f), ExtOperand::Sym(e) => e, } } @@ -221,7 +69,7 @@ pub trait ExtConst> { impl> ExtConst for EF { fn cons(self) -> SymbolicExt { - SymbolicExt::Const(self, self.into()) + SymbolicExt::Const(self) } } @@ -357,7 +205,7 @@ impl> AbstractField for SymbolicExt { } fn from_f(f: Self::F) -> Self { - SymbolicExt::Const(f, f.into()) + SymbolicExt::Const(f) } fn from_bool(b: bool) -> Self { SymbolicExt::from_f(EF::from_bool(b)) @@ -395,13 +243,13 @@ impl> AbstractField for SymbolicExt { impl From for SymbolicVar { fn from(n: N) -> Self { - SymbolicVar::Const(n, n.into()) + SymbolicVar::Const(n) } } impl From for SymbolicFelt { fn from(f: F) -> Self { - SymbolicFelt::Const(f, f.into()) + SymbolicFelt::Const(f) } } @@ -415,13 +263,13 @@ impl> From for SymbolicExt { impl From> for SymbolicVar { fn from(v: Var) -> Self { - SymbolicVar::Val(v, digest_id(v.0)) + SymbolicVar::Val(v) } } impl From> for SymbolicFelt { fn from(f: Felt) -> Self { - SymbolicFelt::Val(f, digest_id(f.0)) + SymbolicFelt::Val(f) } } @@ -437,8 +285,21 @@ impl Add for SymbolicVar { type Output = Self; fn add(self, rhs: Self) -> Self::Output { - let digest = self.digest() + rhs.digest(); - SymbolicVar::Add(Rc::new(self), Rc::new(rhs), digest) + match (self, rhs) { + (Self::Const(lhs), 
Self::Const(rhs)) => Self::Const(lhs + rhs), + (Self::Val(lhs), Self::Const(rhs)) => { + let res = unsafe { (*lhs.handle).add_const_v(lhs, rhs) }; + Self::Val(res) + } + (Self::Const(lhs), Self::Val(rhs)) => { + let res = unsafe { (*rhs.handle).add_v_const(lhs, rhs) }; + Self::Val(res) + } + (Self::Val(lhs), Self::Val(rhs)) => { + let res = unsafe { (*lhs.handle).add_v(lhs, rhs) }; + Self::Val(res) + } + } } } @@ -446,8 +307,21 @@ impl Add for SymbolicFelt { type Output = Self; fn add(self, rhs: Self) -> Self::Output { - let digest = self.digest() + rhs.digest(); - SymbolicFelt::Add(Rc::new(self), Rc::new(rhs), digest) + match (self, rhs) { + (Self::Const(lhs), Self::Const(rhs)) => Self::Const(lhs + rhs), + (Self::Val(lhs), Self::Const(rhs)) => { + let res = unsafe { (*lhs.handle).add_const_f(lhs, rhs) }; + Self::Val(res) + } + (Self::Const(lhs), Self::Val(rhs)) => { + let res = unsafe { (*rhs.handle).add_f_const(lhs, rhs) }; + Self::Val(res) + } + (Self::Val(lhs), Self::Val(rhs)) => { + let res = unsafe { (*lhs.handle).add_f(lhs, rhs) }; + Self::Val(res) + } + } } } @@ -456,8 +330,66 @@ impl, E: ExtensionOperand> Add for Sym fn add(self, rhs: E) -> Self::Output { let rhs = rhs.to_operand().symbolic(); - let digest = self.digest() + rhs.digest(); - SymbolicExt::Add(Rc::new(self), Rc::new(rhs), digest) + + match (self, rhs) { + (Self::Const(lhs), Self::Const(rhs)) => Self::Const(lhs + rhs), + (Self::Val(lhs), Self::Const(rhs)) => { + let res = unsafe { (*lhs.handle).add_const_e(lhs, rhs) }; + Self::Val(res) + } + (Self::Const(lhs), Self::Val(rhs)) => { + let res = unsafe { (*rhs.handle).add_e_const(lhs, rhs) }; + Self::Val(res) + } + (Self::Const(lhs), Self::Base(rhs)) => match rhs { + SymbolicFelt::Const(rhs) => Self::Const(lhs + rhs), + SymbolicFelt::Val(rhs) => { + let ext_handle_ptr = + unsafe { (*rhs.handle).ext_handle_ptr as *mut ExtHandle }; + let ext_handle: ManuallyDrop<_> = + unsafe { ManuallyDrop::new(Box::from_raw(ext_handle_ptr)) }; + let res = ext_handle.add_const_e_f(lhs, rhs, ext_handle_ptr); + Self::Val(res) + } + }, + (Self::Base(lhs), Self::Const(rhs)) => match lhs { + SymbolicFelt::Const(lhs) => Self::Const(rhs + lhs), + SymbolicFelt::Val(lhs) => { + let ext_handle_ptr = + unsafe { (*lhs.handle).ext_handle_ptr as *mut ExtHandle }; + let ext_handle: ManuallyDrop<_> = + unsafe { ManuallyDrop::new(Box::from_raw(ext_handle_ptr)) }; + let res = ext_handle.add_f_const_e(lhs, rhs, ext_handle_ptr); + Self::Val(res) + } + }, + + (Self::Val(lhs), Self::Val(rhs)) => { + let res = unsafe { (*lhs.handle).add_e(lhs, rhs) }; + Self::Val(res) + } + (Self::Base(lhs), Self::Base(rhs)) => Self::Base(lhs + rhs), + (Self::Base(lhs), Self::Val(rhs)) => match lhs { + SymbolicFelt::Const(lhs) => { + let res = unsafe { (*rhs.handle).add_e_const(EF::from_base(lhs), rhs) }; + Self::Val(res) + } + SymbolicFelt::Val(lhs) => { + let res = unsafe { (*rhs.handle).add_f_e(lhs, rhs) }; + Self::Val(res) + } + }, + (Self::Val(lhs), Self::Base(rhs)) => match rhs { + SymbolicFelt::Const(rhs) => { + let res = unsafe { (*lhs.handle).add_const_e(lhs, EF::from_base(rhs)) }; + Self::Val(res) + } + SymbolicFelt::Val(rhs) => { + let res = unsafe { (*lhs.handle).add_e_f(lhs, rhs) }; + Self::Val(res) + } + }, + } } } @@ -465,8 +397,21 @@ impl Mul for SymbolicVar { type Output = Self; fn mul(self, rhs: Self) -> Self::Output { - let digest = self.digest() * rhs.digest(); - SymbolicVar::Mul(Rc::new(self), Rc::new(rhs), digest) + match (self, rhs) { + (Self::Const(lhs), Self::Const(rhs)) => Self::Const(lhs * rhs), + 
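Note: the arithmetic impls in this region replace the old `Rc`-based expression nodes (`Add`, `Mul`, `Sub`, ... carrying digests) with eager lowering. Each binary operation now calls through the operand's `handle` pointer, which presumably allocates a fresh variable and records a `DslIr` instruction, so a symbolic value is always just `Const` or `Val`. The sketch below is a safe stand-in for that idea only; the real code uses a raw `*mut VarHandle` and methods such as `add_v`/`add_const_v` whose exact signatures are not shown in this diff.

```rust
// Safe stand-in for handle-based eager lowering: instead of building an
// expression tree, `add` immediately records an instruction through a shared
// handle and returns a fresh value id. Rc<RefCell<..>> replaces the raw
// pointer purely so the sketch compiles without unsafe.
use std::{cell::RefCell, rc::Rc};

enum Ir {
    AddV(u32, u32, u32),  // dst, lhs, rhs
    AddVI(u32, u32, u64), // dst, lhs, constant
}

#[derive(Default)]
struct Handle {
    next_id: u32,
    ops: Vec<Ir>,
}

#[derive(Clone, Copy)]
struct Var {
    idx: u32,
}

#[derive(Clone, Copy)]
enum SymbolicVar {
    Const(u64),
    Val(Var),
}

impl Handle {
    fn fresh(&mut self) -> Var {
        let idx = self.next_id;
        self.next_id += 1;
        Var { idx }
    }
}

fn add(handle: &Rc<RefCell<Handle>>, lhs: SymbolicVar, rhs: SymbolicVar) -> SymbolicVar {
    let mut h = handle.borrow_mut();
    match (lhs, rhs) {
        // Constant folding happens up front, as in the diff.
        (SymbolicVar::Const(a), SymbolicVar::Const(b)) => SymbolicVar::Const(a + b),
        (SymbolicVar::Val(a), SymbolicVar::Const(b))
        | (SymbolicVar::Const(b), SymbolicVar::Val(a)) => {
            let dst = h.fresh();
            h.ops.push(Ir::AddVI(dst.idx, a.idx, b));
            SymbolicVar::Val(dst)
        }
        (SymbolicVar::Val(a), SymbolicVar::Val(b)) => {
            let dst = h.fresh();
            h.ops.push(Ir::AddV(dst.idx, a.idx, b.idx));
            SymbolicVar::Val(dst)
        }
    }
}

fn main() {
    let handle = Rc::new(RefCell::new(Handle::default()));
    let a = SymbolicVar::Val(handle.borrow_mut().fresh());
    let b = SymbolicVar::Const(3);
    let _c = add(&handle, add(&handle, a, b), a);
    assert_eq!(handle.borrow().ops.len(), 2);
}
```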
(Self::Val(lhs), Self::Const(rhs)) => { + let res = unsafe { (*lhs.handle).mul_const_v(lhs, rhs) }; + Self::Val(res) + } + (Self::Const(lhs), Self::Val(rhs)) => { + let res = unsafe { (*rhs.handle).mul_v_const(lhs, rhs) }; + Self::Val(res) + } + (Self::Val(lhs), Self::Val(rhs)) => { + let res = unsafe { (*lhs.handle).mul_v(lhs, rhs) }; + Self::Val(res) + } + } } } @@ -474,8 +419,21 @@ impl Mul for SymbolicFelt { type Output = Self; fn mul(self, rhs: Self) -> Self::Output { - let digest = self.digest() * rhs.digest(); - SymbolicFelt::Mul(Rc::new(self), Rc::new(rhs), digest) + match (self, rhs) { + (Self::Const(lhs), Self::Const(rhs)) => Self::Const(lhs * rhs), + (Self::Val(lhs), Self::Const(rhs)) => { + let res = unsafe { (*lhs.handle).mul_const_f(lhs, rhs) }; + Self::Val(res) + } + (Self::Const(lhs), Self::Val(rhs)) => { + let res = unsafe { (*rhs.handle).mul_f_const(lhs, rhs) }; + Self::Val(res) + } + (Self::Val(lhs), Self::Val(rhs)) => { + let res = unsafe { (*lhs.handle).mul_f(lhs, rhs) }; + Self::Val(res) + } + } } } @@ -483,36 +441,66 @@ impl, E: Any> Mul for SymbolicExt { type Output = Self; fn mul(self, rhs: E) -> Self::Output { - let rhs = rhs.to_operand(); - let rhs_digest = rhs.digest(); - let prod_digest = self.digest() * rhs_digest; - match rhs { - ExtOperand::Base(f) => SymbolicExt::Mul( - Rc::new(self), - Rc::new(SymbolicExt::Base(Rc::new(SymbolicFelt::from(f)), rhs_digest)), - prod_digest, - ), - ExtOperand::Const(ef) => SymbolicExt::Mul( - Rc::new(self), - Rc::new(SymbolicExt::Const(ef, rhs_digest)), - prod_digest, - ), - ExtOperand::Felt(f) => SymbolicExt::Mul( - Rc::new(self), - Rc::new(SymbolicExt::Base(Rc::new(SymbolicFelt::from(f)), rhs_digest)), - prod_digest, - ), - ExtOperand::Ext(e) => SymbolicExt::Mul( - Rc::new(self), - Rc::new(SymbolicExt::Val(e, rhs_digest)), - prod_digest, - ), - ExtOperand::SymFelt(f) => SymbolicExt::Mul( - Rc::new(self), - Rc::new(SymbolicExt::Base(Rc::new(f), rhs_digest)), - prod_digest, - ), - ExtOperand::Sym(e) => SymbolicExt::Mul(Rc::new(self), Rc::new(e), prod_digest), + let rhs = rhs.to_operand().symbolic(); + + match (self, rhs) { + (Self::Const(lhs), Self::Const(rhs)) => Self::Const(lhs * rhs), + (Self::Val(lhs), Self::Const(rhs)) => { + let res = unsafe { (*lhs.handle).mul_const_e(lhs, rhs) }; + Self::Val(res) + } + (Self::Const(lhs), Self::Val(rhs)) => { + let res = unsafe { (*rhs.handle).mul_e_const(lhs, rhs) }; + Self::Val(res) + } + (Self::Const(lhs), Self::Base(rhs)) => match rhs { + SymbolicFelt::Const(rhs) => Self::Const(lhs * rhs), + SymbolicFelt::Val(rhs) => { + let ext_handle_ptr = + unsafe { (*rhs.handle).ext_handle_ptr as *mut ExtHandle }; + let ext_handle: ManuallyDrop<_> = + unsafe { ManuallyDrop::new(Box::from_raw(ext_handle_ptr)) }; + let res = ext_handle.mul_const_e_f(lhs, rhs, ext_handle_ptr); + Self::Val(res) + } + }, + (Self::Base(lhs), Self::Const(rhs)) => match lhs { + SymbolicFelt::Const(lhs) => Self::Const(EF::from_base(lhs) * rhs), + SymbolicFelt::Val(lhs) => { + let ext_handle_ptr = + unsafe { (*lhs.handle).ext_handle_ptr as *mut ExtHandle }; + let ext_handle: ManuallyDrop<_> = + unsafe { ManuallyDrop::new(Box::from_raw(ext_handle_ptr)) }; + let res = ext_handle.mul_f_const_e(lhs, rhs, ext_handle_ptr); + Self::Val(res) + } + }, + + (Self::Val(lhs), Self::Val(rhs)) => { + let res = unsafe { (*lhs.handle).mul_e(lhs, rhs) }; + Self::Val(res) + } + (Self::Base(lhs), Self::Base(rhs)) => Self::Base(lhs * rhs), + (Self::Base(lhs), Self::Val(rhs)) => match lhs { + SymbolicFelt::Const(lhs) => { + let res = unsafe { 
(*rhs.handle).mul_e_const(EF::from_base(lhs), rhs) }; + Self::Val(res) + } + SymbolicFelt::Val(lhs) => { + let res = unsafe { (*rhs.handle).mul_f_e(lhs, rhs) }; + Self::Val(res) + } + }, + (Self::Val(lhs), Self::Base(rhs)) => match rhs { + SymbolicFelt::Const(rhs) => { + let res = unsafe { (*lhs.handle).mul_const_e(lhs, EF::from_base(rhs)) }; + Self::Val(res) + } + SymbolicFelt::Val(rhs) => { + let res = unsafe { (*lhs.handle).mul_e_f(lhs, rhs) }; + Self::Val(res) + } + }, } } } @@ -521,8 +509,21 @@ impl Sub for SymbolicVar { type Output = Self; fn sub(self, rhs: Self) -> Self::Output { - let digest = self.digest() - rhs.digest(); - SymbolicVar::Sub(Rc::new(self), Rc::new(rhs), digest) + match (self, rhs) { + (Self::Const(lhs), Self::Const(rhs)) => Self::Const(lhs - rhs), + (Self::Val(lhs), Self::Const(rhs)) => { + let res = unsafe { (*lhs.handle).sub_v_const(lhs, rhs) }; + Self::Val(res) + } + (Self::Const(lhs), Self::Val(rhs)) => { + let res = unsafe { (*rhs.handle).sub_const_v(lhs, rhs) }; + Self::Val(res) + } + (Self::Val(lhs), Self::Val(rhs)) => { + let res = unsafe { (*lhs.handle).sub_v(lhs, rhs) }; + Self::Val(res) + } + } } } @@ -530,8 +531,21 @@ impl Sub for SymbolicFelt { type Output = Self; fn sub(self, rhs: Self) -> Self::Output { - let digest = self.digest() - rhs.digest(); - SymbolicFelt::Sub(Rc::new(self), Rc::new(rhs), digest) + match (self, rhs) { + (Self::Const(lhs), Self::Const(rhs)) => Self::Const(lhs - rhs), + (Self::Val(lhs), Self::Const(rhs)) => { + let res = unsafe { (*lhs.handle).sub_f_const(lhs, rhs) }; + Self::Val(res) + } + (Self::Const(lhs), Self::Val(rhs)) => { + let res = unsafe { (*rhs.handle).sub_const_f(lhs, rhs) }; + Self::Val(res) + } + (Self::Val(lhs), Self::Val(rhs)) => { + let res = unsafe { (*lhs.handle).sub_f(lhs, rhs) }; + Self::Val(res) + } + } } } @@ -539,32 +553,66 @@ impl, E: Any> Sub for SymbolicExt { type Output = Self; fn sub(self, rhs: E) -> Self::Output { - let rhs = rhs.to_operand(); - let rhs_digest = rhs.digest(); - let digest = self.digest() - rhs_digest; - match rhs { - ExtOperand::Base(f) => SymbolicExt::Sub( - Rc::new(self), - Rc::new(SymbolicExt::Base(Rc::new(SymbolicFelt::from(f)), rhs_digest)), - digest, - ), - ExtOperand::Const(ef) => { - SymbolicExt::Sub(Rc::new(self), Rc::new(SymbolicExt::Const(ef, rhs_digest)), digest) + let rhs = rhs.to_operand().symbolic(); + + match (self, rhs) { + (Self::Const(lhs), Self::Const(rhs)) => Self::Const(lhs - rhs), + (Self::Val(lhs), Self::Const(rhs)) => { + let res = unsafe { (*lhs.handle).sub_const_e(lhs, rhs) }; + Self::Val(res) + } + (Self::Const(lhs), Self::Val(rhs)) => { + let res = unsafe { (*rhs.handle).sub_e_const(lhs, rhs) }; + Self::Val(res) } - ExtOperand::Felt(f) => SymbolicExt::Sub( - Rc::new(self), - Rc::new(SymbolicExt::Base(Rc::new(SymbolicFelt::from(f)), rhs_digest)), - digest, - ), - ExtOperand::Ext(e) => { - SymbolicExt::Sub(Rc::new(self), Rc::new(SymbolicExt::Val(e, rhs_digest)), digest) + (Self::Const(lhs), Self::Base(rhs)) => match rhs { + SymbolicFelt::Const(rhs) => Self::Const(lhs - rhs), + SymbolicFelt::Val(rhs) => { + let ext_handle_ptr = + unsafe { (*rhs.handle).ext_handle_ptr as *mut ExtHandle }; + let ext_handle: ManuallyDrop<_> = + unsafe { ManuallyDrop::new(Box::from_raw(ext_handle_ptr)) }; + let res = ext_handle.sub_const_e_f(lhs, rhs, ext_handle_ptr); + Self::Val(res) + } + }, + (Self::Base(lhs), Self::Const(rhs)) => match lhs { + SymbolicFelt::Const(lhs) => Self::Const(EF::from_base(lhs) - rhs), + SymbolicFelt::Val(lhs) => { + let ext_handle_ptr = + unsafe { 
(*lhs.handle).ext_handle_ptr as *mut ExtHandle }; + let ext_handle: ManuallyDrop<_> = + unsafe { ManuallyDrop::new(Box::from_raw(ext_handle_ptr)) }; + let res = ext_handle.sub_f_const_e(lhs, rhs, ext_handle_ptr); + Self::Val(res) + } + }, + + (Self::Val(lhs), Self::Val(rhs)) => { + let res = unsafe { (*lhs.handle).sub_e(lhs, rhs) }; + Self::Val(res) } - ExtOperand::SymFelt(f) => SymbolicExt::Sub( - Rc::new(self), - Rc::new(SymbolicExt::Base(Rc::new(f), rhs_digest)), - digest, - ), - ExtOperand::Sym(e) => SymbolicExt::Sub(Rc::new(self), Rc::new(e), digest), + (Self::Base(lhs), Self::Base(rhs)) => Self::Base(lhs - rhs), + (Self::Base(lhs), Self::Val(rhs)) => match lhs { + SymbolicFelt::Const(lhs) => { + let res = unsafe { (*rhs.handle).sub_e_const(EF::from_base(lhs), rhs) }; + Self::Val(res) + } + SymbolicFelt::Val(lhs) => { + let res = unsafe { (*rhs.handle).sub_f_e(lhs, rhs) }; + Self::Val(res) + } + }, + (Self::Val(lhs), Self::Base(rhs)) => match rhs { + SymbolicFelt::Const(rhs) => { + let res = unsafe { (*lhs.handle).sub_const_e(lhs, EF::from_base(rhs)) }; + Self::Val(res) + } + SymbolicFelt::Val(rhs) => { + let res = unsafe { (*lhs.handle).sub_e_f(lhs, rhs) }; + Self::Val(res) + } + }, } } } @@ -573,10 +621,21 @@ impl Div for SymbolicFelt { type Output = Self; fn div(self, rhs: Self) -> Self::Output { - let self_digest = self.digest(); - let rhs_digest = rhs.digest(); - let digest = div_digests(self_digest, rhs_digest); - SymbolicFelt::Div(Rc::new(self), Rc::new(rhs), digest) + match (self, rhs) { + (Self::Const(lhs), Self::Const(rhs)) => Self::Const(lhs / rhs), + (Self::Val(lhs), Self::Const(rhs)) => { + let res = unsafe { (*lhs.handle).div_f_const(lhs, rhs) }; + Self::Val(res) + } + (Self::Const(lhs), Self::Val(rhs)) => { + let res = unsafe { (*rhs.handle).div_const_f(lhs, rhs) }; + Self::Val(res) + } + (Self::Val(lhs), Self::Val(rhs)) => { + let res = unsafe { (*lhs.handle).div_f(lhs, rhs) }; + Self::Val(res) + } + } } } @@ -584,32 +643,71 @@ impl, E: Any> Div for SymbolicExt { type Output = Self; fn div(self, rhs: E) -> Self::Output { - let rhs = rhs.to_operand(); - let rhs_digest = rhs.digest(); - let digest = div_digests(self.digest(), rhs_digest); - match rhs { - ExtOperand::Base(f) => SymbolicExt::Div( - Rc::new(self), - Rc::new(SymbolicExt::Base(Rc::new(SymbolicFelt::from(f)), rhs_digest)), - digest, - ), - ExtOperand::Const(ef) => { - SymbolicExt::Div(Rc::new(self), Rc::new(SymbolicExt::Const(ef, rhs_digest)), digest) + let rhs = rhs.to_operand().symbolic(); + + match (self, rhs) { + (Self::Const(lhs), Self::Const(rhs)) => Self::Const(lhs / rhs), + (Self::Val(lhs), Self::Const(rhs)) => { + let res = unsafe { (*lhs.handle).div_const_e(lhs, rhs) }; + Self::Val(res) + } + (Self::Const(lhs), Self::Val(rhs)) => { + let res = unsafe { (*rhs.handle).div_e_const(lhs, rhs) }; + Self::Val(res) } - ExtOperand::Felt(f) => SymbolicExt::Div( - Rc::new(self), - Rc::new(SymbolicExt::Base(Rc::new(SymbolicFelt::from(f)), rhs_digest)), - digest, - ), - ExtOperand::Ext(e) => { - SymbolicExt::Div(Rc::new(self), Rc::new(SymbolicExt::Val(e, rhs_digest)), digest) + (Self::Const(lhs), Self::Base(rhs)) => match rhs { + SymbolicFelt::Const(rhs) => Self::Const(lhs / EF::from_base(rhs)), + SymbolicFelt::Val(rhs) => { + let ext_handle_ptr = + unsafe { (*rhs.handle).ext_handle_ptr as *mut ExtHandle }; + let ext_handle: ManuallyDrop<_> = + unsafe { ManuallyDrop::new(Box::from_raw(ext_handle_ptr)) }; + let rhs = rhs.inverse(); + if let SymbolicFelt::Val(rhs) = rhs { + let res = ext_handle.mul_const_e_f(lhs, 
rhs, ext_handle_ptr); + Self::Val(res) + } else { + unreachable!() + } + } + }, + (Self::Base(lhs), Self::Const(rhs)) => match lhs { + SymbolicFelt::Const(lhs) => Self::Const(EF::from_base(lhs) / rhs), + SymbolicFelt::Val(lhs) => { + let ext_handle_ptr = + unsafe { (*lhs.handle).ext_handle_ptr as *mut ExtHandle }; + let ext_handle: ManuallyDrop<_> = + unsafe { ManuallyDrop::new(Box::from_raw(ext_handle_ptr)) }; + let res = ext_handle.div_f_const_e(lhs, rhs, ext_handle_ptr); + Self::Val(res) + } + }, + + (Self::Val(lhs), Self::Val(rhs)) => { + let res = unsafe { (*lhs.handle).div_e(lhs, rhs) }; + Self::Val(res) } - ExtOperand::SymFelt(f) => SymbolicExt::Div( - Rc::new(self), - Rc::new(SymbolicExt::Base(Rc::new(f), rhs_digest)), - digest, - ), - ExtOperand::Sym(e) => SymbolicExt::Div(Rc::new(self), Rc::new(e), digest), + (Self::Base(lhs), Self::Base(rhs)) => Self::Base(lhs / rhs), + (Self::Base(lhs), Self::Val(rhs)) => match lhs { + SymbolicFelt::Const(lhs) => { + let res = unsafe { (*rhs.handle).div_e_const(EF::from_base(lhs), rhs) }; + Self::Val(res) + } + SymbolicFelt::Val(lhs) => { + let res = unsafe { (*rhs.handle).div_f_e(lhs, rhs) }; + Self::Val(res) + } + }, + (Self::Val(lhs), Self::Base(rhs)) => match rhs { + SymbolicFelt::Const(rhs) => { + let res = unsafe { (*lhs.handle).div_const_e(lhs, EF::from_base(rhs)) }; + Self::Val(res) + } + SymbolicFelt::Val(rhs) => { + let res = unsafe { (*lhs.handle).div_e_f(lhs, rhs) }; + Self::Val(res) + } + }, } } } @@ -618,8 +716,13 @@ impl Neg for SymbolicVar { type Output = Self; fn neg(self) -> Self::Output { - let digest = -self.digest(); - SymbolicVar::Neg(Rc::new(self), digest) + match self { + SymbolicVar::Const(n) => SymbolicVar::Const(-n), + SymbolicVar::Val(n) => { + let res = unsafe { (*n.handle).neg_v(n) }; + SymbolicVar::Val(res) + } + } } } @@ -627,8 +730,13 @@ impl Neg for SymbolicFelt { type Output = Self; fn neg(self) -> Self::Output { - let digest = -self.digest(); - SymbolicFelt::Neg(Rc::new(self), digest) + match self { + SymbolicFelt::Const(f) => SymbolicFelt::Const(-f), + SymbolicFelt::Val(f) => { + let res = unsafe { (*f.handle).neg_f(f) }; + SymbolicFelt::Val(res) + } + } } } @@ -636,8 +744,14 @@ impl> Neg for SymbolicExt { type Output = Self; fn neg(self) -> Self::Output { - let digest = -self.digest(); - SymbolicExt::Neg(Rc::new(self), digest) + match self { + SymbolicExt::Const(ef) => SymbolicExt::Const(-ef), + SymbolicExt::Base(f) => SymbolicExt::Base(-f), + SymbolicExt::Val(ef) => { + let res = unsafe { (*ef.handle).neg_e(ef) }; + SymbolicExt::Val(res) + } + } } } @@ -680,8 +794,7 @@ impl Sub for SymbolicVar { type Output = Self; fn sub(self, rhs: N) -> Self::Output { - let digest = self.digest() - rhs; - SymbolicVar::Sub(Rc::new(self), Rc::new(SymbolicVar::from_f(rhs)), digest) + self - SymbolicVar::from(rhs) } } @@ -996,19 +1109,19 @@ impl Sum for SymbolicVar { impl AddAssign for SymbolicVar { fn add_assign(&mut self, rhs: Self) { - *self = self.clone() + rhs; + *self = *self + rhs; } } impl SubAssign for SymbolicVar { fn sub_assign(&mut self, rhs: Self) { - *self = self.clone() - rhs; + *self = *self - rhs; } } impl MulAssign for SymbolicVar { fn mul_assign(&mut self, rhs: Self) { - *self = self.clone() * rhs; + *self = *self * rhs; } } @@ -1032,19 +1145,19 @@ impl Product for SymbolicFelt { impl AddAssign for SymbolicFelt { fn add_assign(&mut self, rhs: Self) { - *self = self.clone() + rhs; + *self = *self + rhs; } } impl SubAssign for SymbolicFelt { fn sub_assign(&mut self, rhs: Self) { - *self = self.clone() - rhs; 
+ *self = *self - rhs; } } impl MulAssign for SymbolicFelt { fn mul_assign(&mut self, rhs: Self) { - *self = self.clone() * rhs; + *self = *self * rhs; } } @@ -1074,25 +1187,25 @@ impl> Default for SymbolicExt { impl, E: Any> AddAssign for SymbolicExt { fn add_assign(&mut self, rhs: E) { - *self = self.clone() + rhs; + *self = *self + rhs; } } impl, E: Any> SubAssign for SymbolicExt { fn sub_assign(&mut self, rhs: E) { - *self = self.clone() - rhs; + *self = *self - rhs; } } impl, E: Any> MulAssign for SymbolicExt { fn mul_assign(&mut self, rhs: E) { - *self = self.clone() * rhs; + *self = *self * rhs; } } impl, E: Any> DivAssign for SymbolicExt { fn div_assign(&mut self, rhs: E) { - *self = self.clone() / rhs; + *self = *self / rhs; } } @@ -1135,14 +1248,14 @@ impl, E: Any> ExtensionOperand for E { // *Saftey*: We know that E is a Symbolic Felt and we can transmute it to // SymbolicFelt but we need to clone the pointer. let value_ref = unsafe { mem::transmute::<&E, &SymbolicFelt>(&self) }; - let value = value_ref.clone(); + let value = *value_ref; ExtOperand::::SymFelt(value) } ty if ty == TypeId::of::>() => { // *Saftey*: We know that E is a SymbolicExt and we can transmute it to // SymbolicExt but we need to clone the pointer. let value_ref = unsafe { mem::transmute::<&E, &SymbolicExt>(&self) }; - let value = value_ref.clone(); + let value = *value_ref; ExtOperand::::Sym(value) } ty if ty == TypeId::of::>() => { @@ -1324,7 +1437,7 @@ impl Sub> for Usize { impl MulAssign> for SymbolicFelt { fn mul_assign(&mut self, rhs: Felt) { - *self = self.clone() * Self::from(rhs); + *self = *self * Self::from(rhs); } } diff --git a/crates/recursion/compiler/src/ir/types.rs b/crates/recursion/compiler/src/ir/types.rs index 2cbef531e5..7dd9036075 100644 --- a/crates/recursion/compiler/src/ir/types.rs +++ b/crates/recursion/compiler/src/ir/types.rs @@ -1,32 +1,47 @@ use alloc::format; -use core::marker::PhantomData; -use std::{collections::HashMap, hash::Hash}; -use p3_field::{AbstractField, ExtensionField, Field}; +use p3_field::{AbstractExtensionField, AbstractField, ExtensionField, Field}; use serde::{Deserialize, Serialize}; use super::{ - Builder, Config, DslIr, ExtConst, FromConstant, MemIndex, MemVariable, Ptr, SymbolicExt, - SymbolicFelt, SymbolicUsize, SymbolicVar, Variable, + Builder, Config, DslIr, ExtConst, ExtHandle, FeltHandle, FromConstant, MemIndex, MemVariable, + Ptr, SymbolicExt, SymbolicFelt, SymbolicUsize, SymbolicVar, VarHandle, Variable, }; /// A variable that represents a native field element. /// /// Used for counters, simple loops, etc. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct Var(pub u32, pub PhantomData); +pub struct Var { + pub idx: u32, + pub(crate) handle: *mut VarHandle, +} /// A variable that represents an emulated field element. /// /// Used to do field arithmetic for recursive verification. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub struct Felt(pub u32, pub PhantomData); +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct Felt { + pub idx: u32, + pub(crate) handle: *mut FeltHandle, +} /// A variable that represents an emulated extension field element. /// /// Used to do extension field arithmetic for recursive verification. 
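Note: `Var`, `Felt`, and `Ext` now carry the emitting handle alongside their index, and the symbolic wrappers are `Copy`, which is why the `*Assign` impls above can write `*self = *self + rhs` instead of cloning an expression tree. A small illustrative sketch follows; `VarHandle` is a placeholder type here, and the generic parameters of the real structs are omitted.

```rust
// Sketch of the new variable layout from the diff: the id travels together
// with a pointer back to the handle that knows how to emit instructions for
// it. Field names (`idx`, `handle`) follow the diff; the handle type is a
// stand-in.
struct VarHandle;

#[derive(Clone, Copy)]
struct Var {
    idx: u32,
    handle: *mut VarHandle,
}

impl Var {
    const fn new(idx: u32, handle: *mut VarHandle) -> Self {
        Self { idx, handle }
    }
}

// Because the symbolic wrappers are now `Copy`, op-assign can move out of
// `*self` instead of cloning an Rc-based node.
#[derive(Clone, Copy)]
struct SymVar(u64);

impl std::ops::Add for SymVar {
    type Output = Self;
    fn add(self, rhs: Self) -> Self {
        SymVar(self.0 + rhs.0)
    }
}

impl std::ops::AddAssign for SymVar {
    fn add_assign(&mut self, rhs: Self) {
        *self = *self + rhs; // fine for Copy types; no `.clone()` needed
    }
}

fn main() {
    let mut x = SymVar(1);
    x += SymVar(2);
    assert_eq!(x.0, 3);

    let v = Var::new(0, std::ptr::null_mut());
    assert_eq!(v.idx, 0);
    let _ = v.handle;
}
```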
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub struct Ext(pub u32, pub PhantomData<(F, EF)>); +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct Ext { + pub idx: u32, + pub(crate) handle: *mut ExtHandle, +} + +unsafe impl Send for Var {} +unsafe impl Send for Ext {} +unsafe impl Send for Felt {} + +unsafe impl Sync for Var {} +unsafe impl Sync for Ext {} +unsafe impl Sync for Felt {} /// A variable that represents either a constant or variable counter. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] @@ -89,30 +104,30 @@ impl From for Usize { } impl Var { - pub const fn new(id: u32) -> Self { - Self(id, PhantomData) + pub const fn new(idx: u32, handle: *mut VarHandle) -> Self { + Self { idx, handle } } pub fn id(&self) -> String { - format!("var{}", self.0) + format!("var{}", self.idx) } pub fn loc(&self) -> String { - self.0.to_string() + self.idx.to_string() } } impl Felt { - pub const fn new(id: u32) -> Self { - Self(id, PhantomData) + pub const fn new(id: u32, handle: *mut FeltHandle) -> Self { + Self { idx: id, handle } } pub fn id(&self) -> String { - format!("felt{}", self.0) + format!("felt{}", self.idx) } pub fn loc(&self) -> String { - self.0.to_string() + self.idx.to_string() } pub fn inverse(&self) -> SymbolicFelt @@ -124,16 +139,16 @@ impl Felt { } impl Ext { - pub const fn new(id: u32) -> Self { - Self(id, PhantomData) + pub const fn new(id: u32, handle: *mut ExtHandle) -> Self { + Self { idx: id, handle } } pub fn id(&self) -> String { - format!("ext{}", self.0) + format!("ext{}", self.idx) } pub fn loc(&self) -> String { - self.0.to_string() + self.idx.to_string() } pub fn inverse(&self) -> SymbolicExt @@ -215,194 +230,25 @@ impl Variable for Usize { } } -impl Var { - fn assign_with_cache>( - &self, - src: SymbolicVar, - builder: &mut Builder, - cache: &mut HashMap, Self>, - ) { - if let Some(v) = cache.get(&src) { - builder.operations.push(DslIr::AddVI(*self, *v, C::N::zero())); - return; - } - match src { - SymbolicVar::Const(c, _) => { - builder.operations.push(DslIr::ImmV(*self, c)); - } - SymbolicVar::Val(v, _) => { - builder.operations.push(DslIr::AddVI(*self, v, C::N::zero())); - } - SymbolicVar::Add(lhs, rhs, _) => match (&*lhs, &*rhs) { - (SymbolicVar::Const(lhs, _), SymbolicVar::Const(rhs, _)) => { - let sum = *lhs + *rhs; - builder.operations.push(DslIr::ImmV(*self, sum)); - } - (SymbolicVar::Const(lhs, _), SymbolicVar::Val(rhs, _)) => { - builder.operations.push(DslIr::AddVI(*self, *rhs, *lhs)); - } - (SymbolicVar::Const(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign(rhs.clone(), builder); - builder.push(DslIr::AddVI(*self, rhs_value, *lhs)); - } - (SymbolicVar::Val(lhs, _), SymbolicVar::Const(rhs, _)) => { - builder.push(DslIr::AddVI(*self, *lhs, *rhs)); - } - (SymbolicVar::Val(lhs, _), SymbolicVar::Val(rhs, _)) => { - builder.push(DslIr::AddV(*self, *lhs, *rhs)); - } - (SymbolicVar::Val(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign(rhs.clone(), builder); - builder.push(DslIr::AddV(*self, *lhs, rhs_value)); - } - (lhs, SymbolicVar::Const(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign(lhs.clone(), builder); - builder.push(DslIr::AddVI(*self, lhs_value, *rhs)); - } - (lhs, SymbolicVar::Val(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign(lhs.clone(), builder); - builder.push(DslIr::AddV(*self, lhs_value, *rhs)); - } - (lhs, rhs) => { - let lhs_value = Self::uninit(builder); - 
lhs_value.assign_with_cache(lhs.clone(), builder, cache); - cache.insert(lhs.clone(), lhs_value); - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::AddV(*self, lhs_value, rhs_value)); - } - }, - SymbolicVar::Mul(lhs, rhs, _) => match (&*lhs, &*rhs) { - (SymbolicVar::Const(lhs, _), SymbolicVar::Const(rhs, _)) => { - let product = *lhs * *rhs; - builder.push(DslIr::ImmV(*self, product)); - } - (SymbolicVar::Const(lhs, _), SymbolicVar::Val(rhs, _)) => { - builder.push(DslIr::MulVI(*self, *rhs, *lhs)); - } - (SymbolicVar::Const(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::MulVI(*self, rhs_value, *lhs)); - } - (SymbolicVar::Val(lhs, _), SymbolicVar::Const(rhs, _)) => { - builder.push(DslIr::MulVI(*self, *lhs, *rhs)); - } - (SymbolicVar::Val(lhs, _), SymbolicVar::Val(rhs, _)) => { - builder.push(DslIr::MulV(*self, *lhs, *rhs)); - } - (SymbolicVar::Val(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::MulV(*self, *lhs, rhs_value)); - } - (lhs, SymbolicVar::Const(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_cache(lhs.clone(), builder, cache); - cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::MulVI(*self, lhs_value, *rhs)); - } - (lhs, SymbolicVar::Val(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_cache(lhs.clone(), builder, cache); - cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::MulV(*self, lhs_value, *rhs)); - } - (lhs, rhs) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_cache(lhs.clone(), builder, cache); - cache.insert(lhs.clone(), lhs_value); - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::MulV(*self, lhs_value, rhs_value)); - } - }, - SymbolicVar::Sub(lhs, rhs, _) => match (&*lhs, &*rhs) { - (SymbolicVar::Const(lhs, _), SymbolicVar::Const(rhs, _)) => { - let difference = *lhs - *rhs; - builder.push(DslIr::ImmV(*self, difference)); - } - (SymbolicVar::Const(lhs, _), SymbolicVar::Val(rhs, _)) => { - builder.push(DslIr::SubVIN(*self, *lhs, *rhs)); - } - (SymbolicVar::Const(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::SubVIN(*self, *lhs, rhs_value)); - } - (SymbolicVar::Val(lhs, _), SymbolicVar::Const(rhs, _)) => { - builder.push(DslIr::SubVI(*self, *lhs, *rhs)); - } - (SymbolicVar::Val(lhs, _), SymbolicVar::Val(rhs, _)) => { - builder.push(DslIr::SubV(*self, *lhs, *rhs)); - } - (SymbolicVar::Val(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::SubV(*self, *lhs, rhs_value)); - } - (lhs, SymbolicVar::Const(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_cache(lhs.clone(), builder, cache); - cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::SubVI(*self, lhs_value, *rhs)); - } - (lhs, SymbolicVar::Val(rhs, _)) => { - let lhs_value = Self::uninit(builder); - 
lhs_value.assign_with_cache(lhs.clone(), builder, cache); - cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::SubV(*self, lhs_value, *rhs)); - } - (lhs, rhs) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_cache(lhs.clone(), builder, cache); - cache.insert(lhs.clone(), lhs_value); - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::SubV(*self, lhs_value, rhs_value)); - } - }, - SymbolicVar::Neg(operand, _) => match &*operand { - SymbolicVar::Const(operand, _) => { - let negated = -*operand; - builder.push(DslIr::ImmV(*self, negated)); - } - SymbolicVar::Val(operand, _) => { - builder.push(DslIr::SubVIN(*self, C::N::zero(), *operand)); - } - operand => { - let operand_value = Self::uninit(builder); - operand_value.assign_with_cache(operand.clone(), builder, cache); - cache.insert(operand.clone(), operand_value); - builder.push(DslIr::SubVIN(*self, C::N::zero(), operand_value)); - } - }, - } - } -} - impl Variable for Var { type Expression = SymbolicVar; fn uninit(builder: &mut Builder) -> Self { - let var = Var(builder.variable_count, PhantomData); - builder.variable_count += 1; + let id = builder.variable_count(); + let var = Var::new(id, builder.var_handle.as_mut()); + builder.inner.get_mut().variable_count += 1; var } fn assign(&self, src: Self::Expression, builder: &mut Builder) { - self.assign_with_cache(src, builder, &mut HashMap::new()); + match src { + SymbolicVar::Const(src) => { + builder.push_op(DslIr::ImmV(*self, src)); + } + SymbolicVar::Val(src) => { + builder.push_op(DslIr::AddVI(*self, src, C::N::zero())); + } + } } fn assert_eq( @@ -414,35 +260,18 @@ impl Variable for Var { let rhs = rhs.into(); match (lhs, rhs) { - (SymbolicVar::Const(lhs, _), SymbolicVar::Const(rhs, _)) => { + (SymbolicVar::Const(lhs), SymbolicVar::Const(rhs)) => { assert_eq!(lhs, rhs, "Assertion failed at compile time"); } - (SymbolicVar::Const(lhs, _), SymbolicVar::Val(rhs, _)) => { + (SymbolicVar::Const(lhs), SymbolicVar::Val(rhs)) => { builder.trace_push(DslIr::AssertEqVI(rhs, lhs)); } - (SymbolicVar::Const(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign(rhs, builder); - builder.trace_push(DslIr::AssertEqVI(rhs_value, lhs)); - } - (SymbolicVar::Val(lhs, _), SymbolicVar::Const(rhs, _)) => { + (SymbolicVar::Val(lhs), SymbolicVar::Const(rhs)) => { builder.trace_push(DslIr::AssertEqVI(lhs, rhs)); } - (SymbolicVar::Val(lhs, _), SymbolicVar::Val(rhs, _)) => { + (SymbolicVar::Val(lhs), SymbolicVar::Val(rhs)) => { builder.trace_push(DslIr::AssertEqV(lhs, rhs)); } - (SymbolicVar::Val(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign(rhs, builder); - builder.trace_push(DslIr::AssertEqV(lhs, rhs_value)); - } - (lhs, rhs) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign(lhs, builder); - let rhs_value = Self::uninit(builder); - rhs_value.assign(rhs, builder); - builder.trace_push(DslIr::AssertEqV(lhs_value, rhs_value)); - } } } @@ -455,35 +284,18 @@ impl Variable for Var { let rhs = rhs.into(); match (lhs, rhs) { - (SymbolicVar::Const(lhs, _), SymbolicVar::Const(rhs, _)) => { + (SymbolicVar::Const(lhs), SymbolicVar::Const(rhs)) => { assert_ne!(lhs, rhs, "Assertion failed at compile time"); } - (SymbolicVar::Const(lhs, _), SymbolicVar::Val(rhs, _)) => { + (SymbolicVar::Const(lhs), SymbolicVar::Val(rhs)) => { builder.trace_push(DslIr::AssertNeVI(rhs, lhs)); } - (SymbolicVar::Const(lhs, _), rhs) => { - 
let rhs_value = Self::uninit(builder); - rhs_value.assign(rhs, builder); - builder.trace_push(DslIr::AssertNeVI(rhs_value, lhs)); - } - (SymbolicVar::Val(lhs, _), SymbolicVar::Const(rhs, _)) => { + (SymbolicVar::Val(lhs), SymbolicVar::Const(rhs)) => { builder.trace_push(DslIr::AssertNeVI(lhs, rhs)); } - (SymbolicVar::Val(lhs, _), SymbolicVar::Val(rhs, _)) => { + (SymbolicVar::Val(lhs), SymbolicVar::Val(rhs)) => { builder.trace_push(DslIr::AssertNeV(lhs, rhs)); } - (SymbolicVar::Val(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign(rhs, builder); - builder.trace_push(DslIr::AssertNeV(lhs, rhs_value)); - } - (lhs, rhs) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign(lhs, builder); - let rhs_value = Self::uninit(builder); - rhs_value.assign(rhs, builder); - builder.trace_push(DslIr::AssertNeV(lhs_value, rhs_value)); - } } } } @@ -494,240 +306,11 @@ impl MemVariable for Var { } fn load(&self, ptr: Ptr, index: MemIndex, builder: &mut Builder) { - builder.push(DslIr::LoadV(*self, ptr, index)); + builder.push_op(DslIr::LoadV(*self, ptr, index)); } fn store(&self, ptr: Ptr<::N>, index: MemIndex, builder: &mut Builder) { - builder.push(DslIr::StoreV(*self, ptr, index)); - } -} - -impl Felt { - fn assign_with_cache>( - &self, - src: SymbolicFelt, - builder: &mut Builder, - cache: &mut HashMap, Self>, - ) { - if let Some(v) = cache.get(&src) { - builder.operations.push(DslIr::AddFI(*self, *v, C::F::zero())); - return; - } - match src { - SymbolicFelt::Const(c, _) => { - builder.operations.push(DslIr::ImmF(*self, c)); - } - SymbolicFelt::Val(v, _) => { - builder.operations.push(DslIr::AddFI(*self, v, C::F::zero())); - } - SymbolicFelt::Add(lhs, rhs, _) => match (&*lhs, &*rhs) { - (SymbolicFelt::Const(lhs, _), SymbolicFelt::Const(rhs, _)) => { - let sum = *lhs + *rhs; - builder.operations.push(DslIr::ImmF(*self, sum)); - } - (SymbolicFelt::Const(lhs, _), SymbolicFelt::Val(rhs, _)) => { - builder.operations.push(DslIr::AddFI(*self, *rhs, *lhs)); - } - (SymbolicFelt::Const(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::AddFI(*self, rhs_value, *lhs)); - } - (SymbolicFelt::Val(lhs, _), SymbolicFelt::Const(rhs, _)) => { - builder.push(DslIr::AddFI(*self, *lhs, *rhs)); - } - (SymbolicFelt::Val(lhs, _), SymbolicFelt::Val(rhs, _)) => { - builder.push(DslIr::AddF(*self, *lhs, *rhs)); - } - (SymbolicFelt::Val(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::AddF(*self, *lhs, rhs_value)); - } - (lhs, SymbolicFelt::Const(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_cache(lhs.clone(), builder, cache); - cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::AddFI(*self, lhs_value, *rhs)); - } - (lhs, SymbolicFelt::Val(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_cache(lhs.clone(), builder, cache); - cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::AddF(*self, lhs_value, *rhs)); - } - (lhs, rhs) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_cache(lhs.clone(), builder, cache); - cache.insert(lhs.clone(), lhs_value); - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::AddF(*self, lhs_value, 
rhs_value)); - } - }, - SymbolicFelt::Mul(lhs, rhs, _) => match (&*lhs, &*rhs) { - (SymbolicFelt::Const(lhs, _), SymbolicFelt::Const(rhs, _)) => { - let product = *lhs * *rhs; - builder.push(DslIr::ImmF(*self, product)); - } - (SymbolicFelt::Const(lhs, _), SymbolicFelt::Val(rhs, _)) => { - builder.push(DslIr::MulFI(*self, *rhs, *lhs)); - } - (SymbolicFelt::Const(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::MulFI(*self, rhs_value, *lhs)); - } - (SymbolicFelt::Val(lhs, _), SymbolicFelt::Const(rhs, _)) => { - builder.push(DslIr::MulFI(*self, *lhs, *rhs)); - } - (SymbolicFelt::Val(lhs, _), SymbolicFelt::Val(rhs, _)) => { - builder.push(DslIr::MulF(*self, *lhs, *rhs)); - } - (SymbolicFelt::Val(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::MulF(*self, *lhs, rhs_value)); - } - (lhs, SymbolicFelt::Const(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_cache(lhs.clone(), builder, cache); - cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::MulFI(*self, lhs_value, *rhs)); - } - (lhs, SymbolicFelt::Val(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_cache(lhs.clone(), builder, cache); - cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::MulF(*self, lhs_value, *rhs)); - } - (lhs, rhs) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_cache(lhs.clone(), builder, cache); - cache.insert(lhs.clone(), lhs_value); - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::MulF(*self, lhs_value, rhs_value)); - } - }, - SymbolicFelt::Sub(lhs, rhs, _) => match (&*lhs, &*rhs) { - (SymbolicFelt::Const(lhs, _), SymbolicFelt::Const(rhs, _)) => { - let difference = *lhs - *rhs; - builder.push(DslIr::ImmF(*self, difference)); - } - (SymbolicFelt::Const(lhs, _), SymbolicFelt::Val(rhs, _)) => { - builder.push(DslIr::SubFIN(*self, *lhs, *rhs)); - } - (SymbolicFelt::Const(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::SubFIN(*self, *lhs, rhs_value)); - } - (SymbolicFelt::Val(lhs, _), SymbolicFelt::Const(rhs, _)) => { - builder.push(DslIr::SubFI(*self, *lhs, *rhs)); - } - (SymbolicFelt::Val(lhs, _), SymbolicFelt::Val(rhs, _)) => { - builder.push(DslIr::SubF(*self, *lhs, *rhs)); - } - (SymbolicFelt::Val(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::SubF(*self, *lhs, rhs_value)); - } - (lhs, SymbolicFelt::Const(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_cache(lhs.clone(), builder, cache); - cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::SubFI(*self, lhs_value, *rhs)); - } - (lhs, SymbolicFelt::Val(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_cache(lhs.clone(), builder, cache); - cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::SubF(*self, lhs_value, *rhs)); - } - (lhs, rhs) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_cache(lhs.clone(), builder, cache); - 
cache.insert(lhs.clone(), lhs_value); - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::SubF(*self, lhs_value, rhs_value)); - } - }, - SymbolicFelt::Div(lhs, rhs, _) => match (&*lhs, &*rhs) { - (SymbolicFelt::Const(lhs, _), SymbolicFelt::Const(rhs, _)) => { - let quotient = *lhs / *rhs; - builder.push(DslIr::ImmF(*self, quotient)); - } - (SymbolicFelt::Const(lhs, _), SymbolicFelt::Val(rhs, _)) => { - builder.push(DslIr::DivFIN(*self, *lhs, *rhs)); - } - (SymbolicFelt::Const(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::DivFIN(*self, *lhs, rhs_value)); - } - (SymbolicFelt::Val(lhs, _), SymbolicFelt::Const(rhs, _)) => { - builder.push(DslIr::DivFI(*self, *lhs, *rhs)); - } - (SymbolicFelt::Val(lhs, _), SymbolicFelt::Val(rhs, _)) => { - builder.push(DslIr::DivF(*self, *lhs, *rhs)); - } - (SymbolicFelt::Val(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::DivF(*self, *lhs, rhs_value)); - } - (lhs, SymbolicFelt::Const(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_cache(lhs.clone(), builder, cache); - cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::DivFI(*self, lhs_value, *rhs)); - } - (lhs, SymbolicFelt::Val(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_cache(lhs.clone(), builder, cache); - cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::DivF(*self, lhs_value, *rhs)); - } - (lhs, rhs) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_cache(lhs.clone(), builder, cache); - cache.insert(lhs.clone(), lhs_value); - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, cache); - cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::DivF(*self, lhs_value, rhs_value)); - } - }, - SymbolicFelt::Neg(operand, _) => match &*operand { - SymbolicFelt::Const(operand, _) => { - let negated = -*operand; - builder.push(DslIr::ImmF(*self, negated)); - } - SymbolicFelt::Val(operand, _) => { - builder.push(DslIr::SubFIN(*self, C::F::zero(), *operand)); - } - operand => { - let operand_value = Self::uninit(builder); - operand_value.assign_with_cache(operand.clone(), builder, cache); - cache.insert(operand.clone(), operand_value); - builder.push(DslIr::SubFIN(*self, C::F::zero(), operand_value)); - } - }, - } + builder.push_op(DslIr::StoreV(*self, ptr, index)); } } @@ -735,13 +318,21 @@ impl Variable for Felt { type Expression = SymbolicFelt; fn uninit(builder: &mut Builder) -> Self { - let felt = Felt(builder.variable_count, PhantomData); - builder.variable_count += 1; + let idx = builder.variable_count(); + let felt = Felt::::new(idx, builder.felt_handle.as_mut()); + builder.inner.get_mut().variable_count += 1; felt } fn assign(&self, src: Self::Expression, builder: &mut Builder) { - self.assign_with_cache(src, builder, &mut HashMap::new()); + match src { + SymbolicFelt::Const(src) => { + builder.push_op(DslIr::ImmF(*self, src)); + } + SymbolicFelt::Val(src) => { + builder.push_op(DslIr::AddFI(*self, src, C::F::zero())); + } + } } fn assert_eq( @@ -753,35 +344,18 @@ impl Variable for Felt { let rhs = rhs.into(); match (lhs, rhs) { - (SymbolicFelt::Const(lhs, _), SymbolicFelt::Const(rhs, _)) 
=> { + (SymbolicFelt::Const(lhs), SymbolicFelt::Const(rhs)) => { assert_eq!(lhs, rhs, "Assertion failed at compile time"); } - (SymbolicFelt::Const(lhs, _), SymbolicFelt::Val(rhs, _)) => { + (SymbolicFelt::Const(lhs), SymbolicFelt::Val(rhs)) => { builder.trace_push(DslIr::AssertEqFI(rhs, lhs)); } - (SymbolicFelt::Const(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign(rhs, builder); - builder.trace_push(DslIr::AssertEqFI(rhs_value, lhs)); - } - (SymbolicFelt::Val(lhs, _), SymbolicFelt::Const(rhs, _)) => { + (SymbolicFelt::Val(lhs), SymbolicFelt::Const(rhs)) => { builder.trace_push(DslIr::AssertEqFI(lhs, rhs)); } - (SymbolicFelt::Val(lhs, _), SymbolicFelt::Val(rhs, _)) => { + (SymbolicFelt::Val(lhs), SymbolicFelt::Val(rhs)) => { builder.trace_push(DslIr::AssertEqF(lhs, rhs)); } - (SymbolicFelt::Val(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign(rhs, builder); - builder.trace_push(DslIr::AssertEqF(lhs, rhs_value)); - } - (lhs, rhs) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign(lhs, builder); - let rhs_value = Self::uninit(builder); - rhs_value.assign(rhs, builder); - builder.trace_push(DslIr::AssertEqF(lhs_value, rhs_value)); - } } } @@ -794,35 +368,18 @@ impl Variable for Felt { let rhs = rhs.into(); match (lhs, rhs) { - (SymbolicFelt::Const(lhs, _), SymbolicFelt::Const(rhs, _)) => { + (SymbolicFelt::Const(lhs), SymbolicFelt::Const(rhs)) => { assert_ne!(lhs, rhs, "Assertion failed at compile time"); } - (SymbolicFelt::Const(lhs, _), SymbolicFelt::Val(rhs, _)) => { + (SymbolicFelt::Const(lhs), SymbolicFelt::Val(rhs)) => { builder.trace_push(DslIr::AssertNeFI(rhs, lhs)); } - (SymbolicFelt::Const(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign(rhs, builder); - builder.trace_push(DslIr::AssertNeFI(rhs_value, lhs)); - } - (SymbolicFelt::Val(lhs, _), SymbolicFelt::Const(rhs, _)) => { + (SymbolicFelt::Val(lhs), SymbolicFelt::Const(rhs)) => { builder.trace_push(DslIr::AssertNeFI(lhs, rhs)); } - (SymbolicFelt::Val(lhs, _), SymbolicFelt::Val(rhs, _)) => { + (SymbolicFelt::Val(lhs), SymbolicFelt::Val(rhs)) => { builder.trace_push(DslIr::AssertNeF(lhs, rhs)); } - (SymbolicFelt::Val(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign(rhs, builder); - builder.trace_push(DslIr::AssertNeF(lhs, rhs_value)); - } - (lhs, rhs) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign(lhs, builder); - let rhs_value = Self::uninit(builder); - rhs_value.assign(rhs, builder); - builder.trace_push(DslIr::AssertNeF(lhs_value, rhs_value)); - } } } } @@ -833,285 +390,11 @@ impl MemVariable for Felt { } fn load(&self, ptr: Ptr, index: MemIndex, builder: &mut Builder) { - builder.push(DslIr::LoadF(*self, ptr, index)); + builder.push_op(DslIr::LoadF(*self, ptr, index)); } fn store(&self, ptr: Ptr<::N>, index: MemIndex, builder: &mut Builder) { - builder.push(DslIr::StoreF(*self, ptr, index)); - } -} - -impl> Ext { - fn assign_with_caches>( - &self, - src: SymbolicExt, - builder: &mut Builder, - ext_cache: &mut HashMap, Ext>, - base_cache: &mut HashMap, Felt>, - ) { - if let Some(v) = ext_cache.get(&src) { - builder.operations.push(DslIr::AddEI(*self, *v, C::EF::zero())); - return; - } - match src { - SymbolicExt::Base(v, _) => match &*v { - SymbolicFelt::Const(c, _) => { - builder.operations.push(DslIr::ImmE(*self, C::EF::from_base(*c))); - } - SymbolicFelt::Val(v, _) => { - builder.operations.push(DslIr::AddEFFI(*self, *v, C::EF::zero())); - } - v => { - let v_value = 
Felt::uninit(builder); - v_value.assign(v.clone(), builder); - builder.push(DslIr::AddEFFI(*self, v_value, C::EF::zero())); - } - }, - SymbolicExt::Const(c, _) => { - builder.operations.push(DslIr::ImmE(*self, c)); - } - SymbolicExt::Val(v, _) => { - builder.operations.push(DslIr::AddEI(*self, v, C::EF::zero())); - } - SymbolicExt::Add(lhs, rhs, _) => match (&*lhs, &*rhs) { - (SymbolicExt::Const(lhs, _), SymbolicExt::Const(rhs, _)) => { - let sum = *lhs + *rhs; - builder.operations.push(DslIr::ImmE(*self, sum)); - } - (SymbolicExt::Const(lhs, _), SymbolicExt::Val(rhs, _)) => { - builder.operations.push(DslIr::AddEI(*self, *rhs, *lhs)); - } - (SymbolicExt::Const(lhs, _), SymbolicExt::Base(rhs, _)) => match rhs.as_ref() { - SymbolicFelt::Const(rhs, _) => { - let sum = *lhs + C::EF::from_base(*rhs); - builder.operations.push(DslIr::ImmE(*self, sum)); - } - SymbolicFelt::Val(rhs, _) => { - builder.operations.push(DslIr::AddEFFI(*self, *rhs, *lhs)); - } - rhs => { - let rhs_value: Felt<_> = Felt::uninit(builder); - rhs_value.assign_with_cache(rhs.clone(), builder, base_cache); - base_cache.insert(rhs.clone(), rhs_value); - builder.operations.push(DslIr::AddEFFI(*self, rhs_value, *lhs)); - } - }, - (SymbolicExt::Const(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_caches(rhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::AddEI(*self, rhs_value, *lhs)); - } - (SymbolicExt::Val(lhs, _), SymbolicExt::Const(rhs, _)) => { - builder.push(DslIr::AddEI(*self, *lhs, *rhs)); - } - (SymbolicExt::Val(lhs, _), SymbolicExt::Base(rhs, _)) => match rhs.as_ref() { - SymbolicFelt::Const(rhs, _) => { - builder.push(DslIr::AddEFI(*self, *lhs, *rhs)); - } - SymbolicFelt::Val(rhs, _) => { - builder.push(DslIr::AddEF(*self, *lhs, *rhs)); - } - rhs => { - let rhs = builder.eval(rhs.clone()); - builder.push(DslIr::AddEF(*self, *lhs, rhs)); - } - }, - (SymbolicExt::Val(lhs, _), SymbolicExt::Val(rhs, _)) => { - builder.push(DslIr::AddE(*self, *lhs, *rhs)); - } - (SymbolicExt::Val(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_caches(rhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::AddE(*self, *lhs, rhs_value)); - } - (lhs, SymbolicExt::Const(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_caches(lhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::AddEI(*self, lhs_value, *rhs)); - } - (lhs, SymbolicExt::Val(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_caches(lhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::AddE(*self, lhs_value, *rhs)); - } - (lhs, rhs) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_caches(lhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(lhs.clone(), lhs_value); - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_caches(rhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::AddE(*self, lhs_value, rhs_value)); - } - }, - SymbolicExt::Mul(lhs, rhs, _) => match (&*lhs, &*rhs) { - (SymbolicExt::Const(lhs, _), SymbolicExt::Const(rhs, _)) => { - let product = *lhs * *rhs; - builder.push(DslIr::ImmE(*self, product)); - } - (SymbolicExt::Const(lhs, _), SymbolicExt::Val(rhs, _)) => { - builder.push(DslIr::MulEI(*self, *rhs, 
*lhs)); - } - (SymbolicExt::Const(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_caches(rhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::MulEI(*self, rhs_value, *lhs)); - } - (SymbolicExt::Val(lhs, _), SymbolicExt::Const(rhs, _)) => { - builder.push(DslIr::MulEI(*self, *lhs, *rhs)); - } - (SymbolicExt::Val(lhs, _), SymbolicExt::Val(rhs, _)) => { - builder.push(DslIr::MulE(*self, *lhs, *rhs)); - } - (SymbolicExt::Val(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_caches(rhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::MulE(*self, *lhs, rhs_value)); - } - (lhs, SymbolicExt::Const(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_caches(lhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::MulEI(*self, lhs_value, *rhs)); - } - (lhs, SymbolicExt::Val(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_caches(lhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::MulE(*self, lhs_value, *rhs)); - } - (lhs, rhs) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_caches(lhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(lhs.clone(), lhs_value); - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_caches(rhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::MulE(*self, lhs_value, rhs_value)); - } - }, - SymbolicExt::Sub(lhs, rhs, _) => match (&*lhs, &*rhs) { - (SymbolicExt::Const(lhs, _), SymbolicExt::Const(rhs, _)) => { - let difference = *lhs - *rhs; - builder.push(DslIr::ImmE(*self, difference)); - } - (SymbolicExt::Const(lhs, _), SymbolicExt::Val(rhs, _)) => { - builder.push(DslIr::SubEIN(*self, *lhs, *rhs)); - } - (SymbolicExt::Const(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_caches(rhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::SubEIN(*self, *lhs, rhs_value)); - } - (SymbolicExt::Val(lhs, _), SymbolicExt::Const(rhs, _)) => { - builder.push(DslIr::SubEI(*self, *lhs, *rhs)); - } - (SymbolicExt::Val(lhs, _), SymbolicExt::Val(rhs, _)) => { - builder.push(DslIr::SubE(*self, *lhs, *rhs)); - } - (SymbolicExt::Val(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_caches(rhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::SubE(*self, *lhs, rhs_value)); - } - (lhs, SymbolicExt::Const(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_caches(lhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::SubEI(*self, lhs_value, *rhs)); - } - (lhs, SymbolicExt::Val(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_caches(lhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::SubE(*self, lhs_value, *rhs)); - } - (lhs, rhs) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_caches(lhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(lhs.clone(), lhs_value); - let rhs_value = Self::uninit(builder); - rhs_value.assign(rhs.clone(), builder); - 
builder.push(DslIr::SubE(*self, lhs_value, rhs_value)); - } - }, - SymbolicExt::Div(lhs, rhs, _) => match (&*lhs, &*rhs) { - (SymbolicExt::Const(lhs, _), SymbolicExt::Const(rhs, _)) => { - let quotient = *lhs / *rhs; - builder.push(DslIr::ImmE(*self, quotient)); - } - (SymbolicExt::Const(lhs, _), SymbolicExt::Val(rhs, _)) => { - builder.push(DslIr::DivEIN(*self, *lhs, *rhs)); - } - (SymbolicExt::Const(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_caches(rhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::DivEIN(*self, *lhs, rhs_value)); - } - (SymbolicExt::Val(lhs, _), SymbolicExt::Const(rhs, _)) => { - builder.push(DslIr::DivEI(*self, *lhs, *rhs)); - } - (SymbolicExt::Val(lhs, _), SymbolicExt::Val(rhs, _)) => { - builder.push(DslIr::DivE(*self, *lhs, *rhs)); - } - (SymbolicExt::Val(lhs, _), rhs) => { - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_caches(rhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::DivE(*self, *lhs, rhs_value)); - } - (lhs, SymbolicExt::Const(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_caches(lhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::DivEI(*self, lhs_value, *rhs)); - } - (lhs, SymbolicExt::Val(rhs, _)) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_caches(lhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(lhs.clone(), lhs_value); - builder.push(DslIr::DivE(*self, lhs_value, *rhs)); - } - (lhs, rhs) => { - let lhs_value = Self::uninit(builder); - lhs_value.assign_with_caches(lhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(lhs.clone(), lhs_value); - let rhs_value = Self::uninit(builder); - rhs_value.assign_with_caches(rhs.clone(), builder, ext_cache, base_cache); - ext_cache.insert(rhs.clone(), rhs_value); - builder.push(DslIr::DivE(*self, lhs_value, rhs_value)); - } - }, - SymbolicExt::Neg(operand, _) => match &*operand { - SymbolicExt::Const(operand, _) => { - let negated = -*operand; - builder.push(DslIr::ImmE(*self, negated)); - } - SymbolicExt::Val(operand, _) => { - builder.push(DslIr::NegE(*self, *operand)); - } - operand => { - let operand_value = Self::uninit(builder); - operand_value.assign_with_caches( - operand.clone(), - builder, - ext_cache, - base_cache, - ); - ext_cache.insert(operand.clone(), operand_value); - builder.push(DslIr::NegE(*self, operand_value)); - } - }, - } + builder.push_op(DslIr::StoreF(*self, ptr, index)); } } @@ -1119,13 +402,29 @@ impl Variable for Ext { type Expression = SymbolicExt; fn uninit(builder: &mut Builder) -> Self { - let ext = Ext(builder.variable_count, PhantomData); - builder.variable_count += 1; + let idx = builder.variable_count(); + let ext = Ext::::new(idx, builder.ext_handle.as_mut()); + builder.inner.get_mut().variable_count += 1; ext } fn assign(&self, src: Self::Expression, builder: &mut Builder) { - self.assign_with_caches(src, builder, &mut HashMap::new(), &mut HashMap::new()); + match src { + SymbolicExt::Const(src) => { + builder.push_op(DslIr::ImmE(*self, src)); + } + SymbolicExt::Base(src) => match src { + SymbolicFelt::Const(src) => { + builder.push_op(DslIr::ImmE(*self, C::EF::from_base(src))); + } + SymbolicFelt::Val(src) => { + builder.push_op(DslIr::AddEFFI(*self, src, C::EF::zero())); + } + }, + SymbolicExt::Val(src) => { + builder.push_op(DslIr::AddEI(*self, src, 
C::EF::zero())); + } + } } fn assert_eq( @@ -1137,24 +436,24 @@ impl Variable for Ext { let rhs = rhs.into(); match (lhs, rhs) { - (SymbolicExt::Const(lhs, _), SymbolicExt::Const(rhs, _)) => { + (SymbolicExt::Const(lhs), SymbolicExt::Const(rhs)) => { assert_eq!(lhs, rhs, "Assertion failed at compile time"); } - (SymbolicExt::Const(lhs, _), SymbolicExt::Val(rhs, _)) => { + (SymbolicExt::Const(lhs), SymbolicExt::Val(rhs)) => { builder.trace_push(DslIr::AssertEqEI(rhs, lhs)); } - (SymbolicExt::Const(lhs, _), rhs) => { + (SymbolicExt::Const(lhs), rhs) => { let rhs_value = Self::uninit(builder); rhs_value.assign(rhs, builder); builder.trace_push(DslIr::AssertEqEI(rhs_value, lhs)); } - (SymbolicExt::Val(lhs, _), SymbolicExt::Const(rhs, _)) => { + (SymbolicExt::Val(lhs), SymbolicExt::Const(rhs)) => { builder.trace_push(DslIr::AssertEqEI(lhs, rhs)); } - (SymbolicExt::Val(lhs, _), SymbolicExt::Val(rhs, _)) => { + (SymbolicExt::Val(lhs), SymbolicExt::Val(rhs)) => { builder.trace_push(DslIr::AssertEqE(lhs, rhs)); } - (SymbolicExt::Val(lhs, _), rhs) => { + (SymbolicExt::Val(lhs), rhs) => { let rhs_value = Self::uninit(builder); rhs_value.assign(rhs, builder); builder.trace_push(DslIr::AssertEqE(lhs, rhs_value)); @@ -1178,24 +477,24 @@ impl Variable for Ext { let rhs = rhs.into(); match (lhs, rhs) { - (SymbolicExt::Const(lhs, _), SymbolicExt::Const(rhs, _)) => { + (SymbolicExt::Const(lhs), SymbolicExt::Const(rhs)) => { assert_ne!(lhs, rhs, "Assertion failed at compile time"); } - (SymbolicExt::Const(lhs, _), SymbolicExt::Val(rhs, _)) => { + (SymbolicExt::Const(lhs), SymbolicExt::Val(rhs)) => { builder.trace_push(DslIr::AssertNeEI(rhs, lhs)); } - (SymbolicExt::Const(lhs, _), rhs) => { + (SymbolicExt::Const(lhs), rhs) => { let rhs_value = Self::uninit(builder); rhs_value.assign(rhs, builder); builder.trace_push(DslIr::AssertNeEI(rhs_value, lhs)); } - (SymbolicExt::Val(lhs, _), SymbolicExt::Const(rhs, _)) => { + (SymbolicExt::Val(lhs), SymbolicExt::Const(rhs)) => { builder.trace_push(DslIr::AssertNeEI(lhs, rhs)); } - (SymbolicExt::Val(lhs, _), SymbolicExt::Val(rhs, _)) => { + (SymbolicExt::Val(lhs), SymbolicExt::Val(rhs)) => { builder.trace_push(DslIr::AssertNeE(lhs, rhs)); } - (SymbolicExt::Val(lhs, _), rhs) => { + (SymbolicExt::Val(lhs), rhs) => { let rhs_value = Self::uninit(builder); rhs_value.assign(rhs, builder); builder.trace_push(DslIr::AssertNeE(lhs, rhs_value)); @@ -1217,11 +516,11 @@ impl MemVariable for Ext { } fn load(&self, ptr: Ptr, index: MemIndex, builder: &mut Builder) { - builder.push(DslIr::LoadE(*self, ptr, index)); + builder.push_op(DslIr::LoadE(*self, ptr, index)); } fn store(&self, ptr: Ptr<::N>, index: MemIndex, builder: &mut Builder) { - builder.push(DslIr::StoreE(*self, ptr, index)); + builder.push_op(DslIr::StoreE(*self, ptr, index)); } } diff --git a/crates/recursion/compiler/src/ir/utils.rs b/crates/recursion/compiler/src/ir/utils.rs index 0276d6eac5..f327418be9 100644 --- a/crates/recursion/compiler/src/ir/utils.rs +++ b/crates/recursion/compiler/src/ir/utils.rs @@ -14,14 +14,14 @@ impl Builder { /// Select a variable based on a condition. pub fn select_v(&mut self, cond: Var, a: Var, b: Var) -> Var { let c = self.uninit(); - self.operations.push(DslIr::CircuitSelectV(cond, a, b, c)); + self.push_op(DslIr::CircuitSelectV(cond, a, b, c)); c } /// Select a felt based on a condition. 
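As an aside on the conditional-select helpers in this hunk: for a boolean condition, select(cond, a, b) is just the arithmetic identity c = cond * a + (1 - cond) * b over the field. The stand-alone sketch below restates that identity over integers modulo a toy prime P; it illustrates the semantics only and is not the actual CircuitSelectV lowering (P and the u64 arithmetic are assumptions made for readability).

// Illustration of the select identity over a toy prime field; `P` is an
// assumed modulus, not the compiler's real field type.
const P: u64 = (1 << 31) - 1;

fn select(cond: u64, a: u64, b: u64) -> u64 {
    debug_assert!(cond == 0 || cond == 1);
    // c = cond * a + (1 - cond) * b  (mod P)
    (cond * a % P + ((1 + P - cond) % P) * b % P) % P
}

fn main() {
    assert_eq!(select(1, 7, 9), 7);
    assert_eq!(select(0, 7, 9), 9);
}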
pub fn select_f(&mut self, cond: Var, a: Felt, b: Felt) -> Felt { let c = self.uninit(); - self.operations.push(DslIr::CircuitSelectF(cond, a, b, c)); + self.push_op(DslIr::CircuitSelectF(cond, a, b, c)); c } @@ -33,7 +33,7 @@ impl Builder { b: Ext, ) -> Ext { let c = self.uninit(); - self.operations.push(DslIr::CircuitSelectE(cond, a, b, c)); + self.push_op(DslIr::CircuitSelectE(cond, a, b, c)); c } @@ -157,7 +157,7 @@ impl Builder { // Call the DslIR instruction ExpReverseBitsLen, which modifies the memory pointed to by // `x_copy_arr_ptr`. - self.push(DslIr::ExpReverseBitsLen(x_copy_arr_ptr, ptr.address, bit_len_var)); + self.push_op(DslIr::ExpReverseBitsLen(x_copy_arr_ptr, ptr.address, bit_len_var)); // Return the value stored at the address pointed to by `x_copy_arr_ptr`. self.get(&x_copy_arr, 0) @@ -227,14 +227,14 @@ impl Builder { pub fn felts2ext(&mut self, felts: &[Felt]) -> Ext { assert_eq!(felts.len(), 4); let out: Ext = self.uninit(); - self.push(DslIr::CircuitFelts2Ext(felts.try_into().unwrap(), out)); + self.push_op(DslIr::CircuitFelts2Ext(felts.try_into().unwrap(), out)); out } /// Converts an ext to a slice of felts. pub fn ext2felt(&mut self, value: Ext) -> Array> { let result = self.dyn_array(4); - self.operations.push(DslIr::HintExt2Felt(result.clone(), value)); + self.push_op(DslIr::HintExt2Felt(result.clone(), value)); // Verify that the decomposed extension element is correct. let mut reconstructed_ext: Ext = self.constant(C::EF::zero()); @@ -255,108 +255,7 @@ impl Builder { let b = self.uninit(); let c = self.uninit(); let d = self.uninit(); - self.operations.push(DslIr::CircuitExt2Felt([a, b, c, d], value)); + self.push_op(DslIr::CircuitExt2Felt([a, b, c, d], value)); [a, b, c, d] } } - -#[cfg(test)] -mod tests { - use p3_field::PrimeField32; - use p3_util::reverse_bits_len; - use rand::{thread_rng, Rng}; - use sp1_recursion_core::runtime::{Runtime, NUM_BITS}; - - use p3_field::AbstractField; - use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; - - use crate::{ - asm::AsmBuilder, - ir::{Felt, Var}, - }; - - #[test] - fn test_num2bits() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - - let mut rng = thread_rng(); - let config = SC::default(); - - // Initialize a builder. - let mut builder = AsmBuilder::::default(); - - // Get a random var with `NUM_BITS` bits. - let num_val: F = rng.gen(); - - // Materialize the number as a var - let num: Var<_> = builder.eval(num_val); - // Materialize the number as a felt - let num_felt: Felt<_> = builder.eval(num_val); - - // Get the bits. - let bits = builder.num2bits_v(num); - let bits_felt = builder.num2bits_f(num_felt); - - // Compare the expected bits with the actual bits. - for i in 0..NUM_BITS { - // Get the i-th bit of the number. - let expected_bit = F::from_canonical_u32((num_val.as_canonical_u32() >> i) & 1); - // Compare the expected bit of the var with the actual bit. - let bit = builder.get(&bits, i); - builder.assert_var_eq(bit, expected_bit); - // Compare the expected bit of the felt with the actual bit. - let bit_felt = builder.get(&bits_felt, i); - builder.assert_var_eq(bit_felt, expected_bit); - } - - // Test the conversion back to a number. 
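As a reference for the round trip being tested here: num2bits is little-endian bit decomposition and bits2num is its inverse, so composing them must return the original value. The sketch below states that property over plain u32, independent of the builder API; NUM_BITS = 31 is an assumption (BabyBear elements fit in 31 bits).

// Reference semantics for num2bits / bits2num over plain integers.
// NUM_BITS = 31 is an assumption, not taken from this diff.
const NUM_BITS: usize = 31;

fn num2bits(x: u32) -> Vec<u32> {
    (0..NUM_BITS).map(|i| (x >> i) & 1).collect() // little-endian bits
}

fn bits2num(bits: &[u32]) -> u32 {
    // Fold from the most significant bit back down.
    bits.iter().rev().fold(0, |acc, &b| (acc << 1) | b)
}

fn main() {
    let x = 0x1234_5678u32 & ((1u32 << NUM_BITS) - 1);
    assert_eq!(bits2num(&num2bits(x)), x);
}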
- let num_back = builder.bits2num_v(&bits); - builder.assert_var_eq(num_back, num); - let num_felt_back = builder.bits2num_f(&bits_felt); - builder.assert_felt_eq(num_felt_back, num_felt); - - let program = builder.compile_program(); - - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.run().unwrap(); - } - - #[test] - fn test_reverse_bits_len() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - - let mut rng = thread_rng(); - let config = SC::default(); - - // Initialize a builder. - let mut builder = AsmBuilder::::default(); - - // Get a random var with `NUM_BITS` bits. - let x_val: F = rng.gen(); - - // Materialize the number as a var - let x: Var<_> = builder.eval(x_val); - let x_bits = builder.num2bits_v(x); - - for i in 1..NUM_BITS { - // Get the reference value. - let expected_value = reverse_bits_len(x_val.as_canonical_u32() as usize, i); - let value_bits = builder.reverse_bits_len(&x_bits, i); - let value = builder.bits2num_v(&value_bits); - builder.assert_usize_eq(value, expected_value); - let var_i: Var<_> = builder.eval(F::from_canonical_usize(i)); - let value_var_bits = builder.reverse_bits_len(&x_bits, var_i); - let value_var = builder.bits2num_v(&value_var_bits); - builder.assert_usize_eq(value_var, expected_value); - } - - let program = builder.compile_program(); - - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.run().unwrap(); - } -} diff --git a/crates/recursion/compiler/src/lib.rs b/crates/recursion/compiler/src/lib.rs index c4ebc5bb10..9426adf9bf 100644 --- a/crates/recursion/compiler/src/lib.rs +++ b/crates/recursion/compiler/src/lib.rs @@ -3,13 +3,12 @@ extern crate alloc; -pub mod asm; pub mod circuit; pub mod config; pub mod constraints; pub mod ir; pub mod prelude { - pub use crate::{asm::AsmCompiler, ir::*}; + pub use crate::ir::*; pub use sp1_recursion_derive::DslVariable; } diff --git a/crates/recursion/compiler/tests/arithmetic.rs b/crates/recursion/compiler/tests/arithmetic.rs deleted file mode 100644 index d218854019..0000000000 --- a/crates/recursion/compiler/tests/arithmetic.rs +++ /dev/null @@ -1,84 +0,0 @@ -use rand::{thread_rng, Rng}; - -use p3_field::AbstractField; -use sp1_recursion_compiler::{ - asm::AsmBuilder, - ir::{Ext, ExtConst, Felt, SymbolicExt, Var}, -}; -use sp1_recursion_core::runtime::Runtime; -use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; - -#[test] -fn test_compiler_arithmetic() { - let num_tests = 3; - let mut rng = thread_rng(); - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - let mut builder = AsmBuilder::::default(); - - let zero: Felt<_> = builder.eval(F::zero()); - let one: Felt<_> = builder.eval(F::one()); - - builder.assert_felt_eq(zero * one, F::zero()); - builder.assert_felt_eq(one * one, F::one()); - builder.assert_felt_eq(one + one, F::two()); - - let zero_ext: Ext<_, _> = builder.eval(EF::zero().cons()); - let one_ext: Ext<_, _> = builder.eval(EF::one().cons()); - - builder.assert_ext_eq(zero_ext * one_ext, EF::zero().cons()); - builder.assert_ext_eq(one_ext * one_ext, EF::one().cons()); - builder.assert_ext_eq(one_ext + one_ext, EF::two().cons()); - builder.assert_ext_eq(one_ext - one_ext, EF::zero().cons()); - - for _ in 0..num_tests { - let a_var_val = rng.gen::(); - let b_var_val = rng.gen::(); - let a_var: Var<_> = builder.eval(a_var_val); - let b_var: Var<_> = builder.eval(b_var_val); - builder.assert_var_eq(a_var + b_var, a_var_val + b_var_val); - builder.assert_var_eq(a_var * 
b_var, a_var_val * b_var_val); - builder.assert_var_eq(a_var - b_var, a_var_val - b_var_val); - builder.assert_var_eq(-a_var, -a_var_val); - - let a_felt_val = rng.gen::(); - let b_felt_val = rng.gen::(); - let a: Felt<_> = builder.eval(a_felt_val); - let b: Felt<_> = builder.eval(b_felt_val); - builder.assert_felt_eq(a + b, a_felt_val + b_felt_val); - builder.assert_felt_eq(a + b, a + b_felt_val); - builder.assert_felt_eq(a * b, a_felt_val * b_felt_val); - builder.assert_felt_eq(a - b, a_felt_val - b_felt_val); - builder.assert_felt_eq(a / b, a_felt_val / b_felt_val); - builder.assert_felt_eq(-a, -a_felt_val); - - let a_ext_val = rng.gen::(); - let b_ext_val = rng.gen::(); - let a_ext: Ext<_, _> = builder.eval(a_ext_val.cons()); - let b_ext: Ext<_, _> = builder.eval(b_ext_val.cons()); - builder.assert_ext_eq(a_ext + b_ext, (a_ext_val + b_ext_val).cons()); - builder.assert_ext_eq( - -a_ext / b_ext + (a_ext * b_ext) * (a_ext * b_ext), - (-a_ext_val / b_ext_val + (a_ext_val * b_ext_val) * (a_ext_val * b_ext_val)).cons(), - ); - let mut a_expr = SymbolicExt::from(a_ext); - let mut a_val = a_ext_val; - for _ in 0..10 { - a_expr += b_ext * a_val + EF::one(); - a_val += b_ext_val * a_val + EF::one(); - builder.assert_ext_eq(a_expr.clone(), a_val.cons()) - } - builder.assert_ext_eq(a_ext * b_ext, (a_ext_val * b_ext_val).cons()); - builder.assert_ext_eq(a_ext - b_ext, (a_ext_val - b_ext_val).cons()); - builder.assert_ext_eq(a_ext / b_ext, (a_ext_val / b_ext_val).cons()); - builder.assert_ext_eq(-a_ext, (-a_ext_val).cons()); - } - - let program = builder.compile_program(); - - let config = SC::default(); - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.run().unwrap(); - runtime.print_stats(); -} diff --git a/crates/recursion/compiler/tests/array.rs b/crates/recursion/compiler/tests/array.rs deleted file mode 100644 index 71dedb5485..0000000000 --- a/crates/recursion/compiler/tests/array.rs +++ /dev/null @@ -1,119 +0,0 @@ -use rand::{thread_rng, Rng}; - -use p3_field::AbstractField; -use sp1_recursion_compiler::{ - asm::AsmBuilder, - ir::{Array, Builder, Config, Ext, ExtConst, Felt, MemIndex, MemVariable, Ptr, Var, Variable}, -}; -use sp1_recursion_core::runtime::Runtime; -use sp1_recursion_derive::DslVariable; -use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; - -#[derive(DslVariable, Clone, Debug)] -pub struct Point { - x: Var, - y: Felt, - z: Ext, -} - -#[test] -fn test_compiler_array() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - let mut builder = AsmBuilder::::default(); - - // Sum all the values of an array. - let len: usize = 1000; - let mut rng = thread_rng(); - - let mut static_array = builder.array::>(len); - - // Put values statically - for i in 0..len { - builder.set(&mut static_array, i, F::one()); - } - // Assert values set. 
- for i in 0..len { - let value = builder.get(&static_array, i); - builder.assert_var_eq(value, F::one()); - } - - let dyn_len: Var<_> = builder.eval(F::from_canonical_usize(len)); - let mut var_array = builder.array::>(dyn_len); - let mut felt_array = builder.array::>(dyn_len); - let mut ext_array = builder.array::>(dyn_len); - // Put values statically - let var_vals = (0..len).map(|_| rng.gen::()).collect::>(); - let felt_vals = (0..len).map(|_| rng.gen::()).collect::>(); - let ext_vals = (0..len).map(|_| rng.gen::()).collect::>(); - for i in 0..len { - builder.set(&mut var_array, i, var_vals[i]); - builder.set(&mut felt_array, i, felt_vals[i]); - builder.set(&mut ext_array, i, ext_vals[i].cons()); - } - // Assert values set. - for i in 0..len { - let var_value = builder.get(&var_array, i); - builder.assert_var_eq(var_value, var_vals[i]); - let felt_value = builder.get(&felt_array, i); - builder.assert_felt_eq(felt_value, felt_vals[i]); - let ext_value = builder.get(&ext_array, i); - builder.assert_ext_eq(ext_value, ext_vals[i].cons()); - } - - // Put values dynamically - builder.range(0, dyn_len).for_each(|i, builder| { - builder.set(&mut var_array, i, i * F::two()); - builder.set(&mut felt_array, i, F::from_canonical_u32(3)); - builder.set(&mut ext_array, i, (EF::from_canonical_u32(4)).cons()); - }); - - // Assert values set. - builder.range(0, dyn_len).for_each(|i, builder| { - let var_value = builder.get(&var_array, i); - builder.assert_var_eq(var_value, i * F::two()); - let felt_value = builder.get(&felt_array, i); - builder.assert_felt_eq(felt_value, F::from_canonical_u32(3)); - let ext_value = builder.get(&ext_array, i); - builder.assert_ext_eq(ext_value, EF::from_canonical_u32(4).cons()); - }); - - // Test the derived macro and mixed size allocations. 
- let mut point_array = builder.dyn_array::>(len); - - builder.range(0, dyn_len).for_each(|i, builder| { - let x: Var<_> = builder.eval(F::two()); - let y: Felt<_> = builder.eval(F::one()); - let z: Ext<_, _> = builder.eval(EF::one().cons()); - let point = Point { x, y, z }; - builder.set(&mut point_array, i, point); - }); - - builder.range(0, dyn_len).for_each(|i, builder| { - let point = builder.get(&point_array, i); - builder.assert_var_eq(point.x, F::two()); - builder.assert_felt_eq(point.y, F::one()); - builder.assert_ext_eq(point.z, EF::one().cons()); - }); - - let mut array = builder.dyn_array::>>(len); - - builder.range(0, array.len()).for_each(|i, builder| { - builder.set(&mut array, i, var_array.clone()); - }); - - builder.range(0, array.len()).for_each(|i, builder| { - let point_array_back = builder.get(&array, i); - builder.assert_eq::>(point_array_back, var_array.clone()); - }); - - let code = builder.compile_asm(); - println!("{code}"); - - let program = code.machine_code(); - - let config = SC::default(); - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.run().unwrap(); -} diff --git a/crates/recursion/compiler/tests/conditionals.rs b/crates/recursion/compiler/tests/conditionals.rs deleted file mode 100644 index 41827a42ed..0000000000 --- a/crates/recursion/compiler/tests/conditionals.rs +++ /dev/null @@ -1,87 +0,0 @@ -use p3_baby_bear::BabyBear; -use p3_field::{extension::BinomialExtensionField, AbstractField}; -use sp1_recursion_compiler::{asm::AsmBuilder, ir::Var}; -use sp1_recursion_core::runtime::Runtime; -use sp1_stark::baby_bear_poseidon2::BabyBearPoseidon2; - -#[test] -fn test_compiler_conditionals() { - type SC = BabyBearPoseidon2; - type F = BabyBear; - type EF = BinomialExtensionField; - let mut builder = AsmBuilder::::default(); - - let zero: Var<_> = builder.eval(F::zero()); - let one: Var<_> = builder.eval(F::one()); - let two: Var<_> = builder.eval(F::two()); - let three: Var<_> = builder.eval(F::from_canonical_u32(3)); - let four: Var<_> = builder.eval(F::from_canonical_u32(4)); - - let c: Var<_> = builder.eval(F::zero()); - builder.if_eq(zero, zero).then(|builder| { - builder.if_eq(one, one).then(|builder| { - builder.if_eq(two, two).then(|builder| { - builder.if_eq(three, three).then(|builder| { - builder.if_eq(four, four).then(|builder| builder.assign(c, F::one())) - }) - }) - }) - }); - builder.assert_var_eq(c, F::one()); - - let c: Var<_> = builder.eval(F::zero()); - builder.if_eq(zero, one).then_or_else( - |builder| { - builder - .if_eq(one, one) - .then(|builder| builder.if_eq(two, two).then(|builder| builder.assign(c, F::one()))) - }, - |builder| { - builder.if_ne(three, four).then_or_else(|_| {}, |builder| builder.assign(c, F::zero())) - }, - ); - builder.assert_var_eq(c, F::zero()); - - let code = builder.compile_asm(); - println!("{}", code); - // let program = builder.compile(); - let program = code.machine_code(); - - let config = SC::default(); - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.run().unwrap(); -} - -#[test] -fn test_compiler_conditionals_v2() { - type SC = BabyBearPoseidon2; - type F = BabyBear; - type EF = BinomialExtensionField; - let mut builder = AsmBuilder::::default(); - - let zero: Var<_> = builder.eval(F::zero()); - let one: Var<_> = builder.eval(F::one()); - let two: Var<_> = builder.eval(F::two()); - let three: Var<_> = builder.eval(F::from_canonical_u32(3)); - let four: Var<_> = builder.eval(F::from_canonical_u32(4)); - - let c: Var<_> = builder.eval(F::zero()); - 
builder.if_eq(zero, zero).then(|builder| { - builder.if_eq(one, one).then(|builder| { - builder.if_eq(two, two).then(|builder| { - builder.if_eq(three, three).then(|builder| { - builder.if_eq(four, four).then(|builder| builder.assign(c, F::one())) - }) - }) - }) - }); - - let code = builder.compile_asm(); - println!("{}", code); - // let program = builder.compile(); - let program = code.machine_code(); - - let config = SC::default(); - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.run().unwrap(); -} diff --git a/crates/recursion/compiler/tests/for_loops.rs b/crates/recursion/compiler/tests/for_loops.rs deleted file mode 100644 index b57fe3e43c..0000000000 --- a/crates/recursion/compiler/tests/for_loops.rs +++ /dev/null @@ -1,219 +0,0 @@ -use p3_baby_bear::BabyBear; -use p3_field::AbstractField; -use sp1_recursion_compiler::{ - asm::{AsmBuilder, AsmConfig}, - ir::{Array, SymbolicVar, Var}, -}; -use sp1_recursion_core::runtime::Runtime; -use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; - -#[test] -fn test_compiler_for_loops() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - let mut builder = AsmBuilder::::default(); - - let n_val = BabyBear::from_canonical_u32(10); - let m_val = BabyBear::from_canonical_u32(5); - - let zero: Var<_> = builder.eval(F::zero()); - let n: Var<_> = builder.eval(n_val); - let m: Var<_> = builder.eval(m_val); - - let i_counter: Var<_> = builder.eval(F::zero()); - let total_counter: Var<_> = builder.eval(F::zero()); - builder.range(zero, n).for_each(|_, builder| { - builder.assign(i_counter, i_counter + F::one()); - - let j_counter: Var<_> = builder.eval(F::zero()); - builder.range(zero, m).for_each(|_, builder| { - builder.assign(total_counter, total_counter + F::one()); - builder.assign(j_counter, j_counter + F::one()); - }); - // Assert that the inner loop ran m times, in two different ways. - builder.assert_var_eq(j_counter, m_val); - builder.assert_var_eq(j_counter, m); - }); - // Assert that the outer loop ran n times, in two different ways. - builder.assert_var_eq(i_counter, n_val); - builder.assert_var_eq(i_counter, n); - // Assert that the total counter is equal to n * m, in two ways. - builder.assert_var_eq(total_counter, n_val * m_val); - builder.assert_var_eq(total_counter, n * m); - - let program = builder.compile_program(); - - let config = SC::default(); - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.run().unwrap(); -} - -#[test] -fn test_compiler_nested_array_loop() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - let mut builder = AsmBuilder::::default(); - type C = AsmConfig; - - let mut array: Array>> = builder.array(100); - - builder.range(0, array.len()).for_each(|i, builder| { - let mut inner_array = builder.array::>(10); - builder.range(0, inner_array.len()).for_each(|j, builder| { - builder.set(&mut inner_array, j, i + j); - }); - builder.set(&mut array, i, inner_array); - }); - - // Test that the array is correctly initialized. 
- builder.range(0, array.len()).for_each(|i, builder| { - let inner_array = builder.get(&array, i); - builder.range(0, inner_array.len()).for_each(|j, builder| { - let val = builder.get(&inner_array, j); - builder.assert_var_eq(val, i + j); - }); - }); - - let code = builder.compile_asm(); - - println!("{}", code); - - let program = code.machine_code(); - - let config = SC::default(); - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.run().unwrap(); -} - -#[test] -fn test_compiler_break() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - let mut builder = AsmBuilder::::default(); - type C = AsmConfig; - - let len = 100; - let break_len = F::from_canonical_usize(10); - - let mut array: Array> = builder.array(len); - - builder.range(0, array.len()).for_each(|i, builder| { - builder.set(&mut array, i, i); - - builder.if_eq(i, break_len).then(|builder| builder.break_loop()); - }); - - // Test that the array is correctly initialized. - - builder.range(0, array.len()).for_each(|i, builder| { - let value = builder.get(&array, i); - builder.if_eq(i, break_len + F::one()).then_or_else( - |builder| builder.assert_var_eq(value, i), - |builder| { - builder.assert_var_eq(value, F::zero()); - builder.break_loop(); - }, - ); - }); - - let is_break: Var<_> = builder.eval(F::one()); - builder.range(0, array.len()).for_each(|i, builder| { - let exp_value: Var<_> = builder.eval(i * is_break); - let value = builder.get(&array, i); - builder.assert_var_eq(value, exp_value); - builder.if_eq(i, break_len).then(|builder| builder.assign(is_break, F::zero())); - }); - - // Test the break instructions in a nested loop. - - let mut array: Array> = builder.array(len); - builder.range(0, array.len()).for_each(|i, builder| { - let counter: Var<_> = builder.eval(F::zero()); - - builder.range(0, i).for_each(|_, builder| { - builder.assign(counter, counter + F::one()); - builder.if_eq(counter, break_len).then(|builder| builder.break_loop()); - }); - - builder.set(&mut array, i, counter); - }); - - // Test that the array is correctly initialized. - - let is_break: Var<_> = builder.eval(F::one()); - builder.range(0, array.len()).for_each(|i, builder| { - let exp_value: Var<_> = - builder.eval(i * is_break + (SymbolicVar::::one() - is_break) * break_len); - let value = builder.get(&array, i); - builder.assert_var_eq(value, exp_value); - builder.if_eq(i, break_len).then(|builder| builder.assign(is_break, F::zero())); - }); - - let code = builder.compile_asm(); - - println!("{}", code); - - let program = code.machine_code(); - - let config = SC::default(); - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.run().unwrap(); -} - -#[test] -fn test_compiler_step_by() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - let mut builder = AsmBuilder::::default(); - - let n_val = BabyBear::from_canonical_u32(20); - - let zero: Var<_> = builder.eval(F::zero()); - let n: Var<_> = builder.eval(n_val); - - let i_counter: Var<_> = builder.eval(F::zero()); - builder.range(zero, n).step_by(2).for_each(|_, builder| { - builder.assign(i_counter, i_counter + F::one()); - }); - // Assert that the outer loop ran n times, in two different ways. 
- let n_exp = n_val / F::two(); - builder.assert_var_eq(i_counter, n_exp); - - let program = builder.compile_program(); - - let config = SC::default(); - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.run().unwrap(); -} - -#[test] -fn test_compiler_bneinc() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - let mut builder = AsmBuilder::::default(); - - let n_val = BabyBear::from_canonical_u32(20); - - let zero: Var<_> = builder.eval(F::zero()); - let n: Var<_> = builder.eval(n_val); - - let i_counter: Var<_> = builder.eval(F::zero()); - builder.range(zero, n).step_by(1).for_each(|_, builder| { - builder.assign(i_counter, i_counter + F::one()); - }); - - let code = builder.clone().compile_asm(); - - println!("{}", code); - - let program = builder.compile_program(); - - let config = SC::default(); - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.run().unwrap(); -} diff --git a/crates/recursion/compiler/tests/io.rs b/crates/recursion/compiler/tests/io.rs deleted file mode 100644 index 4ba5a86545..0000000000 --- a/crates/recursion/compiler/tests/io.rs +++ /dev/null @@ -1,42 +0,0 @@ -use p3_field::AbstractField; -use sp1_recursion_compiler::asm::AsmBuilder; -use sp1_recursion_core::runtime::Runtime; -use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; - -#[test] -fn test_io() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - let mut builder = AsmBuilder::::default(); - - let arr = builder.hint_vars(); - builder.range(0, arr.len()).for_each(|i, builder| { - let el = builder.get(&arr, i); - builder.print_v(el); - }); - - let arr = builder.hint_felts(); - builder.range(0, arr.len()).for_each(|i, builder| { - let el = builder.get(&arr, i); - builder.print_f(el); - }); - - let arr = builder.hint_exts(); - builder.range(0, arr.len()).for_each(|i, builder| { - let el = builder.get(&arr, i); - builder.print_e(el); - }); - - let program = builder.compile_program(); - - let config = SC::default(); - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.witness_stream = vec![ - vec![F::zero().into(), F::zero().into(), F::one().into()], - vec![F::zero().into(), F::zero().into(), F::two().into()], - vec![F::one().into(), F::one().into(), F::two().into()], - ] - .into(); - runtime.run().unwrap(); -} diff --git a/crates/recursion/compiler/tests/lt.rs b/crates/recursion/compiler/tests/lt.rs deleted file mode 100644 index 698db8fcb4..0000000000 --- a/crates/recursion/compiler/tests/lt.rs +++ /dev/null @@ -1,28 +0,0 @@ -use p3_field::AbstractField; -use sp1_recursion_compiler::{asm::AsmBuilder, prelude::*}; -use sp1_recursion_core::runtime::Runtime; -use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; - -#[test] -fn test_compiler_less_than() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - let mut builder = AsmBuilder::::default(); - - let a: Var<_> = builder.constant(F::from_canonical_u32(10)); - let b: Var<_> = builder.constant(F::from_canonical_u32(20)); - let c = builder.lt(a, b); - builder.assert_var_eq(c, F::one()); - - let a: Var<_> = builder.constant(F::from_canonical_u32(20)); - let b: Var<_> = builder.constant(F::from_canonical_u32(10)); - let c = builder.lt(a, b); - builder.assert_var_eq(c, F::zero()); - - let program = builder.compile_program(); - - let config = SC::default(); - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.run().unwrap(); -} diff --git 
a/crates/recursion/compiler/tests/poseidon2.rs b/crates/recursion/compiler/tests/poseidon2.rs deleted file mode 100644 index 6cad8032f2..0000000000 --- a/crates/recursion/compiler/tests/poseidon2.rs +++ /dev/null @@ -1,156 +0,0 @@ -use p3_field::{AbstractField, PrimeField32}; -use p3_symmetric::Permutation; -use rand::{thread_rng, Rng}; -use sp1_core_machine::utils::setup_logger; -use sp1_recursion_compiler::{ - asm::AsmBuilder, - ir::{Array, Var}, -}; -use sp1_recursion_core::runtime::{Runtime, PERMUTATION_WIDTH}; -use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; - -#[test] -fn test_compiler_poseidon2_permute() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - - let mut rng = thread_rng(); - - let config = SC::default(); - let perm = &config.perm; - - let mut builder = AsmBuilder::::default(); - - let random_state_vals: [F; PERMUTATION_WIDTH] = rng.gen(); - // Execute the reference permutation - let expected_result = perm.permute(random_state_vals); - - // Execture the permutation in the VM - // Initialize an array and populate it with the entries. - let var_width: Var = builder.eval(F::from_canonical_usize(PERMUTATION_WIDTH)); - let mut random_state = builder.array(var_width); - for (i, val) in random_state_vals.iter().enumerate() { - builder.set(&mut random_state, i, *val); - } - - // Assert that the values are set correctly. - for (i, val) in random_state_vals.iter().enumerate() { - let res = builder.get(&random_state, i); - builder.assert_felt_eq(res, *val); - } - - let result = builder.poseidon2_permute(&random_state); - - assert!(matches!(result, Array::Dyn(_, _))); - - // Assert that the result is equal to the expected result. - for (i, val) in expected_result.iter().enumerate() { - let res = builder.get(&result, i); - builder.assert_felt_eq(res, *val); - } - - let program = builder.compile_program(); - - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.run().unwrap(); - println!( - "The program executed successfully, number of cycles: {}", - runtime.clk.as_canonical_u32() / 4 - ); -} - -#[test] -fn test_compiler_poseidon2_hash() { - setup_logger(); - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - - let mut rng = thread_rng(); - - let config = SC::default(); - - let mut builder = AsmBuilder::::default(); - - let random_state_vals_1: [F; 42] = rng.gen(); - println!("{:?}", random_state_vals_1); - let random_state_vals_2: [F; 42] = rng.gen(); - println!("{:?}", random_state_vals_2); - - let mut random_state_v1 = - builder.dyn_array(random_state_vals_1.len() + random_state_vals_2.len()); - for (i, val) in random_state_vals_1.iter().enumerate() { - builder.set(&mut random_state_v1, i, *val); - } - for (i, val) in random_state_vals_2.iter().enumerate() { - builder.set(&mut random_state_v1, i + random_state_vals_1.len(), *val); - } - - let mut random_state_v2_1 = builder.dyn_array(random_state_vals_1.len()); - for (i, val) in random_state_vals_1.iter().enumerate() { - builder.set(&mut random_state_v2_1, i, *val); - } - let mut random_state_v2_2 = builder.dyn_array(random_state_vals_2.len()); - for (i, val) in random_state_vals_2.iter().enumerate() { - builder.set(&mut random_state_v2_2, i, *val); - } - - let mut nested_random_state = builder.dyn_array(2); - builder.set(&mut nested_random_state, 0, random_state_v2_1.clone()); - builder.set(&mut nested_random_state, 1, random_state_v2_2.clone()); - - let result = builder.poseidon2_hash(&random_state_v1); - let result_x = 
builder.poseidon2_hash_x(&nested_random_state); - - builder.range(0, result.len()).for_each(|i, builder| { - let el = builder.get(&result, i); - let el_x = builder.get(&result_x, i); - builder.assert_felt_eq(el, el_x); - }); - - let program = builder.compile_program(); - - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.run().unwrap(); - println!( - "The program executed successfully, number of cycles: {}", - runtime.clk.as_canonical_u32() / 4 - ); - runtime.print_stats(); -} - -#[test] -fn test_compiler_poseidon2_hash_v2() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - - let mut rng = thread_rng(); - - let config = SC::default(); - - let mut builder = AsmBuilder::::default(); - - let random_state_vals: [F; 2] = rng.gen(); - - let mut random_state = builder.dyn_array(PERMUTATION_WIDTH); - for (i, val) in random_state_vals.iter().enumerate() { - builder.set(&mut random_state, i, *val); - } - - let idx: Var<_> = builder.eval(F::zero()); - builder.if_eq(idx, F::zero()).then(|builder| { - let element = builder.get(&random_state, idx); - builder.print_f(element); - }); - - let program = builder.compile_program(); - - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.run().unwrap(); - println!( - "The program executed successfully, number of cycles: {}", - runtime.clk.as_canonical_u32() / 4 - ); -} diff --git a/crates/recursion/compiler/tests/public_values.rs b/crates/recursion/compiler/tests/public_values.rs deleted file mode 100644 index 02115ab621..0000000000 --- a/crates/recursion/compiler/tests/public_values.rs +++ /dev/null @@ -1,29 +0,0 @@ -use p3_field::AbstractField; -use sp1_recursion_compiler::{asm::AsmBuilder, prelude::*}; -use sp1_recursion_core::runtime::Runtime; -use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; - -#[test] -fn test_compiler_public_values() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - let mut builder = AsmBuilder::::default(); - - let a: Felt<_> = builder.constant(F::from_canonical_u32(10)); - let b: Felt<_> = builder.constant(F::from_canonical_u32(20)); - - let dyn_len: Var<_> = builder.eval(F::from_canonical_usize(2)); - let mut var_array = builder.dyn_array::>(dyn_len); - builder.set(&mut var_array, 0, a); - builder.set(&mut var_array, 1, b); - // builder.write_public_values(&var_array); - // builder.write_public_values(&var_array); - // builder.commit_public_values(); - - let program = builder.compile_program(); - - let config = SC::default(); - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.run().unwrap(); -} diff --git a/crates/recursion/core-v2/CHANGELOG.md b/crates/recursion/core-v2/CHANGELOG.md deleted file mode 100644 index efcaa44f4e..0000000000 --- a/crates/recursion/core-v2/CHANGELOG.md +++ /dev/null @@ -1,22 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
- -## [Unreleased] - -## [1.2.0-rc1](https://github.com/succinctlabs/sp1/releases/tag/sp1-recursion-core-v2-v1.2.0-rc1) - 2024-08-23 - -### Fixed - -- bug in exp_reverse_bits memory multiplicity ([#1378](https://github.com/succinctlabs/sp1/pull/1378)) -- fix imports -- cargo check on tests - -### Other - -- use crate `vec_map`, box large `Instruction` variants ([#1360](https://github.com/succinctlabs/sp1/pull/1360)) -- merge dev into experimental pt 2 ([#1341](https://github.com/succinctlabs/sp1/pull/1341)) -- resolve merge conflicts between dev and experimental diff --git a/crates/recursion/core-v2/Cargo.toml b/crates/recursion/core-v2/Cargo.toml deleted file mode 100644 index e2fb7cf83f..0000000000 --- a/crates/recursion/core-v2/Cargo.toml +++ /dev/null @@ -1,48 +0,0 @@ -[package] -name = "sp1-recursion-core-v2" -description = "SP1 is a performant, 100% open-source, contributor-friendly zkVM." -readme = "../../../README.md" -version = { workspace = true } -edition = { workspace = true } -license = { workspace = true } -repository = { workspace = true } -keywords = { workspace = true } -categories = { workspace = true } - -[dependencies] -p3-field = { workspace = true } -p3-util = { workspace = true } -p3-baby-bear = { workspace = true } -p3-air = { workspace = true } -p3-matrix = { workspace = true } -p3-maybe-rayon = { workspace = true } -p3-poseidon2 = { workspace = true } -p3-symmetric = { workspace = true } -sp1-derive = { workspace = true } -sp1-primitives = { workspace = true } -tracing = "0.1.40" -sp1-core-machine = { workspace = true } -sp1-core-executor = { workspace = true } -sp1-stark = { workspace = true } -sp1-recursion-core = { workspace = true } -hashbrown = { version = "0.14.5", features = ["serde"] } -itertools = "0.13.0" -p3-bn254-fr = { workspace = true } -p3-merkle-tree = { workspace = true } -p3-commit = { workspace = true } -p3-dft = { workspace = true } -p3-challenger = { workspace = true } -p3-fri = { workspace = true } -zkhash = "0.2.0" -ff = { version = "0.13", features = ["derive", "derive_bits"] } -serde = { version = "1.0", features = ["derive", "rc"] } -serde_with = "3.9.0" -backtrace = { version = "0.3.71", features = ["serde"] } -arrayref = "0.3.7" -static_assertions = "1.1.0" -num_cpus = "1.16.0" -thiserror = "1.0.60" -vec_map = "0.8.2" - -[dev-dependencies] -rand = "0.8.5" diff --git a/crates/recursion/core-v2/src/air.rs b/crates/recursion/core-v2/src/air.rs deleted file mode 100644 index 8fffef0115..0000000000 --- a/crates/recursion/core-v2/src/air.rs +++ /dev/null @@ -1,2 +0,0 @@ -// Export all the air stuff from `sp1_recursion_core` for now, until we will migrate it here. -pub use sp1_recursion_core::air::*; diff --git a/crates/recursion/core-v2/src/chips/dummy.rs b/crates/recursion/core-v2/src/chips/dummy.rs deleted file mode 100644 index f903cf5759..0000000000 --- a/crates/recursion/core-v2/src/chips/dummy.rs +++ /dev/null @@ -1,78 +0,0 @@ -use p3_air::{Air, BaseAir, PairBuilder}; -use p3_field::{Field, PrimeField32}; -use p3_matrix::{dense::RowMajorMatrix, Matrix}; -use sp1_derive::AlignedBorrow; -use sp1_stark::air::MachineAir; - -use crate::{builder::SP1RecursionAirBuilder, *}; - -/// A dummy chip with 1<< `log_height` rows, `COL_PADDING` main columns, `COL_PADDING` preprocessed -/// columns, and no constraints. 
-pub struct DummyChip { - log_height: usize, -} - -impl Default for DummyChip { - fn default() -> Self { - Self { log_height: 1 } - } -} - -impl DummyChip { - pub fn new(log_height: usize) -> Self { - Self { log_height } - } -} - -#[derive(AlignedBorrow, Debug, Clone, Copy)] -#[repr(C)] -pub struct DummyCols { - pub vals: [F; COL_PADDING], -} - -impl BaseAir for DummyChip { - fn width(&self) -> usize { - COL_PADDING - } -} - -impl MachineAir for DummyChip { - type Record = ExecutionRecord; - - type Program = crate::RecursionProgram; - - fn name(&self) -> String { - "DummyWide".to_string() - } - - fn generate_dependencies(&self, _: &Self::Record, _: &mut Self::Record) { - // This is a no-op. - } - - fn generate_trace(&self, _: &Self::Record, _: &mut Self::Record) -> RowMajorMatrix { - RowMajorMatrix::new(vec![F::zero(); COL_PADDING * (1 << self.log_height)], COL_PADDING) - } - - fn generate_preprocessed_trace(&self, _program: &Self::Program) -> Option> { - Some(RowMajorMatrix::new(vec![F::zero(); 1 << self.log_height], 1)) - } - - fn preprocessed_width(&self) -> usize { - 1 - } - - fn included(&self, _record: &Self::Record) -> bool { - COL_PADDING != 0 - } -} - -impl Air for DummyChip -where - AB: SP1RecursionAirBuilder + PairBuilder, -{ - fn eval(&self, builder: &mut AB) { - let main = builder.main(); - let local = main.row_slice(0); - builder.assert_zero(local[0]); - } -} diff --git a/crates/recursion/core-v2/src/lib.rs b/crates/recursion/core-v2/src/lib.rs deleted file mode 100644 index 61dd5cdc81..0000000000 --- a/crates/recursion/core-v2/src/lib.rs +++ /dev/null @@ -1,209 +0,0 @@ -use std::iter::once; - -use p3_field::PrimeField64; -use serde::{Deserialize, Serialize}; -use sp1_derive::AlignedBorrow; -use sp1_recursion_core::air::{Block, RecursionPublicValues}; - -pub mod air; -pub mod builder; -pub mod chips; -pub mod machine; -pub mod runtime; - -pub use runtime::*; - -// Re-export the stark stuff from `sp1_recursion_core` for now, until we will migrate it here. -pub use sp1_recursion_core::stark; - -use crate::chips::poseidon2_skinny::WIDTH; - -#[derive( - AlignedBorrow, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize, Default, -)] -#[repr(transparent)] -pub struct Address(pub F); - -impl Address { - #[inline] - pub fn as_usize(&self) -> usize { - self.0.as_canonical_u64() as usize - } -} - -// ------------------------------------------------------------------------------------------------- - -/// The inputs and outputs to an operation of the base field ALU. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[repr(C)] -pub struct BaseAluIo { - pub out: V, - pub in1: V, - pub in2: V, -} - -pub type BaseAluEvent = BaseAluIo; - -/// An instruction invoking the extension field ALU. -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct BaseAluInstr { - pub opcode: BaseAluOpcode, - pub mult: F, - pub addrs: BaseAluIo>, -} - -// ------------------------------------------------------------------------------------------------- - -/// The inputs and outputs to an operation of the extension field ALU. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[repr(C)] -pub struct ExtAluIo { - pub out: V, - pub in1: V, - pub in2: V, -} - -pub type ExtAluEvent = ExtAluIo>; - -/// An instruction invoking the extension field ALU. 
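The ALU tables above address their operands through Address values rather than registers, so an instruction is just an opcode, a multiplicity for the output, and an out/in1/in2 address triple. The sketch below builds one such instruction using locally defined mirror types; BaseAluOpcode's variant names and the concrete field type are assumptions here, not taken from this diff.

// Stand-alone mirror of the instruction shape shown above; these local
// types are for illustration only and are not the crate's definitions.
#[derive(Debug, Clone, Copy)]
struct Address(u32);

#[derive(Debug, Clone, Copy)]
struct BaseAluIo<V> {
    out: V,
    in1: V,
    in2: V,
}

#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
enum BaseAluOpcode {
    Add, // assumed variant names
    Sub,
    Mul,
    Div,
}

#[derive(Debug)]
struct BaseAluInstr<F> {
    opcode: BaseAluOpcode,
    mult: F,
    addrs: BaseAluIo<Address>,
}

fn main() {
    // "mem[2] <- mem[0] + mem[1]", with the result read once downstream.
    let instr = BaseAluInstr::<u32> {
        opcode: BaseAluOpcode::Add,
        mult: 1,
        addrs: BaseAluIo { out: Address(2), in1: Address(0), in2: Address(1) },
    };
    println!("{instr:?}");
}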
-#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct ExtAluInstr { - pub opcode: ExtAluOpcode, - pub mult: F, - pub addrs: ExtAluIo>, -} - -// ------------------------------------------------------------------------------------------------- - -/// The inputs and outputs to the manual memory management/memory initialization table. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct MemIo { - pub inner: V, -} - -#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct MemInstr { - pub addrs: MemIo>, - pub vals: MemIo>, - pub mult: F, - pub kind: MemAccessKind, -} - -pub type MemEvent = MemIo>; - -// ------------------------------------------------------------------------------------------------- - -#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub enum MemAccessKind { - Read, - Write, -} - -/// The inputs and outputs to a Poseidon2 permutation. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct Poseidon2Io { - pub input: [V; WIDTH], - pub output: [V; WIDTH], -} - -/// An instruction invoking the Poseidon2 permutation. -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct Poseidon2SkinnyInstr { - pub addrs: Poseidon2Io>, - pub mults: [F; WIDTH], -} - -pub type Poseidon2Event = Poseidon2Io; - -/// The inputs and outputs to an exp-reverse-bits operation. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct ExpReverseBitsIo { - pub base: V, - // The bits of the exponent in little-endian order in a vec. - pub exp: Vec, - pub result: V, -} - -pub type Poseidon2WideEvent = Poseidon2Io; -pub type Poseidon2Instr = Poseidon2SkinnyInstr; - -/// An instruction invoking the exp-reverse-bits operation. -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct ExpReverseBitsInstr { - pub addrs: ExpReverseBitsIo>, - pub mult: F, -} - -/// The event encoding the inputs and outputs of an exp-reverse-bits operation. The `len` operand is -/// now stored as the length of the `exp` field. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct ExpReverseBitsEvent { - pub base: F, - pub exp: Vec, - pub result: F, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct FriFoldIo { - pub ext_single: FriFoldExtSingleIo>, - pub ext_vec: FriFoldExtVecIo>>, - pub base_single: FriFoldBaseIo, -} - -/// The extension-field-valued single inputs to the FRI fold operation. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct FriFoldExtSingleIo { - pub z: V, - pub alpha: V, -} - -/// The extension-field-valued vector inputs to the FRI fold operation. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct FriFoldExtVecIo { - pub mat_opening: V, - pub ps_at_z: V, - pub alpha_pow_input: V, - pub ro_input: V, - pub alpha_pow_output: V, - pub ro_output: V, -} - -/// The base-field-valued inputs to the FRI fold operation. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct FriFoldBaseIo { - pub x: V, -} - -/// An instruction invoking the FRI fold operation. Addresses for extension field elements are of -/// the same type as for base field elements. 
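For orientation on the FRI fold IO above: each vector element pairs one matrix opening with running alpha_pow and ro accumulators, and the per-element update is, as far as this refactor shows, the usual FRI fold accumulation. The sketch below writes that update over f64 as a stand-in for the base/extension fields; treat the exact formula as an assumption about what the chip enforces, not as its constraint code.

// Illustrative only: the accumulator update the FRI fold IO appears to
// encode, with f64 standing in for field elements.
struct FoldRow {
    mat_opening: f64, // p(x) from the committed matrix
    ps_at_z: f64,     // claimed p(z)
    alpha_pow_in: f64,
    ro_in: f64,
}

fn fold_step(x: f64, z: f64, alpha: f64, row: &FoldRow) -> (f64, f64) {
    let alpha_pow_out = row.alpha_pow_in * alpha;
    let ro_out =
        row.ro_in + row.alpha_pow_in * (row.ps_at_z - row.mat_opening) / (z - x);
    (alpha_pow_out, ro_out)
}

fn main() {
    let row = FoldRow { mat_opening: 3.0, ps_at_z: 5.0, alpha_pow_in: 2.0, ro_in: 1.0 };
    // ro_out = 1 + 2 * (5 - 3) / (2 - 1) = 5;  alpha_pow_out = 2 * 7 = 14.
    assert_eq!(fold_step(1.0, 2.0, 7.0, &row), (14.0, 5.0));
}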
-#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct FriFoldInstr { - pub base_single_addrs: FriFoldBaseIo>, - pub ext_single_addrs: FriFoldExtSingleIo>, - pub ext_vec_addrs: FriFoldExtVecIo>>, - pub alpha_pow_mults: Vec, - pub ro_mults: Vec, -} - -/// The event encoding the data of a single iteration within the FRI fold operation. -/// For any given event, we are accessing a single element of the `Vec` inputs, so that the event -/// is not a type alias for `FriFoldIo` like many of the other events. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct FriFoldEvent { - pub base_single: FriFoldBaseIo, - pub ext_single: FriFoldExtSingleIo>, - pub ext_vec: FriFoldExtVecIo>, -} - -/// An instruction that will save the public values to the execution record and will commit to -/// it's digest. -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct CommitPublicValuesInstr { - pub pv_addrs: RecursionPublicValues>, -} - -/// The event for committing to the public values. -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct CommitPublicValuesEvent { - pub public_values: RecursionPublicValues, -} diff --git a/crates/recursion/core-v2/src/machine.rs b/crates/recursion/core-v2/src/machine.rs deleted file mode 100644 index a30a8846e9..0000000000 --- a/crates/recursion/core-v2/src/machine.rs +++ /dev/null @@ -1,325 +0,0 @@ -use p3_field::{extension::BinomiallyExtendable, PrimeField32}; -use sp1_recursion_core::runtime::D; -use sp1_stark::{Chip, StarkGenericConfig, StarkMachine, PROOF_MAX_NUM_PVS}; - -use crate::chips::{ - alu_base::BaseAluChip, - alu_ext::ExtAluChip, - dummy::DummyChip, - exp_reverse_bits::ExpReverseBitsLenChip, - fri_fold::FriFoldChip, - mem::{MemoryConstChip, MemoryVarChip}, - poseidon2_skinny::Poseidon2SkinnyChip, - poseidon2_wide::Poseidon2WideChip, - public_values::PublicValuesChip, -}; - -#[derive(sp1_derive::MachineAir)] -#[sp1_core_path = "sp1_core_machine"] -#[execution_record_path = "crate::ExecutionRecord"] -#[program_path = "crate::RecursionProgram"] -#[builder_path = "crate::builder::SP1RecursionAirBuilder"] -#[eval_trait_bound = "AB::Var: 'static"] -pub enum RecursionAir< - F: PrimeField32 + BinomiallyExtendable, - const DEGREE: usize, - const COL_PADDING: usize, -> { - // Program(ProgramChip), - MemoryConst(MemoryConstChip), - MemoryVar(MemoryVarChip), - BaseAlu(BaseAluChip), - ExtAlu(ExtAluChip), - // Cpu(CpuChip), - // MemoryGlobal(MemoryGlobalChip), - Poseidon2Skinny(Poseidon2SkinnyChip), - Poseidon2Wide(Poseidon2WideChip), - FriFold(FriFoldChip), - // RangeCheck(RangeCheckChip), - // Multi(MultiChip), - ExpReverseBitsLen(ExpReverseBitsLenChip), - PublicValues(PublicValuesChip), - DummyWide(DummyChip), -} - -impl, const DEGREE: usize, const COL_PADDING: usize> - RecursionAir -{ - /// A recursion machine that can have dynamic trace sizes. - pub fn machine>(config: SC) -> StarkMachine { - let chips = Self::get_all().into_iter().map(Chip::new).collect::>(); - StarkMachine::new(config, chips, PROOF_MAX_NUM_PVS) - } - - /// A recursion machine that can have dynamic trace sizes, and uses the wide variant of - /// Poseidon2. 
- pub fn machine_wide>(config: SC) -> StarkMachine { - let chips = Self::get_all_wide().into_iter().map(Chip::new).collect::>(); - StarkMachine::new(config, chips, PROOF_MAX_NUM_PVS) - } - - pub fn machine_with_padding>( - config: SC, - fri_fold_padding: usize, - poseidon2_padding: usize, - erbl_padding: usize, - ) -> StarkMachine { - let chips = Self::get_all_with_padding(fri_fold_padding, poseidon2_padding, erbl_padding) - .into_iter() - .map(Chip::new) - .collect::>(); - StarkMachine::new(config, chips, PROOF_MAX_NUM_PVS) - } - - pub fn dummy_machine>( - config: SC, - log_height: usize, - ) -> StarkMachine { - let chips = vec![RecursionAir::DummyWide(DummyChip::new(log_height))]; - StarkMachine::new(config, chips.into_iter().map(Chip::new).collect(), PROOF_MAX_NUM_PVS) - } - // /// A recursion machine with fixed trace sizes tuned to work specifically for the wrap layer. - // pub fn wrap_machine>(config: SC) -> StarkMachine { - // let chips = Self::get_wrap_all() - // .into_iter() - // .map(Chip::new) - // .collect::>(); - // StarkMachine::new(config, chips, PROOF_MAX_NUM_PVS) - // } - - // /// A recursion machine with fixed trace sizes tuned to work specifically for the wrap layer. - // pub fn wrap_machine_dyn>(config: SC) -> StarkMachine { let chips = Self::get_wrap_dyn_all() - // .into_iter() - // .map(Chip::new) - // .collect::>(); - // StarkMachine::new(config, chips, PROOF_MAX_NUM_PVS) - // } - - pub fn get_all() -> Vec { - vec![ - RecursionAir::MemoryConst(MemoryConstChip::default()), - RecursionAir::MemoryVar(MemoryVarChip::default()), - RecursionAir::BaseAlu(BaseAluChip::default()), - RecursionAir::ExtAlu(ExtAluChip::default()), - RecursionAir::Poseidon2Skinny(Poseidon2SkinnyChip::::default()), - // RecursionAir::Poseidon2Wide(Poseidon2WideChip::::default()), - RecursionAir::ExpReverseBitsLen(ExpReverseBitsLenChip::::default()), - RecursionAir::FriFold(FriFoldChip::::default()), - RecursionAir::PublicValues(PublicValuesChip::default()), - ] - } - - pub fn get_all_wide() -> Vec { - vec![ - // RecursionAir::Program(ProgramChip::default()), - RecursionAir::MemoryConst(MemoryConstChip::default()), - RecursionAir::MemoryVar(MemoryVarChip::default()), - RecursionAir::BaseAlu(BaseAluChip::default()), - RecursionAir::ExtAlu(ExtAluChip::default()), - // RecursionAir::Poseidon2Skinny(Poseidon2SkinnyChip::::default()), - RecursionAir::Poseidon2Wide(Poseidon2WideChip::::default()), - RecursionAir::ExpReverseBitsLen(ExpReverseBitsLenChip::::default()), - RecursionAir::FriFold(FriFoldChip::::default()), - RecursionAir::PublicValues(PublicValuesChip::default()), - ] - } - - pub fn get_all_with_padding( - fri_fold_padding: usize, - poseidon2_padding: usize, - erbl_padding: usize, - ) -> Vec { - vec![ - // RecursionAir::Program(ProgramChip::default()), - RecursionAir::MemoryConst(MemoryConstChip::default()), - RecursionAir::MemoryVar(MemoryVarChip::default()), - RecursionAir::BaseAlu(BaseAluChip::default()), - RecursionAir::ExtAlu(ExtAluChip::default()), - // RecursionAir::Poseidon2Wide(Poseidon2WideChip::::default()), - RecursionAir::Poseidon2Skinny(Poseidon2SkinnyChip:: { - fixed_log2_rows: Some(poseidon2_padding), - pad: true, - }), - RecursionAir::ExpReverseBitsLen(ExpReverseBitsLenChip:: { - fixed_log2_rows: Some(erbl_padding), - pad: true, - }), - RecursionAir::FriFold(FriFoldChip:: { - fixed_log2_rows: Some(fri_fold_padding), - pad: true, - }), - RecursionAir::PublicValues(PublicValuesChip::default()), - ] - } - - // pub fn get_wrap_dyn_all() -> Vec { - // 
once(RecursionAir::Program(ProgramChip)) - // .chain(once(RecursionAir::Cpu(CpuChip { - // fixed_log2_rows: None, - // _phantom: PhantomData, - // }))) - // .chain(once(RecursionAir::MemoryGlobal(MemoryGlobalChip { - // fixed_log2_rows: None, - // }))) - // .chain(once(RecursionAir::Multi(MultiChip { - // fixed_log2_rows: None, - // }))) - // .chain(once(RecursionAir::RangeCheck(RangeCheckChip::default()))) - // .chain(once(RecursionAir::ExpReverseBitsLen( - // ExpReverseBitsLenChip:: { - // fixed_log2_rows: None, - // pad: true, - // }, - // ))) - // .collect() - // } - - // pub fn get_wrap_all() -> Vec { - // once(RecursionAir::Program(ProgramChip)) - // .chain(once(RecursionAir::Cpu(CpuChip { - // fixed_log2_rows: Some(19), - // _phantom: PhantomData, - // }))) - // .chain(once(RecursionAir::MemoryGlobal(MemoryGlobalChip { - // fixed_log2_rows: Some(20), - // }))) - // .chain(once(RecursionAir::Multi(MultiChip { - // fixed_log2_rows: Some(17), - // }))) - // .chain(once(RecursionAir::RangeCheck(RangeCheckChip::default()))) - // .chain(once(RecursionAir::ExpReverseBitsLen( - // ExpReverseBitsLenChip:: { - // fixed_log2_rows: None, - // pad: true, - // }, - // ))) - // .collect() - // } -} - -#[cfg(test)] -pub mod tests { - - use std::sync::Arc; - - use machine::RecursionAir; - use p3_baby_bear::DiffusionMatrixBabyBear; - use p3_field::{ - extension::{BinomialExtensionField, HasFrobenius}, - AbstractExtensionField, AbstractField, Field, - }; - use rand::prelude::*; - use sp1_core_machine::utils::run_test_machine; - use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; - - // TODO expand glob import - use crate::{runtime::instruction as instr, *}; - - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - type A = RecursionAir; - type B = RecursionAir; - - /// Runs the given program on machines that use the wide and skinny Poseidon2 chips. - pub fn run_recursion_test_machines(program: RecursionProgram) { - let program = Arc::new(program); - let mut runtime = - Runtime::::new(program.clone(), SC::new().perm); - runtime.run().unwrap(); - - // Run with the poseidon2 wide chip. - let wide_machine = A::machine_wide(BabyBearPoseidon2::default()); - let (pk, vk) = wide_machine.setup(&program); - let result = run_test_machine(vec![runtime.record.clone()], wide_machine, pk, vk); - if let Err(e) = result { - panic!("Verification failed: {:?}", e); - } - - // Run with the poseidon2 skinny chip. 
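// Verifying the same execution record against both machines exercises the wide and
// skinny Poseidon2 chip implementations on identical traces.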
- let skinny_machine = B::machine(BabyBearPoseidon2::compressed()); - let (pk, vk) = skinny_machine.setup(&program); - let result = run_test_machine(vec![runtime.record], skinny_machine, pk, vk); - if let Err(e) = result { - panic!("Verification failed: {:?}", e); - } - } - - fn test_instructions(instructions: Vec>) { - let program = RecursionProgram { instructions, ..Default::default() }; - run_recursion_test_machines(program); - } - - #[test] - pub fn fibonacci() { - let n = 10; - - let instructions = once(instr::mem(MemAccessKind::Write, 1, 0, 0)) - .chain(once(instr::mem(MemAccessKind::Write, 2, 1, 1))) - .chain((2..=n).map(|i| instr::base_alu(BaseAluOpcode::AddF, 2, i, i - 2, i - 1))) - .chain(once(instr::mem(MemAccessKind::Read, 1, n - 1, 34))) - .chain(once(instr::mem(MemAccessKind::Read, 2, n, 55))) - .collect::>(); - - test_instructions(instructions); - } - - #[test] - #[should_panic] - pub fn div_nonzero_by_zero() { - let instructions = vec![ - instr::mem(MemAccessKind::Write, 1, 0, 0), - instr::mem(MemAccessKind::Write, 1, 1, 1), - instr::base_alu(BaseAluOpcode::DivF, 1, 2, 1, 0), - instr::mem(MemAccessKind::Read, 1, 2, 1), - ]; - - test_instructions(instructions); - } - - #[test] - pub fn div_zero_by_zero() { - let instructions = vec![ - instr::mem(MemAccessKind::Write, 1, 0, 0), - instr::mem(MemAccessKind::Write, 1, 1, 0), - instr::base_alu(BaseAluOpcode::DivF, 1, 2, 1, 0), - instr::mem(MemAccessKind::Read, 1, 2, 1), - ]; - - test_instructions(instructions); - } - - #[test] - pub fn field_norm() { - let mut instructions = Vec::new(); - - let mut rng = StdRng::seed_from_u64(0xDEADBEEF); - let mut addr = 0; - for _ in 0..100 { - let inner: [F; 4] = std::iter::repeat_with(|| { - core::array::from_fn(|_| rng.sample(rand::distributions::Standard)) - }) - .find(|xs| !xs.iter().all(F::is_zero)) - .unwrap(); - let x = BinomialExtensionField::::from_base_slice(&inner); - let gal = x.galois_group(); - - let mut acc = BinomialExtensionField::one(); - - instructions.push(instr::mem_ext(MemAccessKind::Write, 1, addr, acc)); - for conj in gal { - instructions.push(instr::mem_ext(MemAccessKind::Write, 1, addr + 1, conj)); - instructions.push(instr::ext_alu(ExtAluOpcode::MulE, 1, addr + 2, addr, addr + 1)); - - addr += 2; - acc *= conj; - } - let base_cmp: F = acc.as_base_slice()[0]; - instructions.push(instr::mem_single(MemAccessKind::Read, 1, addr, base_cmp)); - addr += 1; - } - - test_instructions(instructions); - } -} diff --git a/crates/recursion/core-v2/src/runtime/instruction.rs b/crates/recursion/core-v2/src/runtime/instruction.rs deleted file mode 100644 index a565cf9e34..0000000000 --- a/crates/recursion/core-v2/src/runtime/instruction.rs +++ /dev/null @@ -1,215 +0,0 @@ -use std::borrow::Borrow; - -use p3_field::{AbstractExtensionField, AbstractField}; -use serde::{Deserialize, Serialize}; - -use crate::*; - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub enum Instruction { - BaseAlu(BaseAluInstr), - ExtAlu(ExtAluInstr), - Mem(MemInstr), - Poseidon2(Box>), - ExpReverseBitsLen(ExpReverseBitsInstr), - HintBits(HintBitsInstr), - FriFold(Box>), - Print(PrintInstr), - HintExt2Felts(HintExt2FeltsInstr), - CommitPublicValues(Box>), - Hint(HintInstr), -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct HintBitsInstr { - /// Addresses and mults of the output bits. - pub output_addrs_mults: Vec<(Address, F)>, - /// Input value to decompose. 
- pub input_addr: Address, -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct PrintInstr { - pub field_elt_type: FieldEltType, - pub addr: Address, -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct HintInstr { - /// Addresses and mults of the output felts. - pub output_addrs_mults: Vec<(Address, F)>, -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct HintExt2FeltsInstr { - /// Addresses and mults of the output bits. - pub output_addrs_mults: [(Address, F); D], - /// Input value to decompose. - pub input_addr: Address, -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub enum FieldEltType { - Base, - Extension, -} - -pub fn base_alu( - opcode: BaseAluOpcode, - mult: u32, - out: u32, - in1: u32, - in2: u32, -) -> Instruction { - Instruction::BaseAlu(BaseAluInstr { - opcode, - mult: F::from_canonical_u32(mult), - addrs: BaseAluIo { - out: Address(F::from_canonical_u32(out)), - in1: Address(F::from_canonical_u32(in1)), - in2: Address(F::from_canonical_u32(in2)), - }, - }) -} - -pub fn ext_alu( - opcode: ExtAluOpcode, - mult: u32, - out: u32, - in1: u32, - in2: u32, -) -> Instruction { - Instruction::ExtAlu(ExtAluInstr { - opcode, - mult: F::from_canonical_u32(mult), - addrs: ExtAluIo { - out: Address(F::from_canonical_u32(out)), - in1: Address(F::from_canonical_u32(in1)), - in2: Address(F::from_canonical_u32(in2)), - }, - }) -} - -pub fn mem( - kind: MemAccessKind, - mult: u32, - addr: u32, - val: u32, -) -> Instruction { - mem_single(kind, mult, addr, F::from_canonical_u32(val)) -} - -pub fn mem_single( - kind: MemAccessKind, - mult: u32, - addr: u32, - val: F, -) -> Instruction { - mem_block(kind, mult, addr, Block::from(val)) -} - -pub fn mem_ext>( - kind: MemAccessKind, - mult: u32, - addr: u32, - val: EF, -) -> Instruction { - mem_block(kind, mult, addr, val.as_base_slice().into()) -} - -pub fn mem_block( - kind: MemAccessKind, - mult: u32, - addr: u32, - val: Block, -) -> Instruction { - Instruction::Mem(MemInstr { - addrs: MemIo { inner: Address(F::from_canonical_u32(addr)) }, - vals: MemIo { inner: val }, - mult: F::from_canonical_u32(mult), - kind, - }) -} - -pub fn poseidon2( - mults: [u32; WIDTH], - output: [u32; WIDTH], - input: [u32; WIDTH], -) -> Instruction { - Instruction::Poseidon2(Box::new(Poseidon2Instr { - mults: mults.map(F::from_canonical_u32), - addrs: Poseidon2Io { - output: output.map(F::from_canonical_u32).map(Address), - input: input.map(F::from_canonical_u32).map(Address), - }, - })) -} - -pub fn exp_reverse_bits_len( - mult: u32, - base: F, - exp: Vec, - result: F, -) -> Instruction { - Instruction::ExpReverseBitsLen(ExpReverseBitsInstr { - mult: F::from_canonical_u32(mult), - addrs: ExpReverseBitsIo { - base: Address(base), - exp: exp.into_iter().map(Address).collect(), - result: Address(result), - }, - }) -} - -#[allow(clippy::too_many_arguments)] -pub fn fri_fold( - z: u32, - alpha: u32, - x: u32, - mat_opening: Vec, - ps_at_z: Vec, - alpha_pow_input: Vec, - ro_input: Vec, - alpha_pow_output: Vec, - ro_output: Vec, - alpha_mults: Vec, - ro_mults: Vec, -) -> Instruction { - Instruction::FriFold(Box::new(FriFoldInstr { - base_single_addrs: FriFoldBaseIo { x: Address(F::from_canonical_u32(x)) }, - ext_single_addrs: FriFoldExtSingleIo { - z: Address(F::from_canonical_u32(z)), - alpha: Address(F::from_canonical_u32(alpha)), - }, - ext_vec_addrs: FriFoldExtVecIo { - mat_opening: mat_opening - .iter() - .map(|elm| Address(F::from_canonical_u32(*elm))) - .collect(), - ps_at_z: ps_at_z.iter().map(|elm| 
Address(F::from_canonical_u32(*elm))).collect(), - alpha_pow_input: alpha_pow_input - .iter() - .map(|elm| Address(F::from_canonical_u32(*elm))) - .collect(), - ro_input: ro_input.iter().map(|elm| Address(F::from_canonical_u32(*elm))).collect(), - alpha_pow_output: alpha_pow_output - .iter() - .map(|elm| Address(F::from_canonical_u32(*elm))) - .collect(), - ro_output: ro_output.iter().map(|elm| Address(F::from_canonical_u32(*elm))).collect(), - }, - alpha_pow_mults: alpha_mults.iter().map(|mult| F::from_canonical_u32(*mult)).collect(), - ro_mults: ro_mults.iter().map(|mult| F::from_canonical_u32(*mult)).collect(), - })) -} - -pub fn commit_public_values( - public_values_a: &RecursionPublicValues, -) -> Instruction { - let pv_a = public_values_a.to_vec().map(|pv| Address(F::from_canonical_u32(pv))); - let pv_address: &RecursionPublicValues> = pv_a.as_slice().borrow(); - - Instruction::CommitPublicValues(Box::new(CommitPublicValuesInstr { - pv_addrs: pv_address.clone(), - })) -} diff --git a/crates/recursion/core-v2/src/runtime/mod.rs b/crates/recursion/core-v2/src/runtime/mod.rs deleted file mode 100644 index 296517397e..0000000000 --- a/crates/recursion/core-v2/src/runtime/mod.rs +++ /dev/null @@ -1,523 +0,0 @@ -pub mod instruction; -mod memory; -mod opcode; -mod program; -mod record; - -// Avoid triggering annoying branch of thiserror derive macro. -use backtrace::Backtrace as Trace; -pub use instruction::Instruction; -use instruction::{FieldEltType, HintBitsInstr, HintExt2FeltsInstr, HintInstr, PrintInstr}; -use memory::*; -pub use opcode::*; -pub use program::*; -pub use record::*; - -use std::{ - array, - borrow::Borrow, - collections::VecDeque, - fmt::Debug, - io::{stdout, Write}, - iter::zip, - marker::PhantomData, - sync::Arc, -}; - -use hashbrown::HashMap; -use itertools::Itertools; -use p3_field::{AbstractField, ExtensionField, PrimeField32}; -use p3_poseidon2::{Poseidon2, Poseidon2ExternalMatrixGeneral}; -use p3_symmetric::{CryptographicPermutation, Permutation}; -use p3_util::reverse_bits_len; -use thiserror::Error; - -use sp1_recursion_core::air::{Block, RECURSIVE_PROOF_NUM_PV_ELTS}; - -/// TODO expand glob import once things are organized enough -use crate::*; - -/// The heap pointer address. -pub const HEAP_PTR: i32 = -4; -pub const HEAP_START_ADDRESS: usize = STACK_SIZE + 4; - -pub const STACK_SIZE: usize = 1 << 24; -pub const MEMORY_SIZE: usize = 1 << 28; - -/// The width of the Poseidon2 permutation. -pub const PERMUTATION_WIDTH: usize = 16; -pub const POSEIDON2_SBOX_DEGREE: u64 = 7; -pub const HASH_RATE: usize = 8; - -/// The current verifier implementation assumes that we are using a 256-bit hash with 32-bit -/// elements. -pub const DIGEST_SIZE: usize = 8; - -pub const NUM_BITS: usize = 31; - -pub const D: usize = 4; - -#[derive(Debug, Clone, Default)] -pub struct CycleTrackerEntry { - pub span_entered: bool, - pub span_enter_cycle: usize, - pub cumulative_cycles: usize, -} - -/// TODO fully document. -/// Taken from [`sp1_recursion_core::runtime::Runtime`]. -/// Many missing things (compared to the old `Runtime`) will need to be implemented. 
-pub struct Runtime<'a, F: PrimeField32, EF: ExtensionField, Diffusion> { - pub timestamp: usize, - - pub nb_poseidons: usize, - - pub nb_wide_poseidons: usize, - - pub nb_bit_decompositions: usize, - - pub nb_ext_ops: usize, - - pub nb_base_ops: usize, - - pub nb_memory_ops: usize, - - pub nb_branch_ops: usize, - - pub nb_exp_reverse_bits: usize, - - pub nb_fri_fold: usize, - - pub nb_print_f: usize, - - pub nb_print_e: usize, - - /// The current clock. - pub clk: F, - - /// The program counter. - pub pc: F, - - /// The program. - pub program: Arc>, - - /// Memory. From canonical usize of an Address to a MemoryEntry. - pub memory: MemVecMap, - - /// The execution record. - pub record: ExecutionRecord, - - pub witness_stream: VecDeque>, - - pub cycle_tracker: HashMap, - - /// The stream that print statements write to. - pub debug_stdout: Box, - - /// Entries for dealing with the Poseidon2 hash state. - perm: Option< - Poseidon2< - F, - Poseidon2ExternalMatrixGeneral, - Diffusion, - PERMUTATION_WIDTH, - POSEIDON2_SBOX_DEGREE, - >, - >, - - _marker_ef: PhantomData, - - _marker_diffusion: PhantomData, -} - -#[derive(Error, Debug)] -pub enum RuntimeError { - #[error( - "attempted to perform base field division {in1:?}/{in2:?} \ - from instruction {instr:?} at pc {pc:?}\nnearest pc with backtrace:\n{trace:?}" - )] - DivFOutOfDomain { - in1: F, - in2: F, - instr: BaseAluInstr, - pc: usize, - trace: Option<(usize, Trace)>, - }, - #[error( - "attempted to perform extension field division {in1:?}/{in2:?} \ - from instruction {instr:?} at pc {pc:?}\nnearest pc with backtrace:\n{trace:?}" - )] - DivEOutOfDomain { - in1: EF, - in2: EF, - instr: ExtAluInstr, - pc: usize, - trace: Option<(usize, Trace)>, - }, - #[error("failed to print to `debug_stdout`: {0}")] - DebugPrint(#[from] std::io::Error), - #[error("attempted to read from empty witness stream")] - EmptyWitnessStream, -} - -impl<'a, F: PrimeField32, EF: ExtensionField, Diffusion> Runtime<'a, F, EF, Diffusion> -where - Poseidon2< - F, - Poseidon2ExternalMatrixGeneral, - Diffusion, - PERMUTATION_WIDTH, - POSEIDON2_SBOX_DEGREE, - >: CryptographicPermutation<[F; PERMUTATION_WIDTH]>, -{ - pub fn new( - program: Arc>, - perm: Poseidon2< - F, - Poseidon2ExternalMatrixGeneral, - Diffusion, - PERMUTATION_WIDTH, - POSEIDON2_SBOX_DEGREE, - >, - ) -> Self { - let record = ExecutionRecord:: { program: program.clone(), ..Default::default() }; - let memory = Memory::with_capacity(program.total_memory); - Self { - timestamp: 0, - nb_poseidons: 0, - nb_wide_poseidons: 0, - nb_bit_decompositions: 0, - nb_exp_reverse_bits: 0, - nb_ext_ops: 0, - nb_base_ops: 0, - nb_memory_ops: 0, - nb_branch_ops: 0, - nb_fri_fold: 0, - nb_print_f: 0, - nb_print_e: 0, - clk: F::zero(), - program, - pc: F::zero(), - memory, - record, - witness_stream: VecDeque::new(), - cycle_tracker: HashMap::new(), - debug_stdout: Box::new(stdout()), - perm: Some(perm), - _marker_ef: PhantomData, - _marker_diffusion: PhantomData, - } - } - - pub fn print_stats(&self) { - tracing::debug!("Total Cycles: {}", self.timestamp); - tracing::debug!("Poseidon Skinny Operations: {}", self.nb_poseidons); - tracing::debug!("Poseidon Wide Operations: {}", self.nb_wide_poseidons); - tracing::debug!("Exp Reverse Bits Operations: {}", self.nb_exp_reverse_bits); - tracing::debug!("FriFold Operations: {}", self.nb_fri_fold); - tracing::debug!("Field Operations: {}", self.nb_base_ops); - tracing::debug!("Extension Operations: {}", self.nb_ext_ops); - tracing::debug!("Memory Operations: {}", self.nb_memory_ops); - 
tracing::debug!("Branch Operations: {}", self.nb_branch_ops); - for (name, entry) in self.cycle_tracker.iter().sorted_by_key(|(name, _)| *name) { - tracing::debug!("> {}: {}", name, entry.cumulative_cycles); - } - } - - fn nearest_pc_backtrace(&mut self) -> Option<(usize, Trace)> { - let trap_pc = self.pc.as_canonical_u32() as usize; - let trace = self.program.traces[trap_pc].clone(); - if let Some(mut trace) = trace { - trace.resolve(); - Some((trap_pc, trace)) - } else { - (0..trap_pc) - .rev() - .filter_map(|nearby_pc| { - let mut trace = self.program.traces.get(nearby_pc)?.clone()?; - trace.resolve(); - Some((nearby_pc, trace)) - }) - .next() - } - } - - /// Compare to [sp1_recursion_core::runtime::Runtime::run]. - pub fn run(&mut self) -> Result<(), RuntimeError> { - let early_exit_ts = std::env::var("RECURSION_EARLY_EXIT_TS") - .map_or(usize::MAX, |ts: String| ts.parse().unwrap()); - while self.pc < F::from_canonical_u32(self.program.instructions.len() as u32) { - let idx = self.pc.as_canonical_u32() as usize; - let instruction = self.program.instructions[idx].clone(); - - let next_clk = self.clk + F::from_canonical_u32(4); - let next_pc = self.pc + F::one(); - match instruction { - Instruction::BaseAlu(instr @ BaseAluInstr { opcode, mult, addrs }) => { - self.nb_base_ops += 1; - let in1 = self.memory.mr(addrs.in1).val[0]; - let in2 = self.memory.mr(addrs.in2).val[0]; - // Do the computation. - let out = match opcode { - BaseAluOpcode::AddF => in1 + in2, - BaseAluOpcode::SubF => in1 - in2, - BaseAluOpcode::MulF => in1 * in2, - BaseAluOpcode::DivF => match in1.try_div(in2) { - Some(x) => x, - None => { - // Check for division exceptions and error. Note that 0/0 is defined - // to be 1. - if in1.is_zero() { - AbstractField::one() - } else { - return Err(RuntimeError::DivFOutOfDomain { - in1, - in2, - instr, - pc: self.pc.as_canonical_u32() as usize, - trace: self.nearest_pc_backtrace(), - }); - } - } - }, - }; - self.memory.mw(addrs.out, Block::from(out), mult); - self.record.base_alu_events.push(BaseAluEvent { out, in1, in2 }); - } - Instruction::ExtAlu(instr @ ExtAluInstr { opcode, mult, addrs }) => { - self.nb_ext_ops += 1; - let in1 = self.memory.mr(addrs.in1).val; - let in2 = self.memory.mr(addrs.in2).val; - // Do the computation. - let in1_ef = EF::from_base_slice(&in1.0); - let in2_ef = EF::from_base_slice(&in2.0); - let out_ef = match opcode { - ExtAluOpcode::AddE => in1_ef + in2_ef, - ExtAluOpcode::SubE => in1_ef - in2_ef, - ExtAluOpcode::MulE => in1_ef * in2_ef, - ExtAluOpcode::DivE => match in1_ef.try_div(in2_ef) { - Some(x) => x, - None => { - // Check for division exceptions and error. Note that 0/0 is defined - // to be 1. 
- if in1_ef.is_zero() { - AbstractField::one() - } else { - return Err(RuntimeError::DivEOutOfDomain { - in1: in1_ef, - in2: in2_ef, - instr, - pc: self.pc.as_canonical_u32() as usize, - trace: self.nearest_pc_backtrace(), - }); - } - } - }, - }; - let out = Block::from(out_ef.as_base_slice()); - self.memory.mw(addrs.out, out, mult); - self.record.ext_alu_events.push(ExtAluEvent { out, in1, in2 }); - } - Instruction::Mem(MemInstr { - addrs: MemIo { inner: addr }, - vals: MemIo { inner: val }, - mult, - kind, - }) => { - self.nb_memory_ops += 1; - match kind { - MemAccessKind::Read => { - let mem_entry = self.memory.mr_mult(addr, mult); - assert_eq!( - mem_entry.val, val, - "stored memory value should be the specified value" - ); - } - MemAccessKind::Write => drop(self.memory.mw(addr, val, mult)), - } - self.record.mem_const_count += 1; - } - Instruction::Poseidon2(instr) => { - let Poseidon2Instr { addrs: Poseidon2Io { input, output }, mults } = *instr; - self.nb_poseidons += 1; - let in_vals = std::array::from_fn(|i| self.memory.mr(input[i]).val[0]); - let perm_output = self.perm.as_ref().unwrap().permute(in_vals); - - perm_output.iter().zip(output).zip(mults).for_each(|((&val, addr), mult)| { - self.memory.mw(addr, Block::from(val), mult); - }); - self.record - .poseidon2_events - .push(Poseidon2Event { input: in_vals, output: perm_output }); - } - Instruction::ExpReverseBitsLen(ExpReverseBitsInstr { - addrs: ExpReverseBitsIo { base, exp, result }, - mult, - }) => { - self.nb_exp_reverse_bits += 1; - let base_val = self.memory.mr(base).val[0]; - let exp_bits: Vec<_> = - exp.iter().map(|bit| self.memory.mr(*bit).val[0]).collect(); - let exp_val = exp_bits - .iter() - .enumerate() - .fold(0, |acc, (i, &val)| acc + val.as_canonical_u32() * (1 << i)); - let out = - base_val.exp_u64(reverse_bits_len(exp_val as usize, exp_bits.len()) as u64); - self.memory.mw(result, Block::from(out), mult); - self.record.exp_reverse_bits_len_events.push(ExpReverseBitsEvent { - result: out, - base: base_val, - exp: exp_bits, - }); - } - Instruction::HintBits(HintBitsInstr { output_addrs_mults, input_addr }) => { - self.nb_bit_decompositions += 1; - let num = self.memory.mr_mult(input_addr, F::zero()).val[0].as_canonical_u32(); - // Decompose the num into LE bits. - let bits = (0..output_addrs_mults.len()) - .map(|i| Block::from(F::from_canonical_u32((num >> i) & 1))) - .collect::>(); - // Write the bits to the array at dst. - for (bit, (addr, mult)) in bits.into_iter().zip(output_addrs_mults) { - self.memory.mw(addr, bit, mult); - self.record.mem_var_events.push(MemEvent { inner: bit }); - } - } - - Instruction::FriFold(instr) => { - let FriFoldInstr { - base_single_addrs, - ext_single_addrs, - ext_vec_addrs, - alpha_pow_mults, - ro_mults, - } = *instr; - self.nb_fri_fold += 1; - let x = self.memory.mr(base_single_addrs.x).val[0]; - let z = self.memory.mr(ext_single_addrs.z).val; - let z: EF = z.ext(); - let alpha = self.memory.mr(ext_single_addrs.alpha).val; - let alpha: EF = alpha.ext(); - let mat_opening = ext_vec_addrs - .mat_opening - .iter() - .map(|addr| self.memory.mr(*addr).val) - .collect_vec(); - let ps_at_z = ext_vec_addrs - .ps_at_z - .iter() - .map(|addr| self.memory.mr(*addr).val) - .collect_vec(); - - for m in 0..ps_at_z.len() { - // let m = F::from_canonical_u32(m); - // Get the opening values. 
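// Each iteration folds one opened value into the running reduced opening: the quotient
// (p(x) - p(z)) / (x - z) is accumulated into `ro`, weighted by the current power of
// alpha, and the power of alpha is then advanced by one step:
//     ro_output[m]        = ro_input[m] + alpha_pow_input[m] * (p(x) - p(z)) / (x - z)
//     alpha_pow_output[m] = alpha_pow_input[m] * alpha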
- let p_at_x = mat_opening[m]; - let p_at_x: EF = p_at_x.ext(); - let p_at_z = ps_at_z[m]; - let p_at_z: EF = p_at_z.ext(); - - // Calculate the quotient and update the values - let quotient = (-p_at_z + p_at_x) / (-z + x); - - // First we peek to get the current value. - let alpha_pow: EF = - self.memory.mr(ext_vec_addrs.alpha_pow_input[m]).val.ext(); - - let ro: EF = self.memory.mr(ext_vec_addrs.ro_input[m]).val.ext(); - - let new_ro = ro + alpha_pow * quotient; - let new_alpha_pow = alpha_pow * alpha; - - let _ = self.memory.mw( - ext_vec_addrs.ro_output[m], - Block::from(new_ro.as_base_slice()), - ro_mults[m], - ); - - let _ = self.memory.mw( - ext_vec_addrs.alpha_pow_output[m], - Block::from(new_alpha_pow.as_base_slice()), - alpha_pow_mults[m], - ); - - self.record.fri_fold_events.push(FriFoldEvent { - base_single: FriFoldBaseIo { x }, - ext_single: FriFoldExtSingleIo { - z: Block::from(z.as_base_slice()), - alpha: Block::from(alpha.as_base_slice()), - }, - ext_vec: FriFoldExtVecIo { - mat_opening: Block::from(p_at_x.as_base_slice()), - ps_at_z: Block::from(p_at_z.as_base_slice()), - alpha_pow_input: Block::from(alpha_pow.as_base_slice()), - ro_input: Block::from(ro.as_base_slice()), - alpha_pow_output: Block::from(new_alpha_pow.as_base_slice()), - ro_output: Block::from(new_ro.as_base_slice()), - }, - }); - } - } - - Instruction::CommitPublicValues(instr) => { - let pv_addrs = instr.pv_addrs.to_vec(); - let pv_values: [F; RECURSIVE_PROOF_NUM_PV_ELTS] = - array::from_fn(|i| self.memory.mr(pv_addrs[i]).val[0]); - self.record.public_values = *pv_values.as_slice().borrow(); - self.record - .commit_pv_hash_events - .push(CommitPublicValuesEvent { public_values: self.record.public_values }); - } - - Instruction::Print(PrintInstr { field_elt_type, addr }) => match field_elt_type { - FieldEltType::Base => { - self.nb_print_f += 1; - let f = self.memory.mr_mult(addr, F::zero()).val[0]; - writeln!(self.debug_stdout, "PRINTF={f}") - } - FieldEltType::Extension => { - self.nb_print_e += 1; - let ef = self.memory.mr_mult(addr, F::zero()).val; - writeln!(self.debug_stdout, "PRINTEF={ef:?}") - } - } - .map_err(RuntimeError::DebugPrint)?, - Instruction::HintExt2Felts(HintExt2FeltsInstr { - output_addrs_mults, - input_addr, - }) => { - self.nb_bit_decompositions += 1; - let fs = self.memory.mr_mult(input_addr, F::zero()).val; - // Write the bits to the array at dst. - for (f, (addr, mult)) in fs.into_iter().zip(output_addrs_mults) { - let felt = Block::from(f); - self.memory.mw(addr, felt, mult); - self.record.mem_var_events.push(MemEvent { inner: felt }); - } - } - Instruction::Hint(HintInstr { output_addrs_mults }) => { - // Check that enough Blocks can be read, so `drain` does not panic. - if self.witness_stream.len() < output_addrs_mults.len() { - return Err(RuntimeError::EmptyWitnessStream); - } - let witness = self.witness_stream.drain(0..output_addrs_mults.len()); - for ((addr, mult), val) in zip(output_addrs_mults, witness) { - // Inline [`Self::mw`] to mutably borrow multiple fields of `self`. 
- self.memory.mw(addr, val, mult); - self.record.mem_var_events.push(MemEvent { inner: val }); - } - } - } - - self.pc = next_pc; - self.clk = next_clk; - self.timestamp += 1; - - if self.timestamp >= early_exit_ts { - break; - } - } - Ok(()) - } -} diff --git a/crates/recursion/core-v2/src/runtime/opcode.rs b/crates/recursion/core-v2/src/runtime/opcode.rs deleted file mode 100644 index 96a748d065..0000000000 --- a/crates/recursion/core-v2/src/runtime/opcode.rs +++ /dev/null @@ -1,17 +0,0 @@ -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub enum BaseAluOpcode { - AddF, - SubF, - MulF, - DivF, -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub enum ExtAluOpcode { - AddE, - SubE, - MulE, - DivE, -} diff --git a/crates/recursion/core-v2/src/runtime/program.rs b/crates/recursion/core-v2/src/runtime/program.rs deleted file mode 100644 index faa5a6f730..0000000000 --- a/crates/recursion/core-v2/src/runtime/program.rs +++ /dev/null @@ -1,20 +0,0 @@ -use backtrace::Backtrace; -use p3_field::Field; -use serde::{Deserialize, Serialize}; -use sp1_stark::air::MachineProgram; - -use crate::*; - -#[derive(Debug, Clone, Default, Serialize, Deserialize)] -pub struct RecursionProgram { - pub instructions: Vec>, - pub total_memory: usize, - #[serde(skip)] - pub traces: Vec>, -} - -impl MachineProgram for RecursionProgram { - fn pc_start(&self) -> F { - F::zero() - } -} diff --git a/crates/recursion/core-v2/src/runtime/record.rs b/crates/recursion/core-v2/src/runtime/record.rs deleted file mode 100644 index 59bb10ad7c..0000000000 --- a/crates/recursion/core-v2/src/runtime/record.rs +++ /dev/null @@ -1,74 +0,0 @@ -use std::{array, sync::Arc}; - -use p3_field::{AbstractField, PrimeField32}; -use sp1_recursion_core::air::RecursionPublicValues; -use sp1_stark::{MachineRecord, SP1CoreOpts, PROOF_MAX_NUM_PVS}; - -// TODO expand glob imports -use crate::*; - -#[derive(Clone, Default, Debug)] -pub struct ExecutionRecord { - pub program: Arc>, - /// The index of the shard. - pub index: u32, - - pub base_alu_events: Vec>, - pub ext_alu_events: Vec>, - pub mem_const_count: usize, - pub mem_var_events: Vec>, - /// The public values. - pub public_values: RecursionPublicValues, - - pub poseidon2_events: Vec>, - pub exp_reverse_bits_len_events: Vec>, - pub fri_fold_events: Vec>, - pub commit_pv_hash_events: Vec>, -} - -impl MachineRecord for ExecutionRecord { - type Config = SP1CoreOpts; - - fn stats(&self) -> hashbrown::HashMap { - hashbrown::HashMap::from([("cpu_events".to_owned(), 1337usize)]) - } - - fn append(&mut self, other: &mut Self) { - // Exhaustive destructuring for refactoring purposes. 
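// Binding every field by name (with no `..` catch-all) makes this function fail to
// compile whenever a new field is added to `ExecutionRecord`, so a new event container
// cannot silently be left out of `append`.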
- let Self { - program: _, - index: _, - base_alu_events, - ext_alu_events, - mem_const_count, - mem_var_events, - public_values: _, - poseidon2_events, - exp_reverse_bits_len_events, - fri_fold_events, - commit_pv_hash_events, - } = self; - base_alu_events.append(&mut other.base_alu_events); - ext_alu_events.append(&mut other.ext_alu_events); - *mem_const_count += other.mem_const_count; - mem_var_events.append(&mut other.mem_var_events); - poseidon2_events.append(&mut other.poseidon2_events); - exp_reverse_bits_len_events.append(&mut other.exp_reverse_bits_len_events); - fri_fold_events.append(&mut other.fri_fold_events); - commit_pv_hash_events.append(&mut other.commit_pv_hash_events); - } - - fn public_values(&self) -> Vec { - let pv_elms = self.public_values.to_vec(); - - let ret: [T; PROOF_MAX_NUM_PVS] = array::from_fn(|i| { - if i < pv_elms.len() { - T::from_canonical_u32(pv_elms[i].as_canonical_u32()) - } else { - T::zero() - } - }); - - ret.to_vec() - } -} diff --git a/crates/recursion/core/CHANGELOG.md b/crates/recursion/core/CHANGELOG.md index f2e143afe1..81f09b03f4 100644 --- a/crates/recursion/core/CHANGELOG.md +++ b/crates/recursion/core/CHANGELOG.md @@ -7,189 +7,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] -## [1.1.0](https://github.com/succinctlabs/sp1/compare/sp1-recursion-core-v1.0.1...sp1-recursion-core-v1.1.0) - 2024-08-02 - -### Added -- update tg ([#1214](https://github.com/succinctlabs/sp1/pull/1214)) - -### Fixed -- UB from `OpcodeSpecificCols` union ([#1050](https://github.com/succinctlabs/sp1/pull/1050)) - -### Other -- Merge branch 'main' into dev -- *(deps)* bump arrayref from 0.3.7 to 0.3.8 ([#1154](https://github.com/succinctlabs/sp1/pull/1154)) -- add audit reports ([#1142](https://github.com/succinctlabs/sp1/pull/1142)) - -## [1.0.0-rc1](https://github.com/succinctlabs/sp1/compare/sp1-recursion-core-v1.0.0-rc1...sp1-recursion-core-v1.0.0-rc1) - 2024-07-19 - -### Added - -- parallel recursion tracegen ([#1095](https://github.com/succinctlabs/sp1/pull/1095)) -- result instead of exit(1) on trap in recursion ([#1089](https://github.com/succinctlabs/sp1/pull/1089)) -- publish sp1 to crates.io ([#1052](https://github.com/succinctlabs/sp1/pull/1052)) -- critical constraint changes ([#1046](https://github.com/succinctlabs/sp1/pull/1046)) -- plonk circuit optimizations ([#972](https://github.com/succinctlabs/sp1/pull/972)) -- poseidon2 hash ([#885](https://github.com/succinctlabs/sp1/pull/885)) -- optimize cpu tracegen ([#949](https://github.com/succinctlabs/sp1/pull/949)) -- shrink/wrap multi opt -- generic const expr ([#854](https://github.com/succinctlabs/sp1/pull/854)) -- plonk prover ([#795](https://github.com/succinctlabs/sp1/pull/795)) -- exit code ([#750](https://github.com/succinctlabs/sp1/pull/750)) -- _(recursion)_ public values constraints ([#748](https://github.com/succinctlabs/sp1/pull/748)) -- _(recursion)_ HALT instruction ([#703](https://github.com/succinctlabs/sp1/pull/703)) -- program refactor ([#651](https://github.com/succinctlabs/sp1/pull/651)) -- e2e groth16 with contract verifier ([#671](https://github.com/succinctlabs/sp1/pull/671)) -- nextgen ci for sp1-prover ([#663](https://github.com/succinctlabs/sp1/pull/663)) -- _(recursion)_ Add interactions to poseidon2 skinny ([#658](https://github.com/succinctlabs/sp1/pull/658)) -- Adding docs for new `ProverClient` and `groth16` and `compressed` mode ([#627](https://github.com/succinctlabs/sp1/pull/627)) -- aggregation fixes 
([#649](https://github.com/succinctlabs/sp1/pull/649)) -- improve circuit by 3-4x ([#648](https://github.com/succinctlabs/sp1/pull/648)) -- _(recursion)_ poseidon2 max constraint degree const generic ([#634](https://github.com/succinctlabs/sp1/pull/634)) -- regularize proof shape ([#641](https://github.com/succinctlabs/sp1/pull/641)) -- prover tweaks pt4 ([#632](https://github.com/succinctlabs/sp1/pull/632)) -- _(recursion)_ jump instruction constraints ([#617](https://github.com/succinctlabs/sp1/pull/617)) -- _(recursion)_ cpu branch constraints ([#578](https://github.com/succinctlabs/sp1/pull/578)) -- prover tweaks pt 2 ([#607](https://github.com/succinctlabs/sp1/pull/607)) -- prover tweaks ([#603](https://github.com/succinctlabs/sp1/pull/603)) -- _(recursion)_ memory access timestamp constraints ([#589](https://github.com/succinctlabs/sp1/pull/589)) -- enable arbitrary constraint degree ([#593](https://github.com/succinctlabs/sp1/pull/593)) -- recursion compress layer + RecursionAirWideDeg3 + RecursionAirSkinnyDeg7 + optimized groth16 ([#590](https://github.com/succinctlabs/sp1/pull/590)) -- fixing memory interactions ([#587](https://github.com/succinctlabs/sp1/pull/587)) -- _(recursion)_ memory builder + fri-fold precompile ([#581](https://github.com/succinctlabs/sp1/pull/581)) -- complete reduce program ([#565](https://github.com/succinctlabs/sp1/pull/565)) -- public inputs in gnark circuit ([#576](https://github.com/succinctlabs/sp1/pull/576)) -- _(recursion)_ cpu alu constraints ([#570](https://github.com/succinctlabs/sp1/pull/570)) -- _(recursion)_ recursion air builder ([#574](https://github.com/succinctlabs/sp1/pull/574)) -- simplify compiler load/store ([#572](https://github.com/succinctlabs/sp1/pull/572)) -- alu cpu columns ([#562](https://github.com/succinctlabs/sp1/pull/562)) -- recursion experiments ([#522](https://github.com/succinctlabs/sp1/pull/522)) -- _(recursion)_ impl `Poseidon2WideChip` ([#537](https://github.com/succinctlabs/sp1/pull/537)) -- groth16 circuit build script ([#541](https://github.com/succinctlabs/sp1/pull/541)) -- verify shard transitions + fixes ([#482](https://github.com/succinctlabs/sp1/pull/482)) -- preprocess memory program chip ([#480](https://github.com/succinctlabs/sp1/pull/480)) -- nested sp1 proof verification ([#494](https://github.com/succinctlabs/sp1/pull/494)) -- verify pc and shard transition in recursive proofs ([#514](https://github.com/succinctlabs/sp1/pull/514)) -- recursion profiling ([#521](https://github.com/succinctlabs/sp1/pull/521)) -- update to latest p3 ([#515](https://github.com/succinctlabs/sp1/pull/515)) -- gnark wrap test + cleanup ([#511](https://github.com/succinctlabs/sp1/pull/511)) -- reduce with different configs ([#508](https://github.com/succinctlabs/sp1/pull/508)) -- groth16 recursion e2e ([#502](https://github.com/succinctlabs/sp1/pull/502)) -- recursion optimizations + compiler cleanup ([#499](https://github.com/succinctlabs/sp1/pull/499)) -- recursion vm public values ([#495](https://github.com/succinctlabs/sp1/pull/495)) -- shard transition public values ([#466](https://github.com/succinctlabs/sp1/pull/466)) -- add support for witness in programs ([#476](https://github.com/succinctlabs/sp1/pull/476)) -- fri-fold precompile ([#479](https://github.com/succinctlabs/sp1/pull/479)) -- setup recursion prover crate ([#475](https://github.com/succinctlabs/sp1/pull/475)) -- gnark recursive verifier ([#457](https://github.com/succinctlabs/sp1/pull/457)) -- recursion cpu constraints 
([#464](https://github.com/succinctlabs/sp1/pull/464)) -- public values ([#455](https://github.com/succinctlabs/sp1/pull/455)) -- Preprocessing + recursion ([#450](https://github.com/succinctlabs/sp1/pull/450)) -- _(precompile)_ add bn254 precompile ([#384](https://github.com/succinctlabs/sp1/pull/384)) -- verify shard ([#444](https://github.com/succinctlabs/sp1/pull/444)) -- _(WIP)_ end-to-end verfier ([#439](https://github.com/succinctlabs/sp1/pull/439)) -- working two adic pcs verifier in recursive zkvm ([#434](https://github.com/succinctlabs/sp1/pull/434)) -- num2bits ([#426](https://github.com/succinctlabs/sp1/pull/426)) -- poseidon2 permute ([#423](https://github.com/succinctlabs/sp1/pull/423)) -- verify constraints ([#409](https://github.com/succinctlabs/sp1/pull/409)) -- poseidon2 air ([#397](https://github.com/succinctlabs/sp1/pull/397)) -- checkpoint runtime for constant memory usage ([#389](https://github.com/succinctlabs/sp1/pull/389)) -- update to the latest plonky3 version ([#398](https://github.com/succinctlabs/sp1/pull/398)) -- array and symbolic evaluation ([#390](https://github.com/succinctlabs/sp1/pull/390)) -- extension in vm backend ([#382](https://github.com/succinctlabs/sp1/pull/382)) -- new ir ([#373](https://github.com/succinctlabs/sp1/pull/373)) -- core recursion air constraints ([#359](https://github.com/succinctlabs/sp1/pull/359)) -- recursive DSL initial commit ([#357](https://github.com/succinctlabs/sp1/pull/357)) -- recursion program table + memory tracing ([#356](https://github.com/succinctlabs/sp1/pull/356)) -- initial recursion core ([#354](https://github.com/succinctlabs/sp1/pull/354)) -- new README img ([#226](https://github.com/succinctlabs/sp1/pull/226)) -- readme updates ([#205](https://github.com/succinctlabs/sp1/pull/205)) -- more final touches ([#194](https://github.com/succinctlabs/sp1/pull/194)) -- curtaup + release system + cargo prove CLI updates ([#178](https://github.com/succinctlabs/sp1/pull/178)) -- (perf) updates from Plonky3 and verifier refactor ([#156](https://github.com/succinctlabs/sp1/pull/156)) -- developer experience improvements ([#145](https://github.com/succinctlabs/sp1/pull/145)) -- toolchain build from source & install ([#113](https://github.com/succinctlabs/sp1/pull/113)) -- io::read io::write ([#126](https://github.com/succinctlabs/sp1/pull/126)) -- tracing, profiling, benchmarking ([#99](https://github.com/succinctlabs/sp1/pull/99)) +## [1.2.0-rc1](https://github.com/succinctlabs/sp1/releases/tag/sp1-recursion-core-v1.2.0-rc1) - 2024-08-23 ### Fixed -- Allen's Poseidon2 fixes ([#1099](https://github.com/succinctlabs/sp1/pull/1099)) -- Allen's exp_reverse_bits_len fixes ([#1074](https://github.com/succinctlabs/sp1/pull/1074)) -- multi-builder first/last row issue ([#997](https://github.com/succinctlabs/sp1/pull/997)) -- recursion runtime -- changed fixed size for multi table ([#966](https://github.com/succinctlabs/sp1/pull/966)) -- frifold flag column consistency ([#946](https://github.com/succinctlabs/sp1/pull/946)) -- recursion audit fixes for Issues 7-10 ([#937](https://github.com/succinctlabs/sp1/pull/937)) -- memory finalize duplicate address attack from audit ([#934](https://github.com/succinctlabs/sp1/pull/934)) -- fix things -- fix -- _(recursion)_ num2bits fixes ([#732](https://github.com/succinctlabs/sp1/pull/732)) -- _(recursion)_ poseidon2 external flag ([#747](https://github.com/succinctlabs/sp1/pull/747)) -- _(recursion)_ enable mul constraint ([#686](https://github.com/succinctlabs/sp1/pull/686)) -- fixes to 
the multi table ([#669](https://github.com/succinctlabs/sp1/pull/669)) -- fri fold mem access ([#660](https://github.com/succinctlabs/sp1/pull/660)) -- verify reduced proofs ([#655](https://github.com/succinctlabs/sp1/pull/655)) -- _(recursion)_ fixes for fri fold and poseidon2 ([#654](https://github.com/succinctlabs/sp1/pull/654)) -- high degree constraints in recursion ([#619](https://github.com/succinctlabs/sp1/pull/619)) -- circuit sponge absorb rate ([#618](https://github.com/succinctlabs/sp1/pull/618)) -- deferred proofs + cleanup hash_vkey ([#615](https://github.com/succinctlabs/sp1/pull/615)) -- comment out MUL constraints ([#602](https://github.com/succinctlabs/sp1/pull/602)) -- update Poseidon2 air to match plonky3 ([#600](https://github.com/succinctlabs/sp1/pull/600)) -- circuit verification ([#599](https://github.com/succinctlabs/sp1/pull/599)) -- poseidon2wide `is_real` ([#591](https://github.com/succinctlabs/sp1/pull/591)) -- _(recursion)_ poseidon2 chip matches plonky3 ([#548](https://github.com/succinctlabs/sp1/pull/548)) -- observe only non-padded public values ([#523](https://github.com/succinctlabs/sp1/pull/523)) -- few regression fixes ([#441](https://github.com/succinctlabs/sp1/pull/441)) -- ci ([#401](https://github.com/succinctlabs/sp1/pull/401)) +- bug in exp_reverse_bits memory multiplicity ([#1378](https://github.com/succinctlabs/sp1/pull/1378)) +- fix imports +- cargo check on tests ### Other -- poseidon2 parallel tracegen ([#1118](https://github.com/succinctlabs/sp1/pull/1118)) -- _(deps)_ bump serde_with from 3.8.3 to 3.9.0 ([#1103](https://github.com/succinctlabs/sp1/pull/1103)) -- use global workspace version ([#1102](https://github.com/succinctlabs/sp1/pull/1102)) -- fix release-plz ([#1088](https://github.com/succinctlabs/sp1/pull/1088)) -- add release-plz ([#1086](https://github.com/succinctlabs/sp1/pull/1086)) -- _(deps)_ bump serde_with from 3.8.1 to 3.8.3 ([#1064](https://github.com/succinctlabs/sp1/pull/1064)) -- merge main -> dev ([#969](https://github.com/succinctlabs/sp1/pull/969)) -- format PR [#934](https://github.com/succinctlabs/sp1/pull/934) ([#939](https://github.com/succinctlabs/sp1/pull/939)) -- Refactored is_last and is_first columns; added constraint to make sure that the last real row has is_last on. 
-- all hail clippy -- Removed defunct test -- please clippy -- Merge branch 'dev' into erabinov/exp_rev_precompile -- Version of exp_rev_precompile -- hm -- remove test -- fixes ([#821](https://github.com/succinctlabs/sp1/pull/821)) -- change challenger rate from 16 to 8 ([#807](https://github.com/succinctlabs/sp1/pull/807)) -- clippy fixes -- remove unecessary todos in recursion -- Make some functions const ([#774](https://github.com/succinctlabs/sp1/pull/774)) -- Clean up TOML files ([#796](https://github.com/succinctlabs/sp1/pull/796)) -- _(recursion)_ heap ptr checks ([#775](https://github.com/succinctlabs/sp1/pull/775)) -- _(recursion)_ convert ext2felt to hint ([#771](https://github.com/succinctlabs/sp1/pull/771)) -- update all dependencies ([#689](https://github.com/succinctlabs/sp1/pull/689)) -- _(recursion)_ poseidon2 loose ends ([#672](https://github.com/succinctlabs/sp1/pull/672)) -- sdk tweaks ([#653](https://github.com/succinctlabs/sp1/pull/653)) -- _(recursion)_ consolidate initial and finalize memory tables ([#656](https://github.com/succinctlabs/sp1/pull/656)) -- _(recursion)_ cpu column chores ([#614](https://github.com/succinctlabs/sp1/pull/614)) -- _(recursion)_ re-organized cpu chip and trace ([#613](https://github.com/succinctlabs/sp1/pull/613)) -- poseidon2 config change ([#609](https://github.com/succinctlabs/sp1/pull/609)) -- cleanup prover ([#551](https://github.com/succinctlabs/sp1/pull/551)) -- cleanup program + add missing constraints ([#547](https://github.com/succinctlabs/sp1/pull/547)) -- make ci faster ([#536](https://github.com/succinctlabs/sp1/pull/536)) -- attach dummy wide poseidon2 ([#512](https://github.com/succinctlabs/sp1/pull/512)) -- add poseidon2 chip to recursionAIR ([#504](https://github.com/succinctlabs/sp1/pull/504)) -- _(recursion)_ reduce program ([#497](https://github.com/succinctlabs/sp1/pull/497)) -- for loop optimizations -- update to latest plonky3 main ([#491](https://github.com/succinctlabs/sp1/pull/491)) -- sunday cleanup ([#363](https://github.com/succinctlabs/sp1/pull/363)) -- recursion core cleanup ([#355](https://github.com/succinctlabs/sp1/pull/355)) -- final touches for public release ([#239](https://github.com/succinctlabs/sp1/pull/239)) -- update docs with slight nits ([#224](https://github.com/succinctlabs/sp1/pull/224)) -- sp1 rename ([#212](https://github.com/succinctlabs/sp1/pull/212)) -- enshrine AlignedBorrow macro ([#209](https://github.com/succinctlabs/sp1/pull/209)) -- readme cleanup ([#196](https://github.com/succinctlabs/sp1/pull/196)) -- rename succinct to curta ([#192](https://github.com/succinctlabs/sp1/pull/192)) -- better curta graphic ([#184](https://github.com/succinctlabs/sp1/pull/184)) -- Initial commit +- use crate `vec_map`, box large `Instruction` variants ([#1360](https://github.com/succinctlabs/sp1/pull/1360)) +- merge dev into experimental pt 2 ([#1341](https://github.com/succinctlabs/sp1/pull/1341)) +- resolve merge conflicts between dev and experimental diff --git a/crates/recursion/core/Cargo.toml b/crates/recursion/core/Cargo.toml index 7803c2f52e..1a93e7b098 100644 --- a/crates/recursion/core/Cargo.toml +++ b/crates/recursion/core/Cargo.toml @@ -23,7 +23,6 @@ sp1-primitives = { workspace = true } tracing = "0.1.40" sp1-core-machine = { workspace = true } sp1-stark = { workspace = true } -sp1-core-executor = { workspace = true } hashbrown = { version = "0.14.5", features = ["serde"] } itertools = "0.13.0" p3-bn254-fr = { workspace = true } @@ -35,11 +34,10 @@ p3-fri = { workspace = true } zkhash 
= "0.2.0" ff = { version = "0.13", features = ["derive", "derive_bits"] } serde = { version = "1.0", features = ["derive", "rc"] } -serde_with = "3.9.0" backtrace = { version = "0.3.71", features = ["serde"] } -arrayref = "0.3.8" static_assertions = "1.1.0" -num_cpus = "1.16.0" +thiserror = "1.0.60" +vec_map = "0.8.2" [dev-dependencies] rand = "0.8.5" diff --git a/crates/recursion/core/src/air/builder.rs b/crates/recursion/core/src/air/builder.rs index 0794f09bd9..f902f8d626 100644 --- a/crates/recursion/core/src/air/builder.rs +++ b/crates/recursion/core/src/air/builder.rs @@ -1,17 +1,15 @@ -use crate::{ - cpu::{InstructionCols, OpcodeSelectorCols}, - memory::{MemoryAccessTimestampCols, MemoryCols}, - range_check::RangeCheckOpcode, -}; use core::iter::{once, repeat}; use p3_air::{AirBuilder, AirBuilderWithPublicValues}; use p3_field::AbstractField; use sp1_stark::{ - air::{AirInteraction, BaseAirBuilder, MachineAirBuilder}, + air::{AirInteraction, BaseAirBuilder, InteractionScope, MachineAirBuilder}, InteractionKind, }; -use super::Block; +use super::{ + Block, InstructionCols, MemoryAccessTimestampCols, MemoryCols, OpcodeSelectorCols, + RangeCheckOpcode, +}; /// A trait which contains all helper methods for building SP1 recursion machine AIRs. pub trait SP1RecursionAirBuilder: @@ -50,8 +48,14 @@ pub trait RecursionMemoryAirBuilder: RecursionInteractionAirBuilder { .chain(memory_access.value().clone().map(Into::into)) .collect(); - self.receive(AirInteraction::new(prev_values, is_real.clone(), InteractionKind::Memory)); - self.send(AirInteraction::new(current_values, is_real, InteractionKind::Memory)); + self.receive( + AirInteraction::new(prev_values, is_real.clone(), InteractionKind::Memory), + InteractionScope::Local, + ); + self.send( + AirInteraction::new(current_values, is_real, InteractionKind::Memory), + InteractionScope::Local, + ); } fn recursion_eval_memory_access_single + Clone>( @@ -82,8 +86,14 @@ pub trait RecursionMemoryAirBuilder: RecursionInteractionAirBuilder { .chain(repeat(Self::Expr::zero()).take(3)) .collect(); - self.receive(AirInteraction::new(prev_values, is_real.clone(), InteractionKind::Memory)); - self.send(AirInteraction::new(current_values, is_real, InteractionKind::Memory)); + self.receive( + AirInteraction::new(prev_values, is_real.clone(), InteractionKind::Memory), + InteractionScope::Local, + ); + self.send( + AirInteraction::new(current_values, is_real, InteractionKind::Memory), + InteractionScope::Local, + ); } /// Verifies that the memory access happens after the previous memory access. @@ -151,11 +161,14 @@ pub trait RecursionInteractionAirBuilder: BaseAirBuilder { val: impl Into, is_real: impl Into, ) { - self.send(AirInteraction::new( - vec![range_check_opcode.into(), val.into()], - is_real.into(), - InteractionKind::Range, - )); + self.send( + AirInteraction::new( + vec![range_check_opcode.into(), val.into()], + is_real.into(), + InteractionKind::Range, + ), + InteractionScope::Global, + ); } /// Receives a range check operation to be processed. 
@@ -165,11 +178,14 @@ pub trait RecursionInteractionAirBuilder: BaseAirBuilder { val: impl Into, is_real: impl Into, ) { - self.receive(AirInteraction::new( - vec![range_check_opcode.into(), val.into()], - is_real.into(), - InteractionKind::Range, - )); + self.receive( + AirInteraction::new( + vec![range_check_opcode.into(), val.into()], + is_real.into(), + InteractionKind::Range, + ), + InteractionScope::Global, + ); } fn send_program + Copy>( @@ -183,11 +199,10 @@ pub trait RecursionInteractionAirBuilder: BaseAirBuilder { .chain(instruction.into_iter().map(|x| x.into())) .chain(selectors.into_iter().map(|x| x.into())) .collect::>(); - self.send(AirInteraction::new( - program_interaction_vals, - is_real.into(), - InteractionKind::Program, - )); + self.send( + AirInteraction::new(program_interaction_vals, is_real.into(), InteractionKind::Program), + InteractionScope::Global, + ); } fn receive_program + Copy>( @@ -201,11 +216,10 @@ pub trait RecursionInteractionAirBuilder: BaseAirBuilder { .chain(instruction.into_iter().map(|x| x.into())) .chain(selectors.into_iter().map(|x| x.into())) .collect::>(); - self.receive(AirInteraction::new( - program_interaction_vals, - is_real.into(), - InteractionKind::Program, - )); + self.receive( + AirInteraction::new(program_interaction_vals, is_real.into(), InteractionKind::Program), + InteractionScope::Global, + ); } fn send_table + Clone>( @@ -216,7 +230,10 @@ pub trait RecursionInteractionAirBuilder: BaseAirBuilder { ) { let table_interaction_vals = table.iter().map(|x| x.clone().into()); let values = once(opcode.into()).chain(table_interaction_vals).collect(); - self.send(AirInteraction::new(values, is_real.into(), InteractionKind::Syscall)); + self.send( + AirInteraction::new(values, is_real.into(), InteractionKind::Syscall), + InteractionScope::Local, + ); } fn receive_table + Clone>( @@ -227,6 +244,9 @@ pub trait RecursionInteractionAirBuilder: BaseAirBuilder { ) { let table_interaction_vals = table.iter().map(|x| x.clone().into()); let values = once(opcode.into()).chain(table_interaction_vals).collect(); - self.receive(AirInteraction::new(values, is_real.into(), InteractionKind::Syscall)); + self.receive( + AirInteraction::new(values, is_real.into(), InteractionKind::Syscall), + InteractionScope::Local, + ); } } diff --git a/crates/recursion/core/src/cpu/columns/instruction.rs b/crates/recursion/core/src/air/instruction.rs similarity index 59% rename from crates/recursion/core/src/cpu/columns/instruction.rs rename to crates/recursion/core/src/air/instruction.rs index 8ce7aa9975..1b1a67079e 100644 --- a/crates/recursion/core/src/cpu/columns/instruction.rs +++ b/crates/recursion/core/src/air/instruction.rs @@ -1,5 +1,4 @@ -use crate::{air::Block, cpu::Instruction}; -use p3_field::PrimeField; +use crate::air::Block; use sp1_derive::AlignedBorrow; use std::{iter::once, vec::IntoIter}; @@ -16,19 +15,6 @@ pub struct InstructionCols { pub size_imm: T, } -impl InstructionCols { - pub fn populate(&mut self, instruction: &Instruction) { - self.opcode = instruction.opcode.as_field::(); - self.op_a = instruction.op_a; - self.op_b = instruction.op_b; - self.op_c = instruction.op_c; - self.imm_b = F::from_bool(instruction.imm_b); - self.imm_c = F::from_bool(instruction.imm_c); - self.offset_imm = instruction.offset_imm; - self.size_imm = instruction.size_imm; - } -} - impl IntoIterator for InstructionCols { type Item = T; type IntoIter = IntoIter; diff --git a/crates/recursion/core/src/memory/columns.rs b/crates/recursion/core/src/air/memory.rs similarity index 
100% rename from crates/recursion/core/src/memory/columns.rs rename to crates/recursion/core/src/air/memory.rs diff --git a/crates/recursion/core/src/air/mod.rs b/crates/recursion/core/src/air/mod.rs index 6fb0451170..70e43f0968 100644 --- a/crates/recursion/core/src/air/mod.rs +++ b/crates/recursion/core/src/air/mod.rs @@ -1,15 +1,23 @@ mod block; mod builder; mod extension; +mod instruction; mod is_ext_zero; mod is_zero; +mod memory; mod multi_builder; +mod opcode; mod public_values; +mod range_check; pub use block::*; pub use builder::*; pub use extension::*; +pub use instruction::*; pub use is_ext_zero::*; pub use is_zero::*; +pub use memory::*; pub use multi_builder::*; +pub use opcode::*; pub use public_values::*; +pub use range_check::*; diff --git a/crates/recursion/core/src/air/multi_builder.rs b/crates/recursion/core/src/air/multi_builder.rs index 48711b5294..57027d8fa4 100644 --- a/crates/recursion/core/src/air/multi_builder.rs +++ b/crates/recursion/core/src/air/multi_builder.rs @@ -2,7 +2,7 @@ use p3_air::{ AirBuilder, AirBuilderWithPublicValues, ExtensionBuilder, FilteredAirBuilder, PermutationAirBuilder, }; -use sp1_stark::air::MessageBuilder; +use sp1_stark::air::{InteractionScope, MessageBuilder}; /// The MultiBuilder is used for the multi table. It is used to create a virtual builder for one of /// the sub tables in the multi table. @@ -85,12 +85,12 @@ impl<'a, AB: PermutationAirBuilder> PermutationAirBuilder for MultiBuilder<'a, A } impl<'a, AB: AirBuilder + MessageBuilder, M> MessageBuilder for MultiBuilder<'a, AB> { - fn send(&mut self, message: M) { - self.inner.send(message); + fn send(&mut self, message: M, scope: InteractionScope) { + self.inner.send(message, scope); } - fn receive(&mut self, message: M) { - self.inner.receive(message); + fn receive(&mut self, message: M, scope: InteractionScope) { + self.inner.receive(message, scope); } } diff --git a/crates/recursion/core/src/air/opcode.rs b/crates/recursion/core/src/air/opcode.rs new file mode 100644 index 0000000000..ecb4108e02 --- /dev/null +++ b/crates/recursion/core/src/air/opcode.rs @@ -0,0 +1,59 @@ +use std::borrow::BorrowMut; + +use sp1_derive::AlignedBorrow; + +pub(crate) const OPCODE_COUNT: usize = core::mem::size_of::>(); + +/// Selectors for the opcode. +/// +/// This contains selectors for the different opcodes corresponding to variants of the [`Opcode`] +/// enum. +#[derive(AlignedBorrow, Clone, Copy, Default, Debug)] +#[repr(C)] +pub struct OpcodeSelectorCols { + // Arithmetic field instructions. + pub is_add: T, + pub is_sub: T, + pub is_mul: T, + pub is_div: T, + pub is_ext: T, + + // Memory instructions. + pub is_load: T, + pub is_store: T, + + // Branch instructions. + pub is_beq: T, + pub is_bne: T, + pub is_bneinc: T, + + // Jump instructions. + pub is_jal: T, + pub is_jalr: T, + + // System instructions. 
+ pub is_trap: T, + pub is_noop: T, + pub is_halt: T, + + pub is_poseidon: T, + pub is_fri_fold: T, + pub is_commit: T, + pub is_ext_to_felt: T, + pub is_exp_reverse_bits_len: T, + pub is_heap_expand: T, +} + +impl IntoIterator for &OpcodeSelectorCols { + type Item = T; + + type IntoIter = std::array::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + let mut array = [self.is_add; OPCODE_COUNT]; + let mut_ref: &mut OpcodeSelectorCols = array.as_mut_slice().borrow_mut(); + + *mut_ref = *self; + array.into_iter() + } +} diff --git a/crates/recursion/core/src/air/public_values.rs b/crates/recursion/core/src/air/public_values.rs index 3a30efc84f..fd5baed415 100644 --- a/crates/recursion/core/src/air/public_values.rs +++ b/crates/recursion/core/src/air/public_values.rs @@ -11,12 +11,12 @@ use sp1_stark::{air::POSEIDON_NUM_WORDS, Word, PROOF_MAX_NUM_PVS}; use static_assertions::const_assert_eq; use std::{ borrow::BorrowMut, - mem::{size_of, transmute}, + mem::{size_of, transmute, MaybeUninit}, }; pub const PV_DIGEST_NUM_WORDS: usize = 8; -pub const CHALLENGER_STATE_NUM_ELTS: usize = 50; +pub const CHALLENGER_STATE_NUM_ELTS: usize = size_of::>(); pub const RECURSIVE_PROOF_NUM_PV_ELTS: usize = size_of::>(); @@ -46,7 +46,7 @@ pub struct ChallengerPublicValues { pub output_buffer: [T; PERMUTATION_WIDTH], } -impl ChallengerPublicValues { +impl ChallengerPublicValues { pub fn set_challenger>( &self, challenger: &mut DuplexChallenger, @@ -59,6 +59,18 @@ impl ChallengerPublicValues { let num_outputs = self.num_outputs.as_canonical_u32() as usize; challenger.output_buffer = self.output_buffer[..num_outputs].to_vec(); } + + pub fn as_array(&self) -> [T; CHALLENGER_STATE_NUM_ELTS] + where + T: Copy, + { + unsafe { + let mut ret = [MaybeUninit::::zeroed().assume_init(); CHALLENGER_STATE_NUM_ELTS]; + let pv: &mut ChallengerPublicValues = ret.as_mut_slice().borrow_mut(); + *pv = *self; + ret + } + } } /// The PublicValues struct is used to store all of a reduce proof's public values. @@ -116,33 +128,57 @@ pub struct RecursionPublicValues { /// The commitment to the sp1 program being proven. pub sp1_vk_digest: [T; DIGEST_SIZE], - /// The commitment to the compress key being used in recursive verification. - pub compress_vk_digest: [T; DIGEST_SIZE], + /// The root of the vk merkle tree. + pub vk_root: [T; DIGEST_SIZE], /// The leaf challenger containing the entropy from the main trace commitment. pub leaf_challenger: ChallengerPublicValues, - /// Current cumulative sum of lookup bus. + /// Current cumulative sum of lookup bus. Note that for recursive proofs for core proofs, this + /// contains the global cumulative sum. For all other proofs, it's the local cumulative sum. pub cumulative_sum: [T; 4], /// Whether the proof completely proves the program execution. pub is_complete: T, - /// The digest of all the previous public values elements. - pub digest: [T; DIGEST_SIZE], + /// Whether the proof represents a collection of shards which contain at least one execution + /// shard, i.e. a shard that contains the `cpu` chip. + pub contains_execution_shard: T, /// The exit code of the program. Note that this is not part of the public values digest, /// since it's value will be individually constrained. pub exit_code: T, + + /// The digest of all the previous public values elements. + pub digest: [T; DIGEST_SIZE], } /// Converts the public values to an array of elements. 
-impl RecursionPublicValues { - pub fn to_vec(&self) -> [F; RECURSIVE_PROOF_NUM_PV_ELTS] { - let mut ret = [F::default(); RECURSIVE_PROOF_NUM_PV_ELTS]; - let pv: &mut RecursionPublicValues = ret.as_mut_slice().borrow_mut(); +impl RecursionPublicValues { + pub fn as_array(&self) -> [F; RECURSIVE_PROOF_NUM_PV_ELTS] { + unsafe { + let mut ret = [MaybeUninit::::zeroed().assume_init(); RECURSIVE_PROOF_NUM_PV_ELTS]; + let pv: &mut RecursionPublicValues = ret.as_mut_slice().borrow_mut(); + *pv = *self; + ret + } + } +} + +impl IntoIterator for RecursionPublicValues { + type Item = T; + type IntoIter = std::array::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.as_array().into_iter() + } +} + +impl IntoIterator for ChallengerPublicValues { + type Item = T; + type IntoIter = std::array::IntoIter; - *pv = *self; - ret + fn into_iter(self) -> Self::IntoIter { + self.as_array().into_iter() } } diff --git a/crates/recursion/core/src/range_check/opcode.rs b/crates/recursion/core/src/air/range_check.rs similarity index 88% rename from crates/recursion/core/src/range_check/opcode.rs rename to crates/recursion/core/src/air/range_check.rs index dd1d1c9186..6ec7e10bda 100644 --- a/crates/recursion/core/src/range_check/opcode.rs +++ b/crates/recursion/core/src/air/range_check.rs @@ -1,7 +1,8 @@ use p3_field::Field; use serde::{Deserialize, Serialize}; -use crate::range_check::NUM_RANGE_CHECK_OPS; +/// The number of different range check operations. +pub const NUM_RANGE_CHECK_OPS: usize = 2; /// A byte opcode which the chip can process. #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] diff --git a/crates/recursion/core-v2/src/builder.rs b/crates/recursion/core/src/builder.rs similarity index 69% rename from crates/recursion/core-v2/src/builder.rs rename to crates/recursion/core/src/builder.rs index 1542081a6c..ae30991e04 100644 --- a/crates/recursion/core-v2/src/builder.rs +++ b/crates/recursion/core/src/builder.rs @@ -1,12 +1,13 @@ +use std::iter::once; + use p3_air::AirBuilderWithPublicValues; use p3_field::AbstractField; -use sp1_recursion_core::air::Block; use sp1_stark::{ - air::{AirInteraction, BaseAirBuilder, MachineAirBuilder}, + air::{AirInteraction, BaseAirBuilder, InteractionScope, MachineAirBuilder}, InteractionKind, }; -use crate::*; +use crate::{air::Block, Address}; /// A trait which contains all helper methods for building SP1 recursion machine AIRs. 
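// The `as_array` helpers above flatten a `#[repr(C)]` public-values struct, whose fields
// are all `T`, into a flat `[T; N]`, with `N` recovered as `size_of::<Struct<u8>>()`; the
// same trick defines `CHALLENGER_STATE_NUM_ELTS` and `OPCODE_COUNT`. The real code writes
// into a zeroed `MaybeUninit` buffer through `sp1_derive::AlignedBorrow`; the safe,
// self-contained sketch below only illustrates the idea with a made-up struct.
use core::mem::size_of;

#[repr(C)]
#[derive(Clone, Copy, Default)]
pub struct ToyPublicValues<T> {
    pub digest: [T; 8],
    pub exit_code: T,
    pub is_complete: T,
}

// One byte per `T` field when `T = u8`, so this counts the field elements: 10 here.
pub const TOY_NUM_PV_ELTS: usize = size_of::<ToyPublicValues<u8>>();

impl<T: Copy + Default> ToyPublicValues<T> {
    pub fn as_array(&self) -> [T; TOY_NUM_PV_ELTS] {
        let mut out = [T::default(); TOY_NUM_PV_ELTS];
        let (digest, rest) = out.split_at_mut(8);
        digest.copy_from_slice(&self.digest);
        rest[0] = self.exit_code;
        rest[1] = self.is_complete;
        out
    }
}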
pub trait SP1RecursionAirBuilder: MachineAirBuilder + RecursionAirBuilder {} @@ -32,11 +33,14 @@ pub trait RecursionAirBuilder: BaseAirBuilder { val: Block, mult: impl Into, ) { - self.send(AirInteraction::new( - once(addr.0).chain(val).map(Into::into).collect(), - mult.into(), - InteractionKind::Memory, - )); + self.send( + AirInteraction::new( + once(addr.0).chain(val).map(Into::into).collect(), + mult.into(), + InteractionKind::Memory, + ), + InteractionScope::Local, + ); } fn receive_single>( @@ -56,10 +60,13 @@ pub trait RecursionAirBuilder: BaseAirBuilder { val: Block, mult: impl Into, ) { - self.receive(AirInteraction::new( - once(addr.0).chain(val).map(Into::into).collect(), - mult.into(), - InteractionKind::Memory, - )); + self.receive( + AirInteraction::new( + once(addr.0).chain(val).map(Into::into).collect(), + mult.into(), + InteractionKind::Memory, + ), + InteractionScope::Local, + ); } } diff --git a/crates/recursion/core-v2/src/chips/alu_base.rs b/crates/recursion/core/src/chips/alu_base.rs similarity index 92% rename from crates/recursion/core-v2/src/chips/alu_base.rs rename to crates/recursion/core/src/chips/alu_base.rs index ccce0581ed..ba29d34434 100644 --- a/crates/recursion/core-v2/src/chips/alu_base.rs +++ b/crates/recursion/core/src/chips/alu_base.rs @@ -10,10 +10,10 @@ use std::{borrow::BorrowMut, iter::zip}; use crate::{builder::SP1RecursionAirBuilder, *}; -pub const NUM_BASE_ALU_ENTRIES_PER_ROW: usize = 8; +pub const NUM_BASE_ALU_ENTRIES_PER_ROW: usize = 4; #[derive(Default)] -pub struct BaseAluChip {} +pub struct BaseAluChip; pub const NUM_BASE_ALU_COLS: usize = core::mem::size_of::>(); @@ -84,8 +84,13 @@ impl MachineAir for BaseAluChip { .collect::>(); let nb_rows = instrs.len().div_ceil(NUM_BASE_ALU_ENTRIES_PER_ROW); - let padded_nb_rows = next_power_of_two(nb_rows, None); + let fixed_log2_rows = program.fixed_log2_rows(self); + let padded_nb_rows = match fixed_log2_rows { + Some(log2_rows) => 1 << log2_rows, + None => next_power_of_two(nb_rows, None), + }; let mut values = vec![F::zero(); padded_nb_rows * NUM_BASE_ALU_PREPROCESSED_COLS]; + // Generate the trace rows & corresponding records for each chunk of events in parallel. let populate_len = instrs.len() * NUM_BASE_ALU_ACCESS_COLS; values[..populate_len].par_chunks_mut(NUM_BASE_ALU_ACCESS_COLS).zip_eq(instrs).for_each( @@ -121,8 +126,13 @@ impl MachineAir for BaseAluChip { fn generate_trace(&self, input: &Self::Record, _: &mut Self::Record) -> RowMajorMatrix { let events = &input.base_alu_events; let nb_rows = events.len().div_ceil(NUM_BASE_ALU_ENTRIES_PER_ROW); - let padded_nb_rows = next_power_of_two(nb_rows, None); + let fixed_log2_rows = input.fixed_log2_rows(self); + let padded_nb_rows = match fixed_log2_rows { + Some(log2_rows) => 1 << log2_rows, + None => next_power_of_two(nb_rows, None), + }; let mut values = vec![F::zero(); padded_nb_rows * NUM_BASE_ALU_COLS]; + // Generate the trace rows & corresponding records for each chunk of events in parallel. 
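// The ALU trace generators above (and several chips later in this diff) now compute their
// padded height the same way: a shape-pinned height (`fixed_log2_rows`) wins, otherwise the
// row count is rounded up to the next power of two. A minimal sketch of just that
// computation; the standard-library `next_power_of_two` stands in for the
// `sp1_core_machine::utils` helper used in the real code.
fn padded_height(nb_rows: usize, fixed_log2_rows: Option<usize>) -> usize {
    match fixed_log2_rows {
        Some(log2_rows) => 1 << log2_rows,
        None => nb_rows.next_power_of_two(),
    }
}
// e.g. padded_height(5, None) == 8, while padded_height(5, Some(10)) == 1024.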
let populate_len = events.len() * NUM_BASE_ALU_VALUE_COLS; values[..populate_len].par_chunks_mut(NUM_BASE_ALU_VALUE_COLS).zip_eq(events).for_each( @@ -164,8 +174,8 @@ where builder.when(is_add).assert_eq(in1 + in2, out); builder.when(is_sub).assert_eq(in1, in2 + out); - builder.when(is_mul).assert_eq(in1 * in2, out); - builder.when(is_div).assert_eq(in1, in2 * out); + builder.when(is_mul).assert_eq(out, in1 * in2); + builder.when(is_div).assert_eq(in2 * out, in1); builder.receive_single(addrs.in1, in1, is_real.clone()); @@ -198,7 +208,7 @@ mod tests { base_alu_events: vec![BaseAluIo { out: F::one(), in1: F::one(), in2: F::one() }], ..Default::default() }; - let chip = BaseAluChip::default(); + let chip = BaseAluChip; let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); println!("{:?}", trace.values) } diff --git a/crates/recursion/core-v2/src/chips/alu_ext.rs b/crates/recursion/core/src/chips/alu_ext.rs similarity index 94% rename from crates/recursion/core-v2/src/chips/alu_ext.rs rename to crates/recursion/core/src/chips/alu_ext.rs index 39a92c7f8a..89e7940e49 100644 --- a/crates/recursion/core-v2/src/chips/alu_ext.rs +++ b/crates/recursion/core/src/chips/alu_ext.rs @@ -13,7 +13,7 @@ use crate::{builder::SP1RecursionAirBuilder, *}; pub const NUM_EXT_ALU_ENTRIES_PER_ROW: usize = 4; #[derive(Default)] -pub struct ExtAluChip {} +pub struct ExtAluChip; pub const NUM_EXT_ALU_COLS: usize = core::mem::size_of::>(); @@ -82,8 +82,13 @@ impl> MachineAir for ExtAluChip { .collect::>(); let nb_rows = instrs.len().div_ceil(NUM_EXT_ALU_ENTRIES_PER_ROW); - let padded_nb_rows = next_power_of_two(nb_rows, None); + let fixed_log2_rows = program.fixed_log2_rows(self); + let padded_nb_rows = match fixed_log2_rows { + Some(log2_rows) => 1 << log2_rows, + None => next_power_of_two(nb_rows, None), + }; let mut values = vec![F::zero(); padded_nb_rows * NUM_EXT_ALU_PREPROCESSED_COLS]; + // Generate the trace rows & corresponding records for each chunk of events in parallel. let populate_len = instrs.len() * NUM_EXT_ALU_ACCESS_COLS; values[..populate_len].par_chunks_mut(NUM_EXT_ALU_ACCESS_COLS).zip_eq(instrs).for_each( @@ -119,8 +124,13 @@ impl> MachineAir for ExtAluChip { fn generate_trace(&self, input: &Self::Record, _: &mut Self::Record) -> RowMajorMatrix { let events = &input.ext_alu_events; let nb_rows = events.len().div_ceil(NUM_EXT_ALU_ENTRIES_PER_ROW); - let padded_nb_rows = next_power_of_two(nb_rows, None); + let fixed_log2_rows = input.fixed_log2_rows(self); + let padded_nb_rows = match fixed_log2_rows { + Some(log2_rows) => 1 << log2_rows, + None => next_power_of_two(nb_rows, None), + }; let mut values = vec![F::zero(); padded_nb_rows * NUM_EXT_ALU_COLS]; + // Generate the trace rows & corresponding records for each chunk of events in parallel. 
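// The base ALU division row above is constrained multiplicatively: a `div` row is accepted
// iff `in2 * out == in1` in the field. As the old CPU ALU AIR (deleted further down in this
// diff) already warned, this admits 0/0 = anything, since in1 = in2 = 0 satisfies the
// equation for every `out`. A tiny standalone check over a toy modulus (standing in for
// BabyBear, illustrative only) makes the caveat concrete:
fn div_row_ok(in1: u64, in2: u64, out: u64) -> bool {
    const P: u64 = 7; // toy prime modulus
    (in2 * out) % P == in1 % P
}
// div_row_ok(6, 3, 2) == true   (6 / 3 = 2)
// div_row_ok(0, 0, 5) == true   (0 / 0 "equals" anything)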
let populate_len = events.len() * NUM_EXT_ALU_VALUE_COLS; values[..populate_len].par_chunks_mut(NUM_EXT_ALU_VALUE_COLS).zip_eq(events).for_each( @@ -188,8 +198,8 @@ mod tests { use p3_matrix::dense::RowMajorMatrix; use rand::{rngs::StdRng, Rng, SeedableRng}; - use sp1_recursion_core::stark::config::BabyBearPoseidon2Outer; use sp1_stark::StarkGenericConfig; + use stark::BabyBearPoseidon2Outer; use super::*; @@ -207,7 +217,7 @@ mod tests { }], ..Default::default() }; - let chip = ExtAluChip::default(); + let chip = ExtAluChip; let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); println!("{:?}", trace.values) } diff --git a/crates/recursion/core-v2/src/chips/exp_reverse_bits.rs b/crates/recursion/core/src/chips/exp_reverse_bits.rs similarity index 94% rename from crates/recursion/core-v2/src/chips/exp_reverse_bits.rs rename to crates/recursion/core/src/chips/exp_reverse_bits.rs index dfa1bb9240..b5a8655236 100644 --- a/crates/recursion/core-v2/src/chips/exp_reverse_bits.rs +++ b/crates/recursion/core/src/chips/exp_reverse_bits.rs @@ -22,15 +22,8 @@ pub const NUM_EXP_REVERSE_BITS_LEN_COLS: usize = core::mem::size_of::>(); -pub struct ExpReverseBitsLenChip { - pub fixed_log2_rows: Option, - pub pad: bool, -} -impl Default for ExpReverseBitsLenChip { - fn default() -> Self { - Self { fixed_log2_rows: None, pad: true } - } -} +#[derive(Clone, Debug, Copy, Default)] +pub struct ExpReverseBitsLenChip; #[derive(AlignedBorrow, Clone, Copy, Debug)] #[repr(C)] @@ -126,13 +119,11 @@ impl MachineAir for ExpReverseBitsLenCh }); // Pad the trace to a power of two. - if self.pad { - pad_rows_fixed( - &mut rows, - || [F::zero(); NUM_EXP_REVERSE_BITS_LEN_PREPROCESSED_COLS], - self.fixed_log2_rows, - ); - } + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_EXP_REVERSE_BITS_LEN_PREPROCESSED_COLS], + program.fixed_log2_rows(self), + ); let trace = RowMajorMatrix::new( rows.into_iter().flatten().collect(), @@ -178,13 +169,11 @@ impl MachineAir for ExpReverseBitsLenCh }); // Pad the trace to a power of two. - if self.pad { - pad_rows_fixed( - &mut overall_rows, - || [F::zero(); NUM_EXP_REVERSE_BITS_LEN_COLS].to_vec(), - self.fixed_log2_rows, - ); - } + pad_rows_fixed( + &mut overall_rows, + || [F::zero(); NUM_EXP_REVERSE_BITS_LEN_COLS].to_vec(), + input.fixed_log2_rows(self), + ); // Convert the trace to a row major matrix. 
let trace = RowMajorMatrix::new( @@ -311,7 +300,6 @@ mod tests { use p3_util::reverse_bits_len; use rand::{rngs::StdRng, Rng, SeedableRng}; use sp1_core_machine::utils::setup_logger; - use sp1_recursion_core::stark::config::BabyBearPoseidon2Outer; use sp1_stark::{air::MachineAir, StarkGenericConfig}; use std::iter::once; @@ -323,6 +311,7 @@ mod tests { chips::exp_reverse_bits::ExpReverseBitsLenChip, machine::tests::run_recursion_test_machines, runtime::{instruction as instr, ExecutionRecord}, + stark::BabyBearPoseidon2Outer, ExpReverseBitsEvent, Instruction, MemAccessKind, RecursionProgram, }; @@ -394,7 +383,7 @@ mod tests { }], ..Default::default() }; - let chip = ExpReverseBitsLenChip::<3>::default(); + let chip = ExpReverseBitsLenChip::<3>; let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); println!("{:?}", trace.values) } diff --git a/crates/recursion/core-v2/src/chips/fri_fold.rs b/crates/recursion/core/src/chips/fri_fold.rs similarity index 99% rename from crates/recursion/core-v2/src/chips/fri_fold.rs rename to crates/recursion/core/src/chips/fri_fold.rs index 875fce491b..063037032c 100644 --- a/crates/recursion/core-v2/src/chips/fri_fold.rs +++ b/crates/recursion/core/src/chips/fri_fold.rs @@ -13,9 +13,9 @@ use p3_matrix::{dense::RowMajorMatrix, Matrix}; use sp1_stark::air::{BaseAirBuilder, ExtensionAirBuilder}; use sp1_derive::AlignedBorrow; -use sp1_recursion_core::air::Block; use crate::{ + air::Block, builder::SP1RecursionAirBuilder, runtime::{Instruction, RecursionProgram}, ExecutionRecord, FriFoldInstr, @@ -355,7 +355,6 @@ mod tests { use p3_field::AbstractExtensionField; use rand::{rngs::StdRng, Rng, SeedableRng}; use sp1_core_machine::utils::setup_logger; - use sp1_recursion_core::{air::Block, stark::config::BabyBearPoseidon2Outer}; use sp1_stark::{air::MachineAir, StarkGenericConfig}; use std::mem::size_of; @@ -364,9 +363,11 @@ mod tests { use p3_matrix::dense::RowMajorMatrix; use crate::{ + air::Block, chips::fri_fold::FriFoldChip, machine::tests::run_recursion_test_machines, runtime::{instruction as instr, ExecutionRecord}, + stark::BabyBearPoseidon2Outer, FriFoldBaseIo, FriFoldEvent, FriFoldExtSingleIo, FriFoldExtVecIo, Instruction, MemAccessKind, RecursionProgram, }; diff --git a/crates/recursion/core-v2/src/chips/mem/constant.rs b/crates/recursion/core/src/chips/mem/constant.rs similarity index 86% rename from crates/recursion/core-v2/src/chips/mem/constant.rs rename to crates/recursion/core/src/chips/mem/constant.rs index 681cbac8dd..27bc64b8da 100644 --- a/crates/recursion/core-v2/src/chips/mem/constant.rs +++ b/crates/recursion/core/src/chips/mem/constant.rs @@ -3,7 +3,7 @@ use itertools::Itertools; use p3_air::{Air, BaseAir, PairBuilder}; use p3_field::PrimeField32; use p3_matrix::{dense::RowMajorMatrix, Matrix}; -use sp1_core_machine::utils::pad_to_power_of_two; +use sp1_core_machine::utils::pad_rows_fixed; use sp1_derive::AlignedBorrow; use sp1_stark::air::MachineAir; use std::{borrow::BorrowMut, iter::zip, marker::PhantomData}; @@ -12,11 +12,11 @@ use crate::{builder::SP1RecursionAirBuilder, *}; use super::MemoryAccessCols; -pub const NUM_MEM_ENTRIES_PER_ROW: usize = 6; +pub const NUM_CONST_MEM_ENTRIES_PER_ROW: usize = 2; #[derive(Default)] pub struct MemoryChip { - _data: PhantomData, + _marker: PhantomData, } pub const NUM_MEM_INIT_COLS: usize = core::mem::size_of::>(); @@ -34,7 +34,7 @@ pub const NUM_MEM_PREPROCESSED_INIT_COLS: usize = #[derive(AlignedBorrow, Debug, Clone, Copy)] #[repr(C)] pub struct 
MemoryPreprocessedCols { - values_and_accesses: [(Block, MemoryAccessCols); NUM_MEM_ENTRIES_PER_ROW], + values_and_accesses: [(Block, MemoryAccessCols); NUM_CONST_MEM_ENTRIES_PER_ROW], } impl BaseAir for MemoryChip { fn width(&self) -> usize { @@ -55,7 +55,7 @@ impl MachineAir for MemoryChip { } fn generate_preprocessed_trace(&self, program: &Self::Program) -> Option> { - let rows = program + let mut rows = program .instructions .iter() .filter_map(|instruction| match instruction { @@ -70,7 +70,7 @@ impl MachineAir for MemoryChip { } _ => None, }) - .chunks(NUM_MEM_ENTRIES_PER_ROW) + .chunks(NUM_CONST_MEM_ENTRIES_PER_ROW) .into_iter() .map(|row_vs_as| { let mut row = [F::zero(); NUM_MEM_PREPROCESSED_INIT_COLS]; @@ -82,15 +82,19 @@ impl MachineAir for MemoryChip { }) .collect::>(); + // Pad the rows to the next power of two. + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_MEM_PREPROCESSED_INIT_COLS], + program.fixed_log2_rows(self), + ); + // Convert the trace to a row major matrix. - let mut trace = RowMajorMatrix::new( + let trace = RowMajorMatrix::new( rows.into_iter().flatten().collect::>(), NUM_MEM_PREPROCESSED_INIT_COLS, ); - // Pad the trace to a power of two. - pad_to_power_of_two::(&mut trace.values); - Some(trace) } @@ -103,19 +107,16 @@ impl MachineAir for MemoryChip { let num_rows = input .mem_const_count .checked_sub(1) - .map(|x| x / NUM_MEM_ENTRIES_PER_ROW + 1) + .map(|x| x / NUM_CONST_MEM_ENTRIES_PER_ROW + 1) .unwrap_or_default(); - let rows = + let mut rows = std::iter::repeat([F::zero(); NUM_MEM_INIT_COLS]).take(num_rows).collect::>(); - // Convert the trace to a row major matrix. - let mut trace = - RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_MEM_INIT_COLS); - - // Pad the trace to a power of two. - pad_to_power_of_two::(&mut trace.values); + // Pad the rows to the next power of two. + pad_rows_fixed(&mut rows, || [F::zero(); NUM_MEM_INIT_COLS], input.fixed_log2_rows(self)); - trace + // Convert the trace to a row major matrix. 
+ RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_MEM_INIT_COLS) } fn included(&self, _record: &Self::Record) -> bool { @@ -147,8 +148,8 @@ mod tests { use p3_field::AbstractField; use p3_matrix::dense::RowMajorMatrix; + use crate::stark::BabyBearPoseidon2Outer; use sp1_core_machine::utils::run_test_machine; - use sp1_recursion_core::stark::config::BabyBearPoseidon2Outer; use sp1_stark::{BabyBearPoseidon2Inner, StarkGenericConfig}; use super::*; @@ -158,7 +159,7 @@ mod tests { type SC = BabyBearPoseidon2Outer; type F = ::Val; type EF = ::Challenge; - type A = RecursionAir; + type A = RecursionAir; pub fn prove_program(program: RecursionProgram) { let program = Arc::new(program); @@ -169,7 +170,7 @@ mod tests { runtime.run().unwrap(); let config = SC::new(); - let machine = A::machine_wide(config); + let machine = A::compress_machine(config); let (pk, vk) = machine.setup(&program); let result = run_test_machine(vec![runtime.record], machine, pk, vk); if let Err(e) = result { diff --git a/crates/recursion/core-v2/src/chips/mem/mod.rs b/crates/recursion/core/src/chips/mem/mod.rs similarity index 100% rename from crates/recursion/core-v2/src/chips/mem/mod.rs rename to crates/recursion/core/src/chips/mem/mod.rs diff --git a/crates/recursion/core-v2/src/chips/mem/variable.rs b/crates/recursion/core/src/chips/mem/variable.rs similarity index 88% rename from crates/recursion/core-v2/src/chips/mem/variable.rs rename to crates/recursion/core/src/chips/mem/variable.rs index a2eafb10a3..a5b1fb3156 100644 --- a/crates/recursion/core-v2/src/chips/mem/variable.rs +++ b/crates/recursion/core/src/chips/mem/variable.rs @@ -4,7 +4,7 @@ use p3_air::{Air, BaseAir, PairBuilder}; use p3_field::PrimeField32; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use p3_maybe_rayon::prelude::*; -use sp1_core_machine::utils::{next_power_of_two, pad_to_power_of_two}; +use sp1_core_machine::utils::{next_power_of_two, pad_rows_fixed}; use sp1_derive::AlignedBorrow; use sp1_stark::air::MachineAir; use std::{borrow::BorrowMut, iter::zip, marker::PhantomData}; @@ -13,11 +13,11 @@ use crate::{builder::SP1RecursionAirBuilder, *}; use super::{MemoryAccessCols, NUM_MEM_ACCESS_COLS}; -pub const NUM_MEM_ENTRIES_PER_ROW: usize = 16; +pub const NUM_VAR_MEM_ENTRIES_PER_ROW: usize = 2; #[derive(Default)] pub struct MemoryChip { - _data: PhantomData, + _marker: PhantomData, } pub const NUM_MEM_INIT_COLS: usize = core::mem::size_of::>(); @@ -25,7 +25,7 @@ pub const NUM_MEM_INIT_COLS: usize = core::mem::size_of::>(); #[derive(AlignedBorrow, Debug, Clone, Copy)] #[repr(C)] pub struct MemoryCols { - values: [Block; NUM_MEM_ENTRIES_PER_ROW], + values: [Block; NUM_VAR_MEM_ENTRIES_PER_ROW], } pub const NUM_MEM_PREPROCESSED_INIT_COLS: usize = @@ -34,7 +34,7 @@ pub const NUM_MEM_PREPROCESSED_INIT_COLS: usize = #[derive(AlignedBorrow, Debug, Clone, Copy)] #[repr(C)] pub struct MemoryPreprocessedCols { - accesses: [MemoryAccessCols; NUM_MEM_ENTRIES_PER_ROW], + accesses: [MemoryAccessCols; NUM_VAR_MEM_ENTRIES_PER_ROW], } impl BaseAir for MemoryChip { @@ -74,9 +74,13 @@ impl MachineAir for MemoryChip { }) .collect::>(); - let nb_rows = accesses.len().div_ceil(NUM_MEM_ENTRIES_PER_ROW); - let padded_nb_rows = next_power_of_two(nb_rows, None); + let nb_rows = accesses.len().div_ceil(NUM_VAR_MEM_ENTRIES_PER_ROW); + let padded_nb_rows = match program.fixed_log2_rows(self) { + Some(log2_rows) => 1 << log2_rows, + None => next_power_of_two(nb_rows, None), + }; let mut values = vec![F::zero(); padded_nb_rows * NUM_MEM_PREPROCESSED_INIT_COLS]; 
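// `pad_rows_fixed` replaces `pad_to_power_of_two` in the memory chips above: rows are
// padded with dummy rows before the matrix is built, either to a caller-fixed height
// (`Some(log2_rows)`) or to the next power of two. The real helper lives in
// `sp1_core_machine::utils`; the sketch below only approximates its behavior as used in
// these hunks and is not the actual implementation.
fn pad_rows_fixed<R>(
    rows: &mut Vec<R>,
    dummy_row: impl Fn() -> R,
    fixed_log2_rows: Option<usize>,
) {
    let target = match fixed_log2_rows {
        Some(log2_rows) => 1 << log2_rows,
        None => rows.len().next_power_of_two(),
    };
    // Only ever grow: a fixed height smaller than the real row count would be a caller bug.
    while rows.len() < target {
        rows.push(dummy_row());
    }
}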
+ // Generate the trace rows & corresponding records for each chunk of events in parallel. let populate_len = accesses.len() * NUM_MEM_ACCESS_COLS; values[..populate_len] @@ -93,9 +97,9 @@ impl MachineAir for MemoryChip { fn generate_trace(&self, input: &Self::Record, _: &mut Self::Record) -> RowMajorMatrix { // Generate the trace rows & corresponding records for each chunk of events in parallel. - let rows = input + let mut rows = input .mem_var_events - .chunks(NUM_MEM_ENTRIES_PER_ROW) + .chunks(NUM_VAR_MEM_ENTRIES_PER_ROW) .map(|row_events| { let mut row = [F::zero(); NUM_MEM_INIT_COLS]; let cols: &mut MemoryCols<_> = row.as_mut_slice().borrow_mut(); @@ -106,14 +110,11 @@ impl MachineAir for MemoryChip { }) .collect::>(); - // Convert the trace to a row major matrix. - let mut trace = - RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_MEM_INIT_COLS); + // Pad the rows to the next power of two. + pad_rows_fixed(&mut rows, || [F::zero(); NUM_MEM_INIT_COLS], input.fixed_log2_rows(self)); - // Pad the trace to a power of two. - pad_to_power_of_two::(&mut trace.values); - - trace + // Convert the trace to a row major matrix. + RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_MEM_INIT_COLS) } fn included(&self, _record: &Self::Record) -> bool { diff --git a/crates/recursion/core-v2/src/chips/mod.rs b/crates/recursion/core/src/chips/mod.rs similarity index 91% rename from crates/recursion/core-v2/src/chips/mod.rs rename to crates/recursion/core/src/chips/mod.rs index 3a717452c2..9f82bbccef 100644 --- a/crates/recursion/core-v2/src/chips/mod.rs +++ b/crates/recursion/core/src/chips/mod.rs @@ -1,6 +1,5 @@ pub mod alu_base; pub mod alu_ext; -pub mod dummy; pub mod exp_reverse_bits; pub mod fri_fold; pub mod mem; diff --git a/crates/recursion/core-v2/src/chips/poseidon2_skinny/air.rs b/crates/recursion/core/src/chips/poseidon2_skinny/air.rs similarity index 100% rename from crates/recursion/core-v2/src/chips/poseidon2_skinny/air.rs rename to crates/recursion/core/src/chips/poseidon2_skinny/air.rs diff --git a/crates/recursion/core-v2/src/chips/poseidon2_skinny/columns/mod.rs b/crates/recursion/core/src/chips/poseidon2_skinny/columns/mod.rs similarity index 100% rename from crates/recursion/core-v2/src/chips/poseidon2_skinny/columns/mod.rs rename to crates/recursion/core/src/chips/poseidon2_skinny/columns/mod.rs diff --git a/crates/recursion/core-v2/src/chips/poseidon2_skinny/columns/preprocessed.rs b/crates/recursion/core/src/chips/poseidon2_skinny/columns/preprocessed.rs similarity index 100% rename from crates/recursion/core-v2/src/chips/poseidon2_skinny/columns/preprocessed.rs rename to crates/recursion/core/src/chips/poseidon2_skinny/columns/preprocessed.rs diff --git a/crates/recursion/core-v2/src/chips/poseidon2_skinny/mod.rs b/crates/recursion/core/src/chips/poseidon2_skinny/mod.rs similarity index 94% rename from crates/recursion/core-v2/src/chips/poseidon2_skinny/mod.rs rename to crates/recursion/core/src/chips/poseidon2_skinny/mod.rs index 332075143f..637c00f3f9 100644 --- a/crates/recursion/core-v2/src/chips/poseidon2_skinny/mod.rs +++ b/crates/recursion/core/src/chips/poseidon2_skinny/mod.rs @@ -1,3 +1,5 @@ +use std::marker::PhantomData; + use p3_baby_bear::{MONTY_INVERSE, POSEIDON2_INTERNAL_MATRIX_DIAG_16_BABYBEAR_MONTY}; use p3_field::{AbstractField, PrimeField32}; @@ -17,16 +19,13 @@ pub const NUM_ROUNDS: usize = NUM_EXTERNAL_ROUNDS + NUM_INTERNAL_ROUNDS; /// A chip that implements the Poseidon2 permutation in the skinny variant (one external round 
per /// row and one row for all internal rounds). -pub struct Poseidon2SkinnyChip { - pub fixed_log2_rows: Option, - pub pad: bool, -} +pub struct Poseidon2SkinnyChip(PhantomData<()>); impl Default for Poseidon2SkinnyChip { fn default() -> Self { // We only support machines with degree 9. assert!(DEGREE >= 9); - Self { fixed_log2_rows: None, pad: true } + Self(PhantomData) } } pub fn apply_m_4(x: &mut [AF]) @@ -83,8 +82,8 @@ pub(crate) mod tests { use p3_field::{AbstractField, PrimeField32}; use p3_symmetric::Permutation; + use crate::stark::BabyBearPoseidon2Outer; use sp1_core_machine::utils::{run_test_machine, setup_logger}; - use sp1_recursion_core::stark::config::BabyBearPoseidon2Outer; use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, inner_perm, StarkGenericConfig}; use zkhash::ark_ff::UniformRand; @@ -96,7 +95,7 @@ pub(crate) mod tests { type SC = BabyBearPoseidon2Outer; type F = ::Val; type EF = ::Challenge; - type B = RecursionAir; + type B = RecursionAir; let input = [1; WIDTH]; let output = inner_perm() @@ -141,7 +140,7 @@ pub(crate) mod tests { runtime.run().unwrap(); let config = SC::new(); - let machine_deg_9 = B::machine(config); + let machine_deg_9 = B::wrap_machine(config); let (pk_9, vk_9) = machine_deg_9.setup(&program); let result_deg_9 = run_test_machine(vec![runtime.record], machine_deg_9, pk_9, vk_9); if let Err(e) = result_deg_9 { diff --git a/crates/recursion/core-v2/src/chips/poseidon2_skinny/trace.rs b/crates/recursion/core/src/chips/poseidon2_skinny/trace.rs similarity index 92% rename from crates/recursion/core-v2/src/chips/poseidon2_skinny/trace.rs rename to crates/recursion/core/src/chips/poseidon2_skinny/trace.rs index cddde880a1..ecd9c57550 100644 --- a/crates/recursion/core-v2/src/chips/poseidon2_skinny/trace.rs +++ b/crates/recursion/core/src/chips/poseidon2_skinny/trace.rs @@ -6,7 +6,7 @@ use std::{ use itertools::Itertools; use p3_field::PrimeField32; -use p3_matrix::{dense::RowMajorMatrix, Matrix}; +use p3_matrix::dense::RowMajorMatrix; use sp1_core_machine::utils::pad_rows_fixed; use sp1_primitives::RC_16_30_U32; use sp1_stark::air::MachineAir; @@ -96,24 +96,12 @@ impl MachineAir for Poseidon2SkinnyChip rows.extend(row_add.into_iter()); } - if self.pad { - // Pad the trace to a power of two. - // This will need to be adjusted when the AIR constraints are implemented. - pad_rows_fixed(&mut rows, || [F::zero(); NUM_POSEIDON2_COLS], self.fixed_log2_rows); - } + // Pad the trace to a power of two. + // This will need to be adjusted when the AIR constraints are implemented. + pad_rows_fixed(&mut rows, || [F::zero(); NUM_POSEIDON2_COLS], input.fixed_log2_rows(self)); // Convert the trace to a row major matrix. - let trace = - RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_POSEIDON2_COLS); - - #[cfg(debug_assertions)] - println!( - "poseidon2 skinny main trace dims is width: {:?}, height: {:?}", - trace.width(), - trace.height() - ); - - trace + RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_POSEIDON2_COLS) } fn included(&self, _record: &Self::Record) -> bool { @@ -190,15 +178,14 @@ impl MachineAir for Poseidon2SkinnyChip }); }, ); - if self.pad { - // Pad the trace to a power of two. - // This may need to be adjusted when the AIR constraints are implemented. - pad_rows_fixed( - &mut rows, - || [F::zero(); PREPROCESSED_POSEIDON2_WIDTH], - self.fixed_log2_rows, - ); - } + + // Pad the trace to a power of two. + // This may need to be adjusted when the AIR constraints are implemented. 
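// `Poseidon2SkinnyChip` above no longer carries any runtime configuration; it is a
// zero-sized wrapper whose `Default` impl doubles as the only supported-degree check. A
// standalone sketch of the pattern with an illustrative name (the degree bound is checked
// when the chip is constructed, not at the type level):
use std::marker::PhantomData;

pub struct SkinnyChipSketch<const DEGREE: usize>(PhantomData<()>);

impl<const DEGREE: usize> Default for SkinnyChipSketch<DEGREE> {
    fn default() -> Self {
        // Mirrors the assert in the real chip: only machines of degree >= 9 are supported.
        assert!(DEGREE >= 9);
        Self(PhantomData)
    }
}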
+ pad_rows_fixed( + &mut rows, + || [F::zero(); PREPROCESSED_POSEIDON2_WIDTH], + program.fixed_log2_rows(self), + ); let trace_rows = rows.into_iter().flatten().collect::>(); Some(RowMajorMatrix::new(trace_rows, PREPROCESSED_POSEIDON2_WIDTH)) } diff --git a/crates/recursion/core-v2/src/chips/poseidon2_wide/air.rs b/crates/recursion/core/src/chips/poseidon2_wide/air.rs similarity index 98% rename from crates/recursion/core-v2/src/chips/poseidon2_wide/air.rs rename to crates/recursion/core/src/chips/poseidon2_wide/air.rs index 528105ac60..1bf726bca7 100644 --- a/crates/recursion/core-v2/src/chips/poseidon2_wide/air.rs +++ b/crates/recursion/core/src/chips/poseidon2_wide/air.rs @@ -7,7 +7,6 @@ use p3_air::{Air, BaseAir, PairBuilder}; use p3_field::AbstractField; use p3_matrix::Matrix; use sp1_primitives::RC_16_30_U32; -use sp1_recursion_core::poseidon2_wide::NUM_EXTERNAL_ROUNDS; use crate::builder::SP1RecursionAirBuilder; @@ -16,7 +15,8 @@ use super::{ permutation::Poseidon2, preprocessed::Poseidon2PreprocessedCols, NUM_POSEIDON2_DEGREE3_COLS, NUM_POSEIDON2_DEGREE9_COLS, }, - external_linear_layer, internal_linear_layer, Poseidon2WideChip, NUM_INTERNAL_ROUNDS, WIDTH, + external_linear_layer, internal_linear_layer, Poseidon2WideChip, NUM_EXTERNAL_ROUNDS, + NUM_INTERNAL_ROUNDS, WIDTH, }; impl BaseAir for Poseidon2WideChip { diff --git a/crates/recursion/core-v2/src/chips/poseidon2_wide/columns/mod.rs b/crates/recursion/core/src/chips/poseidon2_wide/columns/mod.rs similarity index 100% rename from crates/recursion/core-v2/src/chips/poseidon2_wide/columns/mod.rs rename to crates/recursion/core/src/chips/poseidon2_wide/columns/mod.rs diff --git a/crates/recursion/core-v2/src/chips/poseidon2_wide/columns/permutation.rs b/crates/recursion/core/src/chips/poseidon2_wide/columns/permutation.rs similarity index 98% rename from crates/recursion/core-v2/src/chips/poseidon2_wide/columns/permutation.rs rename to crates/recursion/core/src/chips/poseidon2_wide/columns/permutation.rs index 54f54d4076..45fc461e29 100644 --- a/crates/recursion/core-v2/src/chips/poseidon2_wide/columns/permutation.rs +++ b/crates/recursion/core/src/chips/poseidon2_wide/columns/permutation.rs @@ -4,9 +4,8 @@ use std::{ }; use sp1_derive::AlignedBorrow; -use sp1_recursion_core::poseidon2_wide::NUM_EXTERNAL_ROUNDS; -use crate::chips::poseidon2_wide::{NUM_INTERNAL_ROUNDS, WIDTH}; +use crate::chips::poseidon2_wide::{NUM_EXTERNAL_ROUNDS, NUM_INTERNAL_ROUNDS, WIDTH}; use super::{POSEIDON2_DEGREE3_COL_MAP, POSEIDON2_DEGREE9_COL_MAP}; diff --git a/crates/recursion/core-v2/src/chips/poseidon2_wide/columns/preprocessed.rs b/crates/recursion/core/src/chips/poseidon2_wide/columns/preprocessed.rs similarity index 100% rename from crates/recursion/core-v2/src/chips/poseidon2_wide/columns/preprocessed.rs rename to crates/recursion/core/src/chips/poseidon2_wide/columns/preprocessed.rs diff --git a/crates/recursion/core-v2/src/chips/poseidon2_wide/mod.rs b/crates/recursion/core/src/chips/poseidon2_wide/mod.rs similarity index 90% rename from crates/recursion/core-v2/src/chips/poseidon2_wide/mod.rs rename to crates/recursion/core/src/chips/poseidon2_wide/mod.rs index bf976e1e4d..637c5afca9 100644 --- a/crates/recursion/core-v2/src/chips/poseidon2_wide/mod.rs +++ b/crates/recursion/core/src/chips/poseidon2_wide/mod.rs @@ -22,16 +22,8 @@ pub const NUM_INTERNAL_ROUNDS: usize = 13; pub const NUM_ROUNDS: usize = NUM_EXTERNAL_ROUNDS + NUM_INTERNAL_ROUNDS; /// A chip that implements addition for the opcode Poseidon2Wide. 
-pub struct Poseidon2WideChip { - pub fixed_log2_rows: Option, - pub pad: bool, -} - -impl Default for Poseidon2WideChip { - fn default() -> Self { - Self { fixed_log2_rows: None, pad: true } - } -} +#[derive(Default, Debug, Clone, Copy)] +pub struct Poseidon2WideChip; impl<'a, const DEGREE: usize> Poseidon2WideChip { /// Transmute a row it to an immutable Poseidon2 instance. @@ -106,15 +98,14 @@ pub(crate) mod tests { use std::{iter::once, sync::Arc}; use crate::{ - machine::RecursionAir, runtime::instruction as instr, MemAccessKind, RecursionProgram, - Runtime, + machine::RecursionAir, runtime::instruction as instr, stark::BabyBearPoseidon2Outer, + MemAccessKind, RecursionProgram, Runtime, }; use p3_baby_bear::{BabyBear, DiffusionMatrixBabyBear}; use p3_field::{AbstractField, PrimeField32}; use p3_symmetric::Permutation; use sp1_core_machine::utils::{run_test_machine, setup_logger}; - use sp1_recursion_core::stark::config::BabyBearPoseidon2Outer; use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, inner_perm, StarkGenericConfig}; use zkhash::ark_ff::UniformRand; @@ -126,8 +117,8 @@ pub(crate) mod tests { type SC = BabyBearPoseidon2Outer; type F = ::Val; type EF = ::Challenge; - type A = RecursionAir; - type B = RecursionAir; + type A = RecursionAir; + type B = RecursionAir; let input = [1; WIDTH]; let output = inner_perm() @@ -172,7 +163,7 @@ pub(crate) mod tests { runtime.run().unwrap(); let config = SC::new(); - let machine_deg_3 = A::machine_wide(config); + let machine_deg_3 = A::compress_machine(config); let (pk_3, vk_3) = machine_deg_3.setup(&program); let result_deg_3 = run_test_machine(vec![runtime.record.clone()], machine_deg_3, pk_3, vk_3); @@ -181,7 +172,7 @@ pub(crate) mod tests { } let config = SC::new(); - let machine_deg_9 = B::machine_wide(config); + let machine_deg_9 = B::compress_machine(config); let (pk_9, vk_9) = machine_deg_9.setup(&program); let result_deg_9 = run_test_machine(vec![runtime.record], machine_deg_9, pk_9, vk_9); if let Err(e) = result_deg_9 { diff --git a/crates/recursion/core-v2/src/chips/poseidon2_wide/trace.rs b/crates/recursion/core/src/chips/poseidon2_wide/trace.rs similarity index 95% rename from crates/recursion/core-v2/src/chips/poseidon2_wide/trace.rs rename to crates/recursion/core/src/chips/poseidon2_wide/trace.rs index bed204340d..0d5c666265 100644 --- a/crates/recursion/core-v2/src/chips/poseidon2_wide/trace.rs +++ b/crates/recursion/core/src/chips/poseidon2_wide/trace.rs @@ -9,9 +9,6 @@ use sp1_primitives::RC_16_30_U32; use sp1_stark::air::MachineAir; use tracing::instrument; -#[cfg(debug_assertions)] -use p3_matrix::Matrix; - use crate::{ chips::{ mem::MemoryAccessCols, @@ -47,7 +44,10 @@ impl MachineAir for Poseidon2WideChip, ) -> RowMajorMatrix { let events = &input.poseidon2_events; - let padded_nb_rows = next_power_of_two(events.len(), self.fixed_log2_rows); + let padded_nb_rows = match input.fixed_log2_rows(self) { + Some(log2_rows) => 1 << log2_rows, + None => next_power_of_two(events.len(), None), + }; let num_columns = >::width(self); let mut values = vec![F::zero(); padded_nb_rows * num_columns]; @@ -71,16 +71,7 @@ impl MachineAir for Poseidon2WideChip bool { @@ -102,7 +93,10 @@ impl MachineAir for Poseidon2WideChip>(); - let padded_nb_rows = next_power_of_two(instrs.len(), self.fixed_log2_rows); + let padded_nb_rows = match program.fixed_log2_rows(self) { + Some(log2_rows) => 1 << log2_rows, + None => next_power_of_two(instrs.len(), None), + }; let mut values = vec![F::zero(); padded_nb_rows * PREPROCESSED_POSEIDON2_WIDTH]; 
let populate_len = instrs.len() * PREPROCESSED_POSEIDON2_WIDTH; @@ -307,7 +301,7 @@ mod tests { ], ..Default::default() }; - let chip_3 = Poseidon2WideChip::<3>::default(); + let chip_3 = Poseidon2WideChip::<3>; let _: RowMajorMatrix = chip_3.generate_trace(&shard, &mut ExecutionRecord::default()); } @@ -329,7 +323,7 @@ mod tests { ], ..Default::default() }; - let chip_9 = Poseidon2WideChip::<9>::default(); + let chip_9 = Poseidon2WideChip::<9>; let _: RowMajorMatrix = chip_9.generate_trace(&shard, &mut ExecutionRecord::default()); } } diff --git a/crates/recursion/core-v2/src/chips/public_values.rs b/crates/recursion/core/src/chips/public_values.rs similarity index 93% rename from crates/recursion/core-v2/src/chips/public_values.rs rename to crates/recursion/core/src/chips/public_values.rs index b461cee328..e81ed89758 100644 --- a/crates/recursion/core-v2/src/chips/public_values.rs +++ b/crates/recursion/core/src/chips/public_values.rs @@ -5,10 +5,10 @@ use p3_field::PrimeField32; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use sp1_core_machine::utils::pad_rows_fixed; use sp1_derive::AlignedBorrow; -use sp1_recursion_core::air::{RecursionPublicValues, RECURSIVE_PROOF_NUM_PV_ELTS}; use sp1_stark::air::MachineAir; use crate::{ + air::{RecursionPublicValues, RECURSIVE_PROOF_NUM_PV_ELTS}, builder::SP1RecursionAirBuilder, runtime::{Instruction, RecursionProgram}, ExecutionRecord, @@ -22,8 +22,10 @@ pub const NUM_PUBLIC_VALUES_COLS: usize = core::mem::size_of::>(); +pub(crate) const PUB_VALUES_LOG_HEIGHT: usize = 4; + #[derive(Default)] -pub struct PublicValuesChip {} +pub struct PublicValuesChip; /// The preprocessed columns for the CommitPVHash instruction. #[derive(AlignedBorrow, Debug, Clone, Copy)] @@ -94,7 +96,12 @@ impl MachineAir for PublicValuesChip { } // Pad the preprocessed rows to 8 rows. - pad_rows_fixed(&mut rows, || [F::zero(); NUM_PUBLIC_VALUES_PREPROCESSED_COLS], Some(3)); + // gpu code breaks for small traces + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_PUBLIC_VALUES_PREPROCESSED_COLS], + Some(PUB_VALUES_LOG_HEIGHT), + ); let trace = RowMajorMatrix::new( rows.into_iter().flatten().collect(), @@ -127,7 +134,11 @@ impl MachineAir for PublicValuesChip { } // Pad the trace to 8 rows. - pad_rows_fixed(&mut rows, || [F::zero(); NUM_PUBLIC_VALUES_COLS], Some(3)); + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_PUBLIC_VALUES_COLS], + Some(PUB_VALUES_LOG_HEIGHT), + ); // Convert the trace to a row major matrix. 
RowMajorMatrix::new(rows.into_iter().flatten().collect(), NUM_PUBLIC_VALUES_COLS) @@ -169,10 +180,7 @@ where mod tests { use rand::{rngs::StdRng, Rng, SeedableRng}; use sp1_core_machine::utils::setup_logger; - use sp1_recursion_core::{ - air::{RecursionPublicValues, NUM_PV_ELMS_TO_HASH, RECURSIVE_PROOF_NUM_PV_ELTS}, - stark::config::BabyBearPoseidon2Outer, - }; + use sp1_stark::{air::MachineAir, StarkGenericConfig}; use std::{array, borrow::Borrow}; @@ -181,9 +189,11 @@ mod tests { use p3_matrix::dense::RowMajorMatrix; use crate::{ + air::{RecursionPublicValues, NUM_PV_ELMS_TO_HASH, RECURSIVE_PROOF_NUM_PV_ELTS}, chips::public_values::PublicValuesChip, machine::tests::run_recursion_test_machines, runtime::{instruction as instr, ExecutionRecord}, + stark::BabyBearPoseidon2Outer, CommitPublicValuesEvent, MemAccessKind, RecursionProgram, DIGEST_SIZE, }; @@ -235,7 +245,7 @@ mod tests { }], ..Default::default() }; - let chip = PublicValuesChip::default(); + let chip = PublicValuesChip; let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); println!("{:?}", trace.values) } diff --git a/crates/recursion/core/src/cpu/air/alu.rs b/crates/recursion/core/src/cpu/air/alu.rs deleted file mode 100644 index 54e11305ed..0000000000 --- a/crates/recursion/core/src/cpu/air/alu.rs +++ /dev/null @@ -1,54 +0,0 @@ -use p3_air::AirBuilder; -use p3_field::{AbstractField, Field}; -use sp1_stark::air::{BinomialExtension, ExtensionAirBuilder}; - -use crate::{ - air::{BinomialExtensionUtils, SP1RecursionAirBuilder}, - cpu::{CpuChip, CpuCols}, - memory::MemoryCols, -}; - -impl CpuChip { - /// Eval the ALU instructions. - /// - /// # Warning - /// The division constraints allow a = 0/0 for any a. - pub fn eval_alu(&self, builder: &mut AB, local: &CpuCols) - where - AB: SP1RecursionAirBuilder, - { - let one = AB::Expr::one(); - let is_alu_instruction = self.is_alu_instruction::(local); - - // Convert operand values from Block to BinomialExtension. - let a_ext: BinomialExtension = - BinomialExtensionUtils::from_block(local.a.value().map(|x| x.into())); - let b_ext: BinomialExtension = - BinomialExtensionUtils::from_block(local.b.value().map(|x| x.into())); - let c_ext: BinomialExtension = - BinomialExtensionUtils::from_block(local.c.value().map(|x| x.into())); - - // Verify that the b and c registers are base elements for field operations. - builder - .when(is_alu_instruction.clone()) - .when(one.clone() - local.selectors.is_ext) - .assert_is_base_element(b_ext.clone()); - builder - .when(is_alu_instruction) - .when(one - local.selectors.is_ext) - .assert_is_base_element(c_ext.clone()); - - // Verify the actual operation. - builder - .when(local.selectors.is_add) - .assert_ext_eq(a_ext.clone(), b_ext.clone() + c_ext.clone()); - builder - .when(local.selectors.is_sub) - .assert_ext_eq(a_ext.clone(), b_ext.clone() - c_ext.clone()); - builder - .when(local.selectors.is_mul) - .assert_ext_eq(a_ext.clone(), b_ext.clone() * c_ext.clone()); - // For div operation, we assert that b == a * c (equivalent to a == b / c). 
- builder.when(local.selectors.is_div).assert_ext_eq(b_ext, a_ext * c_ext); - } -} diff --git a/crates/recursion/core/src/cpu/air/branch.rs b/crates/recursion/core/src/cpu/air/branch.rs deleted file mode 100644 index 2bccb417d6..0000000000 --- a/crates/recursion/core/src/cpu/air/branch.rs +++ /dev/null @@ -1,79 +0,0 @@ -use p3_air::AirBuilder; -use p3_field::{AbstractField, Field}; -use sp1_stark::air::{BinomialExtension, ExtensionAirBuilder}; - -use crate::{ - air::{ - BinomialExtensionUtils, Block, BlockBuilder, IsExtZeroOperation, SP1RecursionAirBuilder, - }, - cpu::{CpuChip, CpuCols}, - memory::MemoryCols, -}; - -impl CpuChip { - /// Eval the BRANCH operations. - pub fn eval_branch( - &self, - builder: &mut AB, - local: &CpuCols, - next_pc: &mut AB::Expr, - ) where - AB: SP1RecursionAirBuilder, - { - let branch_cols = local.opcode_specific.branch(); - let is_branch_instruction = self.is_branch_instruction::(local); - let one = AB::Expr::one(); - - // Convert operand values from Block to BinomialExtension. Note that it gets the - // previous value of the `a` and `b` operands, since BNENIC will modify `a`. - let a_prev_ext: BinomialExtension = - BinomialExtensionUtils::from_block(local.a.prev_value().map(|x| x.into())); - let a_ext: BinomialExtension = - BinomialExtensionUtils::from_block(local.a.value().map(|x| x.into())); - let b_ext: BinomialExtension = - BinomialExtensionUtils::from_block(local.b.value().map(|x| x.into())); - let one_ext: BinomialExtension = - BinomialExtensionUtils::from_block(Block::from(one.clone())); - - let expected_a_ext = a_prev_ext + one_ext; - - // If the instruction is a BNEINC, verify that the a value is incremented by one. - builder - .when(local.is_real) - .when(local.selectors.is_bneinc) - .assert_block_eq(a_ext.as_block(), expected_a_ext.as_block()); - - let comparison_diff = a_ext - b_ext; - - // Verify branch_cols.camparison_diff col. - builder.when(is_branch_instruction.clone()).assert_ext_eq( - BinomialExtension::from(branch_cols.comparison_diff_val), - comparison_diff, - ); - - // Verify branch_cols.comparison_diff.result col. - IsExtZeroOperation::::eval( - builder, - BinomialExtension::from(branch_cols.comparison_diff_val), - branch_cols.comparison_diff, - is_branch_instruction.clone(), - ); - - // Verify branch_col.do_branch col. - let mut do_branch = local.selectors.is_beq * branch_cols.comparison_diff.result; - do_branch += local.selectors.is_bne * (one.clone() - branch_cols.comparison_diff.result); - do_branch += local.selectors.is_bneinc * (one.clone() - branch_cols.comparison_diff.result); - builder.when(is_branch_instruction.clone()).assert_eq(branch_cols.do_branch, do_branch); - - // Verify branch_col.next_pc col. - let pc_offset = local.c.value().0[0]; - let expected_next_pc = - builder.if_else(branch_cols.do_branch, local.pc + pc_offset, local.pc + one); - builder - .when(is_branch_instruction.clone()) - .assert_eq(branch_cols.next_pc, expected_next_pc); - - // Add to the `next_pc` expression. - *next_pc = is_branch_instruction * branch_cols.next_pc; - } -} diff --git a/crates/recursion/core/src/cpu/air/heap.rs b/crates/recursion/core/src/cpu/air/heap.rs deleted file mode 100644 index 170f3246f9..0000000000 --- a/crates/recursion/core/src/cpu/air/heap.rs +++ /dev/null @@ -1,29 +0,0 @@ -use p3_field::{AbstractField, Field}; - -use crate::{ - air::SP1RecursionAirBuilder, - cpu::{CpuChip, CpuCols}, - memory::MemoryCols, - runtime::HEAP_START_ADDRESS, -}; - -impl CpuChip { - /// Eval the heap ptr. 
- ///s - /// This function will ensure that the heap size never goes above 2^28. - pub fn eval_heap_ptr(&self, builder: &mut AB, local: &CpuCols) - where - AB: SP1RecursionAirBuilder, - { - let heap_columns = local.opcode_specific.heap_expand(); - - let heap_size = local.a.value()[0] - AB::Expr::from_canonical_usize(HEAP_START_ADDRESS); - - builder.eval_range_check_28bits( - heap_size, - heap_columns.diff_16bit_limb, - heap_columns.diff_12bit_limb, - local.selectors.is_heap_expand, - ); - } -} diff --git a/crates/recursion/core/src/cpu/air/jump.rs b/crates/recursion/core/src/cpu/air/jump.rs deleted file mode 100644 index 8dbd002c02..0000000000 --- a/crates/recursion/core/src/cpu/air/jump.rs +++ /dev/null @@ -1,44 +0,0 @@ -use p3_air::AirBuilder; -use p3_field::{AbstractField, Field}; - -use crate::{ - air::{Block, BlockBuilder, SP1RecursionAirBuilder}, - cpu::{CpuChip, CpuCols}, - memory::MemoryCols, - runtime::STACK_SIZE, -}; - -impl CpuChip { - /// Eval the JUMP instructions. - /// - /// This method will verify the fp column values and add to the `next_pc` expression. - pub fn eval_jump( - &self, - builder: &mut AB, - local: &CpuCols, - next: &CpuCols, - next_pc: &mut AB::Expr, - ) where - AB: SP1RecursionAirBuilder, - { - let is_jump_instr = self.is_jump_instruction::(local); - - // Verify the next row's fp. - builder.when_first_row().assert_eq(local.fp, F::from_canonical_usize(STACK_SIZE)); - let not_jump_instruction = AB::Expr::one() - is_jump_instr.clone(); - let expected_next_fp = local.selectors.is_jal * (local.fp + local.c.value()[0]) - + local.selectors.is_jalr * local.c.value()[0] - + not_jump_instruction * local.fp; - builder.when_transition().when(next.is_real).assert_eq(next.fp, expected_next_fp); - - // Verify the a operand values. - let expected_a_val = local.selectors.is_jal * local.pc - + local.selectors.is_jalr * (local.pc + AB::Expr::one()); - let expected_a_val_block = Block::from(expected_a_val); - builder.when(is_jump_instr).assert_block_eq(*local.a.value(), expected_a_val_block); - - // Add to the `next_pc` expression. - *next_pc += local.selectors.is_jal * (local.pc + local.b.value()[0]); - *next_pc += local.selectors.is_jalr * local.b.value()[0]; - } -} diff --git a/crates/recursion/core/src/cpu/air/memory.rs b/crates/recursion/core/src/cpu/air/memory.rs deleted file mode 100644 index 9ba2912958..0000000000 --- a/crates/recursion/core/src/cpu/air/memory.rs +++ /dev/null @@ -1,45 +0,0 @@ -use p3_air::AirBuilder; -use p3_field::Field; -use sp1_core_executor::events::MemoryAccessPosition; - -use crate::{ - air::{BlockBuilder, SP1RecursionAirBuilder}, - cpu::{CpuChip, CpuCols}, - memory::MemoryCols, -}; - -impl CpuChip { - // Eval the MEMORY instructions. - pub fn eval_memory(&self, builder: &mut AB, local: &CpuCols) - where - AB: SP1RecursionAirBuilder, - { - let is_memory_instr = self.is_memory_instruction::(local); - let index = local.c.value()[0]; - let ptr = local.b.value()[0]; - let memory_addr = ptr + index * local.instruction.size_imm + local.instruction.offset_imm; - - let memory_cols = local.opcode_specific.memory(); - - // Check that the memory_cols.memory_addr column equals the computed memory_addr. - builder.when(is_memory_instr.clone()).assert_eq(memory_addr, memory_cols.memory_addr); - - builder.recursion_eval_memory_access( - local.clk + AB::F::from_canonical_u32(MemoryAccessPosition::Memory as u32), - memory_cols.memory_addr, - &memory_cols.memory, - is_memory_instr.clone(), - ); - - // Constraints on the memory column depending on load or store. 
- // We read from memory when it is a load. - builder - .when(local.selectors.is_load) - .assert_block_eq(*memory_cols.memory.prev_value(), *memory_cols.memory.value()); - // When there is a store, we ensure that we are writing the value of the a operand to the - // memory. - builder - .when(is_memory_instr) - .assert_block_eq(*local.a.value(), *memory_cols.memory.value()); - } -} diff --git a/crates/recursion/core/src/cpu/air/mod.rs b/crates/recursion/core/src/cpu/air/mod.rs deleted file mode 100644 index 437696a0ca..0000000000 --- a/crates/recursion/core/src/cpu/air/mod.rs +++ /dev/null @@ -1,298 +0,0 @@ -mod alu; -mod branch; -mod heap; -mod jump; -mod memory; -mod operands; -mod public_values; -mod system; - -use std::borrow::Borrow; - -use p3_air::{Air, AirBuilder}; -use p3_field::{AbstractField, Field}; -use p3_matrix::Matrix; -use sp1_stark::air::BaseAirBuilder; - -use crate::{ - air::{RecursionPublicValues, SP1RecursionAirBuilder, RECURSIVE_PROOF_NUM_PV_ELTS}, - cpu::{columns::SELECTOR_COL_MAP, CpuChip, CpuCols}, - memory::MemoryCols, -}; - -impl Air for CpuChip -where - AB: SP1RecursionAirBuilder, -{ - fn eval(&self, builder: &mut AB) { - let main = builder.main(); - let (local, next) = (main.row_slice(0), main.row_slice(1)); - let local: &CpuCols = (*local).borrow(); - let next: &CpuCols = (*next).borrow(); - let pv = builder.public_values(); - let pv_elms: [AB::Expr; RECURSIVE_PROOF_NUM_PV_ELTS] = - core::array::from_fn(|i| pv[i].into()); - let public_values: &RecursionPublicValues = pv_elms.as_slice().borrow(); - - let zero = AB::Expr::zero(); - let one = AB::Expr::one(); - - // Constrain the program. - - // Constraints for "fake" columns. - builder.when_not(local.is_real).assert_one(local.instruction.imm_b); - builder.when_not(local.is_real).assert_one(local.instruction.imm_c); - builder.when_not(local.is_real).assert_one(local.selectors.is_noop); - - local - .selectors - .into_iter() - .enumerate() - .filter(|(i, _)| *i != SELECTOR_COL_MAP.is_noop) - .for_each(|(_, selector)| builder.when_not(local.is_real).assert_zero(selector)); - - // Initialize clk and pc. - builder.when_first_row().assert_zero(local.clk); - builder.when_first_row().assert_zero(local.pc); - - builder.send_program(local.pc, local.instruction, local.selectors, local.is_real); - - // Constrain the operands. - self.eval_operands(builder, local); - - // Constrain memory instructions. - self.eval_memory(builder, local); - - // Constrain ALU instructions. - self.eval_alu(builder, local); - - // Constrain branches and jumps and constrain the next pc. - { - // Expression for the expected next_pc. This will be added to in `eval_branch` and - // `eval_jump` to account for possible jumps and branches. - let mut next_pc = zero; - - self.eval_branch(builder, local, &mut next_pc); - - self.eval_jump(builder, local, next, &mut next_pc); - - // If the instruction is not a jump or branch instruction, then next pc = pc + 1. - let not_branch_or_jump = one.clone() - - self.is_branch_instruction::(local) - - self.is_jump_instruction::(local); - next_pc += not_branch_or_jump.clone() * (local.pc + one); - - builder.when_transition().when(next.is_real).assert_eq(next_pc, next.pc); - } - - // Constrain the syscalls. 
- let send_syscall = local.selectors.is_poseidon - + local.selectors.is_fri_fold - + local.selectors.is_exp_reverse_bits_len; - - let operands = [ - local.clk.into(), - local.a.value()[0].into(), - local.b.value()[0].into(), - local.c.value()[0] + local.instruction.offset_imm, - ]; - builder.send_table(local.instruction.opcode, &operands, send_syscall); - - // Constrain the public values digest. - self.eval_commit(builder, local, public_values.digest.clone()); - - // Constrain the clk. - self.eval_clk(builder, local, next); - - // Constrain the system instructions (TRAP, HALT). - self.eval_system_instructions(builder, local, next, public_values); - - // Verify the heap size. - self.eval_heap_ptr(builder, local); - - // Constrain the is_real_flag. - self.eval_is_real(builder, local, next); - - // Create a dummy constraint of the given degree to compress the permutation columns. - let mut expr = local.is_real * local.is_real; - for _ in 0..(L - 2) { - expr *= local.is_real.into(); - } - builder.assert_eq(expr.clone(), expr.clone()); - } -} - -impl CpuChip { - /// Eval the clk. - /// - /// For all instructions except for FRI fold, the next clk is the current clk + 4. - /// For FRI fold, the next clk is the current clk + number of FRI_FOLD iterations. That value - /// is stored in the `a` operand. - pub fn eval_clk(&self, builder: &mut AB, local: &CpuCols, next: &CpuCols) - where - AB: SP1RecursionAirBuilder, - { - builder - .when_transition() - .when(next.is_real) - .when_not(local.selectors.is_fri_fold + local.selectors.is_exp_reverse_bits_len) - .assert_eq(local.clk.into() + AB::F::from_canonical_u32(4), next.clk); - - builder - .when_transition() - .when(next.is_real) - .when(local.selectors.is_fri_fold) - .assert_eq(local.clk.into() + local.a.value()[0], next.clk); - - builder - .when_transition() - .when(next.is_real) - .when(local.selectors.is_exp_reverse_bits_len) - .assert_eq(local.clk.into() + local.c.value()[0], next.clk); - } - - /// Eval the is_real flag. - pub fn eval_is_real( - &self, - builder: &mut AB, - local: &CpuCols, - next: &CpuCols, - ) where - AB: SP1RecursionAirBuilder, - { - builder.assert_bool(local.is_real); - - // First row should be real. - builder.when_first_row().assert_one(local.is_real); - - // Once rows transition to not real, then they should stay not real. - builder.when_transition().when_not(local.is_real).assert_zero(next.is_real); - } - - /// Expr to check for alu instructions. - pub fn is_alu_instruction(&self, local: &CpuCols) -> AB::Expr - where - AB: SP1RecursionAirBuilder, - { - local.selectors.is_add - + local.selectors.is_sub - + local.selectors.is_mul - + local.selectors.is_div - } - - /// Expr to check for branch instructions. - pub fn is_branch_instruction(&self, local: &CpuCols) -> AB::Expr - where - AB: SP1RecursionAirBuilder, - { - local.selectors.is_beq + local.selectors.is_bne + local.selectors.is_bneinc - } - - /// Expr to check for jump instructions. - pub fn is_jump_instruction(&self, local: &CpuCols) -> AB::Expr - where - AB: SP1RecursionAirBuilder, - { - local.selectors.is_jal + local.selectors.is_jalr - } - - /// Expr to check for memory instructions. - pub fn is_memory_instruction(&self, local: &CpuCols) -> AB::Expr - where - AB: SP1RecursionAirBuilder, - { - local.selectors.is_load + local.selectors.is_store - } - - /// Expr to check for instructions that only read from operand `a`. 
- pub fn is_op_a_read_only_instruction(&self, local: &CpuCols) -> AB::Expr - where - AB: SP1RecursionAirBuilder, - { - local.selectors.is_beq - + local.selectors.is_bne - + local.selectors.is_fri_fold - + local.selectors.is_poseidon - + local.selectors.is_store - + local.selectors.is_noop - + local.selectors.is_ext_to_felt - + local.selectors.is_commit - + local.selectors.is_trap - + local.selectors.is_halt - + local.selectors.is_exp_reverse_bits_len - } - - /// Expr to check for instructions that are commit instructions. - pub fn is_commit_instruction(&self, local: &CpuCols) -> AB::Expr - where - AB: SP1RecursionAirBuilder, - { - local.selectors.is_commit.into() - } - - /// Expr to check for system instructions. - pub fn is_system_instruction(&self, local: &CpuCols) -> AB::Expr - where - AB: SP1RecursionAirBuilder, - { - local.selectors.is_trap + local.selectors.is_halt - } -} - -#[cfg(test)] -mod tests { - use itertools::Itertools; - use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; - use std::time::Instant; - - use p3_baby_bear::{BabyBear, DiffusionMatrixBabyBear}; - use p3_field::AbstractField; - use p3_matrix::{dense::RowMajorMatrix, Matrix}; - use p3_poseidon2::{Poseidon2, Poseidon2ExternalMatrixGeneral}; - use sp1_core_machine::utils::{uni_stark_prove, uni_stark_verify}; - use sp1_stark::air::MachineAir; - - use crate::{air::Block, memory::MemoryGlobalChip, runtime::ExecutionRecord}; - - #[test] - fn test_cpu_unistark() { - let config = BabyBearPoseidon2::compressed(); - let mut challenger = config.challenger(); - - let chip = MemoryGlobalChip { fixed_log2_rows: None }; - - let test_vals = (0..16).map(BabyBear::from_canonical_u32).collect_vec(); - - let mut input_exec = ExecutionRecord::::default(); - for val in test_vals.into_iter() { - let event = (val, val, Block::from(BabyBear::zero())); - input_exec.last_memory_record.push(event); - } - - // Add a dummy initialize event because the AIR expects at least one. - input_exec.first_memory_record.push((BabyBear::zero(), Block::from(BabyBear::zero()))); - - println!("input exec: {:?}", input_exec.last_memory_record.len()); - let trace: RowMajorMatrix = - chip.generate_trace(&input_exec, &mut ExecutionRecord::::default()); - println!("trace dims is width: {:?}, height: {:?}", trace.width(), trace.height()); - - let start = Instant::now(); - let proof = uni_stark_prove(&config, &chip, &mut challenger, trace); - let duration = start.elapsed().as_secs_f64(); - println!("proof duration = {:?}", duration); - - let mut challenger: p3_challenger::DuplexChallenger< - BabyBear, - Poseidon2, - 16, - 8, - > = config.challenger(); - let start = Instant::now(); - uni_stark_verify(&config, &chip, &mut challenger, &proof) - .expect("expected proof to be valid"); - - let duration = start.elapsed().as_secs_f64(); - println!("verify duration = {:?}", duration); - } -} diff --git a/crates/recursion/core/src/cpu/air/operands.rs b/crates/recursion/core/src/cpu/air/operands.rs deleted file mode 100644 index 68f0391d7c..0000000000 --- a/crates/recursion/core/src/cpu/air/operands.rs +++ /dev/null @@ -1,51 +0,0 @@ -use p3_field::{AbstractField, Field}; -use sp1_core_executor::events::MemoryAccessPosition; - -use crate::{ - air::{BlockBuilder, SP1RecursionAirBuilder}, - cpu::{CpuChip, CpuCols}, - memory::MemoryCols, -}; - -impl CpuChip { - /// Eval the operands. - pub fn eval_operands(&self, builder: &mut AB, local: &CpuCols) - where - AB: SP1RecursionAirBuilder, - { - // Constraint the case of immediates for the b and c operands. 
- builder - .when(local.instruction.imm_b) - .assert_block_eq::(*local.b.value(), local.instruction.op_b); - builder - .when(local.instruction.imm_c) - .assert_block_eq::(*local.c.value(), local.instruction.op_c); - - // Constraint the operand accesses. - let a_addr = local.fp.into() + local.instruction.op_a.into(); - builder.recursion_eval_memory_access( - local.clk + AB::F::from_canonical_u32(MemoryAccessPosition::A as u32), - a_addr, - &local.a, - local.is_real.into(), - ); - // If the instruction only reads from operand A, then verify that previous and current - // values are equal. - let is_op_a_read_only = self.is_op_a_read_only_instruction::(local); - builder.when(is_op_a_read_only).assert_block_eq(*local.a.prev_value(), *local.a.value()); - - builder.recursion_eval_memory_access( - local.clk + AB::F::from_canonical_u32(MemoryAccessPosition::B as u32), - local.fp.into() + local.instruction.op_b[0].into(), - &local.b, - AB::Expr::one() - local.instruction.imm_b.into(), - ); - - builder.recursion_eval_memory_access( - local.clk + AB::F::from_canonical_u32(MemoryAccessPosition::C as u32), - local.fp.into() + local.instruction.op_c[0].into(), - &local.c, - AB::Expr::one() - local.instruction.imm_c.into(), - ); - } -} diff --git a/crates/recursion/core/src/cpu/air/public_values.rs b/crates/recursion/core/src/cpu/air/public_values.rs deleted file mode 100644 index 533c17b4fc..0000000000 --- a/crates/recursion/core/src/cpu/air/public_values.rs +++ /dev/null @@ -1,55 +0,0 @@ -use p3_air::AirBuilder; -use p3_field::{AbstractField, Field}; - -use crate::{ - air::{BlockBuilder, SP1RecursionAirBuilder}, - cpu::{CpuChip, CpuCols}, - memory::MemoryCols, - runtime::DIGEST_SIZE, -}; - -impl CpuChip { - /// Eval the COMMIT instructions. - /// - /// This method will verify the committed public value. - pub fn eval_commit( - &self, - builder: &mut AB, - local: &CpuCols, - commit_digest: [AB::Expr; DIGEST_SIZE], - ) where - AB: SP1RecursionAirBuilder, - { - let public_values_cols = local.opcode_specific.public_values(); - let is_commit_instruction = self.is_commit_instruction::(local); - - // Verify all elements in the index bitmap are bools. - let mut bitmap_sum = AB::Expr::zero(); - for bit in public_values_cols.idx_bitmap.iter() { - builder.when(is_commit_instruction.clone()).assert_bool(*bit); - bitmap_sum += (*bit).into(); - } - // When the instruction is COMMIT there should be exactly one set bit. - builder.when(is_commit_instruction.clone()).assert_one(bitmap_sum.clone()); - - // Verify that idx passed in the b operand corresponds to the set bit in index bitmap. - for (i, bit) in public_values_cols.idx_bitmap.iter().enumerate() { - builder.when(*bit * is_commit_instruction.clone()).assert_block_eq( - *local.b.prev_value(), - AB::Expr::from_canonical_u32(i as u32).into(), - ); - } - - // Calculated the expected public value. - let expected_pv_digest_element = - builder.index_array(&commit_digest, &public_values_cols.idx_bitmap); - - // Get the committed public value in the program from operand a. - let digest_element = local.a.prev_value(); - - // Verify the public value element. 
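The assertion that follows compares the committed element against the value selected through `idx_bitmap`. That selection is a one-hot inner product over the digest; a rough reference version over plain integers (hypothetical names, digest size assumed to be 8 as in `DIGEST_SIZE`):

    // Illustrative only: selecting digest[idx] via a one-hot bitmap, as the COMMIT
    // constraints do with `idx_bitmap` (all entries boolean, exactly one bit set).
    fn index_by_bitmap(digest: &[u64; 8], idx_bitmap: &[u64; 8]) -> u64 {
        // With exactly one bit set, the sum collapses to the selected element.
        digest.iter().zip(idx_bitmap.iter()).map(|(d, b)| d * b).sum()
    }

    fn main() {
        let digest = [10, 11, 12, 13, 14, 15, 16, 17];
        let mut bitmap = [0u64; 8];
        bitmap[3] = 1; // one-hot encoding of index 3
        assert_eq!(index_by_bitmap(&digest, &bitmap), 13);
    }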
- builder - .when(is_commit_instruction.clone()) - .assert_block_eq(expected_pv_digest_element.into(), *digest_element); - } -} diff --git a/crates/recursion/core/src/cpu/air/system.rs b/crates/recursion/core/src/cpu/air/system.rs deleted file mode 100644 index 9c5420b99e..0000000000 --- a/crates/recursion/core/src/cpu/air/system.rs +++ /dev/null @@ -1,44 +0,0 @@ -use p3_air::AirBuilder; -use p3_field::Field; -use sp1_stark::air::BaseAirBuilder; - -use crate::{ - air::{RecursionPublicValues, SP1RecursionAirBuilder}, - cpu::{CpuChip, CpuCols}, -}; - -impl CpuChip { - /// Eval the system instructions (TRAP, HALT). - pub fn eval_system_instructions( - &self, - builder: &mut AB, - local: &CpuCols, - next: &CpuCols, - public_values: &RecursionPublicValues, - ) where - AB: SP1RecursionAirBuilder, - { - let is_system_instruction = self.is_system_instruction::(local); - - // Verify that the last real row is either TRAP or HALT. - builder - .when_transition() - .when(local.is_real) - .when_not(next.is_real) - .assert_one(is_system_instruction.clone()); - - builder.when_last_row().when(local.is_real).assert_one(is_system_instruction.clone()); - - // Verify that all other real rows are not TRAP or HALT. - builder - .when_transition() - .when(local.is_real) - .when(next.is_real) - .assert_zero(is_system_instruction); - - // Verify the correct public value exit code. - builder.when(local.selectors.is_trap).assert_one(public_values.exit_code.clone()); - - builder.when(local.selectors.is_halt).assert_zero(public_values.exit_code.clone()); - } -} diff --git a/crates/recursion/core/src/cpu/columns/branch.rs b/crates/recursion/core/src/cpu/columns/branch.rs deleted file mode 100644 index 58e33081fd..0000000000 --- a/crates/recursion/core/src/cpu/columns/branch.rs +++ /dev/null @@ -1,17 +0,0 @@ -use sp1_derive::AlignedBorrow; -use sp1_stark::air::BinomialExtension; -use std::mem::size_of; - -use crate::air::IsExtZeroOperation; - -#[allow(dead_code)] -pub const NUM_BRANCH_COLS: usize = size_of::>(); - -#[derive(AlignedBorrow, Default, Debug, Clone, Copy)] -#[repr(C)] -pub struct BranchCols { - pub(crate) comparison_diff: IsExtZeroOperation, - pub(crate) comparison_diff_val: BinomialExtension, - pub(crate) do_branch: T, - pub(crate) next_pc: T, -} diff --git a/crates/recursion/core/src/cpu/columns/heap_expand.rs b/crates/recursion/core/src/cpu/columns/heap_expand.rs deleted file mode 100644 index fcdaa882d9..0000000000 --- a/crates/recursion/core/src/cpu/columns/heap_expand.rs +++ /dev/null @@ -1,8 +0,0 @@ -use sp1_derive::AlignedBorrow; - -#[derive(AlignedBorrow, Default, Debug, Clone, Copy)] -#[repr(C)] -pub struct HeapExpandCols { - pub diff_16bit_limb: T, - pub diff_12bit_limb: T, -} diff --git a/crates/recursion/core/src/cpu/columns/memory.rs b/crates/recursion/core/src/cpu/columns/memory.rs deleted file mode 100644 index 91d74074fd..0000000000 --- a/crates/recursion/core/src/cpu/columns/memory.rs +++ /dev/null @@ -1,14 +0,0 @@ -use sp1_derive::AlignedBorrow; -use std::mem::size_of; - -use crate::memory::MemoryReadWriteCols; - -#[allow(dead_code)] -pub const NUM_MEMORY_COLS: usize = size_of::>(); - -#[derive(AlignedBorrow, Default, Debug, Clone, Copy)] -#[repr(C)] -pub struct MemoryCols { - pub(crate) memory_addr: T, - pub(crate) memory: MemoryReadWriteCols, -} diff --git a/crates/recursion/core/src/cpu/columns/mod.rs b/crates/recursion/core/src/cpu/columns/mod.rs deleted file mode 100644 index 5cdd25a1c3..0000000000 --- a/crates/recursion/core/src/cpu/columns/mod.rs +++ /dev/null @@ -1,49 +0,0 @@ -use 
std::mem::size_of; - -use crate::memory::{MemoryReadCols, MemoryReadWriteCols}; -use p3_air::BaseAir; -use sp1_derive::AlignedBorrow; - -mod branch; -mod heap_expand; -mod instruction; -mod memory; -mod opcode; -mod opcode_specific; -mod public_values; - -pub use instruction::*; -pub use opcode::*; -pub use public_values::*; - -use self::opcode_specific::OpcodeSpecificCols; - -use super::CpuChip; - -pub const NUM_CPU_COLS: usize = size_of::>(); - -impl BaseAir for CpuChip { - fn width(&self) -> usize { - NUM_CPU_COLS - } -} - -/// The column layout for the chip. -#[derive(AlignedBorrow, Default, Clone, Debug)] -#[repr(C)] -pub struct CpuCols { - pub clk: T, - pub pc: T, - pub fp: T, - - pub instruction: InstructionCols, - pub selectors: OpcodeSelectorCols, - - pub a: MemoryReadWriteCols, - pub b: MemoryReadCols, - pub c: MemoryReadCols, - - pub opcode_specific: OpcodeSpecificCols, - - pub is_real: T, -} diff --git a/crates/recursion/core/src/cpu/columns/opcode.rs b/crates/recursion/core/src/cpu/columns/opcode.rs deleted file mode 100644 index 9bc733b83e..0000000000 --- a/crates/recursion/core/src/cpu/columns/opcode.rs +++ /dev/null @@ -1,124 +0,0 @@ -use std::{borrow::BorrowMut, mem::transmute}; - -use p3_field::PrimeField32; -use p3_util::indices_arr; -use sp1_derive::AlignedBorrow; - -use crate::{ - cpu::Instruction, - runtime::{instruction_is_heap_expand, Opcode}, -}; - -pub(crate) const OPCODE_COUNT: usize = core::mem::size_of::>(); - -const fn make_col_map() -> OpcodeSelectorCols { - let indices_arr = indices_arr::(); - unsafe { transmute::<[usize; OPCODE_COUNT], OpcodeSelectorCols>(indices_arr) } -} - -pub(crate) const SELECTOR_COL_MAP: OpcodeSelectorCols = make_col_map(); - -/// Selectors for the opcode. -/// -/// This contains selectors for the different opcodes corresponding to variants of the [`Opcode`] -/// enum. -#[derive(AlignedBorrow, Clone, Copy, Default, Debug)] -#[repr(C)] -pub struct OpcodeSelectorCols { - // Arithmetic field instructions. - pub is_add: T, - pub is_sub: T, - pub is_mul: T, - pub is_div: T, - pub is_ext: T, - - // Memory instructions. - pub is_load: T, - pub is_store: T, - - // Branch instructions. - pub is_beq: T, - pub is_bne: T, - pub is_bneinc: T, - - // Jump instructions. - pub is_jal: T, - pub is_jalr: T, - - // System instructions. - pub is_trap: T, - pub is_noop: T, - pub is_halt: T, - - pub is_poseidon: T, - pub is_fri_fold: T, - pub is_commit: T, - pub is_ext_to_felt: T, - pub is_exp_reverse_bits_len: T, - pub is_heap_expand: T, -} - -impl OpcodeSelectorCols { - /// Populates the opcode columns with the given instruction. - /// - /// The opcode flag should be set to 1 for the relevant opcode and 0 for the rest. We already - /// assume that the state of the columns is set to zero at the start of the function, so we only - /// need to set the relevant opcode column to 1. 
- pub fn populate(&mut self, instruction: &Instruction) { - match instruction.opcode { - Opcode::ADD | Opcode::EADD => self.is_add = F::one(), - Opcode::SUB | Opcode::ESUB => self.is_sub = F::one(), - Opcode::MUL | Opcode::EMUL => self.is_mul = F::one(), - Opcode::DIV | Opcode::EDIV => self.is_div = F::one(), - Opcode::LOAD => self.is_load = F::one(), - Opcode::STORE => self.is_store = F::one(), - Opcode::BEQ => self.is_beq = F::one(), - Opcode::BNE => self.is_bne = F::one(), - Opcode::BNEINC => self.is_bneinc = F::one(), - Opcode::JAL => self.is_jal = F::one(), - Opcode::JALR => self.is_jalr = F::one(), - Opcode::TRAP => self.is_trap = F::one(), - Opcode::HALT => self.is_halt = F::one(), - Opcode::FRIFold => self.is_fri_fold = F::one(), - Opcode::Poseidon2Compress | Opcode::Poseidon2Absorb | Opcode::Poseidon2Finalize => { - self.is_poseidon = F::one() - } - Opcode::ExpReverseBitsLen => self.is_exp_reverse_bits_len = F::one(), - Opcode::Commit => self.is_commit = F::one(), - Opcode::HintExt2Felt => self.is_ext_to_felt = F::one(), - - Opcode::Hint - | Opcode::HintBits - | Opcode::PrintF - | Opcode::PrintE - | Opcode::RegisterPublicValue - | Opcode::CycleTracker => { - self.is_noop = F::one(); - } - - Opcode::HintLen | Opcode::LessThanF => {} - } - - if matches!(instruction.opcode, Opcode::EADD | Opcode::ESUB | Opcode::EMUL | Opcode::EDIV) { - self.is_ext = F::one(); - } - - if instruction_is_heap_expand(instruction) { - self.is_heap_expand = F::one(); - } - } -} - -impl IntoIterator for &OpcodeSelectorCols { - type Item = T; - - type IntoIter = std::array::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - let mut array = [self.is_add; OPCODE_COUNT]; - let mut_ref: &mut OpcodeSelectorCols = array.as_mut_slice().borrow_mut(); - - *mut_ref = *self; - array.into_iter() - } -} diff --git a/crates/recursion/core/src/cpu/columns/opcode_specific.rs b/crates/recursion/core/src/cpu/columns/opcode_specific.rs deleted file mode 100644 index 0e71e41fad..0000000000 --- a/crates/recursion/core/src/cpu/columns/opcode_specific.rs +++ /dev/null @@ -1,74 +0,0 @@ -use std::{ - fmt::{Debug, Formatter}, - mem::{size_of, transmute}, -}; - -use static_assertions::const_assert; - -use super::{ - branch::BranchCols, heap_expand::HeapExpandCols, memory::MemoryCols, - public_values::PublicValuesCols, -}; - -pub const NUM_OPCODE_SPECIFIC_COLS: usize = size_of::>(); - -/// Shared columns whose interpretation depends on the instruction being executed. -#[derive(Clone, Copy)] -#[repr(C)] -pub union OpcodeSpecificCols { - branch: BranchCols, - memory: MemoryCols, - public_values: PublicValuesCols, - heap_expand: HeapExpandCols, -} - -impl Default for OpcodeSpecificCols { - fn default() -> Self { - // We must use the largest field to avoid uninitialized padding bytes. - const_assert!(size_of::>() == size_of::>()); - - OpcodeSpecificCols { memory: MemoryCols::::default() } - } -} - -impl Debug for OpcodeSpecificCols { - fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { - // SAFETY: repr(C) ensures uniform fields are in declaration order with no padding. - let self_arr: &[T; NUM_OPCODE_SPECIFIC_COLS] = unsafe { transmute(self) }; - Debug::fmt(self_arr, f) - } -} - -// SAFETY: Each view is a valid interpretation of the underlying array. 
-impl OpcodeSpecificCols { - pub fn branch(&self) -> &BranchCols { - unsafe { &self.branch } - } - pub fn branch_mut(&mut self) -> &mut BranchCols { - unsafe { &mut self.branch } - } - - pub fn memory(&self) -> &MemoryCols { - unsafe { &self.memory } - } - - pub fn memory_mut(&mut self) -> &mut MemoryCols { - unsafe { &mut self.memory } - } - - pub fn public_values(&self) -> &PublicValuesCols { - unsafe { &self.public_values } - } - - pub fn public_values_mut(&mut self) -> &mut PublicValuesCols { - unsafe { &mut self.public_values } - } - - pub fn heap_expand(&self) -> &HeapExpandCols { - unsafe { &self.heap_expand } - } - - pub fn heap_expand_mut(&mut self) -> &mut HeapExpandCols { - unsafe { &mut self.heap_expand } - } -} diff --git a/crates/recursion/core/src/cpu/columns/public_values.rs b/crates/recursion/core/src/cpu/columns/public_values.rs deleted file mode 100644 index a7cc631044..0000000000 --- a/crates/recursion/core/src/cpu/columns/public_values.rs +++ /dev/null @@ -1,13 +0,0 @@ -use sp1_derive::AlignedBorrow; -use std::mem::size_of; - -use crate::runtime::DIGEST_SIZE; - -#[allow(dead_code)] -pub const NUM_PUBLIC_VALUES_COLS: usize = size_of::>(); - -#[derive(AlignedBorrow, Default, Debug, Clone, Copy)] -#[repr(C)] -pub struct PublicValuesCols { - pub(crate) idx_bitmap: [T; DIGEST_SIZE], -} diff --git a/crates/recursion/core/src/cpu/mod.rs b/crates/recursion/core/src/cpu/mod.rs deleted file mode 100644 index 2b866b7819..0000000000 --- a/crates/recursion/core/src/cpu/mod.rs +++ /dev/null @@ -1,29 +0,0 @@ -pub mod air; -pub mod columns; -mod trace; - -use crate::air::Block; -pub use crate::{memory::MemoryRecord, runtime::Instruction}; - -pub use columns::*; - -#[derive(Debug, Clone)] -pub struct CpuEvent { - pub clk: F, - pub pc: F, - pub fp: F, - pub instruction: Instruction, - pub a: Block, - pub a_record: Option>, - pub b: Block, - pub b_record: Option>, - pub c: Block, - pub c_record: Option>, - pub memory_record: Option>, -} - -#[derive(Default)] -pub struct CpuChip { - pub fixed_log2_rows: Option, - pub _phantom: std::marker::PhantomData, -} diff --git a/crates/recursion/core/src/cpu/trace.rs b/crates/recursion/core/src/cpu/trace.rs deleted file mode 100644 index fe209115ef..0000000000 --- a/crates/recursion/core/src/cpu/trace.rs +++ /dev/null @@ -1,141 +0,0 @@ -use std::borrow::BorrowMut; - -use crate::{ - air::BinomialExtensionUtils, - memory::MemoryCols, - runtime::{ - get_heap_size_range_check_events, instruction_is_heap_expand, ExecutionRecord, Opcode, - RecursionProgram, D, - }, -}; -use p3_field::{extension::BinomiallyExtendable, PrimeField32}; -use p3_matrix::dense::RowMajorMatrix; -use p3_maybe_rayon::prelude::{IndexedParallelIterator, ParallelIterator, ParallelSliceMut}; -use sp1_core_machine::utils::{next_power_of_two, par_for_each_row}; -use sp1_stark::air::{BinomialExtension, MachineAir}; -use tracing::instrument; - -use super::{CpuChip, CpuCols, NUM_CPU_COLS}; - -impl, const L: usize> MachineAir for CpuChip { - type Record = ExecutionRecord; - type Program = RecursionProgram; - - fn name(&self) -> String { - "CPU".to_string() - } - - fn generate_dependencies(&self, _: &Self::Record, _: &mut Self::Record) { - // There are no dependencies, since we do it all in the runtime. This is just a placeholder. 
- } - - #[instrument(name = "generate cpu trace", level = "debug", skip_all, fields(rows = input.cpu_events.len()))] - fn generate_trace( - &self, - input: &ExecutionRecord, - _: &mut ExecutionRecord, - ) -> RowMajorMatrix { - let nb_events = input.cpu_events.len(); - let padded_nb_rows = next_power_of_two(nb_events, self.fixed_log2_rows); - let mut values = vec![F::zero(); padded_nb_rows * NUM_CPU_COLS]; - - par_for_each_row(&mut values, NUM_CPU_COLS, |i, row| { - if i >= nb_events { - return; - } - let event = &input.cpu_events[i]; - let cols: &mut CpuCols = row.borrow_mut(); - - cols.clk = event.clk; - cols.pc = event.pc; - cols.fp = event.fp; - - // Populate the instruction related columns. - cols.selectors.populate(&event.instruction); - cols.instruction.populate(&event.instruction); - - // Populate the register columns. - if let Some(record) = &event.a_record { - cols.a.populate(record); - } - if let Some(record) = &event.b_record { - cols.b.populate(record); - } else { - *cols.b.value_mut() = event.instruction.op_b; - } - if let Some(record) = &event.c_record { - cols.c.populate(record); - } else { - *cols.c.value_mut() = event.instruction.op_c; - } - if let Some(record) = &event.memory_record { - let memory_cols = cols.opcode_specific.memory_mut(); - memory_cols.memory.populate(record); - memory_cols.memory_addr = record.addr; - } - - // Populate the heap columns. - if instruction_is_heap_expand(&event.instruction) { - let (u16_range_check, u12_range_check) = - get_heap_size_range_check_events(cols.a.value()[0]); - - let heap_cols = cols.opcode_specific.heap_expand_mut(); - heap_cols.diff_16bit_limb = F::from_canonical_u16(u16_range_check.val); - heap_cols.diff_12bit_limb = F::from_canonical_u16(u12_range_check.val); - } - - // Populate the branch columns. - if matches!(event.instruction.opcode, Opcode::BEQ | Opcode::BNE | Opcode::BNEINC) { - let branch_cols = cols.opcode_specific.branch_mut(); - let a_ext: BinomialExtension = - BinomialExtensionUtils::from_block(*cols.a.value()); - let b_ext: BinomialExtension = - BinomialExtensionUtils::from_block(*cols.b.value()); - - let (comparison_diff, do_branch) = match event.instruction.opcode { - Opcode::BEQ => (a_ext - b_ext, a_ext == b_ext), - Opcode::BNE | Opcode::BNEINC => (a_ext - b_ext, a_ext != b_ext), - _ => unreachable!(), - }; - - branch_cols.comparison_diff.populate((comparison_diff).as_block()); - branch_cols.comparison_diff_val = comparison_diff; - branch_cols.do_branch = F::from_bool(do_branch); - branch_cols.next_pc = if do_branch { - event.pc + event.instruction.op_c[0] - } else { - event.pc + F::one() - }; - } - - // Populate the public values columns. - if event.instruction.opcode == Opcode::Commit { - let public_values_cols = cols.opcode_specific.public_values_mut(); - let idx = cols.b.prev_value()[0].as_canonical_u32() as usize; - public_values_cols.idx_bitmap[idx] = F::one(); - } - - cols.is_real = F::one(); - }); - - let mut trace = RowMajorMatrix::new(values, NUM_CPU_COLS); - - // Fill in the dummy values for the padding rows. 
- let padded_rows = - trace.values.par_chunks_mut(NUM_CPU_COLS).enumerate().skip(input.cpu_events.len()); - padded_rows.for_each(|(i, row)| { - let cols: &mut CpuCols = row.borrow_mut(); - cols.selectors.is_noop = F::one(); - cols.instruction.imm_b = F::one(); - cols.instruction.imm_c = F::one(); - cols.clk = F::from_canonical_u32(4) * F::from_canonical_usize(i); - cols.instruction.imm_b = F::from_canonical_u32(1); - cols.instruction.imm_c = F::from_canonical_u32(1); - }); - trace - } - - fn included(&self, _: &Self::Record) -> bool { - true - } -} diff --git a/crates/recursion/core/src/exp_reverse_bits/mod.rs b/crates/recursion/core/src/exp_reverse_bits/mod.rs deleted file mode 100644 index 06c9de7d98..0000000000 --- a/crates/recursion/core/src/exp_reverse_bits/mod.rs +++ /dev/null @@ -1,511 +0,0 @@ -#![allow(clippy::needless_range_loop)] - -use crate::{ - air::{Block, IsZeroOperation, RecursionMemoryAirBuilder}, - memory::{MemoryReadSingleCols, MemoryReadWriteSingleCols}, - runtime::Opcode, -}; -use core::borrow::Borrow; -use p3_air::{Air, AirBuilder, BaseAir}; -use p3_field::{AbstractField, PrimeField32}; -use p3_matrix::{dense::RowMajorMatrix, Matrix}; -use p3_util::reverse_bits_len; -use sp1_core_machine::utils::{next_power_of_two, par_for_each_row}; -use sp1_derive::AlignedBorrow; -use sp1_stark::air::{BaseAirBuilder, ExtensionAirBuilder, MachineAir, SP1AirBuilder}; -use std::borrow::BorrowMut; -use tracing::instrument; - -use crate::{ - air::SP1RecursionAirBuilder, - memory::MemoryRecord, - runtime::{ExecutionRecord, RecursionProgram}, -}; - -pub const NUM_EXP_REVERSE_BITS_LEN_COLS: usize = core::mem::size_of::>(); - -#[derive(Default)] -pub struct ExpReverseBitsLenChip { - pub fixed_log2_rows: Option, - pub pad: bool, -} - -#[derive(Debug, Clone)] -pub struct ExpReverseBitsLenEvent { - /// The clk cycle for the event. - pub clk: F, - - /// Memory records to keep track of the value stored in the x parameter, and the current bit - /// of the exponent being scanned. - pub x: MemoryRecord, - pub current_bit: MemoryRecord, - - /// The length parameter of the function. - pub len: F, - - /// The previous accumulator value, needed to compute the current accumulator value. - pub prev_accum: F, - - /// The current accumulator value. - pub accum: F, - - /// A pointer to the memory address storing the exponent. - pub ptr: F, - - /// A pointer to the memory address storing the base. - pub base_ptr: F, - - /// Which step (in the range 0..len) of the computation we are in. - pub iteration_num: F, -} - -impl ExpReverseBitsLenEvent { - /// A way to construct a list of dummy events from input x and clk, used for testing. 
- pub fn dummy_from_input(x: F, exponent: u32, len: F, timestamp: F) -> Vec { - let mut events = Vec::new(); - let mut new_len = len; - let mut new_exponent = exponent; - let mut accum = F::one(); - - for i in 0..len.as_canonical_u32() { - let current_bit = new_exponent % 2; - let prev_accum = accum; - accum = prev_accum * prev_accum * if current_bit == 0 { F::one() } else { x }; - events.push(Self { - clk: timestamp + F::from_canonical_u32(i), - x: MemoryRecord::new_write( - F::one(), - Block::from([ - if i == len.as_canonical_u32() - 1 { accum } else { x }, - F::zero(), - F::zero(), - F::zero(), - ]), - timestamp + F::from_canonical_u32(i), - Block::from([x, F::zero(), F::zero(), F::zero()]), - timestamp + F::from_canonical_u32(i) - F::one(), - ), - current_bit: MemoryRecord::new_read( - F::zero(), - Block::from([ - F::from_canonical_u32(current_bit), - F::zero(), - F::zero(), - F::zero(), - ]), - timestamp + F::from_canonical_u32(i), - timestamp + F::from_canonical_u32(i) - F::one(), - ), - len: new_len, - prev_accum, - accum, - ptr: F::from_canonical_u32(i), - base_ptr: F::one(), - iteration_num: F::from_canonical_u32(i), - }); - new_exponent /= 2; - new_len -= F::one(); - } - assert_eq!( - accum, - x.exp_u64(reverse_bits_len(exponent as usize, len.as_canonical_u32() as usize) as u64) - ); - events - } -} - -#[derive(AlignedBorrow, Debug, Clone, Copy)] -#[repr(C)] -pub struct ExpReverseBitsLenCols { - pub clk: T, - - /// The base of the exponentiation. - pub x: MemoryReadWriteSingleCols, - - /// The length parameter of the exponentiation. This is decremented by 1 every iteration. - pub len: T, - - /// The current bit of the exponent. This is read from memory. - pub current_bit: MemoryReadSingleCols, - - /// The previous accumulator squared. - pub prev_accum_squared: T, - - /// The accumulator of the current iteration. - pub accum: T, - - /// A flag column to check whether the current row represents the last iteration of the - /// computation. - pub is_last: IsZeroOperation, - - /// A flag column to check whether the current row represents the first iteration of the - /// computation. - pub is_first: IsZeroOperation, - - /// A column to count up from 0 to the length of the exponent. - pub iteration_num: T, - - /// A column which equals x if `current_bit` is on, and 1 otherwise. - pub multiplier: T, - - /// The memory address storing the exponent. - pub ptr: T, - - /// The memory address storing the base. - pub base_ptr: T, - - /// A flag column to check whether the base_ptr memory is accessed. Is equal to `is_first` OR - /// `is_last`. - pub x_mem_access_flag: T, - - pub is_real: T, -} - -impl BaseAir for ExpReverseBitsLenChip { - fn width(&self) -> usize { - NUM_EXP_REVERSE_BITS_LEN_COLS - } -} - -impl MachineAir for ExpReverseBitsLenChip { - type Record = ExecutionRecord; - - type Program = RecursionProgram; - - fn name(&self) -> String { - "ExpReverseBitsLen".to_string() - } - - fn generate_dependencies(&self, _: &Self::Record, _: &mut Self::Record) { - // This is a no-op. 
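For reference, the accumulator recurrence used by `dummy_from_input` above (square, then multiply by `x` when the scanned bit is set) evaluates `x` raised to the bit-reversed exponent. A minimal sketch over integers modulo a prime, standing in for the field (the helper name and the use of the BabyBear modulus are illustrative):

    // Illustrative only: scanning the exponent LSB-first while squaring the
    // accumulator computes x^(bit_reverse(exp, len)), the value the chip verifies.
    const P: u64 = 0x7800_0001; // BabyBear modulus, used here just as an example prime

    fn exp_reverse_bits(x: u64, exp: u64, len: u32) -> u64 {
        let mut accum: u64 = 1;
        let mut e = exp;
        for _ in 0..len {
            let bit = e & 1;
            // accum <- accum^2 * (x if bit == 1 else 1), as in the event generation above.
            accum = accum * accum % P;
            if bit == 1 {
                accum = accum * x % P;
            }
            e >>= 1;
        }
        accum
    }

    fn main() {
        // 0b110 reversed over 3 bits is 0b011 = 3, so the result is x^3.
        let x = 5u64;
        assert_eq!(exp_reverse_bits(x, 0b110, 3), x.pow(3) % P);
    }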
- } - - #[instrument(name = "generate exp reverse bits len trace", level = "debug", skip_all, fields(rows = input.exp_reverse_bits_len_events.len()))] - fn generate_trace( - &self, - input: &ExecutionRecord, - _: &mut ExecutionRecord, - ) -> RowMajorMatrix { - let nb_events = input.exp_reverse_bits_len_events.len(); - let nb_rows = - if self.pad { next_power_of_two(nb_events, self.fixed_log2_rows) } else { nb_events }; - let mut values = vec![F::zero(); nb_rows * NUM_EXP_REVERSE_BITS_LEN_COLS]; - - par_for_each_row(&mut values, NUM_EXP_REVERSE_BITS_LEN_COLS, |i, row| { - if i >= nb_events { - return; - } - let event = &input.exp_reverse_bits_len_events[i]; - let cols: &mut ExpReverseBitsLenCols = row.borrow_mut(); - - cols.clk = event.clk; - - cols.x.populate(&event.x); - cols.current_bit.populate(&event.current_bit); - cols.len = event.len; - cols.accum = event.accum; - cols.prev_accum_squared = event.prev_accum * event.prev_accum; - cols.is_last.populate(F::one() - event.len); - cols.is_first.populate(event.iteration_num); - cols.is_real = F::one(); - cols.iteration_num = event.iteration_num; - cols.multiplier = - if event.current_bit.value == Block([F::one(), F::zero(), F::zero(), F::zero()]) { - // The event may change the value stored in the x memory access, and we need to - // use the previous value. - event.x.prev_value[0] - } else { - F::one() - }; - cols.ptr = event.ptr; - cols.base_ptr = event.base_ptr; - cols.x_mem_access_flag = - F::from_bool(cols.len == F::one() || cols.iteration_num == F::zero()); - }); - - // Convert the trace to a row major matrix. - let trace = RowMajorMatrix::new(values, NUM_EXP_REVERSE_BITS_LEN_COLS); - - #[cfg(debug_assertions)] - println!( - "exp reverse bits len trace dims is width: {:?}, height: {:?}", - trace.width(), - trace.height() - ); - - trace - } - - fn included(&self, record: &Self::Record) -> bool { - !record.exp_reverse_bits_len_events.is_empty() - } -} - -impl ExpReverseBitsLenChip { - pub fn eval_exp_reverse_bits_len< - AB: BaseAirBuilder + ExtensionAirBuilder + RecursionMemoryAirBuilder + SP1AirBuilder, - >( - &self, - builder: &mut AB, - local: &ExpReverseBitsLenCols, - next: &ExpReverseBitsLenCols, - memory_access: AB::Var, - ) { - // Dummy constraints to normalize to DEGREE when DEGREE > 3. - if DEGREE > 3 { - let lhs = (0..DEGREE).map(|_| local.is_real.into()).product::(); - let rhs = (0..DEGREE).map(|_| local.is_real.into()).product::(); - builder.assert_eq(lhs, rhs); - } - - // Constraint that the operands are sent from the CPU table. - let operands = - [local.clk.into(), local.base_ptr.into(), local.ptr.into(), local.len.into()]; - builder.receive_table( - Opcode::ExpReverseBitsLen.as_field::(), - &operands, - local.is_first.result, - ); - - // Make sure that local.is_first.result is not on for fake rows, so we don't receive - // operands for a fake row. - builder.when_not(local.is_real).assert_zero(local.is_first.result); - - IsZeroOperation::::eval( - builder, - AB::Expr::one() - local.len, - local.is_last, - local.is_real.into(), - ); - - IsZeroOperation::::eval( - builder, - local.iteration_num.into(), - local.is_first, - local.is_real.into(), - ); - - // All real columns need to be in succession. - builder.when_transition().assert_zero((AB::Expr::one() - local.is_real) * next.is_real); - - // Assert that the boolean columns are boolean. - builder.assert_bool(local.is_real); - - let current_bit_val = local.current_bit.access.value; - - // Probably redundant, but we assert here that the current bit value is boolean. 
- builder.assert_bool(current_bit_val); - - // Assert that `is_first` is on for the first row. - builder.when_first_row().assert_one(local.is_first.result); - - // Assert that the next row after a row for which `is_last` is on has `is_first` on. - builder - .when_transition() - .when(next.is_real * local.is_last.result) - .assert_one(next.is_first.result); - - // The accumulator needs to start with the multiplier for every `is_first` row. - builder.when(local.is_first.result).assert_eq(local.accum, local.multiplier); - - // Assert that the last real row has `is_last` on. - builder - .when_transition() - .when(local.is_real * (AB::Expr::one() - next.is_real)) - .assert_one(local.is_last.result); - - builder.when_last_row().when(local.is_real).assert_one(local.is_last.result); - - // `multiplier` is x if the current bit is 1, and 1 if the current bit is 0. - builder.when(current_bit_val).assert_eq(local.multiplier, local.x.prev_value); - builder - .when(local.is_real) - .when_not(current_bit_val) - .assert_eq(local.multiplier, AB::Expr::one()); - - // To get `next.accum`, we multiply `local.prev_accum_squared` by `local.multiplier` when - // not `is_first`. - builder - .when_not(local.is_first.result) - .assert_eq(local.accum, local.prev_accum_squared * local.multiplier); - - // Constrain the accum_squared column. - builder - .when_transition() - .when_not(local.is_last.result) - .assert_eq(next.prev_accum_squared, local.accum * local.accum); - - // Constrain the memory address `base_ptr` to be the same as the next, as long as not - // `is_last`. - builder - .when_transition() - .when_not(local.is_last.result) - .assert_eq(local.base_ptr, next.base_ptr); - - // Constrain the memory address `ptr` to increment by one except when - // `is_last` - builder - .when_transition() - .when(next.is_real) - .when_not(local.is_last.result) - .assert_eq(next.ptr, local.ptr + AB::Expr::one()); - - // The `len` counter must decrement when not `is_last`. - builder - .when_transition() - .when(local.is_real) - .when_not(local.is_last.result) - .assert_eq(local.len, next.len + AB::Expr::one()); - - // The `iteration_num` counter must increment when not `is_last`. - builder - .when_transition() - .when(local.is_real) - .when_not(local.is_last.result) - .assert_eq(local.iteration_num + AB::Expr::one(), next.iteration_num); - - // The `iteration_num` counter must be 0 iff `is_first` is on. - builder.when(local.is_first.result).assert_eq(local.iteration_num, AB::Expr::zero()); - - // Access the memory for current_bit. - builder.recursion_eval_memory_access_single( - local.clk, - local.ptr, - &local.current_bit, - memory_access, - ); - - // Constrain that the x_mem_access_flag is true when `is_first` or `is_last`. - builder.when(local.is_real).assert_eq( - local.x_mem_access_flag, - local.is_first.result + local.is_last.result - - local.is_first.result * local.is_last.result, - ); - - // Make sure that x is only accessed when `is_real` is 1. - builder.when_not(local.is_real).assert_zero(local.x_mem_access_flag); - - // Access the memory for x. - // This only needs to be done for the first and last iterations. - builder.recursion_eval_memory_access_single( - local.clk, - local.base_ptr, - &local.x, - local.x_mem_access_flag, - ); - - // The `base_ptr` column stays the same when not `is_last`. - builder - .when_transition() - .when(next.is_real) - .when_not(local.is_last.result) - .assert_eq(next.base_ptr, local.base_ptr); - - // Ensure sequential `clk` values. 
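The `x_mem_access_flag` constraint above encodes a boolean OR arithmetically: for bits a, b in {0, 1}, a + b - a*b equals a OR b, which is what makes the flag fire on the first or last iteration. A quick exhaustive check:

    // Illustrative only: the arithmetic OR identity used for `x_mem_access_flag`.
    fn main() {
        for a in 0..2i64 {
            for b in 0..2i64 {
                assert_eq!(a + b - a * b, a | b);
            }
        }
    }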
- builder - .when_transition() - .when_not(local.is_last.result) - .when(next.is_real) - .assert_eq(local.clk + AB::Expr::one(), next.clk); - - // Ensure that the value at the x memory access is unchanged when not `is_last`. - builder - .when_transition() - .when(next.is_real) - .when_not(local.is_last.result) - .assert_eq(local.x.access.value, next.x.prev_value); - - builder - .when_transition() - .when_not(local.is_last.result) - .assert_eq(local.x.access.value, local.x.prev_value); - - // Ensure that the value at the x memory access is `accum` when `is_last`. - builder.when(local.is_last.result).assert_eq(local.accum, local.x.access.value); - } - - pub const fn do_exp_bit_memory_access(local: &ExpReverseBitsLenCols) -> T { - local.is_real - } -} - -impl Air for ExpReverseBitsLenChip -where - AB: SP1RecursionAirBuilder, -{ - fn eval(&self, builder: &mut AB) { - let main = builder.main(); - let (local, next) = (main.row_slice(0), main.row_slice(1)); - let local: &ExpReverseBitsLenCols = (*local).borrow(); - let next: &ExpReverseBitsLenCols = (*next).borrow(); - self.eval_exp_reverse_bits_len::( - builder, - local, - next, - Self::do_exp_bit_memory_access::(local), - ); - } -} - -#[cfg(test)] -mod tests { - use itertools::Itertools; - use sp1_stark::{air::MachineAir, baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; - use std::time::Instant; - - use p3_baby_bear::{BabyBear, DiffusionMatrixBabyBear}; - use p3_field::AbstractField; - use p3_matrix::{dense::RowMajorMatrix, Matrix}; - use p3_poseidon2::{Poseidon2, Poseidon2ExternalMatrixGeneral}; - use sp1_core_machine::utils::{uni_stark_prove, uni_stark_verify}; - - use crate::{ - exp_reverse_bits::{ExpReverseBitsLenChip, ExpReverseBitsLenEvent}, - runtime::ExecutionRecord, - }; - - #[test] - fn prove_babybear() { - let config = BabyBearPoseidon2::compressed(); - let mut challenger = config.challenger(); - - let chip = ExpReverseBitsLenChip::<5> { pad: true, fixed_log2_rows: None }; - - let test_xs = (1..16).map(BabyBear::from_canonical_u32).collect_vec(); - - let test_exponents = (1..16).collect_vec(); - - let mut input_exec = ExecutionRecord::::default(); - for (x, exponent) in test_xs.into_iter().zip_eq(test_exponents) { - let mut events = ExpReverseBitsLenEvent::dummy_from_input( - x, - exponent, - BabyBear::from_canonical_u32(exponent.ilog2() + 1), - x, - ); - input_exec.exp_reverse_bits_len_events.append(&mut events); - } - println!("input exec: {:?}", input_exec.exp_reverse_bits_len_events.len()); - let trace: RowMajorMatrix = - chip.generate_trace(&input_exec, &mut ExecutionRecord::::default()); - println!("trace dims is width: {:?}, height: {:?}", trace.width(), trace.height()); - - let start = Instant::now(); - let proof = uni_stark_prove(&config, &chip, &mut challenger, trace); - let duration = start.elapsed().as_secs_f64(); - println!("proof duration = {:?}", duration); - - let mut challenger: p3_challenger::DuplexChallenger< - BabyBear, - Poseidon2, - 16, - 8, - > = config.challenger(); - let start = Instant::now(); - uni_stark_verify(&config, &chip, &mut challenger, &proof) - .expect("expected proof to be valid"); - - let duration = start.elapsed().as_secs_f64(); - println!("verify duration = {:?}", duration); - } -} diff --git a/crates/recursion/core/src/fri_fold/mod.rs b/crates/recursion/core/src/fri_fold/mod.rs deleted file mode 100644 index cbde3e91db..0000000000 --- a/crates/recursion/core/src/fri_fold/mod.rs +++ /dev/null @@ -1,370 +0,0 @@ -#![allow(clippy::needless_range_loop)] - -use crate::{ - 
memory::{MemoryReadCols, MemoryReadSingleCols, MemoryReadWriteCols}, - runtime::Opcode, -}; -use core::borrow::Borrow; -use p3_air::{Air, AirBuilder, BaseAir}; -use p3_field::{AbstractField, PrimeField32}; -use p3_matrix::{dense::RowMajorMatrix, Matrix}; -use sp1_core_machine::utils::{next_power_of_two, par_for_each_row}; -use sp1_derive::AlignedBorrow; -use sp1_stark::air::{BaseAirBuilder, BinomialExtension, MachineAir}; -use std::borrow::BorrowMut; -use tracing::instrument; - -use crate::{ - air::SP1RecursionAirBuilder, - memory::MemoryRecord, - runtime::{ExecutionRecord, RecursionProgram}, -}; - -pub const NUM_FRI_FOLD_COLS: usize = core::mem::size_of::>(); - -#[derive(Default)] -pub struct FriFoldChip { - pub fixed_log2_rows: Option, - pub pad: bool, -} - -#[derive(Debug, Clone)] -pub struct FriFoldEvent { - pub clk: F, - pub m: F, - pub input_ptr: F, - pub is_last_iteration: F, - - pub z: MemoryRecord, - pub alpha: MemoryRecord, - pub x: MemoryRecord, - pub log_height: MemoryRecord, - pub mat_opening_ptr: MemoryRecord, - pub ps_at_z_ptr: MemoryRecord, - pub alpha_pow_ptr: MemoryRecord, - pub ro_ptr: MemoryRecord, - - pub p_at_x: MemoryRecord, - pub p_at_z: MemoryRecord, - - pub alpha_pow_at_log_height: MemoryRecord, - pub ro_at_log_height: MemoryRecord, -} - -#[derive(AlignedBorrow, Debug, Clone, Copy)] -#[repr(C)] -pub struct FriFoldCols { - pub clk: T, - - /// The parameters into the FRI fold precompile. These values are only read from memory. - pub m: T, - pub input_ptr: T, - - /// At the last iteraction of a FRI_FOLD invocation. - pub is_last_iteration: T, - - /// The inputs stored in memory. All the values are just read from memory. - pub z: MemoryReadCols, - pub alpha: MemoryReadCols, - pub x: MemoryReadSingleCols, - - pub log_height: MemoryReadSingleCols, - pub mat_opening_ptr: MemoryReadSingleCols, - pub ps_at_z_ptr: MemoryReadSingleCols, - pub alpha_pow_ptr: MemoryReadSingleCols, - pub ro_ptr: MemoryReadSingleCols, - - pub p_at_x: MemoryReadCols, - pub p_at_z: MemoryReadCols, - - /// The values here are read and then written. - pub alpha_pow_at_log_height: MemoryReadWriteCols, - pub ro_at_log_height: MemoryReadWriteCols, - - pub is_real: T, -} - -impl BaseAir for FriFoldChip { - fn width(&self) -> usize { - NUM_FRI_FOLD_COLS - } -} - -impl MachineAir for FriFoldChip { - type Record = ExecutionRecord; - - type Program = RecursionProgram; - - fn name(&self) -> String { - "FriFold".to_string() - } - - fn generate_dependencies(&self, _: &Self::Record, _: &mut Self::Record) { - // This is a no-op. 
- } - - #[instrument(name = "generate fri fold trace", level = "debug", skip_all, fields(rows = input.fri_fold_events.len()))] - fn generate_trace( - &self, - input: &ExecutionRecord, - _: &mut ExecutionRecord, - ) -> RowMajorMatrix { - let nb_events = input.fri_fold_events.len(); - let nb_rows = - if self.pad { next_power_of_two(nb_events, self.fixed_log2_rows) } else { nb_events }; - let mut values = vec![F::zero(); nb_rows * NUM_FRI_FOLD_COLS]; - - par_for_each_row(&mut values, NUM_FRI_FOLD_COLS, |i, row| { - if i >= nb_events { - return; - } - let event = &input.fri_fold_events[i]; - let cols: &mut FriFoldCols = row.borrow_mut(); - - cols.clk = event.clk; - cols.m = event.m; - cols.input_ptr = event.input_ptr; - cols.is_last_iteration = event.is_last_iteration; - cols.is_real = F::one(); - - cols.z.populate(&event.z); - cols.alpha.populate(&event.alpha); - cols.x.populate(&event.x); - cols.log_height.populate(&event.log_height); - cols.mat_opening_ptr.populate(&event.mat_opening_ptr); - cols.ps_at_z_ptr.populate(&event.ps_at_z_ptr); - cols.alpha_pow_ptr.populate(&event.alpha_pow_ptr); - cols.ro_ptr.populate(&event.ro_ptr); - - cols.p_at_x.populate(&event.p_at_x); - cols.p_at_z.populate(&event.p_at_z); - - cols.alpha_pow_at_log_height.populate(&event.alpha_pow_at_log_height); - cols.ro_at_log_height.populate(&event.ro_at_log_height); - }); - - // Convert the trace to a row major matrix. - let trace = RowMajorMatrix::new(values, NUM_FRI_FOLD_COLS); - - #[cfg(debug_assertions)] - println!("fri fold trace dims is width: {:?}, height: {:?}", trace.width(), trace.height()); - - trace - } - - fn included(&self, record: &Self::Record) -> bool { - !record.fri_fold_events.is_empty() - } -} - -impl FriFoldChip { - pub fn eval_fri_fold( - &self, - builder: &mut AB, - local: &FriFoldCols, - next: &FriFoldCols, - receive_table: AB::Var, - memory_access: AB::Var, - ) { - // Constraint that the operands are sent from the CPU table. - let first_iteration_clk = local.clk.into() - local.m.into(); - let total_num_iterations = local.m.into() + AB::Expr::one(); - let operands = - [first_iteration_clk, total_num_iterations, local.input_ptr.into(), AB::Expr::zero()]; - builder.receive_table(Opcode::FRIFold.as_field::(), &operands, receive_table); - - builder.assert_bool(local.is_last_iteration); - builder.assert_bool(local.is_real); - - builder - .when_transition() - .when_not(local.is_last_iteration) - .assert_eq(local.is_real, next.is_real); - - builder.when(local.is_last_iteration).assert_one(local.is_real); - - builder.when_transition().when_not(local.is_real).assert_zero(next.is_real); - - builder.when_last_row().when_not(local.is_last_iteration).assert_zero(local.is_real); - - // Ensure that all first iteration rows has a m value of 0. - builder.when_first_row().assert_zero(local.m); - builder - .when(local.is_last_iteration) - .when_transition() - .when(next.is_real) - .assert_zero(next.m); - - // Ensure that all rows for a FRI FOLD invocation have the same input_ptr and sequential clk - // and m values. 
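Taken together, the memory accesses and the two update constraints below encode one iteration of the FRI fold accumulation: the row reads p_at_x and p_at_z at offset m, folds their quotient into the reduced opening at `log_height` using the current alpha power, and then advances that alpha power. A rough reference of that update, collapsing base and extension fields into a single prime field and using hypothetical helper names:

    // Illustrative only: the per-iteration update the FRI fold constraints pin down.
    //   ro[log_height]        += alpha_pow[log_height] * (p_at_x - p_at_z) / (x - z)
    //   alpha_pow[log_height] *= alpha
    const P: u64 = 0x7800_0001; // example prime standing in for the field

    fn add(a: u64, b: u64) -> u64 { (a + b) % P }
    fn sub(a: u64, b: u64) -> u64 { (a + P - b) % P }
    fn mul(a: u64, b: u64) -> u64 { a * b % P }
    fn pow(mut a: u64, mut e: u64) -> u64 {
        let mut r = 1;
        while e > 0 {
            if e & 1 == 1 { r = mul(r, a); }
            a = mul(a, a);
            e >>= 1;
        }
        r
    }
    fn inv(a: u64) -> u64 { pow(a, P - 2) } // Fermat inverse; assumes a != 0

    fn fri_fold_iteration(
        alpha: u64, z: u64, x: u64, p_at_x: u64, p_at_z: u64,
        alpha_pow: &mut u64, ro: &mut u64,
    ) {
        let quotient = mul(sub(p_at_x, p_at_z), inv(sub(x, z)));
        *ro = add(*ro, mul(*alpha_pow, quotient)); // uses the old alpha power
        *alpha_pow = mul(*alpha_pow, alpha); // then advance it for the next row
    }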
- builder - .when_transition() - .when_not(local.is_last_iteration) - .when(next.is_real) - .assert_eq(next.m, local.m + AB::Expr::one()); - builder - .when_transition() - .when_not(local.is_last_iteration) - .when(next.is_real) - .assert_eq(local.input_ptr, next.input_ptr); - builder - .when_transition() - .when_not(local.is_last_iteration) - .when(next.is_real) - .assert_eq(local.clk + AB::Expr::one(), next.clk); - - // Constrain read for `z` at `input_ptr` - builder.recursion_eval_memory_access( - local.clk, - local.input_ptr + AB::Expr::zero(), - &local.z, - memory_access, - ); - - // Constrain read for `alpha` - builder.recursion_eval_memory_access( - local.clk, - local.input_ptr + AB::Expr::one(), - &local.alpha, - memory_access, - ); - - // Constrain read for `x` - builder.recursion_eval_memory_access_single( - local.clk, - local.input_ptr + AB::Expr::from_canonical_u32(2), - &local.x, - memory_access, - ); - - // Constrain read for `log_height` - builder.recursion_eval_memory_access_single( - local.clk, - local.input_ptr + AB::Expr::from_canonical_u32(3), - &local.log_height, - memory_access, - ); - - // Constrain read for `mat_opening_ptr` - builder.recursion_eval_memory_access_single( - local.clk, - local.input_ptr + AB::Expr::from_canonical_u32(4), - &local.mat_opening_ptr, - memory_access, - ); - - // Constrain read for `ps_at_z_ptr` - builder.recursion_eval_memory_access_single( - local.clk, - local.input_ptr + AB::Expr::from_canonical_u32(6), - &local.ps_at_z_ptr, - memory_access, - ); - - // Constrain read for `alpha_pow_ptr` - builder.recursion_eval_memory_access_single( - local.clk, - local.input_ptr + AB::Expr::from_canonical_u32(8), - &local.alpha_pow_ptr, - memory_access, - ); - - // Constrain read for `ro_ptr` - builder.recursion_eval_memory_access_single( - local.clk, - local.input_ptr + AB::Expr::from_canonical_u32(10), - &local.ro_ptr, - memory_access, - ); - - // Constrain read for `p_at_x` - builder.recursion_eval_memory_access( - local.clk, - local.mat_opening_ptr.access.value.into() + local.m.into(), - &local.p_at_x, - memory_access, - ); - - // Constrain read for `p_at_z` - builder.recursion_eval_memory_access( - local.clk, - local.ps_at_z_ptr.access.value.into() + local.m.into(), - &local.p_at_z, - memory_access, - ); - - // Update alpha_pow_at_log_height. - // 1. Constrain old and new value against memory - builder.recursion_eval_memory_access( - local.clk, - local.alpha_pow_ptr.access.value.into() + local.log_height.access.value.into(), - &local.alpha_pow_at_log_height, - memory_access, - ); - - // 2. Constrain new_value = old_value * alpha. - let alpha = local.alpha.access.value.as_extension::(); - let alpha_pow_at_log_height = local.alpha_pow_at_log_height.prev_value.as_extension::(); - let new_alpha_pow_at_log_height = - local.alpha_pow_at_log_height.access.value.as_extension::(); - - builder.assert_ext_eq(alpha_pow_at_log_height.clone() * alpha, new_alpha_pow_at_log_height); - - // Update ro_at_log_height. - // 1. Constrain old and new value against memory. - builder.recursion_eval_memory_access( - local.clk, - local.ro_ptr.access.value.into() + local.log_height.access.value.into(), - &local.ro_at_log_height, - memory_access, - ); - - // 2. 
Constrain new_value = old_alpha_pow_at_log_height * quotient + old_value, - // where quotient = (p_at_x - p_at_z) / (x - z) - // <=> (new_value - old_value) * (z - x) = old_alpha_pow_at_log_height * (p_at_x - p_at_z) - let p_at_z = local.p_at_z.access.value.as_extension::(); - let p_at_x = local.p_at_x.access.value.as_extension::(); - let z = local.z.access.value.as_extension::(); - let x = local.x.access.value.into(); - - let ro_at_log_height = local.ro_at_log_height.prev_value.as_extension::(); - let new_ro_at_log_height = local.ro_at_log_height.access.value.as_extension::(); - builder.assert_ext_eq( - (new_ro_at_log_height - ro_at_log_height) * (BinomialExtension::from_base(x) - z), - (p_at_x - p_at_z) * alpha_pow_at_log_height, - ); - } - - pub const fn do_receive_table(local: &FriFoldCols) -> T { - local.is_last_iteration - } - - pub const fn do_memory_access(local: &FriFoldCols) -> T { - local.is_real - } -} - -impl Air for FriFoldChip -where - AB: SP1RecursionAirBuilder, -{ - fn eval(&self, builder: &mut AB) { - let main = builder.main(); - let (local, next) = (main.row_slice(0), main.row_slice(1)); - let local: &FriFoldCols = (*local).borrow(); - let next: &FriFoldCols = (*next).borrow(); - - // Dummy constraints to normalize to DEGREE. - let lhs = (0..DEGREE).map(|_| local.is_real.into()).product::(); - let rhs = (0..DEGREE).map(|_| local.is_real.into()).product::(); - builder.assert_eq(lhs, rhs); - - self.eval_fri_fold::( - builder, - local, - next, - Self::do_receive_table::(local), - Self::do_memory_access::(local), - ); - } -} diff --git a/crates/recursion/core/src/lib.rs b/crates/recursion/core/src/lib.rs index f1c93c956d..63fb26691d 100644 --- a/crates/recursion/core/src/lib.rs +++ b/crates/recursion/core/src/lib.rs @@ -1,13 +1,210 @@ +use p3_field::PrimeField64; +use serde::{Deserialize, Serialize}; +use sp1_derive::AlignedBorrow; + +use crate::air::{Block, RecursionPublicValues}; + pub mod air; -pub mod cpu; -pub mod exp_reverse_bits; -pub mod fri_fold; -pub mod memory; -pub mod multi; -pub mod poseidon2_wide; -pub mod program; -pub mod range_check; +pub mod builder; +pub mod chips; +pub mod machine; pub mod runtime; +pub mod shape; pub mod stark; -extern crate alloc; +pub use runtime::*; + +// Re-export the stark stuff from `sp1_recursion_core` for now, until we will migrate it here. +// pub use sp1_recursion_core::stark; + +use crate::chips::poseidon2_skinny::WIDTH; + +#[derive( + AlignedBorrow, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize, Default, +)] +#[repr(transparent)] +pub struct Address(pub F); + +impl Address { + #[inline] + pub fn as_usize(&self) -> usize { + self.0.as_canonical_u64() as usize + } +} + +// ------------------------------------------------------------------------------------------------- + +/// The inputs and outputs to an operation of the base field ALU. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[repr(C)] +pub struct BaseAluIo { + pub out: V, + pub in1: V, + pub in2: V, +} + +pub type BaseAluEvent = BaseAluIo; + +/// An instruction invoking the extension field ALU. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct BaseAluInstr { + pub opcode: BaseAluOpcode, + pub mult: F, + pub addrs: BaseAluIo>, +} + +// ------------------------------------------------------------------------------------------------- + +/// The inputs and outputs to an operation of the extension field ALU. 
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[repr(C)] +pub struct ExtAluIo { + pub out: V, + pub in1: V, + pub in2: V, +} + +pub type ExtAluEvent = ExtAluIo>; + +/// An instruction invoking the extension field ALU. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ExtAluInstr { + pub opcode: ExtAluOpcode, + pub mult: F, + pub addrs: ExtAluIo>, +} + +// ------------------------------------------------------------------------------------------------- + +/// The inputs and outputs to the manual memory management/memory initialization table. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct MemIo { + pub inner: V, +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct MemInstr { + pub addrs: MemIo>, + pub vals: MemIo>, + pub mult: F, + pub kind: MemAccessKind, +} + +pub type MemEvent = MemIo>; + +// ------------------------------------------------------------------------------------------------- + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub enum MemAccessKind { + Read, + Write, +} + +/// The inputs and outputs to a Poseidon2 permutation. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct Poseidon2Io { + pub input: [V; WIDTH], + pub output: [V; WIDTH], +} + +/// An instruction invoking the Poseidon2 permutation. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct Poseidon2SkinnyInstr { + pub addrs: Poseidon2Io>, + pub mults: [F; WIDTH], +} + +pub type Poseidon2Event = Poseidon2Io; + +/// The inputs and outputs to an exp-reverse-bits operation. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct ExpReverseBitsIo { + pub base: V, + // The bits of the exponent in little-endian order in a vec. + pub exp: Vec, + pub result: V, +} + +pub type Poseidon2WideEvent = Poseidon2Io; +pub type Poseidon2Instr = Poseidon2SkinnyInstr; + +/// An instruction invoking the exp-reverse-bits operation. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ExpReverseBitsInstr { + pub addrs: ExpReverseBitsIo>, + pub mult: F, +} + +/// The event encoding the inputs and outputs of an exp-reverse-bits operation. The `len` operand is +/// now stored as the length of the `exp` field. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct ExpReverseBitsEvent { + pub base: F, + pub exp: Vec, + pub result: F, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct FriFoldIo { + pub ext_single: FriFoldExtSingleIo>, + pub ext_vec: FriFoldExtVecIo>>, + pub base_single: FriFoldBaseIo, +} + +/// The extension-field-valued single inputs to the FRI fold operation. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct FriFoldExtSingleIo { + pub z: V, + pub alpha: V, +} + +/// The extension-field-valued vector inputs to the FRI fold operation. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct FriFoldExtVecIo { + pub mat_opening: V, + pub ps_at_z: V, + pub alpha_pow_input: V, + pub ro_input: V, + pub alpha_pow_output: V, + pub ro_output: V, +} + +/// The base-field-valued inputs to the FRI fold operation. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct FriFoldBaseIo { + pub x: V, +} + +/// An instruction invoking the FRI fold operation. Addresses for extension field elements are of +/// the same type as for base field elements. 
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct FriFoldInstr { + pub base_single_addrs: FriFoldBaseIo>, + pub ext_single_addrs: FriFoldExtSingleIo>, + pub ext_vec_addrs: FriFoldExtVecIo>>, + pub alpha_pow_mults: Vec, + pub ro_mults: Vec, +} + +/// The event encoding the data of a single iteration within the FRI fold operation. +/// For any given event, we are accessing a single element of the `Vec` inputs, so that the event +/// is not a type alias for `FriFoldIo` like many of the other events. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct FriFoldEvent { + pub base_single: FriFoldBaseIo, + pub ext_single: FriFoldExtSingleIo>, + pub ext_vec: FriFoldExtVecIo>, +} + +/// An instruction that will save the public values to the execution record and will commit to +/// its digest. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct CommitPublicValuesInstr { + pub pv_addrs: RecursionPublicValues>, +} + +/// The event for committing to the public values. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct CommitPublicValuesEvent { + pub public_values: RecursionPublicValues, +} diff --git a/crates/recursion/core/src/machine.rs b/crates/recursion/core/src/machine.rs new file mode 100644 index 0000000000..48713ba9df --- /dev/null +++ b/crates/recursion/core/src/machine.rs @@ -0,0 +1,359 @@ +use std::ops::{Add, AddAssign}; + +use hashbrown::HashMap; +use p3_field::{extension::BinomiallyExtendable, PrimeField32}; +use sp1_stark::{ + air::{InteractionScope, MachineAir}, + Chip, ProofShape, StarkGenericConfig, StarkMachine, PROOF_MAX_NUM_PVS, +}; + +use crate::{ + chips::{ + alu_base::{BaseAluChip, NUM_BASE_ALU_ENTRIES_PER_ROW}, + alu_ext::{ExtAluChip, NUM_EXT_ALU_ENTRIES_PER_ROW}, + exp_reverse_bits::ExpReverseBitsLenChip, + fri_fold::FriFoldChip, + mem::{ + constant::NUM_CONST_MEM_ENTRIES_PER_ROW, variable::NUM_VAR_MEM_ENTRIES_PER_ROW, + MemoryConstChip, MemoryVarChip, + }, + poseidon2_skinny::Poseidon2SkinnyChip, + poseidon2_wide::Poseidon2WideChip, + public_values::{PublicValuesChip, PUB_VALUES_LOG_HEIGHT}, + }, + instruction::{HintBitsInstr, HintExt2FeltsInstr, HintInstr}, + shape::RecursionShape, + ExpReverseBitsInstr, Instruction, RecursionProgram, D, +}; + +#[derive(sp1_derive::MachineAir)] +#[sp1_core_path = "sp1_core_machine"] +#[execution_record_path = "crate::ExecutionRecord"] +#[program_path = "crate::RecursionProgram"] +#[builder_path = "crate::builder::SP1RecursionAirBuilder"] +#[eval_trait_bound = "AB::Var: 'static"] +pub enum RecursionAir, const DEGREE: usize> { + MemoryConst(MemoryConstChip), + MemoryVar(MemoryVarChip), + BaseAlu(BaseAluChip), + ExtAlu(ExtAluChip), + Poseidon2Skinny(Poseidon2SkinnyChip), + Poseidon2Wide(Poseidon2WideChip), + FriFold(FriFoldChip), + ExpReverseBitsLen(ExpReverseBitsLenChip), + PublicValues(PublicValuesChip), +} + +#[derive(Debug, Clone, Copy, Default)] +pub struct RecursionAirEventCount { + mem_const_events: usize, + mem_var_events: usize, + base_alu_events: usize, + ext_alu_events: usize, + poseidon2_wide_events: usize, + fri_fold_events: usize, + exp_reverse_bits_len_events: usize, +} + +impl, const DEGREE: usize> RecursionAir { + /// Get a machine with all chips, except the dummy chip.
+ pub fn machine_wide_with_all_chips>( + config: SC, + ) -> StarkMachine { + let chips = [ + RecursionAir::MemoryConst(MemoryConstChip::default()), + RecursionAir::MemoryVar(MemoryVarChip::default()), + RecursionAir::BaseAlu(BaseAluChip), + RecursionAir::ExtAlu(ExtAluChip), + RecursionAir::Poseidon2Wide(Poseidon2WideChip::), + RecursionAir::FriFold(FriFoldChip::::default()), + RecursionAir::ExpReverseBitsLen(ExpReverseBitsLenChip::), + RecursionAir::PublicValues(PublicValuesChip), + ] + .map(Chip::new) + .into_iter() + .collect::>(); + StarkMachine::new(config, chips, PROOF_MAX_NUM_PVS, false) + } + + /// Get a machine with all chips, except the dummy chip. + pub fn machine_skinny_with_all_chips>( + config: SC, + ) -> StarkMachine { + let chips = [ + RecursionAir::MemoryConst(MemoryConstChip::default()), + RecursionAir::MemoryVar(MemoryVarChip::default()), + RecursionAir::BaseAlu(BaseAluChip), + RecursionAir::ExtAlu(ExtAluChip), + RecursionAir::Poseidon2Skinny(Poseidon2SkinnyChip::::default()), + RecursionAir::FriFold(FriFoldChip::::default()), + RecursionAir::ExpReverseBitsLen(ExpReverseBitsLenChip::), + RecursionAir::PublicValues(PublicValuesChip), + ] + .map(Chip::new) + .into_iter() + .collect::>(); + StarkMachine::new(config, chips, PROOF_MAX_NUM_PVS, false) + } + + /// A machine with dynamic chip sizes that includes the wide variant of the Poseidon2 chip. + pub fn compress_machine>(config: SC) -> StarkMachine { + let chips = [ + RecursionAir::MemoryConst(MemoryConstChip::default()), + RecursionAir::MemoryVar(MemoryVarChip::default()), + RecursionAir::BaseAlu(BaseAluChip), + RecursionAir::ExtAlu(ExtAluChip), + RecursionAir::Poseidon2Wide(Poseidon2WideChip::), + RecursionAir::ExpReverseBitsLen(ExpReverseBitsLenChip::), + RecursionAir::PublicValues(PublicValuesChip), + ] + .map(Chip::new) + .into_iter() + .collect::>(); + StarkMachine::new(config, chips, PROOF_MAX_NUM_PVS, false) + } + + pub fn shrink_machine>(config: SC) -> StarkMachine { + Self::compress_machine(config) + } + + /// A machine with dynamic chip sizes that includes the skinny variant of the Poseidon2 chip. + /// + /// This machine assumes that the `shrink` stage has a fixed shape, so there is no need to + /// fix the trace sizes.
+ pub fn wrap_machine>(config: SC) -> StarkMachine { + let chips = [ + RecursionAir::MemoryConst(MemoryConstChip::default()), + RecursionAir::MemoryVar(MemoryVarChip::default()), + RecursionAir::BaseAlu(BaseAluChip), + RecursionAir::ExtAlu(ExtAluChip), + RecursionAir::Poseidon2Skinny(Poseidon2SkinnyChip::::default()), + // RecursionAir::ExpReverseBitsLen(ExpReverseBitsLenChip::), + RecursionAir::PublicValues(PublicValuesChip), + ] + .map(Chip::new) + .into_iter() + .collect::>(); + StarkMachine::new(config, chips, PROOF_MAX_NUM_PVS, false) + } + + pub fn shrink_shape() -> RecursionShape { + let shape = HashMap::from( + [ + (Self::MemoryConst(MemoryConstChip::default()), 17), + (Self::MemoryVar(MemoryVarChip::default()), 18), + (Self::BaseAlu(BaseAluChip), 20), + (Self::ExtAlu(ExtAluChip), 18), + (Self::Poseidon2Wide(Poseidon2WideChip::), 16), + (Self::ExpReverseBitsLen(ExpReverseBitsLenChip::), 16), + (Self::PublicValues(PublicValuesChip), PUB_VALUES_LOG_HEIGHT), + ] + .map(|(chip, log_height)| (chip.name(), log_height)), + ); + RecursionShape { inner: shape } + } + + pub fn heights(program: &RecursionProgram) -> Vec<(String, usize)> { + let heights = program + .instructions + .iter() + .fold(RecursionAirEventCount::default(), |heights, instruction| heights + instruction); + + [ + ( + Self::MemoryConst(MemoryConstChip::default()), + heights.mem_const_events.div_ceil(NUM_CONST_MEM_ENTRIES_PER_ROW), + ), + ( + Self::MemoryVar(MemoryVarChip::default()), + heights.mem_var_events.div_ceil(NUM_VAR_MEM_ENTRIES_PER_ROW), + ), + ( + Self::BaseAlu(BaseAluChip), + heights.base_alu_events.div_ceil(NUM_BASE_ALU_ENTRIES_PER_ROW), + ), + ( + Self::ExtAlu(ExtAluChip), + heights.ext_alu_events.div_ceil(NUM_EXT_ALU_ENTRIES_PER_ROW), + ), + (Self::Poseidon2Wide(Poseidon2WideChip::), heights.poseidon2_wide_events), + ( + Self::ExpReverseBitsLen(ExpReverseBitsLenChip::), + heights.exp_reverse_bits_len_events, + ), + (Self::PublicValues(PublicValuesChip), PUB_VALUES_LOG_HEIGHT), + ] + .map(|(chip, log_height)| (chip.name(), log_height)) + .to_vec() + } +} + +impl AddAssign<&Instruction> for RecursionAirEventCount { + #[inline] + fn add_assign(&mut self, rhs: &Instruction) { + match rhs { + Instruction::BaseAlu(_) => self.base_alu_events += 1, + Instruction::ExtAlu(_) => self.ext_alu_events += 1, + Instruction::Mem(_) => self.mem_const_events += 1, + Instruction::Poseidon2(_) => self.poseidon2_wide_events += 1, + Instruction::ExpReverseBitsLen(ExpReverseBitsInstr { addrs, .. 
+impl<F> AddAssign<&Instruction<F>> for RecursionAirEventCount {
+    #[inline]
+    fn add_assign(&mut self, rhs: &Instruction<F>) {
+        match rhs {
+            Instruction::BaseAlu(_) => self.base_alu_events += 1,
+            Instruction::ExtAlu(_) => self.ext_alu_events += 1,
+            Instruction::Mem(_) => self.mem_const_events += 1,
+            Instruction::Poseidon2(_) => self.poseidon2_wide_events += 1,
+            Instruction::ExpReverseBitsLen(ExpReverseBitsInstr { addrs, .. }) => {
+                self.exp_reverse_bits_len_events += addrs.exp.len()
+            }
+            Instruction::Hint(HintInstr { output_addrs_mults })
+            | Instruction::HintBits(HintBitsInstr {
+                output_addrs_mults,
+                input_addr: _, // No receive interaction for the hint operation
+            }) => self.mem_var_events += output_addrs_mults.len(),
+            Instruction::HintExt2Felts(HintExt2FeltsInstr {
+                output_addrs_mults,
+                input_addr: _, // No receive interaction for the hint operation
+            }) => self.mem_var_events += output_addrs_mults.len(),
+            Instruction::FriFold(_) => self.fri_fold_events += 1,
+            Instruction::CommitPublicValues(_) => {}
+            Instruction::Print(_) => {}
+        }
+    }
+}
+
+impl<F> Add<&Instruction<F>> for RecursionAirEventCount {
+    type Output = Self;
+
+    #[inline]
+    fn add(mut self, rhs: &Instruction<F>) -> Self::Output {
+        self += rhs;
+        self
+    }
+}
+
+impl From<RecursionShape> for ProofShape {
+    fn from(value: RecursionShape) -> Self {
+        value.inner.into_iter().collect()
+    }
+}
+
+#[cfg(test)]
+pub mod tests {
+
+    use std::{iter::once, sync::Arc};
+
+    use machine::RecursionAir;
+    use p3_baby_bear::DiffusionMatrixBabyBear;
+    use p3_field::{
+        extension::{BinomialExtensionField, HasFrobenius},
+        AbstractExtensionField, AbstractField, Field,
+    };
+    use rand::prelude::*;
+    use sp1_core_machine::utils::run_test_machine;
+    use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig};
+
+    // TODO expand glob import
+    use crate::{runtime::instruction as instr, *};
+
+    type SC = BabyBearPoseidon2;
+    type F = <SC as StarkGenericConfig>::Val;
+    type EF = <SC as StarkGenericConfig>::Challenge;
+    type A = RecursionAir<F, 3>;
+    type B = RecursionAir<F, 9>;
+
+    /// Runs the given program on machines that use the wide and skinny Poseidon2 chips.
+    pub fn run_recursion_test_machines(program: RecursionProgram<F>) {
+        let program = Arc::new(program);
+        let mut runtime =
+            Runtime::<F, EF, DiffusionMatrixBabyBear>::new(program.clone(), SC::new().perm);
+        runtime.run().unwrap();
+
+        // Run with the poseidon2 wide chip.
+        let machine = A::machine_wide_with_all_chips(BabyBearPoseidon2::default());
+        let (pk, vk) = machine.setup(&program);
+        let result = run_test_machine(vec![runtime.record.clone()], machine, pk, vk);
+        if let Err(e) = result {
+            panic!("Verification failed: {:?}", e);
+        }
+
+        // Run with the poseidon2 skinny chip.
+ let skinny_machine = + B::machine_skinny_with_all_chips(BabyBearPoseidon2::ultra_compressed()); + let (pk, vk) = skinny_machine.setup(&program); + let result = run_test_machine(vec![runtime.record], skinny_machine, pk, vk); + if let Err(e) = result { + panic!("Verification failed: {:?}", e); + } + } + + fn test_instructions(instructions: Vec>) { + let program = RecursionProgram { instructions, ..Default::default() }; + run_recursion_test_machines(program); + } + + #[test] + pub fn fibonacci() { + let n = 10; + + let instructions = once(instr::mem(MemAccessKind::Write, 1, 0, 0)) + .chain(once(instr::mem(MemAccessKind::Write, 2, 1, 1))) + .chain((2..=n).map(|i| instr::base_alu(BaseAluOpcode::AddF, 2, i, i - 2, i - 1))) + .chain(once(instr::mem(MemAccessKind::Read, 1, n - 1, 34))) + .chain(once(instr::mem(MemAccessKind::Read, 2, n, 55))) + .collect::>(); + + test_instructions(instructions); + } + + #[test] + #[should_panic] + pub fn div_nonzero_by_zero() { + let instructions = vec![ + instr::mem(MemAccessKind::Write, 1, 0, 0), + instr::mem(MemAccessKind::Write, 1, 1, 1), + instr::base_alu(BaseAluOpcode::DivF, 1, 2, 1, 0), + instr::mem(MemAccessKind::Read, 1, 2, 1), + ]; + + test_instructions(instructions); + } + + #[test] + pub fn div_zero_by_zero() { + let instructions = vec![ + instr::mem(MemAccessKind::Write, 1, 0, 0), + instr::mem(MemAccessKind::Write, 1, 1, 0), + instr::base_alu(BaseAluOpcode::DivF, 1, 2, 1, 0), + instr::mem(MemAccessKind::Read, 1, 2, 1), + ]; + + test_instructions(instructions); + } + + #[test] + pub fn field_norm() { + let mut instructions = Vec::new(); + + let mut rng = StdRng::seed_from_u64(0xDEADBEEF); + let mut addr = 0; + for _ in 0..100 { + let inner: [F; 4] = std::iter::repeat_with(|| { + core::array::from_fn(|_| rng.sample(rand::distributions::Standard)) + }) + .find(|xs| !xs.iter().all(F::is_zero)) + .unwrap(); + let x = BinomialExtensionField::::from_base_slice(&inner); + let gal = x.galois_group(); + + let mut acc = BinomialExtensionField::one(); + + instructions.push(instr::mem_ext(MemAccessKind::Write, 1, addr, acc)); + for conj in gal { + instructions.push(instr::mem_ext(MemAccessKind::Write, 1, addr + 1, conj)); + instructions.push(instr::ext_alu(ExtAluOpcode::MulE, 1, addr + 2, addr, addr + 1)); + + addr += 2; + acc *= conj; + } + let base_cmp: F = acc.as_base_slice()[0]; + instructions.push(instr::mem_single(MemAccessKind::Read, 1, addr, base_cmp)); + addr += 1; + } + + test_instructions(instructions); + } +} diff --git a/crates/recursion/core/src/memory/air.rs b/crates/recursion/core/src/memory/air.rs deleted file mode 100644 index 2b0257bcb8..0000000000 --- a/crates/recursion/core/src/memory/air.rs +++ /dev/null @@ -1,266 +0,0 @@ -use core::mem::size_of; -use p3_air::{Air, AirBuilder, BaseAir}; -use p3_field::{AbstractField, PrimeField32}; -use p3_matrix::{dense::RowMajorMatrix, Matrix}; -use sp1_core_machine::utils::{next_power_of_two, par_for_each_row}; -use sp1_stark::{ - air::{AirInteraction, MachineAir}, - InteractionKind, -}; -use std::borrow::{Borrow, BorrowMut}; -use tracing::instrument; - -use super::columns::MemoryInitCols; -use crate::{ - air::{Block, SP1RecursionAirBuilder}, - memory::MemoryGlobalChip, - runtime::{ExecutionRecord, RecursionProgram}, -}; - -pub(crate) const NUM_MEMORY_INIT_COLS: usize = size_of::>(); - -#[allow(dead_code)] -impl MemoryGlobalChip { - pub const fn new() -> Self { - Self { fixed_log2_rows: None } - } -} - -impl MachineAir for MemoryGlobalChip { - type Record = ExecutionRecord; - type Program = 
RecursionProgram; - - fn name(&self) -> String { - "MemoryGlobalChip".to_string() - } - - fn generate_dependencies(&self, _: &Self::Record, _: &mut Self::Record) { - // This is a no-op. - } - - #[instrument(name = "generate memory trace", level = "debug", skip_all, fields(first_rows = input.first_memory_record.len(), last_rows = input.last_memory_record.len()))] - fn generate_trace( - &self, - input: &Self::Record, - _output: &mut Self::Record, - ) -> RowMajorMatrix { - let nb_events = input.first_memory_record.len() + input.last_memory_record.len(); - let nb_rows = next_power_of_two(nb_events, self.fixed_log2_rows); - let mut values = vec![F::zero(); nb_rows * NUM_MEMORY_INIT_COLS]; - - par_for_each_row(&mut values, NUM_MEMORY_INIT_COLS, |i, row| { - if i >= nb_events { - return; - } - let cols: &mut MemoryInitCols = row.borrow_mut(); - - if i < input.first_memory_record.len() { - let (addr, value) = &input.first_memory_record[i]; - cols.addr = *addr; - cols.timestamp = F::zero(); - cols.value = *value; - cols.is_initialize = F::one(); - - cols.is_real = F::one(); - } else { - let (addr, timestamp, value) = - &input.last_memory_record[i - input.first_memory_record.len()]; - let last = i == nb_events - 1; - let (next_addr, _, _) = if last { - &(F::zero(), F::zero(), Block::from(F::zero())) - } else { - &input.last_memory_record[i - input.first_memory_record.len() + 1] - }; - cols.addr = *addr; - cols.timestamp = *timestamp; - cols.value = *value; - cols.is_finalize = F::one(); - (cols.diff_16bit_limb, cols.diff_12bit_limb) = if !last { - compute_addr_diff(*next_addr, *addr, true) - } else { - (F::zero(), F::zero()) - }; - (cols.addr_16bit_limb, cols.addr_12bit_limb) = - compute_addr_diff(*addr, F::zero(), false); - - cols.is_real = F::one(); - cols.is_range_check = F::from_bool(!last); - } - }); - - RowMajorMatrix::new(values, NUM_MEMORY_INIT_COLS) - } - - fn included(&self, shard: &Self::Record) -> bool { - !shard.first_memory_record.is_empty() || !shard.last_memory_record.is_empty() - } -} - -impl BaseAir for MemoryGlobalChip { - fn width(&self) -> usize { - NUM_MEMORY_INIT_COLS - } -} - -/// Computes the difference between the `addr` and `prev_addr` and returns the 16-bit limb and -/// 12-bit limbs of the difference. -/// -/// The parameter `subtract_one` is expected to be `true` when `addr` and `prev_addr` are -/// consecutive addresses in the global memory table (we don't allow repeated addresses), and -/// `false` when this function is used to perform the 28-bit range check on the `addr` field. -pub fn compute_addr_diff(addr: F, prev_addr: F, subtract_one: bool) -> (F, F) { - let diff = addr.as_canonical_u32() - prev_addr.as_canonical_u32() - subtract_one as u32; - let diff_16bit_limb = diff & 0xffff; - let diff_12bit_limb = (diff >> 16) & 0xfff; - (F::from_canonical_u32(diff_16bit_limb), F::from_canonical_u32(diff_12bit_limb)) -} - -impl Air for MemoryGlobalChip -where - AB: SP1RecursionAirBuilder, -{ - fn eval(&self, builder: &mut AB) { - let main = builder.main(); - let local = main.row_slice(0); - let next = main.row_slice(1); - let local: &MemoryInitCols = (*local).borrow(); - let next: &MemoryInitCols = (*next).borrow(); - - // Verify that is_initialize and is_finalize and 1-is_real are bool and that at most one - // is true. 
- builder.assert_bool(local.is_initialize); - builder.assert_bool(local.is_finalize); - builder.assert_bool(local.is_real); - builder.assert_bool( - local.is_initialize + local.is_finalize + (AB::Expr::one() - local.is_real), - ); - builder.assert_bool(local.is_range_check); - - // Assert the is_initialize rows come before the is_finalize rows, and those come before the - // padding rows. - // The first row should be an initialize row. - builder.when_first_row().assert_one(local.is_initialize); - - // After an initialize row, we should either have a finalize row, or another initialize row. - builder - .when_transition() - .when(local.is_initialize) - .assert_one(next.is_initialize + next.is_finalize); - - // After a finalize row, we should either have a finalize row, or a padding row. - builder - .when_transition() - .when(local.is_finalize) - .assert_one(next.is_finalize + (AB::Expr::one() - next.is_real)); - - // After a padding row, we should only have another padding row. - builder.when_transition().when(AB::Expr::one() - local.is_real).assert_zero(next.is_real); - - // The last row should be a padding row or a finalize row. - builder.when_last_row().assert_one(local.is_finalize + AB::Expr::one() - local.is_real); - - // Ensure that the is_range_check column is properly computed. - // The flag column `is_range_check` is set iff is_finalize is set AND next.is_finalize is - // set. - builder.when(local.is_range_check).assert_one(local.is_finalize * next.is_finalize); - builder.when_not(local.is_range_check).assert_zero(local.is_finalize * next.is_finalize); - - // Send requests for the 28-bit range checks and ensure that the limbs are correctly - // computed. - builder.eval_range_check_28bits( - next.addr - local.addr - AB::Expr::one(), - local.diff_16bit_limb, - local.diff_12bit_limb, - local.is_range_check, - ); - - builder.eval_range_check_28bits( - local.addr, - local.addr_16bit_limb, - local.addr_12bit_limb, - local.is_finalize, - ); - - builder.send(AirInteraction::new( - vec![ - local.timestamp.into(), - local.addr.into(), - local.value[0].into(), - local.value[1].into(), - local.value[2].into(), - local.value[3].into(), - ], - local.is_initialize.into(), - InteractionKind::Memory, - )); - builder.receive(AirInteraction::new( - vec![ - local.timestamp.into(), - local.addr.into(), - local.value[0].into(), - local.value[1].into(), - local.value[2].into(), - local.value[3].into(), - ], - local.is_finalize.into(), - InteractionKind::Memory, - )); - } -} - -#[cfg(test)] -mod tests { - use itertools::Itertools; - use sp1_stark::{air::MachineAir, baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; - use std::time::Instant; - - use p3_baby_bear::{BabyBear, DiffusionMatrixBabyBear}; - use p3_field::AbstractField; - use p3_matrix::{dense::RowMajorMatrix, Matrix}; - use p3_poseidon2::{Poseidon2, Poseidon2ExternalMatrixGeneral}; - use sp1_core_machine::utils::{uni_stark_prove, uni_stark_verify}; - - use crate::{air::Block, memory::MemoryGlobalChip, runtime::ExecutionRecord}; - - #[test] - fn prove_babybear() { - let config = BabyBearPoseidon2::compressed(); - let mut challenger = config.challenger(); - - let chip = MemoryGlobalChip { fixed_log2_rows: None }; - - let test_vals = (0..16).map(BabyBear::from_canonical_u32).collect_vec(); - - let mut input_exec = ExecutionRecord::::default(); - for val in test_vals.into_iter() { - let event = (val, val, Block::from(BabyBear::zero())); - input_exec.last_memory_record.push(event); - } - - // Add a dummy initialize event because the AIR 
expects at least one. - input_exec.first_memory_record.push((BabyBear::zero(), Block::from(BabyBear::zero()))); - - println!("input exec: {:?}", input_exec.last_memory_record.len()); - let trace: RowMajorMatrix = - chip.generate_trace(&input_exec, &mut ExecutionRecord::::default()); - println!("trace dims is width: {:?}, height: {:?}", trace.width(), trace.height()); - - let start = Instant::now(); - let proof = uni_stark_prove(&config, &chip, &mut challenger, trace); - let duration = start.elapsed().as_secs_f64(); - println!("proof duration = {:?}", duration); - - let mut challenger: p3_challenger::DuplexChallenger< - BabyBear, - Poseidon2, - 16, - 8, - > = config.challenger(); - let start = Instant::now(); - uni_stark_verify(&config, &chip, &mut challenger, &proof) - .expect("expected proof to be valid"); - - let duration = start.elapsed().as_secs_f64(); - println!("verify duration = {:?}", duration); - } -} diff --git a/crates/recursion/core/src/memory/mod.rs b/crates/recursion/core/src/memory/mod.rs deleted file mode 100644 index bde6d22b48..0000000000 --- a/crates/recursion/core/src/memory/mod.rs +++ /dev/null @@ -1,112 +0,0 @@ -mod air; -mod columns; - -use p3_field::PrimeField32; - -use crate::air::Block; -pub use air::compute_addr_diff; -pub use columns::*; - -#[allow(clippy::manual_non_exhaustive)] -#[derive(Debug, Clone, Copy)] -pub struct MemoryRecord { - pub addr: F, - pub value: Block, - pub prev_value: Block, - pub timestamp: F, - pub prev_timestamp: F, - pub diff_16bit_limb: F, - pub diff_12bit_limb: F, - _private: (), -} - -/// Computes the difference between the current memory access timestamp and the previous one's. -/// -/// This function will compute the difference minus one and then decompose the result into a 16 bit -/// limb and 12 bit limb. The minus one is needed since a difference of zero is not valid. Also, -/// we assume that the clk/timestamp value will always be less than 2^28. 
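// Worked example of the limb decomposition described above (illustrative numbers): with
// timestamp = 70_000 and prev_timestamp = 1, diff_minus_one = 69_998 = 0x1116E, so the
// 16-bit limb is 0x116E and the 12-bit limb is 0x1; recombining, 0x1 * 2^16 + 0x116E = 69_998,
// which stays below the assumed 2^28 bound.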
-fn compute_diff(timestamp: F, prev_timestamp: F) -> (F, F) { - let diff_minus_one = timestamp.as_canonical_u32() - prev_timestamp.as_canonical_u32() - 1; - let diff_16bit_limb = diff_minus_one & 0xffff; - let diff_12bit_limb = (diff_minus_one >> 16) & 0xfff; - (F::from_canonical_u32(diff_16bit_limb), F::from_canonical_u32(diff_12bit_limb)) -} - -impl MemoryRecord { - pub fn new_write( - addr: F, - value: Block, - timestamp: F, - prev_value: Block, - prev_timestamp: F, - ) -> Self { - assert!(timestamp >= prev_timestamp); - let (diff_16bit_limb, diff_12bit_limb) = compute_diff(timestamp, prev_timestamp); - Self { - addr, - value, - prev_value, - timestamp, - prev_timestamp, - diff_16bit_limb, - diff_12bit_limb, - _private: (), - } - } - - pub fn new_read(addr: F, value: Block, timestamp: F, prev_timestamp: F) -> Self { - assert!(timestamp >= prev_timestamp); - let (diff_16bit_limb, diff_12bit_limb) = compute_diff(timestamp, prev_timestamp); - Self { - addr, - value, - prev_value: value, - timestamp, - prev_timestamp, - diff_16bit_limb, - diff_12bit_limb, - _private: (), - } - } -} - -impl MemoryReadWriteCols { - pub fn populate(&mut self, record: &MemoryRecord) { - self.prev_value = record.prev_value; - self.access.populate(record.value, record); - } -} - -impl MemoryReadCols { - pub fn populate(&mut self, record: &MemoryRecord) { - self.access.populate(record.value, record); - } -} - -impl MemoryReadWriteSingleCols { - pub fn populate(&mut self, record: &MemoryRecord) { - self.prev_value = record.prev_value[0]; - self.access.populate(record.value[0], record); - } -} - -impl MemoryReadSingleCols { - pub fn populate(&mut self, record: &MemoryRecord) { - self.access.populate(record.value[0], record); - } -} - -impl MemoryAccessCols { - /// Populate the memory access columns. - pub fn populate(&mut self, value: TValue, record: &MemoryRecord) { - self.value = value; - self.prev_timestamp = record.prev_timestamp; - self.diff_16bit_limb = record.diff_16bit_limb; - self.diff_12bit_limb = record.diff_12bit_limb; - } -} - -#[derive(Default)] -pub struct MemoryGlobalChip { - pub fixed_log2_rows: Option, -} diff --git a/crates/recursion/core/src/multi/mod.rs b/crates/recursion/core/src/multi/mod.rs deleted file mode 100644 index 6c4cc566db..0000000000 --- a/crates/recursion/core/src/multi/mod.rs +++ /dev/null @@ -1,376 +0,0 @@ -use std::{ - array, - borrow::{Borrow, BorrowMut}, - cmp::max, - ops::Deref, -}; - -use itertools::Itertools; -use p3_air::{Air, AirBuilder, BaseAir}; -use p3_field::{AbstractField, PrimeField32}; -use p3_matrix::{dense::RowMajorMatrix, Matrix}; -use sp1_core_machine::utils::pad_rows_fixed; -use sp1_derive::AlignedBorrow; -use sp1_stark::air::{BaseAirBuilder, MachineAir}; - -use crate::{ - air::{MultiBuilder, SP1RecursionAirBuilder}, - fri_fold::{FriFoldChip, FriFoldCols}, - poseidon2_wide::{columns::Poseidon2, Poseidon2WideChip, WIDTH}, - runtime::{ExecutionRecord, RecursionProgram}, -}; - -pub const NUM_MULTI_COLS: usize = core::mem::size_of::>(); - -#[derive(Default)] -pub struct MultiChip { - pub fixed_log2_rows: Option, -} - -#[derive(AlignedBorrow, Clone, Copy)] -#[repr(C)] -pub struct MultiCols { - pub is_fri_fold: T, - - /// Rows that needs to receive a fri_fold syscall. - pub fri_fold_receive_table: T, - /// Rows that needs to access memory. - pub fri_fold_memory_access: T, - - pub is_poseidon2: T, - - /// A flag column to indicate whether the row is the first poseidon2 row. 
- pub poseidon2_first_row: T, - /// A flag column to indicate whether the row is the last poseidon2 row. - pub poseidon2_last_row: T, - - /// Similar for Fri_fold. - pub fri_fold_last_row: T, - - /// Rows that needs to receive a poseidon2 syscall. - pub poseidon2_receive_table: T, - /// Hash/Permute state entries that needs to access memory. This is for the the first half of - /// the permute state. - pub poseidon2_1st_half_memory_access: [T; WIDTH / 2], - /// Flag to indicate if all of the second half of a compress state needs to access memory. - pub poseidon2_2nd_half_memory_access: T, - /// Rows that need to send a range check. - pub poseidon2_send_range_check: T, -} - -impl BaseAir for MultiChip { - fn width(&self) -> usize { - let fri_fold_width = Self::fri_fold_width::(); - let poseidon2_width = Self::poseidon2_width::(); - - max(fri_fold_width, poseidon2_width) + NUM_MULTI_COLS - } -} - -impl MachineAir for MultiChip { - type Record = ExecutionRecord; - - type Program = RecursionProgram; - - fn name(&self) -> String { - "Multi".to_string() - } - - fn generate_trace( - &self, - input: &ExecutionRecord, - output: &mut ExecutionRecord, - ) -> RowMajorMatrix { - let fri_fold_chip = FriFoldChip:: { fixed_log2_rows: None, pad: false }; - let poseidon2 = Poseidon2WideChip:: { fixed_log2_rows: None, pad: false }; - let fri_fold_trace = fri_fold_chip.generate_trace(input, output); - let mut poseidon2_trace = poseidon2.generate_trace(input, output); - - let fri_fold_height = fri_fold_trace.height(); - let poseidon2_height = poseidon2_trace.height(); - - let num_columns = as BaseAir>::width(self); - - let mut rows = fri_fold_trace - .clone() - .rows_mut() - .chain(poseidon2_trace.rows_mut()) - .enumerate() - .map(|(i, instruction_row)| { - let process_fri_fold = i < fri_fold_trace.height(); - - let mut row = vec![F::zero(); num_columns]; - row[NUM_MULTI_COLS..NUM_MULTI_COLS + instruction_row.len()] - .copy_from_slice(instruction_row); - - if process_fri_fold { - let multi_cols: &mut MultiCols = row[0..NUM_MULTI_COLS].borrow_mut(); - multi_cols.is_fri_fold = F::one(); - - let fri_fold_cols: &FriFoldCols = (*instruction_row).borrow(); - multi_cols.fri_fold_receive_table = - FriFoldChip::::do_receive_table(fri_fold_cols); - multi_cols.fri_fold_memory_access = - FriFoldChip::::do_memory_access(fri_fold_cols); - if i == fri_fold_trace.height() - 1 { - multi_cols.fri_fold_last_row = F::one(); - } - } else { - let multi_cols: &mut MultiCols = row[0..NUM_MULTI_COLS].borrow_mut(); - multi_cols.is_poseidon2 = F::one(); - - let poseidon2_cols = Poseidon2WideChip::::convert::(instruction_row); - multi_cols.poseidon2_receive_table = - poseidon2_cols.control_flow().is_syscall_row; - multi_cols.poseidon2_1st_half_memory_access = - array::from_fn(|i| poseidon2_cols.memory().memory_slot_used[i]); - multi_cols.poseidon2_2nd_half_memory_access = - poseidon2_cols.control_flow().is_compress; - multi_cols.poseidon2_send_range_check = poseidon2_cols.control_flow().is_absorb; - - // The first row of the poseidon2 trace has index fri_fold_trace.height() - multi_cols.poseidon2_first_row = F::from_bool(i == fri_fold_height); - multi_cols.poseidon2_last_row = - F::from_bool(i == fri_fold_height + poseidon2_height - 1); - } - - row - }) - .collect_vec(); - - // Pad the trace to a power of two. - pad_rows_fixed(&mut rows, || vec![F::zero(); num_columns], self.fixed_log2_rows); - - // Convert the trace to a row major matrix. 
- RowMajorMatrix::new(rows.into_iter().flatten().collect(), num_columns) - } - - fn included(&self, _: &Self::Record) -> bool { - true - } -} - -impl Air for MultiChip -where - AB: SP1RecursionAirBuilder, - AB::Var: 'static, -{ - fn eval(&self, builder: &mut AB) { - let main = builder.main(); - let (local, next) = (main.row_slice(0), main.row_slice(1)); - - let local_slice: &[::Var] = &local; - let next_slice: &[::Var] = &next; - let local_multi_cols: &MultiCols = local_slice[0..NUM_MULTI_COLS].borrow(); - let next_multi_cols: &MultiCols = next_slice[0..NUM_MULTI_COLS].borrow(); - - // Dummy constraints to normalize to DEGREE. - let lhs = (0..DEGREE).map(|_| local_multi_cols.is_poseidon2.into()).product::(); - let rhs = (0..DEGREE).map(|_| local_multi_cols.is_poseidon2.into()).product::(); - builder.assert_eq(lhs, rhs); - - let next_is_real = next_multi_cols.is_fri_fold + next_multi_cols.is_poseidon2; - let local_is_real = local_multi_cols.is_fri_fold + local_multi_cols.is_poseidon2; - - // Assert that is_fri_fold and is_poseidon2 are bool and that at most one is set. - builder.assert_bool(local_multi_cols.is_fri_fold); - builder.assert_bool(local_multi_cols.is_poseidon2); - builder.assert_bool(local_is_real.clone()); - - // Constrain the flags to be boolean. - builder.assert_bool(local_multi_cols.poseidon2_first_row); - builder.assert_bool(local_multi_cols.poseidon2_last_row); - builder.assert_bool(local_multi_cols.fri_fold_last_row); - - // Constrain that the flags are computed correctly. - builder.when_transition().assert_eq( - local_multi_cols.is_fri_fold * (AB::Expr::one() - next_multi_cols.is_fri_fold), - local_multi_cols.fri_fold_last_row, - ); - builder - .when_last_row() - .assert_eq(local_multi_cols.is_fri_fold, local_multi_cols.fri_fold_last_row); - builder - .when_first_row() - .assert_eq(local_multi_cols.is_poseidon2, local_multi_cols.poseidon2_first_row); - builder.when_transition().assert_eq( - next_multi_cols.poseidon2_first_row, - local_multi_cols.is_fri_fold * next_multi_cols.is_poseidon2, - ); - builder.when_transition().assert_eq( - local_multi_cols.is_poseidon2 * (AB::Expr::one() - next_multi_cols.is_poseidon2), - local_multi_cols.poseidon2_last_row, - ); - builder - .when_last_row() - .assert_eq(local_multi_cols.is_poseidon2, local_multi_cols.poseidon2_last_row); - - // Fri fold requires that it's rows are contiguous, since each invocation spans multiple - // rows and it's AIR checks for consistencies among them. The following constraints - // enforce that all the fri fold rows are first, then the posiedon2 rows, and - // finally any padded (non-real) rows. - - // First verify that all real rows are contiguous. - builder.when_transition().when_not(local_is_real.clone()).assert_zero(next_is_real.clone()); - - // Next, verify that all fri fold rows are before the poseidon2 rows within the real rows - // section. 
- builder - .when_transition() - .when(next_is_real) - .when(local_multi_cols.is_poseidon2) - .assert_one(next_multi_cols.is_poseidon2); - - let mut sub_builder = MultiBuilder::new( - builder, - local_multi_cols.is_fri_fold.into(), - builder.is_first_row(), - local_multi_cols.fri_fold_last_row.into(), - next_multi_cols.is_fri_fold.into(), - ); - - let local_fri_fold_cols = Self::fri_fold(&local); - let next_fri_fold_cols = Self::fri_fold(&next); - - sub_builder.assert_eq( - local_multi_cols.is_fri_fold - * FriFoldChip::::do_memory_access::(&local_fri_fold_cols), - local_multi_cols.fri_fold_memory_access, - ); - sub_builder.assert_eq( - local_multi_cols.is_fri_fold - * FriFoldChip::::do_receive_table::(&local_fri_fold_cols), - local_multi_cols.fri_fold_receive_table, - ); - - let fri_fold_chip = FriFoldChip::::default(); - fri_fold_chip.eval_fri_fold( - &mut sub_builder, - &local_fri_fold_cols, - &next_fri_fold_cols, - local_multi_cols.fri_fold_receive_table, - local_multi_cols.fri_fold_memory_access, - ); - - let mut sub_builder = MultiBuilder::new( - builder, - local_multi_cols.is_poseidon2.into(), - local_multi_cols.poseidon2_first_row.into(), - local_multi_cols.poseidon2_last_row.into(), - next_multi_cols.is_poseidon2.into(), - ); - - let poseidon2_columns = MultiChip::::poseidon2(local_slice); - sub_builder.assert_eq( - local_multi_cols.is_poseidon2 * poseidon2_columns.control_flow().is_syscall_row, - local_multi_cols.poseidon2_receive_table, - ); - local_multi_cols.poseidon2_1st_half_memory_access.iter().enumerate().for_each( - |(i, mem_access)| { - sub_builder.assert_eq( - local_multi_cols.is_poseidon2 * poseidon2_columns.memory().memory_slot_used[i], - *mem_access, - ); - }, - ); - - sub_builder.assert_eq( - local_multi_cols.is_poseidon2 * poseidon2_columns.control_flow().is_compress, - local_multi_cols.poseidon2_2nd_half_memory_access, - ); - - sub_builder.assert_eq( - local_multi_cols.is_poseidon2 * poseidon2_columns.control_flow().is_absorb, - local_multi_cols.poseidon2_send_range_check, - ); - - let poseidon2_chip = Poseidon2WideChip::::default(); - poseidon2_chip.eval_poseidon2( - &mut sub_builder, - poseidon2_columns.as_ref(), - MultiChip::::poseidon2(next_slice).as_ref(), - local_multi_cols.poseidon2_receive_table, - local_multi_cols.poseidon2_1st_half_memory_access, - local_multi_cols.poseidon2_2nd_half_memory_access, - local_multi_cols.poseidon2_send_range_check, - ); - } -} - -impl MultiChip { - fn fri_fold_width() -> usize { - as BaseAir>::width(&FriFoldChip::::default()) - } - - fn fri_fold(row: &dyn Deref) -> FriFoldCols { - let row_slice: &[T] = row; - let fri_fold_width = Self::fri_fold_width::(); - let fri_fold_cols: &FriFoldCols = - (row_slice[NUM_MULTI_COLS..NUM_MULTI_COLS + fri_fold_width]).borrow(); - - *fri_fold_cols - } - - fn poseidon2_width() -> usize { - as BaseAir>::width(&Poseidon2WideChip::::default()) - } - - fn poseidon2<'a, T>(row: impl Deref) -> Box + 'a> - where - T: Copy + 'a, - { - let row_slice: &[T] = &row; - let poseidon2_width = Self::poseidon2_width::(); - - Poseidon2WideChip::::convert::( - &row_slice[NUM_MULTI_COLS..NUM_MULTI_COLS + poseidon2_width], - ) - } -} - -#[cfg(test)] -mod tests { - use std::time::Instant; - - use p3_baby_bear::{BabyBear, DiffusionMatrixBabyBear}; - use p3_matrix::{dense::RowMajorMatrix, Matrix}; - use p3_poseidon2::{Poseidon2, Poseidon2ExternalMatrixGeneral}; - use sp1_core_machine::utils::{uni_stark_prove, uni_stark_verify}; - use sp1_stark::{air::MachineAir, baby_bear_poseidon2::BabyBearPoseidon2, 
StarkGenericConfig}; - - use crate::{ - multi::MultiChip, poseidon2_wide::tests::generate_test_execution_record, - runtime::ExecutionRecord, - }; - - #[test] - fn prove_babybear() { - let config = BabyBearPoseidon2::compressed(); - let mut challenger = config.challenger(); - - let chip = MultiChip::<9> { fixed_log2_rows: None }; - - let input_exec = generate_test_execution_record(false); - let trace: RowMajorMatrix = - chip.generate_trace(&input_exec, &mut ExecutionRecord::::default()); - println!("trace dims is width: {:?}, height: {:?}", trace.width(), trace.height()); - - let start = Instant::now(); - let proof = uni_stark_prove(&config, &chip, &mut challenger, trace); - let duration = start.elapsed().as_secs_f64(); - println!("proof duration = {:?}", duration); - - let mut challenger: p3_challenger::DuplexChallenger< - BabyBear, - Poseidon2, - 16, - 8, - > = config.challenger(); - let start = Instant::now(); - uni_stark_verify(&config, &chip, &mut challenger, &proof) - .expect("expected proof to be valid"); - - let duration = start.elapsed().as_secs_f64(); - println!("verify duration = {:?}", duration); - } -} diff --git a/crates/recursion/core/src/poseidon2_wide/air/control_flow.rs b/crates/recursion/core/src/poseidon2_wide/air/control_flow.rs deleted file mode 100644 index 6cd28028c1..0000000000 --- a/crates/recursion/core/src/poseidon2_wide/air/control_flow.rs +++ /dev/null @@ -1,419 +0,0 @@ -use p3_air::AirBuilder; -use p3_field::AbstractField; -use sp1_core_machine::operations::IsZeroOperation; -use sp1_stark::air::BaseAirBuilder; - -use crate::{ - air::SP1RecursionAirBuilder, - poseidon2_wide::{ - columns::{ - control_flow::ControlFlow, opcode_workspace::OpcodeWorkspace, - syscall_params::SyscallParams, Poseidon2, - }, - Poseidon2WideChip, RATE, - }, - range_check::RangeCheckOpcode, -}; - -impl Poseidon2WideChip { - /// Constraints related to control flow. - pub(crate) fn eval_control_flow( - &self, - builder: &mut AB, - local_row: &dyn Poseidon2, - next_row: &dyn Poseidon2, - send_range_check: AB::Var, - ) where - AB::Var: 'static, - { - let local_control_flow = local_row.control_flow(); - let next_control_flow = next_row.control_flow(); - - let local_is_real = local_control_flow.is_compress - + local_control_flow.is_absorb - + local_control_flow.is_finalize; - let next_is_real = next_control_flow.is_compress - + next_control_flow.is_absorb - + next_control_flow.is_finalize; - - builder.assert_bool(local_control_flow.is_compress); - builder.assert_bool(local_control_flow.is_compress_output); - builder.assert_bool(local_control_flow.is_absorb); - builder.assert_bool(local_control_flow.is_finalize); - builder.assert_bool(local_control_flow.is_syscall_row); - builder.assert_bool(local_is_real.clone()); - - self.eval_global_control_flow( - builder, - local_control_flow, - next_control_flow, - local_row.syscall_params(), - next_row.syscall_params(), - local_row.opcode_workspace(), - next_row.opcode_workspace(), - local_is_real.clone(), - next_is_real.clone(), - ); - - self.eval_hash_control_flow( - builder, - local_control_flow, - local_row.opcode_workspace(), - next_row.opcode_workspace(), - local_row.syscall_params(), - send_range_check, - ); - - builder.when(local_control_flow.is_syscall_row).assert_one(local_is_real); - } - - /// This function will verify that all hash rows are before the compress rows and that the first - /// row is the first absorb syscall. These constraints will require that there is at least one - /// absorb, finalize, and compress system call. 
- #[allow(clippy::too_many_arguments)] - fn eval_global_control_flow( - &self, - builder: &mut AB, - local_control_flow: &ControlFlow, - next_control_flow: &ControlFlow, - local_syscall_params: &SyscallParams, - next_syscall_params: &SyscallParams, - local_opcode_workspace: &OpcodeWorkspace, - next_opcode_workspace: &OpcodeWorkspace, - local_is_real: AB::Expr, - next_is_real: AB::Expr, - ) { - // We require that the first row is an absorb syscall and that the hash_num == 0 and - // absorb_num == 0. - let mut first_row_builder = builder.when_first_row(); - first_row_builder.assert_one(local_control_flow.is_absorb); - first_row_builder.assert_one(local_control_flow.is_syscall_row); - first_row_builder.assert_zero(local_opcode_workspace.absorb().hash_num); - first_row_builder.assert_zero(local_opcode_workspace.absorb().absorb_num); - first_row_builder.assert_one(local_opcode_workspace.absorb().is_first_hash_row); - - // For absorb rows, constrain the following: - // 1) when last absorb row, then the next row is a either an absorb or finalize syscall row. - // 2) when last absorb row and the next row is an absorb row, then absorb_num' = absorb_num - // + 1. - // 3) when not last absorb row, then the next row is an absorb non syscall row. - // 4) when not last absorb row, then absorb_num' = absorb_num. - // 5) hash_num == hash_num'. - { - let mut transition_builder = builder.when_transition(); - - let mut absorb_last_row_builder = - transition_builder.when(local_control_flow.is_absorb_last_row); - absorb_last_row_builder - .assert_one(next_control_flow.is_absorb + next_control_flow.is_finalize); - absorb_last_row_builder.assert_one(next_control_flow.is_syscall_row); - absorb_last_row_builder.when(next_control_flow.is_absorb).assert_eq( - next_opcode_workspace.absorb().absorb_num, - local_opcode_workspace.absorb().absorb_num + AB::Expr::one(), - ); - - let mut absorb_not_last_row_builder = - transition_builder.when(local_control_flow.is_absorb_not_last_row); - absorb_not_last_row_builder.assert_one(next_control_flow.is_absorb); - absorb_not_last_row_builder.assert_zero(next_control_flow.is_syscall_row); - absorb_not_last_row_builder.assert_eq( - local_opcode_workspace.absorb().absorb_num, - next_opcode_workspace.absorb().absorb_num, - ); - - let mut absorb_transition_builder = - transition_builder.when(local_control_flow.is_absorb); - absorb_transition_builder.when(next_control_flow.is_absorb).assert_eq( - local_opcode_workspace.absorb().hash_num, - next_opcode_workspace.absorb().hash_num, - ); - absorb_transition_builder.when(next_control_flow.is_finalize).assert_eq( - local_opcode_workspace.absorb().hash_num, - next_syscall_params.finalize().hash_num, - ); - } - - // For finalize rows, constrain the following: - // 1) next row is syscall compress or syscall absorb. 
- // 2) if next row is absorb -> hash_num + 1 == hash_num' - // 3) if next row is absorb -> absorb_num' == 0 - // 4) if next row is absorb -> is_first_hash' == true - { - let mut transition_builder = builder.when_transition(); - let mut finalize_transition_builder = - transition_builder.when(local_control_flow.is_finalize); - - finalize_transition_builder - .assert_one(next_control_flow.is_absorb + next_control_flow.is_compress); - finalize_transition_builder.assert_one(next_control_flow.is_syscall_row); - - finalize_transition_builder.when(next_control_flow.is_absorb).assert_eq( - local_syscall_params.finalize().hash_num + AB::Expr::one(), - next_opcode_workspace.absorb().hash_num, - ); - finalize_transition_builder - .when(next_control_flow.is_absorb) - .assert_zero(next_opcode_workspace.absorb().absorb_num); - finalize_transition_builder - .when(next_control_flow.is_absorb) - .assert_one(next_opcode_workspace.absorb().is_first_hash_row); - } - - // For compress rows, constrain the following: - // 1) if compress syscall -> next row is a compress output - // 2) if compress output -> next row is a compress syscall or not real - { - builder.assert_eq( - local_control_flow.is_compress_output, - local_control_flow.is_compress - * (AB::Expr::one() - local_control_flow.is_syscall_row), - ); - - let mut transition_builder = builder.when_transition(); - - transition_builder - .when(local_control_flow.is_compress) - .when(local_control_flow.is_syscall_row) - .assert_one(next_control_flow.is_compress_output); - - // When we are at a compress output row, then ensure next row is either not real or is a - // compress syscall row. - transition_builder.when(local_control_flow.is_compress_output).assert_one( - (AB::Expr::one() - next_is_real.clone()) - + next_control_flow.is_compress * next_control_flow.is_syscall_row, - ); - } - - // Constrain that there is only one is_real -> not is real transition. Also contrain that - // the last real row is a compress output row. - { - let mut transition_builder = builder.when_transition(); - - transition_builder.when_not(local_is_real.clone()).assert_zero(next_is_real.clone()); - - transition_builder - .when(local_is_real.clone()) - .when_not(next_is_real.clone()) - .assert_one(local_control_flow.is_compress_output); - - builder - .when_last_row() - .when(local_is_real.clone()) - .assert_one(local_control_flow.is_compress_output); - } - } - - #[allow(clippy::too_many_arguments)] - fn eval_hash_control_flow( - &self, - builder: &mut AB, - local_control_flow: &ControlFlow, - local_opcode_workspace: &OpcodeWorkspace, - next_opcode_workspace: &OpcodeWorkspace, - local_syscall_params: &SyscallParams, - send_range_check: AB::Var, - ) { - let local_hash_workspace = local_opcode_workspace.absorb(); - let next_hash_workspace = next_opcode_workspace.absorb(); - let last_row_ending_cursor_is_seven = - local_hash_workspace.last_row_ending_cursor_is_seven.result; - - // Verify that the hash_num and absorb_num are correctly decomposed from the syscall - // hash_and_absorb_num param. 
- // Also range check that both hash_num is within [0, 2^16 - 1] and absorb_num is within [0, - // 2^12 - 1]; - { - let mut absorb_builder = builder.when(local_control_flow.is_absorb); - - absorb_builder.assert_eq( - local_hash_workspace.hash_num * AB::Expr::from_canonical_u32(1 << 12) - + local_hash_workspace.absorb_num, - local_syscall_params.absorb().hash_and_absorb_num, - ); - builder.send_range_check( - AB::Expr::from_canonical_u8(RangeCheckOpcode::U16 as u8), - local_hash_workspace.hash_num, - send_range_check, - ); - builder.send_range_check( - AB::Expr::from_canonical_u8(RangeCheckOpcode::U12 as u8), - local_hash_workspace.absorb_num, - send_range_check, - ); - } - - // Constrain the materialized control flow flags. - { - let mut absorb_builder = builder.when(local_control_flow.is_absorb); - - absorb_builder.assert_eq( - local_hash_workspace.is_syscall_not_last_row, - local_control_flow.is_syscall_row - * (AB::Expr::one() - local_hash_workspace.is_last_row::()), - ); - absorb_builder.assert_eq( - local_hash_workspace.not_syscall_not_last_row, - (AB::Expr::one() - local_control_flow.is_syscall_row) - * (AB::Expr::one() - local_hash_workspace.is_last_row::()), - ); - absorb_builder.assert_eq( - local_hash_workspace.is_syscall_is_last_row, - local_control_flow.is_syscall_row * local_hash_workspace.is_last_row::(), - ); - absorb_builder.assert_eq( - local_hash_workspace.not_syscall_is_last_row, - (AB::Expr::one() - local_control_flow.is_syscall_row) - * local_hash_workspace.is_last_row::(), - ); - absorb_builder.assert_eq( - local_hash_workspace.is_last_row_ending_cursor_is_seven, - local_hash_workspace.is_last_row::() * last_row_ending_cursor_is_seven, - ); - absorb_builder.assert_eq( - local_hash_workspace.is_last_row_ending_cursor_not_seven, - local_hash_workspace.is_last_row::() - * (AB::Expr::one() - last_row_ending_cursor_is_seven), - ); - - builder.assert_eq( - local_control_flow.is_absorb_not_last_row, - local_control_flow.is_absorb - * (AB::Expr::one() - local_hash_workspace.is_last_row::()), - ); - builder.assert_eq( - local_control_flow.is_absorb_last_row, - local_control_flow.is_absorb * local_hash_workspace.is_last_row::(), - ); - - builder.assert_eq( - local_control_flow.is_absorb_no_perm, - local_control_flow.is_absorb - * (AB::Expr::one() - local_hash_workspace.do_perm::()), - ); - } - - // For the absorb syscall row, ensure correct value of num_remaining_rows, - // last_row_num_consumed, and num_remaining_rows_is_zero. - { - let mut absorb_builder = builder.when(local_control_flow.is_absorb); - - // Verify that state_cursor + syscall input_len - 1 == num_remaining_rows * RATE + - // last_row_ending_cursor. The minus one is needed, since - // `last_row_ending_cursor` is inclusive of the last element, - // while state_cursor + syscall input_len is not. - absorb_builder.when(local_control_flow.is_syscall_row).assert_eq( - local_hash_workspace.state_cursor + local_syscall_params.absorb().input_len - - AB::Expr::one(), - local_hash_workspace.num_remaining_rows * AB::Expr::from_canonical_usize(RATE) - + local_hash_workspace.last_row_ending_cursor, - ); - - // Range check that last_row_ending_cursor is between [0, 7]. 
- (0..3).for_each(|i| { - absorb_builder.assert_bool(local_hash_workspace.last_row_ending_cursor_bitmap[i]) - }); - let expected_last_row_ending_cursor: AB::Expr = local_hash_workspace - .last_row_ending_cursor_bitmap - .iter() - .zip(0..3) - .map(|(bit, exp)| *bit * AB::Expr::from_canonical_u32(2u32.pow(exp))) - .sum::(); - absorb_builder.when(local_control_flow.is_syscall_row).assert_eq( - local_hash_workspace.last_row_ending_cursor, - expected_last_row_ending_cursor, - ); - - // Range check that input_len < 2^16. This check is only needed for absorb syscall - // rows, but we send it for all absorb rows, since the `is_real` parameter - // must be an expression with at most degree 1. - builder.send_range_check( - AB::Expr::from_canonical_u8(RangeCheckOpcode::U16 as u8), - local_syscall_params.absorb().input_len, - send_range_check, - ); - - // Range check that num_remaining_rows is between [0, 2^16-1]. - builder.send_range_check( - AB::Expr::from_canonical_u8(RangeCheckOpcode::U16 as u8), - local_hash_workspace.num_remaining_rows, - send_range_check, - ); - } - - // For all non last absorb rows, verify that num_remaining_rows decrements and - // that last_row_ending_cursor is copied down. - { - let mut transition_builder = builder.when_transition(); - let mut absorb_transition_builder = - transition_builder.when(local_control_flow.is_absorb); - - absorb_transition_builder.when_not(local_hash_workspace.is_last_row::()).assert_eq( - next_hash_workspace.num_remaining_rows, - local_hash_workspace.num_remaining_rows - AB::Expr::one(), - ); - - // Copy down the last_row_ending_cursor value within the absorb call. - absorb_transition_builder.when_not(local_hash_workspace.is_last_row::()).assert_eq( - next_hash_workspace.last_row_ending_cursor, - local_hash_workspace.last_row_ending_cursor, - ); - } - - // Constrain the state cursor. There are three constraints: - // 1) For the first hash row, verify that state_cursor == 0. - // 2) For the last absorb rows, verify that constrain state_cursor' = - // (last_row_ending_cursor + 1) % RATE. - // 3) For all non syscall rows, the state_cursor should be 0. - { - let mut absorb_builder = builder.when(local_control_flow.is_absorb); - - absorb_builder - .when(local_hash_workspace.is_first_hash_row) - .assert_zero(local_hash_workspace.state_cursor); - - absorb_builder - .when(local_hash_workspace.is_last_row_ending_cursor_is_seven) - .assert_zero(next_hash_workspace.state_cursor); - - absorb_builder - .when(local_hash_workspace.is_last_row_ending_cursor_not_seven) - .assert_eq( - next_hash_workspace.state_cursor, - local_hash_workspace.last_row_ending_cursor + AB::Expr::one(), - ); - - absorb_builder - .when_not(local_control_flow.is_syscall_row) - .assert_zero(local_hash_workspace.state_cursor); - } - - // Eval the absorb's iszero operations. - { - // Drop absorb_builder so that builder can be used in the IsZeroOperation eval. - IsZeroOperation::::eval( - builder, - local_hash_workspace.last_row_ending_cursor - AB::Expr::from_canonical_usize(7), - local_hash_workspace.last_row_ending_cursor_is_seven, - local_control_flow.is_absorb.into(), - ); - - IsZeroOperation::::eval( - builder, - local_hash_workspace.num_remaining_rows.into(), - local_hash_workspace.num_remaining_rows_is_zero, - local_control_flow.is_absorb.into(), - ); - } - - // Apply control flow constraints for finalize. - { - // Eval state_cursor_is_zero. 
- IsZeroOperation::::eval( - builder, - local_opcode_workspace.finalize().state_cursor.into(), - local_opcode_workspace.finalize().state_cursor_is_zero, - local_control_flow.is_finalize.into(), - ); - } - } -} diff --git a/crates/recursion/core/src/poseidon2_wide/air/memory.rs b/crates/recursion/core/src/poseidon2_wide/air/memory.rs deleted file mode 100644 index 4d4e783972..0000000000 --- a/crates/recursion/core/src/poseidon2_wide/air/memory.rs +++ /dev/null @@ -1,220 +0,0 @@ -use p3_air::AirBuilder; -use p3_field::AbstractField; -use sp1_stark::air::BaseAirBuilder; - -use crate::{ - air::SP1RecursionAirBuilder, - memory::MemoryCols, - poseidon2_wide::{ - columns::{ - control_flow::ControlFlow, memory::Memory, opcode_workspace::OpcodeWorkspace, - syscall_params::SyscallParams, - }, - Poseidon2WideChip, WIDTH, - }, -}; - -impl Poseidon2WideChip { - /// Eval the memory related columns. - #[allow(clippy::too_many_arguments)] - pub(crate) fn eval_mem( - &self, - builder: &mut AB, - syscall_params: &SyscallParams, - local_memory: &Memory, - next_memory: &Memory, - opcode_workspace: &OpcodeWorkspace, - control_flow: &ControlFlow, - first_half_memory_access: [AB::Var; WIDTH / 2], - second_half_memory_access: AB::Var, - ) { - let clk = syscall_params.get_raw_params()[0]; - let is_real = control_flow.is_compress + control_flow.is_absorb + control_flow.is_finalize; - - // Constrain the memory flags. - for i in 0..WIDTH / 2 { - builder.assert_bool(local_memory.memory_slot_used[i]); - - // The memory slot flag will be used as the memory access multiplicity flag, so we need - // to ensure that those values are zero for all non real rows. - builder.when_not(is_real.clone()).assert_zero(local_memory.memory_slot_used[i]); - - // For compress and finalize, all of the slots should be true. - builder - .when(control_flow.is_compress + control_flow.is_finalize) - .assert_one(local_memory.memory_slot_used[i]); - - // For absorb, need to make sure the memory_slots_used is consistent with the - // start_cursor and end_cursor (i.e. start_cursor + num_consumed); - self.eval_absorb_memory_slots(builder, control_flow, local_memory, opcode_workspace); - } - - // Verify the start_addr column. - { - // For compress syscall rows, the start_addr should be the param's left ptr. - builder - .when(control_flow.is_compress * control_flow.is_syscall_row) - .assert_eq(syscall_params.compress().left_ptr, local_memory.start_addr); - - // For compress output rows, the start_addr should be the param's dst ptr. - builder - .when(control_flow.is_compress_output) - .assert_eq(syscall_params.compress().dst_ptr, local_memory.start_addr); - - // For absorb syscall rows, the start_addr should initially be from the syscall param's - // input_ptr, and for subsequent rows, it's incremented by the number of consumed - // elements. - builder - .when(control_flow.is_absorb) - .when(control_flow.is_syscall_row) - .assert_eq(syscall_params.absorb().input_ptr, local_memory.start_addr); - builder.when(control_flow.is_absorb_not_last_row).assert_eq( - next_memory.start_addr, - local_memory.start_addr + opcode_workspace.absorb().num_consumed::(), - ); - - // For finalize syscall rows, the start_addr should be the param's output ptr. - builder - .when(control_flow.is_finalize) - .assert_eq(syscall_params.finalize().output_ptr, local_memory.start_addr); - } - - // Contrain memory access for the first half of the memory accesses. 
- { - let mut addr: AB::Expr = local_memory.start_addr.into(); - for i in 0..WIDTH / 2 { - builder.recursion_eval_memory_access_single( - clk + control_flow.is_compress_output, - addr.clone(), - &local_memory.memory_accesses[i], - first_half_memory_access[i], - ); - - let compress_syscall_row = control_flow.is_compress * control_flow.is_syscall_row; - // For read only accesses, assert the value didn't change. - builder.when(compress_syscall_row + control_flow.is_absorb).assert_eq( - *local_memory.memory_accesses[i].prev_value(), - *local_memory.memory_accesses[i].value(), - ); - - addr = addr.clone() + local_memory.memory_slot_used[i].into(); - } - } - - // Contrain memory access for the 2nd half of the memory accesses. - { - let compress_workspace = opcode_workspace.compress(); - - // Verify the start addr. - let is_compress_syscall = control_flow.is_compress * control_flow.is_syscall_row; - builder - .when(is_compress_syscall.clone()) - .assert_eq(compress_workspace.start_addr, syscall_params.compress().right_ptr); - builder.when(control_flow.is_compress_output).assert_eq( - compress_workspace.start_addr, - syscall_params.compress().dst_ptr + AB::Expr::from_canonical_usize(WIDTH / 2), - ); - - let mut addr: AB::Expr = compress_workspace.start_addr.into(); - for i in 0..WIDTH / 2 { - builder.recursion_eval_memory_access_single( - clk + control_flow.is_compress_output, - addr.clone(), - &compress_workspace.memory_accesses[i], - second_half_memory_access, - ); - - // For read only accesses, assert the value didn't change. - builder.when(is_compress_syscall.clone()).assert_eq( - *compress_workspace.memory_accesses[i].prev_value(), - *compress_workspace.memory_accesses[i].value(), - ); - - addr = addr.clone() + AB::Expr::one(); - } - } - } - - fn eval_absorb_memory_slots( - &self, - builder: &mut AB, - control_flow: &ControlFlow, - local_memory: &Memory, - opcode_workspace: &OpcodeWorkspace, - ) { - // To verify that the absorb memory slots are correct, we take the derivative of the memory - // slots, (e.g. memory_slot_used[i] - memory_slot_used[i - 1]), and assert the - // following: - // 1) when start_mem_idx_bitmap[i] == 1 -> derivative == 1 - // 2) when end_mem_idx_bitmap[i + 1] == 1 -> derivative == -1 - // 3) when start_mem_idx_bitmap[i] == 0 and end_mem_idx_bitmap[i + 1] == 0 -> derivative == - // 0 - let mut absorb_builder = builder.when(control_flow.is_absorb); - - let start_mem_idx_bitmap = opcode_workspace.absorb().start_mem_idx_bitmap; - let end_mem_idx_bitmap = opcode_workspace.absorb().end_mem_idx_bitmap; - for i in 0..WIDTH / 2 { - let derivative: AB::Expr = if i == 0 { - local_memory.memory_slot_used[i].into() - } else { - local_memory.memory_slot_used[i] - local_memory.memory_slot_used[i - 1] - }; - - let is_start_mem_idx = start_mem_idx_bitmap[i].into(); - - let is_previous_end_mem_idx = - if i == 0 { AB::Expr::zero() } else { end_mem_idx_bitmap[i - 1].into() }; - - absorb_builder.when(is_start_mem_idx.clone()).assert_one(derivative.clone()); - - absorb_builder - .when(is_previous_end_mem_idx.clone()) - .assert_zero(derivative.clone() + AB::Expr::one()); - - absorb_builder - .when_not(is_start_mem_idx + is_previous_end_mem_idx) - .assert_zero(derivative); - } - - // Verify that all elements of start_mem_idx_bitmap and end_mem_idx_bitmap are bool. - // Also verify that exactly one of the bits in start_mem_idx_bitmap and end_mem_idx_bitmap - // is one. 
- let mut start_mem_idx_bitmap_sum = AB::Expr::zero(); - start_mem_idx_bitmap.iter().for_each(|bit| { - absorb_builder.assert_bool(*bit); - start_mem_idx_bitmap_sum += (*bit).into(); - }); - absorb_builder.assert_one(start_mem_idx_bitmap_sum); - - let mut end_mem_idx_bitmap_sum = AB::Expr::zero(); - end_mem_idx_bitmap.iter().for_each(|bit| { - absorb_builder.assert_bool(*bit); - end_mem_idx_bitmap_sum += (*bit).into(); - }); - absorb_builder.assert_one(end_mem_idx_bitmap_sum); - - // Verify correct value of start_mem_idx_bitmap and end_mem_idx_bitmap. - let start_mem_idx: AB::Expr = start_mem_idx_bitmap - .iter() - .enumerate() - .map(|(i, bit)| AB::Expr::from_canonical_usize(i) * *bit) - .sum(); - absorb_builder.assert_eq(start_mem_idx, opcode_workspace.absorb().state_cursor); - - let end_mem_idx: AB::Expr = end_mem_idx_bitmap - .iter() - .enumerate() - .map(|(i, bit)| AB::Expr::from_canonical_usize(i) * *bit) - .sum(); - - // When we are not in the last row, end_mem_idx should be zero. - absorb_builder - .when_not(opcode_workspace.absorb().is_last_row::()) - .assert_zero(end_mem_idx.clone() - AB::Expr::from_canonical_usize(7)); - - // When we are in the last row, end_mem_idx bitmap should equal last_row_ending_cursor. - absorb_builder - .when(opcode_workspace.absorb().is_last_row::()) - .assert_eq(end_mem_idx, opcode_workspace.absorb().last_row_ending_cursor); - } -} diff --git a/crates/recursion/core/src/poseidon2_wide/air/mod.rs b/crates/recursion/core/src/poseidon2_wide/air/mod.rs deleted file mode 100644 index c121103e50..0000000000 --- a/crates/recursion/core/src/poseidon2_wide/air/mod.rs +++ /dev/null @@ -1,203 +0,0 @@ -//! The air module contains the AIR constraints for the poseidon2 chip. Those constraints will -//! enforce the following properties: -//! -//! # Layout of the poseidon2 chip: -//! -//! All the hash related rows should be in the first part of the chip and all the compress -//! related rows in the second part. E.g. the chip should have this format: -//! -//! absorb row (for hash num 1) -//! absorb row (for hash num 1) -//! absorb row (for hash num 1) -//! finalize row (for hash num 1) -//! absorb row (for hash num 2) -//! absorb row (for hash num 2) -//! finalize row (for hash num 2) -//! . -//! . -//! . -//! compress syscall/input row -//! compress output row -//! -//! # Absorb rows -//! -//! For absorb rows, the AIR needs to ensure that all of the input is written into the hash state -//! and that its written into the correct parts of that state. To do this, the AIR will first -//! ensure the correct values for num_remaining_rows (e.g. total number of rows of an absorb -//! syscall) and the last_row_ending_cursor. It does this by checking the following: -//! -//! 1. start_state_cursor + syscall_input_len == num_remaining_rows * RATE + last_row_ending_cursor -//! 2. range check syscall_input_len to be [0, 2^16 - 1] -//! 3. range check last_row_ending_cursor to be [0, RATE] -//! -//! For all subsequent absorb rows, the num_remaining_rows will be decremented by 1, and the -//! last_row_ending_cursor will be copied down to all of the rows. Also, for the next -//! absorb/finalize syscall, its state_cursor is set to (last_row_ending_cursor + 1) % RATE. -//! -//! From num_remaining_rows and syscall column, we know the absorb's first row and last row. -//! From that fact, we can then enforce the following state writes. -//! -//! 1. is_first_row && is_last_row -> state writes are [state_cursor..state_cursor + -//! last_row_ending_cursor] -//! 2. 
is_first_row && !is_last_row -> state writes are [state_cursor..RATE - 1] -//! 3. !is_first_row && !is_last_row -> state writes are [0..RATE - 1] -//! 4. !is_first_row && is_last_row -> state writes are [0..last_row_ending_cursor] -//! -//! From the state writes range, we can then populate a bitmap that specifies which state elements -//! should be overwritten (stored in Memory.memory_slot_used columns). To verify that this bitmap -//! is correct, we utilize the column's derivative (memory_slot_used[i] - memory_slot_used[i-1], -//! where memory_slot_used[-1] is 0). -//! -//! 1. When idx == state write start_idx -> derivative == 1 -//! 2. When idx == (state write end_idx - 1) -> derivative == -1 -//! 3. For all other cases, derivative == 0 -//! -//! In addition to determining the hash state writes, the AIR also needs to ensure that the do_perm -//! flag is correct (which is used to determine if a permutation should be done). It does this -//! by enforcing the following. -//! -//! 1. is_first_row && !is_last_row -> do_perm == 1 -//! 2. !is_first_row && !is_last_row -> do_perm == 1 -//! 3. is_last_row && last_row_ending_cursor == RATE - 1 -> do_perm == 1 -//! 4. is_last_row && last_row_ending_cursor != RATE - 1 -> do_perm == 0 -//! -//! # Finalize rows -//! -//! For finalize, the main flag that needs to be checked is do_perm. If state_cursor == 0, then -//! do_perm should be 0, otherwise it should be 1. If state_cursor == 0, that means that the -//! previous row did a perm. -//! -//! # Compress rows -//! -//! For compress, the main invariants that needs to be checked is that all syscall compress rows -//! verifies the correct memory read accesses, does the permutation, and copies the permuted value -//! into the next row. That row should then verify the correct memory write accesses. - -use p3_air::{Air, BaseAir}; -use p3_matrix::Matrix; - -use crate::air::SP1RecursionAirBuilder; - -pub mod control_flow; -pub mod memory; -pub mod permutation; -pub mod state_transition; -pub mod syscall_params; - -use super::{ - columns::{Poseidon2, NUM_POSEIDON2_DEGREE3_COLS, NUM_POSEIDON2_DEGREE9_COLS}, - Poseidon2WideChip, WIDTH, -}; - -impl BaseAir for Poseidon2WideChip { - fn width(&self) -> usize { - if DEGREE == 3 { - NUM_POSEIDON2_DEGREE3_COLS - } else if DEGREE == 9 || DEGREE == 17 { - NUM_POSEIDON2_DEGREE9_COLS - } else { - panic!("Unsupported degree: {}", DEGREE); - } - } -} - -impl Air for Poseidon2WideChip -where - AB: SP1RecursionAirBuilder, - AB::Var: 'static, -{ - fn eval(&self, builder: &mut AB) { - let main = builder.main(); - let local_row = Self::convert::(main.row_slice(0)); - let next_row = Self::convert::(main.row_slice(1)); - - // Dummy constraints to normalize to DEGREE. 
- let lhs = - (0..DEGREE).map(|_| local_row.control_flow().is_compress.into()).product::(); - let rhs = - (0..DEGREE).map(|_| local_row.control_flow().is_compress.into()).product::(); - builder.assert_eq(lhs, rhs); - - self.eval_poseidon2( - builder, - local_row.as_ref(), - next_row.as_ref(), - local_row.control_flow().is_syscall_row, - local_row.memory().memory_slot_used, - local_row.control_flow().is_compress, - local_row.control_flow().is_absorb, - ); - } -} - -impl Poseidon2WideChip { - #[allow(clippy::too_many_arguments)] - pub(crate) fn eval_poseidon2( - &self, - builder: &mut AB, - local_row: &dyn Poseidon2, - next_row: &dyn Poseidon2, - receive_syscall: AB::Var, - first_half_memory_access: [AB::Var; WIDTH / 2], - second_half_memory_access: AB::Var, - send_range_check: AB::Var, - ) where - AB: SP1RecursionAirBuilder, - AB::Var: 'static, - { - let local_control_flow = local_row.control_flow(); - let next_control_flow = next_row.control_flow(); - let local_syscall = local_row.syscall_params(); - let next_syscall = next_row.syscall_params(); - let local_memory = local_row.memory(); - let next_memory = next_row.memory(); - let local_perm = local_row.permutation(); - let local_opcode_workspace = local_row.opcode_workspace(); - let next_opcode_workspace = next_row.opcode_workspace(); - - // Check that all the control flow columns are correct. - self.eval_control_flow(builder, local_row, next_row, send_range_check); - - // Check that the syscall columns are correct. - self.eval_syscall_params( - builder, - local_syscall, - next_syscall, - local_control_flow, - next_control_flow, - receive_syscall, - ); - - // Check that all the memory access columns are correct. - self.eval_mem( - builder, - local_syscall, - local_memory, - next_memory, - local_opcode_workspace, - local_control_flow, - first_half_memory_access, - second_half_memory_access, - ); - - // Check that the permutation columns are correct. - self.eval_perm( - builder, - local_perm.as_ref(), - local_memory, - local_opcode_workspace, - local_control_flow, - ); - - // Check that the permutation output is copied to the next row correctly. - self.eval_state_transition( - builder, - local_control_flow, - local_opcode_workspace, - next_opcode_workspace, - local_perm.as_ref(), - local_memory, - next_memory, - ); - } -} diff --git a/crates/recursion/core/src/poseidon2_wide/air/permutation.rs b/crates/recursion/core/src/poseidon2_wide/air/permutation.rs deleted file mode 100644 index 673a5c50dc..0000000000 --- a/crates/recursion/core/src/poseidon2_wide/air/permutation.rs +++ /dev/null @@ -1,171 +0,0 @@ -use std::array; - -use p3_field::AbstractField; -use sp1_primitives::RC_16_30_U32; - -use crate::{ - air::SP1RecursionAirBuilder, - memory::MemoryCols, - poseidon2_wide::{ - columns::{ - control_flow::ControlFlow, memory::Memory, opcode_workspace::OpcodeWorkspace, - permutation::Permutation, - }, - external_linear_layer, internal_linear_layer, Poseidon2WideChip, NUM_EXTERNAL_ROUNDS, - NUM_INTERNAL_ROUNDS, WIDTH, - }, -}; - -impl Poseidon2WideChip { - pub(crate) fn eval_perm( - &self, - builder: &mut AB, - perm_cols: &dyn Permutation, - memory: &Memory, - opcode_workspace: &OpcodeWorkspace, - control_flow: &ControlFlow, - ) { - // Construct the input array of the permutation. That array is dependent on the row type. - // For compress_syscall rows, the input is from the memory access values. For absorb, the - // input is the previous state, with select elements being read from the memory access - // values. 
For finalize, the input is the previous state. - let input: [AB::Expr; WIDTH] = array::from_fn(|i| { - let previous_state = opcode_workspace.absorb().previous_state[i]; - - let (compress_input, absorb_input, finalize_input) = if i < WIDTH / 2 { - let mem_value = *memory.memory_accesses[i].value(); - - let compress_input = mem_value; - let absorb_input = - builder.if_else(memory.memory_slot_used[i], mem_value, previous_state); - let finalize_input = previous_state.into(); - - (compress_input, absorb_input, finalize_input) - } else { - let compress_input = - *opcode_workspace.compress().memory_accesses[i - WIDTH / 2].value(); - let absorb_input = previous_state.into(); - let finalize_input = previous_state.into(); - - (compress_input, absorb_input, finalize_input) - }; - - control_flow.is_compress * compress_input - + control_flow.is_absorb * absorb_input - + control_flow.is_finalize * finalize_input - }); - - // Apply the initial round. - let initial_round_output = { - let mut initial_round_output = input; - external_linear_layer(&mut initial_round_output); - initial_round_output - }; - let external_round_0_state: [AB::Expr; WIDTH] = core::array::from_fn(|i| { - let state = perm_cols.external_rounds_state()[0]; - state[i].into() - }); - - builder.assert_all_eq(external_round_0_state.clone(), initial_round_output); - - // Apply the first half of external rounds. - for r in 0..NUM_EXTERNAL_ROUNDS / 2 { - self.eval_external_round(builder, perm_cols, r); - } - - // Apply the internal rounds. - self.eval_internal_rounds(builder, perm_cols); - - // Apply the second half of external rounds. - for r in NUM_EXTERNAL_ROUNDS / 2..NUM_EXTERNAL_ROUNDS { - self.eval_external_round(builder, perm_cols, r); - } - } - - fn eval_external_round( - &self, - builder: &mut AB, - perm_cols: &dyn Permutation, - r: usize, - ) { - let external_state = perm_cols.external_rounds_state()[r]; - - // Add the round constants. - let round = if r < NUM_EXTERNAL_ROUNDS / 2 { r } else { r + NUM_INTERNAL_ROUNDS }; - let add_rc: [AB::Expr; WIDTH] = core::array::from_fn(|i| { - external_state[i].into() + AB::F::from_wrapped_u32(RC_16_30_U32[round][i]) - }); - - // Apply the sboxes. - // See `populate_external_round` for why we don't have columns for the sbox output here. - let mut sbox_deg_7: [AB::Expr; WIDTH] = core::array::from_fn(|_| AB::Expr::zero()); - let mut sbox_deg_3: [AB::Expr; WIDTH] = core::array::from_fn(|_| AB::Expr::zero()); - for i in 0..WIDTH { - let calculated_sbox_deg_3 = add_rc[i].clone() * add_rc[i].clone() * add_rc[i].clone(); - - if let Some(external_sbox) = perm_cols.external_rounds_sbox() { - builder.assert_eq(external_sbox[r][i].into(), calculated_sbox_deg_3); - sbox_deg_3[i] = external_sbox[r][i].into(); - } else { - sbox_deg_3[i] = calculated_sbox_deg_3; - } - - sbox_deg_7[i] = sbox_deg_3[i].clone() * sbox_deg_3[i].clone() * add_rc[i].clone(); - } - - // Apply the linear layer. 
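The S-box handling above commits a column for x^3 only when the degree budget requires it and reconstitutes x^7 as (x^3)^2 * x inside the constraint. A standalone check of that identity over the BabyBear prime 2^31 - 2^27 + 1, using plain integers instead of the field types from the deleted code:

```rust
/// BabyBear prime modulus used by the recursion chips.
const P: u128 = 2013265921;

fn pow_mod(mut base: u128, mut exp: u32) -> u128 {
    let mut acc = 1u128;
    base %= P;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % P;
        }
        base = base * base % P;
        exp >>= 1;
    }
    acc
}

fn main() {
    // x^7 == (x^3)^2 * x (mod P): only x^3 needs its own column, since squaring
    // it and multiplying by x keeps the remaining constraint degree low.
    for x in [0u128, 1, 2, 5, 123_456_789, P - 1] {
        let cube = pow_mod(x, 3);
        assert_eq!(pow_mod(x, 7), cube * cube % P * (x % P) % P);
    }
    println!("x^7 = (x^3)^2 * x verified mod BabyBear");
}
```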
- let mut state = sbox_deg_7; - external_linear_layer(&mut state); - - let next_state_cols = if r == NUM_EXTERNAL_ROUNDS / 2 - 1 { - perm_cols.internal_rounds_state() - } else if r == NUM_EXTERNAL_ROUNDS - 1 { - perm_cols.perm_output() - } else { - &perm_cols.external_rounds_state()[r + 1] - }; - for i in 0..WIDTH { - builder.assert_eq(next_state_cols[i], state[i].clone()); - } - } - - fn eval_internal_rounds( - &self, - builder: &mut AB, - perm_cols: &dyn Permutation, - ) { - let state = &perm_cols.internal_rounds_state(); - let s0 = perm_cols.internal_rounds_s0(); - let mut state: [AB::Expr; WIDTH] = core::array::from_fn(|i| state[i].into()); - for r in 0..NUM_INTERNAL_ROUNDS { - // Add the round constant. - let round = r + NUM_EXTERNAL_ROUNDS / 2; - let add_rc = if r == 0 { state[0].clone() } else { s0[r - 1].into() } - + AB::Expr::from_wrapped_u32(RC_16_30_U32[round][0]); - - let mut sbox_deg_3 = add_rc.clone() * add_rc.clone() * add_rc.clone(); - if let Some(internal_sbox) = perm_cols.internal_rounds_sbox() { - builder.assert_eq(internal_sbox[r], sbox_deg_3); - sbox_deg_3 = internal_sbox[r].into(); - } - - // See `populate_internal_rounds` for why we don't have columns for the sbox output - // here. - let sbox_deg_7 = sbox_deg_3.clone() * sbox_deg_3.clone() * add_rc.clone(); - - // Apply the linear layer. - // See `populate_internal_rounds` for why we don't have columns for the new state here. - state[0] = sbox_deg_7.clone(); - internal_linear_layer(&mut state); - - if r < NUM_INTERNAL_ROUNDS - 1 { - builder.assert_eq(s0[r], state[0].clone()); - } - } - - let external_state = perm_cols.external_rounds_state()[NUM_EXTERNAL_ROUNDS / 2]; - for i in 0..WIDTH { - builder.assert_eq(external_state[i], state[i].clone()) - } - } -} diff --git a/crates/recursion/core/src/poseidon2_wide/air/state_transition.rs b/crates/recursion/core/src/poseidon2_wide/air/state_transition.rs deleted file mode 100644 index 0780245b6a..0000000000 --- a/crates/recursion/core/src/poseidon2_wide/air/state_transition.rs +++ /dev/null @@ -1,119 +0,0 @@ -use std::array; - -use p3_air::AirBuilder; -use sp1_stark::air::BaseAirBuilder; - -use crate::{ - air::SP1RecursionAirBuilder, - memory::MemoryCols, - poseidon2_wide::{ - columns::{ - control_flow::ControlFlow, memory::Memory, opcode_workspace::OpcodeWorkspace, - permutation::Permutation, - }, - Poseidon2WideChip, WIDTH, - }, - runtime::DIGEST_SIZE, -}; - -impl Poseidon2WideChip { - #[allow(clippy::too_many_arguments)] - pub(crate) fn eval_state_transition( - &self, - builder: &mut AB, - control_flow: &ControlFlow, - local_opcode_workspace: &OpcodeWorkspace, - next_opcode_workspace: &OpcodeWorkspace, - permutation: &dyn Permutation, - local_memory: &Memory, - next_memory: &Memory, - ) { - // For compress syscall rows, verify that the permutation output's state is equal to - // the compress output memory values. - { - let compress_output_mem_values: [AB::Var; WIDTH] = array::from_fn(|i| { - if i < WIDTH / 2 { - *next_memory.memory_accesses[i].value() - } else { - *next_opcode_workspace.compress().memory_accesses[i - WIDTH / 2].value() - } - }); - - builder - .when_transition() - .when(control_flow.is_compress) - .when(control_flow.is_syscall_row) - .assert_all_eq(compress_output_mem_values, *permutation.perm_output()); - } - - // Absorb rows. - { - // Check that the state is zero on the first_hash_row. 
- builder - .when(control_flow.is_absorb) - .when(local_opcode_workspace.absorb().is_first_hash_row) - .assert_all_zero(local_opcode_workspace.absorb().previous_state); - - // Check that the state is equal to the permutation output when the permutation is - // applied. - builder - .when(control_flow.is_absorb) - .when(local_opcode_workspace.absorb().do_perm::()) - .assert_all_eq(local_opcode_workspace.absorb().state, *permutation.perm_output()); - - // Construct the input into the permutation. - let input: [AB::Expr; WIDTH] = array::from_fn(|i| { - if i < WIDTH / 2 { - builder.if_else( - local_memory.memory_slot_used[i], - *local_memory.memory_accesses[i].value(), - local_opcode_workspace.absorb().previous_state[i], - ) - } else { - local_opcode_workspace.absorb().previous_state[i].into() - } - }); - - // Check that the state is equal the the permutation input when the permutation is not - // applied. - builder - .when(control_flow.is_absorb_no_perm) - .assert_all_eq(local_opcode_workspace.absorb().state, input); - - // Check that the state is copied to the next row. - builder.when_transition().when(control_flow.is_absorb).assert_all_eq( - local_opcode_workspace.absorb().state, - next_opcode_workspace.absorb().previous_state, - ); - } - - // Finalize rows. - { - // Check that the state is equal to the permutation output when the permutation is - // applied. - builder - .when(control_flow.is_finalize) - .when(local_opcode_workspace.finalize().do_perm::()) - .assert_all_eq(local_opcode_workspace.finalize().state, *permutation.perm_output()); - - // Check that the state is equal to the previous state when the permutation is not - // applied. - builder - .when(control_flow.is_finalize) - .when_not(local_opcode_workspace.finalize().do_perm::()) - .assert_all_eq( - local_opcode_workspace.finalize().state, - local_opcode_workspace.finalize().previous_state, - ); - - // Check that the finalize memory values are equal to the state. - let output_mem_values: [AB::Var; DIGEST_SIZE] = - array::from_fn(|i| *local_memory.memory_accesses[i].value()); - - builder.when(control_flow.is_finalize).assert_all_eq( - output_mem_values, - local_opcode_workspace.finalize().state[0..DIGEST_SIZE].to_vec(), - ); - } - } -} diff --git a/crates/recursion/core/src/poseidon2_wide/air/syscall_params.rs b/crates/recursion/core/src/poseidon2_wide/air/syscall_params.rs deleted file mode 100644 index c33e99fa5e..0000000000 --- a/crates/recursion/core/src/poseidon2_wide/air/syscall_params.rs +++ /dev/null @@ -1,81 +0,0 @@ -use p3_air::AirBuilder; -use sp1_stark::air::BaseAirBuilder; - -use crate::{ - air::SP1RecursionAirBuilder, - poseidon2_wide::{ - columns::{control_flow::ControlFlow, syscall_params::SyscallParams}, - Poseidon2WideChip, - }, - runtime::Opcode, -}; - -impl Poseidon2WideChip { - /// Eval the syscall parameters. - pub(crate) fn eval_syscall_params( - &self, - builder: &mut AB, - local_syscall: &SyscallParams, - next_syscall: &SyscallParams, - local_control_flow: &ControlFlow, - next_control_flow: &ControlFlow, - receive_syscall: AB::Var, - ) { - // Constraint that the operands are sent from the CPU table. 
- let params = local_syscall.get_raw_params(); - let opcodes: [AB::Expr; 3] = - [Opcode::Poseidon2Compress, Opcode::Poseidon2Absorb, Opcode::Poseidon2Finalize] - .map(|x| x.as_field::().into()); - let opcode_selectors = [ - local_control_flow.is_compress, - local_control_flow.is_absorb, - local_control_flow.is_finalize, - ]; - - let used_opcode: AB::Expr = opcodes - .iter() - .zip(opcode_selectors.iter()) - .map(|(opcode, opcode_selector)| opcode.clone() * *opcode_selector) - .sum(); - - builder.receive_table(used_opcode, ¶ms, receive_syscall); - - let mut transition_builder = builder.when_transition(); - - // Verify that the syscall parameters are copied to the compress output row. - { - let mut compress_syscall_builder = transition_builder - .when(local_control_flow.is_compress * local_control_flow.is_syscall_row); - - let local_syscall_params = local_syscall.compress(); - let next_syscall_params = next_syscall.compress(); - compress_syscall_builder.assert_eq(local_syscall_params.clk, next_syscall_params.clk); - compress_syscall_builder - .assert_eq(local_syscall_params.dst_ptr, next_syscall_params.dst_ptr); - compress_syscall_builder - .assert_eq(local_syscall_params.left_ptr, next_syscall_params.left_ptr); - compress_syscall_builder - .assert_eq(local_syscall_params.right_ptr, next_syscall_params.right_ptr); - } - - // Verify that the syscall parameters are copied down to all the non syscall absorb rows. - { - let mut absorb_syscall_builder = transition_builder.when(local_control_flow.is_absorb); - let mut absorb_syscall_builder = - absorb_syscall_builder.when_not(next_control_flow.is_syscall_row); - - let local_syscall_params = local_syscall.absorb(); - let next_syscall_params = next_syscall.absorb(); - - absorb_syscall_builder.assert_eq(local_syscall_params.clk, next_syscall_params.clk); - absorb_syscall_builder.assert_eq( - local_syscall_params.hash_and_absorb_num, - next_syscall_params.hash_and_absorb_num, - ); - absorb_syscall_builder - .assert_eq(local_syscall_params.input_ptr, next_syscall_params.input_ptr); - absorb_syscall_builder - .assert_eq(local_syscall_params.input_len, next_syscall_params.input_len); - } - } -} diff --git a/crates/recursion/core/src/poseidon2_wide/columns/control_flow.rs b/crates/recursion/core/src/poseidon2_wide/columns/control_flow.rs deleted file mode 100644 index 06b13534b4..0000000000 --- a/crates/recursion/core/src/poseidon2_wide/columns/control_flow.rs +++ /dev/null @@ -1,26 +0,0 @@ -use sp1_derive::AlignedBorrow; - -/// Columns related to control flow. -#[derive(AlignedBorrow, Clone, Copy, Debug)] -#[repr(C)] -pub struct ControlFlow { - /// Specifies if this row is for compress. - pub is_compress: T, - /// Specifies if this row is for the compress output. - pub is_compress_output: T, - - /// Specifies if this row is for absorb. - pub is_absorb: T, - /// Specifies if this row is for absorb with no permutation. - pub is_absorb_no_perm: T, - /// Specifies if this row is for an absorb that is not the last row. - pub is_absorb_not_last_row: T, - /// Specifies if this row is for an absorb that is the last row. - pub is_absorb_last_row: T, - - /// Specifies if this row is for finalize. - pub is_finalize: T, - - /// Specifies if this row needs to recieve a syscall interaction. 
- pub is_syscall_row: T, -} diff --git a/crates/recursion/core/src/poseidon2_wide/columns/memory.rs b/crates/recursion/core/src/poseidon2_wide/columns/memory.rs deleted file mode 100644 index 63b62783ad..0000000000 --- a/crates/recursion/core/src/poseidon2_wide/columns/memory.rs +++ /dev/null @@ -1,17 +0,0 @@ -use sp1_derive::AlignedBorrow; - -use crate::{memory::MemoryReadWriteSingleCols, poseidon2_wide::WIDTH}; - -/// This struct is the columns for the WIDTH/2 sequential memory slots. -/// For compress rows, this is used for the first half of read/write from the permutation state. -/// For hash related rows, this is reading absorb input and writing finalize output. -#[derive(AlignedBorrow, Clone, Copy, Debug)] -#[repr(C)] -pub struct Memory { - /// The first address of the memory sequence. - pub start_addr: T, - /// Bitmap if whether the memory address is accessed. This is set to all 1 for compress and - /// finalize rows. - pub memory_slot_used: [T; WIDTH / 2], - pub memory_accesses: [MemoryReadWriteSingleCols; WIDTH / 2], -} diff --git a/crates/recursion/core/src/poseidon2_wide/columns/mod.rs b/crates/recursion/core/src/poseidon2_wide/columns/mod.rs deleted file mode 100644 index 886002d378..0000000000 --- a/crates/recursion/core/src/poseidon2_wide/columns/mod.rs +++ /dev/null @@ -1,249 +0,0 @@ -use std::mem::{size_of, transmute}; - -use sp1_core_machine::utils::indices_arr; -use sp1_derive::AlignedBorrow; - -use self::{ - control_flow::ControlFlow, - memory::Memory, - opcode_workspace::OpcodeWorkspace, - permutation::{Permutation, PermutationNoSbox, PermutationSBox}, - syscall_params::SyscallParams, -}; - -use super::WIDTH; - -pub mod control_flow; -pub mod memory; -pub mod opcode_workspace; -pub mod permutation; -pub mod syscall_params; - -/// Trait for getter methods for Poseidon2 columns. -pub trait Poseidon2<'a, T: Copy + 'a> { - fn control_flow(&self) -> &ControlFlow; - - fn syscall_params(&self) -> &SyscallParams; - - fn memory(&self) -> &Memory; - - fn opcode_workspace(&self) -> &OpcodeWorkspace; - - fn permutation(&self) -> Box + 'a>; -} - -/// Trait for setter methods for Poseidon2 columns. -pub trait Poseidon2Mut<'a, T: Copy + 'a> { - fn control_flow_mut(&mut self) -> &mut ControlFlow; - - fn syscall_params_mut(&mut self) -> &mut SyscallParams; - - fn memory_mut(&mut self) -> &mut Memory; - - fn opcode_workspace_mut(&mut self) -> &mut OpcodeWorkspace; -} - -/// Enum to enable dynamic dispatch for the Poseidon2 columns. 
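The `memory_slot_used` bitmap above marks which of the WIDTH/2 = 8 slots a row touches, and the module docs verify it through its discrete derivative: +1 at the first used slot, -1 just past the last one, 0 elsewhere. A plain-Rust sketch of that check, using an inclusive end index as a convention of this example:

```rust
const SLOTS: usize = 8; // WIDTH / 2 in the deleted chip

/// True iff `bitmap` is 1 exactly on the contiguous slots start..=end.
/// With bitmap[-1] = 0, the difference bitmap[i] - bitmap[i - 1] must be +1 at
/// `start`, -1 one past `end`, and 0 everywhere else.
fn is_contiguous_range(bitmap: [i8; SLOTS], start: usize, end: usize) -> bool {
    let mut prev = 0i8;
    for i in 0..SLOTS {
        let derivative = bitmap[i] - prev;
        let expected = if i == start { 1 } else if i == end + 1 { -1 } else { 0 };
        if derivative != expected {
            return false;
        }
        prev = bitmap[i];
    }
    true
}

fn main() {
    // An absorb row writing slots 2..=4.
    assert!(is_contiguous_range([0, 0, 1, 1, 1, 0, 0, 0], 2, 4));
    // A gap in the bitmap is rejected.
    assert!(!is_contiguous_range([0, 1, 0, 1, 1, 0, 0, 0], 1, 4));
    // Compress and finalize rows use all slots.
    assert!(is_contiguous_range([1; SLOTS], 0, SLOTS - 1));
    println!("memory_slot_used derivative checks passed");
}
```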
-#[allow(dead_code)] -enum Poseidon2Enum { - P2Degree3(Poseidon2Degree3), - P2Degree9(Poseidon2Degree9), -} - -impl<'a, T: Copy + 'a> Poseidon2<'a, T> for Poseidon2Enum { - // type Perm = PermutationSBox; - - fn control_flow(&self) -> &ControlFlow { - match self { - Poseidon2Enum::P2Degree3(p) => p.control_flow(), - Poseidon2Enum::P2Degree9(p) => p.control_flow(), - } - } - - fn syscall_params(&self) -> &SyscallParams { - match self { - Poseidon2Enum::P2Degree3(p) => p.syscall_params(), - Poseidon2Enum::P2Degree9(p) => p.syscall_params(), - } - } - - fn memory(&self) -> &Memory { - match self { - Poseidon2Enum::P2Degree3(p) => p.memory(), - Poseidon2Enum::P2Degree9(p) => p.memory(), - } - } - - fn opcode_workspace(&self) -> &OpcodeWorkspace { - match self { - Poseidon2Enum::P2Degree3(p) => p.opcode_workspace(), - Poseidon2Enum::P2Degree9(p) => p.opcode_workspace(), - } - } - - fn permutation(&self) -> Box + 'a> { - match self { - Poseidon2Enum::P2Degree3(p) => p.permutation(), - Poseidon2Enum::P2Degree9(p) => p.permutation(), - } - } -} - -/// Enum to enable dynamic dispatch for the Poseidon2 columns. -#[allow(dead_code)] -enum Poseidon2MutEnum<'a, T: Copy> { - P2Degree3(&'a mut Poseidon2Degree3), - P2Degree9(&'a mut Poseidon2Degree9), -} - -impl<'a, T: Copy + 'a> Poseidon2Mut<'a, T> for Poseidon2MutEnum<'a, T> { - fn control_flow_mut(&mut self) -> &mut ControlFlow { - match self { - Poseidon2MutEnum::P2Degree3(p) => p.control_flow_mut(), - Poseidon2MutEnum::P2Degree9(p) => p.control_flow_mut(), - } - } - - fn syscall_params_mut(&mut self) -> &mut SyscallParams { - match self { - Poseidon2MutEnum::P2Degree3(p) => p.syscall_params_mut(), - Poseidon2MutEnum::P2Degree9(p) => p.syscall_params_mut(), - } - } - - fn memory_mut(&mut self) -> &mut Memory { - match self { - Poseidon2MutEnum::P2Degree3(p) => p.memory_mut(), - Poseidon2MutEnum::P2Degree9(p) => p.memory_mut(), - } - } - - fn opcode_workspace_mut(&mut self) -> &mut OpcodeWorkspace { - match self { - Poseidon2MutEnum::P2Degree3(p) => p.opcode_workspace_mut(), - Poseidon2MutEnum::P2Degree9(p) => p.opcode_workspace_mut(), - } - } -} - -pub const NUM_POSEIDON2_DEGREE3_COLS: usize = size_of::>(); - -const fn make_col_map_degree3() -> Poseidon2Degree3 { - let indices_arr = indices_arr::(); - unsafe { - transmute::<[usize; NUM_POSEIDON2_DEGREE3_COLS], Poseidon2Degree3>(indices_arr) - } -} -pub const POSEIDON2_DEGREE3_COL_MAP: Poseidon2Degree3 = make_col_map_degree3(); - -/// Struct for the poseidon2 chip that contains sbox columns. 
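`make_col_map_degree3` and `make_col_map_degree9` above obtain column offsets by transmuting an array of running indices into the repr(C) column struct, so each field ends up holding its own position in a flat trace row. A reduced illustration of the pattern; `DemoCols` is a hypothetical stand-in for the real structs, and the index array is built by hand instead of with `indices_arr`:

```rust
use std::mem::{size_of, transmute};

// A toy repr(C) column struct; the layout guarantee is what makes the
// index-array transmute sound.
#[repr(C)]
#[derive(Debug)]
struct DemoCols<T> {
    is_real: T,
    state: [T; 3],
}

// Number of columns, read off the byte size of the u8 instantiation.
const NUM_DEMO_COLS: usize = size_of::<DemoCols<u8>>();

fn make_col_map() -> DemoCols<usize> {
    // 0, 1, 2, ... one index per column.
    let mut indices = [0usize; NUM_DEMO_COLS];
    for (i, idx) in indices.iter_mut().enumerate() {
        *idx = i;
    }
    // Reinterpret the flat index array as the column struct: every field now
    // stores the offset of that column within a trace row.
    unsafe { transmute::<[usize; NUM_DEMO_COLS], DemoCols<usize>>(indices) }
}

fn main() {
    let map = make_col_map();
    assert_eq!(map.is_real, 0);
    assert_eq!(map.state, [1, 2, 3]);
    println!("column map: {:?}", map);
}
```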
-#[derive(AlignedBorrow, Clone, Copy)] -#[repr(C)] -pub struct Poseidon2Degree3 { - pub control_flow: ControlFlow, - pub syscall_input: SyscallParams, - pub memory: Memory, - pub opcode_specific_cols: OpcodeWorkspace, - pub permutation_cols: PermutationSBox, - pub state_cursor: [T; WIDTH / 2], // Only used for absorb -} - -impl<'a, T: Copy + 'a> Poseidon2<'a, T> for Poseidon2Degree3 { - fn control_flow(&self) -> &ControlFlow { - &self.control_flow - } - - fn syscall_params(&self) -> &SyscallParams { - &self.syscall_input - } - - fn memory(&self) -> &Memory { - &self.memory - } - - fn opcode_workspace(&self) -> &OpcodeWorkspace { - &self.opcode_specific_cols - } - - fn permutation(&self) -> Box + 'a> { - Box::new(self.permutation_cols) - } -} - -impl<'a, T: Copy + 'a> Poseidon2Mut<'a, T> for &'a mut Poseidon2Degree3 { - fn control_flow_mut(&mut self) -> &mut ControlFlow { - &mut self.control_flow - } - - fn syscall_params_mut(&mut self) -> &mut SyscallParams { - &mut self.syscall_input - } - - fn memory_mut(&mut self) -> &mut Memory { - &mut self.memory - } - - fn opcode_workspace_mut(&mut self) -> &mut OpcodeWorkspace { - &mut self.opcode_specific_cols - } -} - -pub const NUM_POSEIDON2_DEGREE9_COLS: usize = size_of::>(); -const fn make_col_map_degree9() -> Poseidon2Degree9 { - let indices_arr = indices_arr::(); - unsafe { - transmute::<[usize; NUM_POSEIDON2_DEGREE9_COLS], Poseidon2Degree9>(indices_arr) - } -} -pub const POSEIDON2_DEGREE9_COL_MAP: Poseidon2Degree9 = make_col_map_degree9(); - -/// Struct for the poseidon2 chip that doesn't contain sbox columns. -#[derive(AlignedBorrow, Clone, Copy)] -#[repr(C)] -pub struct Poseidon2Degree9 { - pub control_flow: ControlFlow, - pub syscall_input: SyscallParams, - pub memory: Memory, - pub opcode_specific_cols: OpcodeWorkspace, - pub permutation_cols: PermutationNoSbox, -} - -impl<'a, T: Copy + 'a> Poseidon2<'a, T> for Poseidon2Degree9 { - fn control_flow(&self) -> &ControlFlow { - &self.control_flow - } - - fn syscall_params(&self) -> &SyscallParams { - &self.syscall_input - } - - fn memory(&self) -> &Memory { - &self.memory - } - - fn opcode_workspace(&self) -> &OpcodeWorkspace { - &self.opcode_specific_cols - } - - fn permutation(&self) -> Box + 'a> { - Box::new(self.permutation_cols) - } -} - -impl<'a, T: Copy + 'a> Poseidon2Mut<'a, T> for &'a mut Poseidon2Degree9 { - fn control_flow_mut(&mut self) -> &mut ControlFlow { - &mut self.control_flow - } - - fn syscall_params_mut(&mut self) -> &mut SyscallParams { - &mut self.syscall_input - } - - fn memory_mut(&mut self) -> &mut Memory { - &mut self.memory - } - - fn opcode_workspace_mut(&mut self) -> &mut OpcodeWorkspace { - &mut self.opcode_specific_cols - } -} diff --git a/crates/recursion/core/src/poseidon2_wide/columns/opcode_workspace.rs b/crates/recursion/core/src/poseidon2_wide/columns/opcode_workspace.rs deleted file mode 100644 index a208065f21..0000000000 --- a/crates/recursion/core/src/poseidon2_wide/columns/opcode_workspace.rs +++ /dev/null @@ -1,147 +0,0 @@ -use p3_field::AbstractField; -use sp1_core_machine::operations::IsZeroOperation; -use sp1_derive::AlignedBorrow; - -use crate::{ - air::SP1RecursionAirBuilder, - memory::MemoryReadWriteSingleCols, - poseidon2_wide::{RATE, WIDTH}, -}; - -/// Workspace columns. They are different for each opcode. -#[derive(AlignedBorrow, Clone, Copy)] -#[repr(C)] -pub union OpcodeWorkspace { - compress: CompressWorkspace, - absorb: AbsorbWorkspace, - finalize: FinalizeWorkspace, -} -/// Getter and setter functions for the opcode workspace. 
-impl OpcodeWorkspace { - pub fn compress(&self) -> &CompressWorkspace { - unsafe { &self.compress } - } - - pub fn compress_mut(&mut self) -> &mut CompressWorkspace { - unsafe { &mut self.compress } - } - - pub fn absorb(&self) -> &AbsorbWorkspace { - unsafe { &self.absorb } - } - - pub fn absorb_mut(&mut self) -> &mut AbsorbWorkspace { - unsafe { &mut self.absorb } - } - - pub fn finalize(&self) -> &FinalizeWorkspace { - unsafe { &self.finalize } - } - - pub fn finalize_mut(&mut self) -> &mut FinalizeWorkspace { - unsafe { &mut self.finalize } - } -} - -/// Workspace columns for compress. This is used memory read/writes for the 2nd half of the -/// compress permutation state. -#[derive(AlignedBorrow, Clone, Copy)] -#[repr(C)] -pub struct CompressWorkspace { - pub start_addr: T, - pub memory_accesses: [MemoryReadWriteSingleCols; WIDTH / 2], -} - -/// Workspace columns for absorb. -#[derive(AlignedBorrow, Clone, Copy, Debug)] -#[repr(C)] -pub struct AbsorbWorkspace { - /// State related columns. - pub previous_state: [T; WIDTH], - pub state: [T; WIDTH], - pub state_cursor: T, - - /// Control flow columns. - pub hash_num: T, - pub absorb_num: T, - pub is_first_hash_row: T, - pub num_remaining_rows: T, - pub num_remaining_rows_is_zero: IsZeroOperation, - - /// Memory columns. - pub start_mem_idx_bitmap: [T; WIDTH / 2], - pub end_mem_idx_bitmap: [T; WIDTH / 2], - - /// This is the state index of that last element consumed by the absorb syscall. - pub last_row_ending_cursor: T, - pub last_row_ending_cursor_is_seven: IsZeroOperation, /* Needed when doing the - * (last_row_ending_cursor_is_seven - * + 1) % 8 calculation. */ - pub last_row_ending_cursor_bitmap: [T; 3], - - /// Materialized control flow flags to deal with max contraint degree. - /// Is an absorb syscall row which is not the last row for that absorb. - pub is_syscall_not_last_row: T, - /// Is an absorb syscall row that is the last row for that absorb. - pub is_syscall_is_last_row: T, - /// Is not an absorb syscall row and is not the last row for that absorb. - pub not_syscall_not_last_row: T, - /// Is not an absorb syscall row and is last row for that absorb. - pub not_syscall_is_last_row: T, - /// Is the last of an absorb and the state is filled up (e.g. it's ending cursor is 7). - pub is_last_row_ending_cursor_is_seven: T, - /// Is the last of an absorb and the state is not filled up (e.g. it's ending cursor is not 7). - pub is_last_row_ending_cursor_not_seven: T, -} - -/// Methods that are "virtual" columns (e.g. will return expressions). -impl AbsorbWorkspace { - pub(crate) fn is_last_row(&self) -> AB::Expr - where - T: Into, - { - self.num_remaining_rows_is_zero.result.into() - } - - pub(crate) fn do_perm(&self) -> AB::Expr - where - T: Into, - { - self.is_syscall_not_last_row.into() - + self.not_syscall_not_last_row.into() - + self.is_last_row_ending_cursor_is_seven.into() - } - - pub(crate) fn num_consumed(&self) -> AB::Expr - where - T: Into, - { - self.is_syscall_not_last_row.into() - * (AB::Expr::from_canonical_usize(RATE) - self.state_cursor.into()) - + self.is_syscall_is_last_row.into() - * (self.last_row_ending_cursor.into() - self.state_cursor.into() + AB::Expr::one()) - + self.not_syscall_not_last_row.into() * AB::Expr::from_canonical_usize(RATE) - + self.not_syscall_is_last_row.into() - * (self.last_row_ending_cursor.into() + AB::Expr::one()) - } -} - -/// Workspace columns for finalize. -#[derive(AlignedBorrow, Clone, Copy)] -#[repr(C)] -pub struct FinalizeWorkspace { - /// State related columns. 
- pub previous_state: [T; WIDTH], - pub state: [T; WIDTH], - pub state_cursor: T, - pub state_cursor_is_zero: IsZeroOperation, -} - -impl FinalizeWorkspace { - pub(crate) fn do_perm(&self) -> AB::Expr - where - T: Into, - { - AB::Expr::one() - self.state_cursor_is_zero.result.into() - } -} diff --git a/crates/recursion/core/src/poseidon2_wide/columns/permutation.rs b/crates/recursion/core/src/poseidon2_wide/columns/permutation.rs deleted file mode 100644 index 01db4fbed9..0000000000 --- a/crates/recursion/core/src/poseidon2_wide/columns/permutation.rs +++ /dev/null @@ -1,235 +0,0 @@ -use std::{borrow::BorrowMut, mem::size_of}; - -use sp1_derive::AlignedBorrow; - -use crate::poseidon2_wide::{NUM_EXTERNAL_ROUNDS, NUM_INTERNAL_ROUNDS, WIDTH}; - -use super::{POSEIDON2_DEGREE3_COL_MAP, POSEIDON2_DEGREE9_COL_MAP}; - -/// Trait that describes getter functions for the permutation columns. -pub trait Permutation { - fn external_rounds_state(&self) -> &[[T; WIDTH]]; - - fn internal_rounds_state(&self) -> &[T; WIDTH]; - - fn internal_rounds_s0(&self) -> &[T; NUM_INTERNAL_ROUNDS - 1]; - - fn external_rounds_sbox(&self) -> Option<&[[T; WIDTH]; NUM_EXTERNAL_ROUNDS]>; - - fn internal_rounds_sbox(&self) -> Option<&[T; NUM_INTERNAL_ROUNDS]>; - - fn perm_output(&self) -> &[T; WIDTH]; -} - -/// Trait that describes setter functions for the permutation columns. -pub trait PermutationMut { - #[allow(clippy::type_complexity)] - fn get_cols_mut( - &mut self, - ) -> ( - &mut [[T; WIDTH]], - &mut [T; WIDTH], - &mut [T; NUM_INTERNAL_ROUNDS - 1], - Option<&mut [[T; WIDTH]; NUM_EXTERNAL_ROUNDS]>, - Option<&mut [T; NUM_INTERNAL_ROUNDS]>, - &mut [T; WIDTH], - ); -} - -/// Permutation columns struct with S-boxes. -#[derive(AlignedBorrow, Clone, Copy)] -#[repr(C)] -pub struct PermutationSBox { - pub external_rounds_state: [[T; WIDTH]; NUM_EXTERNAL_ROUNDS], - pub internal_rounds_state: [T; WIDTH], - pub internal_rounds_s0: [T; NUM_INTERNAL_ROUNDS - 1], - pub external_rounds_sbox: [[T; WIDTH]; NUM_EXTERNAL_ROUNDS], - pub internal_rounds_sbox: [T; NUM_INTERNAL_ROUNDS], - pub output_state: [T; WIDTH], -} - -impl Permutation for PermutationSBox { - fn external_rounds_state(&self) -> &[[T; WIDTH]] { - &self.external_rounds_state - } - - fn internal_rounds_state(&self) -> &[T; WIDTH] { - &self.internal_rounds_state - } - - fn internal_rounds_s0(&self) -> &[T; NUM_INTERNAL_ROUNDS - 1] { - &self.internal_rounds_s0 - } - - fn external_rounds_sbox(&self) -> Option<&[[T; WIDTH]; NUM_EXTERNAL_ROUNDS]> { - Some(&self.external_rounds_sbox) - } - - fn internal_rounds_sbox(&self) -> Option<&[T; NUM_INTERNAL_ROUNDS]> { - Some(&self.internal_rounds_sbox) - } - - fn perm_output(&self) -> &[T; WIDTH] { - &self.output_state - } -} - -impl PermutationMut for &mut PermutationSBox { - fn get_cols_mut( - &mut self, - ) -> ( - &mut [[T; WIDTH]], - &mut [T; WIDTH], - &mut [T; NUM_INTERNAL_ROUNDS - 1], - Option<&mut [[T; WIDTH]; NUM_EXTERNAL_ROUNDS]>, - Option<&mut [T; NUM_INTERNAL_ROUNDS]>, - &mut [T; WIDTH], - ) { - ( - &mut self.external_rounds_state, - &mut self.internal_rounds_state, - &mut self.internal_rounds_s0, - Some(&mut self.external_rounds_sbox), - Some(&mut self.internal_rounds_sbox), - &mut self.output_state, - ) - } -} - -/// Permutation columns struct without S-boxes. 
-#[derive(AlignedBorrow, Clone, Copy)] -#[repr(C)] -pub struct PermutationNoSbox { - pub external_rounds_state: [[T; WIDTH]; NUM_EXTERNAL_ROUNDS], - pub internal_rounds_state: [T; WIDTH], - pub internal_rounds_s0: [T; NUM_INTERNAL_ROUNDS - 1], - pub output_state: [T; WIDTH], -} - -impl Permutation for PermutationNoSbox { - fn external_rounds_state(&self) -> &[[T; WIDTH]] { - &self.external_rounds_state - } - - fn internal_rounds_state(&self) -> &[T; WIDTH] { - &self.internal_rounds_state - } - - fn internal_rounds_s0(&self) -> &[T; NUM_INTERNAL_ROUNDS - 1] { - &self.internal_rounds_s0 - } - - fn external_rounds_sbox(&self) -> Option<&[[T; WIDTH]; NUM_EXTERNAL_ROUNDS]> { - None - } - - fn internal_rounds_sbox(&self) -> Option<&[T; NUM_INTERNAL_ROUNDS]> { - None - } - - fn perm_output(&self) -> &[T; WIDTH] { - &self.output_state - } -} - -impl PermutationMut for &mut PermutationNoSbox { - fn get_cols_mut( - &mut self, - ) -> ( - &mut [[T; WIDTH]], - &mut [T; WIDTH], - &mut [T; NUM_INTERNAL_ROUNDS - 1], - Option<&mut [[T; WIDTH]; NUM_EXTERNAL_ROUNDS]>, - Option<&mut [T; NUM_INTERNAL_ROUNDS]>, - &mut [T; WIDTH], - ) { - ( - &mut self.external_rounds_state, - &mut self.internal_rounds_state, - &mut self.internal_rounds_s0, - None, - None, - &mut self.output_state, - ) - } -} - -/// Permutation columns struct without S-boxes and half of the external rounds. -#[derive(AlignedBorrow, Clone, Copy)] -#[repr(C)] -pub struct PermutationNoSboxHalfExternal { - pub external_rounds_state: [[T; WIDTH]; NUM_EXTERNAL_ROUNDS / 2], - pub internal_rounds_state: [T; WIDTH], - pub internal_rounds_s0: [T; NUM_INTERNAL_ROUNDS - 1], - pub output_state: [T; WIDTH], -} - -impl Permutation for PermutationNoSboxHalfExternal { - fn external_rounds_state(&self) -> &[[T; WIDTH]] { - &self.external_rounds_state - } - - fn internal_rounds_state(&self) -> &[T; WIDTH] { - &self.internal_rounds_state - } - - fn internal_rounds_s0(&self) -> &[T; NUM_INTERNAL_ROUNDS - 1] { - &self.internal_rounds_s0 - } - - fn external_rounds_sbox(&self) -> Option<&[[T; WIDTH]; NUM_EXTERNAL_ROUNDS]> { - None - } - - fn internal_rounds_sbox(&self) -> Option<&[T; NUM_INTERNAL_ROUNDS]> { - None - } - - fn perm_output(&self) -> &[T; WIDTH] { - &self.output_state - } -} - -impl PermutationMut for &mut PermutationNoSboxHalfExternal { - fn get_cols_mut( - &mut self, - ) -> ( - &mut [[T; WIDTH]], - &mut [T; WIDTH], - &mut [T; NUM_INTERNAL_ROUNDS - 1], - Option<&mut [[T; WIDTH]; NUM_EXTERNAL_ROUNDS]>, - Option<&mut [T; NUM_INTERNAL_ROUNDS]>, - &mut [T; WIDTH], - ) { - ( - &mut self.external_rounds_state, - &mut self.internal_rounds_state, - &mut self.internal_rounds_s0, - None, - None, - &mut self.output_state, - ) - } -} - -pub fn permutation_mut<'a, 'b: 'a, T, const DEGREE: usize>( - row: &'b mut [T], -) -> Box + 'a> -where - T: Copy, -{ - if DEGREE == 3 { - let start = POSEIDON2_DEGREE3_COL_MAP.permutation_cols.external_rounds_state[0][0]; - let end = start + size_of::>(); - let convert: &mut PermutationSBox = row[start..end].borrow_mut(); - Box::new(convert) - } else if DEGREE == 9 || DEGREE == 17 { - let start = POSEIDON2_DEGREE9_COL_MAP.permutation_cols.external_rounds_state[0][0]; - let end = start + size_of::>(); - - let convert: &mut PermutationNoSbox = row[start..end].borrow_mut(); - Box::new(convert) - } else { - panic!("Unsupported degree"); - } -} diff --git a/crates/recursion/core/src/poseidon2_wide/columns/syscall_params.rs b/crates/recursion/core/src/poseidon2_wide/columns/syscall_params.rs deleted file mode 100644 index 
cca164ceb5..0000000000 --- a/crates/recursion/core/src/poseidon2_wide/columns/syscall_params.rs +++ /dev/null @@ -1,77 +0,0 @@ -use std::mem::size_of; - -use sp1_derive::AlignedBorrow; - -const SYSCALL_PARAMS_SIZE: usize = size_of::>(); - -/// Syscall params columns. They are different for each opcode. -#[derive(AlignedBorrow, Clone, Copy)] -#[repr(C)] -pub union SyscallParams { - compress: CompressParams, - absorb: AbsorbParams, - finalize: FinalizeParams, -} - -impl SyscallParams { - pub fn compress(&self) -> &CompressParams { - assert!(size_of::>() == SYSCALL_PARAMS_SIZE); - unsafe { &self.compress } - } - - pub fn compress_mut(&mut self) -> &mut CompressParams { - unsafe { &mut self.compress } - } - - pub fn absorb(&self) -> &AbsorbParams { - assert!(size_of::>() == SYSCALL_PARAMS_SIZE); - unsafe { &self.absorb } - } - - pub fn absorb_mut(&mut self) -> &mut AbsorbParams { - unsafe { &mut self.absorb } - } - - pub fn finalize(&self) -> &FinalizeParams { - assert!(size_of::>() == SYSCALL_PARAMS_SIZE); - unsafe { &self.finalize } - } - - pub fn finalize_mut(&mut self) -> &mut FinalizeParams { - unsafe { &mut self.finalize } - } - - pub fn get_raw_params(&self) -> [T; SYSCALL_PARAMS_SIZE] { - // All of the union's fields should have the same size, so just choose one of them to return - // the elements. - let compress = self.compress(); - [compress.clk, compress.dst_ptr, compress.left_ptr, compress.right_ptr] - } -} - -#[derive(AlignedBorrow, Clone, Copy)] -#[repr(C)] -pub struct CompressParams { - pub clk: T, - pub dst_ptr: T, - pub left_ptr: T, - pub right_ptr: T, -} - -#[derive(AlignedBorrow, Clone, Copy)] -#[repr(C)] -pub struct AbsorbParams { - pub clk: T, - pub hash_and_absorb_num: T, - pub input_ptr: T, - pub input_len: T, -} - -#[derive(AlignedBorrow, Clone, Copy)] -#[repr(C)] -pub struct FinalizeParams { - pub clk: T, - pub hash_num: T, - pub output_ptr: T, - pub pad: T, -} diff --git a/crates/recursion/core/src/poseidon2_wide/events.rs b/crates/recursion/core/src/poseidon2_wide/events.rs deleted file mode 100644 index d1e66096a8..0000000000 --- a/crates/recursion/core/src/poseidon2_wide/events.rs +++ /dev/null @@ -1,158 +0,0 @@ -use p3_field::PrimeField32; -use p3_symmetric::Permutation; - -use crate::{memory::MemoryRecord, poseidon2_wide::WIDTH, runtime::DIGEST_SIZE}; - -use super::RATE; - -#[derive(Debug, Clone)] -pub enum Poseidon2HashEvent { - Absorb(Poseidon2AbsorbEvent), - Finalize(Poseidon2FinalizeEvent), -} - -#[derive(Debug, Clone)] -pub struct Poseidon2CompressEvent { - pub clk: F, - pub dst: F, // from a_val - pub left: F, // from b_val - pub right: F, // from c_val - pub input: [F; WIDTH], - pub result_array: [F; WIDTH], - pub input_records: [MemoryRecord; WIDTH], - pub result_records: [MemoryRecord; WIDTH], -} - -#[derive(Debug, Clone)] -pub struct Poseidon2AbsorbEvent { - pub clk: F, - pub hash_and_absorb_num: F, // from a_val - pub input_addr: F, // from b_val - pub input_len: F, // from c_val - - pub hash_num: F, - pub absorb_num: F, - pub iterations: Vec>, -} - -impl Poseidon2AbsorbEvent { - pub(crate) fn new( - clk: F, - hash_and_absorb_num: F, - input_addr: F, - input_len: F, - hash_num: F, - absorb_num: F, - ) -> Self { - Self { - clk, - hash_and_absorb_num, - input_addr, - input_len, - hash_num, - absorb_num, - iterations: Vec::new(), - } - } -} - -impl Poseidon2AbsorbEvent { - pub(crate) fn populate_iterations( - &mut self, - start_addr: F, - input_len: F, - memory_records: &[MemoryRecord], - permuter: &impl Permutation<[F; WIDTH]>, - hash_state: &mut [F; 
WIDTH], - hash_state_cursor: &mut usize, - ) -> usize { - let mut nb_permutes = 0; - let mut input_records = Vec::new(); - let mut previous_state = *hash_state; - let mut iter_num_consumed = 0; - - let start_addr = start_addr.as_canonical_u32(); - let end_addr = start_addr + input_len.as_canonical_u32(); - - for (addr_iter, memory_record) in (start_addr..end_addr).zip(memory_records.iter()) { - input_records.push(*memory_record); - - hash_state[*hash_state_cursor] = memory_record.value[0]; - *hash_state_cursor += 1; - iter_num_consumed += 1; - - // Do a permutation when the hash state is full. - if *hash_state_cursor == RATE { - nb_permutes += 1; - let perm_input = *hash_state; - *hash_state = permuter.permute(*hash_state); - - self.iterations.push(Poseidon2AbsorbIteration { - state_cursor: *hash_state_cursor - iter_num_consumed, - start_addr: F::from_canonical_u32(addr_iter - iter_num_consumed as u32 + 1), - input_records, - perm_input, - perm_output: *hash_state, - previous_state, - state: *hash_state, - do_perm: true, - }); - - previous_state = *hash_state; - input_records = Vec::new(); - *hash_state_cursor = 0; - iter_num_consumed = 0; - } - } - - if *hash_state_cursor != 0 { - nb_permutes += 1; - // Note that we still do a permutation, generate the trace and enforce permutation - // constraints for every absorb and finalize row. - self.iterations.push(Poseidon2AbsorbIteration { - state_cursor: *hash_state_cursor - iter_num_consumed, - start_addr: F::from_canonical_u32(end_addr - iter_num_consumed as u32), - input_records, - perm_input: *hash_state, - perm_output: permuter.permute(*hash_state), - previous_state, - state: *hash_state, - do_perm: false, - }); - } - nb_permutes - } -} - -#[derive(Debug, Clone)] -pub struct Poseidon2AbsorbIteration { - pub state_cursor: usize, - pub start_addr: F, - pub input_records: Vec>, - - pub perm_input: [F; WIDTH], - pub perm_output: [F; WIDTH], - - pub previous_state: [F; WIDTH], - pub state: [F; WIDTH], - - pub do_perm: bool, -} - -#[derive(Debug, Clone)] -pub struct Poseidon2FinalizeEvent { - pub clk: F, - pub hash_num: F, // from a_val - pub output_ptr: F, // from b_val - pub output_records: [MemoryRecord; DIGEST_SIZE], - - pub state_cursor: usize, - - pub perm_input: [F; WIDTH], - pub perm_output: [F; WIDTH], - - pub previous_state: [F; WIDTH], - pub state: [F; WIDTH], - - pub do_perm: bool, -} diff --git a/crates/recursion/core/src/poseidon2_wide/mod.rs b/crates/recursion/core/src/poseidon2_wide/mod.rs deleted file mode 100644 index e7e6f9cc11..0000000000 --- a/crates/recursion/core/src/poseidon2_wide/mod.rs +++ /dev/null @@ -1,319 +0,0 @@ -#![allow(clippy::needless_range_loop)] - -use std::{ - borrow::{Borrow, BorrowMut}, - ops::Deref, -}; - -use p3_baby_bear::{MONTY_INVERSE, POSEIDON2_INTERNAL_MATRIX_DIAG_16_BABYBEAR_MONTY}; -use p3_field::{AbstractField, PrimeField32}; - -pub mod air; -pub mod columns; -pub mod events; -pub mod trace; - -use p3_poseidon2::matmul_internal; - -use self::columns::{Poseidon2, Poseidon2Degree3, Poseidon2Degree9, Poseidon2Mut}; - -/// The width of the permutation. -pub const WIDTH: usize = 16; -pub const RATE: usize = WIDTH / 2; - -pub const NUM_EXTERNAL_ROUNDS: usize = 8; -pub const NUM_INTERNAL_ROUNDS: usize = 13; -pub const NUM_ROUNDS: usize = NUM_EXTERNAL_ROUNDS + NUM_INTERNAL_ROUNDS; - -/// A chip that implements addition for the opcode ADD. 
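`populate_iterations` above walks the absorb input one element at a time, emitting one iteration (one trace row) per chunk and permuting whenever the hash-state cursor reaches `RATE`. A stripped-down sketch of that sponge loop that only tracks cursors and flags; the real code also records memory accesses and the permutation input/output:

```rust
const RATE: usize = 8;

/// One absorb iteration: how many elements this row consumed and whether it
/// advanced the sponge state (a stand-in for Poseidon2AbsorbIteration).
#[derive(Debug, PartialEq)]
struct Iteration {
    state_cursor: usize,
    num_consumed: usize,
    do_perm: bool,
}

fn absorb(input_len: usize, cursor: &mut usize) -> Vec<Iteration> {
    assert!(input_len > 0 && *cursor < RATE);
    let mut iterations = Vec::new();
    let mut row_start = *cursor;
    let mut consumed = 0;
    for _ in 0..input_len {
        *cursor += 1;
        consumed += 1;
        // The state is full: this row permutes and the next row starts at 0.
        if *cursor == RATE {
            iterations.push(Iteration { state_cursor: row_start, num_consumed: consumed, do_perm: true });
            *cursor = 0;
            row_start = 0;
            consumed = 0;
        }
    }
    // Leftover elements form a final row whose do_perm flag is false (the real
    // code still runs a permutation to fill the trace, but the state does not advance).
    if consumed > 0 {
        iterations.push(Iteration { state_cursor: row_start, num_consumed: consumed, do_perm: false });
    }
    iterations
}

fn main() {
    let mut cursor = 3;
    let rows = absorb(10, &mut cursor);
    assert_eq!(
        rows,
        vec![
            Iteration { state_cursor: 3, num_consumed: 5, do_perm: true },
            Iteration { state_cursor: 0, num_consumed: 5, do_perm: false },
        ]
    );
    // The next absorb or finalize starts at (last_row_ending_cursor + 1) % RATE.
    assert_eq!(cursor, 5);
    println!("absorb produced {} rows", rows.len());
}
```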
-#[derive(Default)] -pub struct Poseidon2WideChip { - pub fixed_log2_rows: Option, - pub pad: bool, -} - -impl<'a, const DEGREE: usize> Poseidon2WideChip { - /// Transmute a row it to an immutable Poseidon2 instance. - pub(crate) fn convert(row: impl Deref) -> Box + 'a> - where - T: Copy + 'a, - { - if DEGREE == 3 { - let convert: &Poseidon2Degree3 = (*row).borrow(); - Box::new(*convert) - } else if DEGREE == 9 || DEGREE == 17 { - let convert: &Poseidon2Degree9 = (*row).borrow(); - Box::new(*convert) - } else { - panic!("Unsupported degree"); - } - } - - /// Transmute a row it to a mutable Poseidon2 instance. - pub(crate) fn convert_mut<'b: 'a, F: PrimeField32>( - &self, - row: &'b mut [F], - ) -> Box + 'a> { - if DEGREE == 3 { - let convert: &mut Poseidon2Degree3 = row.borrow_mut(); - Box::new(convert) - } else if DEGREE == 9 || DEGREE == 17 { - let convert: &mut Poseidon2Degree9 = row.borrow_mut(); - Box::new(convert) - } else { - panic!("Unsupported degree"); - } - } -} - -pub fn apply_m_4(x: &mut [AF]) -where - AF: AbstractField, -{ - let t01 = x[0].clone() + x[1].clone(); - let t23 = x[2].clone() + x[3].clone(); - let t0123 = t01.clone() + t23.clone(); - let t01123 = t0123.clone() + x[1].clone(); - let t01233 = t0123.clone() + x[3].clone(); - // The order here is important. Need to overwrite x[0] and x[2] after x[1] and x[3]. - x[3] = t01233.clone() + x[0].double(); // 3*x[0] + x[1] + x[2] + 2*x[3] - x[1] = t01123.clone() + x[2].double(); // x[0] + 2*x[1] + 3*x[2] + x[3] - x[0] = t01123 + t01; // 2*x[0] + 3*x[1] + x[2] + x[3] - x[2] = t01233 + t23; // x[0] + x[1] + 2*x[2] + 3*x[3] -} - -pub(crate) fn external_linear_layer(state: &mut [AF; WIDTH]) { - for j in (0..WIDTH).step_by(4) { - apply_m_4(&mut state[j..j + 4]); - } - let sums: [AF; 4] = - core::array::from_fn(|k| (0..WIDTH).step_by(4).map(|j| state[j + k].clone()).sum::()); - - for j in 0..WIDTH { - state[j] += sums[j % 4].clone(); - } -} - -pub(crate) fn internal_linear_layer(state: &mut [F; WIDTH]) { - let matmul_constants: [::F; WIDTH] = - POSEIDON2_INTERNAL_MATRIX_DIAG_16_BABYBEAR_MONTY - .iter() - .map(|x| ::F::from_wrapped_u32(x.as_canonical_u32())) - .collect::>() - .try_into() - .unwrap(); - matmul_internal(state, matmul_constants); - let monty_inverse = F::from_wrapped_u32(MONTY_INVERSE.as_canonical_u32()); - state.iter_mut().for_each(|i| *i *= monty_inverse.clone()); -} - -#[cfg(test)] -pub(crate) mod tests { - use std::{array, time::Instant}; - - use crate::{ - air::Block, - memory::MemoryRecord, - poseidon2_wide::events::Poseidon2HashEvent, - runtime::{ExecutionRecord, DIGEST_SIZE}, - }; - use itertools::Itertools; - use p3_baby_bear::{BabyBear, DiffusionMatrixBabyBear}; - use p3_field::AbstractField; - use p3_matrix::dense::RowMajorMatrix; - use p3_poseidon2::{Poseidon2, Poseidon2ExternalMatrixGeneral}; - use p3_symmetric::Permutation; - use rand::random; - - use sp1_core_machine::utils::{uni_stark_prove, uni_stark_verify}; - use sp1_stark::{ - air::MachineAir, baby_bear_poseidon2::BabyBearPoseidon2, inner_perm, StarkGenericConfig, - }; - use zkhash::ark_ff::UniformRand; - - use super::{ - events::{Poseidon2AbsorbEvent, Poseidon2CompressEvent, Poseidon2FinalizeEvent}, - Poseidon2WideChip, WIDTH, - }; - - fn poseidon2_wide_prove_babybear_degree( - input_exec: ExecutionRecord, - ) { - let chip = Poseidon2WideChip:: { fixed_log2_rows: None, pad: true }; - - let trace: RowMajorMatrix = - chip.generate_trace(&input_exec, &mut ExecutionRecord::::default()); - - let config = BabyBearPoseidon2::compressed(); - let mut 
challenger = config.challenger(); - - let start = Instant::now(); - let proof = uni_stark_prove(&config, &chip, &mut challenger, trace); - let duration = start.elapsed().as_secs_f64(); - println!("proof duration = {:?}", duration); - - let mut challenger = config.challenger(); - let start = Instant::now(); - uni_stark_verify(&config, &chip, &mut challenger, &proof) - .expect("expected proof to be valid"); - - let duration = start.elapsed().as_secs_f64(); - println!("verify duration = {:?}", duration); - } - - fn dummy_memory_access_records( - memory_values: Vec, - prev_ts: BabyBear, - ts: BabyBear, - ) -> Vec> { - memory_values - .iter() - .map(|value| MemoryRecord::new_read(BabyBear::zero(), Block::from(*value), ts, prev_ts)) - .collect_vec() - } - - pub(crate) fn generate_test_execution_record( - incorrect_trace: bool, - ) -> ExecutionRecord { - const NUM_ABSORBS: usize = 1000; - const NUM_COMPRESSES: usize = 1000; - - let mut input_exec = ExecutionRecord::::default(); - - let rng = &mut rand::thread_rng(); - let permuter: Poseidon2< - BabyBear, - Poseidon2ExternalMatrixGeneral, - DiffusionMatrixBabyBear, - 16, - 7, - > = inner_perm(); - - // Generate hash test events. - let hash_test_input_sizes: [usize; NUM_ABSORBS] = - array::from_fn(|_| random::() % 128 + 1); - hash_test_input_sizes.iter().enumerate().for_each(|(i, input_size)| { - let test_input = (0..*input_size).map(|_| BabyBear::rand(rng)).collect_vec(); - - let prev_ts = BabyBear::from_canonical_usize(i); - let absorb_ts = BabyBear::from_canonical_usize(i + 1); - let finalize_ts = BabyBear::from_canonical_usize(i + 2); - let hash_num = i as u32; - let absorb_num = 0_u32; - let hash_and_absorb_num = - BabyBear::from_canonical_u32(hash_num * (1 << 12) + absorb_num); - let start_addr = BabyBear::from_canonical_usize(i + 1); - let input_len = BabyBear::from_canonical_usize(*input_size); - - let mut absorb_event = Poseidon2AbsorbEvent::new( - absorb_ts, - hash_and_absorb_num, - start_addr, - input_len, - BabyBear::from_canonical_u32(hash_num), - BabyBear::from_canonical_u32(absorb_num), - ); - - let mut hash_state = [BabyBear::zero(); WIDTH]; - let mut hash_state_cursor = 0; - absorb_event.populate_iterations( - start_addr, - input_len, - &dummy_memory_access_records(test_input.clone(), prev_ts, absorb_ts), - &permuter, - &mut hash_state, - &mut hash_state_cursor, - ); - - input_exec.poseidon2_hash_events.push(Poseidon2HashEvent::Absorb(absorb_event)); - - let do_perm = hash_state_cursor != 0; - let mut perm_output = permuter.permute(hash_state); - if incorrect_trace { - perm_output = [BabyBear::rand(rng); WIDTH]; - } - - let state = if do_perm { perm_output } else { hash_state }; - - input_exec.poseidon2_hash_events.push(Poseidon2HashEvent::Finalize( - Poseidon2FinalizeEvent { - clk: finalize_ts, - hash_num: BabyBear::from_canonical_u32(hash_num), - output_ptr: start_addr, - output_records: dummy_memory_access_records( - state.as_slice().to_vec(), - absorb_ts, - finalize_ts, - )[0..DIGEST_SIZE] - .try_into() - .unwrap(), - state_cursor: hash_state_cursor, - perm_input: hash_state, - perm_output, - previous_state: hash_state, - state, - do_perm, - }, - )); - }); - - let compress_test_inputs: Vec<[BabyBear; WIDTH]> = (0..NUM_COMPRESSES) - .map(|_| core::array::from_fn(|_| BabyBear::rand(rng))) - .collect_vec(); - compress_test_inputs.iter().enumerate().for_each(|(i, input)| { - let mut result_array = permuter.permute(*input); - if incorrect_trace { - result_array = core::array::from_fn(|_| BabyBear::rand(rng)); - } - let prev_ts = 
BabyBear::from_canonical_usize(i); - let input_ts = BabyBear::from_canonical_usize(i + 1); - let output_ts = BabyBear::from_canonical_usize(i + 2); - - let dst = BabyBear::from_canonical_usize(i + 1); - let left = dst + BabyBear::from_canonical_usize(WIDTH / 2); - let right = left + BabyBear::from_canonical_usize(WIDTH / 2); - - let compress_event = Poseidon2CompressEvent { - clk: input_ts, - dst, - left, - right, - input: *input, - result_array, - input_records: dummy_memory_access_records(input.to_vec(), prev_ts, input_ts) - .try_into() - .unwrap(), - result_records: dummy_memory_access_records( - result_array.to_vec(), - input_ts, - output_ts, - ) - .try_into() - .unwrap(), - }; - - input_exec.poseidon2_compress_events.push(compress_event); - }); - - input_exec - } - - #[test] - fn poseidon2_wide_prove_babybear_success() { - // Generate test input exec record. - let input_exec = generate_test_execution_record(false); - - poseidon2_wide_prove_babybear_degree::<3>(input_exec.clone()); - poseidon2_wide_prove_babybear_degree::<9>(input_exec); - } - - #[test] - #[should_panic] - fn poseidon2_wide_prove_babybear_failure() { - // Generate test input exec record. - let input_exec = generate_test_execution_record(true); - - poseidon2_wide_prove_babybear_degree::<3>(input_exec.clone()); - poseidon2_wide_prove_babybear_degree::<9>(input_exec); - } -} diff --git a/crates/recursion/core/src/poseidon2_wide/trace.rs b/crates/recursion/core/src/poseidon2_wide/trace.rs deleted file mode 100644 index db4fa9b3dd..0000000000 --- a/crates/recursion/core/src/poseidon2_wide/trace.rs +++ /dev/null @@ -1,554 +0,0 @@ -use std::borrow::Borrow; - -use p3_air::BaseAir; -use p3_field::PrimeField32; -use p3_matrix::dense::RowMajorMatrix; -use p3_maybe_rayon::prelude::{IndexedParallelIterator, ParallelIterator, ParallelSliceMut}; -use sp1_core_machine::utils::{next_power_of_two, par_for_each_row}; -use sp1_primitives::RC_16_30_U32; -use sp1_stark::air::MachineAir; -use tracing::instrument; - -use crate::{ - poseidon2_wide::{ - columns::permutation::permutation_mut, events::Poseidon2HashEvent, external_linear_layer, - NUM_EXTERNAL_ROUNDS, WIDTH, - }, - range_check::{RangeCheckEvent, RangeCheckOpcode}, - runtime::{ExecutionRecord, RecursionProgram}, -}; - -use super::{ - events::{Poseidon2AbsorbEvent, Poseidon2CompressEvent, Poseidon2FinalizeEvent}, - internal_linear_layer, Poseidon2WideChip, NUM_INTERNAL_ROUNDS, RATE, -}; - -impl MachineAir for Poseidon2WideChip { - type Record = ExecutionRecord; - - type Program = RecursionProgram; - - fn name(&self) -> String { - format!("Poseidon2Wide {}", DEGREE) - } - - #[instrument(name = "generate poseidon2 wide trace", level = "debug", skip_all, fields(rows = input.poseidon2_compress_events.len()))] - fn generate_trace( - &self, - input: &ExecutionRecord, - output: &mut ExecutionRecord, - ) -> RowMajorMatrix { - // Calculate the number of rows in the trace. - let mut nb_rows = 0; - for event in input.poseidon2_hash_events.iter() { - match event { - Poseidon2HashEvent::Absorb(absorb_event) => { - nb_rows += absorb_event.iterations.len(); - } - Poseidon2HashEvent::Finalize(_) => { - nb_rows += 1; - } - } - } - nb_rows += input.poseidon2_compress_events.len() * 2; - - let nb_padded_rows = - if self.pad { next_power_of_two(nb_rows, self.fixed_log2_rows) } else { nb_rows }; - - let num_columns = as BaseAir>::width(self); - let mut rows = vec![F::zero(); nb_padded_rows * num_columns]; - - // Populate the hash events. 
We do this serially, since each absorb event could populate a - // different number of rows. Also, most of the rows are populated by the compress - // events. - let mut row_cursor = 0; - for event in &input.poseidon2_hash_events { - match event { - Poseidon2HashEvent::Absorb(absorb_event) => { - let num_absorb_elements = absorb_event.iterations.len() * num_columns; - let absorb_rows = &mut rows[row_cursor..row_cursor + num_absorb_elements]; - self.populate_absorb_event(absorb_rows, absorb_event, num_columns, output); - row_cursor += num_absorb_elements; - } - - Poseidon2HashEvent::Finalize(finalize_event) => { - let finalize_row = &mut rows[row_cursor..row_cursor + num_columns]; - self.populate_finalize_event(finalize_row, finalize_event); - row_cursor += num_columns; - } - } - } - - // Populate the compress events. - let compress_rows = &mut rows[row_cursor..nb_rows * num_columns]; - par_for_each_row(compress_rows, num_columns * 2, |i, rows| { - self.populate_compress_event(rows, &input.poseidon2_compress_events[i], num_columns); - }); - - // Convert the trace to a row major matrix. - let mut trace = RowMajorMatrix::new(rows, num_columns); - - let padded_rows = trace.values.par_chunks_mut(num_columns).skip(nb_rows); - - if self.pad { - let mut dummy_row = vec![F::zero(); num_columns]; - self.populate_permutation([F::zero(); WIDTH], None, &mut dummy_row); - padded_rows.for_each(|padded_row| { - padded_row.copy_from_slice(&dummy_row); - }); - } - - trace - } - - fn included(&self, record: &Self::Record) -> bool { - !record.poseidon2_compress_events.is_empty() - } -} - -impl Poseidon2WideChip { - pub fn populate_compress_event( - &self, - rows: &mut [F], - compress_event: &Poseidon2CompressEvent, - num_columns: usize, - ) { - let input_row = &mut rows[0..num_columns]; - // Populate the control flow fields. - { - let mut cols = self.convert_mut(input_row); - let control_flow = cols.control_flow_mut(); - - control_flow.is_compress = F::one(); - control_flow.is_syscall_row = F::one(); - } - - // Populate the syscall params fields. - { - let mut cols = self.convert_mut(input_row); - let syscall_params = cols.syscall_params_mut().compress_mut(); - - syscall_params.clk = compress_event.clk; - syscall_params.dst_ptr = compress_event.dst; - syscall_params.left_ptr = compress_event.left; - syscall_params.right_ptr = compress_event.right; - } - - // Populate the memory fields. - { - let mut cols = self.convert_mut(input_row); - let memory = cols.memory_mut(); - - memory.start_addr = compress_event.left; - // Populate the first half of the memory inputs in the memory struct. - for i in 0..WIDTH / 2 { - memory.memory_slot_used[i] = F::one(); - memory.memory_accesses[i].populate(&compress_event.input_records[i]); - } - } - - // Populate the opcode workspace fields. - { - let mut cols = self.convert_mut(input_row); - let compress_cols = cols.opcode_workspace_mut().compress_mut(); - compress_cols.start_addr = compress_event.right; - - // Populate the second half of the memory inputs. - for i in 0..WIDTH / 2 { - compress_cols.memory_accesses[i] - .populate(&compress_event.input_records[i + WIDTH / 2]); - } - } - - // Populate the permutation fields. 
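The trace sizing above follows a simple rule: one row per absorb iteration, one row per finalize event, two rows per compress event (input and output), padded to a power of two when padding is enabled. A small sketch with hypothetical event counts (the real `next_power_of_two` helper also honors `fixed_log2_rows`):

```rust
/// Row-count rule from the deleted generate_trace: absorb events contribute one
/// row per iteration, finalize events one row, compress events two rows.
fn num_trace_rows(absorb_iterations: &[usize], num_finalize: usize, num_compress: usize) -> usize {
    absorb_iterations.iter().sum::<usize>() + num_finalize + 2 * num_compress
}

fn main() {
    // Hypothetical workload: three absorbs spanning 1, 2 and 5 rows,
    // three finalizes, and four compress calls.
    let rows = num_trace_rows(&[1, 2, 5], 3, 4);
    assert_eq!(rows, 19);
    // Pad up to the next power of two, as done when `pad` is set.
    let padded = rows.next_power_of_two();
    assert_eq!(padded, 32);
    println!("{rows} real rows, padded to {padded}");
}
```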
- self.populate_permutation( - compress_event.input, - Some(compress_event.result_array), - input_row, - ); - - let output_row = &mut rows[num_columns..]; - { - let mut cols = self.convert_mut(output_row); - let control_flow = cols.control_flow_mut(); - - control_flow.is_compress = F::one(); - control_flow.is_compress_output = F::one(); - } - - { - let mut cols = self.convert_mut(output_row); - let syscall_cols = cols.syscall_params_mut().compress_mut(); - - syscall_cols.clk = compress_event.clk; - syscall_cols.dst_ptr = compress_event.dst; - syscall_cols.left_ptr = compress_event.left; - syscall_cols.right_ptr = compress_event.right; - } - - { - let mut cols = self.convert_mut(output_row); - let memory = cols.memory_mut(); - - memory.start_addr = compress_event.dst; - // Populate the first half of the memory inputs in the memory struct. - for i in 0..WIDTH / 2 { - memory.memory_slot_used[i] = F::one(); - memory.memory_accesses[i].populate(&compress_event.result_records[i]); - } - } - - { - let mut cols = self.convert_mut(output_row); - let compress_cols = cols.opcode_workspace_mut().compress_mut(); - - compress_cols.start_addr = compress_event.dst + F::from_canonical_usize(WIDTH / 2); - for i in 0..WIDTH / 2 { - compress_cols.memory_accesses[i] - .populate(&compress_event.result_records[i + WIDTH / 2]); - } - } - - self.populate_permutation(compress_event.result_array, None, output_row); - } - - pub fn populate_absorb_event( - &self, - rows: &mut [F], - absorb_event: &Poseidon2AbsorbEvent, - num_columns: usize, - output: &mut ExecutionRecord, - ) { - // We currently don't support an input_len of 0, since it will need special logic in the - // AIR. - assert!(absorb_event.input_len > F::zero()); - - let mut last_row_ending_cursor = 0; - let num_absorb_rows = absorb_event.iterations.len(); - - for (iter_num, absorb_iter) in absorb_event.iterations.iter().enumerate() { - let absorb_row = &mut rows[iter_num * num_columns..(iter_num + 1) * num_columns]; - let is_syscall_row = iter_num == 0; - let is_last_row = iter_num == num_absorb_rows - 1; - - // Populate the control flow fields. - { - let mut cols = self.convert_mut(absorb_row); - let control_flow = cols.control_flow_mut(); - - control_flow.is_absorb = F::one(); - control_flow.is_syscall_row = F::from_bool(is_syscall_row); - control_flow.is_absorb_no_perm = F::from_bool(!absorb_iter.do_perm); - control_flow.is_absorb_not_last_row = F::from_bool(!is_last_row); - control_flow.is_absorb_last_row = F::from_bool(is_last_row); - } - - // Populate the syscall params fields. - { - let mut cols = self.convert_mut(absorb_row); - let syscall_params = cols.syscall_params_mut().absorb_mut(); - - syscall_params.clk = absorb_event.clk; - syscall_params.hash_and_absorb_num = absorb_event.hash_and_absorb_num; - syscall_params.input_ptr = absorb_event.input_addr; - syscall_params.input_len = absorb_event.input_len; - - output.add_range_check_events(&[RangeCheckEvent::new( - RangeCheckOpcode::U16, - absorb_event.input_len.as_canonical_u32() as u16, - )]); - } - - // Populate the memory fields. - { - let mut cols = self.convert_mut(absorb_row); - let memory = cols.memory_mut(); - - memory.start_addr = absorb_iter.start_addr; - for (i, input_record) in absorb_iter.input_records.iter().enumerate() { - memory.memory_slot_used[i + absorb_iter.state_cursor] = F::one(); - memory.memory_accesses[i + absorb_iter.state_cursor].populate(input_record); - } - } - - // Populate the opcode workspace fields. 
- { - let mut cols = self.convert_mut(absorb_row); - let absorb_workspace = cols.opcode_workspace_mut().absorb_mut(); - - absorb_workspace.hash_num = absorb_event.hash_num; - output.add_range_check_events(&[RangeCheckEvent::new( - RangeCheckOpcode::U16, - absorb_event.hash_num.as_canonical_u32() as u16, - )]); - absorb_workspace.absorb_num = absorb_event.absorb_num; - output.add_range_check_events(&[RangeCheckEvent::new( - RangeCheckOpcode::U12, - absorb_event.absorb_num.as_canonical_u32() as u16, - )]); - - let num_remaining_rows = num_absorb_rows - 1 - iter_num; - absorb_workspace.num_remaining_rows = F::from_canonical_usize(num_remaining_rows); - output.add_range_check_events(&[RangeCheckEvent::new( - RangeCheckOpcode::U16, - num_remaining_rows as u16, - )]); - - // Calculate last_row_num_consumed. - // For absorb calls that span multiple rows (e.g. the last row is not the syscall - // row), last_row_num_consumed = (input_len + state_cursor) % 8 at - // the syscall row. For absorb calls that are only one row, - // last_row_num_consumed = absorb_event.input_len. - if is_syscall_row { - last_row_ending_cursor = (absorb_iter.state_cursor - + absorb_event.input_len.as_canonical_u32() as usize - - 1) - % RATE; - } - - absorb_workspace.last_row_ending_cursor = - F::from_canonical_usize(last_row_ending_cursor); - - absorb_workspace.last_row_ending_cursor_is_seven.populate_from_field_element( - F::from_canonical_usize(last_row_ending_cursor) - F::from_canonical_usize(7), - ); - - (0..3).for_each(|i| { - absorb_workspace.last_row_ending_cursor_bitmap[i] = - F::from_bool((last_row_ending_cursor) & (1 << i) == (1 << i)) - }); - - absorb_workspace.num_remaining_rows_is_zero.populate(num_remaining_rows as u32); - - absorb_workspace.is_syscall_not_last_row = - F::from_bool(is_syscall_row && !is_last_row); - absorb_workspace.is_syscall_is_last_row = - F::from_bool(is_syscall_row && is_last_row); - absorb_workspace.not_syscall_not_last_row = - F::from_bool(!is_syscall_row && !is_last_row); - absorb_workspace.not_syscall_is_last_row = - F::from_bool(!is_syscall_row && is_last_row); - absorb_workspace.is_last_row_ending_cursor_is_seven = - F::from_bool(is_last_row && last_row_ending_cursor == 7); - absorb_workspace.is_last_row_ending_cursor_not_seven = - F::from_bool(is_last_row && last_row_ending_cursor != 7); - - absorb_workspace.state = absorb_iter.state; - absorb_workspace.previous_state = absorb_iter.previous_state; - absorb_workspace.state_cursor = F::from_canonical_usize(absorb_iter.state_cursor); - absorb_workspace.is_first_hash_row = - F::from_bool(iter_num == 0 && absorb_event.absorb_num.is_zero()); - - absorb_workspace.start_mem_idx_bitmap[absorb_iter.state_cursor] = F::one(); - if is_last_row { - absorb_workspace.end_mem_idx_bitmap[last_row_ending_cursor] = F::one(); - } else { - absorb_workspace.end_mem_idx_bitmap[7] = F::one(); - } - } - - // Populate the permutation fields. - self.populate_permutation( - absorb_iter.perm_input, - if absorb_iter.do_perm { Some(absorb_iter.perm_output) } else { None }, - absorb_row, - ); - } - } - - pub fn populate_finalize_event( - &self, - row: &mut [F], - finalize_event: &Poseidon2FinalizeEvent, - ) { - // Populate the control flow fields. - { - let mut cols = self.convert_mut(row); - let control_flow = cols.control_flow_mut(); - control_flow.is_finalize = F::one(); - control_flow.is_syscall_row = F::one(); - } - - // Populate the syscall params fields. 
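A minimal sketch of the `last_row_ending_cursor` bookkeeping populated above, assuming RATE = 8 (matching the `end_mem_idx_bitmap[7]` / `last_row_ending_cursor_is_seven` flags) and using plain `usize` in place of field elements:

```rust
/// Sketch (not the chip code): given the sponge cursor at the start of an absorb call
/// and the number of input elements, compute how many table rows the call occupies and
/// where the cursor ends on the final row.
const RATE: usize = 8;

fn absorb_row_shape(state_cursor: usize, input_len: usize) -> (usize, usize) {
    assert!(input_len > 0, "empty absorbs are not supported by the AIR");
    // Elements occupying the rate window, counting those already present before this call.
    let total = state_cursor + input_len;
    // ceil(total / RATE) rows are touched by this absorb call.
    let num_rows = (total + RATE - 1) / RATE;
    // Index in 0..RATE of the last element written on the final row.
    let last_row_ending_cursor = (total - 1) % RATE;
    (num_rows, last_row_ending_cursor)
}

fn main() {
    // Starting at cursor 3 and absorbing 13 elements spans two rows and ends at index 7.
    assert_eq!(absorb_row_shape(3, 13), (2, 7));
    // A call that fits in one row ends at state_cursor + input_len - 1.
    assert_eq!(absorb_row_shape(2, 4), (1, 5));
}
```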
- { - let mut cols = self.convert_mut(row); - - let syscall_params = cols.syscall_params_mut().finalize_mut(); - syscall_params.clk = finalize_event.clk; - syscall_params.hash_num = finalize_event.hash_num; - syscall_params.output_ptr = finalize_event.output_ptr; - } - - // Populate the memory fields. - { - let mut cols = self.convert_mut(row); - let memory = cols.memory_mut(); - - memory.start_addr = finalize_event.output_ptr; - for i in 0..WIDTH / 2 { - memory.memory_slot_used[i] = F::one(); - memory.memory_accesses[i].populate(&finalize_event.output_records[i]); - } - } - - // Populate the opcode workspace fields. - { - let mut cols = self.convert_mut(row); - let finalize_workspace = cols.opcode_workspace_mut().finalize_mut(); - - finalize_workspace.previous_state = finalize_event.previous_state; - finalize_workspace.state = finalize_event.state; - finalize_workspace.state_cursor = F::from_canonical_usize(finalize_event.state_cursor); - finalize_workspace.state_cursor_is_zero.populate(finalize_event.state_cursor as u32); - } - - // Populate the permutation fields. - self.populate_permutation( - finalize_event.perm_input, - if finalize_event.do_perm { Some(finalize_event.perm_output) } else { None }, - row, - ); - } - - pub fn populate_permutation( - &self, - input: [F; WIDTH], - expected_output: Option<[F; WIDTH]>, - input_row: &mut [F], - ) { - let mut permutation = permutation_mut::(input_row); - - let ( - external_rounds_state, - internal_rounds_state, - internal_rounds_s0, - mut external_sbox, - mut internal_sbox, - output_state, - ) = permutation.get_cols_mut(); - - external_rounds_state[0] = input; - external_linear_layer(&mut external_rounds_state[0]); - - // Apply the first half of external rounds. - for r in 0..NUM_EXTERNAL_ROUNDS / 2 { - let next_state = - self.populate_external_round(external_rounds_state, &mut external_sbox, r); - if r == NUM_EXTERNAL_ROUNDS / 2 - 1 { - *internal_rounds_state = next_state; - } else { - external_rounds_state[r + 1] = next_state; - } - } - - // Apply the internal rounds. - external_rounds_state[NUM_EXTERNAL_ROUNDS / 2] = self.populate_internal_rounds( - internal_rounds_state, - internal_rounds_s0, - &mut internal_sbox, - ); - - // Apply the second half of external rounds. - for r in NUM_EXTERNAL_ROUNDS / 2..NUM_EXTERNAL_ROUNDS { - let next_state = - self.populate_external_round(external_rounds_state, &mut external_sbox, r); - if r == NUM_EXTERNAL_ROUNDS - 1 { - for i in 0..WIDTH { - output_state[i] = next_state[i]; - if let Some(expected_output) = expected_output { - assert_eq!(expected_output[i], next_state[i]); - } - } - } else { - external_rounds_state[r + 1] = next_state; - } - } - } - - fn populate_external_round( - &self, - external_rounds_state: &[[F; WIDTH]], - sbox: &mut Option<&mut [[F; WIDTH]; NUM_EXTERNAL_ROUNDS]>, - r: usize, - ) -> [F; WIDTH] { - let mut state = { - let round_state: &[F; WIDTH] = external_rounds_state[r].borrow(); - - // Add round constants. - // - // Optimization: Since adding a constant is a degree 1 operation, we can avoid adding - // columns for it, and instead include it in the constraint for the x^3 part of the - // sbox. - let round = if r < NUM_EXTERNAL_ROUNDS / 2 { r } else { r + NUM_INTERNAL_ROUNDS }; - let mut add_rc = *round_state; - for i in 0..WIDTH { - add_rc[i] += F::from_wrapped_u32(RC_16_30_U32[round][i]); - } - - // Apply the sboxes. 
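The sbox population that follows commits only the degree-3 power to the trace and re-derives x^7 inside the constraint, as the optimization comments describe. A rough sketch of that witness layout, with plain modular arithmetic over the BabyBear modulus standing in for the field type:

```rust
const P: u64 = 2_013_265_921; // the BabyBear modulus, 2^31 - 2^27 + 1

fn mul(a: u64, b: u64) -> u64 {
    a % P * (b % P) % P
}

/// Returns (x^3, x^7) for one state element after the round constant has been added.
fn sbox_witness(add_rc: u64) -> (u64, u64) {
    let x3 = mul(mul(add_rc, add_rc), add_rc); // the only power committed as a column
    let x7 = mul(mul(x3, x3), add_rc);         // re-derived inside the linear-layer constraint
    (x3, x7)
}

fn main() {
    let (x3, x7) = sbox_witness(5);
    assert_eq!((x3, x7), (125, 78_125)); // 5^3 and 5^7
}
```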
- // Optimization: since the linear layer that comes after the sbox is degree 1, we can - // avoid adding columns for the result of the sbox, and instead include the x^3 -> x^7 - // part of the sbox in the constraint for the linear layer - let mut sbox_deg_7: [F; 16] = [F::zero(); WIDTH]; - let mut sbox_deg_3: [F; 16] = [F::zero(); WIDTH]; - for i in 0..WIDTH { - sbox_deg_3[i] = add_rc[i] * add_rc[i] * add_rc[i]; - sbox_deg_7[i] = sbox_deg_3[i] * sbox_deg_3[i] * add_rc[i]; - } - - if let Some(sbox) = sbox.as_deref_mut() { - sbox[r] = sbox_deg_3; - } - - sbox_deg_7 - }; - - // Apply the linear layer. - external_linear_layer(&mut state); - state - } - - fn populate_internal_rounds( - &self, - internal_rounds_state: &[F; WIDTH], - internal_rounds_s0: &mut [F; NUM_INTERNAL_ROUNDS - 1], - sbox: &mut Option<&mut [F; NUM_INTERNAL_ROUNDS]>, - ) -> [F; WIDTH] { - let mut state: [F; WIDTH] = *internal_rounds_state; - let mut sbox_deg_3: [F; NUM_INTERNAL_ROUNDS] = [F::zero(); NUM_INTERNAL_ROUNDS]; - for r in 0..NUM_INTERNAL_ROUNDS { - // Add the round constant to the 0th state element. - // Optimization: Since adding a constant is a degree 1 operation, we can avoid adding - // columns for it, just like for external rounds. - let round = r + NUM_EXTERNAL_ROUNDS / 2; - let add_rc = state[0] + F::from_wrapped_u32(RC_16_30_U32[round][0]); - - // Apply the sboxes. - // Optimization: since the linear layer that comes after the sbox is degree 1, we can - // avoid adding columns for the result of the sbox, just like for external rounds. - sbox_deg_3[r] = add_rc * add_rc * add_rc; - let sbox_deg_7 = sbox_deg_3[r] * sbox_deg_3[r] * add_rc; - - // Apply the linear layer. - state[0] = sbox_deg_7; - internal_linear_layer(&mut state); - - // Optimization: since we're only applying the sbox to the 0th state element, we only - // need to have columns for the 0th state element at every step. This is because the - // linear layer is degree 1, so all state elements at the end can be expressed as a - // degree-3 polynomial of the state at the beginning of the internal rounds and the 0th - // state element at rounds prior to the current round - if r < NUM_INTERNAL_ROUNDS - 1 { - internal_rounds_s0[r] = state[0]; - } - } - - let ret_state = state; - - if let Some(sbox) = sbox.as_deref_mut() { - *sbox = sbox_deg_3; - } - - ret_state - } -} diff --git a/crates/recursion/core/src/program/mod.rs b/crates/recursion/core/src/program/mod.rs deleted file mode 100644 index 81a2fc2a41..0000000000 --- a/crates/recursion/core/src/program/mod.rs +++ /dev/null @@ -1,162 +0,0 @@ -use crate::air::SP1RecursionAirBuilder; -use core::{ - borrow::{Borrow, BorrowMut}, - mem::size_of, -}; -use p3_air::{Air, BaseAir, PairBuilder}; -use p3_field::PrimeField32; -use p3_matrix::{dense::RowMajorMatrix, Matrix}; -use sp1_core_machine::utils::pad_rows_fixed; -use sp1_stark::air::MachineAir; -use std::collections::HashMap; -use tracing::instrument; - -use sp1_derive::AlignedBorrow; - -use crate::{ - cpu::columns::{InstructionCols, OpcodeSelectorCols}, - runtime::{ExecutionRecord, RecursionProgram}, -}; - -pub const NUM_PROGRAM_PREPROCESSED_COLS: usize = size_of::>(); -pub const NUM_PROGRAM_MULT_COLS: usize = size_of::>(); - -/// The column layout for the chip. -#[derive(AlignedBorrow, Clone, Copy, Default)] -#[repr(C)] -pub struct ProgramPreprocessedCols { - pub pc: T, - pub instruction: InstructionCols, - pub selectors: OpcodeSelectorCols, -} - -/// The column layout for the chip. 
-#[derive(AlignedBorrow, Clone, Copy, Default)] -#[repr(C)] -pub struct ProgramMultiplicityCols { - pub multiplicity: T, -} - -/// A chip that implements addition for the opcodes ADD and ADDI. -#[derive(Default)] -pub struct ProgramChip; - -impl ProgramChip { - pub const fn new() -> Self { - Self {} - } -} - -impl MachineAir for ProgramChip { - type Record = ExecutionRecord; - - type Program = RecursionProgram; - - fn name(&self) -> String { - "Program".to_string() - } - - fn preprocessed_width(&self) -> usize { - NUM_PROGRAM_PREPROCESSED_COLS - } - - fn generate_preprocessed_trace(&self, program: &Self::Program) -> Option> { - let max_program_size = program.instructions.len(); - let mut rows = program.instructions[0..max_program_size] - .iter() - .enumerate() - .map(|(i, instruction)| { - let pc = i as u32; - let mut row = [F::zero(); NUM_PROGRAM_PREPROCESSED_COLS]; - let cols: &mut ProgramPreprocessedCols = row.as_mut_slice().borrow_mut(); - cols.pc = F::from_canonical_u32(pc); - cols.selectors.populate(instruction); - cols.instruction.populate(instruction); - row - }) - .collect::>(); - - // Pad the trace to a power of two. - pad_rows_fixed(&mut rows, || [F::zero(); NUM_PROGRAM_PREPROCESSED_COLS], None); - - // Convert the trace to a row major matrix. - Some(RowMajorMatrix::new( - rows.into_iter().flatten().collect::>(), - NUM_PROGRAM_PREPROCESSED_COLS, - )) - } - - fn generate_dependencies(&self, _: &Self::Record, _: &mut Self::Record) { - // This is a no-op. - } - - #[instrument(name = "generate program trace", level = "debug", skip_all, fields(rows = input.program.instructions.len()))] - fn generate_trace( - &self, - input: &ExecutionRecord, - _output: &mut ExecutionRecord, - ) -> RowMajorMatrix { - // Collect the number of times each instruction is called from the cpu events. - // Store it as a map of PC -> count. - let mut instruction_counts = HashMap::new(); - input.cpu_events.iter().for_each(|event| { - let pc = event.pc; - instruction_counts - .entry(pc.as_canonical_u32()) - .and_modify(|count| *count += 1) - .or_insert(1); - }); - - let max_program_size = input.program.instructions.len(); - let mut rows = input.program.instructions[0..max_program_size] - .iter() - .enumerate() - .map(|(i, _)| { - let pc = i as u32; - let mut row = [F::zero(); NUM_PROGRAM_MULT_COLS]; - let cols: &mut ProgramMultiplicityCols = row.as_mut_slice().borrow_mut(); - cols.multiplicity = - F::from_canonical_usize(*instruction_counts.get(&pc).unwrap_or(&0)); - row - }) - .collect::>(); - - // Pad the trace to a power of two. - pad_rows_fixed(&mut rows, || [F::zero(); NUM_PROGRAM_MULT_COLS], None); - - // Convert the trace to a row major matrix. 
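The `generate_trace` logic above reduces to counting how many times each pc appears in the CPU events and writing that count into the multiplicity column. A small sketch of that counting, with `u32` program counters standing in for field elements:

```rust
use std::collections::HashMap;

/// Each executed pc increments a counter; every program row then gets the count for its
/// pc, defaulting to 0 for instructions that never ran.
fn instruction_multiplicities(program_len: usize, executed_pcs: &[u32]) -> Vec<usize> {
    let mut counts: HashMap<u32, usize> = HashMap::new();
    for &pc in executed_pcs {
        *counts.entry(pc).or_insert(0) += 1;
    }
    (0..program_len as u32).map(|pc| counts.get(&pc).copied().unwrap_or(0)).collect()
}

fn main() {
    // A 4-instruction program where pc 1 runs twice and pc 3 never runs.
    assert_eq!(instruction_multiplicities(4, &[0, 1, 2, 1]), vec![1, 2, 1, 0]);
}
```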
- RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_PROGRAM_MULT_COLS) - } - - fn included(&self, _: &Self::Record) -> bool { - true - } -} - -impl BaseAir for ProgramChip { - fn width(&self) -> usize { - NUM_PROGRAM_MULT_COLS - } -} - -impl Air for ProgramChip -where - AB: SP1RecursionAirBuilder + PairBuilder, -{ - fn eval(&self, builder: &mut AB) { - let main = builder.main(); - let preprocessed = builder.preprocessed(); - - let prep_local = preprocessed.row_slice(0); - let prep_local: &ProgramPreprocessedCols = (*prep_local).borrow(); - let mult_local = main.row_slice(0); - let mult_local: &ProgramMultiplicityCols = (*mult_local).borrow(); - - builder.receive_program( - prep_local.pc, - prep_local.instruction, - prep_local.selectors, - mult_local.multiplicity, - ); - } -} diff --git a/crates/recursion/core/src/range_check/air.rs b/crates/recursion/core/src/range_check/air.rs deleted file mode 100644 index 0845c3c78d..0000000000 --- a/crates/recursion/core/src/range_check/air.rs +++ /dev/null @@ -1,43 +0,0 @@ -use core::borrow::Borrow; - -use p3_air::{Air, AirBuilder, BaseAir, PairBuilder}; -use p3_field::Field; -use p3_matrix::Matrix; - -use super::{ - columns::{RangeCheckMultCols, RangeCheckPreprocessedCols, NUM_RANGE_CHECK_MULT_COLS}, - RangeCheckChip, RangeCheckOpcode, -}; -use crate::air::SP1RecursionAirBuilder; - -impl BaseAir for RangeCheckChip { - fn width(&self) -> usize { - NUM_RANGE_CHECK_MULT_COLS - } -} - -impl Air for RangeCheckChip { - /// Eval's the range check chip. - fn eval(&self, builder: &mut AB) { - let main = builder.main(); - let local_mult = main.row_slice(0); - let local_mult: &RangeCheckMultCols = (*local_mult).borrow(); - - let prep = builder.preprocessed(); - let prep = prep.row_slice(0); - let local: &RangeCheckPreprocessedCols = (*prep).borrow(); - - // Send all the lookups for each operation. - for (i, opcode) in RangeCheckOpcode::all().iter().enumerate() { - let field_op = opcode.as_field::(); - let mult = local_mult.multiplicities[i]; - - // Ensure that all U12 range check lookups are not outside of the U12 range. - if *opcode == RangeCheckOpcode::U12 { - builder.when(local.u12_out_range).assert_zero(mult); - } - - builder.receive_range_check(field_op, local.value_u16, mult); - } - } -} diff --git a/crates/recursion/core/src/range_check/columns.rs b/crates/recursion/core/src/range_check/columns.rs deleted file mode 100644 index eef8421394..0000000000 --- a/crates/recursion/core/src/range_check/columns.rs +++ /dev/null @@ -1,29 +0,0 @@ -use sp1_derive::AlignedBorrow; -use std::mem::size_of; - -use super::NUM_RANGE_CHECK_OPS; - -/// The number of main trace columns for `RangeCheckChip`. -pub const NUM_RANGE_CHECK_PREPROCESSED_COLS: usize = size_of::>(); - -/// The number of multiplicity columns for `RangeCheckChip`. -pub const NUM_RANGE_CHECK_MULT_COLS: usize = size_of::>(); - -#[derive(Debug, Clone, Copy, AlignedBorrow)] -#[repr(C)] -pub struct RangeCheckPreprocessedCols { - /// Value to store all possible U16 values. - pub value_u16: T, - - /// A flag indicating whether the value is out of U12 range. - pub u12_out_range: T, -} - -/// For each byte operation in the preprocessed table, a corresponding RangeCheckMultCols row tracks -/// the number of times the operation is used. -#[derive(Debug, Clone, Copy, AlignedBorrow)] -#[repr(C)] -pub struct RangeCheckMultCols { - /// The multiplicites of each byte operation. 
- pub multiplicities: [T; NUM_RANGE_CHECK_OPS], -} diff --git a/crates/recursion/core/src/range_check/event.rs b/crates/recursion/core/src/range_check/event.rs deleted file mode 100644 index 3dbdae7bb4..0000000000 --- a/crates/recursion/core/src/range_check/event.rs +++ /dev/null @@ -1,20 +0,0 @@ -use serde::{Deserialize, Serialize}; - -use super::RangeCheckOpcode; - -/// A byte lookup event. -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] -pub struct RangeCheckEvent { - /// The opcode of the operation. - pub opcode: RangeCheckOpcode, - - /// The val to range check. - pub val: u16, -} - -impl RangeCheckEvent { - /// Creates a new `RangeCheckEvent`. - pub const fn new(opcode: RangeCheckOpcode, val: u16) -> Self { - Self { opcode, val } - } -} diff --git a/crates/recursion/core/src/range_check/mod.rs b/crates/recursion/core/src/range_check/mod.rs deleted file mode 100644 index 10d6cc9f65..0000000000 --- a/crates/recursion/core/src/range_check/mod.rs +++ /dev/null @@ -1,72 +0,0 @@ -pub mod air; -pub mod columns; -pub mod event; -pub mod opcode; -pub mod trace; - -pub use event::RangeCheckEvent; -pub use opcode::*; - -use alloc::collections::BTreeMap; -use core::borrow::BorrowMut; -use std::marker::PhantomData; - -use p3_field::Field; -use p3_matrix::dense::RowMajorMatrix; - -use self::columns::{RangeCheckPreprocessedCols, NUM_RANGE_CHECK_PREPROCESSED_COLS}; -use crate::range_check::trace::NUM_ROWS; - -/// The number of different range check operations. -pub const NUM_RANGE_CHECK_OPS: usize = 2; - -/// A chip for computing range check operations. -/// -/// The chip contains a preprocessed table of all possible range check operations. Other chips can -/// then use lookups into this table to range check their values. -#[derive(Debug, Clone, Copy, Default)] -pub struct RangeCheckChip(PhantomData); - -impl RangeCheckChip { - /// Creates the preprocessed range check trace and event map. - /// - /// This function returns a pair `(trace, map)`, where: - /// - `trace` is a matrix containing all possible range check values. - /// - `map` is a map from a range check lookup to the value's corresponding row it appears in - /// the table and the index of the result in the array of multiplicities. - pub fn trace_and_map() -> (RowMajorMatrix, BTreeMap) { - // A map from a byte lookup to its corresponding row in the table and index in the array of - // multiplicities. - let mut event_map = BTreeMap::new(); - - // The trace containing all values, with all multiplicities set to zero. - let mut initial_trace = RowMajorMatrix::new( - vec![F::zero(); NUM_ROWS * NUM_RANGE_CHECK_PREPROCESSED_COLS], - NUM_RANGE_CHECK_PREPROCESSED_COLS, - ); - - // Record all the necessary operations for each range check lookup. - let opcodes = RangeCheckOpcode::all(); - - // Iterate over all U16 values. - for (row_index, val) in (0..=u16::MAX).enumerate() { - let col: &mut RangeCheckPreprocessedCols = - initial_trace.row_mut(row_index).borrow_mut(); - - // Set the u16 value. - col.value_u16 = F::from_canonical_u16(val); - - // Iterate over all range check operations to update col values and the table map. 
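The removed `trace_and_map` builds one preprocessed row per `u16` value and flags the rows whose value does not fit in 12 bits, which is what lets the AIR zero out U12 multiplicities on those rows. A minimal sketch of that table, ignoring the event map and the multiplicity columns:

```rust
/// One preprocessed row per u16 value, with a flag marking values outside the 12-bit range.
struct RangeCheckRow {
    value_u16: u16,
    u12_out_range: bool,
}

fn build_table() -> Vec<RangeCheckRow> {
    (0..=u16::MAX)
        .map(|val| RangeCheckRow { value_u16: val, u12_out_range: val > 0xFFF })
        .collect()
}

fn main() {
    let table = build_table();
    assert_eq!(table.len(), 1 << 16);
    assert!(!table[0x0FFF].u12_out_range); // 4095 fits in 12 bits
    assert!(table[0x1000].u12_out_range);  // 4096 does not
}
```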
- for (i, opcode) in opcodes.iter().enumerate() { - if *opcode == RangeCheckOpcode::U12 { - col.u12_out_range = F::from_bool(val > 0xFFF); - } - - let event = RangeCheckEvent::new(*opcode, val); - event_map.insert(event, (row_index, i)); - } - } - - (initial_trace, event_map) - } -} diff --git a/crates/recursion/core/src/range_check/trace.rs b/crates/recursion/core/src/range_check/trace.rs deleted file mode 100644 index ca77a0c8b2..0000000000 --- a/crates/recursion/core/src/range_check/trace.rs +++ /dev/null @@ -1,63 +0,0 @@ -use std::borrow::BorrowMut; - -use p3_field::PrimeField32; -use p3_matrix::dense::RowMajorMatrix; -use sp1_stark::air::MachineAir; - -use super::{ - columns::{RangeCheckMultCols, NUM_RANGE_CHECK_MULT_COLS, NUM_RANGE_CHECK_PREPROCESSED_COLS}, - RangeCheckChip, -}; -use crate::runtime::{ExecutionRecord, RecursionProgram}; - -pub const NUM_ROWS: usize = 1 << 16; - -impl MachineAir for RangeCheckChip { - type Record = ExecutionRecord; - type Program = RecursionProgram; - - fn name(&self) -> String { - "RangeCheck".to_string() - } - - fn preprocessed_width(&self) -> usize { - NUM_RANGE_CHECK_PREPROCESSED_COLS - } - - fn generate_preprocessed_trace(&self, _program: &Self::Program) -> Option> { - let (trace, _) = Self::trace_and_map(); - - Some(trace) - } - - fn generate_dependencies(&self, _: &Self::Record, _: &mut Self::Record) { - // This is a no-op. - } - - fn generate_trace( - &self, - input: &ExecutionRecord, - _output: &mut ExecutionRecord, - ) -> RowMajorMatrix { - let (_, event_map) = Self::trace_and_map(); - - let mut trace = RowMajorMatrix::new( - vec![F::zero(); NUM_RANGE_CHECK_MULT_COLS * NUM_ROWS], - NUM_RANGE_CHECK_MULT_COLS, - ); - - for (lookup, mult) in input.range_check_events.iter() { - let (row, index) = event_map[lookup]; - let cols: &mut RangeCheckMultCols = trace.row_mut(row).borrow_mut(); - - // Update the trace multiplicity - cols.multiplicities[index] += F::from_canonical_usize(*mult); - } - - trace - } - - fn included(&self, _shard: &Self::Record) -> bool { - true - } -} diff --git a/crates/recursion/core/src/runtime/instruction.rs b/crates/recursion/core/src/runtime/instruction.rs index 0748af68b1..6a7dbc8725 100644 --- a/crates/recursion/core/src/runtime/instruction.rs +++ b/crates/recursion/core/src/runtime/instruction.rs @@ -1,77 +1,215 @@ -use p3_field::PrimeField32; -use serde::{Deserialize, Serialize}; +use std::borrow::Borrow; -use crate::air::Block; +use p3_field::{AbstractExtensionField, AbstractField}; +use serde::{Deserialize, Serialize}; -use super::{Opcode, D}; +use crate::*; #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Instruction { - /// Which operation to execute. - pub opcode: Opcode, - - /// The first operand. - pub op_a: F, - - /// The second operand. - pub op_b: Block, - - /// The third operand. - pub op_c: Block, - - // The offset imm operand. - pub offset_imm: F, - - // The size imm operand. - pub size_imm: F, - - /// Whether the second operand is an immediate value. - pub imm_b: bool, - - /// Whether the third operand is an immediate value. - pub imm_c: bool, - - /// A debug string for the instruction. 
- pub debug: String, -} - -impl Instruction { - #[allow(clippy::too_many_arguments)] - pub fn new( - opcode: Opcode, - op_a: F, - op_b: [F; D], - op_c: [F; D], - offset_imm: F, - size_imm: F, - imm_b: bool, - imm_c: bool, - debug: String, - ) -> Self { - Self { - opcode, - op_a, - op_b: Block::from(op_b), - op_c: Block::from(op_c), - offset_imm, - size_imm, - imm_b, - imm_c, - debug, - } - } - - pub fn dummy() -> Self { - Instruction::new( - Opcode::ADD, - F::zero(), - [F::zero(); 4], - [F::zero(); 4], - F::zero(), - F::zero(), - false, - false, - "".to_string(), - ) - } +pub enum Instruction { + BaseAlu(BaseAluInstr), + ExtAlu(ExtAluInstr), + Mem(MemInstr), + Poseidon2(Box>), + ExpReverseBitsLen(ExpReverseBitsInstr), + HintBits(HintBitsInstr), + FriFold(Box>), + Print(PrintInstr), + HintExt2Felts(HintExt2FeltsInstr), + CommitPublicValues(Box>), + Hint(HintInstr), +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct HintBitsInstr { + /// Addresses and mults of the output bits. + pub output_addrs_mults: Vec<(Address, F)>, + /// Input value to decompose. + pub input_addr: Address, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct PrintInstr { + pub field_elt_type: FieldEltType, + pub addr: Address, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct HintInstr { + /// Addresses and mults of the output felts. + pub output_addrs_mults: Vec<(Address, F)>, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct HintExt2FeltsInstr { + /// Addresses and mults of the output bits. + pub output_addrs_mults: [(Address, F); D], + /// Input value to decompose. + pub input_addr: Address, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum FieldEltType { + Base, + Extension, +} + +pub fn base_alu( + opcode: BaseAluOpcode, + mult: u32, + out: u32, + in1: u32, + in2: u32, +) -> Instruction { + Instruction::BaseAlu(BaseAluInstr { + opcode, + mult: F::from_canonical_u32(mult), + addrs: BaseAluIo { + out: Address(F::from_canonical_u32(out)), + in1: Address(F::from_canonical_u32(in1)), + in2: Address(F::from_canonical_u32(in2)), + }, + }) +} + +pub fn ext_alu( + opcode: ExtAluOpcode, + mult: u32, + out: u32, + in1: u32, + in2: u32, +) -> Instruction { + Instruction::ExtAlu(ExtAluInstr { + opcode, + mult: F::from_canonical_u32(mult), + addrs: ExtAluIo { + out: Address(F::from_canonical_u32(out)), + in1: Address(F::from_canonical_u32(in1)), + in2: Address(F::from_canonical_u32(in2)), + }, + }) +} + +pub fn mem( + kind: MemAccessKind, + mult: u32, + addr: u32, + val: u32, +) -> Instruction { + mem_single(kind, mult, addr, F::from_canonical_u32(val)) +} + +pub fn mem_single( + kind: MemAccessKind, + mult: u32, + addr: u32, + val: F, +) -> Instruction { + mem_block(kind, mult, addr, Block::from(val)) +} + +pub fn mem_ext>( + kind: MemAccessKind, + mult: u32, + addr: u32, + val: EF, +) -> Instruction { + mem_block(kind, mult, addr, val.as_base_slice().into()) +} + +pub fn mem_block( + kind: MemAccessKind, + mult: u32, + addr: u32, + val: Block, +) -> Instruction { + Instruction::Mem(MemInstr { + addrs: MemIo { inner: Address(F::from_canonical_u32(addr)) }, + vals: MemIo { inner: val }, + mult: F::from_canonical_u32(mult), + kind, + }) +} + +pub fn poseidon2( + mults: [u32; WIDTH], + output: [u32; WIDTH], + input: [u32; WIDTH], +) -> Instruction { + Instruction::Poseidon2(Box::new(Poseidon2Instr { + mults: mults.map(F::from_canonical_u32), + addrs: Poseidon2Io { + output: output.map(F::from_canonical_u32).map(Address), + input: 
input.map(F::from_canonical_u32).map(Address), + }, + })) +} + +pub fn exp_reverse_bits_len( + mult: u32, + base: F, + exp: Vec, + result: F, +) -> Instruction { + Instruction::ExpReverseBitsLen(ExpReverseBitsInstr { + mult: F::from_canonical_u32(mult), + addrs: ExpReverseBitsIo { + base: Address(base), + exp: exp.into_iter().map(Address).collect(), + result: Address(result), + }, + }) +} + +#[allow(clippy::too_many_arguments)] +pub fn fri_fold( + z: u32, + alpha: u32, + x: u32, + mat_opening: Vec, + ps_at_z: Vec, + alpha_pow_input: Vec, + ro_input: Vec, + alpha_pow_output: Vec, + ro_output: Vec, + alpha_mults: Vec, + ro_mults: Vec, +) -> Instruction { + Instruction::FriFold(Box::new(FriFoldInstr { + base_single_addrs: FriFoldBaseIo { x: Address(F::from_canonical_u32(x)) }, + ext_single_addrs: FriFoldExtSingleIo { + z: Address(F::from_canonical_u32(z)), + alpha: Address(F::from_canonical_u32(alpha)), + }, + ext_vec_addrs: FriFoldExtVecIo { + mat_opening: mat_opening + .iter() + .map(|elm| Address(F::from_canonical_u32(*elm))) + .collect(), + ps_at_z: ps_at_z.iter().map(|elm| Address(F::from_canonical_u32(*elm))).collect(), + alpha_pow_input: alpha_pow_input + .iter() + .map(|elm| Address(F::from_canonical_u32(*elm))) + .collect(), + ro_input: ro_input.iter().map(|elm| Address(F::from_canonical_u32(*elm))).collect(), + alpha_pow_output: alpha_pow_output + .iter() + .map(|elm| Address(F::from_canonical_u32(*elm))) + .collect(), + ro_output: ro_output.iter().map(|elm| Address(F::from_canonical_u32(*elm))).collect(), + }, + alpha_pow_mults: alpha_mults.iter().map(|mult| F::from_canonical_u32(*mult)).collect(), + ro_mults: ro_mults.iter().map(|mult| F::from_canonical_u32(*mult)).collect(), + })) +} + +pub fn commit_public_values( + public_values_a: &RecursionPublicValues, +) -> Instruction { + let pv_a = public_values_a.as_array().map(|pv| Address(F::from_canonical_u32(pv))); + let pv_address: &RecursionPublicValues> = pv_a.as_slice().borrow(); + + Instruction::CommitPublicValues(Box::new(CommitPublicValuesInstr { + pv_addrs: pv_address.clone(), + })) } diff --git a/crates/recursion/core-v2/src/runtime/memory.rs b/crates/recursion/core/src/runtime/memory.rs similarity index 98% rename from crates/recursion/core-v2/src/runtime/memory.rs rename to crates/recursion/core/src/runtime/memory.rs index 89cc829ec3..a82337b684 100644 --- a/crates/recursion/core-v2/src/runtime/memory.rs +++ b/crates/recursion/core/src/runtime/memory.rs @@ -1,10 +1,9 @@ use std::iter::repeat; use p3_field::PrimeField64; -use sp1_recursion_core::air::Block; use vec_map::{Entry, VecMap}; -use crate::Address; +use crate::{air::Block, Address}; #[derive(Debug, Clone, Default)] pub struct MemoryEntry { diff --git a/crates/recursion/core/src/runtime/mod.rs b/crates/recursion/core/src/runtime/mod.rs index 045a6557be..f3ee33513f 100644 --- a/crates/recursion/core/src/runtime/mod.rs +++ b/crates/recursion/core/src/runtime/mod.rs @@ -1,35 +1,41 @@ -mod instruction; +pub mod instruction; +mod memory; mod opcode; mod program; mod record; -mod utils; -use std::{array, collections::VecDeque, fmt, marker::PhantomData, sync::Arc}; +// Avoid triggering annoying branch of thiserror derive macro. 
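The new constructor helpers (`mem`, `base_alu`, and friends) take raw `u32` addresses and multiplicities and wrap them into `Address<F>` values. A hypothetical usage sketch follows; the module paths and the choice of BabyBear are assumptions, but the helper signatures and the read-as-assertion behavior match the code in this diff:

```rust
// Hypothetical test program; module paths and BabyBear are assumptions.
use p3_baby_bear::BabyBear;

use crate::runtime::instruction as instr;
use crate::{BaseAluOpcode, Instruction, MemAccessKind};

fn tiny_program() -> Vec<Instruction<BabyBear>> {
    vec![
        // Write the constants 3 and 4 into addresses 0 and 1; each is consumed once.
        instr::mem(MemAccessKind::Write, 1, 0, 3),
        instr::mem(MemAccessKind::Write, 1, 1, 4),
        // addr 2 <- addr 0 + addr 1, with multiplicity 1.
        instr::base_alu(BaseAluOpcode::AddF, 1, 2, 0, 1),
        // A Read on constant memory doubles as an assertion that addr 2 holds 7.
        instr::mem(MemAccessKind::Read, 1, 2, 7),
    ]
}
```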
+use backtrace::Backtrace as Trace; +pub use instruction::Instruction; +use instruction::{FieldEltType, HintBitsInstr, HintExt2FeltsInstr, HintInstr, PrintInstr}; +use memory::*; +pub use opcode::*; +pub use program::*; +pub use record::*; + +use std::{ + array, + borrow::Borrow, + collections::VecDeque, + fmt::Debug, + io::{stdout, Write}, + iter::zip, + marker::PhantomData, + sync::Arc, +}; use hashbrown::HashMap; -pub use instruction::*; use itertools::Itertools; -pub use opcode::*; +use p3_field::{AbstractField, ExtensionField, PrimeField32}; use p3_poseidon2::{Poseidon2, Poseidon2ExternalMatrixGeneral}; use p3_symmetric::{CryptographicPermutation, Permutation}; -pub use program::*; -pub use record::*; -use sp1_core_executor::events::MemoryAccessPosition; -pub use utils::*; +use p3_util::reverse_bits_len; +use thiserror::Error; -use crate::{ - air::{Block, RECURSION_PUBLIC_VALUES_COL_MAP, RECURSIVE_PROOF_NUM_PV_ELTS}, - cpu::CpuEvent, - exp_reverse_bits::ExpReverseBitsLenEvent, - fri_fold::FriFoldEvent, - memory::{compute_addr_diff, MemoryRecord}, - poseidon2_wide::events::{ - Poseidon2AbsorbEvent, Poseidon2CompressEvent, Poseidon2FinalizeEvent, Poseidon2HashEvent, - }, - range_check::{RangeCheckEvent, RangeCheckOpcode}, -}; +use crate::air::{Block, RECURSIVE_PROOF_NUM_PV_ELTS}; -use p3_field::{ExtensionField, PrimeField32}; +/// TODO expand glob import once things are organized enough +use crate::*; /// The heap pointer address. pub const HEAP_PTR: i32 = -4; @@ -51,20 +57,6 @@ pub const NUM_BITS: usize = 31; pub const D: usize = 4; -#[derive(Debug, Clone, Default)] -pub struct CpuRecord { - pub a: Option>, - pub b: Option>, - pub c: Option>, - pub memory: Option>, -} - -#[derive(Debug, Clone, Default)] -pub struct MemoryEntry { - pub value: Block, - pub timestamp: F, -} - #[derive(Debug, Clone, Default)] pub struct CycleTrackerEntry { pub span_entered: bool, @@ -72,16 +64,15 @@ pub struct CycleTrackerEntry { pub cumulative_cycles: usize, } -pub struct Runtime, Diffusion> { +/// TODO fully document. +/// Taken from [`sp1_recursion_core::runtime::Runtime`]. +/// Many missing things (compared to the old `Runtime`) will need to be implemented. +pub struct Runtime<'a, F: PrimeField32, EF: ExtensionField, Diffusion> { pub timestamp: usize, pub nb_poseidons: usize, - pub nb_poseidon_permutes: usize, - - pub nb_erb_lens: usize, - - pub nb_fri_folds: usize, + pub nb_wide_poseidons: usize, pub nb_bit_decompositions: usize, @@ -93,6 +84,10 @@ pub struct Runtime, Diffusion> { pub nb_branch_ops: usize, + pub nb_exp_reverse_bits: usize, + + pub nb_fri_fold: usize, + pub nb_print_f: usize, pub nb_print_e: usize, @@ -100,34 +95,26 @@ pub struct Runtime, Diffusion> { /// The current clock. pub clk: F, - /// The frame pointer. - pub fp: F, - /// The program counter. pub pc: F, /// The program. - pub program: RecursionProgram, - - /// Memory. - // pub memory: Vec>, - pub memory: HashMap>, + pub program: Arc>, - /// Uninitialized memory addresses that have a specific value they should be initialized with. - /// The Opcodes that start with Hint* utilize this to set memory values. - pub uninitialized_memory: HashMap>, + /// Memory. From canonical usize of an Address to a MemoryEntry. + pub memory: MemVecMap, /// The execution record. pub record: ExecutionRecord, - /// The access record for this cycle. - pub access: CpuRecord, - - pub witness_stream: VecDeque>>, + pub witness_stream: VecDeque>, pub cycle_tracker: HashMap, - // pub witness_stream: Vec>, + /// The stream that print statements write to. 
+ pub debug_stdout: Box, + + /// Entries for dealing with the Poseidon2 hash state. perm: Option< Poseidon2< F, @@ -138,31 +125,42 @@ pub struct Runtime, Diffusion> { >, >, - p2_hash_state: [F; PERMUTATION_WIDTH], - - p2_hash_state_cursor: usize, - - p2_current_hash_num: Option, + _marker_ef: PhantomData, - _marker: PhantomData, + _marker_diffusion: PhantomData, } -#[derive(Debug)] -pub enum RuntimeError { - Trap(String), -} - -impl fmt::Display for RuntimeError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> fmt::Result { - match self { - RuntimeError::Trap(msg) => write!(f, "TRAP encountered: {}", msg), - } - } +#[derive(Error, Debug)] +pub enum RuntimeError { + #[error( + "attempted to perform base field division {in1:?}/{in2:?} \ + from instruction {instr:?} at pc {pc:?}\nnearest pc with backtrace:\n{trace:?}" + )] + DivFOutOfDomain { + in1: F, + in2: F, + instr: BaseAluInstr, + pc: usize, + trace: Option<(usize, Trace)>, + }, + #[error( + "attempted to perform extension field division {in1:?}/{in2:?} \ + from instruction {instr:?} at pc {pc:?}\nnearest pc with backtrace:\n{trace:?}" + )] + DivEOutOfDomain { + in1: EF, + in2: EF, + instr: ExtAluInstr, + pc: usize, + trace: Option<(usize, Trace)>, + }, + #[error("failed to print to `debug_stdout`: {0}")] + DebugPrint(#[from] std::io::Error), + #[error("attempted to read from empty witness stream")] + EmptyWitnessStream, } -impl std::error::Error for RuntimeError {} - -impl, Diffusion> Runtime +impl<'a, F: PrimeField32, EF: ExtensionField, Diffusion> Runtime<'a, F, EF, Diffusion> where Poseidon2< F, @@ -173,7 +171,7 @@ where >: CryptographicPermutation<[F; PERMUTATION_WIDTH]>, { pub fn new( - program: &RecursionProgram, + program: Arc>, perm: Poseidon2< F, Poseidon2ExternalMatrixGeneral, @@ -182,79 +180,41 @@ where POSEIDON2_SBOX_DEGREE, >, ) -> Self { - let record = - ExecutionRecord:: { program: Arc::new(program.clone()), ..Default::default() }; + let record = ExecutionRecord:: { program: program.clone(), ..Default::default() }; + let memory = Memory::with_capacity(program.total_memory); Self { timestamp: 0, nb_poseidons: 0, - nb_poseidon_permutes: 0, - nb_erb_lens: 0, - nb_fri_folds: 0, + nb_wide_poseidons: 0, nb_bit_decompositions: 0, + nb_exp_reverse_bits: 0, nb_ext_ops: 0, nb_base_ops: 0, nb_memory_ops: 0, nb_branch_ops: 0, + nb_fri_fold: 0, nb_print_f: 0, nb_print_e: 0, clk: F::zero(), - program: program.clone(), - fp: F::from_canonical_usize(STACK_SIZE), - pc: F::zero(), - memory: HashMap::new(), - uninitialized_memory: HashMap::new(), - record, - perm: Some(perm), - access: CpuRecord::default(), - witness_stream: VecDeque::new(), - cycle_tracker: HashMap::new(), - p2_hash_state: [F::zero(); PERMUTATION_WIDTH], - p2_hash_state_cursor: 0, - p2_current_hash_num: None, - _marker: PhantomData, - } - } - - pub fn new_no_perm(program: &RecursionProgram) -> Self { - let record = - ExecutionRecord:: { program: Arc::new(program.clone()), ..Default::default() }; - Self { - timestamp: 0, - nb_poseidons: 0, - nb_poseidon_permutes: 0, - nb_erb_lens: 0, - nb_fri_folds: 0, - nb_bit_decompositions: 0, - nb_ext_ops: 0, - nb_base_ops: 0, - nb_memory_ops: 0, - nb_print_f: 0, - nb_print_e: 0, - nb_branch_ops: 0, - clk: F::zero(), - program: program.clone(), - fp: F::from_canonical_usize(STACK_SIZE), + program, pc: F::zero(), - memory: HashMap::new(), - uninitialized_memory: HashMap::new(), + memory, record, - perm: None, - access: CpuRecord::default(), witness_stream: VecDeque::new(), cycle_tracker: HashMap::new(), - p2_hash_state: [F::zero(); 
PERMUTATION_WIDTH], - p2_hash_state_cursor: 0, - p2_current_hash_num: None, - _marker: PhantomData, + debug_stdout: Box::new(stdout()), + perm: Some(perm), + _marker_ef: PhantomData, + _marker_diffusion: PhantomData, } } pub fn print_stats(&self) { tracing::debug!("Total Cycles: {}", self.timestamp); - tracing::debug!("Poseidon Operations: {}", self.nb_poseidons); - tracing::debug!("Poseidon Permute Operations: {}", self.nb_poseidon_permutes); - tracing::debug!("Exp Reverse Bits Len Operations: {}", self.nb_erb_lens); - tracing::debug!("FRI Fold Operations: {}", self.nb_fri_folds); + tracing::debug!("Poseidon Skinny Operations: {}", self.nb_poseidons); + tracing::debug!("Poseidon Wide Operations: {}", self.nb_wide_poseidons); + tracing::debug!("Exp Reverse Bits Operations: {}", self.nb_exp_reverse_bits); + tracing::debug!("FriFold Operations: {}", self.nb_fri_fold); tracing::debug!("Field Operations: {}", self.nb_base_ops); tracing::debug!("Extension Operations: {}", self.nb_ext_ops); tracing::debug!("Memory Operations: {}", self.nb_memory_ops); @@ -264,909 +224,300 @@ where } } - // Peek at the memory without touching the record. - fn peek(&mut self, addr: F) -> (F, Block) { - (addr, self.memory.get(&(addr.as_canonical_u32() as usize)).unwrap().value) - } - - // Write to uninitialized memory. - fn mw_uninitialized(&mut self, addr: usize, value: Block) { - // Write it to uninitialized memory for creating MemoryInit table later. - self.uninitialized_memory - .entry(addr) - .and_modify(|_| panic!("address already initialized")) - .or_insert(value); - // Also write it to the memory map so that it can be read later. - self.memory - .entry(addr) - .and_modify(|_| panic!("address already initialized")) - .or_insert(MemoryEntry { value, timestamp: F::zero() }); - } - - /// Given a MemoryRecord event, track the range checks for the memory access. - /// This will be used later to set the multiplicities in the range check table. - fn track_memory_range_checks(&mut self, record: &MemoryRecord) { - let diff_16bit_limb_event = RangeCheckEvent::new( - RangeCheckOpcode::U16, - record.diff_16bit_limb.as_canonical_u32() as u16, - ); - let diff_12bit_limb_event = RangeCheckEvent::new( - RangeCheckOpcode::U12, - record.diff_12bit_limb.as_canonical_u32() as u16, - ); - self.record.add_range_check_events(&[diff_16bit_limb_event, diff_12bit_limb_event]); - } - - /// Track the range checks for the memory finalize table. This will be used later to set the - /// multiplicities in the range check table. The parameter `subtract_one` should be `true` when - /// used for checking address uniqueness, and `false` when used to range-check the addresses - /// themselves. 
- fn track_addr_range_check(&mut self, addr: F, next_addr: F, subtract_one: bool) { - let (diff_16, diff_12) = compute_addr_diff(next_addr, addr, subtract_one); - let diff_16bit_limb_event = - RangeCheckEvent::new(RangeCheckOpcode::U16, diff_16.as_canonical_u32() as u16); - let diff_8bit_limb_event = - RangeCheckEvent::new(RangeCheckOpcode::U12, diff_12.as_canonical_u32() as u16); - self.record.add_range_check_events(&[diff_16bit_limb_event, diff_8bit_limb_event]); - } - - fn mr(&mut self, addr: F, timestamp: F) -> (MemoryRecord, Block) { - let entry = self.memory.entry(addr.as_canonical_u32() as usize).or_default(); - let (prev_value, prev_timestamp) = (entry.value, entry.timestamp); - let record = MemoryRecord::new_read(addr, prev_value, timestamp, prev_timestamp); - *entry = MemoryEntry { value: prev_value, timestamp }; - self.track_memory_range_checks(&record); - (record, prev_value) - } - - fn mr_cpu(&mut self, addr: F, position: MemoryAccessPosition) -> Block { - let timestamp = self.timestamp(&position); - let (record, value) = self.mr(addr, timestamp); - match position { - MemoryAccessPosition::A => self.access.a = Some(record), - MemoryAccessPosition::B => self.access.b = Some(record), - MemoryAccessPosition::C => self.access.c = Some(record), - MemoryAccessPosition::Memory => self.access.memory = Some(record), - }; - value - } - - fn mw(&mut self, addr: F, value: impl Into>, timestamp: F) -> MemoryRecord { - let addr_usize = addr.as_canonical_u32() as usize; - let entry = self.memory.entry(addr_usize).or_default(); - let (prev_value, prev_timestamp) = (entry.value, entry.timestamp); - let value_as_block = value.into(); - let record = - MemoryRecord::new_write(addr, value_as_block, timestamp, prev_value, prev_timestamp); - *entry = MemoryEntry { value: value_as_block, timestamp }; - self.track_memory_range_checks(&record); - record - } - - fn mw_cpu(&mut self, addr: F, value: Block, position: MemoryAccessPosition) { - let timestamp = self.timestamp(&position); - let record = self.mw(addr, value, timestamp); - match position { - MemoryAccessPosition::A => self.access.a = Some(record), - MemoryAccessPosition::B => self.access.b = Some(record), - MemoryAccessPosition::C => self.access.c = Some(record), - MemoryAccessPosition::Memory => self.access.memory = Some(record), - }; - } - - fn timestamp(&self, position: &MemoryAccessPosition) -> F { - self.clk + F::from_canonical_u32(*position as u32) - } - - // When we read the "a" position, it is never an immediate value, so we always read from memory. - fn get_a(&mut self, instruction: &Instruction) -> Block { - self.mr_cpu(self.fp + instruction.op_a, MemoryAccessPosition::A) - } - - // Useful to peek at the value of the "a" position without updating the access record. - // This assumes that there will be a write later, which is why it also returns the addr. 
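The range-check events emitted by `track_memory_range_checks` and `track_addr_range_check` above split a difference into a 16-bit limb (checked against the U16 table) and a 12-bit limb (checked against the U12 table), bounding the value below 2^28. A small sketch of that decomposition:

```rust
/// Split a value that must fit in 28 bits into a low 16-bit limb and a high 12-bit limb.
fn split_28bit(value: u32) -> (u16, u16) {
    assert!(value < 1 << 28, "value does not fit in 16 + 12 bits");
    let low16 = (value & 0xFFFF) as u16; // checked against the U16 table
    let high12 = (value >> 16) as u16;   // checked against the U12 table
    (low16, high12)
}

fn main() {
    let (low, high) = split_28bit(0x0ABC_1234);
    assert_eq!((low, high), (0x1234, 0x0ABC));
    assert_eq!(((high as u32) << 16) | low as u32, 0x0ABC_1234);
}
```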
- fn peek_a(&self, instruction: &Instruction) -> (F, Block) { - let addr = self.fp + instruction.op_a; - ( - addr, - self.memory - .get(&(addr.as_canonical_u32() as usize)) - .map(|entry| entry.value) - .unwrap_or_default(), - ) - } - - fn get_b(&mut self, instruction: &Instruction) -> Block { - if instruction.imm_b { - instruction.op_b - } else { - self.mr_cpu(self.fp + instruction.op_b[0], MemoryAccessPosition::B) - } - } - - fn get_c(&mut self, instruction: &Instruction) -> Block { - if instruction.imm_c { - instruction.op_c + fn nearest_pc_backtrace(&mut self) -> Option<(usize, Trace)> { + let trap_pc = self.pc.as_canonical_u32() as usize; + let trace = self.program.traces.get(trap_pc).cloned()?; + if let Some(mut trace) = trace { + trace.resolve(); + Some((trap_pc, trace)) } else { - self.mr_cpu(self.fp + instruction.op_c[0], MemoryAccessPosition::C) + (0..trap_pc) + .rev() + .filter_map(|nearby_pc| { + let mut trace = self.program.traces.get(nearby_pc)?.clone()?; + trace.resolve(); + Some((nearby_pc, trace)) + }) + .next() } } - /// Fetch the destination address and input operand values for an ALU instruction. - fn alu_rr(&mut self, instruction: &Instruction) -> (F, Block, Block) { - let a_ptr = self.fp + instruction.op_a; - let c_val = self.get_c(instruction); - let b_val = self.get_b(instruction); - - (a_ptr, b_val, c_val) - } - - /// Fetch the destination address input operand values for a store instruction (from stack). - fn mem_rr(&mut self, instruction: &Instruction) -> (F, Block, Block) { - let a_ptr = self.fp + instruction.op_a; - let c_val = self.get_c(instruction); - let b_val = self.get_b(instruction); - - (a_ptr, b_val, c_val) - } - - // A function to calculate the memory address for both load and store opcodes. - fn calculate_address(b_val: Block, c_val: Block, instruction: &Instruction) -> F { - let index = c_val[0]; - let ptr = b_val[0]; - - let offset = instruction.offset_imm; - let size = instruction.size_imm; - - ptr + index * size + offset - } - - /// Fetch the input operand values for a branch instruction. - fn branch_rr(&mut self, instruction: &Instruction) -> (Block, Block, F) { - let c = instruction.op_c[0]; - let b = self.get_b(instruction); - let a = self.get_a(instruction); - (a, b, c) - } - - /// Read all the values for an instruction. - fn all_rr(&mut self, instruction: &Instruction) -> (Block, Block, Block) { - let c_val = self.get_c(instruction); - let b_val = self.get_b(instruction); - let a_val = self.get_a(instruction); - (a_val, b_val, c_val) - } - - pub fn run(&mut self) -> Result<(), RuntimeError> { + /// Compare to [sp1_recursion_core::runtime::Runtime::run]. 
+ pub fn run(&mut self) -> Result<(), RuntimeError> { let early_exit_ts = std::env::var("RECURSION_EARLY_EXIT_TS") .map_or(usize::MAX, |ts: String| ts.parse().unwrap()); while self.pc < F::from_canonical_u32(self.program.instructions.len() as u32) { let idx = self.pc.as_canonical_u32() as usize; let instruction = self.program.instructions[idx].clone(); - let mut next_clk = self.clk + F::from_canonical_u32(4); - let mut next_pc = self.pc + F::one(); - let (a, b, c): (Block, Block, Block); - match instruction.opcode { - Opcode::PrintF => { - self.nb_print_f += 1; - let (a_val, b_val, c_val) = self.all_rr(&instruction); - println!("PRINTF={}, clk={}", a_val[0], self.timestamp); - (a, b, c) = (a_val, b_val, c_val); - } - Opcode::PrintE => { - self.nb_print_e += 1; - let (a_val, b_val, c_val) = self.all_rr(&instruction); - println!("PRINTEF={:?}", a_val); - (a, b, c) = (a_val, b_val, c_val); - } - Opcode::CycleTracker => { - let (a_val, b_val, c_val) = self.all_rr(&instruction); - let name = instruction.debug.clone(); - let entry = self.cycle_tracker.entry(name).or_default(); - if !entry.span_entered { - entry.span_entered = true; - entry.span_enter_cycle = self.timestamp; - } else { - entry.span_entered = false; - entry.cumulative_cycles += self.timestamp - entry.span_enter_cycle; - } - - (a, b, c) = (a_val, b_val, c_val); - } - Opcode::ADD => { - self.nb_base_ops += 1; - let (a_ptr, b_val, c_val) = self.alu_rr(&instruction); - let mut a_val = Block::default(); - a_val[0] = b_val[0] + c_val[0]; - self.mw_cpu(a_ptr, a_val, MemoryAccessPosition::A); - - // If the instruction is a heap expansion, we need to add a range check event to - // ensure that the heap size never goes above 2^28. - if instruction_is_heap_expand(&instruction) { - let (u16_range_check, u12_range_check) = - get_heap_size_range_check_events(a_val[0]); - self.record.add_range_check_events(&[u16_range_check, u12_range_check]); - } - - (a, b, c) = (a_val, b_val, c_val); - } - Opcode::LessThanF => { - let (a_ptr, b_val, c_val) = self.alu_rr(&instruction); - let mut a_val = Block::default(); - a_val[0] = F::from_bool(b_val[0] < c_val[0]); - self.mw_cpu(a_ptr, a_val, MemoryAccessPosition::A); - (a, b, c) = (a_val, b_val, c_val); - } - Opcode::SUB => { - self.nb_base_ops += 1; - let (a_ptr, b_val, c_val) = self.alu_rr(&instruction); - let mut a_val = Block::default(); - a_val[0] = b_val[0] - c_val[0]; - self.mw_cpu(a_ptr, a_val, MemoryAccessPosition::A); - (a, b, c) = (a_val, b_val, c_val); - } - Opcode::MUL => { + let next_clk = self.clk + F::from_canonical_u32(4); + let next_pc = self.pc + F::one(); + match instruction { + Instruction::BaseAlu(instr @ BaseAluInstr { opcode, mult, addrs }) => { self.nb_base_ops += 1; - let (a_ptr, b_val, c_val) = self.alu_rr(&instruction); - let mut a_val = Block::default(); - a_val[0] = b_val[0] * c_val[0]; - self.mw_cpu(a_ptr, a_val, MemoryAccessPosition::A); - (a, b, c) = (a_val, b_val, c_val); - } - Opcode::DIV => { - self.nb_base_ops += 1; - let (a_ptr, b_val, c_val) = self.alu_rr(&instruction); - let mut a_val: Block = Block::default(); - a_val[0] = b_val[0] / c_val[0]; - self.mw_cpu(a_ptr, a_val, MemoryAccessPosition::A); - (a, b, c) = (a_val, b_val, c_val); - } - Opcode::EADD => { - self.nb_ext_ops += 1; - let (a_ptr, b_val, c_val) = self.alu_rr(&instruction); - let sum = EF::from_base_slice(&b_val.0) + EF::from_base_slice(&c_val.0); - let a_val = Block::from(sum.as_base_slice()); - self.mw_cpu(a_ptr, a_val, MemoryAccessPosition::A); - (a, b, c) = (a_val, b_val, c_val); - } - Opcode::EMUL => 
{ - self.nb_ext_ops += 1; - let (a_ptr, b_val, c_val) = self.alu_rr(&instruction); - let product = EF::from_base_slice(&b_val.0) * EF::from_base_slice(&c_val.0); - let a_val = Block::from(product.as_base_slice()); - self.mw_cpu(a_ptr, a_val, MemoryAccessPosition::A); - (a, b, c) = (a_val, b_val, c_val); - } - Opcode::ESUB => { - self.nb_ext_ops += 1; - let (a_ptr, b_val, c_val) = self.alu_rr(&instruction); - let diff = EF::from_base_slice(&b_val.0) - EF::from_base_slice(&c_val.0); - let a_val = Block::from(diff.as_base_slice()); - self.mw_cpu(a_ptr, a_val, MemoryAccessPosition::A); - (a, b, c) = (a_val, b_val, c_val); + let in1 = self.memory.mr(addrs.in1).val[0]; + let in2 = self.memory.mr(addrs.in2).val[0]; + // Do the computation. + let out = match opcode { + BaseAluOpcode::AddF => in1 + in2, + BaseAluOpcode::SubF => in1 - in2, + BaseAluOpcode::MulF => in1 * in2, + BaseAluOpcode::DivF => match in1.try_div(in2) { + Some(x) => x, + None => { + // Check for division exceptions and error. Note that 0/0 is defined + // to be 1. + if in1.is_zero() { + AbstractField::one() + } else { + return Err(RuntimeError::DivFOutOfDomain { + in1, + in2, + instr, + pc: self.pc.as_canonical_u32() as usize, + trace: self.nearest_pc_backtrace(), + }); + } + } + }, + }; + self.memory.mw(addrs.out, Block::from(out), mult); + self.record.base_alu_events.push(BaseAluEvent { out, in1, in2 }); } - Opcode::EDIV => { + Instruction::ExtAlu(instr @ ExtAluInstr { opcode, mult, addrs }) => { self.nb_ext_ops += 1; - let (a_ptr, b_val, c_val) = self.alu_rr(&instruction); - let quotient = EF::from_base_slice(&b_val.0) / EF::from_base_slice(&c_val.0); - let a_val = Block::from(quotient.as_base_slice()); - self.mw_cpu(a_ptr, a_val, MemoryAccessPosition::A); - (a, b, c) = (a_val, b_val, c_val); - } - Opcode::LOAD => { - self.nb_memory_ops += 1; - let (a_ptr, b_val, c_val) = self.mem_rr(&instruction); - let addr = Self::calculate_address(b_val, c_val, &instruction); - let a_val = self.mr_cpu(addr, MemoryAccessPosition::Memory); - self.mw_cpu(a_ptr, a_val, MemoryAccessPosition::A); - (a, b, c) = (a_val, b_val, c_val); + let in1 = self.memory.mr(addrs.in1).val; + let in2 = self.memory.mr(addrs.in2).val; + // Do the computation. + let in1_ef = EF::from_base_slice(&in1.0); + let in2_ef = EF::from_base_slice(&in2.0); + let out_ef = match opcode { + ExtAluOpcode::AddE => in1_ef + in2_ef, + ExtAluOpcode::SubE => in1_ef - in2_ef, + ExtAluOpcode::MulE => in1_ef * in2_ef, + ExtAluOpcode::DivE => match in1_ef.try_div(in2_ef) { + Some(x) => x, + None => { + // Check for division exceptions and error. Note that 0/0 is defined + // to be 1. 
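The DivF arm above, and the DivE arm it mirrors, treats a zero divisor specially: 0/0 is defined to be 1, while any other division by zero surfaces as an out-of-domain error carrying the offending instruction and the nearest backtrace. A rough sketch of just that convention, with plain integer division standing in for the field operation:

```rust
/// `try_div` fails only when the divisor is zero; 0/0 is then defined to be 1 and any
/// other x/0 becomes an error (the real runtime attaches the instruction and backtrace).
fn div_semantics(in1: i64, in2: i64) -> Result<i64, String> {
    let try_div = |a: i64, b: i64| (b != 0).then(|| a / b);
    match try_div(in1, in2) {
        Some(x) => Ok(x),
        None if in1 == 0 => Ok(1), // 0/0 := 1
        None => Err(format!("attempted to divide {in1} by zero")),
    }
}

fn main() {
    assert_eq!(div_semantics(10, 2), Ok(5));
    assert_eq!(div_semantics(0, 0), Ok(1));
    assert!(div_semantics(3, 0).is_err());
}
```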
+ if in1_ef.is_zero() { + AbstractField::one() + } else { + return Err(RuntimeError::DivEOutOfDomain { + in1: in1_ef, + in2: in2_ef, + instr, + pc: self.pc.as_canonical_u32() as usize, + trace: self.nearest_pc_backtrace(), + }); + } + } + }, + }; + let out = Block::from(out_ef.as_base_slice()); + self.memory.mw(addrs.out, out, mult); + self.record.ext_alu_events.push(ExtAluEvent { out, in1, in2 }); } - Opcode::STORE => { + Instruction::Mem(MemInstr { + addrs: MemIo { inner: addr }, + vals: MemIo { inner: val }, + mult, + kind, + }) => { self.nb_memory_ops += 1; - let (a_ptr, b_val, c_val) = self.mem_rr(&instruction); - let addr = Self::calculate_address(b_val, c_val, &instruction); - let a_val = self.mr_cpu(a_ptr, MemoryAccessPosition::A); - self.mw_cpu(addr, a_val, MemoryAccessPosition::Memory); - (a, b, c) = (a_val, b_val, c_val); - } - Opcode::BEQ => { - self.nb_branch_ops += 1; - let (a_val, b_val, c_offset) = self.branch_rr(&instruction); - (a, b, c) = (a_val, b_val, Block::from(c_offset)); - if a == b { - next_pc = self.pc + c_offset; - } - } - Opcode::BNE => { - self.nb_branch_ops += 1; - let (a_val, b_val, c_offset) = self.branch_rr(&instruction); - (a, b, c) = (a_val, b_val, Block::from(c_offset)); - if a != b { - next_pc = self.pc + c_offset; - } - } - Opcode::BNEINC => { - self.nb_branch_ops += 1; - let (_, b_val, c_offset) = self.alu_rr(&instruction); - let (a_ptr, mut a_val) = self.peek_a(&instruction); - a_val[0] += F::one(); - if a_val != b_val { - next_pc = self.pc + c_offset[0]; - } - self.mw_cpu(a_ptr, a_val, MemoryAccessPosition::A); - (a, b, c) = (a_val, b_val, c_offset); - } - Opcode::JAL => { - self.nb_branch_ops += 1; - let (a_ptr, b_val, c_offset) = self.alu_rr(&instruction); - let a_val = Block::from(self.pc); - self.mw_cpu(a_ptr, a_val, MemoryAccessPosition::A); - next_pc = self.pc + b_val[0]; - self.fp += c_offset[0]; - (a, b, c) = (a_val, b_val, c_offset); - } - Opcode::JALR => { - self.nb_branch_ops += 1; - let (a_ptr, b_val, c_val) = self.alu_rr(&instruction); - let a_val = Block::from(self.pc + F::one()); - self.mw_cpu(a_ptr, a_val, MemoryAccessPosition::A); - next_pc = b_val[0]; - self.fp = c_val[0]; - (a, b, c) = (a_val, b_val, c_val); - } - Opcode::TRAP => { - self.record.public_values.resize(RECURSIVE_PROOF_NUM_PV_ELTS, F::zero()); - self.record.public_values[RECURSION_PUBLIC_VALUES_COL_MAP.exit_code] = F::one(); - - let trap_pc = self.pc.as_canonical_u32() as usize; - let trace = self.program.traces[trap_pc].clone(); - if let Some(mut trace) = trace { - trace.resolve(); - return Err(RuntimeError::Trap(format!("Backtrace:\n{:?}", trace))); - } else { - for nearby_pc in (0..trap_pc).rev() { - let trace = self.program.traces[nearby_pc].clone(); - if let Some(mut trace) = trace { - trace.resolve(); - return Err(RuntimeError::Trap(format!( - "TRAP encountered at pc={}. 
Nearest trace at pc={}: {:?}", - trap_pc, nearby_pc, trace - ))); - } + match kind { + MemAccessKind::Read => { + let mem_entry = self.memory.mr_mult(addr, mult); + assert_eq!( + mem_entry.val, val, + "stored memory value should be the specified value" + ); } - return Err(RuntimeError::Trap("No backtrace available".to_string())); + MemAccessKind::Write => drop(self.memory.mw(addr, val, mult)), } + self.record.mem_const_count += 1; } - Opcode::HALT => { - self.record.public_values.resize(RECURSIVE_PROOF_NUM_PV_ELTS, F::zero()); - self.record.public_values[RECURSION_PUBLIC_VALUES_COL_MAP.exit_code] = - F::zero(); - - let (a_val, b_val, c_val) = self.all_rr(&instruction); - (a, b, c) = (a_val, b_val, c_val); - } - Opcode::HintExt2Felt => { - let (a_val, b_val, c_val) = self.all_rr(&instruction); - let dst = a_val[0].as_canonical_u32() as usize; - self.mw_uninitialized(dst, Block::from(b_val[0])); - self.mw_uninitialized(dst + 1, Block::from(b_val[1])); - self.mw_uninitialized(dst + 2, Block::from(b_val[2])); - self.mw_uninitialized(dst + 3, Block::from(b_val[3])); - (a, b, c) = (a_val, b_val, c_val); - } - Opcode::Poseidon2Compress => { + Instruction::Poseidon2(instr) => { + let Poseidon2Instr { addrs: Poseidon2Io { input, output }, mults } = *instr; self.nb_poseidons += 1; - self.nb_poseidon_permutes += 1; - - let (a_val, b_val, c_val) = self.all_rr(&instruction); - - // Get the dst array ptr. - let dst = a_val[0]; - // Get the src array ptr. - let left = b_val[0]; - let right = c_val[0] + instruction.offset_imm; - - let timestamp = self.clk; - - let mut left_records = vec![]; - let mut right_records = vec![]; - let mut left_array: [F; PERMUTATION_WIDTH / 2] = - [F::zero(); PERMUTATION_WIDTH / 2]; - let mut right_array: [F; PERMUTATION_WIDTH / 2] = - [F::zero(); PERMUTATION_WIDTH / 2]; - - for i in 0..PERMUTATION_WIDTH / 2 { - let f_i = F::from_canonical_u32(i as u32); - let left_val = self.mr(left + f_i, timestamp); - let right_val = self.mr(right + f_i, timestamp); - left_array[i] = left_val.1 .0[0]; - right_array[i] = right_val.1 .0[0]; - left_records.push(left_val.0); - right_records.push(right_val.0); - } - let array: [_; PERMUTATION_WIDTH] = - [left_array, right_array].concat().try_into().unwrap(); - let input_records: [_; PERMUTATION_WIDTH] = - [left_records, right_records].concat().try_into().unwrap(); - - // Perform the permutation. - let result = self.perm.as_ref().unwrap().permute(array); - - // Write the value back to the array at ptr. 
- let mut result_records = vec![]; - for (i, value) in result.iter().enumerate() { - result_records.push(self.mw( - dst + F::from_canonical_usize(i), - Block::from(*value), - timestamp + F::one(), - )); - } + let in_vals = std::array::from_fn(|i| self.memory.mr(input[i]).val[0]); + let perm_output = self.perm.as_ref().unwrap().permute(in_vals); - self.record.poseidon2_compress_events.push(Poseidon2CompressEvent { - clk: timestamp, - dst, - left, - right, - input: array, - result_array: result, - input_records, - result_records: result_records.try_into().unwrap(), + perm_output.iter().zip(output).zip(mults).for_each(|((&val, addr), mult)| { + self.memory.mw(addr, Block::from(val), mult); }); - - (a, b, c) = (a_val, b_val, c_val); - } - - Opcode::Poseidon2Absorb => { - self.nb_poseidons += 1; - let (a_val, b_val, c_val) = self.all_rr(&instruction); - - let hash_and_absorb_num = a_val[0]; - let start_addr = b_val[0]; - let input_len = c_val[0]; - let timestamp = self.clk; - - let two_pow_12 = 1 << 12; - - let hash_and_absorb_num_u32 = hash_and_absorb_num.as_canonical_u32(); - let hash_num = F::from_canonical_u32(hash_and_absorb_num_u32 / two_pow_12); - let absorb_num = F::from_canonical_u32(hash_and_absorb_num_u32 % two_pow_12); - - // Double check that hash_num is [0, 2^16 - 1] and absorb_num is [0, 2^12 - 1] - // since that is what the AIR will enforce. - assert!(hash_num.as_canonical_u32() < 1 << 16); - assert!(absorb_num.as_canonical_u32() < 1 << 12); - - // We currently don't support an input_len of 0, since it will need special - // logic in the AIR. - assert!(input_len > F::zero()); - - let mut absorb_event = Poseidon2AbsorbEvent::new( - timestamp, - hash_and_absorb_num, - start_addr, - input_len, - hash_num, - absorb_num, - ); - - let memory_records: Vec> = (0..input_len.as_canonical_u32()) - .map(|i| self.mr(start_addr + F::from_canonical_u32(i), timestamp).0) - .collect_vec(); - - let permuter = self.perm.as_ref().unwrap().clone(); - self.nb_poseidon_permutes += absorb_event.populate_iterations( - start_addr, - input_len, - &memory_records, - &permuter, - &mut self.p2_hash_state, - &mut self.p2_hash_state_cursor, - ); - - // Update the current hash number. 
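In the removed compress path above, the left and right pointers each supply half of the 16-element permutation state before the permutation runs and the result is written back to `dst`. A minimal sketch of that input layout, with `u32` standing in for field elements:

```rust
const WIDTH: usize = 16;

/// Concatenate the left and right halves into one permutation input, as the compress
/// handler does before calling the Poseidon2 permutation.
fn compress_input(left: [u32; WIDTH / 2], right: [u32; WIDTH / 2]) -> [u32; WIDTH] {
    let mut state = [0u32; WIDTH];
    state[..WIDTH / 2].copy_from_slice(&left);
    state[WIDTH / 2..].copy_from_slice(&right);
    state
}

fn main() {
    let state = compress_input([1; 8], [2; 8]);
    assert_eq!(&state[..8], &[1; 8]);
    assert_eq!(&state[8..], &[2; 8]);
}
```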
- self.p2_current_hash_num = Some(hash_num); - self.record - .poseidon2_hash_events - .push(Poseidon2HashEvent::Absorb(absorb_event)); - - (a, b, c) = (a_val, b_val, c_val); + .poseidon2_events + .push(Poseidon2Event { input: in_vals, output: perm_output }); } - - Opcode::Poseidon2Finalize => { - self.nb_poseidons += 1; - let (a_val, b_val, c_val) = self.all_rr(&instruction); - - let p2_hash_num = a_val[0]; - let output_ptr = b_val[0]; - let timestamp = self.clk; - - let do_perm = self.p2_hash_state_cursor != 0; - let perm_output = self.perm.as_ref().unwrap().permute(self.p2_hash_state); - let state = if do_perm { - self.nb_poseidon_permutes += 1; - perm_output - } else { - self.p2_hash_state - }; - let output_records: [MemoryRecord; DIGEST_SIZE] = array::from_fn(|i| { - self.mw(output_ptr + F::from_canonical_usize(i), state[i], timestamp) + Instruction::ExpReverseBitsLen(ExpReverseBitsInstr { + addrs: ExpReverseBitsIo { base, exp, result }, + mult, + }) => { + self.nb_exp_reverse_bits += 1; + let base_val = self.memory.mr(base).val[0]; + let exp_bits: Vec<_> = + exp.iter().map(|bit| self.memory.mr(*bit).val[0]).collect(); + let exp_val = exp_bits + .iter() + .enumerate() + .fold(0, |acc, (i, &val)| acc + val.as_canonical_u32() * (1 << i)); + let out = + base_val.exp_u64(reverse_bits_len(exp_val as usize, exp_bits.len()) as u64); + self.memory.mw(result, Block::from(out), mult); + self.record.exp_reverse_bits_len_events.push(ExpReverseBitsEvent { + result: out, + base: base_val, + exp: exp_bits, }); - - self.record.poseidon2_hash_events.push(Poseidon2HashEvent::Finalize( - Poseidon2FinalizeEvent { - clk: timestamp, - hash_num: p2_hash_num, - output_ptr, - output_records, - state_cursor: self.p2_hash_state_cursor, - perm_input: self.p2_hash_state, - perm_output, - previous_state: self.p2_hash_state, - state, - do_perm, - }, - )); - - self.p2_hash_state_cursor = 0; - self.p2_hash_state = [F::zero(); PERMUTATION_WIDTH]; - - (a, b, c) = (a_val, b_val, c_val); } - Opcode::HintBits => { + Instruction::HintBits(HintBitsInstr { output_addrs_mults, input_addr }) => { self.nb_bit_decompositions += 1; - let (a_val, b_val, c_val) = self.all_rr(&instruction); - - // Get the dst array ptr. - let dst = a_val[0].as_canonical_u32() as usize; - // Get the src value. - let num = b_val[0].as_canonical_u32(); - + let num = self.memory.mr_mult(input_addr, F::zero()).val[0].as_canonical_u32(); // Decompose the num into LE bits. - let bits = (0..NUM_BITS).map(|i| (num >> i) & 1).collect::>(); + let bits = (0..output_addrs_mults.len()) + .map(|i| Block::from(F::from_canonical_u32((num >> i) & 1))) + .collect::>(); // Write the bits to the array at dst. 
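// In the ExpReverseBitsLen arm above, the exponent is first reassembled from
// its little-endian bit cells and then bit-reversed over `exp_bits.len()` bits
// before the exponentiation. A self-contained sketch of `reverse_bits_len`,
// assuming the usual "reverse the low bit_len bits" semantics of the helper
// imported by this file:
fn reverse_bits_len(x: usize, bit_len: usize) -> usize {
    let mut out = 0;
    for i in 0..bit_len {
        out |= ((x >> i) & 1) << (bit_len - 1 - i);
    }
    out
}
// Example: reverse_bits_len(0b0011, 4) == 0b1100.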
- for (i, bit) in bits.iter().enumerate() { - self.mw_uninitialized(dst + i, Block::from(F::from_canonical_u32(*bit))); + for (bit, (addr, mult)) in bits.into_iter().zip(output_addrs_mults) { + self.memory.mw(addr, bit, mult); + self.record.mem_var_events.push(MemEvent { inner: bit }); } - (a, b, c) = (a_val, b_val, c_val); - } - Opcode::HintLen => { - let (a_ptr, b_val, c_val) = self.alu_rr(&instruction); - let a_val: Block = - F::from_canonical_usize(self.witness_stream[0].len()).into(); - self.mw_cpu(a_ptr, a_val, MemoryAccessPosition::A); - (a, b, c) = (a_val, b_val, c_val); } - Opcode::Hint => { - let (a_val, b_val, c_val) = self.all_rr(&instruction); - let dst = a_val[0].as_canonical_u32() as usize; - let blocks = self.witness_stream.pop_front().unwrap(); - for (i, block) in blocks.into_iter().enumerate() { - self.mw_uninitialized(dst + i, block); - } - (a, b, c) = (a_val, b_val, c_val); - } - Opcode::FRIFold => { - let (a_val, b_val, c_val) = self.all_rr(&instruction); - - // The timestamp for the memory reads for all of these operations will be - // self.clk - let ps_at_z_len = a_val[0]; - let input_ptr = b_val[0]; - - let mut timestamp = self.clk; - - self.nb_fri_folds += ps_at_z_len.as_canonical_u32() as usize; - // Read the input values. - for m in 0..ps_at_z_len.as_canonical_u32() { - let m = F::from_canonical_u32(m); - let mut ptr = input_ptr; - let (z_record, z) = self.mr(ptr, timestamp); - let z: EF = z.ext(); - ptr += F::one(); - let (alpha_record, alpha) = self.mr(ptr, timestamp); - let alpha: EF = alpha.ext(); - ptr += F::one(); - let (x_record, x) = self.mr(ptr, timestamp); - let x = x[0]; - ptr += F::one(); - let (log_height_record, log_height) = self.mr(ptr, timestamp); - let log_height = log_height[0]; - ptr += F::one(); - let (mat_opening_ptr_record, mat_opening_ptr) = self.mr(ptr, timestamp); - let mat_opening_ptr = mat_opening_ptr[0]; - ptr += F::two(); - let (ps_at_z_ptr_record, ps_at_z_ptr) = self.mr(ptr, timestamp); - let ps_at_z_ptr = ps_at_z_ptr[0]; - ptr += F::two(); - let (alpha_pow_ptr_record, alpha_pow_ptr) = self.mr(ptr, timestamp); - let alpha_pow_ptr = alpha_pow_ptr[0]; - ptr += F::two(); - let (ro_ptr_record, ro_ptr) = self.mr(ptr, timestamp); - let ro_ptr = ro_ptr[0]; + Instruction::FriFold(instr) => { + let FriFoldInstr { + base_single_addrs, + ext_single_addrs, + ext_vec_addrs, + alpha_pow_mults, + ro_mults, + } = *instr; + self.nb_fri_fold += 1; + let x = self.memory.mr(base_single_addrs.x).val[0]; + let z = self.memory.mr(ext_single_addrs.z).val; + let z: EF = z.ext(); + let alpha = self.memory.mr(ext_single_addrs.alpha).val; + let alpha: EF = alpha.ext(); + let mat_opening = ext_vec_addrs + .mat_opening + .iter() + .map(|addr| self.memory.mr(*addr).val) + .collect_vec(); + let ps_at_z = ext_vec_addrs + .ps_at_z + .iter() + .map(|addr| self.memory.mr(*addr).val) + .collect_vec(); + for m in 0..ps_at_z.len() { + // let m = F::from_canonical_u32(m); // Get the opening values. - let (p_at_x_record, p_at_x) = self.mr(mat_opening_ptr + m, timestamp); + let p_at_x = mat_opening[m]; let p_at_x: EF = p_at_x.ext(); - - let (p_at_z_record, p_at_z) = self.mr(ps_at_z_ptr + m, timestamp); + let p_at_z = ps_at_z[m]; let p_at_z: EF = p_at_z.ext(); // Calculate the quotient and update the values let quotient = (-p_at_z + p_at_x) / (-z + x); - // Modify the ro and alpha pow values. - // First we peek to get the current value. 
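// The HintBits arm above emits one little-endian bit per output address using
// `(num >> i) & 1`. A tiny worked example of that decomposition:
#[test]
fn hint_bits_le_example() {
    let num: u32 = 13; // 0b1101
    let bits: Vec<u32> = (0..4).map(|i| (num >> i) & 1).collect();
    assert_eq!(bits, vec![1, 0, 1, 1]); // least-significant bit first
}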
- let (alpha_pow_ptr_plus_log_height, alpha_pow_at_log_height) = - self.peek(alpha_pow_ptr + log_height); - let alpha_pow_at_log_height: EF = alpha_pow_at_log_height.ext(); + let alpha_pow: EF = + self.memory.mr(ext_vec_addrs.alpha_pow_input[m]).val.ext(); - let (ro_ptr_plus_log_height, ro_at_log_height) = - self.peek(ro_ptr + log_height); - let ro_at_log_height: EF = ro_at_log_height.ext(); + let ro: EF = self.memory.mr(ext_vec_addrs.ro_input[m]).val.ext(); - let new_ro_at_log_height = - ro_at_log_height + alpha_pow_at_log_height * quotient; - let new_alpha_pow_at_log_height = alpha_pow_at_log_height * alpha; + let new_ro = ro + alpha_pow * quotient; + let new_alpha_pow = alpha_pow * alpha; - let ro_at_log_height_record = self.mw( - ro_ptr_plus_log_height, - Block::from(new_ro_at_log_height.as_base_slice()), - timestamp, + let _ = self.memory.mw( + ext_vec_addrs.ro_output[m], + Block::from(new_ro.as_base_slice()), + ro_mults[m], ); - let alpha_pow_at_log_height_record = self.mw( - alpha_pow_ptr_plus_log_height, - Block::from(new_alpha_pow_at_log_height.as_base_slice()), - timestamp, + let _ = self.memory.mw( + ext_vec_addrs.alpha_pow_output[m], + Block::from(new_alpha_pow.as_base_slice()), + alpha_pow_mults[m], ); self.record.fri_fold_events.push(FriFoldEvent { - is_last_iteration: F::from_bool( - ps_at_z_len.as_canonical_u32() - 1 == m.as_canonical_u32(), - ), - clk: timestamp, - m, - input_ptr, - z: z_record, - alpha: alpha_record, - x: x_record, - log_height: log_height_record, - mat_opening_ptr: mat_opening_ptr_record, - ps_at_z_ptr: ps_at_z_ptr_record, - alpha_pow_ptr: alpha_pow_ptr_record, - ro_ptr: ro_ptr_record, - p_at_x: p_at_x_record, - p_at_z: p_at_z_record, - alpha_pow_at_log_height: alpha_pow_at_log_height_record, - ro_at_log_height: ro_at_log_height_record, + base_single: FriFoldBaseIo { x }, + ext_single: FriFoldExtSingleIo { + z: Block::from(z.as_base_slice()), + alpha: Block::from(alpha.as_base_slice()), + }, + ext_vec: FriFoldExtVecIo { + mat_opening: Block::from(p_at_x.as_base_slice()), + ps_at_z: Block::from(p_at_z.as_base_slice()), + alpha_pow_input: Block::from(alpha_pow.as_base_slice()), + ro_input: Block::from(ro.as_base_slice()), + alpha_pow_output: Block::from(new_alpha_pow.as_base_slice()), + ro_output: Block::from(new_ro.as_base_slice()), + }, }); - timestamp += F::one(); } - - next_clk = timestamp; - (a, b, c) = (a_val, b_val, c_val); } - Opcode::ExpReverseBitsLen => { - // Read the operands. - let (a_val, b_val, c_val) = self.all_rr(&instruction); - // A pointer to the base of the exponentiation. - let base = a_val[0]; - - // A pointer to the first bit (LSB) of the exponent. - let input_ptr = b_val[0]; - - // The length parameter in bit-reverse-len. - let len = c_val[0]; - - let mut timestamp = self.clk; - - let mut accum = F::one(); - - // Read the value at the pointer `base`. - let mut x_record = self.mr(base, timestamp).0; - - // Iterate over the `len` least-significant bits of the exponent. - self.nb_erb_lens += len.as_canonical_u32() as usize; - for m in 0..len.as_canonical_u32() { - let m = F::from_canonical_u32(m); - - // Pointer to the current bit. - let ptr = input_ptr + m; - - // Read the current bit. 
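// Written out, each iteration of the rewritten FriFold loop above computes
// (over the extension field EF):
//
//     quotient_m  = (p_m(x) - p_m(z)) / (x - z)
//     ro'         = ro + alpha_pow * quotient_m
//     alpha_pow'  = alpha_pow * alpha
//
// The per-iteration pointer arithmetic and timestamps of the old opcode are
// gone because the operands now arrive as explicit address vectors in
// `ext_vec_addrs`, one slot per opened value.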
- let (current_bit_record, current_bit) = self.mr(ptr, timestamp); - let current_bit = current_bit.ext::().as_base_slice()[0]; - - // Extract the val in `x_record` - let current_x_val = x_record.value[0]; - - let prev_accum = accum; - accum = prev_accum - * prev_accum - * if current_bit == F::one() { current_x_val } else { F::one() }; - - // On the last iteration, write accum to the address pointed to in `base`. - if m == len - F::one() { - x_record = self.mw(base, Block::from(accum), timestamp); - }; + Instruction::CommitPublicValues(instr) => { + let pv_addrs = instr.pv_addrs.as_array(); + let pv_values: [F; RECURSIVE_PROOF_NUM_PV_ELTS] = + array::from_fn(|i| self.memory.mr(pv_addrs[i]).val[0]); + self.record.public_values = *pv_values.as_slice().borrow(); + self.record + .commit_pv_hash_events + .push(CommitPublicValuesEvent { public_values: self.record.public_values }); + } - // Add the event for this iteration to the `ExecutionRecord`. - self.record.exp_reverse_bits_len_events.push(ExpReverseBitsLenEvent { - clk: timestamp, - x: x_record, - current_bit: current_bit_record, - len: len - m, - prev_accum, - accum, - ptr, - base_ptr: base, - iteration_num: m, - }); - timestamp += F::one(); + Instruction::Print(PrintInstr { field_elt_type, addr }) => match field_elt_type { + FieldEltType::Base => { + self.nb_print_f += 1; + let f = self.memory.mr_mult(addr, F::zero()).val[0]; + writeln!(self.debug_stdout, "PRINTF={f}") + } + FieldEltType::Extension => { + self.nb_print_e += 1; + let ef = self.memory.mr_mult(addr, F::zero()).val; + writeln!(self.debug_stdout, "PRINTEF={ef:?}") } - - next_clk = timestamp; - (a, b, c) = (a_val, b_val, c_val); } - // For both the Commit and RegisterPublicValue opcodes, we record the public value - Opcode::Commit | Opcode::RegisterPublicValue => { - let (a_val, b_val, c_val) = self.all_rr(&instruction); - self.record.public_values.push(a_val[0]); - - (a, b, c) = (a_val, b_val, c_val); + .map_err(RuntimeError::DebugPrint)?, + Instruction::HintExt2Felts(HintExt2FeltsInstr { + output_addrs_mults, + input_addr, + }) => { + self.nb_bit_decompositions += 1; + let fs = self.memory.mr_mult(input_addr, F::zero()).val; + // Write the bits to the array at dst. + for (f, (addr, mult)) in fs.into_iter().zip(output_addrs_mults) { + let felt = Block::from(f); + self.memory.mw(addr, felt, mult); + self.record.mem_var_events.push(MemEvent { inner: felt }); + } + } + Instruction::Hint(HintInstr { output_addrs_mults }) => { + // Check that enough Blocks can be read, so `drain` does not panic. + if self.witness_stream.len() < output_addrs_mults.len() { + return Err(RuntimeError::EmptyWitnessStream); + } + let witness = self.witness_stream.drain(0..output_addrs_mults.len()); + for ((addr, mult), val) in zip(output_addrs_mults, witness) { + // Inline [`Self::mw`] to mutably borrow multiple fields of `self`. 
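// Two details of the added Hint handling above are worth spelling out. First,
// the explicit length check exists because `VecDeque::drain(0..n)` panics when
// `n` exceeds the queue length, so an under-filled witness stream surfaces as
// `RuntimeError::EmptyWitnessStream` instead of a crash. Second, the write is
// spelled out as `self.memory.mw(...)` plus a push onto `mem_var_events`
// (rather than a single helper on `self`) so that the drain iterator's borrow
// of `self.witness_stream` can coexist with mutable access to `self.memory`
// and `self.record`. A hypothetical caller fills the stream with one Block per
// hinted address before running, e.g.:
//
//     runtime.witness_stream =
//         VecDeque::from(vec![Block::from(F::one()), Block::from(F::two())]);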
+ self.memory.mw(addr, val, mult); + self.record.mem_var_events.push(MemEvent { inner: val }); + } } - }; + } - let event = CpuEvent { - clk: self.clk, - pc: self.pc, - fp: self.fp, - instruction: instruction.clone(), - a, - a_record: self.access.a, - b, - b_record: self.access.b, - c, - c_record: self.access.c, - memory_record: self.access.memory, - }; self.pc = next_pc; - self.record.cpu_events.push(event); self.clk = next_clk; self.timestamp += 1; - self.access = CpuRecord::default(); - if self.timestamp >= early_exit_ts - || instruction.opcode == Opcode::HALT - || instruction.opcode == Opcode::TRAP - { + if self.timestamp >= early_exit_ts { break; } } - - let zero_block = Block::from(F::zero()); - // Collect all used memory addresses. - for (addr, entry) in self.memory.iter() { - // Get the initial value of the memory address from either the uninitialized memory - // or set it as a default to 0. - let init_value = self.uninitialized_memory.get(addr).unwrap_or(&zero_block); - self.record.first_memory_record.push((F::from_canonical_usize(*addr), *init_value)); - - self.record.last_memory_record.push(( - F::from_canonical_usize(*addr), - entry.timestamp, - entry.value, - )) - } - self.record.last_memory_record.sort_by_key(|(addr, _, _)| *addr); - - // For all the records but the last, need to check that the next address is greater than the - // current address, and that the difference is bounded by 2^28. We also track that the - // current address is bounded by 2^28. - for i in 0..self.record.last_memory_record.len() - 1 { - self.track_addr_range_check( - self.record.last_memory_record[i].0, - self.record.last_memory_record[i + 1].0, - true, - ); - self.track_addr_range_check(F::zero(), self.record.last_memory_record[i].0, false); - } - // Add the last range check event for the last memory address. 
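// With HALT and TRAP gone, the execution loop above no longer inspects the
// current opcode to decide when to stop: it presumably runs to the end of the
// program unless `self.timestamp` reaches `early_exit_ts` first. The old
// post-run pass that rebuilt first/last memory records and range-checked the
// gaps between addresses is dropped along with it; memory initialization and
// finalization are presumably handled by the dedicated memory instructions and
// the MemoryConst/MemoryVar chips instead.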
- self.track_addr_range_check( - F::zero(), - self.record.last_memory_record.last().unwrap().0, - false, - ); - Ok(()) } } - -#[cfg(test)] -mod tests { - use p3_field::AbstractField; - use sp1_core_machine::riscv::RiscvAir; - use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; - - use super::{Instruction, Opcode, RecursionProgram, Runtime, RuntimeError}; - - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - type A = RiscvAir; - - #[test] - fn test_witness_success() { - let zero = F::zero(); - let zero_block = [F::zero(); 4]; - let program = RecursionProgram { - traces: vec![], - instructions: vec![ - Instruction::new( - Opcode::HintLen, - zero, - zero_block, - zero_block, - zero, - zero, - false, - false, - "".to_string(), - ), - Instruction::new( - Opcode::PrintF, - zero, - zero_block, - zero_block, - zero, - zero, - false, - false, - "".to_string(), - ), - ], - }; - let machine = A::machine(SC::default()); - let mut runtime = Runtime::::new(&program, machine.config().perm.clone()); - runtime.witness_stream = - vec![vec![F::two().into(), F::two().into(), F::two().into()]].into(); - - let result = runtime.run(); - assert!(result.is_ok(), "Expected run to complete successfully"); - } - - #[test] - fn test_witness_trap_error() { - let zero = F::zero(); - let zero_block = [F::zero(); 4]; - let trap_program = RecursionProgram { - traces: vec![None], // None trace for the TRAP instruction - instructions: vec![Instruction::new( - Opcode::TRAP, - zero, - zero_block, - zero_block, - zero, - zero, - false, - false, - "".to_string(), - )], - }; - let machine = A::machine(SC::default()); - let mut trap_runtime = - Runtime::::new(&trap_program, machine.config().perm.clone()); - - let trap_result = trap_runtime.run(); - assert!(trap_result.is_err(), "Expected run to return an error due to TRAP instruction"); - - if let Err(RuntimeError::Trap(msg)) = trap_result { - println!("Caught expected trap error: {}", msg); - } - } -} diff --git a/crates/recursion/core/src/runtime/opcode.rs b/crates/recursion/core/src/runtime/opcode.rs index fa9913dd34..96a748d065 100644 --- a/crates/recursion/core/src/runtime/opcode.rs +++ b/crates/recursion/core/src/runtime/opcode.rs @@ -1,64 +1,17 @@ -use p3_field::AbstractField; use serde::{Deserialize, Serialize}; -#[allow(clippy::upper_case_acronyms)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub enum Opcode { - // Arithmetic field instructions. - ADD = 0, - SUB = 1, - MUL = 2, - DIV = 3, - - // Arithmetic field extension operations. - EADD = 10, - ESUB = 11, - EMUL = 12, - EDIV = 13, - - // Memory instructions. - LOAD = 4, - STORE = 5, - - // Branch instructions. - BEQ = 6, - BNE = 7, - - // Jump instructions. - JAL = 8, - JALR = 9, - - // System instructions. - TRAP = 30, - HALT = 31, - - // Poseidon2 compress. - Poseidon2Compress = 39, - - // Poseidon2 hash. - Poseidon2Absorb = 46, - Poseidon2Finalize = 47, - - // Bit instructions. 
- HintBits = 32, - - PrintF = 33, - PrintE = 34, - HintExt2Felt = 35, - - FRIFold = 36, - HintLen = 37, - Hint = 38, - BNEINC = 40, - Commit = 41, - RegisterPublicValue = 42, - LessThanF = 43, - CycleTracker = 44, - ExpReverseBitsLen = 45, +pub enum BaseAluOpcode { + AddF, + SubF, + MulF, + DivF, } -impl Opcode { - pub fn as_field(&self) -> F { - F::from_canonical_u32(*self as u32) - } +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub enum ExtAluOpcode { + AddE, + SubE, + MulE, + DivE, } diff --git a/crates/recursion/core/src/runtime/program.rs b/crates/recursion/core/src/runtime/program.rs index 5ac33dafc8..38fb29ca8a 100644 --- a/crates/recursion/core/src/runtime/program.rs +++ b/crates/recursion/core/src/runtime/program.rs @@ -1,14 +1,18 @@ -use super::Instruction; use backtrace::Backtrace; use p3_field::Field; use serde::{Deserialize, Serialize}; -use sp1_stark::air::MachineProgram; +use shape::RecursionShape; +use sp1_stark::air::{MachineAir, MachineProgram}; + +use crate::*; #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct RecursionProgram { pub instructions: Vec>, + pub total_memory: usize, #[serde(skip)] pub traces: Vec>, + pub shape: Option, } impl MachineProgram for RecursionProgram { @@ -16,3 +20,18 @@ impl MachineProgram for RecursionProgram { F::zero() } } + +impl RecursionProgram { + #[inline] + pub fn fixed_log2_rows>(&self, air: &A) -> Option { + self.shape + .as_ref() + .map(|shape| { + shape + .inner + .get(&air.name()) + .unwrap_or_else(|| panic!("Chip {} not found in specified shape", air.name())) + }) + .copied() + } +} diff --git a/crates/recursion/core/src/runtime/record.rs b/crates/recursion/core/src/runtime/record.rs index 17cc25e318..63df96f0ca 100644 --- a/crates/recursion/core/src/runtime/record.rs +++ b/crates/recursion/core/src/runtime/record.rs @@ -1,80 +1,80 @@ -use hashbrown::HashMap; use std::{array, sync::Arc}; -use p3_field::{AbstractField, PrimeField32}; -use sp1_stark::{MachineRecord, SP1CoreOpts, PROOF_MAX_NUM_PVS}; +use hashbrown::HashMap; +use p3_field::{AbstractField, Field, PrimeField32}; +use sp1_stark::{air::MachineAir, MachineRecord, SP1CoreOpts, PROOF_MAX_NUM_PVS}; -use super::RecursionProgram; -use crate::{ - air::Block, - cpu::CpuEvent, - exp_reverse_bits::ExpReverseBitsLenEvent, - fri_fold::FriFoldEvent, - poseidon2_wide::events::{Poseidon2CompressEvent, Poseidon2HashEvent}, - range_check::RangeCheckEvent, +use super::{ + BaseAluEvent, CommitPublicValuesEvent, ExpReverseBitsEvent, ExtAluEvent, FriFoldEvent, + MemEvent, Poseidon2Event, RecursionProgram, RecursionPublicValues, }; -#[derive(Default, Debug, Clone)] -pub struct ExecutionRecord { +#[derive(Clone, Default, Debug)] +pub struct ExecutionRecord { pub program: Arc>, - pub cpu_events: Vec>, - pub poseidon2_compress_events: Vec>, - pub poseidon2_hash_events: Vec>, - pub fri_fold_events: Vec>, - pub range_check_events: HashMap, - pub exp_reverse_bits_len_events: Vec>, - // (address, value) - pub first_memory_record: Vec<(F, Block)>, - - // (address, last_timestamp, last_value) - pub last_memory_record: Vec<(F, F, Block)>, + /// The index of the shard. + pub index: u32, + pub base_alu_events: Vec>, + pub ext_alu_events: Vec>, + pub mem_const_count: usize, + pub mem_var_events: Vec>, /// The public values. 
- pub public_values: Vec, -} + pub public_values: RecursionPublicValues, -impl ExecutionRecord { - pub fn add_range_check_events(&mut self, events: &[RangeCheckEvent]) { - for event in events { - *self.range_check_events.entry(*event).or_insert(0) += 1; - } - } + pub poseidon2_events: Vec>, + pub exp_reverse_bits_len_events: Vec>, + pub fri_fold_events: Vec>, + pub commit_pv_hash_events: Vec>, } impl MachineRecord for ExecutionRecord { type Config = SP1CoreOpts; - fn stats(&self) -> HashMap { + fn stats(&self) -> hashbrown::HashMap { let mut stats = HashMap::new(); - stats.insert("cpu_events".to_string(), self.cpu_events.len()); - stats.insert("poseidon2_events".to_string(), self.poseidon2_compress_events.len()); - stats.insert("poseidon2_events".to_string(), self.poseidon2_hash_events.len()); + stats.insert("base_alu_events".to_string(), self.base_alu_events.len()); + stats.insert("ext_alu_events".to_string(), self.ext_alu_events.len()); + stats.insert("mem_var_events".to_string(), self.mem_var_events.len()); + + stats.insert("poseidon2_events".to_string(), self.poseidon2_events.len()); + stats.insert("exp_reverse_bits_events".to_string(), self.exp_reverse_bits_len_events.len()); stats.insert("fri_fold_events".to_string(), self.fri_fold_events.len()); - stats.insert("range_check_events".to_string(), self.range_check_events.len()); - stats.insert( - "exp_reverse_bits_len_events".to_string(), - self.exp_reverse_bits_len_events.len(), - ); + stats } - // NOTE: This should be unused. fn append(&mut self, other: &mut Self) { - self.cpu_events.append(&mut other.cpu_events); - self.first_memory_record.append(&mut other.first_memory_record); - self.last_memory_record.append(&mut other.last_memory_record); - - // Merge the range check lookups. - for (range_check_event, count) in std::mem::take(&mut other.range_check_events).into_iter() - { - *self.range_check_events.entry(range_check_event).or_insert(0) += count; - } + // Exhaustive destructuring for refactoring purposes. 
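// Expanding on the comment above: because the pattern lists every field of
// `Self` with no trailing `..`, adding a new event vector to `ExecutionRecord`
// makes this `let Self { ... } = self` binding fail to compile until the new
// field is also merged here, so `append` cannot silently drop events after a
// refactor.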
+ let Self { + program: _, + index: _, + base_alu_events, + ext_alu_events, + mem_const_count, + mem_var_events, + public_values: _, + poseidon2_events, + exp_reverse_bits_len_events, + fri_fold_events, + commit_pv_hash_events, + } = self; + base_alu_events.append(&mut other.base_alu_events); + ext_alu_events.append(&mut other.ext_alu_events); + *mem_const_count += other.mem_const_count; + mem_var_events.append(&mut other.mem_var_events); + poseidon2_events.append(&mut other.poseidon2_events); + exp_reverse_bits_len_events.append(&mut other.exp_reverse_bits_len_events); + fri_fold_events.append(&mut other.fri_fold_events); + commit_pv_hash_events.append(&mut other.commit_pv_hash_events); } fn public_values(&self) -> Vec { + let pv_elms = self.public_values.as_array(); + let ret: [T; PROOF_MAX_NUM_PVS] = array::from_fn(|i| { - if i < self.public_values.len() { - T::from_canonical_u32(self.public_values[i].as_canonical_u32()) + if i < pv_elms.len() { + T::from_canonical_u32(pv_elms[i].as_canonical_u32()) } else { T::zero() } @@ -83,3 +83,10 @@ impl MachineRecord for ExecutionRecord { ret.to_vec() } } + +impl ExecutionRecord { + #[inline] + pub fn fixed_log2_rows>(&self, air: &A) -> Option { + self.program.fixed_log2_rows(air) + } +} diff --git a/crates/recursion/core/src/runtime/utils.rs b/crates/recursion/core/src/runtime/utils.rs deleted file mode 100644 index b7710186d7..0000000000 --- a/crates/recursion/core/src/runtime/utils.rs +++ /dev/null @@ -1,33 +0,0 @@ -use p3_field::PrimeField32; - -use crate::range_check::{RangeCheckEvent, RangeCheckOpcode}; - -use super::{Instruction, Opcode, HEAP_PTR, HEAP_START_ADDRESS}; - -pub fn canonical_i32_to_field(x: i32) -> F { - let modulus = F::ORDER_U32; - assert!(x < modulus as i32 && x >= -(modulus as i32)); - if x < 0 { - -F::from_canonical_u32((-x) as u32) - } else { - F::from_canonical_u32(x as u32) - } -} - -pub fn get_heap_size_range_check_events( - end_heap_address: F, -) -> (RangeCheckEvent, RangeCheckEvent) { - let heap_size = - (end_heap_address - F::from_canonical_usize(HEAP_START_ADDRESS)).as_canonical_u32(); - let diff_16bit_limb = heap_size & 0xffff; - let diff_12bit_limb = (heap_size >> 16) & 0xfff; - - ( - RangeCheckEvent::new(RangeCheckOpcode::U16, diff_16bit_limb as u16), - RangeCheckEvent::new(RangeCheckOpcode::U12, diff_12bit_limb as u16), - ) -} - -pub fn instruction_is_heap_expand(instruction: &Instruction) -> bool { - instruction.opcode == Opcode::ADD && instruction.op_a == canonical_i32_to_field(HEAP_PTR) -} diff --git a/crates/recursion/core/src/shape.rs b/crates/recursion/core/src/shape.rs new file mode 100644 index 0000000000..54342048bd --- /dev/null +++ b/crates/recursion/core/src/shape.rs @@ -0,0 +1,214 @@ +use std::marker::PhantomData; + +use hashbrown::HashMap; + +use itertools::Itertools; +use p3_field::{extension::BinomiallyExtendable, PrimeField32}; +use serde::{Deserialize, Serialize}; +use sp1_stark::{air::MachineAir, ProofShape}; + +use crate::{ + chips::{ + alu_base::BaseAluChip, + alu_ext::ExtAluChip, + exp_reverse_bits::ExpReverseBitsLenChip, + mem::{MemoryConstChip, MemoryVarChip}, + poseidon2_wide::Poseidon2WideChip, + public_values::{PublicValuesChip, PUB_VALUES_LOG_HEIGHT}, + }, + machine::RecursionAir, + RecursionProgram, D, +}; + +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct RecursionShape { + pub(crate) inner: HashMap, +} + +pub struct RecursionShapeConfig { + allowed_shapes: Vec>, + _marker: PhantomData<(F, A)>, +} + +impl, const DEGREE: usize> + RecursionShapeConfig> +{ + pub 
fn fix_shape(&self, program: &mut RecursionProgram) { + let heights = RecursionAir::::heights(program); + // Get the allowed shape with a minimal hamming distance from the current shape. + let mut min_distance = usize::MAX; + let mut closest_shape = None; + for shape in self.allowed_shapes.iter() { + let mut distance = 0; + let mut is_valid = true; + for (name, height) in heights.iter() { + let next_power_of_two = height.next_power_of_two(); + let allowed_log_height = shape.get(name).unwrap(); + let allowed_height = 1 << allowed_log_height; + if next_power_of_two != allowed_height { + distance += 1; + } + if next_power_of_two > allowed_height { + is_valid = false; + } + } + if is_valid && distance < min_distance { + min_distance = distance; + closest_shape = Some(shape.clone()); + } + } + + if let Some(shape) = closest_shape { + let shape = RecursionShape { inner: shape }; + program.shape = Some(shape); + } else { + panic!("no shape found for heights: {:?}", heights); + } + } + + pub fn get_all_shape_combinations( + &self, + batch_size: usize, + ) -> impl Iterator> + '_ { + (0..batch_size) + .map(|_| { + self.allowed_shapes + .iter() + .cloned() + .map(|map| map.into_iter().collect::()) + }) + .multi_cartesian_product() + } +} + +impl, const DEGREE: usize> Default + for RecursionShapeConfig> +{ + fn default() -> Self { + // Get the names of all the recursion airs to make the shape specification more readable. + let mem_const = RecursionAir::::MemoryConst(MemoryConstChip::default()).name(); + let mem_var = RecursionAir::::MemoryVar(MemoryVarChip::default()).name(); + let base_alu = RecursionAir::::BaseAlu(BaseAluChip).name(); + let ext_alu = RecursionAir::::ExtAlu(ExtAluChip).name(); + let poseidon2_wide = + RecursionAir::::Poseidon2Wide(Poseidon2WideChip::).name(); + let exp_reverse_bits_len = + RecursionAir::::ExpReverseBitsLen(ExpReverseBitsLenChip::).name(); + let public_values = RecursionAir::::PublicValues(PublicValuesChip).name(); + + // Specify allowed shapes. 
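// Each candidate in the list that follows maps a chip name to a fixed log2
// trace height, so an entry like `(base_alu, 20)` fixes the BaseAlu trace at
// 2^20 rows. `fix_shape` above keeps only candidates whose heights all fit the
// observed (next-power-of-two) heights and, among those, picks the one that
// disagrees in the fewest chips. For example, a program whose observed heights
// round to base_alu 2^19, mem_var 2^18, ext_alu 2^18, exp_reverse_bits_len
// 2^16, mem_const 2^16 and poseidon2_wide 2^16 matches the fifth shape below
// exactly (distance 0); the fourth shape would also be valid but at distance 1,
// since it lists exp_reverse_bits_len at 17. Once a shape is attached to the
// program, `RecursionProgram::fixed_log2_rows` (and the `ExecutionRecord`
// wrapper around it) hands the chosen per-chip height back to trace
// generation, which presumably keeps proof shapes identical across runs.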
+ let allowed_shapes = [ + [ + (base_alu.clone(), 20), + (mem_var.clone(), 18), + (ext_alu.clone(), 18), + (exp_reverse_bits_len.clone(), 17), + (mem_const.clone(), 17), + (poseidon2_wide.clone(), 16), + (public_values.clone(), PUB_VALUES_LOG_HEIGHT), + ], + [ + (base_alu.clone(), 20), + (mem_var.clone(), 18), + (ext_alu.clone(), 18), + (exp_reverse_bits_len.clone(), 17), + (mem_const.clone(), 16), + (poseidon2_wide.clone(), 16), + (public_values.clone(), PUB_VALUES_LOG_HEIGHT), + ], + [ + (ext_alu.clone(), 20), + (base_alu.clone(), 19), + (mem_var.clone(), 19), + (poseidon2_wide.clone(), 17), + (mem_const.clone(), 16), + (exp_reverse_bits_len.clone(), 16), + (public_values.clone(), PUB_VALUES_LOG_HEIGHT), + ], + [ + (base_alu.clone(), 19), + (mem_var.clone(), 18), + (ext_alu.clone(), 18), + (exp_reverse_bits_len.clone(), 17), + (mem_const.clone(), 16), + (poseidon2_wide.clone(), 16), + (public_values.clone(), PUB_VALUES_LOG_HEIGHT), + ], + [ + (base_alu.clone(), 19), + (mem_var.clone(), 18), + (ext_alu.clone(), 18), + (exp_reverse_bits_len.clone(), 16), + (mem_const.clone(), 16), + (poseidon2_wide.clone(), 16), + (public_values.clone(), PUB_VALUES_LOG_HEIGHT), + ], + [ + (base_alu.clone(), 20), + (mem_var.clone(), 19), + (ext_alu.clone(), 19), + (exp_reverse_bits_len.clone(), 17), + (mem_const.clone(), 17), + (poseidon2_wide.clone(), 17), + (public_values.clone(), PUB_VALUES_LOG_HEIGHT), + ], + [ + (base_alu.clone(), 21), + (mem_var.clone(), 19), + (ext_alu.clone(), 19), + (exp_reverse_bits_len.clone(), 18), + (mem_const.clone(), 18), + (poseidon2_wide.clone(), 17), + (public_values.clone(), PUB_VALUES_LOG_HEIGHT), + ], + [ + (base_alu.clone(), 21), + (mem_var.clone(), 19), + (ext_alu.clone(), 19), + (exp_reverse_bits_len.clone(), 18), + (mem_const.clone(), 17), + (poseidon2_wide.clone(), 17), + (public_values.clone(), PUB_VALUES_LOG_HEIGHT), + ], + [ + (ext_alu.clone(), 21), + (base_alu.clone(), 20), + (mem_var.clone(), 20), + (poseidon2_wide.clone(), 18), + (mem_const.clone(), 17), + (exp_reverse_bits_len.clone(), 17), + (public_values.clone(), PUB_VALUES_LOG_HEIGHT), + ], + [ + (base_alu.clone(), 20), + (mem_var.clone(), 19), + (ext_alu.clone(), 19), + (exp_reverse_bits_len.clone(), 18), + (mem_const.clone(), 17), + (poseidon2_wide.clone(), 17), + (public_values.clone(), PUB_VALUES_LOG_HEIGHT), + ], + [ + (base_alu.clone(), 20), + (mem_var.clone(), 19), + (ext_alu.clone(), 19), + (exp_reverse_bits_len.clone(), 17), + (mem_const.clone(), 17), + (poseidon2_wide.clone(), 17), + (public_values.clone(), PUB_VALUES_LOG_HEIGHT), + ], + [ + (base_alu.clone(), 21), + (mem_var.clone(), 20), + (ext_alu.clone(), 20), + (exp_reverse_bits_len.clone(), 18), + (mem_const.clone(), 18), + (poseidon2_wide.clone(), 18), + (public_values.clone(), PUB_VALUES_LOG_HEIGHT), + ], + ] + .map(HashMap::from) + .to_vec(); + Self { allowed_shapes, _marker: PhantomData } + } +} diff --git a/crates/recursion/core/src/stark/config.rs b/crates/recursion/core/src/stark/config.rs index 3713fb2ed3..48e812146b 100644 --- a/crates/recursion/core/src/stark/config.rs +++ b/crates/recursion/core/src/stark/config.rs @@ -3,7 +3,7 @@ use p3_bn254_fr::{Bn254Fr, DiffusionMatrixBN254}; use p3_challenger::MultiField32Challenger; use p3_commit::ExtensionMmcs; use p3_dft::Radix2DitParallel; -use p3_field::extension::BinomialExtensionField; +use p3_field::{extension::BinomialExtensionField, AbstractField}; use p3_fri::{ BatchOpening, CommitPhaseProofStep, FriConfig, FriProof, QueryProof, TwoAdicFriPcs, TwoAdicFriPcsProof, @@ -12,22 
+12,35 @@ use p3_merkle_tree::FieldMerkleTreeMmcs; use p3_poseidon2::{Poseidon2, Poseidon2ExternalMatrixGeneral}; use p3_symmetric::{Hash, MultiField32PaddingFreeSponge, TruncatedPermutation}; use serde::{Deserialize, Serialize}; -use sp1_stark::StarkGenericConfig; +use sp1_stark::{Com, StarkGenericConfig, ZeroCommitment}; -use super::{poseidon2::bn254_poseidon2_rc3, utils}; +use super::{poseidon2::bn254_poseidon2_rc3, sp1_dev_mode}; + +pub const DIGEST_SIZE: usize = 1; + +pub const OUTER_MULTI_FIELD_CHALLENGER_WIDTH: usize = 3; +pub const OUTER_MULTI_FIELD_CHALLENGER_RATE: usize = 2; +pub const OUTER_MULTI_FIELD_CHALLENGER_DIGEST_SIZE: usize = 1; /// A configuration for outer recursion. pub type OuterVal = BabyBear; pub type OuterChallenge = BinomialExtensionField; pub type OuterPerm = Poseidon2; -pub type OuterHash = MultiField32PaddingFreeSponge; -pub type OuterDigestHash = Hash; -pub type OuterDigest = [Bn254Fr; 1]; +pub type OuterHash = + MultiField32PaddingFreeSponge; +pub type OuterDigestHash = Hash; +pub type OuterDigest = [Bn254Fr; DIGEST_SIZE]; pub type OuterCompress = TruncatedPermutation; pub type OuterValMmcs = FieldMerkleTreeMmcs; pub type OuterChallengeMmcs = ExtensionMmcs; pub type OuterDft = Radix2DitParallel; -pub type OuterChallenger = MultiField32Challenger; +pub type OuterChallenger = MultiField32Challenger< + OuterVal, + Bn254Fr, + OuterPerm, + OUTER_MULTI_FIELD_CHALLENGER_WIDTH, + OUTER_MULTI_FIELD_CHALLENGER_RATE, +>; pub type OuterPcs = TwoAdicFriPcs; pub type OuterQueryProof = QueryProof; @@ -63,7 +76,7 @@ pub fn outer_fri_config() -> FriConfig { let hash = OuterHash::new(perm.clone()).unwrap(); let compress = OuterCompress::new(perm.clone()); let challenge_mmcs = OuterChallengeMmcs::new(OuterValMmcs::new(hash, compress)); - let num_queries = if utils::sp1_dev_mode() { + let num_queries = if sp1_dev_mode() { 1 } else { match std::env::var("FRI_QUERIES") { @@ -80,7 +93,7 @@ pub fn outer_fri_config_with_blowup(log_blowup: usize) -> FriConfig for OuterPcs { + fn zero_commitment(&self) -> Com { + OuterDigestHash::from([Bn254Fr::zero(); DIGEST_SIZE]) + } +} + /// The FRI config for testing recursion. 
pub fn test_fri_config() -> FriConfig { let perm = outer_perm(); diff --git a/crates/recursion/core/src/stark/mod.rs b/crates/recursion/core/src/stark/mod.rs index 0c2104b3ea..84f419fa53 100644 --- a/crates/recursion/core/src/stark/mod.rs +++ b/crates/recursion/core/src/stark/mod.rs @@ -1,107 +1,7 @@ -pub mod config; -pub mod poseidon2; -pub mod utils; +mod config; +mod poseidon2; +mod utils; -use crate::{ - cpu::CpuChip, exp_reverse_bits::ExpReverseBitsLenChip, fri_fold::FriFoldChip, - memory::MemoryGlobalChip, multi::MultiChip, poseidon2_wide::Poseidon2WideChip, - program::ProgramChip, range_check::RangeCheckChip, -}; -use core::iter::once; -use p3_field::{extension::BinomiallyExtendable, PrimeField32}; -use sp1_stark::{Chip, StarkGenericConfig, StarkMachine, PROOF_MAX_NUM_PVS}; -use std::marker::PhantomData; - -use crate::runtime::D; - -pub type RecursionAirWideDeg3 = RecursionAir; -pub type RecursionAirWideDeg9 = RecursionAir; -pub type RecursionAirWideDeg17 = RecursionAir; - -#[derive(sp1_derive::MachineAir)] -#[sp1_core_path = "sp1_stark"] -#[execution_record_path = "crate::runtime::ExecutionRecord"] -#[program_path = "crate::runtime::RecursionProgram"] -#[builder_path = "crate::air::SP1RecursionAirBuilder"] -#[eval_trait_bound = "AB::Var: 'static"] -pub enum RecursionAir, const DEGREE: usize> { - Program(ProgramChip), - Cpu(CpuChip), - MemoryGlobal(MemoryGlobalChip), - Poseidon2Wide(Poseidon2WideChip), - FriFold(FriFoldChip), - RangeCheck(RangeCheckChip), - Multi(MultiChip), - ExpReverseBitsLen(ExpReverseBitsLenChip), -} - -impl, const DEGREE: usize> RecursionAir { - /// A recursion machine that can have dynamic trace sizes. - pub fn machine>(config: SC) -> StarkMachine { - let chips = Self::get_all().into_iter().map(Chip::new).collect::>(); - StarkMachine::new(config, chips, PROOF_MAX_NUM_PVS) - } - - /// A recursion machine with fixed trace sizes tuned to work specifically for the wrap layer. - pub fn wrap_machine>(config: SC) -> StarkMachine { - let chips = Self::get_wrap_all().into_iter().map(Chip::new).collect::>(); - StarkMachine::new(config, chips, PROOF_MAX_NUM_PVS) - } - - /// A recursion machine with fixed trace sizes tuned to work specifically for the wrap layer. 
- pub fn wrap_machine_dyn>(config: SC) -> StarkMachine { - let chips = Self::get_wrap_dyn_all().into_iter().map(Chip::new).collect::>(); - StarkMachine::new(config, chips, PROOF_MAX_NUM_PVS) - } - - pub fn get_all() -> Vec { - once(RecursionAir::Program(ProgramChip)) - .chain(once(RecursionAir::Cpu(CpuChip { - fixed_log2_rows: None, - _phantom: PhantomData, - }))) - .chain(once(RecursionAir::MemoryGlobal(MemoryGlobalChip { fixed_log2_rows: None }))) - .chain(once(RecursionAir::Poseidon2Wide(Poseidon2WideChip:: { - fixed_log2_rows: None, - pad: true, - }))) - .chain(once(RecursionAir::FriFold(FriFoldChip:: { - fixed_log2_rows: None, - pad: true, - }))) - .chain(once(RecursionAir::RangeCheck(RangeCheckChip::default()))) - .chain(once(RecursionAir::ExpReverseBitsLen(ExpReverseBitsLenChip:: { - fixed_log2_rows: None, - pad: true, - }))) - .collect() - } - - pub fn get_wrap_dyn_all() -> Vec { - once(RecursionAir::Program(ProgramChip)) - .chain(once(RecursionAir::Cpu(CpuChip { - fixed_log2_rows: None, - _phantom: PhantomData, - }))) - .chain(once(RecursionAir::MemoryGlobal(MemoryGlobalChip { fixed_log2_rows: None }))) - .chain(once(RecursionAir::Multi(MultiChip { fixed_log2_rows: None }))) - .chain(once(RecursionAir::RangeCheck(RangeCheckChip::default()))) - .chain(once(RecursionAir::ExpReverseBitsLen(ExpReverseBitsLenChip:: { - fixed_log2_rows: None, - pad: true, - }))) - .collect() - } - - pub fn get_wrap_all() -> Vec { - once(RecursionAir::Program(ProgramChip)) - .chain(once(RecursionAir::Cpu(CpuChip { - fixed_log2_rows: Some(20), - _phantom: PhantomData, - }))) - .chain(once(RecursionAir::MemoryGlobal(MemoryGlobalChip { fixed_log2_rows: Some(19) }))) - .chain(once(RecursionAir::Multi(MultiChip { fixed_log2_rows: Some(17) }))) - .chain(once(RecursionAir::RangeCheck(RangeCheckChip::default()))) - .collect() - } -} +pub use config::*; +pub use poseidon2::*; +pub use utils::*; diff --git a/crates/recursion/core/src/stark/utils.rs b/crates/recursion/core/src/stark/utils.rs index 6e815b05c1..91b2c29250 100644 --- a/crates/recursion/core/src/stark/utils.rs +++ b/crates/recursion/core/src/stark/utils.rs @@ -1,85 +1,3 @@ -use p3_baby_bear::BabyBear; -use sp1_core_machine::utils; -use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; - -use crate::{ - air::Block, - runtime::{RecursionProgram, Runtime}, - stark::{RecursionAirWideDeg3, RecursionAirWideDeg9}, -}; -use p3_field::PrimeField32; -use sp1_core_machine::utils::run_test_machine; -use std::collections::VecDeque; - -#[derive(PartialEq, Clone, Debug)] -pub enum TestConfig { - All, - WideDeg3, - SkinnyDeg7, - WideDeg17Wrap, -} - -type Val = ::Val; -type Challenge = ::Challenge; - -/// Takes in a program and runs it with the given witness and generates a proof with a variety of -/// machines depending on the provided test_config. 
-pub fn run_test_recursion( - program: RecursionProgram, - witness: Option>>>, - test_config: TestConfig, -) { - utils::setup_logger(); - let config = BabyBearPoseidon2::default(); - - let mut runtime = Runtime::::new(&program, config.perm.clone()); - if witness.is_some() { - runtime.witness_stream = witness.unwrap(); - } - - match runtime.run() { - Ok(_) => { - println!( - "The program executed successfully, number of cycles: {}", - runtime.clk.as_canonical_u32() / 4 - ); - } - Err(e) => { - eprintln!("Runtime error: {:?}", e); - return; - } - } - - let records = vec![runtime.record]; - - if test_config == TestConfig::All || test_config == TestConfig::WideDeg3 { - let machine = RecursionAirWideDeg3::machine(BabyBearPoseidon2::default()); - let (pk, vk) = machine.setup(&program); - let result = run_test_machine(records.clone(), machine, pk, vk); - if let Err(e) = result { - panic!("Verification failed: {:?}", e); - } - } - - if test_config == TestConfig::All || test_config == TestConfig::SkinnyDeg7 { - let machine = RecursionAirWideDeg9::machine(BabyBearPoseidon2::compressed()); - let (pk, vk) = machine.setup(&program); - let result = run_test_machine(records.clone(), machine, pk, vk); - if let Err(e) = result { - panic!("Verification failed: {:?}", e); - } - } - - if test_config == TestConfig::All || test_config == TestConfig::WideDeg17Wrap { - let machine = RecursionAirWideDeg9::wrap_machine(BabyBearPoseidon2::compressed()); - let (pk, vk) = machine.setup(&program); - let result = run_test_machine(records.clone(), machine, pk, vk); - if let Err(e) = result { - panic!("Verification failed: {:?}", e); - } - } -} - /// Returns whether the `SP1_DEV` environment variable is enabled or disabled. /// /// This variable controls whether a smaller version of the circuit will be used for generating the diff --git a/crates/recursion/derive/Cargo.toml b/crates/recursion/derive/Cargo.toml index 3c3135ba3a..3e75beae41 100644 --- a/crates/recursion/derive/Cargo.toml +++ b/crates/recursion/derive/Cargo.toml @@ -13,6 +13,5 @@ categories = { workspace = true } proc-macro = true [dependencies] -proc-macro2 = "1.0" quote = "1.0" syn = { version = "1.0", features = ["full"] } diff --git a/crates/recursion/gnark-ffi/Cargo.toml b/crates/recursion/gnark-ffi/Cargo.toml index 0d36bf4b69..3276efb80d 100644 --- a/crates/recursion/gnark-ffi/Cargo.toml +++ b/crates/recursion/gnark-ffi/Cargo.toml @@ -19,7 +19,6 @@ sp1-stark = { workspace = true } serde = "1.0.204" serde_json = "1.0.121" tempfile = "3.10.1" -rand = "0.8" log = "0.4.22" num-bigint = "0.4.6" cfg-if = "1.0" diff --git a/crates/recursion/gnark-ffi/build.rs b/crates/recursion/gnark-ffi/build.rs index f35769b3c6..78325f356b 100644 --- a/crates/recursion/gnark-ffi/build.rs +++ b/crates/recursion/gnark-ffi/build.rs @@ -25,6 +25,7 @@ fn main() { .env("CGO_ENABLED", "1") .args([ "build", + "-tags=debug", "-o", dest.to_str().unwrap(), "-buildmode=c-archive", diff --git a/crates/recursion/gnark-ffi/go/go.mod b/crates/recursion/gnark-ffi/go/go.mod index 505279b9dc..5c089a14ef 100644 --- a/crates/recursion/gnark-ffi/go/go.mod +++ b/crates/recursion/gnark-ffi/go/go.mod @@ -4,33 +4,32 @@ go 1.22 require ( github.com/consensys/gnark v0.10.1-0.20240504023521-d9bfacd7cb60 - github.com/consensys/gnark-crypto v0.12.2-0.20240504013751-564b6f724c3b - github.com/spf13/cobra v1.8.0 + github.com/consensys/gnark-crypto v0.14.0 ) require ( - github.com/bits-and-blooms/bitset v1.8.0 // indirect + github.com/bits-and-blooms/bitset v1.14.2 // indirect github.com/blang/semver/v4 
v4.0.0 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/gnark-ignition-verifier v0.0.0-20230527014722-10693546ab33 github.com/davecgh/go-spew v1.1.1 // indirect - github.com/fxamacker/cbor/v2 v2.5.0 // indirect - github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/ingonyama-zk/icicle v0.0.0-20230928131117-97f0079e5c71 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect + github.com/ingonyama-zk/icicle v1.1.0 // indirect github.com/ingonyama-zk/iciclegnark v0.1.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.19 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/ronanh/intcomp v1.1.0 // indirect - github.com/rs/zerolog v1.30.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/testify v1.8.4 // indirect + github.com/rs/zerolog v1.33.0 // indirect + github.com/stretchr/testify v1.9.0 // indirect github.com/x448/float16 v0.8.4 // indirect - golang.org/x/crypto v0.17.0 // indirect - golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.15.0 // indirect + golang.org/x/crypto v0.26.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.24.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) + +replace github.com/consensys/gnark => github.com/jtguibas/gnark v0.0.0-20240923234830-41125bc1909c diff --git a/crates/recursion/gnark-ffi/go/go.sum b/crates/recursion/gnark-ffi/go/go.sum index 1cba1df674..3f91c58c6d 100644 --- a/crates/recursion/gnark-ffi/go/go.sum +++ b/crates/recursion/gnark-ffi/go/go.sum @@ -1,78 +1,68 @@ -github.com/bits-and-blooms/bitset v1.8.0 h1:FD+XqgOZDUxxZ8hzoBFuV9+cGWY9CslN6d5MS5JVb4c= -github.com/bits-and-blooms/bitset v1.8.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.14.2 h1:YXVoyPndbdvcEVcseEovVfp0qjJp7S+i5+xgp/Nfbdc= +github.com/bits-and-blooms/bitset v1.14.2/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= -github.com/consensys/gnark v0.10.1-0.20240504023521-d9bfacd7cb60 h1:+m2KO2BeqBkH1zfCy88z93144AnnD8boClw6d6sD2Ko= -github.com/consensys/gnark v0.10.1-0.20240504023521-d9bfacd7cb60/go.mod h1:DU7zXvIuOqheiS3EgVdD7ydbXDiLh71FkaArWPxwJqY= -github.com/consensys/gnark-crypto v0.12.2-0.20240504013751-564b6f724c3b h1:tu0NaVr64o6vXzy9rYSK/LCZXmS+u/k9eP1F8OtRUWQ= -github.com/consensys/gnark-crypto v0.12.2-0.20240504013751-564b6f724c3b/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o= +github.com/consensys/gnark-crypto v0.14.0 h1:DDBdl4HaBtdQsq/wfMwJvZNE80sHidrK3Nfrefatm0E= +github.com/consensys/gnark-crypto v0.14.0/go.mod h1:CU4UijNPsHawiVGNxe9co07FkzCeWHHrb1li/n1XoU0= github.com/consensys/gnark-ignition-verifier v0.0.0-20230527014722-10693546ab33 h1:z42ewLaLxoTYeQ17arcF4WExZc/eSaN3YVlF7eEaPt4= github.com/consensys/gnark-ignition-verifier v0.0.0-20230527014722-10693546ab33/go.mod h1:JdKor28c/KR4BbznP88bz8AAvnCgovzrB3KWsiR7lwk= 
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= -github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBBos92HalKpaGKHrp+3Uo6yTodo= -github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/ingonyama-zk/icicle v0.0.0-20230928131117-97f0079e5c71 h1:YxI1RTPzpFJ3MBmxPl3Bo0F7ume7CmQEC1M9jL6CT94= -github.com/ingonyama-zk/icicle v0.0.0-20230928131117-97f0079e5c71/go.mod h1:kAK8/EoN7fUEmakzgZIYdWy1a2rBnpCaZLqSHwZWxEk= +github.com/ingonyama-zk/icicle v1.1.0 h1:a2MUIaF+1i4JY2Lnb961ZMvaC8GFs9GqZgSnd9e95C8= +github.com/ingonyama-zk/icicle v1.1.0/go.mod h1:kAK8/EoN7fUEmakzgZIYdWy1a2rBnpCaZLqSHwZWxEk= github.com/ingonyama-zk/iciclegnark v0.1.0 h1:88MkEghzjQBMjrYRJFxZ9oR9CTIpB8NG2zLeCJSvXKQ= github.com/ingonyama-zk/iciclegnark v0.1.0/go.mod h1:wz6+IpyHKs6UhMMoQpNqz1VY+ddfKqC/gRwR/64W6WU= +github.com/jtguibas/gnark v0.0.0-20240923234830-41125bc1909c h1:vaqr8feWdNWKvsApw7iBhgbTznreKg+YvNAsbvqvwow= +github.com/jtguibas/gnark v0.0.0-20240923234830-41125bc1909c/go.mod h1:2LbheIOxsBI1a9Ck1XxUoy6PRnH28mSI9qrvtN2HwDY= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= -github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4= +github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= 
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/ronanh/intcomp v1.1.0 h1:i54kxmpmSoOZFcWPMWryuakN0vLxLswASsGa07zkvLU= github.com/ronanh/intcomp v1.1.0/go.mod h1:7FOLy3P3Zj3er/kVrU/pl+Ql7JFZj7bwliMGketo0IU= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c= -github.com/rs/zerolog v1.30.0/go.mod h1:/tk+P47gFdPXq4QYjvCmT5/Gsug2nagsFWBWhAiSi1w= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= +github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/crates/recursion/gnark-ffi/go/sp1/babybear/babybear.go b/crates/recursion/gnark-ffi/go/sp1/babybear/babybear.go index 31baba23e8..f3dd63228f 100644 --- a/crates/recursion/gnark-ffi/go/sp1/babybear/babybear.go +++ b/crates/recursion/gnark-ffi/go/sp1/babybear/babybear.go @@ -17,6 +17,7 @@ import ( ) var modulus = new(big.Int).SetUint64(2013265921) +var modulus_sub_1 = new(big.Int).SetUint64(2013265920) func init() { // These functions must be public so Gnark's hint system can access them. @@ -27,8 +28,8 @@ func init() { } type Variable struct { - Value frontend.Variable - NbBits uint + Value frontend.Variable + UpperBound *big.Int } type ExtensionVariable struct { @@ -49,22 +50,33 @@ func NewChip(api frontend.API) *Chip { func Zero() Variable { return Variable{ - Value: frontend.Variable("0"), - NbBits: 32, + Value: frontend.Variable("0"), + UpperBound: new(big.Int).SetUint64(0), } } func One() Variable { return Variable{ - Value: frontend.Variable("1"), - NbBits: 32, + Value: frontend.Variable("1"), + UpperBound: new(big.Int).SetUint64(1), + } +} + +func NewFConst(value string) Variable { + int_value, success := new(big.Int).SetString(value, 10) + if !success { + panic("string to int conversion failed") + } + return Variable{ + Value: frontend.Variable(value), + UpperBound: int_value, } } func NewF(value string) Variable { return Variable{ - Value: frontend.Variable(value), - NbBits: 32, + Value: frontend.Variable(value), + UpperBound: new(big.Int).SetUint64(uint64(math.Pow(2, 32))), } } @@ -76,21 +88,27 @@ func NewE(value []string) ExtensionVariable { return ExtensionVariable{Value: [4]Variable{a, b, c, d}} } +func NewEConst(value []string) ExtensionVariable { + a := NewFConst(value[0]) + b := NewFConst(value[1]) + c := NewFConst(value[2]) + d := NewFConst(value[3]) + return ExtensionVariable{Value: [4]Variable{a, b, c, d}} +} + func Felts2Ext(a, b, c, d Variable) ExtensionVariable { return ExtensionVariable{Value: [4]Variable{a, b, c, d}} } -func (c *Chip) AddF(a, b Variable) Variable { - var maxBits uint - if a.NbBits > b.NbBits { - maxBits = a.NbBits - } else { - maxBits = b.NbBits +func (c *Chip) AddF(a, b Variable, forceReduce ...bool) Variable { + result := Variable{ + Value: c.api.Add(a.Value, b.Value), + UpperBound: 
new(big.Int).Add(a.UpperBound, b.UpperBound), } - return c.reduceFast(Variable{ - Value: c.api.Add(a.Value, b.Value), - NbBits: maxBits + 1, - }) + if len(forceReduce) > 0 && !forceReduce[0] { + return result + } + return c.reduceFast(result) } func (c *Chip) SubF(a, b Variable) Variable { @@ -98,46 +116,48 @@ func (c *Chip) SubF(a, b Variable) Variable { return c.AddF(a, negB) } -func (c *Chip) MulF(a, b Variable) Variable { - return c.reduceFast(Variable{ - Value: c.api.Mul(a.Value, b.Value), - NbBits: a.NbBits + b.NbBits, - }) +func (c *Chip) MulF(a, b Variable, forceReduce ...bool) Variable { + result := Variable{ + Value: c.api.Mul(a.Value, b.Value), + UpperBound: new(big.Int).Mul(a.UpperBound, b.UpperBound), + } + if len(forceReduce) > 0 && !forceReduce[0] { + return result + } + return c.reduceFast(result) } -func (c *Chip) MulFConst(a Variable, b int) Variable { - return c.reduceFast(Variable{ - Value: c.api.Mul(a.Value, b), - NbBits: a.NbBits + 4, - }) +func (c *Chip) MulFConst(a Variable, b int, forceReduce ...bool) Variable { + result := Variable{ + Value: c.api.Mul(a.Value, b), + UpperBound: new(big.Int).Mul(a.UpperBound, new(big.Int).SetUint64(uint64(b))), + } + if len(forceReduce) > 0 && !forceReduce[0] { + return result + } + return c.reduceFast(result) } func (c *Chip) negF(a Variable) Variable { - if a.NbBits <= 30 { - return Variable{Value: c.api.Sub(modulus, a.Value), NbBits: 31} - } - - ub := new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(a.NbBits)), big.NewInt(0)) - divisor := new(big.Int).Div(ub, modulus) + divisor := new(big.Int).Div(a.UpperBound, modulus) divisorPlusOne := new(big.Int).Add(divisor, big.NewInt(1)) liftedModulus := new(big.Int).Mul(divisorPlusOne, modulus) return c.reduceFast(Variable{ - Value: c.api.Sub(liftedModulus, a.Value), - NbBits: a.NbBits + 1, + Value: c.api.Sub(liftedModulus, a.Value), + UpperBound: liftedModulus, }) } func (c *Chip) invF(in Variable) Variable { - in = c.ReduceSlow(in) result, err := c.api.Compiler().NewHint(InvFHint, 1, in.Value) if err != nil { panic(err) } xinv := Variable{ - Value: result[0], - NbBits: 31, + Value: result[0], + UpperBound: new(big.Int).SetUint64(2147483648), } if os.Getenv("GROTH16") != "1" { c.RangeChecker.Check(result[0], 31) @@ -145,7 +165,7 @@ func (c *Chip) invF(in Variable) Variable { c.api.ToBinary(result[0], 31) } product := c.MulF(in, xinv) - c.AssertIsEqualF(product, NewF("1")) + c.AssertIsEqualF(product, NewFConst("1")) return xinv } @@ -161,6 +181,12 @@ func (c *Chip) AssertIsEqualF(a, b Variable) { c.api.AssertIsEqual(a2.Value, b2.Value) } +func (c *Chip) AssertNotEqualF(a, b Variable) { + a2 := c.ReduceSlow(a) + b2 := c.ReduceSlow(b) + c.api.AssertIsDifferent(a2.Value, b2.Value) +} + func (c *Chip) AssertIsEqualE(a, b ExtensionVariable) { c.AssertIsEqualF(a.Value[0], b.Value[0]) c.AssertIsEqualF(a.Value[1], b.Value[1]) @@ -169,15 +195,15 @@ func (c *Chip) AssertIsEqualE(a, b ExtensionVariable) { } func (c *Chip) SelectF(cond frontend.Variable, a, b Variable) Variable { - var nbBits uint - if a.NbBits > b.NbBits { - nbBits = a.NbBits + var UpperBound *big.Int + if a.UpperBound.Cmp(b.UpperBound) == -1 { + UpperBound = b.UpperBound } else { - nbBits = b.NbBits + UpperBound = a.UpperBound } return Variable{ - Value: c.api.Select(cond, a.Value, b.Value), - NbBits: nbBits, + Value: c.api.Select(cond, a.Value, b.Value), + UpperBound: UpperBound, } } @@ -229,13 +255,16 @@ func (c *Chip) MulE(a, b ExtensionVariable) ExtensionVariable { for i := 0; i < 4; i++ { for j := 0; j < 4; j++ { if i+j >= 4 
{ - v2[i+j-4] = c.AddF(v2[i+j-4], c.MulFConst(c.MulF(a.Value[i], b.Value[j]), 11)) + v2[i+j-4] = c.AddF(v2[i+j-4], c.MulFConst(c.MulF(a.Value[i], b.Value[j], false), 11, false), false) } else { - v2[i+j] = c.AddF(v2[i+j], c.MulF(a.Value[i], b.Value[j])) + v2[i+j] = c.AddF(v2[i+j], c.MulF(a.Value[i], b.Value[j], false), false) } } } - + v2[0] = c.reduceFast(v2[0]) + v2[1] = c.reduceFast(v2[1]) + v2[2] = c.reduceFast(v2[2]) + v2[3] = c.reduceFast(v2[3]) return ExtensionVariable{Value: v2} } @@ -248,19 +277,15 @@ func (c *Chip) MulEF(a ExtensionVariable, b Variable) ExtensionVariable { } func (c *Chip) InvE(in ExtensionVariable) ExtensionVariable { - in.Value[0] = c.ReduceSlow(in.Value[0]) - in.Value[1] = c.ReduceSlow(in.Value[1]) - in.Value[2] = c.ReduceSlow(in.Value[2]) - in.Value[3] = c.ReduceSlow(in.Value[3]) result, err := c.api.Compiler().NewHint(InvEHint, 4, in.Value[0].Value, in.Value[1].Value, in.Value[2].Value, in.Value[3].Value) if err != nil { panic(err) } - xinv := Variable{Value: result[0], NbBits: 31} - yinv := Variable{Value: result[1], NbBits: 31} - zinv := Variable{Value: result[2], NbBits: 31} - linv := Variable{Value: result[3], NbBits: 31} + xinv := Variable{Value: result[0], UpperBound: new(big.Int).SetUint64(2147483648)} + yinv := Variable{Value: result[1], UpperBound: new(big.Int).SetUint64(2147483648)} + zinv := Variable{Value: result[2], UpperBound: new(big.Int).SetUint64(2147483648)} + linv := Variable{Value: result[3], UpperBound: new(big.Int).SetUint64(2147483648)} if os.Getenv("GROTH16") != "1" { c.RangeChecker.Check(result[0], 31) c.RangeChecker.Check(result[1], 31) @@ -275,7 +300,7 @@ func (c *Chip) InvE(in ExtensionVariable) ExtensionVariable { out := ExtensionVariable{Value: [4]Variable{xinv, yinv, zinv, linv}} product := c.MulE(in, out) - c.AssertIsEqualE(product, NewE([]string{"1", "0", "0", "0"})) + c.AssertIsEqualE(product, NewEConst([]string{"1", "0", "0", "0"})) return out } @@ -289,6 +314,11 @@ func (c *Chip) DivE(a, b ExtensionVariable) ExtensionVariable { return c.MulE(a, bInv) } +func (c *Chip) DivEF(a ExtensionVariable, b Variable) ExtensionVariable { + bInv := c.invF(b) + return c.MulEF(a, bInv) +} + func (c *Chip) NegE(a ExtensionVariable) ExtensionVariable { v1 := c.negF(a.Value[0]) v2 := c.negF(a.Value[1]) @@ -298,26 +328,26 @@ func (c *Chip) NegE(a ExtensionVariable) ExtensionVariable { } func (c *Chip) ToBinary(in Variable) []frontend.Variable { - return c.api.ToBinary(c.ReduceSlow(in).Value, 32) + return c.api.ToBinary(c.ReduceSlow(in).Value, 31) } func (p *Chip) reduceFast(x Variable) Variable { - if x.NbBits >= uint(126) { + if x.UpperBound.BitLen() >= 120 { return Variable{ - Value: p.reduceWithMaxBits(x.Value, uint64(x.NbBits)), - NbBits: 31, + Value: p.reduceWithMaxBits(x.Value, uint64(x.UpperBound.BitLen())), + UpperBound: modulus_sub_1, } } return x } func (p *Chip) ReduceSlow(x Variable) Variable { - if x.NbBits <= 30 { + if x.UpperBound.Cmp(modulus) == -1 { return x } return Variable{ - Value: p.reduceWithMaxBits(x.Value, uint64(x.NbBits)), - NbBits: 31, + Value: p.reduceWithMaxBits(x.Value, uint64(x.UpperBound.BitLen())), + UpperBound: modulus_sub_1, } } @@ -338,6 +368,7 @@ func (p *Chip) reduceWithMaxBits(x frontend.Variable, maxNbBits uint64) frontend } else { p.api.ToBinary(quotient, int(maxNbBits-30)) } + // Check that the remainder has size less than the BabyBear modulus, by decomposing it into a 27 // bit limb and a 4 bit limb. 
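To spell out the check described in the comment above: the BabyBear modulus factors as p = 2^31 - 2^27 + 1 = 15*2^27 + 1, so once the remainder is written as lowLimb + 2^27*highLimb, with lowLimb range-checked to 27 bits and highLimb to 4 bits, the remainder is below p exactly when highLimb < 15, or highLimb = 15 and lowLimb = 0 (which gives p - 1). A single conditional constraint therefore suffices: if highLimb equals 2^4 - 1, force lowLimb to zero. Because shouldCheck comes from api.IsZero and is boolean, the hunk that follows can replace api.Select(shouldCheck, lowLimb, 0) with the cheaper api.Mul(shouldCheck, lowLimb); both are zero precisely when that constraint holds. A small stand-alone sanity check of the arithmetic (plain Go, not circuit code):

    package main

    import "fmt"

    func main() {
        const p = 2013265921 // 2^31 - 2^27 + 1 = 15*2^27 + 1
        mismatches := 0
        for high := 0; high < 1<<4; high++ {
            for _, low := range []int{0, 1, 1<<27 - 1} { // boundary values of the 27-bit limb
                remainder := low + high<<27
                allowedByConstraint := !(high == 15 && low != 0) // "if high == 15 then low == 0"
                if allowedByConstraint != (remainder < p) {
                    mismatches++
                }
            }
        }
        fmt.Println("mismatches:", mismatches) // prints 0
    }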
new_result, new_err := p.api.Compiler().NewHint(SplitLimbsHint, 2, remainder) @@ -369,10 +400,9 @@ func (p *Chip) reduceWithMaxBits(x frontend.Variable, maxNbBits uint64) frontend // need to do any checks, since we already know that the element is less than the BabyBear modulus. shouldCheck := p.api.IsZero(p.api.Sub(highLimb, uint64(math.Pow(2, 4))-1)) p.api.AssertIsEqual( - p.api.Select( + p.api.Mul( shouldCheck, lowLimb, - frontend.Variable(0), ), frontend.Variable(0), ) @@ -403,7 +433,7 @@ func (p *Chip) ReduceE(x ExtensionVariable) ExtensionVariable { } func InvFHint(_ *big.Int, inputs []*big.Int, results []*big.Int) error { - a := C.uint(inputs[0].Uint64()) + a := C.uint(new(big.Int).Mod(inputs[0], modulus).Uint64()) ainv := C.babybearinv(a) results[0].SetUint64(uint64(ainv)) return nil @@ -434,10 +464,10 @@ func SplitLimbsHint(_ *big.Int, inputs []*big.Int, results []*big.Int) error { } func InvEHint(_ *big.Int, inputs []*big.Int, results []*big.Int) error { - a := C.uint(inputs[0].Uint64()) - b := C.uint(inputs[1].Uint64()) - c := C.uint(inputs[2].Uint64()) - d := C.uint(inputs[3].Uint64()) + a := C.uint(new(big.Int).Mod(inputs[0], modulus).Uint64()) + b := C.uint(new(big.Int).Mod(inputs[1], modulus).Uint64()) + c := C.uint(new(big.Int).Mod(inputs[2], modulus).Uint64()) + d := C.uint(new(big.Int).Mod(inputs[3], modulus).Uint64()) ainv := C.babybearextinv(a, b, c, d, 0) binv := C.babybearextinv(a, b, c, d, 1) cinv := C.babybearextinv(a, b, c, d, 2) diff --git a/crates/recursion/gnark-ffi/go/sp1/build.go b/crates/recursion/gnark-ffi/go/sp1/build.go index 174c3f961f..4a082059c7 100644 --- a/crates/recursion/gnark-ffi/go/sp1/build.go +++ b/crates/recursion/gnark-ffi/go/sp1/build.go @@ -292,7 +292,7 @@ func BuildGroth16(dataDir string) { panic(err) } defer pkFile.Close() - _, err = pk.WriteTo(pkFile) + err = pk.WriteDump(pkFile) if err != nil { panic(err) } diff --git a/crates/recursion/gnark-ffi/go/sp1/poseidon2/constants.go b/crates/recursion/gnark-ffi/go/sp1/poseidon2/constants.go index 63f127b60c..a7e6614bbb 100644 --- a/crates/recursion/gnark-ffi/go/sp1/poseidon2/constants.go +++ b/crates/recursion/gnark-ffi/go/sp1/poseidon2/constants.go @@ -473,575 +473,575 @@ func init_rc16() { round := 0 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("2110014213"), - babybear.NewF("3964964605"), - babybear.NewF("2190662774"), - babybear.NewF("2732996483"), - babybear.NewF("640767983"), - babybear.NewF("3403899136"), - babybear.NewF("1716033721"), - babybear.NewF("1606702601"), - babybear.NewF("3759873288"), - babybear.NewF("1466015491"), - babybear.NewF("1498308946"), - babybear.NewF("2844375094"), - babybear.NewF("3042463841"), - babybear.NewF("1969905919"), - babybear.NewF("4109944726"), - babybear.NewF("3925048366"), + babybear.NewFConst("2110014213"), + babybear.NewFConst("3964964605"), + babybear.NewFConst("2190662774"), + babybear.NewFConst("2732996483"), + babybear.NewFConst("640767983"), + babybear.NewFConst("3403899136"), + babybear.NewFConst("1716033721"), + babybear.NewFConst("1606702601"), + babybear.NewFConst("3759873288"), + babybear.NewFConst("1466015491"), + babybear.NewFConst("1498308946"), + babybear.NewFConst("2844375094"), + babybear.NewFConst("3042463841"), + babybear.NewFConst("1969905919"), + babybear.NewFConst("4109944726"), + babybear.NewFConst("3925048366"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("3706859504"), - babybear.NewF("759122502"), - babybear.NewF("3167665446"), - babybear.NewF("1131812921"), - 
babybear.NewF("1080754908"), - babybear.NewF("4080114493"), - babybear.NewF("893583089"), - babybear.NewF("2019677373"), - babybear.NewF("3128604556"), - babybear.NewF("580640471"), - babybear.NewF("3277620260"), - babybear.NewF("842931656"), - babybear.NewF("548879852"), - babybear.NewF("3608554714"), - babybear.NewF("3575647916"), - babybear.NewF("81826002"), + babybear.NewFConst("3706859504"), + babybear.NewFConst("759122502"), + babybear.NewFConst("3167665446"), + babybear.NewFConst("1131812921"), + babybear.NewFConst("1080754908"), + babybear.NewFConst("4080114493"), + babybear.NewFConst("893583089"), + babybear.NewFConst("2019677373"), + babybear.NewFConst("3128604556"), + babybear.NewFConst("580640471"), + babybear.NewFConst("3277620260"), + babybear.NewFConst("842931656"), + babybear.NewFConst("548879852"), + babybear.NewFConst("3608554714"), + babybear.NewFConst("3575647916"), + babybear.NewFConst("81826002"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("4289086263"), - babybear.NewF("1563933798"), - babybear.NewF("1440025885"), - babybear.NewF("184445025"), - babybear.NewF("2598651360"), - babybear.NewF("1396647410"), - babybear.NewF("1575877922"), - babybear.NewF("3303853401"), - babybear.NewF("137125468"), - babybear.NewF("765010148"), - babybear.NewF("633675867"), - babybear.NewF("2037803363"), - babybear.NewF("2573389828"), - babybear.NewF("1895729703"), - babybear.NewF("541515871"), - babybear.NewF("1783382863"), + babybear.NewFConst("4289086263"), + babybear.NewFConst("1563933798"), + babybear.NewFConst("1440025885"), + babybear.NewFConst("184445025"), + babybear.NewFConst("2598651360"), + babybear.NewFConst("1396647410"), + babybear.NewFConst("1575877922"), + babybear.NewFConst("3303853401"), + babybear.NewFConst("137125468"), + babybear.NewFConst("765010148"), + babybear.NewFConst("633675867"), + babybear.NewFConst("2037803363"), + babybear.NewFConst("2573389828"), + babybear.NewFConst("1895729703"), + babybear.NewFConst("541515871"), + babybear.NewFConst("1783382863"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("2641856484"), - babybear.NewF("3035743342"), - babybear.NewF("3672796326"), - babybear.NewF("245668751"), - babybear.NewF("2025460432"), - babybear.NewF("201609705"), - babybear.NewF("286217151"), - babybear.NewF("4093475563"), - babybear.NewF("2519572182"), - babybear.NewF("3080699870"), - babybear.NewF("2762001832"), - babybear.NewF("1244250808"), - babybear.NewF("606038199"), - babybear.NewF("3182740831"), - babybear.NewF("73007766"), - babybear.NewF("2572204153"), + babybear.NewFConst("2641856484"), + babybear.NewFConst("3035743342"), + babybear.NewFConst("3672796326"), + babybear.NewFConst("245668751"), + babybear.NewFConst("2025460432"), + babybear.NewFConst("201609705"), + babybear.NewFConst("286217151"), + babybear.NewFConst("4093475563"), + babybear.NewFConst("2519572182"), + babybear.NewFConst("3080699870"), + babybear.NewFConst("2762001832"), + babybear.NewFConst("1244250808"), + babybear.NewFConst("606038199"), + babybear.NewFConst("3182740831"), + babybear.NewFConst("73007766"), + babybear.NewFConst("2572204153"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("1196780786"), - babybear.NewF("3447394443"), - babybear.NewF("747167305"), - babybear.NewF("2968073607"), - babybear.NewF("1053214930"), - babybear.NewF("1074411832"), - babybear.NewF("4016794508"), - babybear.NewF("1570312929"), - babybear.NewF("113576933"), - babybear.NewF("4042581186"), - 
babybear.NewF("3634515733"), - babybear.NewF("1032701597"), - babybear.NewF("2364839308"), - babybear.NewF("3840286918"), - babybear.NewF("888378655"), - babybear.NewF("2520191583"), + babybear.NewFConst("1196780786"), + babybear.NewFConst("3447394443"), + babybear.NewFConst("747167305"), + babybear.NewFConst("2968073607"), + babybear.NewFConst("1053214930"), + babybear.NewFConst("1074411832"), + babybear.NewFConst("4016794508"), + babybear.NewFConst("1570312929"), + babybear.NewFConst("113576933"), + babybear.NewFConst("4042581186"), + babybear.NewFConst("3634515733"), + babybear.NewFConst("1032701597"), + babybear.NewFConst("2364839308"), + babybear.NewFConst("3840286918"), + babybear.NewFConst("888378655"), + babybear.NewFConst("2520191583"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("36046858"), - babybear.NewF("2927525953"), - babybear.NewF("3912129105"), - babybear.NewF("4004832531"), - babybear.NewF("193772436"), - babybear.NewF("1590247392"), - babybear.NewF("4125818172"), - babybear.NewF("2516251696"), - babybear.NewF("4050945750"), - babybear.NewF("269498914"), - babybear.NewF("1973292656"), - babybear.NewF("891403491"), - babybear.NewF("1845429189"), - babybear.NewF("2611996363"), - babybear.NewF("2310542653"), - babybear.NewF("4071195740"), + babybear.NewFConst("36046858"), + babybear.NewFConst("2927525953"), + babybear.NewFConst("3912129105"), + babybear.NewFConst("4004832531"), + babybear.NewFConst("193772436"), + babybear.NewFConst("1590247392"), + babybear.NewFConst("4125818172"), + babybear.NewFConst("2516251696"), + babybear.NewFConst("4050945750"), + babybear.NewFConst("269498914"), + babybear.NewFConst("1973292656"), + babybear.NewFConst("891403491"), + babybear.NewFConst("1845429189"), + babybear.NewFConst("2611996363"), + babybear.NewFConst("2310542653"), + babybear.NewFConst("4071195740"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("3505307391"), - babybear.NewF("786445290"), - babybear.NewF("3815313971"), - babybear.NewF("1111591756"), - babybear.NewF("4233279834"), - babybear.NewF("2775453034"), - babybear.NewF("1991257625"), - babybear.NewF("2940505809"), - babybear.NewF("2751316206"), - babybear.NewF("1028870679"), - babybear.NewF("1282466273"), - babybear.NewF("1059053371"), - babybear.NewF("834521354"), - babybear.NewF("138721483"), - babybear.NewF("3100410803"), - babybear.NewF("3843128331"), + babybear.NewFConst("3505307391"), + babybear.NewFConst("786445290"), + babybear.NewFConst("3815313971"), + babybear.NewFConst("1111591756"), + babybear.NewFConst("4233279834"), + babybear.NewFConst("2775453034"), + babybear.NewFConst("1991257625"), + babybear.NewFConst("2940505809"), + babybear.NewFConst("2751316206"), + babybear.NewFConst("1028870679"), + babybear.NewFConst("1282466273"), + babybear.NewFConst("1059053371"), + babybear.NewFConst("834521354"), + babybear.NewFConst("138721483"), + babybear.NewFConst("3100410803"), + babybear.NewFConst("3843128331"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("3878220780"), - babybear.NewF("4058162439"), - babybear.NewF("1478942487"), - babybear.NewF("799012923"), - babybear.NewF("496734827"), - babybear.NewF("3521261236"), - babybear.NewF("755421082"), - babybear.NewF("1361409515"), - babybear.NewF("392099473"), - babybear.NewF("3178453393"), - babybear.NewF("4068463721"), - babybear.NewF("7935614"), - babybear.NewF("4140885645"), - babybear.NewF("2150748066"), - babybear.NewF("1685210312"), - 
babybear.NewF("3852983224"), + babybear.NewFConst("3878220780"), + babybear.NewFConst("4058162439"), + babybear.NewFConst("1478942487"), + babybear.NewFConst("799012923"), + babybear.NewFConst("496734827"), + babybear.NewFConst("3521261236"), + babybear.NewFConst("755421082"), + babybear.NewFConst("1361409515"), + babybear.NewFConst("392099473"), + babybear.NewFConst("3178453393"), + babybear.NewFConst("4068463721"), + babybear.NewFConst("7935614"), + babybear.NewFConst("4140885645"), + babybear.NewFConst("2150748066"), + babybear.NewFConst("1685210312"), + babybear.NewFConst("3852983224"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("2896943075"), - babybear.NewF("3087590927"), - babybear.NewF("992175959"), - babybear.NewF("970216228"), - babybear.NewF("3473630090"), - babybear.NewF("3899670400"), - babybear.NewF("3603388822"), - babybear.NewF("2633488197"), - babybear.NewF("2479406964"), - babybear.NewF("2420952999"), - babybear.NewF("1852516800"), - babybear.NewF("4253075697"), - babybear.NewF("979699862"), - babybear.NewF("1163403191"), - babybear.NewF("1608599874"), - babybear.NewF("3056104448"), + babybear.NewFConst("2896943075"), + babybear.NewFConst("3087590927"), + babybear.NewFConst("992175959"), + babybear.NewFConst("970216228"), + babybear.NewFConst("3473630090"), + babybear.NewFConst("3899670400"), + babybear.NewFConst("3603388822"), + babybear.NewFConst("2633488197"), + babybear.NewFConst("2479406964"), + babybear.NewFConst("2420952999"), + babybear.NewFConst("1852516800"), + babybear.NewFConst("4253075697"), + babybear.NewFConst("979699862"), + babybear.NewFConst("1163403191"), + babybear.NewFConst("1608599874"), + babybear.NewFConst("3056104448"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("3779109343"), - babybear.NewF("536205958"), - babybear.NewF("4183458361"), - babybear.NewF("1649720295"), - babybear.NewF("1444912244"), - babybear.NewF("3122230878"), - babybear.NewF("384301396"), - babybear.NewF("4228198516"), - babybear.NewF("1662916865"), - babybear.NewF("4082161114"), - babybear.NewF("2121897314"), - babybear.NewF("1706239958"), - babybear.NewF("4166959388"), - babybear.NewF("1626054781"), - babybear.NewF("3005858978"), - babybear.NewF("1431907253"), + babybear.NewFConst("3779109343"), + babybear.NewFConst("536205958"), + babybear.NewFConst("4183458361"), + babybear.NewFConst("1649720295"), + babybear.NewFConst("1444912244"), + babybear.NewFConst("3122230878"), + babybear.NewFConst("384301396"), + babybear.NewFConst("4228198516"), + babybear.NewFConst("1662916865"), + babybear.NewFConst("4082161114"), + babybear.NewFConst("2121897314"), + babybear.NewFConst("1706239958"), + babybear.NewFConst("4166959388"), + babybear.NewFConst("1626054781"), + babybear.NewFConst("3005858978"), + babybear.NewFConst("1431907253"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("1418914503"), - babybear.NewF("1365856753"), - babybear.NewF("3942715745"), - babybear.NewF("1429155552"), - babybear.NewF("3545642795"), - babybear.NewF("3772474257"), - babybear.NewF("1621094396"), - babybear.NewF("2154399145"), - babybear.NewF("826697382"), - babybear.NewF("1700781391"), - babybear.NewF("3539164324"), - babybear.NewF("652815039"), - babybear.NewF("442484755"), - babybear.NewF("2055299391"), - babybear.NewF("1064289978"), - babybear.NewF("1152335780"), + babybear.NewFConst("1418914503"), + babybear.NewFConst("1365856753"), + babybear.NewFConst("3942715745"), + babybear.NewFConst("1429155552"), 
+ babybear.NewFConst("3545642795"), + babybear.NewFConst("3772474257"), + babybear.NewFConst("1621094396"), + babybear.NewFConst("2154399145"), + babybear.NewFConst("826697382"), + babybear.NewFConst("1700781391"), + babybear.NewFConst("3539164324"), + babybear.NewFConst("652815039"), + babybear.NewFConst("442484755"), + babybear.NewFConst("2055299391"), + babybear.NewFConst("1064289978"), + babybear.NewFConst("1152335780"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("3417648695"), - babybear.NewF("186040114"), - babybear.NewF("3475580573"), - babybear.NewF("2113941250"), - babybear.NewF("1779573826"), - babybear.NewF("1573808590"), - babybear.NewF("3235694804"), - babybear.NewF("2922195281"), - babybear.NewF("1119462702"), - babybear.NewF("3688305521"), - babybear.NewF("1849567013"), - babybear.NewF("667446787"), - babybear.NewF("753897224"), - babybear.NewF("1896396780"), - babybear.NewF("3143026334"), - babybear.NewF("3829603876"), + babybear.NewFConst("3417648695"), + babybear.NewFConst("186040114"), + babybear.NewFConst("3475580573"), + babybear.NewFConst("2113941250"), + babybear.NewFConst("1779573826"), + babybear.NewFConst("1573808590"), + babybear.NewFConst("3235694804"), + babybear.NewFConst("2922195281"), + babybear.NewFConst("1119462702"), + babybear.NewFConst("3688305521"), + babybear.NewFConst("1849567013"), + babybear.NewFConst("667446787"), + babybear.NewFConst("753897224"), + babybear.NewFConst("1896396780"), + babybear.NewFConst("3143026334"), + babybear.NewFConst("3829603876"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("859661334"), - babybear.NewF("3898844357"), - babybear.NewF("180258337"), - babybear.NewF("2321867017"), - babybear.NewF("3599002504"), - babybear.NewF("2886782421"), - babybear.NewF("3038299378"), - babybear.NewF("1035366250"), - babybear.NewF("2038912197"), - babybear.NewF("2920174523"), - babybear.NewF("1277696101"), - babybear.NewF("2785700290"), - babybear.NewF("3806504335"), - babybear.NewF("3518858933"), - babybear.NewF("654843672"), - babybear.NewF("2127120275"), + babybear.NewFConst("859661334"), + babybear.NewFConst("3898844357"), + babybear.NewFConst("180258337"), + babybear.NewFConst("2321867017"), + babybear.NewFConst("3599002504"), + babybear.NewFConst("2886782421"), + babybear.NewFConst("3038299378"), + babybear.NewFConst("1035366250"), + babybear.NewFConst("2038912197"), + babybear.NewFConst("2920174523"), + babybear.NewFConst("1277696101"), + babybear.NewFConst("2785700290"), + babybear.NewFConst("3806504335"), + babybear.NewFConst("3518858933"), + babybear.NewFConst("654843672"), + babybear.NewFConst("2127120275"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("1548195514"), - babybear.NewF("2378056027"), - babybear.NewF("390914568"), - babybear.NewF("1472049779"), - babybear.NewF("1552596765"), - babybear.NewF("1905886441"), - babybear.NewF("1611959354"), - babybear.NewF("3653263304"), - babybear.NewF("3423946386"), - babybear.NewF("340857935"), - babybear.NewF("2208879480"), - babybear.NewF("139364268"), - babybear.NewF("3447281773"), - babybear.NewF("3777813707"), - babybear.NewF("55640413"), - babybear.NewF("4101901741"), + babybear.NewFConst("1548195514"), + babybear.NewFConst("2378056027"), + babybear.NewFConst("390914568"), + babybear.NewFConst("1472049779"), + babybear.NewFConst("1552596765"), + babybear.NewFConst("1905886441"), + babybear.NewFConst("1611959354"), + babybear.NewFConst("3653263304"), + 
babybear.NewFConst("3423946386"), + babybear.NewFConst("340857935"), + babybear.NewFConst("2208879480"), + babybear.NewFConst("139364268"), + babybear.NewFConst("3447281773"), + babybear.NewFConst("3777813707"), + babybear.NewFConst("55640413"), + babybear.NewFConst("4101901741"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("104929687"), - babybear.NewF("1459980974"), - babybear.NewF("1831234737"), - babybear.NewF("457139004"), - babybear.NewF("2581487628"), - babybear.NewF("2112044563"), - babybear.NewF("3567013861"), - babybear.NewF("2792004347"), - babybear.NewF("576325418"), - babybear.NewF("41126132"), - babybear.NewF("2713562324"), - babybear.NewF("151213722"), - babybear.NewF("2891185935"), - babybear.NewF("546846420"), - babybear.NewF("2939794919"), - babybear.NewF("2543469905"), + babybear.NewFConst("104929687"), + babybear.NewFConst("1459980974"), + babybear.NewFConst("1831234737"), + babybear.NewFConst("457139004"), + babybear.NewFConst("2581487628"), + babybear.NewFConst("2112044563"), + babybear.NewFConst("3567013861"), + babybear.NewFConst("2792004347"), + babybear.NewFConst("576325418"), + babybear.NewFConst("41126132"), + babybear.NewFConst("2713562324"), + babybear.NewFConst("151213722"), + babybear.NewFConst("2891185935"), + babybear.NewFConst("546846420"), + babybear.NewFConst("2939794919"), + babybear.NewFConst("2543469905"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("2191909784"), - babybear.NewF("3315138460"), - babybear.NewF("530414574"), - babybear.NewF("1242280418"), - babybear.NewF("1211740715"), - babybear.NewF("3993672165"), - babybear.NewF("2505083323"), - babybear.NewF("3845798801"), - babybear.NewF("538768466"), - babybear.NewF("2063567560"), - babybear.NewF("3366148274"), - babybear.NewF("1449831887"), - babybear.NewF("2408012466"), - babybear.NewF("294726285"), - babybear.NewF("3943435493"), - babybear.NewF("924016661"), + babybear.NewFConst("2191909784"), + babybear.NewFConst("3315138460"), + babybear.NewFConst("530414574"), + babybear.NewFConst("1242280418"), + babybear.NewFConst("1211740715"), + babybear.NewFConst("3993672165"), + babybear.NewFConst("2505083323"), + babybear.NewFConst("3845798801"), + babybear.NewFConst("538768466"), + babybear.NewFConst("2063567560"), + babybear.NewFConst("3366148274"), + babybear.NewFConst("1449831887"), + babybear.NewFConst("2408012466"), + babybear.NewFConst("294726285"), + babybear.NewFConst("3943435493"), + babybear.NewFConst("924016661"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("3633138367"), - babybear.NewF("3222789372"), - babybear.NewF("809116305"), - babybear.NewF("30100013"), - babybear.NewF("2655172876"), - babybear.NewF("2564247117"), - babybear.NewF("2478649732"), - babybear.NewF("4113689151"), - babybear.NewF("4120146082"), - babybear.NewF("2512308515"), - babybear.NewF("650406041"), - babybear.NewF("4240012393"), - babybear.NewF("2683508708"), - babybear.NewF("951073977"), - babybear.NewF("3460081988"), - babybear.NewF("339124269"), + babybear.NewFConst("3633138367"), + babybear.NewFConst("3222789372"), + babybear.NewFConst("809116305"), + babybear.NewFConst("30100013"), + babybear.NewFConst("2655172876"), + babybear.NewFConst("2564247117"), + babybear.NewFConst("2478649732"), + babybear.NewFConst("4113689151"), + babybear.NewFConst("4120146082"), + babybear.NewFConst("2512308515"), + babybear.NewFConst("650406041"), + babybear.NewFConst("4240012393"), + babybear.NewFConst("2683508708"), + 
babybear.NewFConst("951073977"), + babybear.NewFConst("3460081988"), + babybear.NewFConst("339124269"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("130182653"), - babybear.NewF("2755946749"), - babybear.NewF("542600513"), - babybear.NewF("2816103022"), - babybear.NewF("1931786340"), - babybear.NewF("2044470840"), - babybear.NewF("1709908013"), - babybear.NewF("2938369043"), - babybear.NewF("3640399693"), - babybear.NewF("1374470239"), - babybear.NewF("2191149676"), - babybear.NewF("2637495682"), - babybear.NewF("4236394040"), - babybear.NewF("2289358846"), - babybear.NewF("3833368530"), - babybear.NewF("974546524"), + babybear.NewFConst("130182653"), + babybear.NewFConst("2755946749"), + babybear.NewFConst("542600513"), + babybear.NewFConst("2816103022"), + babybear.NewFConst("1931786340"), + babybear.NewFConst("2044470840"), + babybear.NewFConst("1709908013"), + babybear.NewFConst("2938369043"), + babybear.NewFConst("3640399693"), + babybear.NewFConst("1374470239"), + babybear.NewFConst("2191149676"), + babybear.NewFConst("2637495682"), + babybear.NewFConst("4236394040"), + babybear.NewFConst("2289358846"), + babybear.NewFConst("3833368530"), + babybear.NewFConst("974546524"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("3306659113"), - babybear.NewF("2234814261"), - babybear.NewF("1188782305"), - babybear.NewF("223782844"), - babybear.NewF("2248980567"), - babybear.NewF("2309786141"), - babybear.NewF("2023401627"), - babybear.NewF("3278877413"), - babybear.NewF("2022138149"), - babybear.NewF("575851471"), - babybear.NewF("1612560780"), - babybear.NewF("3926656936"), - babybear.NewF("3318548977"), - babybear.NewF("2591863678"), - babybear.NewF("188109355"), - babybear.NewF("4217723909"), + babybear.NewFConst("3306659113"), + babybear.NewFConst("2234814261"), + babybear.NewFConst("1188782305"), + babybear.NewFConst("223782844"), + babybear.NewFConst("2248980567"), + babybear.NewFConst("2309786141"), + babybear.NewFConst("2023401627"), + babybear.NewFConst("3278877413"), + babybear.NewFConst("2022138149"), + babybear.NewFConst("575851471"), + babybear.NewFConst("1612560780"), + babybear.NewFConst("3926656936"), + babybear.NewFConst("3318548977"), + babybear.NewFConst("2591863678"), + babybear.NewFConst("188109355"), + babybear.NewFConst("4217723909"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("1564209905"), - babybear.NewF("2154197895"), - babybear.NewF("2459687029"), - babybear.NewF("2870634489"), - babybear.NewF("1375012945"), - babybear.NewF("1529454825"), - babybear.NewF("306140690"), - babybear.NewF("2855578299"), - babybear.NewF("1246997295"), - babybear.NewF("3024298763"), - babybear.NewF("1915270363"), - babybear.NewF("1218245412"), - babybear.NewF("2479314020"), - babybear.NewF("2989827755"), - babybear.NewF("814378556"), - babybear.NewF("4039775921"), + babybear.NewFConst("1564209905"), + babybear.NewFConst("2154197895"), + babybear.NewFConst("2459687029"), + babybear.NewFConst("2870634489"), + babybear.NewFConst("1375012945"), + babybear.NewFConst("1529454825"), + babybear.NewFConst("306140690"), + babybear.NewFConst("2855578299"), + babybear.NewFConst("1246997295"), + babybear.NewFConst("3024298763"), + babybear.NewFConst("1915270363"), + babybear.NewFConst("1218245412"), + babybear.NewFConst("2479314020"), + babybear.NewFConst("2989827755"), + babybear.NewFConst("814378556"), + babybear.NewFConst("4039775921"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - 
babybear.NewF("1165280628"), - babybear.NewF("1203983801"), - babybear.NewF("3814740033"), - babybear.NewF("1919627044"), - babybear.NewF("600240215"), - babybear.NewF("773269071"), - babybear.NewF("486685186"), - babybear.NewF("4254048810"), - babybear.NewF("1415023565"), - babybear.NewF("502840102"), - babybear.NewF("4225648358"), - babybear.NewF("510217063"), - babybear.NewF("166444818"), - babybear.NewF("1430745893"), - babybear.NewF("1376516190"), - babybear.NewF("1775891321"), + babybear.NewFConst("1165280628"), + babybear.NewFConst("1203983801"), + babybear.NewFConst("3814740033"), + babybear.NewFConst("1919627044"), + babybear.NewFConst("600240215"), + babybear.NewFConst("773269071"), + babybear.NewFConst("486685186"), + babybear.NewFConst("4254048810"), + babybear.NewFConst("1415023565"), + babybear.NewFConst("502840102"), + babybear.NewFConst("4225648358"), + babybear.NewFConst("510217063"), + babybear.NewFConst("166444818"), + babybear.NewFConst("1430745893"), + babybear.NewFConst("1376516190"), + babybear.NewFConst("1775891321"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("1170945922"), - babybear.NewF("1105391877"), - babybear.NewF("261536467"), - babybear.NewF("1401687994"), - babybear.NewF("1022529847"), - babybear.NewF("2476446456"), - babybear.NewF("2603844878"), - babybear.NewF("3706336043"), - babybear.NewF("3463053714"), - babybear.NewF("1509644517"), - babybear.NewF("588552318"), - babybear.NewF("65252581"), - babybear.NewF("3696502656"), - babybear.NewF("2183330763"), - babybear.NewF("3664021233"), - babybear.NewF("1643809916"), + babybear.NewFConst("1170945922"), + babybear.NewFConst("1105391877"), + babybear.NewFConst("261536467"), + babybear.NewFConst("1401687994"), + babybear.NewFConst("1022529847"), + babybear.NewFConst("2476446456"), + babybear.NewFConst("2603844878"), + babybear.NewFConst("3706336043"), + babybear.NewFConst("3463053714"), + babybear.NewFConst("1509644517"), + babybear.NewFConst("588552318"), + babybear.NewFConst("65252581"), + babybear.NewFConst("3696502656"), + babybear.NewFConst("2183330763"), + babybear.NewFConst("3664021233"), + babybear.NewFConst("1643809916"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("2922875898"), - babybear.NewF("3740690643"), - babybear.NewF("3932461140"), - babybear.NewF("161156271"), - babybear.NewF("2619943483"), - babybear.NewF("4077039509"), - babybear.NewF("2921201703"), - babybear.NewF("2085619718"), - babybear.NewF("2065264646"), - babybear.NewF("2615693812"), - babybear.NewF("3116555433"), - babybear.NewF("246100007"), - babybear.NewF("4281387154"), - babybear.NewF("4046141001"), - babybear.NewF("4027749321"), - babybear.NewF("111611860"), + babybear.NewFConst("2922875898"), + babybear.NewFConst("3740690643"), + babybear.NewFConst("3932461140"), + babybear.NewFConst("161156271"), + babybear.NewFConst("2619943483"), + babybear.NewFConst("4077039509"), + babybear.NewFConst("2921201703"), + babybear.NewFConst("2085619718"), + babybear.NewFConst("2065264646"), + babybear.NewFConst("2615693812"), + babybear.NewFConst("3116555433"), + babybear.NewFConst("246100007"), + babybear.NewFConst("4281387154"), + babybear.NewFConst("4046141001"), + babybear.NewFConst("4027749321"), + babybear.NewFConst("111611860"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("2066954820"), - babybear.NewF("2502099969"), - babybear.NewF("2915053115"), - babybear.NewF("2362518586"), - babybear.NewF("366091708"), - 
babybear.NewF("2083204932"), - babybear.NewF("4138385632"), - babybear.NewF("3195157567"), - babybear.NewF("1318086382"), - babybear.NewF("521723799"), - babybear.NewF("702443405"), - babybear.NewF("2507670985"), - babybear.NewF("1760347557"), - babybear.NewF("2631999893"), - babybear.NewF("1672737554"), - babybear.NewF("1060867760"), + babybear.NewFConst("2066954820"), + babybear.NewFConst("2502099969"), + babybear.NewFConst("2915053115"), + babybear.NewFConst("2362518586"), + babybear.NewFConst("366091708"), + babybear.NewFConst("2083204932"), + babybear.NewFConst("4138385632"), + babybear.NewFConst("3195157567"), + babybear.NewFConst("1318086382"), + babybear.NewFConst("521723799"), + babybear.NewFConst("702443405"), + babybear.NewFConst("2507670985"), + babybear.NewFConst("1760347557"), + babybear.NewFConst("2631999893"), + babybear.NewFConst("1672737554"), + babybear.NewFConst("1060867760"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("2359801781"), - babybear.NewF("2800231467"), - babybear.NewF("3010357035"), - babybear.NewF("1035997899"), - babybear.NewF("1210110952"), - babybear.NewF("1018506770"), - babybear.NewF("2799468177"), - babybear.NewF("1479380761"), - babybear.NewF("1536021911"), - babybear.NewF("358993854"), - babybear.NewF("579904113"), - babybear.NewF("3432144800"), - babybear.NewF("3625515809"), - babybear.NewF("199241497"), - babybear.NewF("4058304109"), - babybear.NewF("2590164234"), + babybear.NewFConst("2359801781"), + babybear.NewFConst("2800231467"), + babybear.NewFConst("3010357035"), + babybear.NewFConst("1035997899"), + babybear.NewFConst("1210110952"), + babybear.NewFConst("1018506770"), + babybear.NewFConst("2799468177"), + babybear.NewFConst("1479380761"), + babybear.NewFConst("1536021911"), + babybear.NewFConst("358993854"), + babybear.NewFConst("579904113"), + babybear.NewFConst("3432144800"), + babybear.NewFConst("3625515809"), + babybear.NewFConst("199241497"), + babybear.NewFConst("4058304109"), + babybear.NewFConst("2590164234"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("1688530738"), - babybear.NewF("1580733335"), - babybear.NewF("2443981517"), - babybear.NewF("2206270565"), - babybear.NewF("2780074229"), - babybear.NewF("2628739677"), - babybear.NewF("2940123659"), - babybear.NewF("4145206827"), - babybear.NewF("3572278009"), - babybear.NewF("2779607509"), - babybear.NewF("1098718697"), - babybear.NewF("1424913749"), - babybear.NewF("2224415875"), - babybear.NewF("1108922178"), - babybear.NewF("3646272562"), - babybear.NewF("3935186184"), + babybear.NewFConst("1688530738"), + babybear.NewFConst("1580733335"), + babybear.NewFConst("2443981517"), + babybear.NewFConst("2206270565"), + babybear.NewFConst("2780074229"), + babybear.NewFConst("2628739677"), + babybear.NewFConst("2940123659"), + babybear.NewFConst("4145206827"), + babybear.NewFConst("3572278009"), + babybear.NewFConst("2779607509"), + babybear.NewFConst("1098718697"), + babybear.NewFConst("1424913749"), + babybear.NewFConst("2224415875"), + babybear.NewFConst("1108922178"), + babybear.NewFConst("3646272562"), + babybear.NewFConst("3935186184"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("820046587"), - babybear.NewF("1393386250"), - babybear.NewF("2665818575"), - babybear.NewF("2231782019"), - babybear.NewF("672377010"), - babybear.NewF("1920315467"), - babybear.NewF("1913164407"), - babybear.NewF("2029526876"), - babybear.NewF("2629271820"), - babybear.NewF("384320012"), - 
babybear.NewF("4112320585"), - babybear.NewF("3131824773"), - babybear.NewF("2347818197"), - babybear.NewF("2220997386"), - babybear.NewF("1772368609"), - babybear.NewF("2579960095"), + babybear.NewFConst("820046587"), + babybear.NewFConst("1393386250"), + babybear.NewFConst("2665818575"), + babybear.NewFConst("2231782019"), + babybear.NewFConst("672377010"), + babybear.NewFConst("1920315467"), + babybear.NewFConst("1913164407"), + babybear.NewFConst("2029526876"), + babybear.NewFConst("2629271820"), + babybear.NewFConst("384320012"), + babybear.NewFConst("4112320585"), + babybear.NewFConst("3131824773"), + babybear.NewFConst("2347818197"), + babybear.NewFConst("2220997386"), + babybear.NewFConst("1772368609"), + babybear.NewFConst("2579960095"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("3544930873"), - babybear.NewF("225847443"), - babybear.NewF("3070082278"), - babybear.NewF("95643305"), - babybear.NewF("3438572042"), - babybear.NewF("3312856509"), - babybear.NewF("615850007"), - babybear.NewF("1863868773"), - babybear.NewF("803582265"), - babybear.NewF("3461976859"), - babybear.NewF("2903025799"), - babybear.NewF("1482092434"), - babybear.NewF("3902972499"), - babybear.NewF("3872341868"), - babybear.NewF("1530411808"), - babybear.NewF("2214923584"), + babybear.NewFConst("3544930873"), + babybear.NewFConst("225847443"), + babybear.NewFConst("3070082278"), + babybear.NewFConst("95643305"), + babybear.NewFConst("3438572042"), + babybear.NewFConst("3312856509"), + babybear.NewFConst("615850007"), + babybear.NewFConst("1863868773"), + babybear.NewFConst("803582265"), + babybear.NewFConst("3461976859"), + babybear.NewFConst("2903025799"), + babybear.NewFConst("1482092434"), + babybear.NewFConst("3902972499"), + babybear.NewFConst("3872341868"), + babybear.NewFConst("1530411808"), + babybear.NewFConst("2214923584"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("3118792481"), - babybear.NewF("2241076515"), - babybear.NewF("3983669831"), - babybear.NewF("3180915147"), - babybear.NewF("3838626501"), - babybear.NewF("1921630011"), - babybear.NewF("3415351771"), - babybear.NewF("2249953859"), - babybear.NewF("3755081630"), - babybear.NewF("486327260"), - babybear.NewF("1227575720"), - babybear.NewF("3643869379"), - babybear.NewF("2982026073"), - babybear.NewF("2466043731"), - babybear.NewF("1982634375"), - babybear.NewF("3769609014"), + babybear.NewFConst("3118792481"), + babybear.NewFConst("2241076515"), + babybear.NewFConst("3983669831"), + babybear.NewFConst("3180915147"), + babybear.NewFConst("3838626501"), + babybear.NewFConst("1921630011"), + babybear.NewFConst("3415351771"), + babybear.NewFConst("2249953859"), + babybear.NewFConst("3755081630"), + babybear.NewFConst("486327260"), + babybear.NewFConst("1227575720"), + babybear.NewFConst("3643869379"), + babybear.NewFConst("2982026073"), + babybear.NewFConst("2466043731"), + babybear.NewFConst("1982634375"), + babybear.NewFConst("3769609014"), } round += 1 rc16[round] = [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("2195455495"), - babybear.NewF("2596863283"), - babybear.NewF("4244994973"), - babybear.NewF("1983609348"), - babybear.NewF("4019674395"), - babybear.NewF("3469982031"), - babybear.NewF("1458697570"), - babybear.NewF("1593516217"), - babybear.NewF("1963896497"), - babybear.NewF("3115309118"), - babybear.NewF("1659132465"), - babybear.NewF("2536770756"), - babybear.NewF("3059294171"), - babybear.NewF("2618031334"), - babybear.NewF("2040903247"), - 
babybear.NewF("3799795076"), + babybear.NewFConst("2195455495"), + babybear.NewFConst("2596863283"), + babybear.NewFConst("4244994973"), + babybear.NewFConst("1983609348"), + babybear.NewFConst("4019674395"), + babybear.NewFConst("3469982031"), + babybear.NewFConst("1458697570"), + babybear.NewFConst("1593516217"), + babybear.NewFConst("1963896497"), + babybear.NewFConst("3115309118"), + babybear.NewFConst("1659132465"), + babybear.NewFConst("2536770756"), + babybear.NewFConst("3059294171"), + babybear.NewFConst("2618031334"), + babybear.NewFConst("2040903247"), + babybear.NewFConst("3799795076"), } } diff --git a/crates/recursion/gnark-ffi/go/sp1/poseidon2/poseidon2_babybear.go b/crates/recursion/gnark-ffi/go/sp1/poseidon2/poseidon2_babybear.go index a16cc609fe..d26f614fff 100644 --- a/crates/recursion/gnark-ffi/go/sp1/poseidon2/poseidon2_babybear.go +++ b/crates/recursion/gnark-ffi/go/sp1/poseidon2/poseidon2_babybear.go @@ -1,6 +1,8 @@ package poseidon2 import ( + "math/big" + "github.com/consensys/gnark/frontend" "github.com/succinctlabs/sp1-recursion-gnark/sp1/babybear" ) @@ -57,7 +59,7 @@ func (p *Poseidon2BabyBearChip) addRc(state *[BABYBEAR_WIDTH]babybear.Variable, } func (p *Poseidon2BabyBearChip) sboxP(input babybear.Variable) babybear.Variable { - zero := babybear.NewF("0") + zero := babybear.NewFConst("0") inputCpy := p.fieldApi.AddF(input, zero) inputCpy = p.fieldApi.ReduceSlow(inputCpy) inputValue := inputCpy.Value @@ -66,8 +68,8 @@ func (p *Poseidon2BabyBearChip) sboxP(input babybear.Variable) babybear.Variable i6 := p.api.Mul(i4, i2) i7 := p.api.Mul(i6, inputValue) i7bb := p.fieldApi.ReduceSlow(babybear.Variable{ - Value: i7, - NbBits: 31 * 7, + Value: i7, + UpperBound: new(big.Int).Exp(new(big.Int).SetUint64(2013265921), new(big.Int).SetUint64(7), new(big.Int).SetUint64(0)), }) return i7bb } @@ -115,24 +117,24 @@ func (p *Poseidon2BabyBearChip) externalLinearLayer(state *[BABYBEAR_WIDTH]babyb func (p *Poseidon2BabyBearChip) diffusionPermuteMut(state *[BABYBEAR_WIDTH]babybear.Variable) { matInternalDiagM1 := [BABYBEAR_WIDTH]babybear.Variable{ - babybear.NewF("2013265919"), - babybear.NewF("1"), - babybear.NewF("2"), - babybear.NewF("4"), - babybear.NewF("8"), - babybear.NewF("16"), - babybear.NewF("32"), - babybear.NewF("64"), - babybear.NewF("128"), - babybear.NewF("256"), - babybear.NewF("512"), - babybear.NewF("1024"), - babybear.NewF("2048"), - babybear.NewF("4096"), - babybear.NewF("8192"), - babybear.NewF("32768"), + babybear.NewFConst("2013265919"), + babybear.NewFConst("1"), + babybear.NewFConst("2"), + babybear.NewFConst("4"), + babybear.NewFConst("8"), + babybear.NewFConst("16"), + babybear.NewFConst("32"), + babybear.NewFConst("64"), + babybear.NewFConst("128"), + babybear.NewFConst("256"), + babybear.NewFConst("512"), + babybear.NewFConst("1024"), + babybear.NewFConst("2048"), + babybear.NewFConst("4096"), + babybear.NewFConst("8192"), + babybear.NewFConst("32768"), } - montyInverse := babybear.NewF("943718400") + montyInverse := babybear.NewFConst("943718400") p.matmulInternal(state, &matInternalDiagM1) for i := 0; i < BABYBEAR_WIDTH; i++ { state[i] = p.fieldApi.MulF(state[i], montyInverse) @@ -144,7 +146,7 @@ func (p *Poseidon2BabyBearChip) matmulInternal( state *[BABYBEAR_WIDTH]babybear.Variable, matInternalDiagM1 *[BABYBEAR_WIDTH]babybear.Variable, ) { - sum := babybear.NewF("0") + sum := babybear.NewFConst("0") for i := 0; i < BABYBEAR_WIDTH; i++ { sum = p.fieldApi.AddF(sum, state[i]) } diff --git a/crates/recursion/gnark-ffi/go/sp1/prove.go 
b/crates/recursion/gnark-ffi/go/sp1/prove.go index f474d786a2..2ead9db55b 100644 --- a/crates/recursion/gnark-ffi/go/sp1/prove.go +++ b/crates/recursion/gnark-ffi/go/sp1/prove.go @@ -3,14 +3,24 @@ package sp1 import ( "bufio" "encoding/json" + "fmt" "os" + "sync" + "time" "github.com/consensys/gnark-crypto/ecc" "github.com/consensys/gnark/backend/groth16" "github.com/consensys/gnark/backend/plonk" + "github.com/consensys/gnark/constraint" "github.com/consensys/gnark/frontend" ) +var globalMutex sync.RWMutex +var globalR1cs constraint.ConstraintSystem = groth16.NewCS(ecc.BN254) +var globalR1csInitialized = false +var globalPk groth16.ProvingKey = groth16.NewProvingKey(ecc.BN254) +var globalPkInitialized = false + func ProvePlonk(dataDir string, witnessPath string) Proof { // Sanity check the required arguments have been provided. if dataDir == "" { @@ -90,72 +100,77 @@ func ProveGroth16(dataDir string, witnessPath string) Proof { if dataDir == "" { panic("dataDirStr is required") } + + start := time.Now() os.Setenv("CONSTRAINTS_JSON", dataDir+"/"+constraintsJsonFile) os.Setenv("GROTH16", "1") + fmt.Printf("Setting environment variables took %s\n", time.Since(start)) // Read the R1CS. - r1csFile, err := os.Open(dataDir + "/" + groth16CircuitPath) - if err != nil { - panic(err) - } - r1cs := groth16.NewCS(ecc.BN254) - r1cs.ReadFrom(r1csFile) - defer r1csFile.Close() + globalMutex.Lock() + if !globalR1csInitialized { + start = time.Now() + r1csFile, err := os.Open(dataDir + "/" + groth16CircuitPath) + if err != nil { + panic(err) + } + r1csReader := bufio.NewReaderSize(r1csFile, 1024*1024) + globalR1cs.ReadFrom(r1csReader) + defer r1csFile.Close() + globalR1csInitialized = true + fmt.Printf("Reading R1CS took %s\n", time.Since(start)) + } + globalMutex.Unlock() // Read the proving key. - pkFile, err := os.Open(dataDir + "/" + groth16PkPath) - if err != nil { - panic(err) - } - pk := groth16.NewProvingKey(ecc.BN254) - bufReader := bufio.NewReaderSize(pkFile, 1024*1024) - pk.UnsafeReadFrom(bufReader) - defer pkFile.Close() - - // Read the verifier key. - vkFile, err := os.Open(dataDir + "/" + groth16VkPath) - if err != nil { - panic(err) - } - vk := groth16.NewVerifyingKey(ecc.BN254) - vk.ReadFrom(vkFile) - defer vkFile.Close() - + globalMutex.Lock() + if !globalPkInitialized { + start = time.Now() + pkFile, err := os.Open(dataDir + "/" + groth16PkPath) + if err != nil { + panic(err) + } + pkReader := bufio.NewReaderSize(pkFile, 1024*1024) + globalPk.ReadDump(pkReader) + defer pkFile.Close() + fmt.Printf("Reading proving key took %s\n", time.Since(start)) + } + globalMutex.Unlock() + + start = time.Now() // Read the file. data, err := os.ReadFile(witnessPath) if err != nil { panic(err) } + fmt.Printf("Reading witness file took %s\n", time.Since(start)) + start = time.Now() // Deserialize the JSON data into a slice of Instruction structs var witnessInput WitnessInput err = json.Unmarshal(data, &witnessInput) if err != nil { panic(err) } + fmt.Printf("Deserializing JSON data took %s\n", time.Since(start)) + start = time.Now() // Generate the witness. assignment := NewCircuit(witnessInput) witness, err := frontend.NewWitness(&assignment, ecc.BN254.ScalarField()) if err != nil { panic(err) } - publicWitness, err := witness.Public() - if err != nil { - panic(err) - } + fmt.Printf("Generating witness took %s\n", time.Since(start)) + start = time.Now() // Generate the proof. - proof, err := groth16.Prove(r1cs, pk, witness) - if err != nil { - panic(err) - } - - // Verify proof. 
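The ProveGroth16 hunk above stops re-reading the circuit and proving key from disk on every call: both are loaded once into process-wide globals behind a mutex, and the proving key is streamed in with ReadDump, the counterpart of the pk.WriteDump change to build.go earlier in this diff. The sketch below illustrates the same load-once idea with sync.Once instead of a mutex plus boolean flags; the loader and file name are hypothetical stand-ins, not the sp1 code.

    package main

    import (
        "fmt"
        "sync"
    )

    // Process-wide cache: the expensive artifact is loaded at most once, even
    // when proofs are requested concurrently from several goroutines.
    var (
        loadOnce   sync.Once
        provingKey []byte // stand-in for groth16.ProvingKey
    )

    // loadProvingKey is a hypothetical loader; in the diff this is
    // globalPk.ReadDump over a 1 MiB bufio.Reader.
    func loadProvingKey(path string) []byte {
        fmt.Println("loading", path, "(runs exactly once)")
        return make([]byte, 1024)
    }

    func getProvingKey(path string) []byte {
        loadOnce.Do(func() { provingKey = loadProvingKey(path) })
        return provingKey
    }

    func main() {
        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                pk := getProvingKey("groth16_pk.bin") // hypothetical file name
                fmt.Println("got key of", len(pk), "bytes")
            }()
        }
        wg.Wait()
    }

One consequence of the caching worth keeping in mind: the globals pin whatever dataDir was passed on the first call, so later calls with a different directory would reuse the first circuit and key.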
- err = groth16.Verify(proof, vk, publicWitness) + proof, err := groth16.Prove(globalR1cs, globalPk, witness) if err != nil { + fmt.Printf("Error: %v\n", err) panic(err) } + fmt.Printf("Generating proof took %s\n", time.Since(start)) return NewSP1Groth16Proof(&proof, witnessInput) } diff --git a/crates/recursion/gnark-ffi/go/sp1/sp1.go b/crates/recursion/gnark-ffi/go/sp1/sp1.go index 0385a87bf6..adbd83bf99 100644 --- a/crates/recursion/gnark-ffi/go/sp1/sp1.go +++ b/crates/recursion/gnark-ffi/go/sp1/sp1.go @@ -134,6 +134,8 @@ func (circuit *Circuit) Define(api frontend.API) error { exts[cs.Args[0][0]] = fieldAPI.MulEF(exts[cs.Args[1][0]], felts[cs.Args[2][0]]) case "DivE": exts[cs.Args[0][0]] = fieldAPI.DivE(exts[cs.Args[1][0]], exts[cs.Args[2][0]]) + case "DivEF": + exts[cs.Args[0][0]] = fieldAPI.DivEF(exts[cs.Args[1][0]], felts[cs.Args[2][0]]) case "NegE": exts[cs.Args[0][0]] = fieldAPI.NegE(exts[cs.Args[1][0]]) case "InvE": @@ -182,6 +184,8 @@ func (circuit *Circuit) Define(api frontend.API) error { api.AssertIsEqual(vars[cs.Args[0][0]], vars[cs.Args[1][0]]) case "AssertEqF": fieldAPI.AssertIsEqualF(felts[cs.Args[0][0]], felts[cs.Args[1][0]]) + case "AssertNeF": + fieldAPI.AssertNotEqualF(felts[cs.Args[0][0]], felts[cs.Args[1][0]]) case "AssertEqE": fieldAPI.AssertIsEqualE(exts[cs.Args[0][0]], exts[cs.Args[1][0]]) case "PrintV": diff --git a/crates/recursion/gnark-ffi/src/groth16_bn254.rs b/crates/recursion/gnark-ffi/src/groth16_bn254.rs index 063ca25ccd..f2864df5bb 100644 --- a/crates/recursion/gnark-ffi/src/groth16_bn254.rs +++ b/crates/recursion/gnark-ffi/src/groth16_bn254.rs @@ -55,6 +55,21 @@ impl Groth16Bn254Prover { ) } + pub fn build_contracts(build_dir: PathBuf) { + // Write the corresponding asset files to the build dir. + let sp1_verifier_path = build_dir.join("SP1VerifierGroth16.sol"); + let vkey_hash = Self::get_vkey_hash(&build_dir); + let sp1_verifier_str = include_str!("../assets/SP1VerifierGroth16.txt") + .replace("{SP1_CIRCUIT_VERSION}", SP1_CIRCUIT_VERSION) + .replace("{VERIFIER_HASH}", format!("0x{}", hex::encode(vkey_hash)).as_str()) + .replace("{PROOF_SYSTEM}", "Groth16"); + let mut sp1_verifier_file = File::create(sp1_verifier_path).unwrap(); + sp1_verifier_file.write_all(sp1_verifier_str.as_bytes()).unwrap(); + + let groth16_verifier_path = build_dir.join("Groth16Verifier.sol"); + Self::modify_groth16_verifier(&groth16_verifier_path); + } + /// Builds the Groth16 circuit locally. pub fn build(constraints: Vec, witness: Witness, build_dir: PathBuf) { let serialized = serde_json::to_string(&constraints).unwrap(); @@ -71,20 +86,11 @@ impl Groth16Bn254Prover { let serialized = serde_json::to_string(&gnark_witness).unwrap(); file.write_all(serialized.as_bytes()).unwrap(); + // Build the circuit. build_groth16_bn254(build_dir.to_str().unwrap()); - // Write the corresponding asset files to the build dir. - let sp1_verifier_path = build_dir.join("SP1VerifierGroth16.sol"); - let vkey_hash = Self::get_vkey_hash(&build_dir); - let sp1_verifier_str = include_str!("../assets/SP1VerifierGroth16.txt") - .replace("{SP1_CIRCUIT_VERSION}", SP1_CIRCUIT_VERSION) - .replace("{VERIFIER_HASH}", format!("0x{}", hex::encode(vkey_hash)).as_str()) - .replace("{PROOF_SYSTEM}", "Groth16"); - let mut sp1_verifier_file = File::create(sp1_verifier_path).unwrap(); - sp1_verifier_file.write_all(sp1_verifier_str.as_bytes()).unwrap(); - - let groth16_verifier_path = build_dir.join("Groth16Verifier.sol"); - Self::modify_groth16_verifier(&groth16_verifier_path); + // Build the contracts. 
+ Self::build_contracts(build_dir); } /// Generates a Groth16 proof given a witness. diff --git a/crates/recursion/program/CHANGELOG.md b/crates/recursion/program/CHANGELOG.md deleted file mode 100644 index 7a5991ce79..0000000000 --- a/crates/recursion/program/CHANGELOG.md +++ /dev/null @@ -1,153 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - -## [1.1.0](https://github.com/succinctlabs/sp1/compare/sp1-recursion-program-v1.0.1...sp1-recursion-program-v1.1.0) - 2024-08-02 - -### Added -- update tg ([#1214](https://github.com/succinctlabs/sp1/pull/1214)) -- streaming recursion ([#1175](https://github.com/succinctlabs/sp1/pull/1175)) - -### Other -- Merge branch 'main' into dev -- prover trait cleanup ([#1170](https://github.com/succinctlabs/sp1/pull/1170)) -- add audit reports ([#1142](https://github.com/succinctlabs/sp1/pull/1142)) - -## [1.0.0-rc1](https://github.com/succinctlabs/sp1/compare/sp1-recursion-program-v1.0.0-rc1...sp1-recursion-program-v1.0.0-rc1) - 2024-07-19 - -### Added - -- parallel recursion tracegen ([#1095](https://github.com/succinctlabs/sp1/pull/1095)) -- result instead of exit(1) on trap in recursion ([#1089](https://github.com/succinctlabs/sp1/pull/1089)) -- publish sp1 to crates.io ([#1052](https://github.com/succinctlabs/sp1/pull/1052)) -- critical constraint changes ([#1046](https://github.com/succinctlabs/sp1/pull/1046)) -- suggest prover network if high cycles ([#1019](https://github.com/succinctlabs/sp1/pull/1019)) -- plonk circuit optimizations ([#972](https://github.com/succinctlabs/sp1/pull/972)) -- poseidon2 hash ([#885](https://github.com/succinctlabs/sp1/pull/885)) -- generic const expr ([#854](https://github.com/succinctlabs/sp1/pull/854)) -- sp1 core prover opts -- exit code ([#750](https://github.com/succinctlabs/sp1/pull/750)) -- _(recursion)_ public values constraints ([#748](https://github.com/succinctlabs/sp1/pull/748)) -- reduce network prover ([#687](https://github.com/succinctlabs/sp1/pull/687)) -- fix execution + proving errors ([#715](https://github.com/succinctlabs/sp1/pull/715)) -- _(recursion)_ HALT instruction ([#703](https://github.com/succinctlabs/sp1/pull/703)) -- ci refactor ([#684](https://github.com/succinctlabs/sp1/pull/684)) -- program refactor ([#651](https://github.com/succinctlabs/sp1/pull/651)) -- Adding docs for new `ProverClient` and `groth16` and `compressed` mode ([#627](https://github.com/succinctlabs/sp1/pull/627)) -- arbitrary degree in recursion ([#605](https://github.com/succinctlabs/sp1/pull/605)) -- prover tweaks pt 2 ([#607](https://github.com/succinctlabs/sp1/pull/607)) -- prover tweaks ([#603](https://github.com/succinctlabs/sp1/pull/603)) -- _(recursion)_ memory access timestamp constraints ([#589](https://github.com/succinctlabs/sp1/pull/589)) -- enable arbitrary constraint degree ([#593](https://github.com/succinctlabs/sp1/pull/593)) -- recursion compress layer + RecursionAirWideDeg3 + RecursionAirSkinnyDeg7 + optimized groth16 ([#590](https://github.com/succinctlabs/sp1/pull/590)) -- _(Recursion)_ evaluate constraints in a single expression ([#592](https://github.com/succinctlabs/sp1/pull/592)) -- expression caching ([#586](https://github.com/succinctlabs/sp1/pull/586)) -- complete reduce program ([#565](https://github.com/succinctlabs/sp1/pull/565)) -- e2e groth16 flow 
([#549](https://github.com/succinctlabs/sp1/pull/549))
- stark cleanup and verification ([#556](https://github.com/succinctlabs/sp1/pull/556))
- recursion experiments ([#522](https://github.com/succinctlabs/sp1/pull/522))
- groth16 circuit build script ([#541](https://github.com/succinctlabs/sp1/pull/541))
- verify shard transitions + fixes ([#482](https://github.com/succinctlabs/sp1/pull/482))
- nested sp1 proof verification ([#494](https://github.com/succinctlabs/sp1/pull/494))
- verify pc and shard transition in recursive proofs ([#514](https://github.com/succinctlabs/sp1/pull/514))
- recursion profiling ([#521](https://github.com/succinctlabs/sp1/pull/521))
- update to latest p3 ([#515](https://github.com/succinctlabs/sp1/pull/515))
- gnark wrap test + cleanup ([#511](https://github.com/succinctlabs/sp1/pull/511))
- 0 cycle input for recursion program ([#510](https://github.com/succinctlabs/sp1/pull/510))
- reduce with different configs ([#508](https://github.com/succinctlabs/sp1/pull/508))
- sdk using secp256k1 auth ([#483](https://github.com/succinctlabs/sp1/pull/483))
- logup batching ([#487](https://github.com/succinctlabs/sp1/pull/487))
- _(recursion)_ reduce N sp1/recursive proofs ([#503](https://github.com/succinctlabs/sp1/pull/503))
- recursion optimizations + compiler cleanup ([#499](https://github.com/succinctlabs/sp1/pull/499))
- recursion vm public values ([#495](https://github.com/succinctlabs/sp1/pull/495))
- cleanup compiler ir ([#496](https://github.com/succinctlabs/sp1/pull/496))
- shard transition public values ([#466](https://github.com/succinctlabs/sp1/pull/466))
- recursion permutation challenges as variables ([#486](https://github.com/succinctlabs/sp1/pull/486))
- add support for witness in programs ([#476](https://github.com/succinctlabs/sp1/pull/476))
- fri-fold precompile ([#479](https://github.com/succinctlabs/sp1/pull/479))
- setup recursion prover crate ([#475](https://github.com/succinctlabs/sp1/pull/475))
- gnark recursive verifier ([#457](https://github.com/succinctlabs/sp1/pull/457))
- add shard to byte and program table ([#463](https://github.com/succinctlabs/sp1/pull/463))
- recursion cpu constraints ([#464](https://github.com/succinctlabs/sp1/pull/464))
- public values ([#455](https://github.com/succinctlabs/sp1/pull/455))
- Preprocessing + recursion ([#450](https://github.com/succinctlabs/sp1/pull/450))
- sp1-sdk, remote prover ([#370](https://github.com/succinctlabs/sp1/pull/370))
- _(precompile)_ add bn254 precompile ([#384](https://github.com/succinctlabs/sp1/pull/384))
- verify shard ([#444](https://github.com/succinctlabs/sp1/pull/444))
- _(WIP)_ end-to-end verifier ([#439](https://github.com/succinctlabs/sp1/pull/439))
- new README img ([#226](https://github.com/succinctlabs/sp1/pull/226))
- readme updates ([#205](https://github.com/succinctlabs/sp1/pull/205))
- more final touches ([#194](https://github.com/succinctlabs/sp1/pull/194))
- curtaup + release system + cargo prove CLI updates ([#178](https://github.com/succinctlabs/sp1/pull/178))
- (perf) updates from Plonky3 and verifier refactor ([#156](https://github.com/succinctlabs/sp1/pull/156))
- developer experience improvements ([#145](https://github.com/succinctlabs/sp1/pull/145))
- toolchain build from source & install ([#113](https://github.com/succinctlabs/sp1/pull/113))
- io::read io::write ([#126](https://github.com/succinctlabs/sp1/pull/126))
- tracing, profiling, benchmarking ([#99](https://github.com/succinctlabs/sp1/pull/99))

### Fixed

- incorrect checks on deferred digest ([#1116](https://github.com/succinctlabs/sp1/pull/1116))
- use correct value for blowup ([#965](https://github.com/succinctlabs/sp1/pull/965))
- p3 audit change ([#964](https://github.com/succinctlabs/sp1/pull/964))
- some informational fixes from veridise audit ([#953](https://github.com/succinctlabs/sp1/pull/953))
- set sponge state to be zero ([#951](https://github.com/succinctlabs/sp1/pull/951))
- range check for shard number in recursion ([#952](https://github.com/succinctlabs/sp1/pull/952))
- memory finalize duplicate address attack from audit ([#934](https://github.com/succinctlabs/sp1/pull/934))
- fix things
- unnecessary pc constraint ([#749](https://github.com/succinctlabs/sp1/pull/749))
- _(recursion)_ enable mul constraint ([#686](https://github.com/succinctlabs/sp1/pull/686))
- verify reduced proofs ([#655](https://github.com/succinctlabs/sp1/pull/655))
- high degree constraints in recursion ([#619](https://github.com/succinctlabs/sp1/pull/619))
- deferred proofs + cleanup hash_vkey ([#615](https://github.com/succinctlabs/sp1/pull/615))
- observe only non-padded public values ([#523](https://github.com/succinctlabs/sp1/pull/523))
- broken e2e recursion
- don't observe padded public values ([#520](https://github.com/succinctlabs/sp1/pull/520))
- public inputs in recursion program ([#467](https://github.com/succinctlabs/sp1/pull/467))

### Other

- use global workspace version ([#1102](https://github.com/succinctlabs/sp1/pull/1102))
- fix release-plz ([#1088](https://github.com/succinctlabs/sp1/pull/1088))
- add release-plz ([#1086](https://github.com/succinctlabs/sp1/pull/1086))
- _(deps)_ bump serde from 1.0.203 to 1.0.204 ([#1063](https://github.com/succinctlabs/sp1/pull/1063))
- updated p3 dependency to 0.1.3 ([#1059](https://github.com/succinctlabs/sp1/pull/1059))
- merge main -> dev ([#969](https://github.com/succinctlabs/sp1/pull/969))
- Fixes from review.
- Reverted to exp_rev_bits_len_fast
- please clippy
- Merge branch 'dev' into erabinov/exp_rev_precompile
- Version of exp_rev_precompile
- fixes ([#821](https://github.com/succinctlabs/sp1/pull/821))
- program doc and remove unnecessary clones ([#857](https://github.com/succinctlabs/sp1/pull/857))
- recursive program docs ([#855](https://github.com/succinctlabs/sp1/pull/855))
- fmt
- change challenger rate from 16 to 8 ([#807](https://github.com/succinctlabs/sp1/pull/807))
- remove todos in recursion ([#809](https://github.com/succinctlabs/sp1/pull/809))
- require cpu shard in verifier ([#808](https://github.com/succinctlabs/sp1/pull/808))
- clippy
- hm
- Make some functions const ([#774](https://github.com/succinctlabs/sp1/pull/774))
- remove unused deps ([#794](https://github.com/succinctlabs/sp1/pull/794))
- Clean up TOML files ([#796](https://github.com/succinctlabs/sp1/pull/796))
- update all dependencies ([#689](https://github.com/succinctlabs/sp1/pull/689))
- fixing dep tree for `prover`, `recursion`, `core` and `sdk` ([#545](https://github.com/succinctlabs/sp1/pull/545))
- cleanup prover ([#551](https://github.com/succinctlabs/sp1/pull/551))
- cleanup program + add missing constraints ([#547](https://github.com/succinctlabs/sp1/pull/547))
- make ci faster ([#536](https://github.com/succinctlabs/sp1/pull/536))
- _(recursion)_ reduce program ([#497](https://github.com/succinctlabs/sp1/pull/497))
- for loop optimizations
- update to latest plonky3 main ([#491](https://github.com/succinctlabs/sp1/pull/491))
- final touches for public release ([#239](https://github.com/succinctlabs/sp1/pull/239))
- update docs with slight nits ([#224](https://github.com/succinctlabs/sp1/pull/224))
- sp1 rename ([#212](https://github.com/succinctlabs/sp1/pull/212))
- enshrine AlignedBorrow macro ([#209](https://github.com/succinctlabs/sp1/pull/209))
- readme cleanup ([#196](https://github.com/succinctlabs/sp1/pull/196))
- rename succinct to curta ([#192](https://github.com/succinctlabs/sp1/pull/192))
- better curta graphic ([#184](https://github.com/succinctlabs/sp1/pull/184))
- Initial commit

diff --git a/crates/recursion/program/Cargo.toml b/crates/recursion/program/Cargo.toml deleted file mode 100644 index ba8633caab..0000000000 --- a/crates/recursion/program/Cargo.toml +++ /dev/null @@ -1,39 +0,0 @@ -[package] -name = "sp1-recursion-program" -description = "SP1 is a performant, 100% open-source, contributor-friendly zkVM."
-readme = "../../../README.md" -version = { workspace = true } -edition = { workspace = true } -license = { workspace = true } -repository = { workspace = true } -keywords = { workspace = true } -categories = { workspace = true } - -[dependencies] -p3-air = { workspace = true } -p3-field = { workspace = true } -p3-commit = { workspace = true } -p3-fri = { workspace = true } -p3-matrix = { workspace = true } -p3-maybe-rayon = { workspace = true } -p3-util = { workspace = true } -p3-symmetric = { workspace = true } -p3-challenger = { workspace = true } -p3-baby-bear = { workspace = true } -p3-dft = { workspace = true } -p3-merkle-tree = { workspace = true } -p3-poseidon2 = { workspace = true } -sp1-recursion-core = { workspace = true } -sp1-recursion-compiler = { workspace = true } -sp1-core-machine = { workspace = true } -sp1-primitives = { workspace = true } -sp1-stark = { workspace = true } -sp1-core-executor = { workspace = true } -itertools = "0.13.0" -serde = { version = "1.0.204", features = ["derive"] } -rand = "0.8.5" -tracing = "0.1.40" -stacker = "0.1" - -[features] -debug = ["sp1-core-machine/debug"] diff --git a/crates/recursion/program/src/challenger.rs b/crates/recursion/program/src/challenger.rs deleted file mode 100644 index 8a3732f47a..0000000000 --- a/crates/recursion/program/src/challenger.rs +++ /dev/null @@ -1,350 +0,0 @@ -use p3_field::AbstractField; -use sp1_recursion_compiler::prelude::{ - Array, Builder, Config, DslVariable, Ext, Felt, MemIndex, MemVariable, Ptr, Usize, Var, - Variable, -}; -use sp1_recursion_core::runtime::{DIGEST_SIZE, HASH_RATE, PERMUTATION_WIDTH}; - -use crate::{fri::types::DigestVariable, types::VerifyingKeyVariable}; - -/// Reference: [p3_challenger::CanObserve]. -pub trait CanObserveVariable { - fn observe(&mut self, builder: &mut Builder, value: V); - - fn observe_slice(&mut self, builder: &mut Builder, values: Array); -} - -pub trait CanSampleVariable { - fn sample(&mut self, builder: &mut Builder) -> V; -} - -/// Reference: [p3_challenger::FieldChallenger]. -pub trait FeltChallenger: - CanObserveVariable> + CanSampleVariable> + CanSampleBitsVariable -{ - fn sample_ext(&mut self, builder: &mut Builder) -> Ext; -} - -pub trait CanSampleBitsVariable { - fn sample_bits( - &mut self, - builder: &mut Builder, - nb_bits: Usize, - ) -> Array>; -} - -/// Reference: [p3_challenger::DuplexChallenger] -#[derive(Clone, DslVariable)] -pub struct DuplexChallengerVariable { - pub sponge_state: Array>, - pub nb_inputs: Var, - pub input_buffer: Array>, - pub nb_outputs: Var, - pub output_buffer: Array>, -} - -impl DuplexChallengerVariable { - /// Creates a new duplex challenger with the default state. - pub fn new(builder: &mut Builder) -> Self { - let mut result = DuplexChallengerVariable:: { - sponge_state: builder.dyn_array(PERMUTATION_WIDTH), - nb_inputs: builder.eval(C::N::zero()), - input_buffer: builder.dyn_array(PERMUTATION_WIDTH), - nb_outputs: builder.eval(C::N::zero()), - output_buffer: builder.dyn_array(PERMUTATION_WIDTH), - }; - - // Constrain the state of the challenger to contain all zeroes. - builder.range(0, PERMUTATION_WIDTH).for_each(|i, builder| { - builder.set(&mut result.sponge_state, i, C::F::zero()); - builder.set(&mut result.input_buffer, i, C::F::zero()); - builder.set(&mut result.output_buffer, i, C::F::zero()); - }); - result - } - - /// Creates a new challenger with the same state as an existing challenger. 
- pub fn copy(&self, builder: &mut Builder) -> Self { - let mut sponge_state = builder.dyn_array(PERMUTATION_WIDTH); - builder.range(0, PERMUTATION_WIDTH).for_each(|i, builder| { - let element = builder.get(&self.sponge_state, i); - builder.set(&mut sponge_state, i, element); - }); - let nb_inputs = builder.eval(self.nb_inputs); - let mut input_buffer = builder.dyn_array(PERMUTATION_WIDTH); - builder.range(0, PERMUTATION_WIDTH).for_each(|i, builder| { - let element = builder.get(&self.input_buffer, i); - builder.set(&mut input_buffer, i, element); - }); - let nb_outputs = builder.eval(self.nb_outputs); - let mut output_buffer = builder.dyn_array(PERMUTATION_WIDTH); - builder.range(0, PERMUTATION_WIDTH).for_each(|i, builder| { - let element = builder.get(&self.output_buffer, i); - builder.set(&mut output_buffer, i, element); - }); - DuplexChallengerVariable:: { - sponge_state, - nb_inputs, - input_buffer, - nb_outputs, - output_buffer, - } - } - - /// Asserts that the state of this challenger is equal to the state of another challenger. - pub fn assert_eq(&self, builder: &mut Builder, other: &Self) { - builder.assert_var_eq(self.nb_inputs, other.nb_inputs); - builder.assert_var_eq(self.nb_outputs, other.nb_outputs); - builder.range(0, PERMUTATION_WIDTH).for_each(|i, builder| { - let element = builder.get(&self.sponge_state, i); - let other_element = builder.get(&other.sponge_state, i); - builder.assert_felt_eq(element, other_element); - }); - builder.range(0, self.nb_inputs).for_each(|i, builder| { - let element = builder.get(&self.input_buffer, i); - let other_element = builder.get(&other.input_buffer, i); - builder.assert_felt_eq(element, other_element); - }); - builder.range(0, self.nb_outputs).for_each(|i, builder| { - let element = builder.get(&self.output_buffer, i); - let other_element = builder.get(&other.output_buffer, i); - builder.assert_felt_eq(element, other_element); - }); - } - - pub fn reset(&mut self, builder: &mut Builder) { - let zero: Var<_> = builder.eval(C::N::zero()); - let zero_felt: Felt<_> = builder.eval(C::F::zero()); - builder.range(0, PERMUTATION_WIDTH).for_each(|i, builder| { - builder.set(&mut self.sponge_state, i, zero_felt); - }); - builder.assign(self.nb_inputs, zero); - builder.range(0, PERMUTATION_WIDTH).for_each(|i, builder| { - builder.set(&mut self.input_buffer, i, zero_felt); - }); - builder.assign(self.nb_outputs, zero); - builder.range(0, PERMUTATION_WIDTH).for_each(|i, builder| { - builder.set(&mut self.output_buffer, i, zero_felt); - }); - } - - pub fn duplexing(&mut self, builder: &mut Builder) { - builder.range(0, self.nb_inputs).for_each(|i, builder| { - let element = builder.get(&self.input_buffer, i); - builder.set(&mut self.sponge_state, i, element); - }); - builder.assign(self.nb_inputs, C::N::zero()); - - builder.poseidon2_permute_mut(&self.sponge_state); - - builder.assign(self.nb_outputs, C::N::zero()); - - for i in 0..PERMUTATION_WIDTH { - let element = builder.get(&self.sponge_state, i); - builder.set(&mut self.output_buffer, i, element); - builder.assign(self.nb_outputs, self.nb_outputs + C::N::one()); - } - } - - fn observe(&mut self, builder: &mut Builder, value: Felt) { - builder.assign(self.nb_outputs, C::N::zero()); - - builder.set(&mut self.input_buffer, self.nb_inputs, value); - builder.assign(self.nb_inputs, self.nb_inputs + C::N::one()); - - builder.if_eq(self.nb_inputs, C::N::from_canonical_usize(HASH_RATE)).then(|builder| { - self.duplexing(builder); - }) - } - - fn observe_commitment(&mut self, builder: &mut Builder, 
commitment: DigestVariable) { - for i in 0..DIGEST_SIZE { - let element = builder.get(&commitment, i); - self.observe(builder, element); - } - } - - fn sample(&mut self, builder: &mut Builder) -> Felt { - let zero: Var<_> = builder.eval(C::N::zero()); - builder.if_ne(self.nb_inputs, zero).then_or_else( - |builder| { - self.clone().duplexing(builder); - }, - |builder| { - builder.if_eq(self.nb_outputs, zero).then(|builder| { - self.clone().duplexing(builder); - }); - }, - ); - let idx: Var<_> = builder.eval(self.nb_outputs - C::N::one()); - let output = builder.get(&self.output_buffer, idx); - builder.assign(self.nb_outputs, self.nb_outputs - C::N::one()); - output - } - - fn sample_ext(&mut self, builder: &mut Builder) -> Ext { - let a = self.sample(builder); - let b = self.sample(builder); - let c = self.sample(builder); - let d = self.sample(builder); - builder.ext_from_base_slice(&[a, b, c, d]) - } - - fn sample_bits( - &mut self, - builder: &mut Builder, - nb_bits: Usize, - ) -> Array> { - let rand_f = self.sample(builder); - let mut bits = builder.num2bits_f(rand_f); - - builder.range(nb_bits, bits.len()).for_each(|i, builder| { - builder.set(&mut bits, i, C::N::zero()); - }); - - bits - } - - pub fn check_witness( - &mut self, - builder: &mut Builder, - nb_bits: Var, - witness: Felt, - ) { - self.observe(builder, witness); - let element_bits = self.sample_bits(builder, nb_bits.into()); - builder.range(0, nb_bits).for_each(|i, builder| { - let element = builder.get(&element_bits, i); - builder.assert_var_eq(element, C::N::zero()); - }); - } -} - -impl CanObserveVariable> for DuplexChallengerVariable { - fn observe(&mut self, builder: &mut Builder, value: Felt) { - DuplexChallengerVariable::observe(self, builder, value); - } - - fn observe_slice(&mut self, builder: &mut Builder, values: Array>) { - match values { - Array::Dyn(_, len) => { - builder.range(0, len).for_each(|i, builder| { - let element = builder.get(&values, i); - self.observe(builder, element); - }); - } - Array::Fixed(values) => { - values.iter().for_each(|value| { - self.observe(builder, *value); - }); - } - } - } -} - -impl CanSampleVariable> for DuplexChallengerVariable { - fn sample(&mut self, builder: &mut Builder) -> Felt { - DuplexChallengerVariable::sample(self, builder) - } -} - -impl CanSampleBitsVariable for DuplexChallengerVariable { - fn sample_bits( - &mut self, - builder: &mut Builder, - nb_bits: Usize, - ) -> Array> { - DuplexChallengerVariable::sample_bits(self, builder, nb_bits) - } -} - -impl CanObserveVariable> for DuplexChallengerVariable { - fn observe(&mut self, builder: &mut Builder, commitment: DigestVariable) { - DuplexChallengerVariable::observe_commitment(self, builder, commitment); - } - - fn observe_slice(&mut self, _builder: &mut Builder, _values: Array>) { - todo!() - } -} - -impl CanObserveVariable> for DuplexChallengerVariable { - fn observe(&mut self, builder: &mut Builder, value: VerifyingKeyVariable) { - self.observe_commitment(builder, value.commitment); - self.observe(builder, value.pc_start) - } - - fn observe_slice( - &mut self, - _builder: &mut Builder, - _values: Array>, - ) { - todo!() - } -} - -impl FeltChallenger for DuplexChallengerVariable { - fn sample_ext(&mut self, builder: &mut Builder) -> Ext { - DuplexChallengerVariable::sample_ext(self, builder) - } -} - -#[cfg(test)] -mod tests { - use p3_challenger::{CanObserve, CanSample}; - use p3_field::AbstractField; - - use sp1_recursion_compiler::{ - asm::{AsmBuilder, AsmConfig}, - ir::{Felt, Usize, Var}, - }; - - use 
sp1_recursion_core::{ - runtime::PERMUTATION_WIDTH, - stark::utils::{run_test_recursion, TestConfig}, - }; - use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; - - use crate::challenger::DuplexChallengerVariable; - - #[test] - fn test_compiler_challenger() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - - let config = SC::default(); - let mut challenger = config.challenger(); - challenger.observe(F::one()); - challenger.observe(F::two()); - challenger.observe(F::two()); - challenger.observe(F::two()); - let result: F = challenger.sample(); - println!("expected result: {}", result); - - let mut builder = AsmBuilder::::default(); - - let width: Var<_> = builder.eval(F::from_canonical_usize(PERMUTATION_WIDTH)); - let mut challenger = DuplexChallengerVariable::> { - sponge_state: builder.array(Usize::Var(width)), - nb_inputs: builder.eval(F::zero()), - input_buffer: builder.array(Usize::Var(width)), - nb_outputs: builder.eval(F::zero()), - output_buffer: builder.array(Usize::Var(width)), - }; - let one: Felt<_> = builder.eval(F::one()); - let two: Felt<_> = builder.eval(F::two()); - builder.halt(); - challenger.observe(&mut builder, one); - challenger.observe(&mut builder, two); - challenger.observe(&mut builder, two); - challenger.observe(&mut builder, two); - let element = challenger.sample(&mut builder); - - let expected_result: Felt<_> = builder.eval(result); - builder.assert_felt_eq(expected_result, element); - - let program = builder.compile_program(); - run_test_recursion(program, None, TestConfig::All); - } -} diff --git a/crates/recursion/program/src/commit.rs b/crates/recursion/program/src/commit.rs deleted file mode 100644 index 100d74c405..0000000000 --- a/crates/recursion/program/src/commit.rs +++ /dev/null @@ -1,58 +0,0 @@ -use p3_commit::{LagrangeSelectors, PolynomialSpace}; -use sp1_recursion_compiler::ir::{Array, Builder, Config, Ext, FromConstant, Usize}; - -use crate::fri::types::{FriConfigVariable, TwoAdicPcsRoundVariable}; - -/// Reference: [p3_commit::PolynomialSpace] -pub trait PolynomialSpaceVariable: Sized + FromConstant { - type Constant: PolynomialSpace; - - fn next_point(&self, builder: &mut Builder, point: Ext) -> Ext; - - fn selectors_at_point( - &self, - builder: &mut Builder, - point: Ext, - ) -> LagrangeSelectors>; - - fn zp_at_point(&self, builder: &mut Builder, point: Ext) -> Ext; - - fn split_domains( - &self, - builder: &mut Builder, - log_num_chunks: impl Into>, - num_chunks: impl Into>, - ) -> Array; - - fn split_domains_const(&self, _: &mut Builder, log_num_chunks: usize) -> Vec; - - fn create_disjoint_domain( - &self, - builder: &mut Builder, - log_degree: Usize, - config: Option>, - ) -> Self; -} - -/// Reference: [p3_commit::Pcs] -pub trait PcsVariable { - type Domain: PolynomialSpaceVariable; - - type Commitment; - - type Proof; - - fn natural_domain_for_log_degree( - &self, - builder: &mut Builder, - log_degree: Usize, - ) -> Self::Domain; - - fn verify( - &self, - builder: &mut Builder, - rounds: Array>, - proof: Self::Proof, - challenger: &mut Challenger, - ); -} diff --git a/crates/recursion/program/src/constraints.rs b/crates/recursion/program/src/constraints.rs deleted file mode 100644 index 3c92440992..0000000000 --- a/crates/recursion/program/src/constraints.rs +++ /dev/null @@ -1,384 +0,0 @@ -use p3_air::Air; -use p3_commit::LagrangeSelectors; -use p3_field::{AbstractExtensionField, AbstractField, TwoAdicField}; - -use sp1_recursion_compiler::{ - ir::{Array, Felt}, - prelude::{Builder, 
Config, Ext, ExtConst, SymbolicExt}, -}; -use sp1_stark::{ - air::MachineAir, AirOpenedValues, MachineChip, StarkGenericConfig, PROOF_MAX_NUM_PVS, -}; - -use crate::{ - commit::PolynomialSpaceVariable, - fri::TwoAdicMultiplicativeCosetVariable, - stark::{RecursiveVerifierConstraintFolder, StarkVerifier}, - types::{ChipOpenedValuesVariable, ChipOpening}, -}; - -impl StarkVerifier -where - SC: StarkGenericConfig, - C::F: TwoAdicField, -{ - fn eval_constrains( - builder: &mut Builder, - chip: &MachineChip, - opening: &ChipOpening, - public_values: Array>, - selectors: &LagrangeSelectors>, - alpha: Ext, - permutation_challenges: &[Ext], - ) -> Ext - where - A: for<'b> Air>, - { - let mut unflatten = |v: &[Ext]| { - v.chunks_exact(SC::Challenge::D) - .map(|chunk| { - builder.eval( - chunk - .iter() - .enumerate() - .map(|(e_i, &x)| x * C::EF::monomial(e_i).cons()) - .sum::>(), - ) - }) - .collect::>>() - }; - let perm_opening = AirOpenedValues { - local: unflatten(&opening.permutation.local), - next: unflatten(&opening.permutation.next), - }; - - let mut folder_pv = Vec::new(); - for i in 0..PROOF_MAX_NUM_PVS { - folder_pv.push(builder.get(&public_values, i)); - } - - let mut folder = RecursiveVerifierConstraintFolder:: { - preprocessed: opening.preprocessed.view(), - main: opening.main.view(), - perm: perm_opening.view(), - perm_challenges: permutation_challenges, - cumulative_sum: opening.cumulative_sum, - public_values: &folder_pv, - is_first_row: selectors.is_first_row, - is_last_row: selectors.is_last_row, - is_transition: selectors.is_transition, - alpha, - accumulator: SymbolicExt::zero(), - _marker: std::marker::PhantomData, - }; - - chip.eval(&mut folder); - builder.eval(folder.accumulator) - } - - fn recompute_quotient( - builder: &mut Builder, - opening: &ChipOpening, - qc_domains: Vec>, - zeta: Ext, - ) -> Ext { - let zps = qc_domains - .iter() - .enumerate() - .map(|(i, domain)| { - qc_domains - .iter() - .enumerate() - .filter(|(j, _)| *j != i) - .map(|(_, other_domain)| { - let first_point: Ext<_, _> = builder.eval(domain.first_point()); - other_domain.zp_at_point(builder, zeta) - * other_domain.zp_at_point(builder, first_point).inverse() - }) - .product::>() - }) - .collect::>>() - .into_iter() - .map(|x| builder.eval(x)) - .collect::>>(); - - builder.eval( - opening - .quotient - .iter() - .enumerate() - .map(|(ch_i, ch)| { - assert_eq!(ch.len(), C::EF::D); - ch.iter() - .enumerate() - .map(|(e_i, &c)| zps[ch_i] * C::EF::monomial(e_i) * c) - .sum::>() - }) - .sum::>(), - ) - } - - /// Reference: [sp1_core_machine::stark::Verifier::verify_constraints] - pub fn verify_constraints( - builder: &mut Builder, - chip: &MachineChip, - opening: &ChipOpenedValuesVariable, - public_values: Array>, - trace_domain: TwoAdicMultiplicativeCosetVariable, - qc_domains: Vec>, - zeta: Ext, - alpha: Ext, - permutation_challenges: &[Ext], - ) where - A: MachineAir + for<'a> Air>, - { - let opening = ChipOpening::from_variable(builder, chip, opening); - let sels = trace_domain.selectors_at_point(builder, zeta); - - let folded_constraints = Self::eval_constrains( - builder, - chip, - &opening, - public_values, - &sels, - alpha, - permutation_challenges, - ); - - let quotient: Ext<_, _> = Self::recompute_quotient(builder, &opening, qc_domains, zeta); - - // Assert that the quotient times the zerofier is equal to the folded constraints. 
- builder.assert_ext_eq(folded_constraints * sels.inv_zeroifier, quotient); - } -} - -#[cfg(test)] -mod tests { - use itertools::{izip, Itertools}; - use rand::{thread_rng, Rng}; - - use sp1_core_executor::Program; - use sp1_core_machine::{io::SP1Stdin, riscv::RiscvAir}; - use sp1_recursion_core::stark::utils::{run_test_recursion, TestConfig}; - - use p3_challenger::{CanObserve, FieldChallenger}; - use sp1_recursion_compiler::{asm::AsmBuilder, ir::Felt, prelude::ExtConst}; - - use p3_commit::{Pcs, PolynomialSpace}; - use sp1_stark::{ - baby_bear_poseidon2::BabyBearPoseidon2, Chip, Com, CpuProver, Dom, OpeningProof, - PcsProverData, SP1CoreOpts, ShardCommitment, ShardProof, StarkGenericConfig, StarkMachine, - }; - - use crate::stark::StarkVerifier; - - #[allow(clippy::type_complexity)] - fn get_shard_data<'a, SC>( - machine: &'a StarkMachine>, - proof: &'a ShardProof, - challenger: &mut SC::Challenger, - ) -> ( - Vec<&'a Chip>>, - Vec>, - Vec>>, - Vec, - SC::Challenge, - SC::Challenge, - ) - where - SC: StarkGenericConfig + Default, - SC::Challenger: Clone, - OpeningProof: Send + Sync, - Com: Send + Sync, - PcsProverData: Send + Sync, - SC::Val: p3_field::PrimeField32, - { - let ShardProof { commitment, opened_values, .. } = proof; - - let ShardCommitment { permutation_commit, quotient_commit, .. } = commitment; - - // Extract verification metadata. - let pcs = machine.config().pcs(); - - let permutation_challenges = - (0..2).map(|_| challenger.sample_ext_element::()).collect::>(); - - challenger.observe(permutation_commit.clone()); - - let alpha = challenger.sample_ext_element::(); - - // Observe the quotient commitments. - challenger.observe(quotient_commit.clone()); - - let zeta = challenger.sample_ext_element::(); - - let chips = machine.shard_chips_ordered(&proof.chip_ordering).collect::>(); - - let log_degrees = opened_values.chips.iter().map(|val| val.log_degree).collect::>(); - - let log_quotient_degrees = - chips.iter().map(|chip| chip.log_quotient_degree()).collect::>(); - - let trace_domains = log_degrees - .iter() - .map(|log_degree| pcs.natural_domain_for_degree(1 << log_degree)) - .collect::>(); - - let quotient_chunk_domains = trace_domains - .iter() - .zip_eq(log_degrees) - .zip_eq(log_quotient_degrees) - .map(|((domain, log_degree), log_quotient_degree)| { - let quotient_degree = 1 << log_quotient_degree; - let quotient_domain = - domain.create_disjoint_domain(1 << (log_degree + log_quotient_degree)); - quotient_domain.split_domains(quotient_degree) - }) - .collect::>(); - - (chips, trace_domains, quotient_chunk_domains, permutation_challenges, alpha, zeta) - } - - #[test] - fn test_verify_constraints() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - type A = RiscvAir; - - // Generate a dummy proof. 
- sp1_core_machine::utils::setup_logger(); - let elf = include_bytes!("../../../../tests/fibonacci/elf/riscv32im-succinct-zkvm-elf"); - - let machine = A::machine(SC::default()); - let (_, vk) = machine.setup(&Program::from(elf).unwrap()); - let mut challenger = machine.config().challenger(); - let (proof, _, _) = sp1_core_machine::utils::prove::<_, CpuProver<_, _>>( - Program::from(elf).unwrap(), - &SP1Stdin::new(), - SC::default(), - SP1CoreOpts::default(), - ) - .unwrap(); - machine.verify(&vk, &proof, &mut challenger).unwrap(); - - println!("Proof generated and verified successfully"); - let mut challenger = machine.config().challenger(); - vk.observe_into(&mut challenger); - proof.shard_proofs.iter().for_each(|proof| { - challenger.observe(proof.commitment.main_commit); - challenger.observe_slice(&proof.public_values[0..machine.num_pv_elts()]); - }); - - // Run the verify inside the DSL and compare it to the calculated value. - let mut builder = AsmBuilder::::default(); - - #[allow(clippy::never_loop)] - for proof in proof.shard_proofs.into_iter().take(1) { - let ( - chips, - trace_domains_vals, - quotient_chunk_domains_vals, - permutation_challenges, - alpha_val, - zeta_val, - ) = get_shard_data(&machine, &proof, &mut challenger); - - for (chip, trace_domain_val, qc_domains_vals, values_vals) in izip!( - chips.iter(), - trace_domains_vals, - quotient_chunk_domains_vals, - proof.opened_values.chips.iter(), - ) { - let opening = builder.constant(values_vals.clone()); - let alpha = builder.eval(alpha_val.cons()); - let zeta = builder.eval(zeta_val.cons()); - let trace_domain = builder.constant(trace_domain_val); - let public_values = builder.constant(proof.public_values.clone()); - - let qc_domains = qc_domains_vals - .iter() - .map(|domain| builder.constant(*domain)) - .collect::>(); - - let permutation_challenges = permutation_challenges - .iter() - .map(|c| builder.eval(c.cons())) - .collect::>(); - - StarkVerifier::<_, SC>::verify_constraints::( - &mut builder, - chip, - &opening, - public_values, - trace_domain, - qc_domains, - zeta, - alpha, - &permutation_challenges, - ) - } - break; - } - builder.halt(); - - let program = builder.compile_program(); - run_test_recursion(program, None, TestConfig::All); - } - - #[test] - fn test_exp_reverse_bit_len_fast() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - - let mut rng = thread_rng(); - - // Initialize a builder. - let mut builder = AsmBuilder::::default(); - - // Get a random var with `NUM_BITS` bits. - let x_val: F = rng.gen(); - - // Materialize the number as a var - let x_felt: Felt<_> = builder.eval(x_val); - let x_bits = builder.num2bits_f(x_felt); - - let result = builder.exp_reverse_bits_len_fast(x_felt, &x_bits, 5); - let expected_val = builder.exp_reverse_bits_len(x_felt, &x_bits, 5); - - builder.assert_felt_eq(expected_val, result); - builder.halt(); - - let program = builder.compile_program(); - - // We don't test with the config TestConfig::WideDeg17Wrap, since it doesn't have the - // `ExpReverseBitsLen` chip. - run_test_recursion(program.clone(), None, TestConfig::WideDeg3); - run_test_recursion(program, None, TestConfig::SkinnyDeg7); - } - - #[test] - fn test_memory_finalize() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - - let mut rng = thread_rng(); - - // Initialize a builder. - let mut builder = AsmBuilder::::default(); - - // Get a random var with `NUM_BITS` bits. 
- let x_val: F = rng.gen(); - - // Materialize the number as a var - let _x_felt: Felt<_> = builder.eval(x_val); - - builder.halt(); - - let program = builder.compile_program(); - - run_test_recursion(program, None, TestConfig::All); - } -} diff --git a/crates/recursion/program/src/fri/domain.rs b/crates/recursion/program/src/fri/domain.rs deleted file mode 100644 index bb05f2a7cc..0000000000 --- a/crates/recursion/program/src/fri/domain.rs +++ /dev/null @@ -1,265 +0,0 @@ -use p3_commit::{LagrangeSelectors, TwoAdicMultiplicativeCoset}; -use p3_field::{AbstractField, TwoAdicField}; -use sp1_recursion_compiler::prelude::*; - -use super::types::FriConfigVariable; -use crate::commit::PolynomialSpaceVariable; - -/// Reference: [p3_commit::TwoAdicMultiplicativeCoset] -#[derive(DslVariable, Clone, Copy)] -pub struct TwoAdicMultiplicativeCosetVariable { - pub log_n: Var, - pub size: Var, - pub shift: Felt, - pub g: Felt, -} - -impl TwoAdicMultiplicativeCosetVariable { - pub const fn size(&self) -> Var { - self.size - } - - pub const fn first_point(&self) -> Felt { - self.shift - } - - pub const fn gen(&self) -> Felt { - self.g - } -} - -impl FromConstant for TwoAdicMultiplicativeCosetVariable -where - C::F: TwoAdicField, -{ - type Constant = TwoAdicMultiplicativeCoset; - - fn constant(value: Self::Constant, builder: &mut Builder) -> Self { - let log_d_val = value.log_n as u32; - let g_val = C::F::two_adic_generator(value.log_n); - TwoAdicMultiplicativeCosetVariable:: { - log_n: builder.eval::, _>(C::N::from_canonical_u32(log_d_val)), - size: builder.eval::, _>(C::N::from_canonical_u32(1 << (log_d_val))), - shift: builder.eval(value.shift), - g: builder.eval(g_val), - } - } -} - -impl PolynomialSpaceVariable for TwoAdicMultiplicativeCosetVariable -where - C::F: TwoAdicField, -{ - type Constant = p3_commit::TwoAdicMultiplicativeCoset; - - fn next_point( - &self, - builder: &mut Builder, - point: Ext<::F, ::EF>, - ) -> Ext<::F, ::EF> { - builder.eval(point * self.gen()) - } - - fn selectors_at_point( - &self, - builder: &mut Builder, - point: Ext<::F, ::EF>, - ) -> LagrangeSelectors::F, ::EF>> { - let unshifted_point: Ext<_, _> = builder.eval(point * self.shift.inverse()); - let z_h_expr = builder - .exp_power_of_2_v::>(unshifted_point, Usize::Var(self.log_n)) - - C::EF::one(); - let z_h: Ext<_, _> = builder.eval(z_h_expr); - - LagrangeSelectors { - is_first_row: builder.eval(z_h / (unshifted_point - C::EF::one())), - is_last_row: builder.eval(z_h / (unshifted_point - self.gen().inverse())), - is_transition: builder.eval(unshifted_point - self.gen().inverse()), - inv_zeroifier: builder.eval(z_h.inverse()), - } - } - - fn zp_at_point( - &self, - builder: &mut Builder, - point: Ext<::F, ::EF>, - ) -> Ext<::F, ::EF> { - let unshifted_power = builder - .exp_power_of_2_v::>(point * self.shift.inverse(), Usize::Var(self.log_n)); - builder.eval(unshifted_power - C::EF::one()) - } - - fn split_domains( - &self, - builder: &mut Builder, - log_num_chunks: impl Into>, - num_chunks: impl Into>, - ) -> Array { - let log_num_chunks = log_num_chunks.into(); - let num_chunks = num_chunks.into(); - let log_n: Var<_> = builder.eval(self.log_n - log_num_chunks); - let size = builder.sll(C::N::one(), Usize::Var(log_n)); - - let g_dom = self.gen(); - let g = builder.exp_power_of_2_v::>(g_dom, log_num_chunks); - - let domain_power: Felt<_> = builder.eval(C::F::one()); - - let mut domains = builder.dyn_array(num_chunks); - - builder.range(0, num_chunks).for_each(|i, builder| { - let domain = 
TwoAdicMultiplicativeCosetVariable { - log_n, - size, - shift: builder.eval(self.shift * domain_power), - g, - }; - builder.set(&mut domains, i, domain); - builder.assign(domain_power, domain_power * g_dom); - }); - - domains - } - - fn split_domains_const(&self, builder: &mut Builder, log_num_chunks: usize) -> Vec { - let num_chunks = 1 << log_num_chunks; - let log_n: Var<_> = builder.eval(self.log_n - C::N::from_canonical_usize(log_num_chunks)); - let size = builder.sll(C::N::one(), Usize::Var(log_n)); - - let g_dom = self.gen(); - let g = builder.exp_power_of_2_v::>(g_dom, log_num_chunks); - - let domain_power: Felt<_> = builder.eval(C::F::one()); - let mut domains = vec![]; - - for _ in 0..num_chunks { - domains.push(TwoAdicMultiplicativeCosetVariable { - log_n, - size, - shift: builder.eval(self.shift * domain_power), - g, - }); - builder.assign(domain_power, domain_power * g_dom); - } - domains - } - - fn create_disjoint_domain( - &self, - builder: &mut Builder, - log_degree: Usize<::N>, - config: Option>, - ) -> Self { - let domain = config.unwrap().get_subgroup(builder, log_degree); - builder.assign(domain.shift, self.shift * C::F::generator()); - domain - } -} - -#[cfg(test)] -pub(crate) mod tests { - - use sp1_recursion_compiler::asm::AsmBuilder; - use sp1_recursion_core::stark::utils::{run_test_recursion, TestConfig}; - use sp1_stark::{ - baby_bear_poseidon2::BabyBearPoseidon2, inner_fri_config, Dom, StarkGenericConfig, - }; - - use crate::utils::const_fri_config; - - use super::*; - use p3_commit::{Pcs, PolynomialSpace}; - use rand::{thread_rng, Rng}; - - pub(crate) fn domain_assertions>( - builder: &mut Builder, - domain: &TwoAdicMultiplicativeCosetVariable, - domain_val: &TwoAdicMultiplicativeCoset, - zeta_val: C::EF, - ) { - // Assert the domain parameters are the same. - builder.assert_var_eq(domain.log_n, F::from_canonical_usize(domain_val.log_n)); - builder.assert_var_eq(domain.size, F::from_canonical_usize(1 << domain_val.log_n)); - builder.assert_felt_eq(domain.shift, domain_val.shift); - - // Get a random point. - let zeta: Ext<_, _> = builder.eval(zeta_val.cons()); - - // Compare the selector values of the reference and the builder. - let sels_expected = domain_val.selectors_at_point(zeta_val); - let sels = domain.selectors_at_point(builder, zeta); - builder.assert_ext_eq(sels.is_first_row, sels_expected.is_first_row.cons()); - builder.assert_ext_eq(sels.is_last_row, sels_expected.is_last_row.cons()); - builder.assert_ext_eq(sels.is_transition, sels_expected.is_transition.cons()); - - let zp_val = domain_val.zp_at_point(zeta_val); - let zp = domain.zp_at_point(builder, zeta); - builder.assert_ext_eq(zp, zp_val.cons()); - } - - #[test] - fn test_domain() { - type SC = BabyBearPoseidon2; - type F = ::Val; - type EF = ::Challenge; - type Challenger = ::Challenger; - type ScPcs = ::Pcs; - - let mut rng = thread_rng(); - let config = SC::default(); - let pcs = config.pcs(); - let natural_domain_for_degree = |degree: usize| -> Dom { - >::natural_domain_for_degree(pcs, degree) - }; - - // Initialize a builder. - let mut builder = AsmBuilder::::default(); - - let config_var = const_fri_config(&mut builder, &inner_fri_config()); - for i in 0..5 { - let log_d_val = 10 + i; - - let log_quotient_degree = 2; - - // Initialize a reference doamin. 
- let domain_val = natural_domain_for_degree(1 << log_d_val); - let domain = builder.constant(domain_val); - - // builder.assert_felt_eq(domain.shift, domain_val.shift); - let zeta_val = rng.gen::(); - domain_assertions(&mut builder, &domain, &domain_val, zeta_val); - - // Try a shifted domain. - let disjoint_domain_val = - domain_val.create_disjoint_domain(1 << (log_d_val + log_quotient_degree)); - let disjoint_domain = builder.constant(disjoint_domain_val); - domain_assertions(&mut builder, &disjoint_domain, &disjoint_domain_val, zeta_val); - - let log_degree: Usize<_> = builder.eval(Usize::Const(log_d_val) + log_quotient_degree); - let disjoint_domain_gen = - domain.create_disjoint_domain(&mut builder, log_degree, Some(config_var.clone())); - domain_assertions(&mut builder, &disjoint_domain_gen, &disjoint_domain_val, zeta_val); - - // Now try splited domains - let qc_domains_val = disjoint_domain_val.split_domains(1 << log_quotient_degree); - for dom_val in qc_domains_val.iter() { - let dom = builder.constant(*dom_val); - domain_assertions(&mut builder, &dom, dom_val, zeta_val); - } - - // Test the splitting of domains by the builder. - let quotient_size: Usize<_> = builder.eval(1 << log_quotient_degree); - let log_quotient_degree: Usize<_> = builder.eval(log_quotient_degree); - let qc_domains = - disjoint_domain.split_domains(&mut builder, log_quotient_degree, quotient_size); - for (i, dom_val) in qc_domains_val.iter().enumerate() { - let dom = builder.get(&qc_domains, i); - domain_assertions(&mut builder, &dom, dom_val, zeta_val); - } - } - builder.halt(); - - let program = builder.compile_program(); - run_test_recursion(program, None, TestConfig::All); - } -} diff --git a/crates/recursion/program/src/fri/hints.rs b/crates/recursion/program/src/fri/hints.rs deleted file mode 100644 index 081f7f4196..0000000000 --- a/crates/recursion/program/src/fri/hints.rs +++ /dev/null @@ -1,282 +0,0 @@ -use p3_field::{AbstractExtensionField, AbstractField}; - -use sp1_recursion_compiler::{ - asm::AsmConfig, - config::InnerConfig, - ir::{Array, Builder, Config}, -}; -use sp1_recursion_core::{air::Block, runtime::DIGEST_SIZE}; -use sp1_stark::{ - InnerBatchOpening, InnerChallenge, InnerCommitPhaseStep, InnerDigest, InnerFriProof, - InnerPcsProof, InnerQueryProof, InnerVal, -}; - -use super::types::{BatchOpeningVariable, TwoAdicPcsProofVariable}; -use crate::{ - fri::types::{ - DigestVariable, FriCommitPhaseProofStepVariable, FriProofVariable, FriQueryProofVariable, - }, - hints::Hintable, -}; - -type C = InnerConfig; - -impl Hintable for InnerDigest { - type HintVariable = DigestVariable; - - fn read(builder: &mut Builder>) -> Self::HintVariable { - builder.hint_felts() - } - - fn write(&self) -> Vec>> { - let h: [InnerVal; DIGEST_SIZE] = *self; - vec![h.iter().map(|x| Block::from(*x)).collect()] - } -} - -impl Hintable for Vec { - type HintVariable = Array>; - - fn read(builder: &mut Builder>) -> Self::HintVariable { - let len = builder.hint_var(); - let mut arr = builder.dyn_array(len); - builder.range(0, len).for_each(|i, builder| { - let hint = InnerDigest::read(builder); - builder.set(&mut arr, i, hint); - }); - arr - } - - fn write(&self) -> Vec>> { - let mut stream = Vec::new(); - - let len = InnerVal::from_canonical_usize(self.len()); - stream.push(vec![len.into()]); - - self.iter().for_each(|arr| { - let comm = InnerDigest::write(arr); - stream.extend(comm); - }); - - stream - } -} - -impl Hintable for InnerCommitPhaseStep { - type HintVariable = FriCommitPhaseProofStepVariable; - - fn 
read(builder: &mut Builder) -> Self::HintVariable { - let sibling_value = builder.hint_ext(); - let opening_proof = Vec::::read(builder); - Self::HintVariable { sibling_value, opening_proof } - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - - let sibling_value: &[InnerVal] = self.sibling_value.as_base_slice(); - let sibling_value = Block::from(sibling_value); - stream.push(vec![sibling_value]); - - stream.extend(Vec::::write(&self.opening_proof)); - - stream - } -} - -impl Hintable for Vec { - type HintVariable = Array>; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let len = builder.hint_var(); - let mut arr = builder.dyn_array(len); - builder.range(0, len).for_each(|i, builder| { - let hint = InnerCommitPhaseStep::read(builder); - builder.set(&mut arr, i, hint); - }); - arr - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - - let len = InnerVal::from_canonical_usize(self.len()); - stream.push(vec![len.into()]); - - self.iter().for_each(|arr| { - let comm = InnerCommitPhaseStep::write(arr); - stream.extend(comm); - }); - - stream - } -} - -impl Hintable for InnerQueryProof { - type HintVariable = FriQueryProofVariable; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let commit_phase_openings = Vec::::read(builder); - Self::HintVariable { commit_phase_openings } - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - - stream.extend(Vec::::write(&self.commit_phase_openings)); - - stream - } -} - -impl Hintable for Vec { - type HintVariable = Array>; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let len = builder.hint_var(); - let mut arr = builder.dyn_array(len); - builder.range(0, len).for_each(|i, builder| { - let hint = InnerQueryProof::read(builder); - builder.set(&mut arr, i, hint); - }); - arr - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - - let len = InnerVal::from_canonical_usize(self.len()); - stream.push(vec![len.into()]); - - self.iter().for_each(|arr| { - let comm = InnerQueryProof::write(arr); - stream.extend(comm); - }); - - stream - } -} - -impl Hintable for InnerFriProof { - type HintVariable = FriProofVariable; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let commit_phase_commits = Vec::::read(builder); - let query_proofs = Vec::::read(builder); - let final_poly = builder.hint_ext(); - let pow_witness = builder.hint_felt(); - Self::HintVariable { commit_phase_commits, query_proofs, final_poly, pow_witness } - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - - stream.extend(Vec::::write( - &self.commit_phase_commits.iter().map(|x| (*x).into()).collect(), - )); - stream.extend(Vec::::write(&self.query_proofs)); - let final_poly: &[InnerVal] = self.final_poly.as_base_slice(); - let final_poly = Block::from(final_poly); - stream.push(vec![final_poly]); - let pow_witness = Block::from(self.pow_witness); - stream.push(vec![pow_witness]); - - stream - } -} - -impl Hintable for InnerBatchOpening { - type HintVariable = BatchOpeningVariable; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let opened_values = Vec::>::read(builder); - let opening_proof = Vec::::read(builder); - Self::HintVariable { opened_values, opening_proof } - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - stream.extend(Vec::>::write( - &self - .opened_values - .iter() - .map(|v| v.iter().map(|x| InnerChallenge::from_base(*x)).collect()) - .collect(), - )); - stream.extend(Vec::::write(&self.opening_proof)); - 
stream - } -} - -impl Hintable for Vec { - type HintVariable = Array>; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let len = builder.hint_var(); - let mut arr = builder.dyn_array(len); - builder.range(0, len).for_each(|i, builder| { - let hint = InnerBatchOpening::read(builder); - builder.set(&mut arr, i, hint); - }); - arr - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - - let len = InnerVal::from_canonical_usize(self.len()); - stream.push(vec![len.into()]); - - self.iter().for_each(|arr| { - let comm = InnerBatchOpening::write(arr); - stream.extend(comm); - }); - - stream - } -} - -impl Hintable for Vec> { - type HintVariable = Array>>; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let len = builder.hint_var(); - let mut arr = builder.dyn_array(len); - builder.range(0, len).for_each(|i, builder| { - let hint = Vec::::read(builder); - builder.set(&mut arr, i, hint); - }); - arr - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - - let len = InnerVal::from_canonical_usize(self.len()); - stream.push(vec![len.into()]); - - self.iter().for_each(|arr| { - let comm = Vec::::write(arr); - stream.extend(comm); - }); - - stream - } -} - -impl Hintable for InnerPcsProof { - type HintVariable = TwoAdicPcsProofVariable; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let fri_proof = InnerFriProof::read(builder); - let query_openings = Vec::>::read(builder); - Self::HintVariable { fri_proof, query_openings } - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - stream.extend(self.fri_proof.write()); - stream.extend(self.query_openings.write()); - stream - } -} diff --git a/crates/recursion/program/src/fri/mod.rs b/crates/recursion/program/src/fri/mod.rs deleted file mode 100644 index b996b4f7b8..0000000000 --- a/crates/recursion/program/src/fri/mod.rs +++ /dev/null @@ -1,311 +0,0 @@ -pub mod domain; -pub mod hints; -pub mod two_adic_pcs; -pub mod types; - -pub use domain::*; -use sp1_primitives::types::RecursionProgramType; -use sp1_recursion_compiler::ir::{ExtensionOperand, Ptr}; -use sp1_recursion_core::runtime::DIGEST_SIZE; -pub use two_adic_pcs::*; - -use p3_field::{AbstractField, Field, TwoAdicField}; - -use sp1_recursion_compiler::ir::{Array, Builder, Config, Ext, Felt, SymbolicVar, Usize, Var}; - -use self::types::{ - DigestVariable, DimensionsVariable, FriChallengesVariable, FriConfigVariable, FriProofVariable, - FriQueryProofVariable, -}; -use crate::challenger::{ - CanObserveVariable, CanSampleBitsVariable, DuplexChallengerVariable, FeltChallenger, -}; - -/// Reference: https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/fri/src/verifier.rs#L27 -pub fn verify_shape_and_sample_challenges( - builder: &mut Builder, - config: &FriConfigVariable, - proof: &FriProofVariable, - challenger: &mut DuplexChallengerVariable, -) -> FriChallengesVariable { - let mut betas: Array> = builder.dyn_array(proof.commit_phase_commits.len()); - - builder.range(0, proof.commit_phase_commits.len()).for_each(|i, builder| { - let comm = builder.get(&proof.commit_phase_commits, i); - challenger.observe(builder, comm); - let sample = challenger.sample_ext(builder); - builder.set(&mut betas, i, sample); - }); - - // Observe the final polynomial. 
- let final_poly_felts = builder.ext2felt(proof.final_poly); - challenger.observe_slice(builder, final_poly_felts); - - let num_query_proofs = proof.query_proofs.len().materialize(builder); - builder.if_ne(num_query_proofs, config.num_queries).then(|builder| { - builder.error(); - }); - - challenger.check_witness(builder, config.proof_of_work_bits, proof.pow_witness); - - let num_commit_phase_commits = proof.commit_phase_commits.len().materialize(builder); - let log_max_height: Var<_> = builder.eval(num_commit_phase_commits + config.log_blowup); - let mut query_indices = builder.array(config.num_queries); - builder.range(0, config.num_queries).for_each(|i, builder| { - let index_bits = challenger.sample_bits(builder, Usize::Var(log_max_height)); - builder.set(&mut query_indices, i, index_bits); - }); - - FriChallengesVariable { query_indices, betas } -} - -/// Verifies a set of FRI challenges. -/// -/// Reference: https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/fri/src/verifier.rs#L67 -#[allow(clippy::type_complexity)] -pub fn verify_challenges( - builder: &mut Builder, - config: &FriConfigVariable, - proof: &FriProofVariable, - challenges: &FriChallengesVariable, - reduced_openings: &Array>>, -) where - C::F: TwoAdicField, - C::EF: TwoAdicField, -{ - let nb_commit_phase_commits = proof.commit_phase_commits.len().materialize(builder); - let log_max_height = builder.eval(nb_commit_phase_commits + config.log_blowup); - builder.range(0, challenges.query_indices.len()).for_each(|i, builder| { - let index_bits = builder.get(&challenges.query_indices, i); - let query_proof = builder.get(&proof.query_proofs, i); - let ro = builder.get(reduced_openings, i); - - let folded_eval = verify_query( - builder, - config, - &proof.commit_phase_commits, - &index_bits, - &query_proof, - &challenges.betas, - &ro, - Usize::Var(log_max_height), - ); - - builder.assert_ext_eq(folded_eval, proof.final_poly); - }); -} - -/// Verifies a FRI query. -/// -/// Currently assumes the index that is accessed is constant. 
-/// -/// Reference: https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/fri/src/verifier.rs#L101 -#[allow(clippy::too_many_arguments)] -#[allow(unused_variables)] -pub fn verify_query( - builder: &mut Builder, - config: &FriConfigVariable, - commit_phase_commits: &Array>, - index_bits: &Array>, - proof: &FriQueryProofVariable, - betas: &Array>, - reduced_openings: &Array>, - log_max_height: Usize, -) -> Ext -where - C::F: TwoAdicField, - C::EF: TwoAdicField, -{ - builder.cycle_tracker("verify-query"); - let folded_eval: Ext = builder.eval(C::F::zero()); - let two_adic_generator_f = config.get_two_adic_generator(builder, log_max_height); - - let x = if matches!(builder.program_type, RecursionProgramType::Wrap) { - builder.exp_reverse_bits_len(two_adic_generator_f, index_bits, log_max_height) - } else { - builder.exp_reverse_bits_len_fast(two_adic_generator_f, index_bits, log_max_height) - }; - - let log_max_height = log_max_height.materialize(builder); - builder.range(0, commit_phase_commits.len()).for_each(|i, builder| { - let log_folded_height: Var<_> = builder.eval(log_max_height - i - C::N::one()); - let log_folded_height_plus_one: Var<_> = builder.eval(log_folded_height + C::N::one()); - let commit = builder.get(commit_phase_commits, i); - let step = builder.get(&proof.commit_phase_openings, i); - let beta = builder.get(betas, i); - - let reduced_opening = builder.get(reduced_openings, log_folded_height_plus_one); - builder.assign(folded_eval, folded_eval + reduced_opening); - - let index_bit = builder.get(index_bits, i); - let index_sibling_mod_2: Var = - builder.eval(SymbolicVar::from(C::N::one()) - index_bit); - let i_plus_one = builder.eval(i + C::N::one()); - let index_pair = index_bits.shift(builder, i_plus_one); - - let mut evals: Array> = builder.array(2); - builder.set_value(&mut evals, 0, folded_eval); - builder.set_value(&mut evals, 1, folded_eval); - builder.set_value(&mut evals, index_sibling_mod_2, step.sibling_value); - - let dims = DimensionsVariable:: { - height: builder.sll(C::N::one(), Usize::Var(log_folded_height)), - }; - let mut dims_slice: Array> = builder.array(1); - builder.set_value(&mut dims_slice, 0, dims); - - let mut opened_values = builder.array(1); - builder.set_value(&mut opened_values, 0, evals.clone()); - verify_batch::( - builder, - &commit, - dims_slice, - index_pair, - opened_values, - &step.opening_proof, - ); - - let two_adic_generator_one = config.get_two_adic_generator(builder, Usize::Const(1)); - let xs_0: Ext<_, _> = builder.eval(x); - let xs_1: Ext<_, _> = builder.eval(x); - builder.if_eq(index_sibling_mod_2, C::N::zero()).then_or_else( - |builder| { - builder.assign(xs_0, x * two_adic_generator_one.to_operand().symbolic()); - }, - |builder| { - builder.assign(xs_1, x * two_adic_generator_one.to_operand().symbolic()); - }, - ); - - let eval_0 = builder.get(&evals, 0); - let eval_1 = builder.get(&evals, 1); - builder.assign(folded_eval, eval_0 + (beta - xs_0) * (eval_1 - eval_0) / (xs_1 - xs_0)); - - builder.assign(x, x * x); - }); - - builder.cycle_tracker("verify-query"); - folded_eval -} - -/// Verifies a batch opening. -/// -/// Assumes the dimensions have already been sorted by tallest first. 
-/// -/// Reference: https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/merkle-tree/src/mmcs.rs#L92 -#[allow(clippy::type_complexity)] -#[allow(unused_variables)] -pub fn verify_batch( - builder: &mut Builder, - commit: &DigestVariable, - dimensions: Array>, - index_bits: Array>, - opened_values: Array>>, - proof: &Array>, -) { - builder.cycle_tracker("verify-batch"); - // The index of which table to process next. - let index: Var = builder.eval(C::N::zero()); - - // The height of the current layer (padded). - let current_height = builder.get(&dimensions, index).height; - - // Reduce all the tables that have the same height to a single root. - let root = reduce_fast::(builder, index, &dimensions, current_height, &opened_values); - let root_ptr = match root { - Array::Fixed(_) => panic!("root is fixed"), - Array::Dyn(ptr, _) => ptr, - }; - - // For each sibling in the proof, reconstruct the root. - let one: Var<_> = builder.eval(C::N::one()); - let left: Ptr = builder.uninit(); - let right: Ptr = builder.uninit(); - builder.range(0, proof.len()).for_each(|i, builder| { - let sibling = builder.get_ptr(proof, i); - let bit = builder.get(&index_bits, i); - - builder.if_eq(bit, C::N::one()).then_or_else( - |builder| { - builder.assign(left, sibling); - builder.assign(right, root_ptr); - }, - |builder| { - builder.assign(left, root_ptr); - builder.assign(right, sibling); - }, - ); - - builder.poseidon2_compress_x( - &mut Array::Dyn(root_ptr, Usize::Const(0)), - &Array::Dyn(left, Usize::Const(0)), - &Array::Dyn(right, Usize::Const(0)), - ); - builder.assign(current_height, current_height * (C::N::two().inverse())); - - builder.if_ne(index, dimensions.len()).then(|builder| { - let next_height = builder.get(&dimensions, index).height; - builder.if_eq(next_height, current_height).then(|builder| { - let next_height_openings_digest = reduce_fast::( - builder, - index, - &dimensions, - current_height, - &opened_values, - ); - builder.poseidon2_compress_x( - &mut root.clone(), - &root.clone(), - &next_height_openings_digest, - ); - }); - }) - }); - - // Assert that the commitments match. 
- for i in 0..DIGEST_SIZE { - let e1 = builder.get(commit, i); - let e2 = builder.get(&root, i); - builder.assert_felt_eq(e1, e2); - } - builder.cycle_tracker("verify-batch"); -} - -#[allow(clippy::type_complexity)] -pub fn reduce_fast( - builder: &mut Builder, - dim_idx: Var, - dims: &Array>, - curr_height_padded: Var, - opened_values: &Array>>, -) -> Array> { - builder.cycle_tracker("verify-batch-reduce-fast"); - let nb_opened_values: Var<_> = builder.eval(C::N::zero()); - let mut nested_opened_values: Array<_, Array<_, Ext<_, _>>> = builder.dyn_array(8192); - let start_dim_idx: Var<_> = builder.eval(dim_idx); - builder.cycle_tracker("verify-batch-reduce-fast-setup"); - builder.range(start_dim_idx, dims.len()).for_each(|i, builder| { - let height = builder.get(dims, i).height; - builder.if_eq(height, curr_height_padded).then(|builder| { - let opened_values = builder.get(opened_values, i); - builder.set_value(&mut nested_opened_values, nb_opened_values, opened_values.clone()); - builder.assign(nb_opened_values, nb_opened_values + C::N::one()); - builder.assign(dim_idx, dim_idx + C::N::one()); - }); - }); - builder.cycle_tracker("verify-batch-reduce-fast-setup"); - - let h = if D == 1 { - let nested_opened_values = match nested_opened_values { - Array::Dyn(ptr, len) => Array::Dyn(ptr, len), - _ => unreachable!(), - }; - nested_opened_values.truncate(builder, Usize::Var(nb_opened_values)); - builder.poseidon2_hash_x(&nested_opened_values) - } else { - nested_opened_values.truncate(builder, Usize::Var(nb_opened_values)); - builder.poseidon2_hash_ext(&nested_opened_values) - }; - builder.cycle_tracker("verify-batch-reduce-fast"); - h -} diff --git a/crates/recursion/program/src/fri/two_adic_pcs.rs b/crates/recursion/program/src/fri/two_adic_pcs.rs deleted file mode 100644 index 10f49aba42..0000000000 --- a/crates/recursion/program/src/fri/two_adic_pcs.rs +++ /dev/null @@ -1,383 +0,0 @@ -use p3_commit::TwoAdicMultiplicativeCoset; -use p3_field::{AbstractField, TwoAdicField}; -use p3_symmetric::Hash; -use sp1_primitives::types::RecursionProgramType; -use sp1_recursion_compiler::prelude::*; -use sp1_recursion_core::runtime::DIGEST_SIZE; - -use super::{ - types::{ - DigestVariable, DimensionsVariable, FriConfigVariable, TwoAdicPcsMatsVariable, - TwoAdicPcsProofVariable, TwoAdicPcsRoundVariable, - }, - verify_batch, verify_challenges, verify_shape_and_sample_challenges, - TwoAdicMultiplicativeCosetVariable, -}; -use crate::{ - challenger::{DuplexChallengerVariable, FeltChallenger}, - commit::PcsVariable, -}; - -pub fn verify_two_adic_pcs( - builder: &mut Builder, - config: &FriConfigVariable, - rounds: Array>, - proof: TwoAdicPcsProofVariable, - challenger: &mut DuplexChallengerVariable, -) where - C::F: TwoAdicField, - C::EF: TwoAdicField, -{ - let mut input_ptr = builder.array::>(1); - let g = builder.generator(); - - let log_blowup = config.log_blowup; - let blowup = config.blowup; - let alpha = challenger.sample_ext(builder); - - builder.cycle_tracker("stage-d-1-verify-shape-and-sample-challenges"); - let fri_challenges = - verify_shape_and_sample_challenges(builder, config, &proof.fri_proof, challenger); - builder.cycle_tracker("stage-d-1-verify-shape-and-sample-challenges"); - - let commit_phase_commits_len = proof.fri_proof.commit_phase_commits.len().materialize(builder); - let log_global_max_height: Var<_> = builder.eval(commit_phase_commits_len + log_blowup); - - let mut reduced_openings: Array>> = - builder.array(proof.query_openings.len()); - - 
builder.cycle_tracker("stage-d-2-fri-fold"); - builder.range(0, proof.query_openings.len()).for_each(|i, builder| { - let query_opening = builder.get(&proof.query_openings, i); - let index_bits = builder.get(&fri_challenges.query_indices, i); - - let mut ro: Array> = builder.array(32); - let mut alpha_pow: Array> = builder.array(32); - let zero_ef = builder.eval(C::EF::zero().cons()); - for j in 0..32 { - builder.set_value(&mut ro, j, zero_ef); - } - let one_ef = builder.eval(C::EF::one().cons()); - for j in 0..32 { - builder.set_value(&mut alpha_pow, j, one_ef); - } - - builder.range(0, rounds.len()).for_each(|j, builder| { - let batch_opening = builder.get(&query_opening, j); - let round = builder.get(&rounds, j); - let batch_commit = round.batch_commit; - let mats = round.mats; - - let mut batch_heights_log2: Array> = builder.array(mats.len()); - builder.range(0, mats.len()).for_each(|k, builder| { - let mat = builder.get(&mats, k); - let height_log2: Var<_> = builder.eval(mat.domain.log_n + log_blowup); - builder.set_value(&mut batch_heights_log2, k, height_log2); - }); - let mut batch_dims: Array> = builder.array(mats.len()); - builder.range(0, mats.len()).for_each(|k, builder| { - let mat = builder.get(&mats, k); - let dim = - DimensionsVariable:: { height: builder.eval(mat.domain.size() * blowup) }; - builder.set_value(&mut batch_dims, k, dim); - }); - - let log_batch_max_height = builder.get(&batch_heights_log2, 0); - let bits_reduced: Var<_> = builder.eval(log_global_max_height - log_batch_max_height); - let index_bits_shifted_v1 = index_bits.shift(builder, bits_reduced); - verify_batch::( - builder, - &batch_commit, - batch_dims, - index_bits_shifted_v1, - batch_opening.opened_values.clone(), - &batch_opening.opening_proof, - ); - - builder.range(0, batch_opening.opened_values.len()).for_each(|k, builder| { - let mat_opening = builder.get(&batch_opening.opened_values, k); - let mat = builder.get(&mats, k); - let mat_points = mat.points; - let mat_values = mat.values; - - let log2_domain_size = mat.domain.log_n; - let log_height: Var = builder.eval(log2_domain_size + log_blowup); - - let bits_reduced: Var = builder.eval(log_global_max_height - log_height); - let index_bits_shifted = index_bits.shift(builder, bits_reduced); - - let two_adic_generator = config.get_two_adic_generator(builder, log_height); - builder.cycle_tracker("exp_reverse_bits_len"); - - let two_adic_generator_exp: Felt = - if matches!(builder.program_type, RecursionProgramType::Wrap) { - builder.exp_reverse_bits_len( - two_adic_generator, - &index_bits_shifted, - log_height, - ) - } else { - builder.exp_reverse_bits_len_fast( - two_adic_generator, - &index_bits_shifted, - log_height, - ) - }; - - builder.cycle_tracker("exp_reverse_bits_len"); - let x: Felt = builder.eval(two_adic_generator_exp * g); - - builder.range(0, mat_points.len()).for_each(|l, builder| { - let z: Ext = builder.get(&mat_points, l); - let ps_at_z = builder.get(&mat_values, l); - let input = FriFoldInput { - z, - alpha, - x, - log_height, - mat_opening: mat_opening.clone(), - ps_at_z: ps_at_z.clone(), - alpha_pow: alpha_pow.clone(), - ro: ro.clone(), - }; - builder.set_value(&mut input_ptr, 0, input); - - let ps_at_z_len = ps_at_z.len().materialize(builder); - builder.push(DslIr::FriFold(ps_at_z_len, input_ptr.clone())); - }); - }); - }); - - builder.set_value(&mut reduced_openings, i, ro); - }); - builder.cycle_tracker("stage-d-2-fri-fold"); - - builder.cycle_tracker("stage-d-3-verify-challenges"); - verify_challenges(builder, config, 
&proof.fri_proof, &fri_challenges, &reduced_openings); - builder.cycle_tracker("stage-d-3-verify-challenges"); -} - -impl FromConstant for TwoAdicPcsRoundVariable -where - C::F: TwoAdicField, -{ - type Constant = ( - Hash, - Vec<(TwoAdicMultiplicativeCoset, Vec<(C::EF, Vec)>)>, - ); - - fn constant(value: Self::Constant, builder: &mut Builder) -> Self { - let (commit_val, domains_and_openings_val) = value; - - // Allocate the commitment. - let mut commit = builder.dyn_array::>(DIGEST_SIZE); - let commit_val: [C::F; DIGEST_SIZE] = commit_val.into(); - for (i, f) in commit_val.into_iter().enumerate() { - builder.set(&mut commit, i, f); - } - - let mut mats = - builder.dyn_array::>(domains_and_openings_val.len()); - - for (i, (domain, openning)) in domains_and_openings_val.into_iter().enumerate() { - let domain = builder.constant::>(domain); - - let points_val = openning.iter().map(|(p, _)| *p).collect::>(); - let values_val = openning.iter().map(|(_, v)| v.clone()).collect::>(); - let mut points: Array<_, Ext<_, _>> = builder.dyn_array(points_val.len()); - for (j, point) in points_val.into_iter().enumerate() { - let el: Ext<_, _> = builder.eval(point.cons()); - builder.set_value(&mut points, j, el); - } - let mut values: Array<_, Array<_, Ext<_, _>>> = builder.dyn_array(values_val.len()); - for (j, val) in values_val.into_iter().enumerate() { - let mut tmp = builder.dyn_array(val.len()); - for (k, v) in val.into_iter().enumerate() { - let el: Ext<_, _> = builder.eval(v.cons()); - builder.set_value(&mut tmp, k, el); - } - builder.set_value(&mut values, j, tmp); - } - - let mat = TwoAdicPcsMatsVariable { domain, points, values }; - builder.set_value(&mut mats, i, mat); - } - - Self { batch_commit: commit, mats } - } -} - -#[derive(DslVariable, Clone)] -pub struct TwoAdicFriPcsVariable { - pub config: FriConfigVariable, -} - -impl PcsVariable> for TwoAdicFriPcsVariable -where - C::F: TwoAdicField, - C::EF: TwoAdicField, -{ - type Domain = TwoAdicMultiplicativeCosetVariable; - - type Commitment = DigestVariable; - - type Proof = TwoAdicPcsProofVariable; - - fn natural_domain_for_log_degree( - &self, - builder: &mut Builder, - log_degree: Usize, - ) -> Self::Domain { - self.config.get_subgroup(builder, log_degree) - } - - fn verify( - &self, - builder: &mut Builder, - rounds: Array>, - proof: Self::Proof, - challenger: &mut DuplexChallengerVariable, - ) { - verify_two_adic_pcs(builder, &self.config, rounds, proof, challenger) - } -} - -pub mod tests { - - use std::{cmp::Reverse, collections::VecDeque}; - - use crate::{ - challenger::{CanObserveVariable, DuplexChallengerVariable, FeltChallenger}, - commit::PcsVariable, - fri::{ - types::TwoAdicPcsRoundVariable, TwoAdicFriPcsVariable, - TwoAdicMultiplicativeCosetVariable, - }, - hints::Hintable, - utils::const_fri_config, - }; - use itertools::Itertools; - use p3_baby_bear::BabyBear; - use p3_challenger::{CanObserve, FieldChallenger}; - use p3_commit::{Pcs, TwoAdicMultiplicativeCoset}; - use p3_field::AbstractField; - use p3_matrix::dense::RowMajorMatrix; - use rand::rngs::OsRng; - - use sp1_recursion_compiler::{ - config::InnerConfig, - ir::{Array, Builder, Usize, Var}, - }; - use sp1_recursion_core::{ - air::Block, - runtime::{RecursionProgram, DIGEST_SIZE}, - }; - use sp1_stark::{ - baby_bear_poseidon2::compressed_fri_config, inner_perm, InnerChallenge, InnerChallenger, - InnerCompress, InnerDft, InnerHash, InnerPcs, InnerPcsProof, InnerVal, InnerValMmcs, - }; - - pub fn build_test_fri_with_cols_and_log2_rows( - nb_cols: usize, - nb_log2_rows: 
usize, - ) -> (RecursionProgram, VecDeque>>) { - let mut rng = &mut OsRng; - let log_degrees = &[nb_log2_rows]; - let perm = inner_perm(); - let fri_config = compressed_fri_config(); - let hash = InnerHash::new(perm.clone()); - let compress = InnerCompress::new(perm.clone()); - let val_mmcs = InnerValMmcs::new(hash, compress); - let dft = InnerDft {}; - let pcs_val: InnerPcs = - InnerPcs::new(log_degrees.iter().copied().max().unwrap(), dft, val_mmcs, fri_config); - - // Generate proof. - let domains_and_polys = log_degrees - .iter() - .map(|&d| { - ( - >::natural_domain_for_degree( - &pcs_val, - 1 << d, - ), - RowMajorMatrix::::rand(&mut rng, 1 << d, nb_cols), - ) - }) - .sorted_by_key(|(dom, _)| Reverse(dom.log_n)) - .collect::>(); - let (commit, data) = >::commit( - &pcs_val, - domains_and_polys.clone(), - ); - let mut challenger = InnerChallenger::new(perm.clone()); - challenger.observe(commit); - let zeta = challenger.sample_ext_element::(); - let points = domains_and_polys.iter().map(|_| vec![zeta]).collect::>(); - let (opening, proof) = pcs_val.open(vec![(&data, points)], &mut challenger); - - // Verify proof. - let mut challenger = InnerChallenger::new(perm.clone()); - challenger.observe(commit); - challenger.sample_ext_element::(); - let os: Vec<( - TwoAdicMultiplicativeCoset, - Vec<(InnerChallenge, Vec)>, - )> = domains_and_polys - .iter() - .zip(&opening[0]) - .map(|((domain, _), mat_openings)| (*domain, vec![(zeta, mat_openings[0].clone())])) - .collect(); - pcs_val.verify(vec![(commit, os.clone())], &proof, &mut challenger).unwrap(); - - // Test the recursive Pcs. - let mut builder = Builder::::default(); - let config = const_fri_config(&mut builder, &compressed_fri_config()); - let pcs = TwoAdicFriPcsVariable { config }; - let rounds = - builder.constant::>>(vec![(commit, os.clone())]); - - // Test natural domain for degree. - for log_d_val in log_degrees.iter() { - let log_d: Var<_> = builder.eval(InnerVal::from_canonical_usize(*log_d_val)); - let domain = pcs.natural_domain_for_log_degree(&mut builder, Usize::Var(log_d)); - - let domain_val = - >::natural_domain_for_degree( - &pcs_val, - 1 << log_d_val, - ); - - let expected_domain: TwoAdicMultiplicativeCosetVariable<_> = - builder.constant(domain_val); - - builder.assert_eq::>(domain, expected_domain); - } - - // Test proof verification. - let proofvar = InnerPcsProof::read(&mut builder); - let mut challenger = DuplexChallengerVariable::new(&mut builder); - let commit = <[InnerVal; DIGEST_SIZE]>::from(commit).to_vec(); - let commit = builder.constant::>(commit); - challenger.observe(&mut builder, commit); - challenger.sample_ext(&mut builder); - pcs.verify(&mut builder, rounds, proofvar, &mut challenger); - builder.halt(); - - let program = builder.compile_program(); - let mut witness_stream = VecDeque::new(); - witness_stream.extend(proof.write()); - (program, witness_stream) - } - - #[test] - fn test_two_adic_fri_pcs_single_batch() { - use sp1_recursion_core::stark::utils::{run_test_recursion, TestConfig}; - let (program, witness) = build_test_fri_with_cols_and_log2_rows(10, 16); - - // We don't test with the config TestConfig::WideDeg17Wrap, since it doesn't have the - // `ExpReverseBitsLen` chip. 
- run_test_recursion(program.clone(), Some(witness.clone()), TestConfig::WideDeg3); - run_test_recursion(program, Some(witness), TestConfig::SkinnyDeg7); - } -} diff --git a/crates/recursion/program/src/fri/types.rs b/crates/recursion/program/src/fri/types.rs deleted file mode 100644 index ca2541a3be..0000000000 --- a/crates/recursion/program/src/fri/types.rs +++ /dev/null @@ -1,71 +0,0 @@ -use sp1_recursion_compiler::prelude::*; - -use crate::fri::TwoAdicMultiplicativeCosetVariable; - -pub type DigestVariable = Array::F>>; - -#[derive(DslVariable, Clone)] -pub struct FriConfigVariable { - pub log_blowup: Var, - pub blowup: Var, - pub num_queries: Var, - pub proof_of_work_bits: Var, - pub generators: Array>, - pub subgroups: Array>, -} - -#[derive(DslVariable, Clone)] -pub struct FriProofVariable { - pub commit_phase_commits: Array>, - pub query_proofs: Array>, - pub final_poly: Ext, - pub pow_witness: Felt, -} - -#[derive(DslVariable, Clone)] -pub struct FriQueryProofVariable { - pub commit_phase_openings: Array>, -} - -#[derive(DslVariable, Clone)] -pub struct FriCommitPhaseProofStepVariable { - pub sibling_value: Ext, - pub opening_proof: Array>, -} - -#[derive(DslVariable, Clone)] -pub struct FriChallengesVariable { - pub query_indices: Array>>, - pub betas: Array>, -} - -#[derive(DslVariable, Clone)] -pub struct DimensionsVariable { - pub height: Var, -} - -#[derive(DslVariable, Clone)] -pub struct TwoAdicPcsProofVariable { - pub fri_proof: FriProofVariable, - pub query_openings: Array>>, -} - -#[derive(DslVariable, Clone)] -pub struct BatchOpeningVariable { - pub opened_values: Array>>, - pub opening_proof: Array>>, -} - -#[derive(DslVariable, Clone)] -pub struct TwoAdicPcsRoundVariable { - pub batch_commit: DigestVariable, - pub mats: Array>, -} - -#[allow(clippy::type_complexity)] -#[derive(DslVariable, Clone)] -pub struct TwoAdicPcsMatsVariable { - pub domain: TwoAdicMultiplicativeCosetVariable, - pub points: Array>, - pub values: Array>>, -} diff --git a/crates/recursion/program/src/hints.rs b/crates/recursion/program/src/hints.rs deleted file mode 100644 index 41ff6e46b5..0000000000 --- a/crates/recursion/program/src/hints.rs +++ /dev/null @@ -1,662 +0,0 @@ -use p3_baby_bear::BabyBear; -use p3_challenger::DuplexChallenger; -use p3_commit::TwoAdicMultiplicativeCoset; -use p3_field::{AbstractExtensionField, AbstractField, TwoAdicField}; - -use sp1_core_machine::riscv::RiscvAir; -use sp1_recursion_compiler::{ - config::InnerConfig, - ir::{Array, Builder, Config, Ext, Felt, MemVariable, Var, Variable}, -}; -use sp1_recursion_core::{air::Block, runtime::PERMUTATION_WIDTH}; -use sp1_stark::{ - air::{MachineAir, PV_DIGEST_NUM_WORDS}, - baby_bear_poseidon2::BabyBearPoseidon2, - AirOpenedValues, ChipOpenedValues, Com, InnerChallenge, InnerDigest, InnerDigestHash, - InnerPcsProof, InnerPerm, InnerVal, ShardCommitment, ShardOpenedValues, StarkGenericConfig, - Word, -}; - -use crate::{ - challenger::DuplexChallengerVariable, - fri::TwoAdicMultiplicativeCosetVariable, - machine::*, - stark::{ShardProofHint, VerifyingKeyHint}, - types::{ - AirOpenedValuesVariable, ChipOpenedValuesVariable, QuotientData, QuotientDataValues, - Sha256DigestVariable, ShardCommitmentVariable, ShardOpenedValuesVariable, - ShardProofVariable, VerifyingKeyVariable, - }, - utils::{get_chip_quotient_data, get_preprocessed_data, get_sorted_indices}, -}; - -pub trait Hintable { - type HintVariable: Variable; - - fn read(builder: &mut Builder) -> Self::HintVariable; - - fn write(&self) -> Vec>>; - - fn witness(variable: 
&Self::HintVariable, builder: &mut Builder) { - let target = Self::read(builder); - builder.assign(variable.clone(), target); - } -} - -type C = InnerConfig; - -impl Hintable for usize { - type HintVariable = Var; - - fn read(builder: &mut Builder) -> Self::HintVariable { - builder.hint_var() - } - - fn write(&self) -> Vec>> { - vec![vec![Block::from(InnerVal::from_canonical_usize(*self))]] - } -} - -impl Hintable for InnerVal { - type HintVariable = Felt; - - fn read(builder: &mut Builder) -> Self::HintVariable { - builder.hint_felt() - } - - fn write(&self) -> Vec::F>>> { - vec![vec![Block::from(*self)]] - } -} - -impl Hintable for InnerChallenge { - type HintVariable = Ext; - - fn read(builder: &mut Builder) -> Self::HintVariable { - builder.hint_ext() - } - - fn write(&self) -> Vec::F>>> { - vec![vec![Block::from((*self).as_base_slice())]] - } -} - -impl Hintable for [Word; PV_DIGEST_NUM_WORDS] { - type HintVariable = Sha256DigestVariable; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let bytes = builder.hint_felts(); - Sha256DigestVariable { bytes } - } - - fn write(&self) -> Vec::F>>> { - vec![self.iter().flat_map(|w| w.0.iter().map(|f| Block::from(*f))).collect::>()] - } -} - -impl Hintable for QuotientDataValues { - type HintVariable = QuotientData; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let log_quotient_degree = usize::read(builder); - let quotient_size = usize::read(builder); - - QuotientData { log_quotient_degree, quotient_size } - } - - fn write(&self) -> Vec::F>>> { - let mut buffer = Vec::new(); - buffer.extend(usize::write(&self.log_quotient_degree)); - buffer.extend(usize::write(&self.quotient_size)); - - buffer - } -} - -impl Hintable for TwoAdicMultiplicativeCoset { - type HintVariable = TwoAdicMultiplicativeCosetVariable; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let log_n = usize::read(builder); - let shift = InnerVal::read(builder); - let g_val = InnerVal::read(builder); - let size = usize::read(builder); - - // Initialize a domain. 
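// A minimal standalone sketch (assumed layout, simplified types) of the hint encoding
// these impls rely on: every hint is a list of rows of 4-wide blocks; a base-field value
// or a usize occupies the first slot of one block, a degree-4 extension element fills a
// whole block with its base coefficients, and a Vec is written as a length row followed
// by one encoding per element, which is exactly the order the matching `read` consumes.
use std::collections::VecDeque;

type Felt = u64;
type Block = [Felt; 4];
type Rows = Vec<Vec<Block>>;

fn write_usize(x: usize) -> Rows { vec![vec![[x as Felt, 0, 0, 0]]] }
fn write_ext(coeffs: [Felt; 4]) -> Rows { vec![vec![coeffs]] }

fn write_vec_usize(xs: &[usize]) -> Rows {
    let mut rows = write_usize(xs.len());
    for &x in xs {
        rows.extend(write_usize(x));
    }
    rows
}

fn read_usize(stream: &mut VecDeque<Vec<Block>>) -> usize {
    stream.pop_front().expect("missing hint")[0][0] as usize
}

fn main() {
    let mut stream: VecDeque<Vec<Block>> = write_vec_usize(&[7, 8]).into();
    let len = read_usize(&mut stream);
    let elems: Vec<usize> = (0..len).map(|_| read_usize(&mut stream)).collect();
    assert_eq!(elems, vec![7, 8]);
    let _ext = write_ext([1, 2, 3, 4]); // one block holding all four coefficients
}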
- TwoAdicMultiplicativeCosetVariable:: { log_n, size, shift, g: g_val } - } - - fn write(&self) -> Vec::F>>> { - let mut vec = Vec::new(); - vec.extend(usize::write(&self.log_n)); - vec.extend(InnerVal::write(&self.shift)); - vec.extend(InnerVal::write(&InnerVal::two_adic_generator(self.log_n))); - vec.extend(usize::write(&(1usize << (self.log_n)))); - vec - } -} - -trait VecAutoHintable: Hintable {} - -impl<'a, A: MachineAir> VecAutoHintable for ShardProofHint<'a, BabyBearPoseidon2, A> {} -impl VecAutoHintable for TwoAdicMultiplicativeCoset {} -impl VecAutoHintable for Vec {} -impl VecAutoHintable for QuotientDataValues {} -impl VecAutoHintable for Vec {} -impl VecAutoHintable for Vec {} - -impl> VecAutoHintable for &I {} - -impl> Hintable for &H { - type HintVariable = H::HintVariable; - - fn read(builder: &mut Builder) -> Self::HintVariable { - H::read(builder) - } - - fn write(&self) -> Vec::F>>> { - H::write(self) - } -} - -impl> Hintable for Vec -where - >::HintVariable: MemVariable, -{ - type HintVariable = Array; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let len = builder.hint_var(); - let mut arr = builder.dyn_array(len); - builder.range(0, len).for_each(|i, builder| { - let hint = I::read(builder); - builder.set(&mut arr, i, hint); - }); - arr - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - - let len = InnerVal::from_canonical_usize(self.len()); - stream.push(vec![len.into()]); - - self.iter().for_each(|i| { - let comm = I::write(i); - stream.extend(comm); - }); - - stream - } -} - -impl Hintable for Vec { - type HintVariable = Array>; - - fn read(builder: &mut Builder) -> Self::HintVariable { - builder.hint_vars() - } - - fn write(&self) -> Vec>> { - vec![self.iter().map(|x| Block::from(InnerVal::from_canonical_usize(*x))).collect()] - } -} - -impl Hintable for Vec { - type HintVariable = Array>; - - fn read(builder: &mut Builder) -> Self::HintVariable { - builder.hint_felts() - } - - fn write(&self) -> Vec::F>>> { - vec![self.iter().map(|x| Block::from(*x)).collect()] - } -} - -impl Hintable for Vec { - type HintVariable = Array>; - - fn read(builder: &mut Builder) -> Self::HintVariable { - builder.hint_exts() - } - - fn write(&self) -> Vec::F>>> { - vec![self.iter().map(|x| Block::from((*x).as_base_slice())).collect()] - } -} - -impl Hintable for AirOpenedValues { - type HintVariable = AirOpenedValuesVariable; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let local = Vec::::read(builder); - let next = Vec::::read(builder); - AirOpenedValuesVariable { local, next } - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - stream.extend(self.local.write()); - stream.extend(self.next.write()); - stream - } -} - -impl Hintable for Vec> { - type HintVariable = Array>>; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let len = builder.hint_var(); - let mut arr = builder.dyn_array(len); - builder.range(0, len).for_each(|i, builder| { - let hint = Vec::::read(builder); - builder.set(&mut arr, i, hint); - }); - arr - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - - let len = InnerVal::from_canonical_usize(self.len()); - stream.push(vec![len.into()]); - - self.iter().for_each(|arr| { - let comm = Vec::::write(arr); - stream.extend(comm); - }); - - stream - } -} - -impl Hintable for ChipOpenedValues { - type HintVariable = ChipOpenedValuesVariable; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let preprocessed = AirOpenedValues::::read(builder); - let main = 
AirOpenedValues::::read(builder); - let permutation = AirOpenedValues::::read(builder); - let quotient = Vec::>::read(builder); - let cumulative_sum = InnerChallenge::read(builder); - let log_degree = builder.hint_var(); - ChipOpenedValuesVariable { - preprocessed, - main, - permutation, - quotient, - cumulative_sum, - log_degree, - } - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - stream.extend(self.preprocessed.write()); - stream.extend(self.main.write()); - stream.extend(self.permutation.write()); - stream.extend(self.quotient.write()); - stream.extend(self.cumulative_sum.write()); - stream.extend(self.log_degree.write()); - stream - } -} - -impl Hintable for Vec> { - type HintVariable = Array>; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let len = builder.hint_var(); - let mut arr = builder.dyn_array(len); - builder.range(0, len).for_each(|i, builder| { - let hint = ChipOpenedValues::::read(builder); - builder.set(&mut arr, i, hint); - }); - arr - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - - let len = InnerVal::from_canonical_usize(self.len()); - stream.push(vec![len.into()]); - - self.iter().for_each(|arr| { - let comm = ChipOpenedValues::::write(arr); - stream.extend(comm); - }); - - stream - } -} - -impl Hintable for ShardOpenedValues { - type HintVariable = ShardOpenedValuesVariable; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let chips = Vec::>::read(builder); - ShardOpenedValuesVariable { chips } - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - stream.extend(self.chips.write()); - stream - } -} - -impl Hintable for ShardCommitment { - type HintVariable = ShardCommitmentVariable; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let main_commit = InnerDigest::read(builder); - let permutation_commit = InnerDigest::read(builder); - let quotient_commit = InnerDigest::read(builder); - ShardCommitmentVariable { main_commit, permutation_commit, quotient_commit } - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - let h: InnerDigest = self.main_commit.into(); - stream.extend(h.write()); - let h: InnerDigest = self.permutation_commit.into(); - stream.extend(h.write()); - let h: InnerDigest = self.quotient_commit.into(); - stream.extend(h.write()); - stream - } -} - -impl Hintable for DuplexChallenger { - type HintVariable = DuplexChallengerVariable; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let sponge_state = builder.hint_felts(); - let nb_inputs = builder.hint_var(); - let input_buffer = builder.hint_felts(); - let nb_outputs = builder.hint_var(); - let output_buffer = builder.hint_felts(); - DuplexChallengerVariable { - sponge_state, - nb_inputs, - input_buffer, - nb_outputs, - output_buffer, - } - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - stream.extend(self.sponge_state.to_vec().write()); - stream.extend(self.input_buffer.len().write()); - let mut input_padded = self.input_buffer.to_vec(); - input_padded.resize(PERMUTATION_WIDTH, InnerVal::zero()); - stream.extend(input_padded.write()); - stream.extend(self.output_buffer.len().write()); - let mut output_padded = self.output_buffer.to_vec(); - output_padded.resize(PERMUTATION_WIDTH, InnerVal::zero()); - stream.extend(output_padded.write()); - stream - } -} - -impl< - 'a, - SC: StarkGenericConfig< - Pcs = ::Pcs, - Challenge = ::Challenge, - Challenger = ::Challenger, - >, - A: MachineAir, - > Hintable for VerifyingKeyHint<'a, SC, A> -{ - type HintVariable 
= VerifyingKeyVariable; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let commitment = InnerDigest::read(builder); - let pc_start = InnerVal::read(builder); - let preprocessed_sorted_idxs = Vec::::read(builder); - let prep_domains = Vec::>::read(builder); - VerifyingKeyVariable { commitment, pc_start, preprocessed_sorted_idxs, prep_domains } - } - - fn write(&self) -> Vec::F>>> { - let (preprocessed_sorted_idxs, prep_domains) = get_preprocessed_data(self.machine, self.vk); - - let mut stream = Vec::new(); - let h: InnerDigest = self.vk.commit.into(); - stream.extend(h.write()); - stream.extend(self.vk.pc_start.write()); - stream.extend(preprocessed_sorted_idxs.write()); - stream.extend(prep_domains.write()); - stream - } -} - -// Implement Hintable for ShardProof where SC is equivalent to BabyBearPoseidon2 -impl< - 'a, - SC: StarkGenericConfig< - Pcs = ::Pcs, - Challenge = ::Challenge, - Challenger = ::Challenger, - >, - A: MachineAir, - > Hintable for ShardProofHint<'a, SC, A> -where - ShardCommitment>: Hintable, -{ - type HintVariable = ShardProofVariable; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let commitment = ShardCommitment::read(builder); - let opened_values = ShardOpenedValues::read(builder); - let opening_proof = InnerPcsProof::read(builder); - let public_values = Vec::::read(builder); - let quotient_data = Vec::::read(builder); - let sorted_idxs = Vec::::read(builder); - ShardProofVariable { - commitment, - opened_values, - opening_proof, - public_values, - quotient_data, - sorted_idxs, - } - } - - fn write(&self) -> Vec::F>>> { - let quotient_data = get_chip_quotient_data(self.machine, self.proof); - let sorted_indices = get_sorted_indices(self.machine, self.proof); - - [ - self.proof.commitment.write(), - self.proof.opened_values.write(), - self.proof.opening_proof.write(), - self.proof.public_values.write(), - quotient_data.write(), - sorted_indices.write(), - ] - .concat() - } -} - -impl<'a, A: MachineAir> Hintable - for SP1RecursionMemoryLayout<'a, BabyBearPoseidon2, A> -{ - type HintVariable = SP1RecursionMemoryLayoutVariable; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let vk = VerifyingKeyHint::<'a, BabyBearPoseidon2, A>::read(builder); - let shard_proofs = Vec::>::read(builder); - let leaf_challenger = DuplexChallenger::::read(builder); - let initial_reconstruct_challenger = - DuplexChallenger::::read(builder); - let is_complete = builder.hint_var(); - - SP1RecursionMemoryLayoutVariable { - vk, - shard_proofs, - leaf_challenger, - initial_reconstruct_challenger, - is_complete, - } - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - - let vk_hint = VerifyingKeyHint::<'a, BabyBearPoseidon2, _>::new(self.machine, self.vk); - - let proof_hints = self - .shard_proofs - .iter() - .map(|proof| ShardProofHint::::new(self.machine, proof)) - .collect::>(); - - stream.extend(vk_hint.write()); - stream.extend(proof_hints.write()); - stream.extend(self.leaf_challenger.write()); - stream.extend(self.initial_reconstruct_challenger.write()); - stream.extend((self.is_complete as usize).write()); - - stream - } -} - -impl<'a, A: MachineAir> Hintable - for SP1CompressMemoryLayout<'a, BabyBearPoseidon2, A> -{ - type HintVariable = SP1CompressMemoryLayoutVariable; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let compress_vk = VerifyingKeyHint::<'a, BabyBearPoseidon2, A>::read(builder); - let shard_proofs = Vec::>::read(builder); - let kinds = Vec::::read(builder); - let is_complete = builder.hint_var(); 
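// A minimal standalone sketch of how a recursion memory-layout witness is assembled by
// the `write` impls above: each field is serialized independently and the pieces are
// concatenated in the exact order the corresponding `read` consumes them (vk, then the
// shard proofs, then the challengers, then the `is_complete` flag). The row type and
// helper names are simplified stand-ins.
type Block = [u64; 4];
type Rows = Vec<Vec<Block>>;

fn write_flag(flag: bool) -> Rows {
    vec![vec![[flag as u64, 0, 0, 0]]]
}

fn layout_witness(
    vk: Rows,
    shard_proofs: Rows,
    leaf_challenger: Rows,
    initial_reconstruct_challenger: Rows,
    is_complete: bool,
) -> Rows {
    let mut stream = Rows::new();
    stream.extend(vk);
    stream.extend(shard_proofs);
    stream.extend(leaf_challenger);
    stream.extend(initial_reconstruct_challenger);
    stream.extend(write_flag(is_complete));
    stream
}

fn main() {
    let stream = layout_witness(Rows::new(), Rows::new(), Rows::new(), Rows::new(), true);
    assert_eq!(stream.last().unwrap()[0][0], 1);
}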
- - SP1CompressMemoryLayoutVariable { compress_vk, shard_proofs, kinds, is_complete } - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - - let compress_vk_hint = VerifyingKeyHint::<'a, BabyBearPoseidon2, _>::new( - self.recursive_machine, - self.compress_vk, - ); - - let proof_hints = self - .shard_proofs - .iter() - .map(|proof| ShardProofHint::::new(self.recursive_machine, proof)) - .collect::>(); - - let kinds = self.kinds.iter().map(|k| *k as usize).collect::>(); - - stream.extend(compress_vk_hint.write()); - stream.extend(proof_hints.write()); - stream.extend(kinds.write()); - stream.extend((self.is_complete as usize).write()); - - stream - } -} - -impl<'a, A: MachineAir> Hintable for SP1RootMemoryLayout<'a, BabyBearPoseidon2, A> { - type HintVariable = SP1RootMemoryLayoutVariable; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let proof = ShardProofHint::<'a, BabyBearPoseidon2, A>::read(builder); - let is_reduce = builder.hint_var(); - - SP1RootMemoryLayoutVariable { proof, is_reduce } - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - - let proof_hint = ShardProofHint::::new(self.machine, &self.proof); - - stream.extend(proof_hint.write()); - stream.extend((self.is_reduce as usize).write()); - - stream - } -} - -impl<'a, A: MachineAir> Hintable - for SP1DeferredMemoryLayout<'a, BabyBearPoseidon2, A> -{ - type HintVariable = SP1DeferredMemoryLayoutVariable; - - fn read(builder: &mut Builder) -> Self::HintVariable { - let compress_vk = VerifyingKeyHint::<'a, BabyBearPoseidon2, A>::read(builder); - let proofs = Vec::>::read(builder); - let start_reconstruct_deferred_digest = Vec::::read(builder); - let is_complete = builder.hint_var(); - - let sp1_vk = VerifyingKeyHint::<'a, BabyBearPoseidon2, RiscvAir<_>>::read(builder); - let committed_value_digest = Vec::>::read(builder); - let deferred_proofs_digest = Vec::::read(builder); - let leaf_challenger = DuplexChallenger::::read(builder); - let end_pc = InnerVal::read(builder); - let end_shard = InnerVal::read(builder); - let end_execution_shard = InnerVal::read(builder); - let init_addr_bits = Vec::::read(builder); - let finalize_addr_bits = Vec::::read(builder); - - SP1DeferredMemoryLayoutVariable { - compress_vk, - proofs, - start_reconstruct_deferred_digest, - is_complete, - sp1_vk, - committed_value_digest, - deferred_proofs_digest, - leaf_challenger, - end_pc, - end_shard, - end_execution_shard, - init_addr_bits, - finalize_addr_bits, - } - } - - fn write(&self) -> Vec::F>>> { - let mut stream = Vec::new(); - - let sp1_vk_hint = - VerifyingKeyHint::<'a, BabyBearPoseidon2, _>::new(self.sp1_machine, self.sp1_vk); - - let compress_vk_hint = - VerifyingKeyHint::<'a, BabyBearPoseidon2, _>::new(self.machine, self.compress_vk); - - let proof_hints = self - .proofs - .iter() - .map(|proof| ShardProofHint::::new(self.machine, proof)) - .collect::>(); - - let committed_value_digest = - self.committed_value_digest.iter().map(|w| w.0.to_vec()).collect::>(); - - stream.extend(compress_vk_hint.write()); - stream.extend(proof_hints.write()); - stream.extend(self.start_reconstruct_deferred_digest.write()); - stream.extend((self.is_complete as usize).write()); - - stream.extend(sp1_vk_hint.write()); - stream.extend(committed_value_digest.write()); - stream.extend(self.deferred_proofs_digest.write()); - stream.extend(self.leaf_challenger.write()); - stream.extend(self.end_pc.write()); - stream.extend(self.end_shard.write()); - stream.extend(self.end_execution_shard.write()); - 
stream.extend(self.init_addr_bits.to_vec().write()); - stream.extend(self.finalize_addr_bits.to_vec().write()); - - stream - } -} diff --git a/crates/recursion/program/src/lib.rs b/crates/recursion/program/src/lib.rs deleted file mode 100644 index dacc8c88c3..0000000000 --- a/crates/recursion/program/src/lib.rs +++ /dev/null @@ -1,13 +0,0 @@ -#![allow(type_alias_bounds)] -#![allow(clippy::type_complexity)] -#![allow(clippy::too_many_arguments)] - -pub mod challenger; -pub mod commit; -pub mod constraints; -pub mod fri; -pub mod hints; -pub mod machine; -pub mod stark; -pub mod types; -pub mod utils; diff --git a/crates/recursion/program/src/machine/compress.rs b/crates/recursion/program/src/machine/compress.rs deleted file mode 100644 index 75b57378ac..0000000000 --- a/crates/recursion/program/src/machine/compress.rs +++ /dev/null @@ -1,576 +0,0 @@ -use std::{ - array, - borrow::{Borrow, BorrowMut}, - marker::PhantomData, -}; - -use crate::machine::utils::assert_complete; -use itertools::{izip, Itertools}; -use p3_air::Air; -use p3_baby_bear::BabyBear; -use p3_commit::TwoAdicMultiplicativeCoset; -use p3_field::{AbstractField, PrimeField32, TwoAdicField}; -use serde::{Deserialize, Serialize}; -use sp1_primitives::{consts::WORD_SIZE, types::RecursionProgramType}; -use sp1_recursion_compiler::{ - config::InnerConfig, - ir::{Array, Builder, Config, Felt, Var}, - prelude::DslVariable, -}; -use sp1_recursion_core::{ - air::{RecursionPublicValues, RECURSIVE_PROOF_NUM_PV_ELTS}, - runtime::{RecursionProgram, D, DIGEST_SIZE}, -}; -use sp1_stark::{ - baby_bear_poseidon2::BabyBearPoseidon2, Com, ShardProof, StarkMachine, StarkVerifyingKey, Word, -}; - -use sp1_recursion_compiler::prelude::*; -use sp1_stark::{ - air::{MachineAir, POSEIDON_NUM_WORDS, PV_DIGEST_NUM_WORDS}, - StarkGenericConfig, -}; - -use crate::{ - challenger::{CanObserveVariable, DuplexChallengerVariable}, - fri::TwoAdicFriPcsVariable, - hints::Hintable, - stark::{RecursiveVerifierConstraintFolder, StarkVerifier}, - types::{ShardProofVariable, VerifyingKeyVariable}, - utils::{ - assert_challenger_eq_pv, assign_challenger_from_pv, const_fri_config, felt2var, - get_challenger_public_values, hash_vkey, - }, -}; - -use super::utils::{commit_public_values, proof_data_from_vk, verify_public_values_hash}; - -/// A program to verify a batch of recursive proofs and aggregate their public values. -#[derive(Debug, Clone, Copy)] -pub struct SP1CompressVerifier { - _phantom: PhantomData<(C, SC, A)>, -} - -/// The different types of programs that can be verified by the `SP1ReduceVerifier`. -#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] -pub enum ReduceProgramType { - /// A batch of proofs that are all SP1 Core proofs. - Core = 0, - /// A batch of proofs that are all deferred proofs. - Deferred = 1, - /// A batch of proofs that are reduce proofs of a higher level in the recursion tree. - Reduce = 2, -} - -/// An input layout for the reduce verifier. -pub struct SP1CompressMemoryLayout<'a, SC: StarkGenericConfig, A: MachineAir> { - pub compress_vk: &'a StarkVerifyingKey, - pub recursive_machine: &'a StarkMachine, - pub shard_proofs: Vec>, - pub is_complete: bool, - pub kinds: Vec, -} - -#[derive(DslVariable, Clone)] -pub struct SP1CompressMemoryLayoutVariable { - pub compress_vk: VerifyingKeyVariable, - pub shard_proofs: Array>, - pub kinds: Array>, - pub is_complete: Var, -} - -impl SP1CompressVerifier -where - A: MachineAir + for<'a> Air>, -{ - /// Create a new instance of the program for the [BabyBearPoseidon2] config. 
- pub fn build( - machine: &StarkMachine, - recursive_vk: &StarkVerifyingKey, - deferred_vk: &StarkVerifyingKey, - ) -> RecursionProgram { - let mut builder = Builder::::new(RecursionProgramType::Compress); - - let input: SP1CompressMemoryLayoutVariable<_> = builder.uninit(); - SP1CompressMemoryLayout::::witness(&input, &mut builder); - - let pcs = TwoAdicFriPcsVariable { - config: const_fri_config(&mut builder, machine.config().pcs().fri_config()), - }; - SP1CompressVerifier::verify(&mut builder, &pcs, machine, input, recursive_vk, deferred_vk); - - builder.halt(); - - builder.compile_program() - } -} - -impl SP1CompressVerifier -where - C::F: PrimeField32 + TwoAdicField, - SC: StarkGenericConfig< - Val = C::F, - Challenge = C::EF, - Domain = TwoAdicMultiplicativeCoset, - >, - A: MachineAir + for<'a> Air>, - Com: Into<[SC::Val; DIGEST_SIZE]>, -{ - /// Verify a batch of recursive proofs and aggregate their public values. - /// - /// The compression verifier can aggregate proofs of different kinds: - /// - Core proofs: proofs which are recursive proof of a batch of SP1 shard proofs. The - /// implementation in this function assumes a fixed recursive verifier speicified by - /// `recursive_vk`. - /// - Deferred proofs: proofs which are recursive proof of a batch of deferred proofs. The - /// implementation in this function assumes a fixed deferred verification program specified by - /// `deferred_vk`. - /// - Compress proofs: these are proofs which refer to a prove of this program. The key for it - /// is part of public values will be propagated accross all levels of recursion and will be - /// checked against itself as in [sp1_prover::Prover] or as in [super::SP1RootVerifier]. - pub fn verify( - builder: &mut Builder, - pcs: &TwoAdicFriPcsVariable, - machine: &StarkMachine, - input: SP1CompressMemoryLayoutVariable, - recursive_vk: &StarkVerifyingKey, - deferred_vk: &StarkVerifyingKey, - ) { - let SP1CompressMemoryLayoutVariable { compress_vk, shard_proofs, kinds, is_complete } = - input; - - // Initialize the values for the aggregated public output. - - let mut reduce_public_values_stream: Vec> = - (0..RECURSIVE_PROOF_NUM_PV_ELTS).map(|_| builder.uninit()).collect(); - let reduce_public_values: &mut RecursionPublicValues<_> = - reduce_public_values_stream.as_mut_slice().borrow_mut(); - - // Compute the digest of compress_vk and input the value to the public values. - let compress_vk_digest = hash_vkey(builder, &compress_vk); - - reduce_public_values.compress_vk_digest = - array::from_fn(|i| builder.get(&compress_vk_digest, i)); - - // Assert that there is at least one proof. - builder.assert_usize_ne(shard_proofs.len(), 0); - - // Assert that the number of proofs is equal to the number of kinds. - builder.assert_usize_eq(shard_proofs.len(), kinds.len()); - - // Initialize the consistency check variables. 
- let sp1_vk_digest: [Felt<_>; DIGEST_SIZE] = array::from_fn(|_| builder.uninit()); - let pc: Felt<_> = builder.uninit(); - let shard: Felt<_> = builder.uninit(); - let execution_shard: Felt<_> = builder.uninit(); - let mut initial_reconstruct_challenger = DuplexChallengerVariable::new(builder); - let mut reconstruct_challenger = DuplexChallengerVariable::new(builder); - let mut leaf_challenger = DuplexChallengerVariable::new(builder); - let committed_value_digest: [Word>; PV_DIGEST_NUM_WORDS] = - array::from_fn(|_| Word(array::from_fn(|_| builder.uninit()))); - let deferred_proofs_digest: [Felt<_>; POSEIDON_NUM_WORDS] = - array::from_fn(|_| builder.uninit()); - let reconstruct_deferred_digest: [Felt<_>; POSEIDON_NUM_WORDS] = - core::array::from_fn(|_| builder.uninit()); - let cumulative_sum: [Felt<_>; D] = core::array::from_fn(|_| builder.eval(C::F::zero())); - let init_addr_bits: [Felt<_>; 32] = core::array::from_fn(|_| builder.uninit()); - let finalize_addr_bits: [Felt<_>; 32] = core::array::from_fn(|_| builder.uninit()); - - // Collect verifying keys for each kind of program. - let recursive_vk_variable = proof_data_from_vk(builder, recursive_vk, machine); - let deferred_vk_variable = proof_data_from_vk(builder, deferred_vk, machine); - - // Get field values for the proof kind. - let core_kind = C::N::from_canonical_u32(ReduceProgramType::Core as u32); - let deferred_kind = C::N::from_canonical_u32(ReduceProgramType::Deferred as u32); - let reduce_kind = C::N::from_canonical_u32(ReduceProgramType::Reduce as u32); - - // Verify the shard proofs and connect the values. - builder.range(0, shard_proofs.len()).for_each(|i, builder| { - // Load the proof. - let proof = builder.get(&shard_proofs, i); - - // Get the kind of proof we are verifying. - let kind = builder.get(&kinds, i); - - // Verify the shard proof. - - // Initialize values for verifying key and proof data. - let vk: VerifyingKeyVariable<_> = builder.uninit(); - - // Set the correct value given the value of kind, and assert it must be one of the - // valid values. We can do that by nested `if-else` statements. - builder.if_eq(kind, core_kind).then_or_else( - |builder| { - builder.assign(vk.clone(), recursive_vk_variable.clone()); - }, - |builder| { - builder.if_eq(kind, deferred_kind).then_or_else( - |builder| { - builder.assign(vk.clone(), deferred_vk_variable.clone()); - }, - |builder| { - builder.if_eq(kind, reduce_kind).then_or_else( - |builder| { - builder.assign(vk.clone(), compress_vk.clone()); - }, - |builder| { - // If the kind is not one of the valid values, raise an error. - builder.error(); - }, - ); - }, - ); - }, - ); - - // Verify the shard proof given the correct data. - - // Prepare a challenger. - let mut challenger = DuplexChallengerVariable::new(builder); - - // Observe the vk and start pc. - challenger.observe(builder, vk.commitment.clone()); - challenger.observe(builder, vk.pc_start); - - // Observe the main commitment and public values. - challenger.observe(builder, proof.commitment.main_commit.clone()); - for j in 0..machine.num_pv_elts() { - let element = builder.get(&proof.public_values, j); - challenger.observe(builder, element); - } - - // Verify proof. - StarkVerifier::::verify_shard( - builder, - &vk, - pcs, - machine, - &mut challenger, - &proof, - true, - ); - - // Load the public values from the proof. 
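// A minimal standalone sketch of the verifying-key selection above: each proof in the
// batch carries a kind encoded as a small integer embedded in a field element, and the
// kind must match exactly one of the three known values, otherwise verification fails.
// The circuit expresses this as nested if/else branches; natively it is a match.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum ReduceProgramType { Core = 0, Deferred = 1, Reduce = 2 }

#[derive(Clone, Copy, Debug)]
enum VkChoice { Recursive, Deferred, Compress }

fn select_vk(kind: u32) -> Result<VkChoice, String> {
    match kind {
        k if k == ReduceProgramType::Core as u32 => Ok(VkChoice::Recursive),
        k if k == ReduceProgramType::Deferred as u32 => Ok(VkChoice::Deferred),
        k if k == ReduceProgramType::Reduce as u32 => Ok(VkChoice::Compress),
        other => Err(format!("invalid proof kind {other}")),
    }
}

fn main() {
    assert!(matches!(select_vk(0), Ok(VkChoice::Recursive)));
    assert!(select_vk(7).is_err());
}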
- let current_public_values_elements = (0..RECURSIVE_PROOF_NUM_PV_ELTS) - .map(|i| builder.get(&proof.public_values, i)) - .collect::>>(); - - let current_public_values: &RecursionPublicValues> = - current_public_values_elements.as_slice().borrow(); - - // Check that the public values digest is correct. - verify_public_values_hash(builder, current_public_values); - - // If the proof is the first proof, initialize the values. - builder.if_eq(i, C::N::zero()).then(|builder| { - // Initialize global and accumulated values. - - // Initialize the start of deferred digests. - for (digest, current_digest, global_digest) in izip!( - reconstruct_deferred_digest.iter(), - current_public_values.start_reconstruct_deferred_digest.iter(), - reduce_public_values.start_reconstruct_deferred_digest.iter() - ) { - builder.assign(*digest, *current_digest); - builder.assign(*global_digest, *current_digest); - } - - // Initialize the sp1_vk digest - for (digest, first_digest) in - sp1_vk_digest.iter().zip(current_public_values.sp1_vk_digest) - { - builder.assign(*digest, first_digest); - } - - // Initiallize start pc. - builder.assign(reduce_public_values.start_pc, current_public_values.start_pc); - builder.assign(pc, current_public_values.start_pc); - - // Initialize start shard. - builder.assign(shard, current_public_values.start_shard); - builder.assign(reduce_public_values.start_shard, current_public_values.start_shard); - - // Initialize start execution shard. - builder.assign(execution_shard, current_public_values.start_execution_shard); - builder.assign( - reduce_public_values.start_execution_shard, - current_public_values.start_execution_shard, - ); - - // Initialize the MemoryInitialize address bits. - for (bit, (first_bit, current_bit)) in init_addr_bits.iter().zip( - reduce_public_values - .previous_init_addr_bits - .iter() - .zip(current_public_values.previous_init_addr_bits.iter()), - ) { - builder.assign(*bit, *current_bit); - builder.assign(*first_bit, *current_bit); - } - - // Initialize the MemoryFinalize address bits. - for (bit, (first_bit, current_bit)) in finalize_addr_bits.iter().zip( - reduce_public_values - .previous_finalize_addr_bits - .iter() - .zip(current_public_values.previous_finalize_addr_bits.iter()), - ) { - builder.assign(*bit, *current_bit); - builder.assign(*first_bit, *current_bit); - } - - // Initialize the leaf challenger. - assign_challenger_from_pv( - builder, - &mut leaf_challenger, - current_public_values.leaf_challenger, - ); - - // Initialize the reconstruct challenger. - assign_challenger_from_pv( - builder, - &mut initial_reconstruct_challenger, - current_public_values.start_reconstruct_challenger, - ); - assign_challenger_from_pv( - builder, - &mut reconstruct_challenger, - current_public_values.start_reconstruct_challenger, - ); - - // Assign the commited values and deferred proof digests. - for (word, current_word) in committed_value_digest - .iter() - .zip_eq(current_public_values.committed_value_digest.iter()) - { - for (byte, current_byte) in word.0.iter().zip_eq(current_word.0.iter()) { - builder.assign(*byte, *current_byte); - } - } - - for (digest, current_digest) in deferred_proofs_digest - .iter() - .zip_eq(current_public_values.deferred_proofs_digest.iter()) - { - builder.assign(*digest, *current_digest); - } - }); - - // Assert that the current values match the accumulated values. - - // Assert that the start deferred digest is equal to the current deferred digest. 
- for (digest, current_digest) in reconstruct_deferred_digest - .iter() - .zip_eq(current_public_values.start_reconstruct_deferred_digest.iter()) - { - builder.assert_felt_eq(*digest, *current_digest); - } - - // Consistency checks for all accumulated values. - - // Assert that the sp1_vk digest is always the same. - for (digest, current) in sp1_vk_digest.iter().zip(current_public_values.sp1_vk_digest) { - builder.assert_felt_eq(*digest, current); - } - - // Assert that the start pc is equal to the current pc. - builder.assert_felt_eq(pc, current_public_values.start_pc); - - // Verify that the shard is equal to the current shard. - builder.assert_felt_eq(shard, current_public_values.start_shard); - - // Verfiy that the exeuction shard is equal to the current execution shard. - builder.assert_felt_eq(execution_shard, current_public_values.start_execution_shard); - - // Assert that the leaf challenger is always the same. - - // Assert that the MemoryInitialize address bits are the same. - for (bit, current_bit) in - init_addr_bits.iter().zip(current_public_values.previous_init_addr_bits.iter()) - { - builder.assert_felt_eq(*bit, *current_bit); - } - - // Assert that the MemoryFinalize address bits are the same. - for (bit, current_bit) in finalize_addr_bits - .iter() - .zip(current_public_values.previous_finalize_addr_bits.iter()) - { - builder.assert_felt_eq(*bit, *current_bit); - } - - assert_challenger_eq_pv( - builder, - &leaf_challenger, - current_public_values.leaf_challenger, - ); - // Assert that the current challenger matches the start reconstruct challenger. - assert_challenger_eq_pv( - builder, - &reconstruct_challenger, - current_public_values.start_reconstruct_challenger, - ); - - // Digest constraints. - { - // If `commited_value_digest` is not zero, then `public_values.commited_value_digest - // should be the current value. - let is_zero: Var<_> = builder.eval(C::N::one()); - #[allow(clippy::needless_range_loop)] - for i in 0..committed_value_digest.len() { - for j in 0..WORD_SIZE { - let d = felt2var(builder, committed_value_digest[i][j]); - builder.if_ne(d, C::N::zero()).then(|builder| { - builder.assign(is_zero, C::N::zero()); - }); - } - } - builder.if_eq(is_zero, C::N::zero()).then(|builder| { - #[allow(clippy::needless_range_loop)] - for i in 0..committed_value_digest.len() { - for j in 0..WORD_SIZE { - builder.assert_felt_eq( - committed_value_digest[i][j], - current_public_values.committed_value_digest[i][j], - ); - } - } - }); - - // Update the committed value digest. - #[allow(clippy::needless_range_loop)] - for i in 0..committed_value_digest.len() { - for j in 0..WORD_SIZE { - builder.assign( - committed_value_digest[i][j], - current_public_values.committed_value_digest[i][j], - ); - } - } - - // If `deferred_proofs_digest` is not zero, then - // `public_values.deferred_proofs_digest should be the current - // value. - let is_zero: Var<_> = builder.eval(C::N::one()); - #[allow(clippy::needless_range_loop)] - for i in 0..deferred_proofs_digest.len() { - let d = felt2var(builder, deferred_proofs_digest[i]); - builder.if_ne(d, C::N::zero()).then(|builder| { - builder.assign(is_zero, C::N::zero()); - }); - } - builder.if_eq(is_zero, C::N::zero()).then(|builder| { - #[allow(clippy::needless_range_loop)] - for i in 0..deferred_proofs_digest.len() { - builder.assert_felt_eq( - deferred_proofs_digest[i], - current_public_values.deferred_proofs_digest[i], - ); - } - }); - - // Update the deferred proofs digest. 
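// A minimal standalone sketch of the digest consistency rule enforced above: once the
// accumulated committed-value digest has become nonzero, every later proof must carry
// the same digest, after which the accumulator is overwritten with the current value.
// The circuit expresses this with an `is_zero` flag and conditional assertions; bytes
// stand in here for the field-element words of the real digest.
fn fold_digest(acc: &mut [u8; 32], current: [u8; 32]) -> Result<(), String> {
    let acc_is_zero = acc.iter().all(|&b| b == 0);
    if !acc_is_zero && *acc != current {
        return Err("committed value digest changed across proofs".into());
    }
    *acc = current;
    Ok(())
}

fn main() {
    let mut acc = [0u8; 32];
    fold_digest(&mut acc, [1; 32]).unwrap();          // first nonzero digest is adopted
    assert!(fold_digest(&mut acc, [2; 32]).is_err()); // later proofs must agree
}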
- #[allow(clippy::needless_range_loop)] - for i in 0..deferred_proofs_digest.len() { - builder.assign( - deferred_proofs_digest[i], - current_public_values.deferred_proofs_digest[i], - ); - } - } - - // Update the deferred proof digest. - for (digest, current_digest) in reconstruct_deferred_digest - .iter() - .zip_eq(current_public_values.end_reconstruct_deferred_digest.iter()) - { - builder.assign(*digest, *current_digest); - } - - // Update the accumulated values. - // Update pc to be the next pc. - builder.assign(pc, current_public_values.next_pc); - - // Update the shard to be the next shard. - builder.assign(shard, current_public_values.next_shard); - - // Update the execution shard to be the next execution shard. - builder.assign(execution_shard, current_public_values.next_execution_shard); - - // Update the MemoryInitialize address bits. - for (bit, next_bit) in - init_addr_bits.iter().zip(current_public_values.last_init_addr_bits.iter()) - { - builder.assign(*bit, *next_bit); - } - - // Update the MemoryFinalize address bits. - for (bit, next_bit) in - finalize_addr_bits.iter().zip(current_public_values.last_finalize_addr_bits.iter()) - { - builder.assign(*bit, *next_bit); - } - - // Update the reconstruct challenger. - assign_challenger_from_pv( - builder, - &mut reconstruct_challenger, - current_public_values.end_reconstruct_challenger, - ); - - // Update the cumulative sum. - for (sum_element, current_sum_element) in - cumulative_sum.iter().zip_eq(current_public_values.cumulative_sum.iter()) - { - builder.assign(*sum_element, *sum_element + *current_sum_element); - } - }); - - // Update the global values from the last accumulated values. - // Set sp1_vk digest to the one from the proof values. - reduce_public_values.sp1_vk_digest = sp1_vk_digest; - // Set next_pc to be the last pc (which is the same as accumulated pc) - reduce_public_values.next_pc = pc; - // Set next shard to be the last shard - reduce_public_values.next_shard = shard; - // Set next execution shard to be the last execution shard - reduce_public_values.next_execution_shard = execution_shard; - // Set the MemoryInitialize address bits to be the last MemoryInitialize address bits. - reduce_public_values.last_init_addr_bits = init_addr_bits; - // Set the MemoryFinalize address bits to be the last MemoryFinalize address bits. - reduce_public_values.last_finalize_addr_bits = finalize_addr_bits; - // Set the leaf challenger to it's value. - let values = get_challenger_public_values(builder, &leaf_challenger); - reduce_public_values.leaf_challenger = values; - // Set the start reconstruct challenger to be the initial reconstruct challenger. - let values = get_challenger_public_values(builder, &initial_reconstruct_challenger); - reduce_public_values.start_reconstruct_challenger = values; - // Set the end reconstruct challenger to be the last reconstruct challenger. - let values = get_challenger_public_values(builder, &reconstruct_challenger); - reduce_public_values.end_reconstruct_challenger = values; - // Set the start reconstruct deferred digest to be the last reconstruct deferred digest. - reduce_public_values.end_reconstruct_deferred_digest = reconstruct_deferred_digest; - // Assign the deferred proof digests. - reduce_public_values.deferred_proofs_digest = deferred_proofs_digest; - // Assign the committed value digests. - reduce_public_values.committed_value_digest = committed_value_digest; - // Assign the cumulative sum. 
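// A minimal standalone sketch of the cumulative-sum bookkeeping above: every verified
// proof contributes its chips' permutation-argument cumulative sums to one running
// total, and a complete proof is expected to have that total cancel to zero. Toy
// modular arithmetic stands in for the extension field.
const P: u64 = 2_013_265_921;

fn accumulate(chip_sums: &[u64]) -> u64 {
    chip_sums.iter().fold(0, |acc, &s| (acc + s) % P)
}

fn main() {
    // Two contributions whose interaction sums cancel modulo P.
    let total = accumulate(&[5, P - 5]);
    assert_eq!(total, 0, "a complete proof must have a zero cumulative sum");
}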
- reduce_public_values.cumulative_sum = cumulative_sum; - - // If the proof is complete, make completeness assertions and set the flag. Otherwise, check - // the flag is zero and set the public value to zero. - builder.if_eq(is_complete, C::N::one()).then_or_else( - |builder| { - builder.assign(reduce_public_values.is_complete, C::F::one()); - assert_complete(builder, reduce_public_values, &reconstruct_challenger) - }, - |builder| { - builder.assert_var_eq(is_complete, C::N::zero()); - builder.assign(reduce_public_values.is_complete, C::F::zero()); - }, - ); - - commit_public_values(builder, reduce_public_values); - } -} diff --git a/crates/recursion/program/src/machine/core.rs b/crates/recursion/program/src/machine/core.rs deleted file mode 100644 index 560f6dcc0d..0000000000 --- a/crates/recursion/program/src/machine/core.rs +++ /dev/null @@ -1,616 +0,0 @@ -use std::{ - array, - borrow::{Borrow, BorrowMut}, - marker::PhantomData, -}; - -use itertools::Itertools; -use p3_baby_bear::BabyBear; -use p3_commit::TwoAdicMultiplicativeCoset; -use p3_field::{AbstractField, PrimeField32, TwoAdicField}; -use sp1_core_machine::{cpu::MAX_CPU_LOG_DEGREE, riscv::RiscvAir}; -use sp1_primitives::{consts::WORD_SIZE, types::RecursionProgramType}; -use sp1_recursion_compiler::{ - config::InnerConfig, - ir::{Array, Builder, Config, Ext, ExtConst, Felt, Var}, - prelude::{DslVariable, *}, -}; -use sp1_recursion_core::{ - air::{RecursionPublicValues, RECURSIVE_PROOF_NUM_PV_ELTS}, - runtime::{RecursionProgram, DIGEST_SIZE}, -}; -use sp1_stark::{ - air::{MachineAir, PublicValues, POSEIDON_NUM_WORDS, PV_DIGEST_NUM_WORDS}, - baby_bear_poseidon2::BabyBearPoseidon2, - Com, ShardProof, StarkGenericConfig, StarkMachine, StarkVerifyingKey, Word, -}; - -use crate::{ - challenger::{CanObserveVariable, DuplexChallengerVariable}, - fri::TwoAdicFriPcsVariable, - hints::Hintable, - stark::{StarkVerifier, EMPTY}, - types::{ShardProofVariable, VerifyingKeyVariable}, - utils::{const_fri_config, felt2var, get_challenger_public_values, hash_vkey, var2felt}, -}; - -use super::utils::{assert_complete, commit_public_values}; - -/// A program for recursively verifying a batch of SP1 proofs. -#[derive(Debug, Clone, Copy)] -pub struct SP1RecursiveVerifier { - _phantom: PhantomData<(C, SC)>, -} - -pub struct SP1RecursionMemoryLayout<'a, SC: StarkGenericConfig, A: MachineAir> { - pub vk: &'a StarkVerifyingKey, - pub machine: &'a StarkMachine, - pub shard_proofs: Vec>, - pub leaf_challenger: &'a SC::Challenger, - pub initial_reconstruct_challenger: SC::Challenger, - pub is_complete: bool, -} - -#[derive(DslVariable, Clone)] -pub struct SP1RecursionMemoryLayoutVariable { - pub vk: VerifyingKeyVariable, - pub shard_proofs: Array>, - pub leaf_challenger: DuplexChallengerVariable, - pub initial_reconstruct_challenger: DuplexChallengerVariable, - pub is_complete: Var, -} - -impl SP1RecursiveVerifier { - /// Create a new instance of the program for the [BabyBearPoseidon2] config. 
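// A minimal standalone sketch of the completeness-flag handling above: the witnessed
// flag must be boolean; when it is set, the extra completeness checks run and the flag
// is exposed in the public values, otherwise the exposed value is forced to zero.
fn fold_completeness(
    is_complete: u64,
    run_complete_checks: impl FnOnce() -> Result<(), String>,
) -> Result<u64, String> {
    match is_complete {
        1 => { run_complete_checks()?; Ok(1) }
        0 => Ok(0),
        _ => Err("is_complete must be boolean".into()),
    }
}

fn main() {
    assert_eq!(fold_completeness(0, || Ok(())).unwrap(), 0);
    assert_eq!(fold_completeness(1, || Ok(())).unwrap(), 1);
    assert!(fold_completeness(2, || Ok(())).is_err());
}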
- pub fn build( - machine: &StarkMachine>, - ) -> RecursionProgram { - let mut builder = Builder::::new(RecursionProgramType::Core); - - let input: SP1RecursionMemoryLayoutVariable<_> = builder.uninit(); - SP1RecursionMemoryLayout::>::witness(&input, &mut builder); - - let pcs = TwoAdicFriPcsVariable { - config: const_fri_config(&mut builder, machine.config().pcs().fri_config()), - }; - SP1RecursiveVerifier::verify(&mut builder, &pcs, machine, input); - - builder.halt(); - - builder.compile_program() - } -} - -impl SP1RecursiveVerifier -where - C::F: PrimeField32 + TwoAdicField, - SC: StarkGenericConfig< - Val = C::F, - Challenge = C::EF, - Domain = TwoAdicMultiplicativeCoset, - >, - Com: Into<[SC::Val; DIGEST_SIZE]>, -{ - /// Verify a batch of SP1 shard proofs and aggregate their public values. - /// - /// This program represents a first recursive step in the verification of an SP1 proof - /// consisting of one or more shards. Each shard proof is verified and its public values are - /// aggregated into a single set representing the start and end state of the program execution - /// across all shards. - /// - /// # Constraints - /// - /// ## Verifying the STARK proofs. - /// For each shard, the verifier asserts the correctness of the STARK proof which is composed - /// of verifying the FRI proof for openings and verifying the constraints. - /// - /// ## Aggregating the shard public values. - /// See [SP1Prover::verify] for the verification algorithm of a complete SP1 proof. In this - /// function, we are aggregating several shard proofs and attesting to an aggregated state which - /// represents all the shards. - /// - /// ## The leaf challenger. - /// A key difference between the recursive tree verification and the complete one in - /// [SP1Prover::verify] is that the recursive verifier has no way of reconstructing the - /// chanllenger only from a part of the shard proof. Therefore, the value of the leaf challenger - /// is witnessed in the program and the verifier asserts correctness given this challenger. - /// In the course of the recursive verification, the challenger is reconstructed by observing - /// the commitments one by one, and in the final step, the challenger is asserted to be the same - /// as the one witnessed here. - pub fn verify( - builder: &mut Builder, - pcs: &TwoAdicFriPcsVariable, - machine: &StarkMachine>, - input: SP1RecursionMemoryLayoutVariable, - ) { - // Read input. - let SP1RecursionMemoryLayoutVariable { - vk, - shard_proofs, - leaf_challenger, - initial_reconstruct_challenger, - is_complete, - } = input; - - // Initialize shard variables. - let initial_shard = builder.uninit(); - let current_shard = builder.uninit(); - - // Initialize execution shard variables. - let initial_execution_shard = builder.uninit(); - let current_execution_shard = builder.uninit(); - - // Initialize program counter variables. - let start_pc = builder.uninit(); - let current_pc = builder.uninit(); - - // Initialize memory initialization and finalization variables. - let initial_previous_init_addr_bits: [Felt<_>; 32] = array::from_fn(|_| builder.uninit()); - let initial_previous_finalize_addr_bits: [Felt<_>; 32] = - array::from_fn(|_| builder.uninit()); - let current_init_addr_bits: [Felt<_>; 32] = array::from_fn(|_| builder.uninit()); - let current_finalize_addr_bits: [Felt<_>; 32] = array::from_fn(|_| builder.uninit()); - - // Initialize the exit code variable. - let exit_code: Felt<_> = builder.uninit(); - - // Initialize the public values digest. 
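// A minimal standalone sketch of the shard-chaining invariant described in the doc
// comment above: each shard must start where the previous one ended (program counter
// and shard number), and the aggregate exposes only the overall start and end state.
// Field names and the halt convention (next_pc of 0) are illustrative.
#[derive(Clone, Copy, Debug)]
struct ShardPv { start_pc: u64, next_pc: u64, shard: u64 }

fn aggregate(shards: &[ShardPv]) -> Result<(u64, u64), String> {
    let first = shards.first().ok_or("no shards")?;
    let mut expected_pc = first.start_pc;
    let mut expected_shard = first.shard;
    for pv in shards {
        if pv.start_pc != expected_pc || pv.shard != expected_shard {
            return Err("shard transition mismatch".into());
        }
        expected_pc = pv.next_pc;
        expected_shard += 1;
    }
    Ok((first.start_pc, expected_pc))
}

fn main() {
    let shards = [
        ShardPv { start_pc: 0x1000, next_pc: 0x2000, shard: 1 },
        ShardPv { start_pc: 0x2000, next_pc: 0, shard: 2 },
    ];
    assert_eq!(aggregate(&shards).unwrap(), (0x1000, 0));
}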
- let committed_value_digest: [Word>; PV_DIGEST_NUM_WORDS] = - array::from_fn(|_| Word(array::from_fn(|_| builder.uninit()))); - - // Initialize the deferred proofs digest. - let deferred_proofs_digest: [Felt<_>; POSEIDON_NUM_WORDS] = - array::from_fn(|_| builder.uninit()); - - // Initialize the challenger variables. - let leaf_challenger_public_values = get_challenger_public_values(builder, &leaf_challenger); - let mut reconstruct_challenger: DuplexChallengerVariable<_> = - initial_reconstruct_challenger.copy(builder); - - // Initialize the cumulative sum. - let cumulative_sum: Ext<_, _> = builder.eval(C::EF::zero().cons()); - - // Assert that the number of proofs is not zero. - builder.assert_usize_ne(shard_proofs.len(), 0); - - // Verify proofs, validate transitions, and update accumulation variables. - builder.range(0, shard_proofs.len()).for_each(|i, builder| { - // Load the proof. - let proof = builder.get(&shard_proofs, i); - - // Compute some flags about which chips exist in the shard. - let contains_cpu: Var<_> = builder.eval(C::N::zero()); - let contains_memory_init: Var<_> = builder.eval(C::N::zero()); - let contains_memory_finalize: Var<_> = builder.eval(C::N::zero()); - for (i, chip) in machine.chips().iter().enumerate() { - let index = builder.get(&proof.sorted_idxs, i); - if chip.name() == "CPU" { - builder.if_ne(index, C::N::from_canonical_usize(EMPTY)).then(|builder| { - builder.assign(contains_cpu, C::N::one()); - }); - } else if chip.name() == "MemoryInit" { - builder.if_ne(index, C::N::from_canonical_usize(EMPTY)).then(|builder| { - builder.assign(contains_memory_init, C::N::one()); - }); - } else if chip.name() == "MemoryFinalize" { - builder.if_ne(index, C::N::from_canonical_usize(EMPTY)).then(|builder| { - builder.assign(contains_memory_finalize, C::N::one()); - }); - } - } - - // Extract public values. - let mut pv_elements = Vec::new(); - for i in 0..machine.num_pv_elts() { - let element = builder.get(&proof.public_values, i); - pv_elements.push(element); - } - let public_values: &PublicValues>, Felt<_>> = - pv_elements.as_slice().borrow(); - - // If this is the first proof in the batch, initialize the variables. - builder.if_eq(i, C::N::zero()).then(|builder| { - // Shard. - builder.assign(initial_shard, public_values.shard); - builder.assign(current_shard, public_values.shard); - - // Execution shard. - builder.assign(initial_execution_shard, public_values.execution_shard); - builder.assign(current_execution_shard, public_values.execution_shard); - - // Program counter. - builder.assign(start_pc, public_values.start_pc); - builder.assign(current_pc, public_values.start_pc); - - // Memory initialization & finalization. - for ((bit, pub_bit), first_bit) in current_init_addr_bits - .iter() - .zip(public_values.previous_init_addr_bits.iter()) - .zip(initial_previous_init_addr_bits.iter()) - { - builder.assign(*bit, *pub_bit); - builder.assign(*first_bit, *pub_bit); - } - for ((bit, pub_bit), first_bit) in current_finalize_addr_bits - .iter() - .zip(public_values.previous_finalize_addr_bits.iter()) - .zip(initial_previous_finalize_addr_bits.iter()) - { - builder.assign(*bit, *pub_bit); - builder.assign(*first_bit, *pub_bit); - } - - // Exit code. - builder.assign(exit_code, public_values.exit_code); - - // Commited public values digests. 
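// A minimal standalone sketch of the chip-presence flags computed above: a shard proof
// carries a sorted index per chip, with a sentinel EMPTY value meaning the chip has no
// trace in the shard, and the verifier turns that into boolean flags for the CPU and
// memory chips. The sentinel value used here is illustrative.
const EMPTY: usize = usize::MAX;

fn contains_chip(chip_names: &[&str], sorted_idxs: &[usize], name: &str) -> bool {
    chip_names
        .iter()
        .zip(sorted_idxs)
        .any(|(n, &idx)| *n == name && idx != EMPTY)
}

fn main() {
    let chips = ["CPU", "MemoryInit", "MemoryFinalize"];
    let idxs = [0, EMPTY, 2];
    assert!(contains_chip(&chips, &idxs, "CPU"));
    assert!(!contains_chip(&chips, &idxs, "MemoryInit"));
}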
- for (word, first_word) in committed_value_digest - .iter() - .zip_eq(public_values.committed_value_digest.iter()) - { - for (byte, first_byte) in word.0.iter().zip_eq(first_word.0.iter()) { - builder.assign(*byte, *first_byte); - } - } - - // Deferred proofs digests. - for (digest, first_digest) in deferred_proofs_digest - .iter() - .zip_eq(public_values.deferred_proofs_digest.iter()) - { - builder.assign(*digest, *first_digest); - } - }); - - // If the shard is the first shard, assert that the initial challenger is equal to a - // fresh challenger observing the verifier key and the initial pc. - let shard = felt2var(builder, public_values.shard); - builder.if_eq(shard, C::N::one()).then(|builder| { - let mut first_initial_challenger = DuplexChallengerVariable::new(builder); - first_initial_challenger.observe(builder, vk.commitment.clone()); - first_initial_challenger.observe(builder, vk.pc_start); - initial_reconstruct_challenger.assert_eq(builder, &first_initial_challenger); - }); - - // Verify the shard. - // - // Do not verify the cumulative sum here, since the permutation challenge is shared - // between all shards. - let mut challenger = leaf_challenger.copy(builder); - StarkVerifier::::verify_shard( - builder, - &vk, - pcs, - machine, - &mut challenger, - &proof, - false, - ); - - // First shard has a "CPU" constraint. - { - builder.if_eq(shard, C::N::one()).then(|builder| { - builder.assert_var_eq(contains_cpu, C::N::one()); - }); - } - - // CPU log degree bound check constraints. - { - for (i, chip) in machine.chips().iter().enumerate() { - if chip.name() == "CPU" { - builder.if_eq(contains_cpu, C::N::one()).then(|builder| { - let index = builder.get(&proof.sorted_idxs, i); - let cpu_log_degree = - builder.get(&proof.opened_values.chips, index).log_degree; - let cpu_log_degree_lt_max: Var<_> = builder.eval(C::N::zero()); - builder.range(0, MAX_CPU_LOG_DEGREE + 1).for_each(|j, builder| { - builder.if_eq(j, cpu_log_degree).then(|builder| { - builder.assign(cpu_log_degree_lt_max, C::N::one()); - }); - }); - builder.assert_var_eq(cpu_log_degree_lt_max, C::N::one()); - }); - } - } - } - - // Shard constraints. - { - // Assert that the shard of the proof is equal to the current shard. - builder.assert_felt_eq(current_shard, public_values.shard); - - // Increment the current shard by one. - builder.assign(current_shard, current_shard + C::F::one()); - } - - // Execution shard constraints. - let execution_shard = felt2var(builder, public_values.execution_shard); - { - // Assert that the shard of the proof is equal to the current shard. - builder.if_eq(contains_cpu, C::N::one()).then(|builder| { - builder.assert_felt_eq(current_execution_shard, public_values.execution_shard); - }); - - // If the shard has a "CPU" chip, then the execution shard should be incremented by - // 1. - builder.if_eq(contains_cpu, C::N::one()).then(|builder| { - builder.assign(current_execution_shard, current_execution_shard + C::F::one()); - }); - } - - // Program counter constraints. - { - // If it's the first shard (which is the first execution shard), then the start_pc - // should be vk.pc_start. - builder.if_eq(shard, C::N::one()).then(|builder| { - builder.assert_felt_eq(public_values.start_pc, vk.pc_start); - }); - - // Assert that the start_pc of the proof is equal to the current pc. - builder.assert_felt_eq(current_pc, public_values.start_pc); - - // If it's not a shard with "CPU", then assert that the start_pc equals the next_pc. 
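// A minimal standalone sketch of the CPU log-degree bound check above: the circuit has
// no native `<=` over variables, so it scans 0..=MAX_CPU_LOG_DEGREE, sets a flag when
// the claimed value is hit, and asserts the flag. Natively this is just a comparison;
// the bound used below is illustrative rather than the crate's actual constant.
const MAX_CPU_LOG_DEGREE: usize = 22;

fn cpu_log_degree_in_range(cpu_log_degree: usize) -> bool {
    // Circuit-style membership scan, equivalent to cpu_log_degree <= MAX_CPU_LOG_DEGREE.
    (0..=MAX_CPU_LOG_DEGREE).any(|j| j == cpu_log_degree)
}

fn main() {
    assert!(cpu_log_degree_in_range(20));
    assert!(!cpu_log_degree_in_range(MAX_CPU_LOG_DEGREE + 1));
}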
- builder.if_ne(contains_cpu, C::N::one()).then(|builder| { - builder.assert_felt_eq(public_values.start_pc, public_values.next_pc); - }); - - // If it's a shard with "CPU", then assert that the start_pc is not zero. - builder.if_eq(contains_cpu, C::N::one()).then(|builder| { - builder.assert_felt_ne(public_values.start_pc, C::F::zero()); - }); - - // Update current_pc to be the end_pc of the current proof. - builder.assign(current_pc, public_values.next_pc); - } - - // Exit code constraints. - { - // Assert that the exit code is zero (success) for all proofs. - builder.assert_felt_eq(exit_code, C::F::zero()); - } - - // Memory initialization & finalization constraints. - { - // Assert that `init_addr_bits` and `finalize_addr_bits` are zero for the first - // execution shard. - builder.if_eq(execution_shard, C::N::one()).then(|builder| { - // Assert that the MemoryInitialize address bits are zero. - for bit in current_init_addr_bits.iter() { - builder.assert_felt_eq(*bit, C::F::zero()); - } - - // Assert that the MemoryFinalize address bits are zero. - for bit in current_finalize_addr_bits.iter() { - builder.assert_felt_eq(*bit, C::F::zero()); - } - }); - - // Assert that the MemoryInitialize address bits match the current loop variable. - for (bit, current_bit) in current_init_addr_bits - .iter() - .zip_eq(public_values.previous_init_addr_bits.iter()) - { - builder.assert_felt_eq(*bit, *current_bit); - } - - // Assert that the MemoryFinalize address bits match the current loop variable. - for (bit, current_bit) in current_finalize_addr_bits - .iter() - .zip_eq(public_values.previous_finalize_addr_bits.iter()) - { - builder.assert_felt_eq(*bit, *current_bit); - } - - // Assert that if MemoryInit is not present, then the address bits are the same. - builder.if_ne(contains_memory_init, C::N::one()).then(|builder| { - for (prev_bit, last_bit) in public_values - .previous_init_addr_bits - .iter() - .zip_eq(public_values.last_init_addr_bits.iter()) - { - builder.assert_felt_eq(*prev_bit, *last_bit); - } - }); - - // Assert that if MemoryFinalize is not present, then the address bits are the same. - builder.if_ne(contains_memory_finalize, C::N::one()).then(|builder| { - for (prev_bit, last_bit) in public_values - .previous_finalize_addr_bits - .iter() - .zip_eq(public_values.last_finalize_addr_bits.iter()) - { - builder.assert_felt_eq(*prev_bit, *last_bit); - } - }); - - // Update the MemoryInitialize address bits. - for (bit, pub_bit) in - current_init_addr_bits.iter().zip(public_values.last_init_addr_bits.iter()) - { - builder.assign(*bit, *pub_bit); - } - - // Update the MemoryFinalize address bits. - for (bit, pub_bit) in current_finalize_addr_bits - .iter() - .zip(public_values.last_finalize_addr_bits.iter()) - { - builder.assign(*bit, *pub_bit); - } - } - - // Digest constraints. - { - // If `commited_value_digest` is not zero, then `public_values.commited_value_digest - // should be the current value. 
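The rule stated just above is enforced by the block that follows. As a plain-Rust restatement (a sketch only: `u32` bytes stand in for the circuit's `Word<Felt<_>>` values, and the 8-words-of-4-bytes digest shape is assumed for illustration):

// Once the accumulated digest is non-zero it may no longer change, and a shard
// without a CPU chip cannot change it either; the accumulator is then advanced
// to the value claimed by the current proof.
fn update_committed_value_digest(
    contains_cpu: bool,
    current: &mut [[u32; 4]; 8],
    from_proof: &[[u32; 4]; 8],
) {
    let already_set = current.iter().flatten().any(|&byte| byte != 0);
    if already_set || !contains_cpu {
        assert_eq!(current, from_proof, "committed_value_digest changed unexpectedly");
    }
    *current = *from_proof;
}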
- let is_zero: Var<_> = builder.eval(C::N::one()); - #[allow(clippy::needless_range_loop)] - for i in 0..committed_value_digest.len() { - for j in 0..WORD_SIZE { - let d = felt2var(builder, committed_value_digest[i][j]); - builder.if_ne(d, C::N::zero()).then(|builder| { - builder.assign(is_zero, C::N::zero()); - }); - } - } - builder.if_eq(is_zero, C::N::zero()).then(|builder| { - #[allow(clippy::needless_range_loop)] - for i in 0..committed_value_digest.len() { - for j in 0..WORD_SIZE { - builder.assert_felt_eq( - committed_value_digest[i][j], - public_values.committed_value_digest[i][j], - ); - } - } - }); - - // If it's not a shard with "CPU", then the committed value digest should not - // change. - builder.if_ne(contains_cpu, C::N::one()).then(|builder| { - #[allow(clippy::needless_range_loop)] - for i in 0..committed_value_digest.len() { - for j in 0..WORD_SIZE { - builder.assert_felt_eq( - committed_value_digest[i][j], - public_values.committed_value_digest[i][j], - ); - } - } - }); - - // Update the committed value digest. - #[allow(clippy::needless_range_loop)] - for i in 0..committed_value_digest.len() { - for j in 0..WORD_SIZE { - builder.assign( - committed_value_digest[i][j], - public_values.committed_value_digest[i][j], - ); - } - } - - // If `deferred_proofs_digest` is not zero, then - // `public_values.deferred_proofs_digest should be the current - // value. - let is_zero: Var<_> = builder.eval(C::N::one()); - #[allow(clippy::needless_range_loop)] - for i in 0..deferred_proofs_digest.len() { - let d = felt2var(builder, deferred_proofs_digest[i]); - builder.if_ne(d, C::N::zero()).then(|builder| { - builder.assign(is_zero, C::N::zero()); - }); - } - builder.if_eq(is_zero, C::N::zero()).then(|builder| { - #[allow(clippy::needless_range_loop)] - for i in 0..deferred_proofs_digest.len() { - builder.assert_felt_eq( - deferred_proofs_digest[i], - public_values.deferred_proofs_digest[i], - ); - } - }); - - // If it's not a shard with "CPU", then the deferred proofs digest should not - // change. - builder.if_ne(contains_cpu, C::N::one()).then(|builder| { - #[allow(clippy::needless_range_loop)] - for i in 0..deferred_proofs_digest.len() { - builder.assert_felt_eq( - deferred_proofs_digest[i], - public_values.deferred_proofs_digest[i], - ); - } - }); - - // Update the deferred proofs digest. - #[allow(clippy::needless_range_loop)] - for i in 0..deferred_proofs_digest.len() { - builder - .assign(deferred_proofs_digest[i], public_values.deferred_proofs_digest[i]); - } - } - - // Verify that the number of shards is not too large. - builder.range_check_f(public_values.shard, 16); - - // Update the reconstruct challenger. - reconstruct_challenger.observe(builder, proof.commitment.main_commit.clone()); - for j in 0..machine.num_pv_elts() { - let element = builder.get(&proof.public_values, j); - reconstruct_challenger.observe(builder, element); - } - - // Cumulative sum is updated by sums of all chips. - let opened_values = proof.opened_values.chips; - builder.range(0, opened_values.len()).for_each(|k, builder| { - let values = builder.get(&opened_values, k); - let sum = values.cumulative_sum; - builder.assign(cumulative_sum, cumulative_sum + sum); - }); - }); - - // Write all values to the public values struct and commit to them. - { - // Compute the vk digest. - let vk_digest = hash_vkey(builder, &vk); - let vk_digest: [Felt<_>; DIGEST_SIZE] = array::from_fn(|i| builder.get(&vk_digest, i)); - - // Collect the public values for challengers. 
- let initial_challenger_public_values = - get_challenger_public_values(builder, &initial_reconstruct_challenger); - let final_challenger_public_values = - get_challenger_public_values(builder, &reconstruct_challenger); - - // Collect the cumulative sum. - let cumulative_sum_array = builder.ext2felt(cumulative_sum); - let cumulative_sum_array = array::from_fn(|i| builder.get(&cumulative_sum_array, i)); - - // Collect the deferred proof digests. - let zero: Felt<_> = builder.eval(C::F::zero()); - let start_deferred_digest = [zero; POSEIDON_NUM_WORDS]; - let end_deferred_digest = [zero; POSEIDON_NUM_WORDS]; - - // Collect the is_complete flag. - let is_complete_felt = var2felt(builder, is_complete); - - // Initialize the public values we will commit to. - let mut recursion_public_values_stream = [zero; RECURSIVE_PROOF_NUM_PV_ELTS]; - let recursion_public_values: &mut RecursionPublicValues<_> = - recursion_public_values_stream.as_mut_slice().borrow_mut(); - recursion_public_values.committed_value_digest = committed_value_digest; - recursion_public_values.deferred_proofs_digest = deferred_proofs_digest; - recursion_public_values.start_pc = start_pc; - recursion_public_values.next_pc = current_pc; - recursion_public_values.start_shard = initial_shard; - recursion_public_values.next_shard = current_shard; - recursion_public_values.start_execution_shard = initial_execution_shard; - recursion_public_values.next_execution_shard = current_execution_shard; - recursion_public_values.previous_init_addr_bits = initial_previous_init_addr_bits; - recursion_public_values.last_init_addr_bits = current_init_addr_bits; - recursion_public_values.previous_finalize_addr_bits = - initial_previous_finalize_addr_bits; - recursion_public_values.last_finalize_addr_bits = current_finalize_addr_bits; - recursion_public_values.sp1_vk_digest = vk_digest; - recursion_public_values.leaf_challenger = leaf_challenger_public_values; - recursion_public_values.start_reconstruct_challenger = initial_challenger_public_values; - recursion_public_values.end_reconstruct_challenger = final_challenger_public_values; - recursion_public_values.cumulative_sum = cumulative_sum_array; - recursion_public_values.start_reconstruct_deferred_digest = start_deferred_digest; - recursion_public_values.end_reconstruct_deferred_digest = end_deferred_digest; - recursion_public_values.exit_code = exit_code; - recursion_public_values.is_complete = is_complete_felt; - - // If the proof represents a complete proof, make completeness assertions. - // - // *Remark*: In this program, this only happends if there is one shard and the program - // has no deferred proofs to verify. However, the completeness check is - // independent of these facts. 
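The remark above refers to the completeness conditions that `assert_complete` (removed from machine/utils.rs further down in this diff) checks before a proof may claim `is_complete`. Restated as a boolean predicate, with `u64` standing in for BabyBear elements and the challenger-equality check omitted for brevity:

fn completeness_shape_ok(
    next_pc: u64,
    start_shard: u64,
    next_shard: u64,
    start_execution_shard: u64,
    next_execution_shard: u64,
    cumulative_sum: &[u64],
    start_reconstruct_deferred_digest: &[u64],
    end_reconstruct_deferred_digest: &[u64],
    deferred_proofs_digest: &[u64],
) -> bool {
    // The program has halted, shard counting started at 1, and there is at
    // least one shard overall and at least one shard with a CPU chip.
    next_pc == 0
        && start_shard == 1
        && next_shard != 1
        && start_execution_shard == 1
        && next_execution_shard != 1
        // The permutation cumulative sum cancels over the whole proof.
        && cumulative_sum.iter().all(|&limb| limb == 0)
        // Deferred-digest reconstruction starts at zero and ends at the digest
        // of all deferred proofs.
        && start_reconstruct_deferred_digest.iter().all(|&w| w == 0)
        && end_reconstruct_deferred_digest == deferred_proofs_digest
}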
- builder.if_eq(is_complete, C::N::one()).then(|builder| { - assert_complete(builder, recursion_public_values, &reconstruct_challenger) - }); - - commit_public_values(builder, recursion_public_values); - } - } -} diff --git a/crates/recursion/program/src/machine/deferred.rs b/crates/recursion/program/src/machine/deferred.rs deleted file mode 100644 index eb7e7f6937..0000000000 --- a/crates/recursion/program/src/machine/deferred.rs +++ /dev/null @@ -1,318 +0,0 @@ -use std::{ - array, - borrow::{Borrow, BorrowMut}, - marker::PhantomData, -}; - -use p3_air::Air; -use p3_baby_bear::BabyBear; -use p3_commit::TwoAdicMultiplicativeCoset; -use p3_field::{AbstractField, PrimeField32, TwoAdicField}; -use sp1_core_machine::riscv::RiscvAir; -use sp1_primitives::{consts::WORD_SIZE, types::RecursionProgramType}; -use sp1_recursion_compiler::{ - config::InnerConfig, - ir::{Array, Builder, Config, Felt, Var}, - prelude::DslVariable, -}; -use sp1_recursion_core::{ - air::{RecursionPublicValues, RECURSIVE_PROOF_NUM_PV_ELTS}, - runtime::{RecursionProgram, DIGEST_SIZE}, -}; - -use sp1_recursion_compiler::prelude::*; -use sp1_stark::{ - air::{MachineAir, POSEIDON_NUM_WORDS, PV_DIGEST_NUM_WORDS}, - baby_bear_poseidon2::BabyBearPoseidon2, - Com, ShardProof, StarkGenericConfig, StarkMachine, StarkVerifyingKey, Word, -}; - -use crate::{ - challenger::{CanObserveVariable, DuplexChallengerVariable}, - fri::TwoAdicFriPcsVariable, - hints::Hintable, - stark::{RecursiveVerifierConstraintFolder, StarkVerifier}, - types::{ShardProofVariable, VerifyingKeyVariable}, - utils::{const_fri_config, get_challenger_public_values, hash_vkey, var2felt}, -}; - -use super::utils::{commit_public_values, verify_public_values_hash}; - -#[derive(Debug, Clone, Copy)] -pub struct SP1DeferredVerifier { - _phantom: PhantomData<(C, SC, A)>, -} - -/// Inputs that are hinted to the [SP1DeferredVerifier] program. -pub struct SP1DeferredMemoryLayout<'a, SC: StarkGenericConfig, A: MachineAir> -where - SC::Val: PrimeField32, -{ - pub compress_vk: &'a StarkVerifyingKey, - pub machine: &'a StarkMachine, - pub proofs: Vec>, - - pub start_reconstruct_deferred_digest: Vec, - - pub is_complete: bool, - - pub sp1_vk: &'a StarkVerifyingKey, - pub sp1_machine: &'a StarkMachine>, - pub committed_value_digest: Vec>, - pub deferred_proofs_digest: Vec, - pub leaf_challenger: SC::Challenger, - pub end_pc: SC::Val, - pub end_shard: SC::Val, - pub end_execution_shard: SC::Val, - pub init_addr_bits: [SC::Val; 32], - pub finalize_addr_bits: [SC::Val; 32], -} - -/// A variable version of the [SP1DeferredMemoryLayout] struct. -#[derive(DslVariable, Clone)] -pub struct SP1DeferredMemoryLayoutVariable { - pub compress_vk: VerifyingKeyVariable, - - pub proofs: Array>, - - pub start_reconstruct_deferred_digest: Array>, - - pub is_complete: Var, - - pub sp1_vk: VerifyingKeyVariable, - pub committed_value_digest: Array>>, - pub deferred_proofs_digest: Array>, - pub leaf_challenger: DuplexChallengerVariable, - pub end_pc: Felt, - pub end_shard: Felt, - pub end_execution_shard: Felt, - pub init_addr_bits: Array>, - pub finalize_addr_bits: Array>, -} - -impl SP1DeferredVerifier -where - A: MachineAir + for<'a> Air>, -{ - /// Create a new instance of the program for the [BabyBearPoseidon2] config. 
- pub fn build(machine: &StarkMachine) -> RecursionProgram { - let mut builder = Builder::::new(RecursionProgramType::Deferred); - let input: SP1DeferredMemoryLayoutVariable<_> = builder.uninit(); - SP1DeferredMemoryLayout::::witness(&input, &mut builder); - - let pcs = TwoAdicFriPcsVariable { - config: const_fri_config(&mut builder, machine.config().pcs().fri_config()), - }; - - SP1DeferredVerifier::verify(&mut builder, &pcs, machine, input); - - builder.halt(); - - builder.compile_program() - } -} - -impl SP1DeferredVerifier -where - C::F: PrimeField32 + TwoAdicField, - SC: StarkGenericConfig< - Val = C::F, - Challenge = C::EF, - Domain = TwoAdicMultiplicativeCoset, - >, - A: MachineAir + for<'a> Air>, - Com: Into<[SC::Val; DIGEST_SIZE]>, -{ - /// Verify a batch of deferred proofs. - /// - /// Each deferred proof is a recursive proof representing some computation. Namely, every such - /// proof represents a recursively verified program. - /// verifier: - /// - Asserts that each of these proofs is valid as a `compress` proof. - /// - Asserts that each of these proofs is complete by checking the `is_complete` flag in the - /// proof's public values. - /// - Aggregates the proof information into the accumulated deferred digest. - pub fn verify( - builder: &mut Builder, - pcs: &TwoAdicFriPcsVariable, - machine: &StarkMachine, - input: SP1DeferredMemoryLayoutVariable, - ) { - // Read the inputs. - let SP1DeferredMemoryLayoutVariable { - compress_vk, - proofs, - start_reconstruct_deferred_digest, - is_complete, - sp1_vk, - committed_value_digest, - deferred_proofs_digest, - leaf_challenger, - end_pc, - end_shard, - end_execution_shard, - init_addr_bits, - finalize_addr_bits, - } = input; - - // Initialize the values for the aggregated public output as all zeros. - let mut deferred_public_values_stream: Vec> = - (0..RECURSIVE_PROOF_NUM_PV_ELTS).map(|_| builder.eval(C::F::zero())).collect(); - - let deferred_public_values: &mut RecursionPublicValues<_> = - deferred_public_values_stream.as_mut_slice().borrow_mut(); - - // Compute the digest of compress_vk and input the value to the public values. - let compress_vk_digest = hash_vkey(builder, &compress_vk); - - deferred_public_values.compress_vk_digest = - array::from_fn(|i| builder.get(&compress_vk_digest, i)); - - // Initialize the start of deferred digests. - deferred_public_values.start_reconstruct_deferred_digest = - array::from_fn(|i| builder.get(&start_reconstruct_deferred_digest, i)); - - // Assert that there is at least one proof. - builder.assert_usize_ne(proofs.len(), 0); - - // Initialize the consistency check variable. - let mut reconstruct_deferred_digest = builder.array(POSEIDON_NUM_WORDS); - for (i, first_digest) in - deferred_public_values.start_reconstruct_deferred_digest.iter().enumerate() - { - builder.set(&mut reconstruct_deferred_digest, i, *first_digest); - } - - // Verify the proofs and connect the values. - builder.range(0, proofs.len()).for_each(|i, builder| { - // Load the proof. - let proof = builder.get(&proofs, i); - - // Verify the shard proof. - - // Prepare a challenger. - let mut challenger = DuplexChallengerVariable::new(builder); - // Observe the vk and start pc. - challenger.observe(builder, compress_vk.commitment.clone()); - challenger.observe(builder, compress_vk.pc_start); - // Observe the main commitment and public values. 
- challenger.observe(builder, proof.commitment.main_commit.clone()); - for j in 0..machine.num_pv_elts() { - let element = builder.get(&proof.public_values, j); - challenger.observe(builder, element); - } - - // Verify the proof. - StarkVerifier::::verify_shard( - builder, - &compress_vk, - pcs, - machine, - &mut challenger, - &proof, - true, - ); - - // Load the public values from the proof. - let current_public_values_elements = (0..RECURSIVE_PROOF_NUM_PV_ELTS) - .map(|i| builder.get(&proof.public_values, i)) - .collect::>>(); - - let current_public_values: &RecursionPublicValues> = - current_public_values_elements.as_slice().borrow(); - - // Check that the public values digest is correct. - verify_public_values_hash(builder, current_public_values); - - // Assert that the proof is complete. - builder.assert_felt_eq(current_public_values.is_complete, C::F::one()); - - // Assert that the compress_vk digest is the same. - for (digest, current) in deferred_public_values - .compress_vk_digest - .iter() - .zip(current_public_values.compress_vk_digest.iter()) - { - builder.assert_felt_eq(*digest, *current); - } - - // Update deferred proof digest - // poseidon2( current_digest[..8] || pv.sp1_vk_digest[..8] || - // pv.committed_value_digest[..32] ) - let mut poseidon_inputs = builder.array(48); - for j in 0..DIGEST_SIZE { - let current_digest_element = builder.get(&reconstruct_deferred_digest, j); - builder.set(&mut poseidon_inputs, j, current_digest_element); - } - - for j in 0..DIGEST_SIZE { - // let input_index: Var<_> = builder.constant(F::from_canonical_usize(j + 8)); - builder.set( - &mut poseidon_inputs, - j + DIGEST_SIZE, - current_public_values.sp1_vk_digest[j], - ); - } - for j in 0..PV_DIGEST_NUM_WORDS { - for k in 0..WORD_SIZE { - // let input_index: Var<_> = - // builder.eval(F::from_canonical_usize(j * WORD_SIZE + k + 16)); - let element = current_public_values.committed_value_digest[j][k]; - builder.set(&mut poseidon_inputs, j * WORD_SIZE + k + 16, element); - } - } - let new_digest = builder.poseidon2_hash(&poseidon_inputs); - for j in 0..DIGEST_SIZE { - let new_value = builder.get(&new_digest, j); - builder.set(&mut reconstruct_deferred_digest, j, new_value); - } - }); - - // Set the public values. - - // Set initial_pc, end_pc, initial_shard, and end_shard to be the hitned values. - deferred_public_values.start_pc = end_pc; - deferred_public_values.next_pc = end_pc; - deferred_public_values.start_shard = end_shard; - deferred_public_values.next_shard = end_shard; - deferred_public_values.start_execution_shard = end_execution_shard; - deferred_public_values.next_execution_shard = end_execution_shard; - // Set the init and finalize address bits to be the hintred values. - let init_addr_bits = core::array::from_fn(|i| builder.get(&init_addr_bits, i)); - deferred_public_values.previous_init_addr_bits = init_addr_bits; - deferred_public_values.last_init_addr_bits = init_addr_bits; - let finalize_addr_bits = core::array::from_fn(|i| builder.get(&finalize_addr_bits, i)); - deferred_public_values.previous_finalize_addr_bits = finalize_addr_bits; - deferred_public_values.last_finalize_addr_bits = finalize_addr_bits; - - // Set the sp1_vk_digest to be the hitned value. - let sp1_vk_digest = hash_vkey(builder, &sp1_vk); - deferred_public_values.sp1_vk_digest = array::from_fn(|i| builder.get(&sp1_vk_digest, i)); - - // Set the committed value digest to be the hitned value. 
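The per-proof digest update in the loop above folds each deferred proof into the running digest exactly as the inline comment describes. A generic sketch of that 48-element preimage layout (`F` and `poseidon2_hash` are stand-ins for the concrete field type and the builder's hash call):

// new_digest = poseidon2(current_digest[0..8] || sp1_vk_digest[0..8]
//                        || committed_value_digest bytes[0..32])
fn update_deferred_digest<F: Copy>(
    current_digest: [F; 8],
    sp1_vk_digest: [F; 8],
    committed_value_digest: [[F; 4]; 8],
    poseidon2_hash: impl Fn(&[F]) -> [F; 8],
) -> [F; 8] {
    let mut input = Vec::with_capacity(48);
    input.extend_from_slice(&current_digest);
    input.extend_from_slice(&sp1_vk_digest);
    for word in &committed_value_digest {
        input.extend_from_slice(word);
    }
    poseidon2_hash(&input)
}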
- for (i, public_word) in deferred_public_values.committed_value_digest.iter_mut().enumerate() - { - let hinted_word = builder.get(&committed_value_digest, i); - public_word.0 = array::from_fn(|j| builder.get(&hinted_word, j)); - } - - // Set the deferred proof digest to be the hitned value. - deferred_public_values.deferred_proofs_digest = - core::array::from_fn(|i| builder.get(&deferred_proofs_digest, i)); - - // Set the initial, end, and leaf challenger to be the hitned values. - let values = get_challenger_public_values(builder, &leaf_challenger); - deferred_public_values.leaf_challenger = values; - deferred_public_values.start_reconstruct_challenger = values; - deferred_public_values.end_reconstruct_challenger = values; - - // Assign the deffered proof digests. - deferred_public_values.end_reconstruct_deferred_digest = - array::from_fn(|i| builder.get(&reconstruct_deferred_digest, i)); - - // Set the is_complete flag. - deferred_public_values.is_complete = var2felt(builder, is_complete); - - commit_public_values(builder, deferred_public_values); - } -} diff --git a/crates/recursion/program/src/machine/mod.rs b/crates/recursion/program/src/machine/mod.rs deleted file mode 100644 index 38e17690f8..0000000000 --- a/crates/recursion/program/src/machine/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -mod compress; -mod core; -mod deferred; -mod root; -mod utils; - -pub use compress::*; -pub use core::*; -pub use deferred::*; -pub use root::*; -pub use utils::*; diff --git a/crates/recursion/program/src/machine/root.rs b/crates/recursion/program/src/machine/root.rs deleted file mode 100644 index 2b19c1735c..0000000000 --- a/crates/recursion/program/src/machine/root.rs +++ /dev/null @@ -1,160 +0,0 @@ -use std::borrow::Borrow; - -use p3_air::Air; -use p3_baby_bear::BabyBear; -use p3_commit::TwoAdicMultiplicativeCoset; -use p3_field::{AbstractField, PrimeField32, TwoAdicField}; -use sp1_primitives::types::RecursionProgramType; -use sp1_recursion_compiler::{ - config::InnerConfig, - ir::{Builder, Config, Felt, Var}, - prelude::DslVariable, -}; -use sp1_recursion_core::{ - air::{RecursionPublicValues, RECURSIVE_PROOF_NUM_PV_ELTS}, - runtime::{RecursionProgram, DIGEST_SIZE}, -}; - -use sp1_recursion_compiler::prelude::*; -use sp1_stark::{ - air::MachineAir, baby_bear_poseidon2::BabyBearPoseidon2, Com, ShardProof, StarkGenericConfig, - StarkMachine, StarkVerifyingKey, -}; - -use crate::{ - challenger::{CanObserveVariable, DuplexChallengerVariable}, - fri::TwoAdicFriPcsVariable, - hints::Hintable, - machine::utils::proof_data_from_vk, - stark::{RecursiveVerifierConstraintFolder, ShardProofHint, StarkVerifier}, - types::ShardProofVariable, - utils::{const_fri_config, hash_vkey}, -}; - -use super::utils::{commit_public_values, verify_public_values_hash}; - -/// The program that gets a final verifier at the root of the tree. -#[derive(Debug, Clone, Copy)] -pub struct SP1RootVerifier { - _phantom: std::marker::PhantomData<(C, SC, A)>, -} - -pub struct SP1RootMemoryLayout<'a, SC: StarkGenericConfig, A: MachineAir> { - pub machine: &'a StarkMachine, - pub proof: ShardProof, - pub is_reduce: bool, -} - -#[derive(DslVariable, Clone)] -pub struct SP1RootMemoryLayoutVariable { - pub proof: ShardProofVariable, - pub is_reduce: Var, -} - -impl SP1RootVerifier -where - A: MachineAir + for<'a> Air>, -{ - /// Create a new instance of the program for the [BabyBearPoseidon2] config. 
- pub fn build( - machine: &StarkMachine, - vk: &StarkVerifyingKey, - program_type: RecursionProgramType, - ) -> RecursionProgram { - assert!(matches!(program_type, RecursionProgramType::Shrink | RecursionProgramType::Wrap)); - - let mut builder = Builder::::new(program_type); - - let proof: ShardProofVariable<_> = builder.uninit(); - ShardProofHint::::witness(&proof, &mut builder); - - let pcs = TwoAdicFriPcsVariable { - config: const_fri_config(&mut builder, machine.config().pcs().fri_config()), - }; - - SP1RootVerifier::verify(&mut builder, &pcs, machine, vk, &proof); - - builder.compile_program() - } -} - -impl SP1RootVerifier -where - C::F: PrimeField32 + TwoAdicField, - SC: StarkGenericConfig< - Val = C::F, - Challenge = C::EF, - Domain = TwoAdicMultiplicativeCoset, - >, - A: MachineAir + for<'a> Air>, - Com: Into<[SC::Val; DIGEST_SIZE]>, -{ - /// Verify a proof with given vk and aggregate their public values. - /// - /// is_reduce : if the proof is a reduce proof, we will assert that the given vk indentifies - /// with the reduce vk digest of public inputs. - pub fn verify( - builder: &mut Builder, - pcs: &TwoAdicFriPcsVariable, - machine: &StarkMachine, - vk: &StarkVerifyingKey, - proof: &ShardProofVariable, - ) { - // Get the verifying key info from the vk. - let vk = proof_data_from_vk(builder, vk, machine); - - // Verify the proof. - - let mut challenger = DuplexChallengerVariable::new(builder); - // Observe the vk and start pc. - challenger.observe(builder, vk.commitment.clone()); - challenger.observe(builder, vk.pc_start); - // Observe the main commitment and public values. - challenger.observe(builder, proof.commitment.main_commit.clone()); - for j in 0..machine.num_pv_elts() { - let element = builder.get(&proof.public_values, j); - challenger.observe(builder, element); - } - // verify proof. - StarkVerifier::::verify_shard( - builder, - &vk, - pcs, - machine, - &mut challenger, - proof, - true, - ); - - // Get the public inputs from the proof. - let public_values_elements = (0..RECURSIVE_PROOF_NUM_PV_ELTS) - .map(|i| builder.get(&proof.public_values, i)) - .collect::>>(); - let public_values: &RecursionPublicValues> = - public_values_elements.as_slice().borrow(); - - // Check that the public values digest is correct. - verify_public_values_hash(builder, public_values); - - // Assert that the proof is complete. - // - // *Remark*: here we are assuming on that the program we are verifying indludes the check - // of completeness conditions are satisfied if the flag is set to one, so we are only - // checking the `is_complete` flag in this program. - builder.assert_felt_eq(public_values.is_complete, C::F::one()); - - // If this is a Shrink program (when it's verifying a compress proof), then assert that the - // vk is the same as the compress vk from the public values. 
- if matches!(builder.program_type, RecursionProgramType::Shrink) { - let vk_digest = hash_vkey(builder, &vk); - for (i, reduce_digest_elem) in public_values.compress_vk_digest.iter().enumerate() { - let vk_digest_elem = builder.get(&vk_digest, i); - builder.assert_felt_eq(vk_digest_elem, *reduce_digest_elem); - } - } - - commit_public_values(builder, public_values); - - builder.halt(); - } -} diff --git a/crates/recursion/program/src/machine/utils.rs b/crates/recursion/program/src/machine/utils.rs deleted file mode 100644 index 15ea3100c4..0000000000 --- a/crates/recursion/program/src/machine/utils.rs +++ /dev/null @@ -1,173 +0,0 @@ -use std::mem::transmute; - -use itertools::Itertools; -use p3_commit::TwoAdicMultiplicativeCoset; -use p3_field::AbstractField; - -use sp1_recursion_compiler::ir::{Array, Builder, Config, Felt, Var}; -use sp1_recursion_core::{ - air::{RecursionPublicValues, NUM_PV_ELMS_TO_HASH, RECURSIVE_PROOF_NUM_PV_ELTS}, - runtime::DIGEST_SIZE, -}; -use sp1_stark::{air::MachineAir, Com, StarkGenericConfig, StarkMachine, StarkVerifyingKey}; - -use crate::{ - challenger::DuplexChallengerVariable, - fri::TwoAdicMultiplicativeCosetVariable, - types::VerifyingKeyVariable, - utils::{assert_challenger_eq_pv, felt2var, get_preprocessed_data}, -}; - -/// Assertions on the public values describing a complete recursive proof state. -/// -/// See [SP1Prover::verify] for the verification algorithm of a complete SP1 proof. -pub(crate) fn assert_complete( - builder: &mut Builder, - public_values: &RecursionPublicValues>, - end_reconstruct_challenger: &DuplexChallengerVariable, -) { - let RecursionPublicValues { - deferred_proofs_digest, - next_pc, - start_shard, - next_shard, - start_execution_shard, - next_execution_shard, - cumulative_sum, - start_reconstruct_deferred_digest, - end_reconstruct_deferred_digest, - leaf_challenger, - .. - } = public_values; - - // Assert that `next_pc` is equal to zero (so program execution has completed) - builder.assert_felt_eq(*next_pc, C::F::zero()); - - // Assert that start shard is equal to 1. - builder.assert_felt_eq(*start_shard, C::F::one()); - - // Assert that the next shard is not equal to one. This guarantees that there is at least one - // shard. - builder.assert_felt_ne(*next_shard, C::F::one()); - - // Assert that the start execution shard is equal to 1. - builder.assert_felt_eq(*start_execution_shard, C::F::one()); - - // Assert that next shard is not equal to one. This guarantees that there is at least one shard - // with CPU. - builder.assert_felt_ne(*next_execution_shard, C::F::one()); - - // Assert that the end reconstruct challenger is equal to the leaf challenger. - assert_challenger_eq_pv(builder, end_reconstruct_challenger, *leaf_challenger); - - // The start reconstruct deffered digest should be zero. - for start_digest_word in start_reconstruct_deferred_digest { - builder.assert_felt_eq(*start_digest_word, C::F::zero()); - } - - // The end reconstruct deffered digest should be equal to the deferred proofs digest. - for (end_digest_word, deferred_digest_word) in - end_reconstruct_deferred_digest.iter().zip_eq(deferred_proofs_digest.iter()) - { - builder.assert_felt_eq(*end_digest_word, *deferred_digest_word); - } - - // Assert that the cumulative sum is zero. 
- for b in cumulative_sum.iter() { - builder.assert_felt_eq(*b, C::F::zero()); - } -} - -pub(crate) fn proof_data_from_vk( - builder: &mut Builder, - vk: &StarkVerifyingKey, - machine: &StarkMachine, -) -> VerifyingKeyVariable -where - SC: StarkGenericConfig< - Val = C::F, - Challenge = C::EF, - Domain = TwoAdicMultiplicativeCoset, - >, - A: MachineAir, - Com: Into<[SC::Val; DIGEST_SIZE]>, -{ - let mut commitment = builder.dyn_array(DIGEST_SIZE); - for (i, value) in vk.commit.clone().into().iter().enumerate() { - builder.set(&mut commitment, i, *value); - } - let pc_start: Felt<_> = builder.eval(vk.pc_start); - - let (prep_sorted_indices_val, prep_domains_val) = get_preprocessed_data(machine, vk); - - let mut prep_sorted_indices = builder.dyn_array::>(prep_sorted_indices_val.len()); - let mut prep_domains = - builder.dyn_array::>(prep_domains_val.len()); - - for (i, value) in prep_sorted_indices_val.iter().enumerate() { - builder.set(&mut prep_sorted_indices, i, C::N::from_canonical_usize(*value)); - } - - for (i, value) in prep_domains_val.iter().enumerate() { - let domain: TwoAdicMultiplicativeCosetVariable<_> = builder.constant(*value); - builder.set(&mut prep_domains, i, domain); - } - - VerifyingKeyVariable { - commitment, - pc_start, - preprocessed_sorted_idxs: prep_sorted_indices, - prep_domains, - } -} - -/// Calculates the digest of the recursion public values. -fn calculate_public_values_digest( - builder: &mut Builder, - public_values: &RecursionPublicValues>, -) -> Array> { - let pv_elements: [Felt<_>; RECURSIVE_PROOF_NUM_PV_ELTS] = unsafe { transmute(*public_values) }; - let mut poseidon_inputs = builder.array(NUM_PV_ELMS_TO_HASH); - for (i, elm) in pv_elements[0..NUM_PV_ELMS_TO_HASH].iter().enumerate() { - builder.set(&mut poseidon_inputs, i, *elm); - } - builder.poseidon2_hash(&poseidon_inputs) -} - -/// Verifies the digest of a recursive public values struct. -pub(crate) fn verify_public_values_hash( - builder: &mut Builder, - public_values: &RecursionPublicValues>, -) { - let var_exit_code = felt2var(builder, public_values.exit_code); - // Check that the public values digest is correct if the exit_code is 0. - builder.if_eq(var_exit_code, C::N::zero()).then(|builder| { - let calculated_digest = calculate_public_values_digest(builder, public_values); - - let expected_digest = public_values.digest; - for (i, expected_elm) in expected_digest.iter().enumerate() { - let calculated_elm = builder.get(&calculated_digest, i); - builder.assert_felt_eq(*expected_elm, calculated_elm); - } - }); -} - -/// Register and commits the recursion public values. -pub fn commit_public_values( - builder: &mut Builder, - public_values: &RecursionPublicValues>, -) { - let pv_elements: [Felt<_>; RECURSIVE_PROOF_NUM_PV_ELTS] = unsafe { transmute(*public_values) }; - let pv_elms_no_digest = &pv_elements[0..NUM_PV_ELMS_TO_HASH]; - - for value in pv_elms_no_digest.iter() { - builder.register_public_value(*value); - } - - // Hash the public values. 
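The check performed by `verify_public_values_hash` above amounts to the following predicate (a sketch: `F`, `poseidon2_hash`, and the `num_pv_elms_to_hash` parameter are stand-ins for the concrete field type, the builder's hash call, and the NUM_PV_ELMS_TO_HASH constant):

// The digest is only enforced for successful executions (exit_code == 0).
fn public_values_digest_ok<F: Copy + PartialEq>(
    exit_code: u64,
    pv_elements: &[F],
    num_pv_elms_to_hash: usize,
    claimed_digest: &[F],
    poseidon2_hash: impl Fn(&[F]) -> Vec<F>,
) -> bool {
    if exit_code != 0 {
        return true;
    }
    poseidon2_hash(&pv_elements[..num_pv_elms_to_hash]) == claimed_digest
}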
- let pv_digest = calculate_public_values_digest(builder, public_values); - for i in 0..DIGEST_SIZE { - let digest_element = builder.get(&pv_digest, i); - builder.commit_public_value(digest_element); - } -} diff --git a/crates/recursion/program/src/stark.rs b/crates/recursion/program/src/stark.rs deleted file mode 100644 index 607b6e52df..0000000000 --- a/crates/recursion/program/src/stark.rs +++ /dev/null @@ -1,544 +0,0 @@ -use p3_air::Air; -use p3_commit::TwoAdicMultiplicativeCoset; -use p3_field::{AbstractField, TwoAdicField}; - -use sp1_recursion_compiler::{ - ir::{Array, Builder, Config, Ext, ExtConst, SymbolicExt, SymbolicVar, Usize, Var}, - prelude::Felt, -}; - -use sp1_recursion_core::runtime::DIGEST_SIZE; -use sp1_stark::{ - air::MachineAir, Com, GenericVerifierConstraintFolder, ShardProof, StarkGenericConfig, - StarkMachine, StarkVerifyingKey, -}; - -use crate::{ - challenger::{CanObserveVariable, DuplexChallengerVariable, FeltChallenger}, - commit::{PcsVariable, PolynomialSpaceVariable}, - fri::{ - types::{TwoAdicPcsMatsVariable, TwoAdicPcsRoundVariable}, - TwoAdicFriPcsVariable, TwoAdicMultiplicativeCosetVariable, - }, - types::{ShardCommitmentVariable, ShardProofVariable, VerifyingKeyVariable}, -}; - -use crate::types::QuotientData; - -pub const EMPTY: usize = 0x_1111_1111; - -pub trait StarkRecursiveVerifier { - fn verify_shard( - &self, - builder: &mut Builder, - vk: &VerifyingKeyVariable, - pcs: &TwoAdicFriPcsVariable, - challenger: &mut DuplexChallengerVariable, - proof: &ShardProofVariable, - is_complete: impl Into>, - ); - - fn verify_shards( - &self, - builder: &mut Builder, - vk: &VerifyingKeyVariable, - pcs: &TwoAdicFriPcsVariable, - challenger: &mut DuplexChallengerVariable, - proofs: &Array>, - is_complete: impl Into> + Clone, - ) { - // Assert that the number of shards is not zero. - builder.assert_usize_ne(proofs.len(), 0); - - // Verify each shard. 
- builder.range(0, proofs.len()).for_each(|i, builder| { - let proof = builder.get(proofs, i); - self.verify_shard(builder, vk, pcs, challenger, &proof, is_complete.clone()); - }); - } -} - -#[derive(Debug, Clone, Copy)] -pub struct StarkVerifier { - _phantom: std::marker::PhantomData<(C, SC)>, -} - -pub struct ShardProofHint<'a, SC: StarkGenericConfig, A> { - pub machine: &'a StarkMachine, - pub proof: &'a ShardProof, -} - -impl<'a, SC: StarkGenericConfig, A: MachineAir> ShardProofHint<'a, SC, A> { - pub const fn new(machine: &'a StarkMachine, proof: &'a ShardProof) -> Self { - Self { machine, proof } - } -} - -pub struct VerifyingKeyHint<'a, SC: StarkGenericConfig, A> { - pub machine: &'a StarkMachine, - pub vk: &'a StarkVerifyingKey, -} - -impl<'a, SC: StarkGenericConfig, A: MachineAir> VerifyingKeyHint<'a, SC, A> { - pub const fn new(machine: &'a StarkMachine, vk: &'a StarkVerifyingKey) -> Self { - Self { machine, vk } - } -} - -pub type RecursiveVerifierConstraintFolder<'a, C> = GenericVerifierConstraintFolder< - 'a, - ::F, - ::EF, - Felt<::F>, - Ext<::F, ::EF>, - SymbolicExt<::F, ::EF>, ->; - -impl StarkVerifier -where - C::F: TwoAdicField, - SC: StarkGenericConfig< - Val = C::F, - Challenge = C::EF, - Domain = TwoAdicMultiplicativeCoset, - >, -{ - pub fn verify_shard( - builder: &mut Builder, - vk: &VerifyingKeyVariable, - pcs: &TwoAdicFriPcsVariable, - machine: &StarkMachine, - challenger: &mut DuplexChallengerVariable, - proof: &ShardProofVariable, - check_cumulative_sum: bool, - ) where - A: MachineAir + for<'a> Air>, - C::F: TwoAdicField, - C::EF: TwoAdicField, - Com: Into<[SC::Val; DIGEST_SIZE]>, - { - builder.cycle_tracker("stage-c-verify-shard-setup"); - let ShardProofVariable { commitment, opened_values, opening_proof, .. } = proof; - - let ShardCommitmentVariable { main_commit, permutation_commit, quotient_commit } = - commitment; - - let permutation_challenges = - (0..2).map(|_| challenger.sample_ext(builder)).collect::>(); - - challenger.observe(builder, permutation_commit.clone()); - - let alpha = challenger.sample_ext(builder); - - challenger.observe(builder, quotient_commit.clone()); - - let zeta = challenger.sample_ext(builder); - - let num_shard_chips = opened_values.chips.len(); - let mut trace_domains = - builder.dyn_array::>(num_shard_chips); - let mut quotient_domains = - builder.dyn_array::>(num_shard_chips); - - let num_preprocessed_chips = machine.preprocessed_chip_ids().len(); - - let mut prep_mats: Array<_, TwoAdicPcsMatsVariable<_>> = - builder.dyn_array(num_preprocessed_chips); - let mut main_mats: Array<_, TwoAdicPcsMatsVariable<_>> = builder.dyn_array(num_shard_chips); - let mut perm_mats: Array<_, TwoAdicPcsMatsVariable<_>> = builder.dyn_array(num_shard_chips); - - let num_quotient_mats: Var<_> = builder.eval(C::N::zero()); - builder.range(0, num_shard_chips).for_each(|i, builder| { - let num_quotient_chunks = builder.get(&proof.quotient_data, i).quotient_size; - builder.assign(num_quotient_mats, num_quotient_mats + num_quotient_chunks); - }); - - let mut quotient_mats: Array<_, TwoAdicPcsMatsVariable<_>> = - builder.dyn_array(num_quotient_mats); - - let mut qc_points = builder.dyn_array::>(1); - builder.set_value(&mut qc_points, 0, zeta); - - // Iterate through machine.chips filtered for preprocessed chips. - for (preprocessed_id, chip_id) in machine.preprocessed_chip_ids().into_iter().enumerate() { - // Get index within sorted preprocessed chips. 
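For orientation, the challenge schedule drawn near the top of `verify_shard` above is the following (a sketch; the trait is a stand-in for the DSL's `DuplexChallengerVariable`, and the main commitment and public values are assumed to have already been observed by the caller):

trait ShardChallenger<EF, Com> {
    fn observe(&mut self, commitment: &Com);
    fn sample_ext(&mut self) -> EF;
}

// Returns (permutation_challenges, alpha, zeta) in the order they are drawn.
fn sample_shard_challenges<EF, Com>(
    ch: &mut impl ShardChallenger<EF, Com>,
    permutation_commit: &Com,
    quotient_commit: &Com,
) -> ([EF; 2], EF, EF) {
    let perm_a = ch.sample_ext();
    let perm_b = ch.sample_ext();
    ch.observe(permutation_commit);
    let alpha = ch.sample_ext();
    ch.observe(quotient_commit);
    let zeta = ch.sample_ext();
    ([perm_a, perm_b], alpha, zeta)
}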
- let preprocessed_sorted_id = builder.get(&vk.preprocessed_sorted_idxs, preprocessed_id); - // Get domain from witnessed domains. Array is ordered by machine.chips ordering. - let domain = builder.get(&vk.prep_domains, preprocessed_id); - - // Get index within all sorted chips. - let chip_sorted_id = builder.get(&proof.sorted_idxs, chip_id); - // Get opening from proof. - let opening = builder.get(&opened_values.chips, chip_sorted_id); - - let mut trace_points = builder.dyn_array::>(2); - let zeta_next = domain.next_point(builder, zeta); - - builder.set_value(&mut trace_points, 0, zeta); - builder.set_value(&mut trace_points, 1, zeta_next); - - let mut prep_values = builder.dyn_array::>(2); - builder.set_value(&mut prep_values, 0, opening.preprocessed.local); - builder.set_value(&mut prep_values, 1, opening.preprocessed.next); - let main_mat = TwoAdicPcsMatsVariable:: { - domain: domain.clone(), - values: prep_values, - points: trace_points.clone(), - }; - builder.set_value(&mut prep_mats, preprocessed_sorted_id, main_mat); - } - - let qc_index: Var<_> = builder.eval(C::N::zero()); - builder.range(0, num_shard_chips).for_each(|i, builder| { - let opening = builder.get(&opened_values.chips, i); - let QuotientData { log_quotient_degree, quotient_size } = - builder.get(&proof.quotient_data, i); - let domain = pcs.natural_domain_for_log_degree(builder, Usize::Var(opening.log_degree)); - builder.set_value(&mut trace_domains, i, domain.clone()); - - let log_quotient_size: Usize<_> = - builder.eval(opening.log_degree + log_quotient_degree); - let quotient_domain = - domain.create_disjoint_domain(builder, log_quotient_size, Some(pcs.config.clone())); - builder.set_value(&mut quotient_domains, i, quotient_domain.clone()); - - // Get trace_opening_points. - let mut trace_points = builder.dyn_array::>(2); - let zeta_next = domain.next_point(builder, zeta); - builder.set_value(&mut trace_points, 0, zeta); - builder.set_value(&mut trace_points, 1, zeta_next); - - // Get the main matrix. - let mut main_values = builder.dyn_array::>(2); - builder.set_value(&mut main_values, 0, opening.main.local); - builder.set_value(&mut main_values, 1, opening.main.next); - let main_mat = TwoAdicPcsMatsVariable:: { - domain: domain.clone(), - values: main_values, - points: trace_points.clone(), - }; - builder.set_value(&mut main_mats, i, main_mat); - - // Get the permutation matrix. - let mut perm_values = builder.dyn_array::>(2); - builder.set_value(&mut perm_values, 0, opening.permutation.local); - builder.set_value(&mut perm_values, 1, opening.permutation.next); - let perm_mat = TwoAdicPcsMatsVariable:: { - domain: domain.clone(), - values: perm_values, - points: trace_points, - }; - builder.set_value(&mut perm_mats, i, perm_mat); - - // Get the quotient matrices and values. - let qc_domains = - quotient_domain.split_domains(builder, log_quotient_degree, quotient_size); - - builder.range(0, qc_domains.len()).for_each(|j, builder| { - let qc_dom = builder.get(&qc_domains, j); - let qc_vals_array = builder.get(&opening.quotient, j); - let mut qc_values = builder.dyn_array::>(1); - builder.set_value(&mut qc_values, 0, qc_vals_array); - let qc_mat = TwoAdicPcsMatsVariable:: { - domain: qc_dom, - values: qc_values, - points: qc_points.clone(), - }; - builder.set_value(&mut quotient_mats, qc_index, qc_mat); - builder.assign(qc_index, qc_index + C::N::one()); - }); - }); - - // Create the pcs rounds. 
- let mut rounds = builder.dyn_array::>(4); - let prep_commit = vk.commitment.clone(); - let prep_round = TwoAdicPcsRoundVariable { batch_commit: prep_commit, mats: prep_mats }; - let main_round = - TwoAdicPcsRoundVariable { batch_commit: main_commit.clone(), mats: main_mats }; - let perm_round = - TwoAdicPcsRoundVariable { batch_commit: permutation_commit.clone(), mats: perm_mats }; - let quotient_round = - TwoAdicPcsRoundVariable { batch_commit: quotient_commit.clone(), mats: quotient_mats }; - builder.set_value(&mut rounds, 0, prep_round); - builder.set_value(&mut rounds, 1, main_round); - builder.set_value(&mut rounds, 2, perm_round); - builder.set_value(&mut rounds, 3, quotient_round); - builder.cycle_tracker("stage-c-verify-shard-setup"); - - // Verify the pcs proof - builder.cycle_tracker("stage-d-verify-pcs"); - pcs.verify(builder, rounds, opening_proof.clone(), challenger); - builder.cycle_tracker("stage-d-verify-pcs"); - - builder.cycle_tracker("stage-e-verify-constraints"); - - let num_shard_chips_enabled: Var<_> = builder.eval(C::N::zero()); - for (i, chip) in machine.chips().iter().enumerate() { - tracing::debug!("verifying constraints for chip: {}", chip.name()); - let index = builder.get(&proof.sorted_idxs, i); - - if chip.preprocessed_width() > 0 { - builder.assert_var_ne(index, C::N::from_canonical_usize(EMPTY)); - } - - builder.if_ne(index, C::N::from_canonical_usize(EMPTY)).then(|builder| { - let values = builder.get(&opened_values.chips, index); - let trace_domain = builder.get(&trace_domains, index); - let quotient_domain: TwoAdicMultiplicativeCosetVariable<_> = - builder.get("ient_domains, index); - - // Check that the quotient data matches the chip's data. - let log_quotient_degree = chip.log_quotient_degree(); - - let quotient_size = 1 << log_quotient_degree; - let chip_quotient_data = builder.get(&proof.quotient_data, index); - builder - .assert_usize_eq(chip_quotient_data.log_quotient_degree, log_quotient_degree); - builder.assert_usize_eq(chip_quotient_data.quotient_size, quotient_size); - - // Get the domains from the chip itself. - let qc_domains = quotient_domain.split_domains_const(builder, log_quotient_degree); - - // Verify the constraints. - stacker::maybe_grow(16 * 1024 * 1024, 16 * 1024 * 1024, || { - Self::verify_constraints( - builder, - chip, - &values, - proof.public_values.clone(), - trace_domain, - qc_domains, - zeta, - alpha, - &permutation_challenges, - ); - }); - - // Increment the number of shard chips that are enabled. - builder.assign(num_shard_chips_enabled, num_shard_chips_enabled + C::N::one()); - }); - } - - // Assert that the number of chips in `opened_values` matches the number of shard chips - // enabled. - builder.assert_var_eq(num_shard_chips_enabled, num_shard_chips); - - // If we're checking the cumulative sum, assert that the sum of the cumulative sums is zero. 
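The cumulative-sum rule stated just above is implemented by the block that follows; restated in plain Rust (a sketch; `i64` stands in for the extension-field element type):

// When the check is enabled, the cumulative sums of all chips in the shard
// must cancel to zero.
fn cumulative_sums_cancel(chip_cumulative_sums: &[i64]) -> bool {
    chip_cumulative_sums.iter().sum::<i64>() == 0
}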
- if check_cumulative_sum { - let sum: Ext<_, _> = builder.eval(C::EF::zero().cons()); - builder.range(0, proof.opened_values.chips.len()).for_each(|i, builder| { - let cumulative_sum = builder.get(&proof.opened_values.chips, i).cumulative_sum; - builder.assign(sum, sum + cumulative_sum); - }); - builder.assert_ext_eq(sum, C::EF::zero().cons()); - } - - builder.cycle_tracker("stage-e-verify-constraints"); - } -} - -#[cfg(test)] -pub(crate) mod tests { - use std::{borrow::BorrowMut, time::Instant}; - - use crate::{ - challenger::{CanObserveVariable, FeltChallenger}, - hints::Hintable, - machine::commit_public_values, - stark::{DuplexChallengerVariable, Ext, ShardProofHint}, - types::ShardCommitmentVariable, - }; - use p3_challenger::{CanObserve, FieldChallenger}; - use p3_field::AbstractField; - use rand::Rng; - use sp1_core_executor::Program; - use sp1_core_machine::{io::SP1Stdin, riscv::RiscvAir, utils::setup_logger}; - use sp1_recursion_compiler::{ - asm::AsmBuilder, - config::InnerConfig, - ir::{Array, Builder, Config, ExtConst, Felt, Usize}, - }; - use sp1_recursion_core::{ - air::{ - RecursionPublicValues, RECURSION_PUBLIC_VALUES_COL_MAP, RECURSIVE_PROOF_NUM_PV_ELTS, - }, - runtime::{RecursionProgram, Runtime, DIGEST_SIZE}, - stark::{ - utils::{run_test_recursion, TestConfig}, - RecursionAir, - }, - }; - use sp1_stark::{ - air::POSEIDON_NUM_WORDS, baby_bear_poseidon2::BabyBearPoseidon2, CpuProver, InnerChallenge, - InnerVal, MachineProver, SP1CoreOpts, StarkGenericConfig, - }; - - type SC = BabyBearPoseidon2; - type Challenge = ::Challenge; - type F = InnerVal; - type EF = InnerChallenge; - type C = InnerConfig; - type A = RiscvAir; - - #[test] - fn test_permutation_challenges() { - // Generate a dummy proof. - sp1_core_machine::utils::setup_logger(); - let elf = include_bytes!("../../../../tests/fibonacci/elf/riscv32im-succinct-zkvm-elf"); - - let machine = A::machine(SC::default()); - let (_, vk) = machine.setup(&Program::from(elf).unwrap()); - let mut challenger_val = machine.config().challenger(); - let (proof, _, _) = sp1_core_machine::utils::prove::<_, CpuProver<_, _>>( - Program::from(elf).unwrap(), - &SP1Stdin::new(), - SC::default(), - SP1CoreOpts::default(), - ) - .unwrap(); - let proofs = proof.shard_proofs; - println!("Proof generated successfully"); - - challenger_val.observe(vk.commit); - - proofs.iter().for_each(|proof| { - challenger_val.observe(proof.commitment.main_commit); - challenger_val.observe_slice(&proof.public_values[0..machine.num_pv_elts()]); - }); - - let permutation_challenges = - (0..2).map(|_| challenger_val.sample_ext_element::()).collect::>(); - - // Observe all the commitments. - let mut builder = Builder::::default(); - - // Add a hash invocation, since the poseidon2 table expects that it's in the first row. - let hash_input = builder.constant(vec![vec![F::one()]]); - builder.poseidon2_hash_x(&hash_input); - - let mut challenger = DuplexChallengerVariable::new(&mut builder); - - let preprocessed_commit_val: [F; DIGEST_SIZE] = vk.commit.into(); - let preprocessed_commit: Array = builder.constant(preprocessed_commit_val.to_vec()); - challenger.observe(&mut builder, preprocessed_commit); - - let mut witness_stream = Vec::new(); - for proof in proofs { - let proof_hint = ShardProofHint::new(&machine, &proof); - witness_stream.extend(proof_hint.write()); - let proof = ShardProofHint::::read(&mut builder); - let ShardCommitmentVariable { main_commit, .. 
} = proof.commitment; - challenger.observe(&mut builder, main_commit); - let pv_slice = proof.public_values.slice( - &mut builder, - Usize::Const(0), - Usize::Const(machine.num_pv_elts()), - ); - challenger.observe_slice(&mut builder, pv_slice); - } - - // Sample the permutation challenges. - let permutation_challenges_var = - (0..2).map(|_| challenger.sample_ext(&mut builder)).collect::>(); - - for i in 0..2 { - builder.assert_ext_eq(permutation_challenges_var[i], permutation_challenges[i].cons()); - } - builder.halt(); - - let program = builder.compile_program(); - run_test_recursion(program, Some(witness_stream.into()), TestConfig::All); - } - - fn test_public_values_program() -> RecursionProgram { - let mut builder = Builder::::default(); - - // Add a hash invocation, since the poseidon2 table expects that it's in the first row. - let hash_input = builder.constant(vec![vec![F::one()]]); - builder.poseidon2_hash_x(&hash_input); - - let mut public_values_stream: Vec> = - (0..RECURSIVE_PROOF_NUM_PV_ELTS).map(|_| builder.uninit()).collect(); - - let public_values: &mut RecursionPublicValues<_> = - public_values_stream.as_mut_slice().borrow_mut(); - - public_values.sp1_vk_digest = [builder.constant(::F::zero()); DIGEST_SIZE]; - public_values.next_pc = builder.constant(::F::one()); - public_values.next_execution_shard = builder.constant(::F::two()); - public_values.end_reconstruct_deferred_digest = - [builder.constant(::F::from_canonical_usize(3)); POSEIDON_NUM_WORDS]; - - public_values.deferred_proofs_digest = - [builder.constant(::F::from_canonical_usize(4)); POSEIDON_NUM_WORDS]; - - public_values.cumulative_sum = - [builder.constant(::F::from_canonical_usize(5)); 4]; - - commit_public_values(&mut builder, public_values); - builder.halt(); - - builder.compile_program() - } - - #[test] - fn test_public_values_failure() { - let program = test_public_values_program(); - - let config = SC::default(); - - let mut runtime = Runtime::::new(&program, config.perm.clone()); - runtime.run().unwrap(); - - let machine = RecursionAir::<_, 3>::machine(SC::default()); - let prover = CpuProver::new(machine); - let (pk, vk) = prover.setup(&program); - let record = runtime.record.clone(); - - let mut challenger = prover.config().challenger(); - let mut proof = - prover.prove(&pk, vec![record], &mut challenger, SP1CoreOpts::recursion()).unwrap(); - - let mut challenger = prover.config().challenger(); - let verification_result = prover.machine().verify(&vk, &proof, &mut challenger); - if verification_result.is_err() { - panic!("Proof should verify successfully"); - } - - // Corrupt the public values. 
- proof.shard_proofs[0].public_values[RECURSION_PUBLIC_VALUES_COL_MAP.digest[0]] = - InnerVal::zero(); - let verification_result = prover.machine().verify(&vk, &proof, &mut challenger); - if verification_result.is_ok() { - panic!("Proof should not verify successfully"); - } - } - - #[test] - #[ignore] - fn test_kitchen_sink() { - setup_logger(); - - let time = Instant::now(); - let mut builder = AsmBuilder::::default(); - - let a: Felt<_> = builder.eval(F::from_canonical_u32(23)); - let b: Felt<_> = builder.eval(F::from_canonical_u32(17)); - let a_plus_b = builder.eval(a + b); - let mut rng = rand::thread_rng(); - let a_ext_val = rng.gen::(); - let b_ext_val = rng.gen::(); - let a_ext: Ext<_, _> = builder.eval(a_ext_val.cons()); - let b_ext: Ext<_, _> = builder.eval(b_ext_val.cons()); - let a_plus_b_ext = builder.eval(a_ext + b_ext); - builder.print_f(a_plus_b); - builder.print_e(a_plus_b_ext); - builder.halt(); - - let program = builder.compile_program(); - let elapsed = time.elapsed(); - println!("Building took: {:?}", elapsed); - - run_test_recursion(program, None, TestConfig::All); - } -} diff --git a/crates/recursion/program/src/types.rs b/crates/recursion/program/src/types.rs deleted file mode 100644 index ab3881e76b..0000000000 --- a/crates/recursion/program/src/types.rs +++ /dev/null @@ -1,228 +0,0 @@ -use p3_air::BaseAir; -use p3_field::{AbstractExtensionField, AbstractField}; -use sp1_primitives::consts::WORD_SIZE; -use sp1_recursion_compiler::prelude::*; -use sp1_stark::{ - air::{MachineAir, PV_DIGEST_NUM_WORDS}, - AirOpenedValues, Chip, ChipOpenedValues, Word, -}; - -use crate::fri::{ - types::{DigestVariable, FriConfigVariable, TwoAdicPcsProofVariable}, - TwoAdicMultiplicativeCosetVariable, -}; - -/// Reference: [sp1_core_machine::stark::ShardProof] -#[derive(DslVariable, Clone)] -pub struct ShardProofVariable { - pub commitment: ShardCommitmentVariable, - pub opened_values: ShardOpenedValuesVariable, - pub opening_proof: TwoAdicPcsProofVariable, - pub public_values: Array>, - pub quotient_data: Array>, - pub sorted_idxs: Array>, -} - -#[derive(DslVariable, Clone, Copy)] -pub struct QuotientData { - pub log_quotient_degree: Var, - pub quotient_size: Var, -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct QuotientDataValues { - pub log_quotient_degree: usize, - pub quotient_size: usize, -} - -/// Reference: [sp1_core_machine::stark::VerifyingKey] -#[derive(DslVariable, Clone)] -pub struct VerifyingKeyVariable { - pub commitment: DigestVariable, - pub pc_start: Felt, - pub preprocessed_sorted_idxs: Array>, - pub prep_domains: Array>, -} - -/// Reference: [sp1_core_machine::stark::ShardCommitment] -#[derive(DslVariable, Clone)] -pub struct ShardCommitmentVariable { - pub main_commit: DigestVariable, - pub permutation_commit: DigestVariable, - pub quotient_commit: DigestVariable, -} - -/// Reference: [sp1_core_machine::stark::ShardOpenedValues] -#[derive(DslVariable, Debug, Clone)] -pub struct ShardOpenedValuesVariable { - pub chips: Array>, -} - -/// Reference: [sp1_core_machine::stark::ChipOpenedValues] -#[derive(Debug, Clone)] -pub struct ChipOpening { - pub preprocessed: AirOpenedValues>, - pub main: AirOpenedValues>, - pub permutation: AirOpenedValues>, - pub quotient: Vec>>, - pub cumulative_sum: Ext, - pub log_degree: Var, -} - -/// Reference: [sp1_core_machine::stark::ChipOpenedValues] -#[derive(DslVariable, Debug, Clone)] -pub struct ChipOpenedValuesVariable { - pub preprocessed: AirOpenedValuesVariable, - pub main: AirOpenedValuesVariable, - pub permutation: 
AirOpenedValuesVariable, - pub quotient: Array>>, - pub cumulative_sum: Ext, - pub log_degree: Var, -} - -/// Reference: [sp1_core_machine::stark::AirOpenedValues] -#[derive(DslVariable, Debug, Clone)] -pub struct AirOpenedValuesVariable { - pub local: Array>, - pub next: Array>, -} - -#[derive(DslVariable, Debug, Clone)] -pub struct Sha256DigestVariable { - pub bytes: Array>, -} - -impl Sha256DigestVariable { - pub fn from_words(builder: &mut Builder, words: &[Word>]) -> Self { - let mut bytes = builder.array(PV_DIGEST_NUM_WORDS * WORD_SIZE); - for (i, word) in words.iter().enumerate() { - for j in 0..WORD_SIZE { - let byte = word[j]; - builder.set(&mut bytes, i * WORD_SIZE + j, byte); - } - } - Sha256DigestVariable { bytes } - } -} - -impl ChipOpening { - /// Collect opening values from a dynamic array into vectors. - /// - /// This method is used to convert a `ChipOpenedValuesVariable` into a `ChipOpenedValues`, which - /// are the same values but with each opening converted from a dynamic array into a Rust vector. - /// - /// *Safety*: This method also verifies that the legnth of the dynamic arrays match the expected - /// length of the vectors. - pub fn from_variable( - builder: &mut Builder, - chip: &Chip, - opening: &ChipOpenedValuesVariable, - ) -> Self - where - A: MachineAir, - { - let mut preprocessed = AirOpenedValues { local: vec![], next: vec![] }; - let preprocessed_width = chip.preprocessed_width(); - // Assert that the length of the dynamic arrays match the expected length of the vectors. - builder.assert_usize_eq(preprocessed_width, opening.preprocessed.local.len()); - builder.assert_usize_eq(preprocessed_width, opening.preprocessed.next.len()); - // Collect the preprocessed values into vectors. - for i in 0..preprocessed_width { - preprocessed.local.push(builder.get(&opening.preprocessed.local, i)); - preprocessed.next.push(builder.get(&opening.preprocessed.next, i)); - } - - let mut main = AirOpenedValues { local: vec![], next: vec![] }; - let main_width = chip.width(); - // Assert that the length of the dynamic arrays match the expected length of the vectors. - builder.assert_usize_eq(main_width, opening.main.local.len()); - builder.assert_usize_eq(main_width, opening.main.next.len()); - // Collect the main values into vectors. - for i in 0..main_width { - main.local.push(builder.get(&opening.main.local, i)); - main.next.push(builder.get(&opening.main.next, i)); - } - - let mut permutation = AirOpenedValues { local: vec![], next: vec![] }; - let permutation_width = C::EF::D * chip.permutation_width(); - // Assert that the length of the dynamic arrays match the expected length of the vectors. - builder.assert_usize_eq(permutation_width, opening.permutation.local.len()); - builder.assert_usize_eq(permutation_width, opening.permutation.next.len()); - // Collect the permutation values into vectors. - for i in 0..permutation_width { - permutation.local.push(builder.get(&opening.permutation.local, i)); - permutation.next.push(builder.get(&opening.permutation.next, i)); - } - - let num_quotient_chunks = 1 << chip.log_quotient_degree(); - let mut quotient = vec![]; - // Assert that the length of the quotient chunk arrays match the expected length. - builder.assert_usize_eq(num_quotient_chunks, opening.quotient.len()); - // Collect the quotient values into vectors. - for i in 0..num_quotient_chunks { - let chunk = builder.get(&opening.quotient, i); - // Assert that the chunk length matches the expected length. 
- builder.assert_usize_eq(C::EF::D, chunk.len()); - // Collect the quotient values into vectors. - let mut quotient_vals = vec![]; - for j in 0..C::EF::D { - let value = builder.get(&chunk, j); - quotient_vals.push(value); - } - quotient.push(quotient_vals); - } - - ChipOpening { - preprocessed, - main, - permutation, - quotient, - cumulative_sum: opening.cumulative_sum, - log_degree: opening.log_degree, - } - } -} - -impl FromConstant for AirOpenedValuesVariable { - type Constant = AirOpenedValues; - - fn constant(value: Self::Constant, builder: &mut Builder) -> Self { - AirOpenedValuesVariable { - local: builder.constant(value.local), - next: builder.constant(value.next), - } - } -} - -impl FromConstant for ChipOpenedValuesVariable { - type Constant = ChipOpenedValues; - - fn constant(value: Self::Constant, builder: &mut Builder) -> Self { - ChipOpenedValuesVariable { - preprocessed: builder.constant(value.preprocessed), - main: builder.constant(value.main), - permutation: builder.constant(value.permutation), - quotient: builder.constant(value.quotient), - cumulative_sum: builder.eval(value.cumulative_sum.cons()), - log_degree: builder.eval(C::N::from_canonical_usize(value.log_degree)), - } - } -} - -impl FriConfigVariable { - pub fn get_subgroup( - &self, - builder: &mut Builder, - log_degree: impl Into>, - ) -> TwoAdicMultiplicativeCosetVariable { - builder.get(&self.subgroups, log_degree) - } - - pub fn get_two_adic_generator( - &self, - builder: &mut Builder, - bits: impl Into>, - ) -> Felt { - builder.get(&self.generators, bits) - } -} diff --git a/crates/recursion/program/src/utils.rs b/crates/recursion/program/src/utils.rs deleted file mode 100644 index 551a346178..0000000000 --- a/crates/recursion/program/src/utils.rs +++ /dev/null @@ -1,241 +0,0 @@ -use p3_baby_bear::{BabyBear, DiffusionMatrixBabyBear}; -use p3_commit::{ExtensionMmcs, TwoAdicMultiplicativeCoset}; -use p3_field::{extension::BinomialExtensionField, AbstractField, Field, TwoAdicField}; -use p3_fri::FriConfig; -use p3_merkle_tree::FieldMerkleTreeMmcs; -use p3_poseidon2::{Poseidon2, Poseidon2ExternalMatrixGeneral}; -use p3_symmetric::{PaddingFreeSponge, TruncatedPermutation}; -use sp1_recursion_compiler::{ - asm::AsmConfig, - ir::{Array, Builder, Config, Felt, MemVariable, Var}, -}; -use sp1_recursion_core::{ - air::ChallengerPublicValues, - runtime::{DIGEST_SIZE, PERMUTATION_WIDTH}, -}; -use sp1_stark::{ - air::MachineAir, baby_bear_poseidon2::BabyBearPoseidon2, Dom, ShardProof, StarkGenericConfig, - StarkMachine, StarkVerifyingKey, -}; - -use crate::{ - challenger::DuplexChallengerVariable, - fri::{types::FriConfigVariable, TwoAdicMultiplicativeCosetVariable}, - stark::EMPTY, - types::{QuotientDataValues, VerifyingKeyVariable}, -}; - -type SC = BabyBearPoseidon2; -type F = ::Val; -type EF = ::Challenge; -type C = AsmConfig; -type Val = BabyBear; -type Challenge = BinomialExtensionField; -type Perm = Poseidon2; -type Hash = PaddingFreeSponge; -type Compress = TruncatedPermutation; -type ValMmcs = - FieldMerkleTreeMmcs<::Packing, ::Packing, Hash, Compress, 8>; -type ChallengeMmcs = ExtensionMmcs; -type RecursionConfig = AsmConfig; -type RecursionBuilder = Builder; - -pub fn const_fri_config( - builder: &mut RecursionBuilder, - config: &FriConfig, -) -> FriConfigVariable { - let two_addicity = Val::TWO_ADICITY; - let mut generators = builder.dyn_array(two_addicity); - let mut subgroups = builder.dyn_array(two_addicity); - for i in 0..two_addicity { - let constant_generator = Val::two_adic_generator(i); - 
builder.set(&mut generators, i, constant_generator); - - let constant_domain = TwoAdicMultiplicativeCoset { log_n: i, shift: Val::one() }; - let domain_value: TwoAdicMultiplicativeCosetVariable<_> = builder.constant(constant_domain); - builder.set(&mut subgroups, i, domain_value); - } - FriConfigVariable { - log_blowup: builder.eval(BabyBear::from_canonical_usize(config.log_blowup)), - blowup: builder.eval(BabyBear::from_canonical_usize(1 << config.log_blowup)), - num_queries: builder.eval(BabyBear::from_canonical_usize(config.num_queries)), - proof_of_work_bits: builder.eval(BabyBear::from_canonical_usize(config.proof_of_work_bits)), - subgroups, - generators, - } -} - -pub fn clone>(builder: &mut RecursionBuilder, var: &T) -> T { - let mut arr = builder.dyn_array(1); - builder.set(&mut arr, 0, var.clone()); - builder.get(&arr, 0) -} - -pub fn clone_array>( - builder: &mut RecursionBuilder, - arr: &Array, -) -> Array { - let mut new_arr = builder.dyn_array(arr.len()); - builder.range(0, arr.len()).for_each(|i, builder| { - let var = builder.get(arr, i); - builder.set(&mut new_arr, i, var); - }); - new_arr -} - -// OPT: this can be done much more efficiently, but in the meantime this should work -pub fn felt2var(builder: &mut Builder, felt: Felt) -> Var { - let bits = builder.num2bits_f(felt); - builder.bits2num_v(&bits) -} - -pub fn var2felt(builder: &mut Builder, var: Var) -> Felt { - let bits = builder.num2bits_v(var); - builder.bits2num_f(&bits) -} - -/// Asserts that the challenger variable is equal to a challenger in public values. -pub fn assert_challenger_eq_pv( - builder: &mut Builder, - var: &DuplexChallengerVariable, - values: ChallengerPublicValues>, -) { - for i in 0..PERMUTATION_WIDTH { - let element = builder.get(&var.sponge_state, i); - builder.assert_felt_eq(element, values.sponge_state[i]); - } - let num_inputs_var = felt2var(builder, values.num_inputs); - builder.assert_var_eq(var.nb_inputs, num_inputs_var); - let mut input_buffer_array: Array<_, Felt<_>> = builder.dyn_array(PERMUTATION_WIDTH); - for i in 0..PERMUTATION_WIDTH { - builder.set(&mut input_buffer_array, i, values.input_buffer[i]); - } - builder.range(0, num_inputs_var).for_each(|i, builder| { - let element = builder.get(&var.input_buffer, i); - let values_element = builder.get(&input_buffer_array, i); - builder.assert_felt_eq(element, values_element); - }); - let num_outputs_var = felt2var(builder, values.num_outputs); - builder.assert_var_eq(var.nb_outputs, num_outputs_var); - let mut output_buffer_array: Array<_, Felt<_>> = builder.dyn_array(PERMUTATION_WIDTH); - for i in 0..PERMUTATION_WIDTH { - builder.set(&mut output_buffer_array, i, values.output_buffer[i]); - } - builder.range(0, num_outputs_var).for_each(|i, builder| { - let element = builder.get(&var.output_buffer, i); - let values_element = builder.get(&output_buffer_array, i); - builder.assert_felt_eq(element, values_element); - }); -} - -/// Assigns a challenger variable from a challenger in public values. 
-pub fn assign_challenger_from_pv( - builder: &mut Builder, - dst: &mut DuplexChallengerVariable, - values: ChallengerPublicValues>, -) { - for i in 0..PERMUTATION_WIDTH { - builder.set(&mut dst.sponge_state, i, values.sponge_state[i]); - } - let num_inputs_var = felt2var(builder, values.num_inputs); - builder.assign(dst.nb_inputs, num_inputs_var); - for i in 0..PERMUTATION_WIDTH { - builder.set(&mut dst.input_buffer, i, values.input_buffer[i]); - } - let num_outputs_var = felt2var(builder, values.num_outputs); - builder.assign(dst.nb_outputs, num_outputs_var); - for i in 0..PERMUTATION_WIDTH { - builder.set(&mut dst.output_buffer, i, values.output_buffer[i]); - } -} - -pub fn get_challenger_public_values( - builder: &mut Builder, - var: &DuplexChallengerVariable, -) -> ChallengerPublicValues> { - let sponge_state = core::array::from_fn(|i| builder.get(&var.sponge_state, i)); - let num_inputs = var2felt(builder, var.nb_inputs); - let input_buffer = core::array::from_fn(|i| builder.get(&var.input_buffer, i)); - let num_outputs = var2felt(builder, var.nb_outputs); - let output_buffer = core::array::from_fn(|i| builder.get(&var.output_buffer, i)); - - ChallengerPublicValues { sponge_state, num_inputs, input_buffer, num_outputs, output_buffer } -} - -/// Hash the verifying key + prep domains into a single digest. -/// poseidon2( commit[0..8] || pc_start || prep_domains[N].{log_n, .size, .shift, .g}) -pub fn hash_vkey( - builder: &mut Builder, - vk: &VerifyingKeyVariable, -) -> Array> { - let domain_slots: Var<_> = builder.eval(vk.prep_domains.len() * 4); - let vkey_slots: Var<_> = builder.constant(C::N::from_canonical_usize(DIGEST_SIZE + 1)); - let total_slots: Var<_> = builder.eval(vkey_slots + domain_slots); - let mut inputs = builder.dyn_array(total_slots); - builder.range(0, DIGEST_SIZE).for_each(|i, builder| { - let element = builder.get(&vk.commitment, i); - builder.set(&mut inputs, i, element); - }); - builder.set(&mut inputs, DIGEST_SIZE, vk.pc_start); - let four: Var<_> = builder.constant(C::N::from_canonical_usize(4)); - let one: Var<_> = builder.constant(C::N::one()); - builder.range(0, vk.prep_domains.len()).for_each(|i, builder| { - let sorted_index = builder.get(&vk.preprocessed_sorted_idxs, i); - let domain = builder.get(&vk.prep_domains, i); - let log_n_index: Var<_> = builder.eval(vkey_slots + sorted_index * four); - let size_index: Var<_> = builder.eval(log_n_index + one); - let shift_index: Var<_> = builder.eval(size_index + one); - let g_index: Var<_> = builder.eval(shift_index + one); - let log_n_felt = var2felt(builder, domain.log_n); - let size_felt = var2felt(builder, domain.size); - builder.set(&mut inputs, log_n_index, log_n_felt); - builder.set(&mut inputs, size_index, size_felt); - builder.set(&mut inputs, shift_index, domain.shift); - builder.set(&mut inputs, g_index, domain.g); - }); - builder.poseidon2_hash(&inputs) -} - -pub(crate) fn get_sorted_indices>( - machine: &StarkMachine, - proof: &ShardProof, -) -> Vec { - machine - .chips_sorted_indices(proof) - .into_iter() - .map(|x| match x { - Some(x) => x, - None => EMPTY, - }) - .collect() -} - -pub(crate) fn get_preprocessed_data>( - machine: &StarkMachine, - vk: &StarkVerifyingKey, -) -> (Vec, Vec>) { - let chips = machine.chips(); - let (prep_sorted_indices, prep_domains) = machine - .preprocessed_chip_ids() - .into_iter() - .map(|chip_idx| { - let name = chips[chip_idx].name().clone(); - let prep_sorted_idx = vk.chip_ordering[&name]; - (prep_sorted_idx, vk.chip_information[prep_sorted_idx].1) - }) - 
.unzip(); - (prep_sorted_indices, prep_domains) -} - -pub(crate) fn get_chip_quotient_data>( - machine: &StarkMachine, - proof: &ShardProof, -) -> Vec { - machine - .shard_chips_ordered(&proof.chip_ordering) - .map(|chip| { - let log_quotient_degree = chip.log_quotient_degree(); - QuotientDataValues { log_quotient_degree, quotient_size: 1 << log_quotient_degree } - }) - .collect() -} diff --git a/crates/sdk/Cargo.toml b/crates/sdk/Cargo.toml index a60c2ee22c..bbcecd9f5c 100644 --- a/crates/sdk/Cargo.toml +++ b/crates/sdk/Cargo.toml @@ -12,7 +12,6 @@ categories = { workspace = true } [dependencies] prost = { version = "0.13", optional = true } serde = { version = "1.0.204", features = ["derive"] } -serde_json = "1.0.121" twirp = { package = "twirp-rs", version = "0.13.0-succinct", optional = true } async-trait = "0.1.81" reqwest-middleware = { version = "0.3.2", optional = true } @@ -28,8 +27,6 @@ sp1-cuda = { workspace = true, optional = true } futures = "0.3.30" bincode = "1.3.3" tokio = { version = "1.39.2", features = ["full"], optional = true } -p3-matrix = { workspace = true } -p3-commit = { workspace = true } p3-field = { workspace = true } p3-baby-bear = { workspace = true } p3-fri = { workspace = true } @@ -37,28 +34,23 @@ indicatif = "0.17.8" tracing = "0.1.40" hex = "0.4.3" log = "0.4.22" -axum = { version = "0.7.7", optional = true } alloy-sol-types = { version = "0.7.7", optional = true } -sha2 = "0.10.8" dirs = "5.0.1" tempfile = "3.10.1" -num-bigint = "0.4.6" cfg-if = "1.0" ethers = { version = "2", default-features = false, optional = true } -strum_macros = "0.26.4" strum = "0.26.3" +strum_macros = "0.26.4" thiserror = "1.0.63" hashbrown = "0.14.5" -sysinfo = "0.30.13" sp1-core-executor = { workspace = true } sp1-stark = { workspace = true } sp1-primitives = { workspace = true } -getrandom = { version = "0.2.15", features = ["custom", "js"] } itertools = "0.13.0" tonic = { version = "0.12", features = ["tls", "tls-roots"], optional = true } alloy-signer = { version = "0.3.6", optional = true } alloy-signer-local = { version = "0.3.6", optional = true } -alloy-primitives = { version = "0.8.3", optional = true } +alloy-primitives = { version = "0.8.7", optional = true } aws-sdk-s3 = { version = "1.53.0", optional = true } aws-config = { version = "1.5.7", optional = true } diff --git a/crates/sdk/src/action.rs b/crates/sdk/src/action.rs index 48f8ae7da3..6c0767ff83 100644 --- a/crates/sdk/src/action.rs +++ b/crates/sdk/src/action.rs @@ -122,6 +122,17 @@ impl<'a> Prove<'a> { let proof_opts = ProofOpts { sp1_prover_opts: opts, timeout }; let context = context_builder.build(); + // Dump the program and stdin to files for debugging if `SP1_DUMP` is set. 
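+        // When set (e.g. `SP1_DUMP=1` or `SP1_DUMP=true`), the program ELF is written to
+        // `program.bin` and the bincode-serialized stdin to `stdin.bin` in the current
+        // working directory.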
+ if std::env::var("SP1_DUMP") + .map(|v| v == "1" || v.to_lowercase() == "true") + .unwrap_or(false) + { + let program = pk.elf.clone(); + std::fs::write("program.bin", program).unwrap(); + let stdin = bincode::serialize(&stdin).unwrap(); + std::fs::write("stdin.bin", stdin.clone()).unwrap(); + } + prover.prove(pk, stdin, proof_opts, context, kind) } diff --git a/crates/sdk/src/artifacts.rs b/crates/sdk/src/artifacts.rs index 26663c146a..2ed25462c0 100644 --- a/crates/sdk/src/artifacts.rs +++ b/crates/sdk/src/artifacts.rs @@ -23,7 +23,7 @@ pub fn export_solidity_plonk_bn254_verifier(output_dir: impl Into) -> R let artifacts_dir = if sp1_prover::build::sp1_dev_mode() { sp1_prover::build::plonk_bn254_artifacts_dev_dir() } else { - try_install_circuit_artifacts() + try_install_circuit_artifacts("plonk") }; let verifier_path = artifacts_dir.join("SP1VerifierPlonk.sol"); @@ -52,7 +52,7 @@ pub fn export_solidity_groth16_bn254_verifier(output_dir: impl Into) -> let artifacts_dir = if sp1_prover::build::sp1_dev_mode() { sp1_prover::build::groth16_bn254_artifacts_dev_dir() } else { - try_install_circuit_artifacts() + try_install_circuit_artifacts("groth16") }; let verifier_path = artifacts_dir.join("SP1VerifierGroth16.sol"); diff --git a/crates/sdk/src/install.rs b/crates/sdk/src/install.rs index b28ce0ecab..d8661df82d 100644 --- a/crates/sdk/src/install.rs +++ b/crates/sdk/src/install.rs @@ -15,29 +15,42 @@ use crate::SP1_CIRCUIT_VERSION; /// The base URL for the S3 bucket containing the ciruit artifacts. pub const CIRCUIT_ARTIFACTS_URL_BASE: &str = "https://sp1-circuits.s3-us-east-2.amazonaws.com"; -/// The directory where the circuit artifacts will be stored. -pub fn install_circuit_artifacts_dir() -> PathBuf { - dirs::home_dir().unwrap().join(".sp1").join("circuits").join(SP1_CIRCUIT_VERSION) +/// The directory where the groth16 circuit artifacts will be stored. +pub fn groth16_circuit_artifacts_dir() -> PathBuf { + dirs::home_dir().unwrap().join(".sp1").join("circuits/groth16").join(SP1_CIRCUIT_VERSION) } -/// Tries to install the circuit artifacts if they are not already installed. -pub fn try_install_circuit_artifacts() -> PathBuf { - let build_dir = install_circuit_artifacts_dir(); +/// The directory where the plonk circuit artifacts will be stored. +pub fn plonk_circuit_artifacts_dir() -> PathBuf { + dirs::home_dir().unwrap().join(".sp1").join("circuits/plonk").join(SP1_CIRCUIT_VERSION) +} + +/// Tries to install the groth16 circuit artifacts if they are not already installed. +pub fn try_install_circuit_artifacts(artifacts_type: &str) -> PathBuf { + let build_dir = if artifacts_type == "groth16" { + groth16_circuit_artifacts_dir() + } else if artifacts_type == "plonk" { + plonk_circuit_artifacts_dir() + } else { + unimplemented!("unsupported artifacts type: {}", artifacts_type); + }; if build_dir.exists() { println!( - "[sp1] circuit artifacts already seem to exist at {}. if you want to re-download them, delete the directory", + "[sp1] {} circuit artifacts already seem to exist at {}. if you want to re-download them, delete the directory", + artifacts_type, build_dir.display() ); } else { cfg_if! { if #[cfg(any(feature = "network", feature = "network-v2"))] { println!( - "[sp1] circuit artifacts for version {} do not exist at {}. downloading...", + "[sp1] {} circuit artifacts for version {} do not exist at {}. 
downloading...", + artifacts_type, SP1_CIRCUIT_VERSION, build_dir.display() ); - install_circuit_artifacts(build_dir.clone()); + install_circuit_artifacts(build_dir.clone(), artifacts_type); } } } @@ -47,14 +60,15 @@ pub fn try_install_circuit_artifacts() -> PathBuf { /// Install the latest circuit artifacts. /// /// This function will download the latest circuit artifacts from the S3 bucket and extract them -/// to the directory specified by [plonk_bn254_artifacts_dir()]. +/// to the directory specified by [groth16_bn254_artifacts_dir()]. #[cfg(any(feature = "network", feature = "network-v2"))] -pub fn install_circuit_artifacts(build_dir: PathBuf) { +pub fn install_circuit_artifacts(build_dir: PathBuf, artifacts_type: &str) { // Create the build directory. std::fs::create_dir_all(&build_dir).expect("failed to create build directory"); // Download the artifacts. - let download_url = format!("{}/{}.tar.gz", CIRCUIT_ARTIFACTS_URL_BASE, SP1_CIRCUIT_VERSION); + let download_url = + format!("{}/{}-{}.tar.gz", CIRCUIT_ARTIFACTS_URL_BASE, SP1_CIRCUIT_VERSION, artifacts_type); let mut artifacts_tar_gz_file = tempfile::NamedTempFile::new().expect("failed to create tempfile"); let client = Client::builder().build().expect("failed to create reqwest client"); diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs index 79bef3a803..691565d068 100644 --- a/crates/sdk/src/lib.rs +++ b/crates/sdk/src/lib.rs @@ -316,7 +316,7 @@ macro_rules! include_elf { #[cfg(test)] mod tests { - use sp1_prover::init::SP1PublicValues; + use sp1_primitives::io::SP1PublicValues; use crate::{utils, CostEstimator, ProverClient, SP1Stdin}; diff --git a/crates/sdk/src/proof.rs b/crates/sdk/src/proof.rs index 8fd64c3383..724ba2d087 100644 --- a/crates/sdk/src/proof.rs +++ b/crates/sdk/src/proof.rs @@ -2,6 +2,7 @@ use std::{fmt::Debug, fs::File, path::Path}; use anyhow::Result; use serde::{Deserialize, Serialize}; +use sp1_core_executor::SP1ReduceProof; use sp1_core_machine::io::SP1Stdin; use sp1_primitives::io::SP1PublicValues; use strum_macros::{EnumDiscriminants, EnumTryAs}; @@ -16,7 +17,7 @@ use sp1_stark::{MachineVerificationError, ShardProof}; pub enum SP1Proof { #[strum_discriminants(default)] Core(Vec>), - Compressed(ShardProof), + Compressed(Box>), Plonk(PlonkBn254Proof), Groth16(Groth16Bn254Proof), } diff --git a/crates/sdk/src/provers/cpu.rs b/crates/sdk/src/provers/cpu.rs index 03cc8843db..56d7dda051 100644 --- a/crates/sdk/src/provers/cpu.rs +++ b/crates/sdk/src/provers/cpu.rs @@ -1,10 +1,12 @@ use anyhow::Result; use sp1_core_executor::SP1Context; -use sp1_prover::{components::DefaultProverComponents, SP1Prover, SP1Stdin}; +use sp1_core_machine::io::SP1Stdin; +use sp1_prover::{components::DefaultProverComponents, SP1Prover}; +use crate::install::try_install_circuit_artifacts; use crate::{ - install::try_install_circuit_artifacts, provers::ProofOpts, Prover, SP1Proof, SP1ProofKind, - SP1ProofWithPublicValues, SP1ProvingKey, SP1VerifyingKey, + provers::ProofOpts, Prover, SP1Proof, SP1ProofKind, SP1ProofWithPublicValues, SP1ProvingKey, + SP1VerifyingKey, }; use super::ProverType; @@ -49,7 +51,8 @@ impl Prover for CpuProver { kind: SP1ProofKind, ) -> Result { // Generate the core proof. 
- let proof = self.prover.prove_core(pk, &stdin, opts.sp1_prover_opts, context)?; + let proof: sp1_prover::SP1ProofWithMetadata = + self.prover.prove_core(pk, &stdin, opts.sp1_prover_opts, context)?; if kind == SP1ProofKind::Core { return Ok(SP1ProofWithPublicValues { proof: SP1Proof::Core(proof.proof.0), @@ -59,7 +62,8 @@ impl Prover for CpuProver { }); } - let deferred_proofs = stdin.proofs.iter().map(|p| p.0.clone()).collect(); + let deferred_proofs = + stdin.proofs.iter().map(|(reduce_proof, _)| reduce_proof.clone()).collect(); let public_values = proof.public_values.clone(); // Generate the compressed proof. @@ -67,7 +71,7 @@ impl Prover for CpuProver { self.prover.compress(&pk.vk, proof, deferred_proofs, opts.sp1_prover_opts)?; if kind == SP1ProofKind::Compressed { return Ok(SP1ProofWithPublicValues { - proof: SP1Proof::Compressed(reduce_proof.proof), + proof: SP1Proof::Compressed(Box::new(reduce_proof)), stdin, public_values, sp1_version: self.version().to_string(), @@ -83,11 +87,11 @@ impl Prover for CpuProver { if kind == SP1ProofKind::Plonk { let plonk_bn254_aritfacts = if sp1_prover::build::sp1_dev_mode() { sp1_prover::build::try_build_plonk_bn254_artifacts_dev( - self.prover.wrap_vk(), + &outer_proof.vk, &outer_proof.proof, ) } else { - try_install_circuit_artifacts() + try_install_circuit_artifacts("plonk") }; let proof = self.prover.wrap_plonk_bn254(outer_proof, &plonk_bn254_aritfacts); @@ -100,11 +104,11 @@ impl Prover for CpuProver { } else if kind == SP1ProofKind::Groth16 { let groth16_bn254_artifacts = if sp1_prover::build::sp1_dev_mode() { sp1_prover::build::try_build_groth16_bn254_artifacts_dev( - self.prover.wrap_vk(), + &outer_proof.vk, &outer_proof.proof, ) } else { - try_install_circuit_artifacts() + try_install_circuit_artifacts("groth16") }; let proof = self.prover.wrap_groth16_bn254(outer_proof, &groth16_bn254_artifacts); diff --git a/crates/sdk/src/provers/cuda.rs b/crates/sdk/src/provers/cuda.rs index 9475cab8fe..7b14134ed6 100644 --- a/crates/sdk/src/provers/cuda.rs +++ b/crates/sdk/src/provers/cuda.rs @@ -1,12 +1,13 @@ use anyhow::Result; +use sp1_core_machine::io::SP1Stdin; use sp1_cuda::SP1CudaProver; -use sp1_prover::{components::DefaultProverComponents, SP1Prover, SP1Stdin}; +use sp1_prover::{components::DefaultProverComponents, SP1Prover}; use super::ProverType; +use crate::install::try_install_circuit_artifacts; use crate::{ - provers::{try_install_circuit_artifacts, ProofOpts}, - Prover, SP1Context, SP1Proof, SP1ProofKind, SP1ProofWithPublicValues, SP1ProvingKey, - SP1VerifyingKey, + provers::ProofOpts, Prover, SP1Context, SP1Proof, SP1ProofKind, SP1ProofWithPublicValues, + SP1ProvingKey, SP1VerifyingKey, }; /// An implementation of [crate::ProverClient] that can generate proofs locally using CUDA. @@ -57,14 +58,15 @@ impl Prover for CudaProver { }); } - let deferred_proofs = stdin.proofs.iter().map(|p| p.0.clone()).collect(); + let deferred_proofs = + stdin.proofs.iter().map(|(reduce_proof, _)| reduce_proof.clone()).collect(); let public_values = proof.public_values.clone(); // Generate the compressed proof. let reduce_proof = self.cuda_prover.compress(&pk.vk, proof, deferred_proofs)?; if kind == SP1ProofKind::Compressed { return Ok(SP1ProofWithPublicValues { - proof: SP1Proof::Compressed(reduce_proof.proof), + proof: SP1Proof::Compressed(Box::new(reduce_proof)), stdin, public_values, sp1_version: self.version().to_string(), @@ -77,22 +79,39 @@ impl Prover for CudaProver { // Genenerate the wrap proof. 
let outer_proof = self.cuda_prover.wrap_bn254(compress_proof)?; - let plonk_bn254_aritfacts = if sp1_prover::build::sp1_dev_mode() { - sp1_prover::build::try_build_plonk_bn254_artifacts_dev( - self.prover.wrap_vk(), - &outer_proof.proof, - ) - } else { - try_install_circuit_artifacts() - }; - let proof = self.prover.wrap_plonk_bn254(outer_proof, &plonk_bn254_aritfacts); if kind == SP1ProofKind::Plonk { + let plonk_bn254_aritfacts = if sp1_prover::build::sp1_dev_mode() { + sp1_prover::build::try_build_plonk_bn254_artifacts_dev( + &outer_proof.vk, + &outer_proof.proof, + ) + } else { + try_install_circuit_artifacts("plonk") + }; + let proof = self.prover.wrap_plonk_bn254(outer_proof, &plonk_bn254_aritfacts); return Ok(SP1ProofWithPublicValues { proof: SP1Proof::Plonk(proof), stdin, public_values, sp1_version: self.version().to_string(), }); + } else if kind == SP1ProofKind::Groth16 { + let groth16_bn254_artifacts = if sp1_prover::build::sp1_dev_mode() { + sp1_prover::build::try_build_groth16_bn254_artifacts_dev( + &outer_proof.vk, + &outer_proof.proof, + ) + } else { + try_install_circuit_artifacts("groth16") + }; + + let proof = self.prover.wrap_groth16_bn254(outer_proof, &groth16_bn254_artifacts); + return Ok(SP1ProofWithPublicValues { + proof: SP1Proof::Groth16(proof), + stdin, + public_values, + sp1_version: self.version().to_string(), + }); } unreachable!() diff --git a/crates/sdk/src/provers/mock.rs b/crates/sdk/src/provers/mock.rs index b774cd005b..ca317972ac 100644 --- a/crates/sdk/src/provers/mock.rs +++ b/crates/sdk/src/provers/mock.rs @@ -1,8 +1,8 @@ #![allow(unused_variables)] use hashbrown::HashMap; -use sp1_core_executor::SP1Context; +use sp1_core_executor::{SP1Context, SP1ReduceProof}; use sp1_core_machine::io::SP1Stdin; -use sp1_stark::{ShardCommitment, ShardOpenedValues, ShardProof}; +use sp1_stark::{ShardCommitment, ShardOpenedValues, ShardProof, StarkVerifyingKey}; use crate::{ Prover, SP1Proof, SP1ProofKind, SP1ProofWithPublicValues, SP1ProvingKey, SP1VerificationError, @@ -66,26 +66,42 @@ impl Prover for MockProver { } SP1ProofKind::Compressed => { let (public_values, _) = self.prover.execute(&pk.elf, &stdin, context)?; - Ok(SP1ProofWithPublicValues { - proof: SP1Proof::Compressed(ShardProof { - commitment: ShardCommitment { - main_commit: [BabyBear::zero(); 8].into(), - permutation_commit: [BabyBear::zero(); 8].into(), - quotient_commit: [BabyBear::zero(); 8].into(), - }, - opened_values: ShardOpenedValues { chips: vec![] }, - opening_proof: TwoAdicFriPcsProof { - fri_proof: FriProof { - commit_phase_commits: vec![], - query_proofs: vec![], - final_poly: Default::default(), - pow_witness: BabyBear::zero(), - }, - query_openings: vec![], + + let shard_proof = ShardProof { + commitment: ShardCommitment { + global_main_commit: [BabyBear::zero(); 8].into(), + local_main_commit: [BabyBear::zero(); 8].into(), + permutation_commit: [BabyBear::zero(); 8].into(), + quotient_commit: [BabyBear::zero(); 8].into(), + }, + opened_values: ShardOpenedValues { chips: vec![] }, + opening_proof: TwoAdicFriPcsProof { + fri_proof: FriProof { + commit_phase_commits: vec![], + query_proofs: vec![], + final_poly: Default::default(), + pow_witness: BabyBear::zero(), }, - chip_ordering: HashMap::new(), - public_values: vec![], - }), + query_openings: vec![], + }, + chip_ordering: HashMap::new(), + public_values: vec![], + }; + + let reduce_vk = StarkVerifyingKey { + commit: [BabyBear::zero(); 8].into(), + pc_start: BabyBear::zero(), + chip_information: vec![], + chip_ordering: HashMap::new(), + 
}; + + let proof = SP1Proof::Compressed(Box::new(SP1ReduceProof { + vk: reduce_vk, + proof: shard_proof, + })); + + Ok(SP1ProofWithPublicValues { + proof, stdin, public_values, sp1_version: self.version().to_string(), diff --git a/crates/sdk/src/provers/mod.rs b/crates/sdk/src/provers/mod.rs index ecc62ff4ce..8e7859d9b2 100644 --- a/crates/sdk/src/provers/mod.rs +++ b/crates/sdk/src/provers/mod.rs @@ -18,15 +18,14 @@ use sp1_core_executor::SP1Context; use sp1_core_machine::{io::SP1Stdin, SP1_CIRCUIT_VERSION}; use sp1_prover::{ components::SP1ProverComponents, CoreSC, InnerSC, SP1CoreProofData, SP1Prover, SP1ProvingKey, - SP1ReduceProof, SP1VerifyingKey, + SP1VerifyingKey, }; use sp1_stark::{air::PublicValues, MachineVerificationError, SP1ProverOpts, Word}; use strum_macros::EnumString; use thiserror::Error; -use crate::{ - install::try_install_circuit_artifacts, SP1Proof, SP1ProofKind, SP1ProofWithPublicValues, -}; +use crate::install::try_install_circuit_artifacts; +use crate::{SP1Proof, SP1ProofKind, SP1ProofWithPublicValues}; /// The type of prover. #[derive(Debug, PartialEq, EnumString)] @@ -122,7 +121,7 @@ pub trait Prover: Send + Sync { } SP1Proof::Compressed(proof) => { let public_values: &PublicValues, _> = - proof.public_values.as_slice().borrow(); + proof.proof.public_values.as_slice().borrow(); // Get the commited value digest bytes. let commited_value_digest_bytes = public_values @@ -140,7 +139,7 @@ pub trait Prover: Send + Sync { } self.sp1_prover() - .verify_compressed(&SP1ReduceProof { proof: proof.clone() }, vkey) + .verify_compressed(proof, vkey) .map_err(SP1VerificationError::Recursion) } SP1Proof::Plonk(proof) => self @@ -152,7 +151,7 @@ pub trait Prover: Send + Sync { &if sp1_prover::build::sp1_dev_mode() { sp1_prover::build::plonk_bn254_artifacts_dev_dir() } else { - try_install_circuit_artifacts() + try_install_circuit_artifacts("plonk") }, ) .map_err(SP1VerificationError::Plonk), @@ -165,7 +164,7 @@ pub trait Prover: Send + Sync { &if sp1_prover::build::sp1_dev_mode() { sp1_prover::build::groth16_bn254_artifacts_dev_dir() } else { - try_install_circuit_artifacts() + try_install_circuit_artifacts("groth16") }, ) .map_err(SP1VerificationError::Groth16), diff --git a/crates/stark/Cargo.toml b/crates/stark/Cargo.toml index 0d5e773441..6865e1f7d2 100644 --- a/crates/stark/Cargo.toml +++ b/crates/stark/Cargo.toml @@ -37,9 +37,12 @@ itertools = "0.13.0" tracing = "0.1.40" rayon-scan = "0.1.1" arrayref = "0.3.8" +strum = "0.26.3" +strum_macros = "0.26.4" getrandom = { version = "0.2.15", features = ["custom"] } sysinfo = "0.30.13" - +num-traits = "0.2.19" +thiserror = "1.0.64" [dev-dependencies] sp1-zkvm = { workspace = true } diff --git a/crates/stark/src/air/builder.rs b/crates/stark/src/air/builder.rs index 16d41e1c54..c64f1677cd 100644 --- a/crates/stark/src/air/builder.rs +++ b/crates/stark/src/air/builder.rs @@ -6,26 +6,50 @@ use p3_field::{AbstractField, Field}; use p3_uni_stark::{ ProverConstraintFolder, StarkGenericConfig, SymbolicAirBuilder, VerifierConstraintFolder, }; +use serde::{Deserialize, Serialize}; +use strum_macros::{Display, EnumIter}; use super::{interaction::AirInteraction, BinomialExtension}; use crate::{lookup::InteractionKind, Word}; +/// The scope of an interaction. +#[derive( + Debug, + Clone, + Copy, + PartialEq, + Eq, + Hash, + Display, + EnumIter, + PartialOrd, + Ord, + Serialize, + Deserialize, +)] +pub enum InteractionScope { + /// Global scope. + Global = 0, + /// Local scope. 
+ Local, +} + /// A builder that can send and receive messages (or interactions) with other AIRs. pub trait MessageBuilder { /// Sends a message. - fn send(&mut self, message: M); + fn send(&mut self, message: M, scope: InteractionScope); /// Receives a message. - fn receive(&mut self, message: M); + fn receive(&mut self, message: M, scope: InteractionScope); } /// A message builder for which sending and receiving messages is a no-op. pub trait EmptyMessageBuilder: AirBuilder {} impl MessageBuilder for AB { - fn send(&mut self, _message: M) {} + fn send(&mut self, _message: M, _scope: InteractionScope) {} - fn receive(&mut self, _message: M) {} + fn receive(&mut self, _message: M, _scope: InteractionScope) {} } /// A trait which contains basic methods for building an AIR. @@ -90,11 +114,9 @@ pub trait ByteAirBuilder: BaseAirBuilder { a: impl Into, b: impl Into, c: impl Into, - shard: impl Into, - channel: impl Into, multiplicity: impl Into, ) { - self.send_byte_pair(opcode, a, Self::Expr::zero(), b, c, shard, channel, multiplicity); + self.send_byte_pair(opcode, a, Self::Expr::zero(), b, c, multiplicity); } /// Sends a byte operation with two outputs to be processed. @@ -106,23 +128,16 @@ pub trait ByteAirBuilder: BaseAirBuilder { a2: impl Into, b: impl Into, c: impl Into, - shard: impl Into, - channel: impl Into, multiplicity: impl Into, ) { - self.send(AirInteraction::new( - vec![ - opcode.into(), - a1.into(), - a2.into(), - b.into(), - c.into(), - shard.into(), - channel.into(), - ], - multiplicity.into(), - InteractionKind::Byte, - )); + self.send( + AirInteraction::new( + vec![opcode.into(), a1.into(), a2.into(), b.into(), c.into()], + multiplicity.into(), + InteractionKind::Byte, + ), + InteractionScope::Local, + ); } /// Receives a byte operation to be processed. @@ -133,11 +148,9 @@ pub trait ByteAirBuilder: BaseAirBuilder { a: impl Into, b: impl Into, c: impl Into, - shard: impl Into, - channel: impl Into, multiplicity: impl Into, ) { - self.receive_byte_pair(opcode, a, Self::Expr::zero(), b, c, shard, channel, multiplicity); + self.receive_byte_pair(opcode, a, Self::Expr::zero(), b, c, multiplicity); } /// Receives a byte operation with two outputs to be processed. @@ -149,23 +162,16 @@ pub trait ByteAirBuilder: BaseAirBuilder { a2: impl Into, b: impl Into, c: impl Into, - shard: impl Into, - channel: impl Into, multiplicity: impl Into, ) { - self.receive(AirInteraction::new( - vec![ - opcode.into(), - a1.into(), - a2.into(), - b.into(), - c.into(), - shard.into(), - channel.into(), - ], - multiplicity.into(), - InteractionKind::Byte, - )); + self.receive( + AirInteraction::new( + vec![opcode.into(), a1.into(), a2.into(), b.into(), c.into()], + multiplicity.into(), + InteractionKind::Byte, + ), + InteractionScope::Local, + ); } } @@ -180,7 +186,6 @@ pub trait AluAirBuilder: BaseAirBuilder { b: Word>, c: Word>, shard: impl Into, - channel: impl Into, nonce: impl Into, multiplicity: impl Into, ) { @@ -189,11 +194,13 @@ pub trait AluAirBuilder: BaseAirBuilder { .chain(b.0.into_iter().map(Into::into)) .chain(c.0.into_iter().map(Into::into)) .chain(once(shard.into())) - .chain(once(channel.into())) .chain(once(nonce.into())) .collect(); - self.send(AirInteraction::new(values, multiplicity.into(), InteractionKind::Alu)); + self.send( + AirInteraction::new(values, multiplicity.into(), InteractionKind::Alu), + InteractionScope::Local, + ); } /// Receives an ALU operation to be processed. 
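// A minimal sketch of the new calling convention, using only the types defined in this
// file: every `send`/`receive` now takes an `InteractionScope`, so a chip decides whether
// a message is matched within a single shard (`Local`) or in the cross-shard permutation
// argument (`Global`). The channel column is dropped from these interactions, and byte
// interactions no longer carry the shard either.
//
//     builder.send(
//         AirInteraction::new(
//             vec![opcode.into(), a1.into(), a2.into(), b.into(), c.into()],
//             multiplicity.into(),
//             InteractionKind::Byte,
//         ),
//         InteractionScope::Local,
//     );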
@@ -205,7 +212,6 @@ pub trait AluAirBuilder: BaseAirBuilder { b: Word>, c: Word>, shard: impl Into, - channel: impl Into, nonce: impl Into, multiplicity: impl Into, ) { @@ -214,11 +220,13 @@ pub trait AluAirBuilder: BaseAirBuilder { .chain(b.0.into_iter().map(Into::into)) .chain(c.0.into_iter().map(Into::into)) .chain(once(shard.into())) - .chain(once(channel.into())) .chain(once(nonce.into())) .collect(); - self.receive(AirInteraction::new(values, multiplicity.into(), InteractionKind::Alu)); + self.receive( + AirInteraction::new(values, multiplicity.into(), InteractionKind::Alu), + InteractionScope::Local, + ); } /// Sends an syscall operation to be processed (with "ECALL" opcode). @@ -226,27 +234,29 @@ pub trait AluAirBuilder: BaseAirBuilder { fn send_syscall( &mut self, shard: impl Into + Clone, - channel: impl Into + Clone, clk: impl Into + Clone, nonce: impl Into + Clone, syscall_id: impl Into + Clone, arg1: impl Into + Clone, arg2: impl Into + Clone, multiplicity: impl Into, + scope: InteractionScope, ) { - self.send(AirInteraction::new( - vec![ - shard.clone().into(), - channel.clone().into(), - clk.clone().into(), - nonce.clone().into(), - syscall_id.clone().into(), - arg1.clone().into(), - arg2.clone().into(), - ], - multiplicity.into(), - InteractionKind::Syscall, - )); + self.send( + AirInteraction::new( + vec![ + shard.clone().into(), + clk.clone().into(), + nonce.clone().into(), + syscall_id.clone().into(), + arg1.clone().into(), + arg2.clone().into(), + ], + multiplicity.into(), + InteractionKind::Syscall, + ), + scope, + ); } /// Receives a syscall operation to be processed. @@ -254,27 +264,29 @@ pub trait AluAirBuilder: BaseAirBuilder { fn receive_syscall( &mut self, shard: impl Into + Clone, - channel: impl Into + Clone, clk: impl Into + Clone, nonce: impl Into + Clone, syscall_id: impl Into + Clone, arg1: impl Into + Clone, arg2: impl Into + Clone, multiplicity: impl Into, + scope: InteractionScope, ) { - self.receive(AirInteraction::new( - vec![ - shard.clone().into(), - channel.clone().into(), - clk.clone().into(), - nonce.clone().into(), - syscall_id.clone().into(), - arg1.clone().into(), - arg2.clone().into(), - ], - multiplicity.into(), - InteractionKind::Syscall, - )); + self.receive( + AirInteraction::new( + vec![ + shard.clone().into(), + clk.clone().into(), + nonce.clone().into(), + syscall_id.clone().into(), + arg1.clone().into(), + arg2.clone().into(), + ], + multiplicity.into(), + InteractionKind::Syscall, + ), + scope, + ); } } @@ -317,12 +329,12 @@ pub trait ExtensionAirBuilder: BaseAirBuilder { } /// A builder that implements a permutation argument. -pub trait MultiTableAirBuilder: PermutationAirBuilder { +pub trait MultiTableAirBuilder<'a>: PermutationAirBuilder { /// The type of the cumulative sum. - type Sum: Into; + type Sum: Into + Copy; /// Returns the cumulative sum of the permutation. 
- fn cumulative_sum(&self) -> Self::Sum; + fn cumulative_sums(&self) -> &'a [Self::Sum]; } /// A trait that contains the common helper methods for building `SP1 recursion` and SP1 machine @@ -336,12 +348,12 @@ pub trait MachineAirBuilder: pub trait SP1AirBuilder: MachineAirBuilder + ByteAirBuilder + AluAirBuilder {} impl<'a, AB: AirBuilder + MessageBuilder, M> MessageBuilder for FilteredAirBuilder<'a, AB> { - fn send(&mut self, message: M) { - self.inner.send(message); + fn send(&mut self, message: M, scope: InteractionScope) { + self.inner.send(message, scope); } - fn receive(&mut self, message: M) { - self.inner.receive(message); + fn receive(&mut self, message: M, scope: InteractionScope) { + self.inner.receive(message, scope); } } diff --git a/crates/stark/src/air/machine.rs b/crates/stark/src/air/machine.rs index f80a1dadad..4973c4769c 100644 --- a/crates/stark/src/air/machine.rs +++ b/crates/stark/src/air/machine.rs @@ -6,6 +6,8 @@ use crate::MachineRecord; pub use sp1_derive::MachineAir; +use super::InteractionScope; + /// An AIR that is part of a multi table AIR arithmetization. pub trait MachineAir: BaseAir + 'static + Send + Sync { /// The execution record containing events for producing the air trace. @@ -41,6 +43,11 @@ pub trait MachineAir: BaseAir + 'static + Send + Sync { fn generate_preprocessed_trace(&self, _program: &Self::Program) -> Option> { None } + + /// Specifies whether it's trace should be part of either the global or local commit. + fn commit_scope(&self) -> InteractionScope { + InteractionScope::Local + } } /// A program that defines the control flow of a machine through a program counter. diff --git a/crates/stark/src/air/public_values.rs b/crates/stark/src/air/public_values.rs index dd3965001d..843cf6ba6a 100644 --- a/crates/stark/src/air/public_values.rs +++ b/crates/stark/src/air/public_values.rs @@ -54,6 +54,9 @@ pub struct PublicValues { /// The bits of the largest address that is witnessed for finalization in the current shard. pub last_finalize_addr_bits: [T; 32], + + /// This field is here to ensure that the size of the public values struct is a multiple of 8. + pub empty: [T; 3], } impl PublicValues { @@ -133,6 +136,7 @@ impl From> for PublicValues, F> last_init_addr_bits, previous_finalize_addr_bits, last_finalize_addr_bits, + .. } = value; let committed_value_digest: [_; PV_DIGEST_NUM_WORDS] = @@ -163,6 +167,7 @@ impl From> for PublicValues, F> last_init_addr_bits, previous_finalize_addr_bits, last_finalize_addr_bits, + empty: [F::zero(), F::zero(), F::zero()], } } } diff --git a/crates/stark/src/air/sub_builder.rs b/crates/stark/src/air/sub_builder.rs index 82b154cea5..b0b640d29a 100644 --- a/crates/stark/src/air/sub_builder.rs +++ b/crates/stark/src/air/sub_builder.rs @@ -23,7 +23,10 @@ impl, T: Send + Sync> SubMatrixRowSlices { /// Implement `Matrix` for `SubMatrixRowSlices`. 
impl, T: Send + Sync> Matrix for SubMatrixRowSlices { - type Row<'a> = Skip>> where Self: 'a; + type Row<'a> + = Skip>> + where + Self: 'a; #[inline] fn row(&self, r: usize) -> Self::Row<'_> { diff --git a/crates/stark/src/bb31_poseidon2.rs b/crates/stark/src/bb31_poseidon2.rs index 2c4dd2f7a4..8c610f54e8 100644 --- a/crates/stark/src/bb31_poseidon2.rs +++ b/crates/stark/src/bb31_poseidon2.rs @@ -1,11 +1,11 @@ #![allow(missing_docs)] -use crate::StarkGenericConfig; +use crate::{Com, StarkGenericConfig, ZeroCommitment}; use p3_baby_bear::{BabyBear, DiffusionMatrixBabyBear}; use p3_challenger::DuplexChallenger; use p3_commit::ExtensionMmcs; use p3_dft::Radix2DitParallel; -use p3_field::{extension::BinomialExtensionField, Field}; +use p3_field::{extension::BinomialExtensionField, AbstractField, Field}; use p3_fri::{ BatchOpening, CommitPhaseProofStep, FriConfig, FriProof, QueryProof, TwoAdicFriPcs, TwoAdicFriPcsProof, @@ -23,7 +23,7 @@ pub type InnerVal = BabyBear; pub type InnerChallenge = BinomialExtensionField; pub type InnerPerm = Poseidon2; -pub type InnerHash = PaddingFreeSponge; +pub type InnerHash = PaddingFreeSponge; pub type InnerDigestHash = Hash; pub type InnerDigest = [InnerVal; DIGEST_SIZE]; pub type InnerCompress = TruncatedPermutation; @@ -144,27 +144,34 @@ impl StarkGenericConfig for BabyBearPoseidon2Inner { } } +impl ZeroCommitment for InnerPcs { + fn zero_commitment(&self) -> Com { + InnerDigestHash::from([InnerVal::zero(); DIGEST_SIZE]) + } +} + pub mod baby_bear_poseidon2 { use p3_baby_bear::{BabyBear, DiffusionMatrixBabyBear}; use p3_challenger::DuplexChallenger; use p3_commit::ExtensionMmcs; use p3_dft::Radix2DitParallel; - use p3_field::{extension::BinomialExtensionField, Field}; + use p3_field::{extension::BinomialExtensionField, AbstractField, Field}; use p3_fri::{FriConfig, TwoAdicFriPcs}; use p3_merkle_tree::FieldMerkleTreeMmcs; use p3_poseidon2::{Poseidon2, Poseidon2ExternalMatrixGeneral}; - use p3_symmetric::{PaddingFreeSponge, TruncatedPermutation}; + use p3_symmetric::{Hash, PaddingFreeSponge, TruncatedPermutation}; use serde::{Deserialize, Serialize}; use sp1_primitives::RC_16_30; - use crate::StarkGenericConfig; + use crate::{Com, StarkGenericConfig, ZeroCommitment, DIGEST_SIZE}; pub type Val = BabyBear; pub type Challenge = BinomialExtensionField; pub type Perm = Poseidon2; - pub type MyHash = PaddingFreeSponge; + pub type MyHash = PaddingFreeSponge; + pub type DigestHash = Hash; pub type MyCompress = TruncatedPermutation; pub type ValMmcs = FieldMerkleTreeMmcs< ::Packing, @@ -215,6 +222,19 @@ pub mod baby_bear_poseidon2 { #[must_use] pub fn compressed_fri_config() -> FriConfig { + let perm = my_perm(); + let hash = MyHash::new(perm.clone()); + let compress = MyCompress::new(perm.clone()); + let challenge_mmcs = ChallengeMmcs::new(ValMmcs::new(hash, compress)); + let num_queries = match std::env::var("FRI_QUERIES") { + Ok(value) => value.parse().unwrap(), + Err(_) => 50, + }; + FriConfig { log_blowup: 2, num_queries, proof_of_work_bits: 16, mmcs: challenge_mmcs } + } + + #[must_use] + pub fn ultra_compressed_fri_config() -> FriConfig { let perm = my_perm(); let hash = MyHash::new(perm.clone()); let compress = MyCompress::new(perm.clone()); @@ -263,6 +283,18 @@ pub mod baby_bear_poseidon2 { let pcs = Pcs::new(27, dft, val_mmcs, fri_config); Self { pcs, perm, config_type: BabyBearPoseidon2Type::Compressed } } + + #[must_use] + pub fn ultra_compressed() -> Self { + let perm = my_perm(); + let hash = MyHash::new(perm.clone()); + let compress = 
MyCompress::new(perm.clone()); + let val_mmcs = ValMmcs::new(hash, compress); + let dft = Dft {}; + let fri_config = ultra_compressed_fri_config(); + let pcs = Pcs::new(27, dft, val_mmcs, fri_config); + Self { pcs, perm, config_type: BabyBearPoseidon2Type::Compressed } + } } impl Clone for BabyBearPoseidon2 { @@ -311,4 +343,10 @@ pub mod baby_bear_poseidon2 { Challenger::new(self.perm.clone()) } } + + impl ZeroCommitment for Pcs { + fn zero_commitment(&self) -> Com { + DigestHash::from([Val::zero(); DIGEST_SIZE]) + } + } } diff --git a/crates/stark/src/chip.rs b/crates/stark/src/chip.rs index 5d58986c4a..9ba6651565 100644 --- a/crates/stark/src/chip.rs +++ b/crates/stark/src/chip.rs @@ -12,8 +12,7 @@ use crate::{ }; use super::{ - eval_permutation_constraints, generate_permutation_trace, permutation_trace_width, - PROOF_MAX_NUM_PVS, + eval_permutation_constraints, generate_permutation_trace, get_grouped_maps, PROOF_MAX_NUM_PVS, }; /// An Air that encodes lookups based on interactions. @@ -43,6 +42,11 @@ impl Chip { pub const fn log_quotient_degree(&self) -> usize { self.log_quotient_degree } + + /// Consumes the chip and returns the underlying air. + pub fn into_inner(self) -> A { + self.air + } } impl> Chip { @@ -91,6 +95,12 @@ where self.sends.len() + self.receives.len() } + /// Returns the number of sent byte lookups in the chip. + #[inline] + pub fn num_sent_byte_lookups(&self) -> usize { + self.sends.iter().filter(|i| i.kind == InteractionKind::Byte).count() + } + /// Returns the number of sends of the given kind. #[inline] pub fn num_sends_by_kind(&self, kind: InteractionKind) -> usize { @@ -109,9 +119,10 @@ where preprocessed: Option<&RowMajorMatrix>, main: &RowMajorMatrix, random_elements: &[EF], - ) -> RowMajorMatrix + ) -> (RowMajorMatrix, EF, EF) where F: PrimeField, + A: MachineAir, { let batch_size = self.logup_batch_size(); generate_permutation_trace( @@ -127,7 +138,10 @@ where /// Returns the width of the permutation trace. #[inline] pub fn permutation_width(&self) -> usize { - permutation_trace_width(self.sends().len() + self.receives().len(), self.logup_batch_size()) + let (_, _, grouped_widths) = + get_grouped_maps(self.sends(), self.receives(), self.logup_batch_size()); + + grouped_widths.values().sum() } /// Returns the cost of a row in the chip. @@ -195,14 +209,18 @@ where fn included(&self, shard: &Self::Record) -> bool { self.air.included(shard) } + + fn commit_scope(&self) -> crate::air::InteractionScope { + self.air.commit_scope() + } } // Implement AIR directly on Chip, evaluating both execution and permutation constraints. -impl Air for Chip +impl<'a, F, A, AB> Air for Chip where F: Field, A: Air, - AB: SP1AirBuilder + MultiTableAirBuilder + PairBuilder, + AB: SP1AirBuilder + MultiTableAirBuilder<'a> + PairBuilder + 'a, { fn eval(&self, builder: &mut AB) { // Evaluate the execution trace constraints. diff --git a/crates/stark/src/config.rs b/crates/stark/src/config.rs index 59df5f0165..b7da776128 100644 --- a/crates/stark/src/config.rs +++ b/crates/stark/src/config.rs @@ -54,7 +54,9 @@ pub trait StarkGenericConfig: 'static + Send + Sync + Serialize + DeserializeOwn type Domain: PolynomialSpace + Sync; /// The PCS used to commit to trace polynomials. - type Pcs: Pcs + Sync; + type Pcs: Pcs + + Sync + + ZeroCommitment; /// The field from which most random challenges are drawn. type Challenge: ExtensionField; @@ -62,7 +64,9 @@ pub trait StarkGenericConfig: 'static + Send + Sync + Serialize + DeserializeOwn /// The challenger (Fiat-Shamir) implementation used. 
type Challenger: FieldChallenger> + CanObserve<>::Commitment> - + CanSample; + + CanSample + + Serialize + + DeserializeOwned; /// Get the PCS used by this configuration. fn pcs(&self) -> &Self::Pcs; @@ -71,6 +75,10 @@ pub trait StarkGenericConfig: 'static + Send + Sync + Serialize + DeserializeOwn fn challenger(&self) -> Self::Challenger; } +pub trait ZeroCommitment { + fn zero_commitment(&self) -> Com; +} + pub struct UniConfig(pub SC); impl p3_uni_stark::StarkGenericConfig for UniConfig { diff --git a/crates/stark/src/debug.rs b/crates/stark/src/debug.rs index 2e1ccd9d3d..78a7d0c95c 100644 --- a/crates/stark/src/debug.rs +++ b/crates/stark/src/debug.rs @@ -14,6 +14,8 @@ use p3_matrix::{ stack::VerticalPair, Matrix, }; +use p3_maybe_rayon::prelude::ParallelBridge; +use p3_maybe_rayon::prelude::ParallelIterator; use super::{MachineChip, StarkGenericConfig, Val}; use crate::air::{EmptyMessageBuilder, MachineAir, MultiTableAirBuilder}; @@ -21,14 +23,15 @@ use crate::air::{EmptyMessageBuilder, MachineAir, MultiTableAirBuilder}; /// Checks that the constraints of the given AIR are satisfied, including the permutation trace. /// /// Note that this does not actually verify the proof. -#[allow(clippy::needless_pass_by_value)] +#[allow(clippy::too_many_arguments)] pub fn debug_constraints( chip: &MachineChip, preprocessed: Option<&RowMajorMatrix>>, main: &RowMajorMatrix>, perm: &RowMajorMatrix, perm_challenges: &[SC::Challenge], - public_values: Vec>, + public_values: &[Val], + cumulative_sums: &[SC::Challenge], ) where SC: StarkGenericConfig, Val: PrimeField32, @@ -40,10 +43,8 @@ pub fn debug_constraints( return; } - let cumulative_sum = perm.row_slice(perm.height() - 1).last().copied().unwrap(); - // Check that constraints are satisfied. - (0..height).for_each(|i| { + (0..height).par_bridge().for_each(|i| { let i_next = (i + 1) % height; let main_local = main.row_slice(i); @@ -69,7 +70,6 @@ pub fn debug_constraints( let perm_next = perm.row_slice(i_next); let perm_next = &(*perm_next); - let public_values = public_values.clone(); let mut builder = DebugConstraintBuilder { preprocessed: VerticalPair::new( RowMajorMatrixView::new_row(&preprocessed_local), @@ -84,11 +84,11 @@ pub fn debug_constraints( RowMajorMatrixView::new_row(perm_next), ), perm_challenges, - cumulative_sum, + cumulative_sums, is_first_row: Val::::zero(), is_last_row: Val::::zero(), is_transition: Val::::one(), - public_values: &public_values, + public_values, }; if i == 0 { builder.is_first_row = Val::::one(); @@ -130,7 +130,7 @@ pub struct DebugConstraintBuilder<'a, F: Field, EF: ExtensionField> { pub(crate) preprocessed: VerticalPair, RowMajorMatrixView<'a, F>>, pub(crate) main: VerticalPair, RowMajorMatrixView<'a, F>>, pub(crate) perm: VerticalPair, RowMajorMatrixView<'a, EF>>, - pub(crate) cumulative_sum: EF, + pub(crate) cumulative_sums: &'a [EF], pub(crate) perm_challenges: &'a [EF], pub(crate) is_first_row: F, pub(crate) is_last_row: F, @@ -252,15 +252,15 @@ where } } -impl<'a, F, EF> MultiTableAirBuilder for DebugConstraintBuilder<'a, F, EF> +impl<'a, F, EF> MultiTableAirBuilder<'a> for DebugConstraintBuilder<'a, F, EF> where F: Field, EF: ExtensionField, { type Sum = EF; - fn cumulative_sum(&self) -> Self::Sum { - self.cumulative_sum + fn cumulative_sums(&self) -> &'a [Self::Sum] { + self.cumulative_sums } } diff --git a/crates/stark/src/folder.rs b/crates/stark/src/folder.rs index a99e06c31b..4666e2e94c 100644 --- a/crates/stark/src/folder.rs +++ b/crates/stark/src/folder.rs @@ -27,8 +27,8 @@ pub struct 
ProverConstraintFolder<'a, SC: StarkGenericConfig> { >, /// The challenges for the permutation. pub perm_challenges: &'a [PackedChallenge], - /// The cumulative sum of the permutation. - pub cumulative_sum: SC::Challenge, + /// The cumulative sums for the permutation. + pub cumulative_sums: &'a [PackedChallenge], /// The selector for the first row. pub is_first_row: PackedVal, /// The selector for the last row. @@ -111,11 +111,11 @@ impl<'a, SC: StarkGenericConfig> PermutationAirBuilder for ProverConstraintFolde } } -impl<'a, SC: StarkGenericConfig> MultiTableAirBuilder for ProverConstraintFolder<'a, SC> { +impl<'a, SC: StarkGenericConfig> MultiTableAirBuilder<'a> for ProverConstraintFolder<'a, SC> { type Sum = PackedChallenge; - fn cumulative_sum(&self) -> Self::Sum { - PackedChallenge::::from_f(self.cumulative_sum) + fn cumulative_sums(&self) -> &'a [Self::Sum] { + self.cumulative_sums } } @@ -155,8 +155,8 @@ pub struct GenericVerifierConstraintFolder<'a, F, EF, PubVar, Var, Expr> { pub perm: VerticalPair, RowMajorMatrixView<'a, Var>>, /// The challenges for the permutation. pub perm_challenges: &'a [Var], - /// The cumulative sum of the permutation. - pub cumulative_sum: Var, + /// The cumulative sums of the permutation. + pub cumulative_sums: &'a [Var], /// The selector for the first row. pub is_first_row: Var, /// The selector for the last row. @@ -316,7 +316,7 @@ where } } -impl<'a, F, EF, PubVar, Var, Expr> MultiTableAirBuilder +impl<'a, F, EF, PubVar, Var, Expr> MultiTableAirBuilder<'a> for GenericVerifierConstraintFolder<'a, F, EF, PubVar, Var, Expr> where F: Field, @@ -347,8 +347,8 @@ where { type Sum = Var; - fn cumulative_sum(&self) -> Self::Sum { - self.cumulative_sum + fn cumulative_sums(&self) -> &'a [Self::Sum] { + self.cumulative_sums } } diff --git a/crates/stark/src/lookup/builder.rs b/crates/stark/src/lookup/builder.rs index df0fa8a75b..153d660483 100644 --- a/crates/stark/src/lookup/builder.rs +++ b/crates/stark/src/lookup/builder.rs @@ -4,7 +4,7 @@ use p3_matrix::dense::RowMajorMatrix; use p3_uni_stark::{Entry, SymbolicExpression, SymbolicVariable}; use crate::{ - air::{AirInteraction, MessageBuilder}, + air::{AirInteraction, InteractionScope, MessageBuilder}, PROOF_MAX_NUM_PVS, }; @@ -93,22 +93,22 @@ impl PairBuilder for InteractionBuilder { } impl MessageBuilder>> for InteractionBuilder { - fn send(&mut self, message: AirInteraction>) { + fn send(&mut self, message: AirInteraction>, scope: InteractionScope) { let values = message.values.into_iter().map(|v| symbolic_to_virtual_pair(&v)).collect::>(); let multiplicity = symbolic_to_virtual_pair(&message.multiplicity); - self.sends.push(Interaction::new(values, multiplicity, message.kind)); + self.sends.push(Interaction::new(values, multiplicity, message.kind, scope)); } - fn receive(&mut self, message: AirInteraction>) { + fn receive(&mut self, message: AirInteraction>, scope: InteractionScope) { let values = message.values.into_iter().map(|v| symbolic_to_virtual_pair(&v)).collect::>(); let multiplicity = symbolic_to_virtual_pair(&message.multiplicity); - self.receives.push(Interaction::new(values, multiplicity, message.kind)); + self.receives.push(Interaction::new(values, multiplicity, message.kind, scope)); } } @@ -240,18 +240,27 @@ mod tests { let y = local[1]; let z = local[2]; - builder.send(AirInteraction::new( - vec![x.into(), y.into()], - AB::F::from_canonical_u32(3).into(), - InteractionKind::Alu, - )); - builder.send(AirInteraction::new( - vec![x + y, z.into()], - AB::F::from_canonical_u32(5).into(), - 
InteractionKind::Alu, - )); - - builder.receive(AirInteraction::new(vec![x.into()], y.into(), InteractionKind::Byte)); + builder.send( + AirInteraction::new( + vec![x.into(), y.into()], + AB::F::from_canonical_u32(3).into(), + InteractionKind::Alu, + ), + InteractionScope::Local, + ); + builder.send( + AirInteraction::new( + vec![x + y, z.into()], + AB::F::from_canonical_u32(5).into(), + InteractionKind::Alu, + ), + InteractionScope::Local, + ); + + builder.receive( + AirInteraction::new(vec![x.into()], y.into(), InteractionKind::Byte), + InteractionScope::Local, + ); } } diff --git a/crates/stark/src/lookup/debug.rs b/crates/stark/src/lookup/debug.rs index b555cb0471..8d051d8f82 100644 --- a/crates/stark/src/lookup/debug.rs +++ b/crates/stark/src/lookup/debug.rs @@ -5,7 +5,10 @@ use p3_field::{AbstractField, Field, PrimeField32, PrimeField64}; use p3_matrix::Matrix; use super::InteractionKind; -use crate::{air::MachineAir, MachineChip, StarkGenericConfig, StarkMachine, StarkProvingKey, Val}; +use crate::{ + air::{InteractionScope, MachineAir}, + MachineChip, StarkGenericConfig, StarkMachine, StarkProvingKey, Val, +}; /// The data for an interaction. #[derive(Debug)] @@ -61,6 +64,7 @@ pub fn debug_interactions>>( pkey: &StarkProvingKey, record: &A::Record, interaction_kinds: Vec, + scope: InteractionScope, ) -> (BTreeMap>>>, BTreeMap>) { let mut key_to_vec_data = BTreeMap::new(); let mut key_to_count = BTreeMap::new(); @@ -72,9 +76,12 @@ pub fn debug_interactions>>( let mut main = trace.clone(); let height = trace.clone().height(); - let nb_send_interactions = chip.sends().len(); + let sends = chip.sends().iter().filter(|s| s.scope == scope).collect::>(); + let receives = chip.receives().iter().filter(|r| r.scope == scope).collect::>(); + + let nb_send_interactions = sends.len(); for row in 0..height { - for (m, interaction) in chip.sends().iter().chain(chip.receives().iter()).enumerate() { + for (m, interaction) in sends.iter().chain(receives.iter()).enumerate() { if !interaction_kinds.contains(&interaction.kind) { continue; } @@ -94,7 +101,12 @@ pub fn debug_interactions>>( let expr: Val = value.apply(preprocessed_row, main.row_mut(row)); values.push(expr); } - let key = format!("{} {}", &interaction.kind.to_string(), vec_to_string(values)); + let key = format!( + "{} {} {}", + &interaction.scope.to_string(), + &interaction.kind.to_string(), + vec_to_string(values) + ); key_to_vec_data.entry(key.clone()).or_insert_with(Vec::new).push(InteractionData { chip_name: chip.name(), kind: interaction.kind, @@ -124,12 +136,17 @@ pub fn debug_interactions_with_all_chips( pkey: &StarkProvingKey, shards: &[A::Record], interaction_kinds: Vec, + scope: InteractionScope, ) -> bool where SC: StarkGenericConfig, SC::Val: PrimeField32, A: MachineAir, { + if scope == InteractionScope::Local { + assert!(shards.len() == 1); + } + let mut final_map = BTreeMap::new(); let mut total = SC::Val::zero(); @@ -138,7 +155,7 @@ where let mut total_events = 0; for shard in shards { let (_, count) = - debug_interactions::(chip, pkey, shard, interaction_kinds.clone()); + debug_interactions::(chip, pkey, shard, interaction_kinds.clone(), scope); total_events += count.len(); for (key, value) in count.iter() { let entry = diff --git a/crates/stark/src/lookup/interaction.rs b/crates/stark/src/lookup/interaction.rs index 04538aa179..0ff89ab592 100644 --- a/crates/stark/src/lookup/interaction.rs +++ b/crates/stark/src/lookup/interaction.rs @@ -3,7 +3,10 @@ use core::fmt::{Debug, Display}; use p3_air::VirtualPairCol; use 
p3_field::Field; +use crate::air::InteractionScope; + /// An interaction for a lookup or a permutation argument. +#[derive(Clone)] pub struct Interaction { /// The values of the interaction. pub values: Vec>, @@ -11,6 +14,8 @@ pub struct Interaction { pub multiplicity: VirtualPairCol, /// The kind of interaction. pub kind: InteractionKind, + /// The scope of the interaction. + pub scope: InteractionScope, } /// The type of interaction for a lookup argument. @@ -64,8 +69,9 @@ impl Interaction { values: Vec>, multiplicity: VirtualPairCol, kind: InteractionKind, + scope: InteractionScope, ) -> Self { - Self { values, multiplicity, kind } + Self { values, multiplicity, kind, scope } } /// The index of the argument in the lookup table. @@ -76,7 +82,10 @@ impl Interaction { impl Debug for Interaction { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("Interaction").field("kind", &self.kind).finish_non_exhaustive() + f.debug_struct("Interaction") + .field("kind", &self.kind) + .field("scope", &self.scope) + .finish_non_exhaustive() } } diff --git a/crates/stark/src/machine.rs b/crates/stark/src/machine.rs index 882bd2a04d..49e0c52a31 100644 --- a/crates/stark/src/machine.rs +++ b/crates/stark/src/machine.rs @@ -7,12 +7,12 @@ use p3_field::{AbstractExtensionField, AbstractField, Field, PrimeField32}; use p3_matrix::{dense::RowMajorMatrix, Dimensions, Matrix}; use p3_maybe_rayon::prelude::*; use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use std::{cmp::Reverse, fmt::Debug, time::Instant}; +use std::{array, cmp::Reverse, env, fmt::Debug, time::Instant}; use tracing::instrument; use super::{debug_constraints, Dom}; use crate::{ - air::{MachineAir, MachineProgram}, + air::{InteractionScope, MachineAir, MachineProgram}, lookup::{debug_interactions_with_all_chips, InteractionKind}, record::MachineRecord, DebugConstraintBuilder, ShardProof, VerifierConstraintFolder, @@ -34,12 +34,20 @@ pub struct StarkMachine { /// The number of public values elements that the machine uses num_pv_elts: usize, + + /// Contains a global bus. This should be true for the core machine and false otherwise. + contains_global_bus: bool, } impl StarkMachine { /// Creates a new [`StarkMachine`]. - pub const fn new(config: SC, chips: Vec, A>>, num_pv_elts: usize) -> Self { - Self { config, chips, num_pv_elts } + pub const fn new( + config: SC, + chips: Vec, A>>, + num_pv_elts: usize, + contains_global_bus: bool, + ) -> Self { + Self { config, chips, num_pv_elts, contains_global_bus } } } @@ -65,6 +73,9 @@ impl StarkProvingKey { pub fn observe_into(&self, challenger: &mut SC::Challenger) { challenger.observe(self.commit.clone()); challenger.observe(self.pc_start); + for _ in 0..7 { + challenger.observe(Val::::zero()); + } } } @@ -88,6 +99,9 @@ impl StarkVerifyingKey { pub fn observe_into(&self, challenger: &mut SC::Challenger) { challenger.observe(self.commit.clone()); challenger.observe(self.pc_start); + for _ in 0..7 { + challenger.observe(Val::::zero()); + } } } @@ -108,6 +122,11 @@ impl>> StarkMachine { self.num_pv_elts } + /// Returns whether the machine contains a global bus. + pub const fn contains_global_bus(&self) -> bool { + self.contains_global_bus + } + /// Returns the id of all chips in the machine that have preprocessed columns. pub fn preprocessed_chip_ids(&self) -> Vec { self.chips @@ -183,10 +202,10 @@ impl>> StarkMachine { }); // Order the chips and traces by trace size (biggest first), and get the ordering map. 
- named_preprocessed_traces.sort_by_key(|(_, trace)| Reverse(trace.height())); + named_preprocessed_traces + .sort_by_key(|(name, trace)| (Reverse(trace.height()), name.clone())); let pcs = self.config.pcs(); - let (chip_information, domains_and_traces): (Vec<_>, Vec<_>) = named_preprocessed_traces .iter() .map(|(name, trace)| { @@ -230,8 +249,20 @@ impl>> StarkMachine { &self, records: &mut [A::Record], opts: &::Config, + chips_filter: Option<&[String]>, ) { - let chips = self.chips(); + let chips = self + .chips + .iter() + .filter(|chip| { + if let Some(chips_filter) = chips_filter { + chips_filter.contains(&chip.name()) + } else { + true + } + }) + .collect::>(); + records.iter_mut().for_each(|record| { chips.iter().for_each(|chip| { tracing::debug_span!("chip dependencies", chip = chip.name()).in_scope(|| { @@ -262,12 +293,16 @@ impl>> StarkMachine { SC::Challenger: Clone, A: for<'a> Air>, { + let contains_global_bus = self.contains_global_bus(); + // Observe the preprocessed commitment. vk.observe_into(challenger); tracing::debug_span!("observe challenges for all shards").in_scope(|| { - proof.shard_proofs.iter().for_each(|proof| { - challenger.observe(proof.commitment.main_commit.clone()); - challenger.observe_slice(&proof.public_values[0..self.num_pv_elts()]); + proof.shard_proofs.iter().for_each(|shard_proof| { + if contains_global_bus { + challenger.observe(shard_proof.commitment.global_main_commit.clone()); + } + challenger.observe_slice(&shard_proof.public_values[0..self.num_pv_elts()]); }); }); @@ -276,6 +311,15 @@ impl>> StarkMachine { return Err(MachineVerificationError::EmptyProof); } + // Obtain the challenges used for the global permutation argument. + let global_permutation_challenges: [SC::Challenge; 2] = array::from_fn(|_| { + if contains_global_bus { + challenger.sample_ext_element() + } else { + SC::Challenge::zero() + } + }); + tracing::debug_span!("verify shard proofs").in_scope(|| { for (i, shard_proof) in proof.shard_proofs.iter().enumerate() { tracing::debug_span!("verifying shard", shard = i).in_scope(|| { @@ -287,6 +331,7 @@ impl>> StarkMachine { &chips, &mut challenger.clone(), shard_proof, + &global_permutation_challenges, ) .map_err(MachineVerificationError::InvalidShardProof) })?; @@ -296,15 +341,21 @@ impl>> StarkMachine { })?; // Verify the cumulative sum is 0. - tracing::debug_span!("verify cumulative sum is 0").in_scope(|| { - let mut sum = SC::Challenge::zero(); - for proof in proof.shard_proofs.iter() { - sum += proof.cumulative_sum(); - } - match sum.is_zero() { - true => Ok(()), - false => Err(MachineVerificationError::NonZeroCumulativeSum), + tracing::debug_span!("verify global cumulative sum is 0").in_scope(|| { + let sum = proof + .shard_proofs + .iter() + .map(|proof| proof.cumulative_sum(InteractionScope::Global)) + .sum::(); + + if !sum.is_zero() { + return Err(MachineVerificationError::NonZeroCumulativeSum( + InteractionScope::Global, + 0, + )); } + + Ok(()) }) } @@ -321,13 +372,18 @@ impl>> StarkMachine { { tracing::debug!("checking constraints for each shard"); - // Obtain the challenges used for the permutation argument. + // Obtain the challenges used for the global permutation argument. let mut permutation_challenges: Vec = Vec::new(); for _ in 0..2 { permutation_challenges.push(challenger.sample_ext_element()); } - let mut cumulative_sum = SC::Challenge::zero(); + // Obtain the challenges used for the local permutation argument. 
+ for _ in 0..2 { + permutation_challenges.push(challenger.sample_ext_element()); + } + + let mut global_cumulative_sum = SC::Challenge::zero(); for shard in records.iter() { // Filter the chips based on what is used. let chips = self.shard_chips(shard).collect::>(); @@ -351,19 +407,34 @@ impl>> StarkMachine { .par_iter() .zip(traces.par_iter_mut()) .map(|(chip, (main_trace, pre_trace))| { - let perm_trace = chip.generate_permutation_trace( + let (trace, global_sum, local_sum) = chip.generate_permutation_trace( *pre_trace, main_trace, &permutation_challenges, ); - let cumulative_sum = - perm_trace.row_slice(main_trace.height() - 1).last().copied().unwrap(); - (perm_trace, cumulative_sum) + (trace, [global_sum, local_sum]) }) .unzip_into_vecs(&mut permutation_traces, &mut cumulative_sums); }); - cumulative_sum += cumulative_sums.iter().copied().sum::(); + global_cumulative_sum += + cumulative_sums.iter().map(|sum| sum[0]).sum::(); + + let local_cumulative_sum = + cumulative_sums.iter().map(|sum| sum[1]).sum::(); + if !local_cumulative_sum.is_zero() { + tracing::warn!("Local cumulative sum is not zero"); + tracing::debug_span!("debug local interactions").in_scope(|| { + debug_interactions_with_all_chips::( + self, + pk, + &[shard.clone()], + InteractionKind::all_kinds(), + InteractionScope::Local, + ) + }); + panic!("Local cumulative sum is not zero"); + } // Compute some statistics. for i in 0..chips.len() { @@ -383,35 +454,40 @@ impl>> StarkMachine { ); } - tracing::info_span!("debug constraints").in_scope(|| { - for i in 0..chips.len() { - let preprocessed_trace = - pk.chip_ordering.get(&chips[i].name()).map(|index| &pk.traces[*index]); - debug_constraints::( - chips[i], - preprocessed_trace, - &traces[i].0, - &permutation_traces[i], - &permutation_challenges, - shard.public_values(), - ); - } - }); + if env::var("SKIP_CONSTRAINTS").is_err() { + tracing::info_span!("debug constraints").in_scope(|| { + for i in 0..chips.len() { + let preprocessed_trace = + pk.chip_ordering.get(&chips[i].name()).map(|index| &pk.traces[*index]); + debug_constraints::( + chips[i], + preprocessed_trace, + &traces[i].0, + &permutation_traces[i], + &permutation_challenges, + &shard.public_values(), + &cumulative_sums[i], + ); + } + }); + } } tracing::info!("Constraints verified successfully"); - println!("Cumulative sum: {cumulative_sum}"); - - // If the cumulative sum is not zero, debug the interactions. - if !cumulative_sum.is_zero() { - debug_interactions_with_all_chips::( - self, - pk, - &records, - InteractionKind::all_kinds(), - ); - panic!("Cumulative sum is not zero"); + // If the global cumulative sum is not zero, debug the interactions. + if !global_cumulative_sum.is_zero() { + tracing::warn!("Global cumulative sum is not zero"); + tracing::debug_span!("debug global interactions").in_scope(|| { + debug_interactions_with_all_chips::( + self, + pk, + &records, + InteractionKind::all_kinds(), + InteractionScope::Global, + ) + }); + panic!("Global cumulative sum is not zero"); } } } @@ -423,7 +499,7 @@ pub enum MachineVerificationError { /// An error occurred during the verification of a global proof. InvalidGlobalProof(VerificationError), /// The cumulative sum is non-zero. - NonZeroCumulativeSum, + NonZeroCumulativeSum(InteractionScope, usize), /// The public values digest is invalid. InvalidPublicValuesDigest, /// The debug interactions failed. @@ -440,6 +516,8 @@ pub enum MachineVerificationError { MissingCpuInFirstShard, /// The CPU log degree is too large. 
CpuLogDegreeTooLarge(usize), + /// The verification key is not allowed. + InvalidVerificationKey, } impl Debug for MachineVerificationError { @@ -452,8 +530,8 @@ impl Debug for MachineVerificationError { MachineVerificationError::InvalidGlobalProof(e) => { write!(f, "Invalid global proof: {:?}", e) } - MachineVerificationError::NonZeroCumulativeSum => { - write!(f, "Non-zero cumulative sum") + MachineVerificationError::NonZeroCumulativeSum(scope, shard) => { + write!(f, "Non-zero cumulative sum. Scope: {}, Shard: {}", scope, shard) } MachineVerificationError::InvalidPublicValuesDigest => { write!(f, "Invalid public values digest") @@ -479,6 +557,9 @@ impl Debug for MachineVerificationError { MachineVerificationError::CpuLogDegreeTooLarge(log_degree) => { write!(f, "CPU log degree too large: {}", log_degree) } + MachineVerificationError::InvalidVerificationKey => { + write!(f, "Invalid verification key") + } } } } diff --git a/crates/stark/src/opts.rs b/crates/stark/src/opts.rs index db5ced80fa..34ea0d81b7 100644 --- a/crates/stark/src/opts.rs +++ b/crates/stark/src/opts.rs @@ -3,12 +3,16 @@ use std::env; use serde::{Deserialize, Serialize}; use sysinfo::System; -const MAX_SHARD_SIZE: usize = 1 << 22; +const MAX_SHARD_SIZE: usize = 1 << 21; +const RECURSION_MAX_SHARD_SIZE: usize = 1 << 22; const MAX_SHARD_BATCH_SIZE: usize = 8; const DEFAULT_TRACE_GEN_WORKERS: usize = 1; const DEFAULT_CHECKPOINTS_CHANNEL_CAPACITY: usize = 128; const DEFAULT_RECORDS_AND_TRACES_CHANNEL_CAPACITY: usize = 1; +/// The threshold for splitting deferred events. +pub const MAX_DEFERRED_SPLIT_THRESHOLD: usize = 1 << 18; + /// Options to configure the SP1 prover for core and recursive proofs. #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] pub struct SP1ProverOpts { @@ -53,8 +57,8 @@ pub struct SP1CoreOpts { #[allow(clippy::cast_precision_loss)] fn shard_size(total_available_mem: u64) -> usize { let log_shard_size = match total_available_mem { - 0..=14 => 18, - m => (((m as f64).log2() * 0.619) + 17.2).floor() as usize, + 0..=14 => 17, + m => (((m as f64).log2() * 0.619) + 16.2).floor() as usize, }; std::cmp::min(1 << log_shard_size, MAX_SHARD_SIZE) } @@ -77,8 +81,9 @@ fn shard_batch_size(total_available_mem: u64) -> usize { impl Default for SP1CoreOpts { fn default() -> Self { let split_threshold = env::var("SPLIT_THRESHOLD") - .map(|s| s.parse::().unwrap_or(DEFERRED_SPLIT_THRESHOLD)) - .unwrap_or(DEFERRED_SPLIT_THRESHOLD); + .map(|s| s.parse::().unwrap_or(MAX_DEFERRED_SPLIT_THRESHOLD)) + .unwrap_or(MAX_DEFERRED_SPLIT_THRESHOLD) + .max(MAX_DEFERRED_SPLIT_THRESHOLD); let sys = System::new_all(); let total_available_mem = sys.total_memory() / (1024 * 1024 * 1024); @@ -120,8 +125,8 @@ impl SP1CoreOpts { let mut opts = Self::default(); opts.reconstruct_commitments = false; - // Recursion only supports 1 << 22 shard size. - opts.shard_size = MAX_SHARD_SIZE; + // Recursion only supports [RECURSION_MAX_SHARD_SIZE] shard size. + opts.shard_size = RECURSION_MAX_SHARD_SIZE; opts } } @@ -154,6 +159,3 @@ impl SplitOpts { } } } - -/// The threshold for splitting deferred events. 
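Editor's note: the `opts.rs` hunk above lowers the shard-size constants and the memory-based heuristic. A worked version of the updated formula, with the constants copied from the diff and illustrative memory figures:

```rust
// Worked version of the updated shard-size heuristic.
const MAX_SHARD_SIZE: usize = 1 << 21;

#[allow(clippy::cast_precision_loss)]
fn shard_size(total_available_mem_gib: u64) -> usize {
    let log_shard_size = match total_available_mem_gib {
        0..=14 => 17,
        m => (((m as f64).log2() * 0.619) + 16.2).floor() as usize,
    };
    std::cmp::min(1 << log_shard_size, MAX_SHARD_SIZE)
}

fn main() {
    // 8 GiB machines fall into the small bucket: 2^17 rows per shard.
    assert_eq!(shard_size(8), 1 << 17);
    // 64 GiB: 0.619 * log2(64) + 16.2 = 19.914, so 2^19 rows.
    assert_eq!(shard_size(64), 1 << 19);
    // Very large machines are capped at MAX_SHARD_SIZE = 2^21.
    assert_eq!(shard_size(1 << 20), MAX_SHARD_SIZE);
}
```

Note also that `split_threshold` is now clamped from below by `MAX_DEFERRED_SPLIT_THRESHOLD`, so the `SPLIT_THRESHOLD` environment variable can only raise the threshold, not lower it.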
-pub const DEFERRED_SPLIT_THRESHOLD: usize = 1 << 19; diff --git a/crates/stark/src/permutation.rs b/crates/stark/src/permutation.rs index 583cb80641..950dc55409 100644 --- a/crates/stark/src/permutation.rs +++ b/crates/stark/src/permutation.rs @@ -1,19 +1,28 @@ use std::borrow::Borrow; +use hashbrown::HashMap; use itertools::Itertools; use p3_air::{ExtensionBuilder, PairBuilder}; -use p3_field::{AbstractExtensionField, AbstractField, ExtensionField, Field, Powers, PrimeField}; +use p3_field::{AbstractExtensionField, AbstractField, ExtensionField, Field, PrimeField}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use p3_maybe_rayon::prelude::*; use rayon_scan::ScanParallelIterator; +use strum::IntoEnumIterator; -use crate::{air::MultiTableAirBuilder, lookup::Interaction}; +use crate::{ + air::{InteractionScope, MultiTableAirBuilder}, + lookup::Interaction, +}; /// Computes the width of the permutation trace. #[inline] #[must_use] pub const fn permutation_trace_width(num_interactions: usize, batch_size: usize) -> usize { - num_interactions.div_ceil(batch_size) + 1 + if num_interactions == 0 { + 0 + } else { + num_interactions.div_ceil(batch_size) + 1 + } } /// Populates a permutation row. @@ -26,10 +35,14 @@ pub fn populate_permutation_row>( main_row: &[F], sends: &[Interaction], receives: &[Interaction], - alpha: EF, - betas: Powers, + random_elements: &[EF], batch_size: usize, ) { + let alpha = random_elements[0]; + + // Generate the RLC elements to uniquely identify each item in the looked up tuple. + let betas = random_elements[1].powers(); + let interaction_chunks = &sends .iter() .map(|int| (int, true)) @@ -60,6 +73,50 @@ pub fn populate_permutation_row>( } } +/// Returns the sends, receives, and permutation trace width grouped by scope. +#[allow(clippy::type_complexity)] +pub fn get_grouped_maps( + sends: &[Interaction], + receives: &[Interaction], + batch_size: usize, +) -> ( + HashMap>>, + HashMap>>, + HashMap, +) { + // Create a hashmap of scope -> vec. + let mut sends = sends.to_vec(); + sends.sort_by_key(|k| k.scope); + let grouped_sends: HashMap<_, _> = sends + .iter() + .chunk_by(|int| int.scope) + .into_iter() + .map(|(k, values)| (k, values.cloned().collect_vec())) + .collect(); + + // Create a hashmap of scope -> vec. + let mut receives = receives.to_vec(); + receives.sort_by_key(|k| k.scope); + let grouped_receives: HashMap<_, _> = receives + .iter() + .chunk_by(|int| int.scope) + .into_iter() + .map(|(k, values)| (k, values.cloned().collect_vec())) + .collect(); + + // Create a hashmap of scope -> permutation trace width. + let grouped_widths = InteractionScope::iter() + .map(|scope| { + let empty_vec = vec![]; + let sends = grouped_sends.get(&scope).unwrap_or(&empty_vec); + let receives = grouped_receives.get(&scope).unwrap_or(&empty_vec); + (scope, permutation_trace_width(sends.len() + receives.len(), batch_size)) + }) + .collect(); + + (grouped_sends, grouped_receives, grouped_widths) +} + /// Generates the permutation trace for the given chip and main trace based on a variant of `LogUp`. /// /// The permutation trace has `(N+1)*EF::NUM_COLS` columns, where N is the number of interactions in @@ -71,81 +128,109 @@ pub fn generate_permutation_trace>( main: &RowMajorMatrix, random_elements: &[EF], batch_size: usize, -) -> RowMajorMatrix { - // Generate the RLC elements to uniquely identify each interaction. - let alpha = random_elements[0]; - - // Generate the RLC elements to uniquely identify each item in the looked up tuple. 
- let betas = random_elements[1].powers(); +) -> (RowMajorMatrix, EF, EF) { + let (grouped_sends, grouped_receives, grouped_widths) = + get_grouped_maps(sends, receives, batch_size); - // Iterate over the rows of the main trace to compute the permutation trace values. In - // particular, for each row i, interaction j, and columns c_0, ..., c_{k-1} we compute the sum: - // - // permutation_trace_values[i][j] = \alpha^j + \sum_k \beta^k * f_{i, c_k} - // - // where f_{i, c_k} is the value at row i for column c_k. The computed value is essentially a - // fingerprint for the interaction. - let permutation_trace_width = permutation_trace_width(sends.len() + receives.len(), batch_size); let height = main.height(); + let permutation_trace_width = grouped_widths.values().sum::(); let mut permutation_trace = RowMajorMatrix::new( vec![EF::zero(); permutation_trace_width * height], permutation_trace_width, ); - // Compute the permutation trace values in parallel. - match preprocessed { - Some(prep) => { - permutation_trace - .par_rows_mut() - .zip_eq(prep.par_row_slices()) - .zip_eq(main.par_row_slices()) - .for_each(|((row, prep_row), main_row)| { - populate_permutation_row( - row, - prep_row, - main_row, - sends, - receives, - alpha, - betas.clone(), - batch_size, - ); - }); + let mut global_cumulative_sum = EF::zero(); + let mut local_cumulative_sum = EF::zero(); + + for scope in InteractionScope::iter() { + let empty_vec = vec![]; + let sends = grouped_sends.get(&scope).unwrap_or(&empty_vec); + let receives = grouped_receives.get(&scope).unwrap_or(&empty_vec); + + if sends.is_empty() && receives.is_empty() { + continue; } - None => { - permutation_trace.par_rows_mut().zip_eq(main.par_row_slices()).for_each( - |(row, main_row)| { - populate_permutation_row( - row, - &[], - main_row, - sends, - receives, - alpha, - betas.clone(), - batch_size, - ); - }, - ); + + let random_elements = match scope { + InteractionScope::Global => &random_elements[0..2], + InteractionScope::Local => &random_elements[2..4], + }; + + let row_range = match scope { + InteractionScope::Global => { + 0..*grouped_widths.get(&InteractionScope::Global).expect("Expected global scope") + } + InteractionScope::Local => { + let global_perm_width = + *grouped_widths.get(&InteractionScope::Global).expect("Expected global scope"); + let local_perm_width = + *grouped_widths.get(&InteractionScope::Local).expect("Expected local scope"); + global_perm_width..global_perm_width + local_perm_width + } + }; + + // Compute the permutation trace values in parallel. 
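Editor's note: the grouped permutation trace above lays out global columns first, then local columns, each scope contributing `ceil(n / batch_size)` fraction columns plus one running-sum column (or nothing at all if the scope has no interactions). A small sketch with hypothetical interaction counts:

```rust
// Sketch of the per-scope permutation-trace layout derived in `get_grouped_maps`
// and used by `row_range` above.
fn permutation_trace_width(num_interactions: usize, batch_size: usize) -> usize {
    if num_interactions == 0 {
        0
    } else {
        num_interactions.div_ceil(batch_size) + 1
    }
}

fn main() {
    let batch_size = 2;
    // Hypothetical counts: 3 global interactions, 5 local interactions.
    let global_width = permutation_trace_width(3, batch_size); // ceil(3/2) + 1 = 3
    let local_width = permutation_trace_width(5, batch_size); // ceil(5/2) + 1 = 4

    // Column ranges inside one permutation-trace row.
    let global_range = 0..global_width;
    let local_range = global_width..global_width + local_width;
    assert_eq!(global_range, 0..3);
    assert_eq!(local_range, 3..7);

    // A chip with no global interactions contributes zero global columns.
    assert_eq!(permutation_trace_width(0, batch_size), 0);
}
```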
+ match preprocessed { + Some(prep) => { + permutation_trace + .par_rows_mut() + .zip_eq(prep.par_row_slices()) + .zip_eq(main.par_row_slices()) + .for_each(|((row, prep_row), main_row)| { + populate_permutation_row( + &mut row[row_range.start..row_range.end], + prep_row, + main_row, + sends, + receives, + random_elements, + batch_size, + ); + }); + } + None => { + permutation_trace.par_rows_mut().zip_eq(main.par_row_slices()).for_each( + |(row, main_row)| { + populate_permutation_row( + &mut row[row_range.start..row_range.end], + &[], + main_row, + sends, + receives, + random_elements, + batch_size, + ); + }, + ); + } } - } - let zero = EF::zero(); - let cumulative_sums = permutation_trace - .par_rows_mut() - .map(|row| row[0..permutation_trace_width - 1].iter().copied().sum::()) - .collect::>(); + let zero = EF::zero(); + let cumulative_sums = permutation_trace + .par_rows_mut() + .map(|row| row[row_range.start..row_range.end - 1].iter().copied().sum::()) + .collect::>(); - let cumulative_sums = - cumulative_sums.into_par_iter().scan(|a, b| *a + *b, zero).collect::>(); + let cumulative_sums = + cumulative_sums.into_par_iter().scan(|a, b| *a + *b, zero).collect::>(); - permutation_trace.par_rows_mut().zip_eq(cumulative_sums.into_par_iter()).for_each( - |(row, cumulative_sum)| { - *row.last_mut().unwrap() = cumulative_sum; - }, - ); + match scope { + InteractionScope::Global => { + global_cumulative_sum = *cumulative_sums.last().unwrap(); + } + InteractionScope::Local => { + local_cumulative_sum = *cumulative_sums.last().unwrap(); + } + } - permutation_trace + permutation_trace.par_rows_mut().zip_eq(cumulative_sums.clone().into_par_iter()).for_each( + |(row, cumulative_sum)| { + row[row_range.end - 1] = cumulative_sum; + }, + ); + } + + (permutation_trace, global_cumulative_sum, local_cumulative_sum) } /// Evaluates the permutation constraints for the given chip. @@ -154,7 +239,8 @@ pub fn generate_permutation_trace>( /// - The running sum column starts at zero. /// - That the RLC per interaction is computed correctly. /// - The running sum column ends at the (currently) given cumalitive sum. -pub fn eval_permutation_constraints( +#[allow(clippy::too_many_lines)] +pub fn eval_permutation_constraints<'a, F, AB>( sends: &[Interaction], receives: &[Interaction], batch_size: usize, @@ -162,14 +248,18 @@ pub fn eval_permutation_constraints( ) where F: Field, AB::EF: ExtensionField, - AB: MultiTableAirBuilder + PairBuilder, + AB: MultiTableAirBuilder<'a, F = F> + PairBuilder, + AB: 'a, { + let (grouped_sends, grouped_receives, grouped_widths) = + get_grouped_maps(sends, receives, batch_size); + // Get the permutation challenges. let permutation_challenges = builder.permutation_randomness(); - let (alpha, beta): (AB::ExprEF, AB::ExprEF) = - (permutation_challenges[0].into(), permutation_challenges[1].into()); - - // Get the preprocssed, main, and permutation trace. + let random_elements: Vec = + permutation_challenges.iter().map(|x| (*x).into()).collect(); + let cumulative_sums: Vec = + builder.cumulative_sums().iter().map(|x| (*x).into()).collect(); let preprocessed = builder.preprocessed(); let main = builder.main(); let perm = builder.permutation().to_row_major_matrix(); @@ -184,84 +274,127 @@ pub fn eval_permutation_constraints( let perm_next = perm.row_slice(1); let perm_next: &[AB::VarEF] = (*perm_next).borrow(); - // Ensure that each batch sum m_i/f_i is computed correctly. 
- let interaction_chunks = &sends - .iter() - .map(|int| (int, true)) - .chain(receives.iter().map(|int| (int, false))) - .chunks(batch_size); - // Assert that the permutation trace width is correct. - let expected_perm_width = permutation_trace_width(sends.len() + receives.len(), batch_size); + let expected_perm_width = grouped_widths.values().sum::(); if perm_width != expected_perm_width { panic!( "permutation trace width is incorrect: expected {expected_perm_width}, got {perm_width}", ); } - // Assert that the i-eth entry is equal to the sum_i m_i/rlc_i by constraints: - // entry * \prod_i rlc_i = \sum_i m_i * \prod_{j!=i} rlc_j over all columns of the permutation - // trace except the last column. - for (entry, chunk) in perm_local[0..perm_local.len() - 1].iter().zip(interaction_chunks) { - // First, we calculate the random linear combinations and multiplicities with the correct - // sign depending on wetther the interaction is a send or a recieve. - let mut rlcs: Vec = Vec::with_capacity(batch_size); - let mut multiplicities: Vec = Vec::with_capacity(batch_size); - for (interaction, is_send) in chunk { - let mut rlc = alpha.clone(); - let mut betas = beta.powers(); - - rlc += betas.next().unwrap() - * AB::ExprEF::from_canonical_usize(interaction.argument_index()); - for (field, beta) in interaction.values.iter().zip(betas.clone()) { - let elem = field.apply::(&preprocessed_local, main_local); - rlc += beta * elem; + for scope in InteractionScope::iter() { + let random_elements = match scope { + InteractionScope::Global => &random_elements[0..2], + InteractionScope::Local => &random_elements[2..4], + }; + + let (alpha, beta) = (&random_elements[0], &random_elements[1]); + + let perm_local = match scope { + InteractionScope::Global => &perm_local[0..*grouped_widths.get(&scope).unwrap()], + InteractionScope::Local => { + let global_perm_width = *grouped_widths.get(&InteractionScope::Global).unwrap(); + &perm_local + [global_perm_width..global_perm_width + *grouped_widths.get(&scope).unwrap()] + } + }; + + let perm_next = match scope { + InteractionScope::Global => &perm_next[0..*grouped_widths.get(&scope).unwrap()], + InteractionScope::Local => { + let global_perm_width = *grouped_widths.get(&InteractionScope::Global).unwrap(); + &perm_next + [global_perm_width..global_perm_width + *grouped_widths.get(&scope).unwrap()] } - rlcs.push(rlc); - - let send_factor = if is_send { AB::F::one() } else { -AB::F::one() }; - multiplicities.push( - interaction - .multiplicity - .apply::(&preprocessed_local, main_local) - * send_factor, - ); + }; + + let empty_vec = vec![]; + let sends = grouped_sends.get(&scope).unwrap_or(&empty_vec); + let receives = grouped_receives.get(&scope).unwrap_or(&empty_vec); + + if sends.is_empty() && receives.is_empty() { + continue; } - // Now we can calculate the numerator and denominator of the combined batch. - let mut product = AB::ExprEF::one(); - let mut numerator = AB::ExprEF::zero(); - for (i, (m, rlc)) in multiplicities.into_iter().zip(rlcs.iter()).enumerate() { - // Calculate the running product of all rlcs. - product *= rlc.clone(); - - // Calculate the product of all but the current rlc. - let mut all_but_current = AB::ExprEF::one(); - for other_rlc in rlcs.iter().enumerate().filter(|(j, _)| i != *j).map(|(_, rlc)| rlc) { - all_but_current *= other_rlc.clone(); + // Ensure that each batch sum m_i/f_i is computed correctly. 
+ let interaction_chunks = &sends + .iter() + .map(|int| (int, true)) + .chain(receives.iter().map(|int| (int, false))) + .chunks(batch_size); + + // Assert that the i-eth entry is equal to the sum_i m_i/rlc_i by constraints: + // entry * \prod_i rlc_i = \sum_i m_i * \prod_{j!=i} rlc_j over all columns of the permutation + // trace except the last column. + for (entry, chunk) in perm_local[0..perm_local.len() - 1].iter().zip(interaction_chunks) { + // First, we calculate the random linear combinations and multiplicities with the correct + // sign depending on wetther the interaction is a send or a recieve. + let mut rlcs: Vec = Vec::with_capacity(batch_size); + let mut multiplicities: Vec = Vec::with_capacity(batch_size); + for (interaction, is_send) in chunk { + let mut rlc = alpha.clone(); + let mut betas = beta.powers(); + + rlc += betas.next().unwrap() + * AB::ExprEF::from_canonical_usize(interaction.argument_index()); + for (field, beta) in interaction.values.iter().zip(betas.clone()) { + let elem = field.apply::(&preprocessed_local, main_local); + rlc += beta * elem; + } + rlcs.push(rlc); + + let send_factor = if is_send { AB::F::one() } else { -AB::F::one() }; + multiplicities.push( + interaction + .multiplicity + .apply::(&preprocessed_local, main_local) + * send_factor, + ); + } + + // Now we can calculate the numerator and denominator of the combined batch. + let mut product = AB::ExprEF::one(); + let mut numerator = AB::ExprEF::zero(); + for (i, (m, rlc)) in multiplicities.into_iter().zip(rlcs.iter()).enumerate() { + // Calculate the running product of all rlcs. + product *= rlc.clone(); + + // Calculate the product of all but the current rlc. + let mut all_but_current = AB::ExprEF::one(); + for other_rlc in + rlcs.iter().enumerate().filter(|(j, _)| i != *j).map(|(_, rlc)| rlc) + { + all_but_current *= other_rlc.clone(); + } + numerator += AB::ExprEF::from_base(m) * all_but_current; } - numerator += AB::ExprEF::from_base(m) * all_but_current; + + // Finally, assert that the entry is equal to the numerator divided by the product. + let entry: AB::ExprEF = (*entry).into(); + builder.assert_eq_ext(product.clone() * entry.clone(), numerator); } - // Finally, assert that the entry is equal to the numerator divided by the product. - let entry: AB::ExprEF = (*entry).into(); - builder.assert_eq_ext(product.clone() * entry.clone(), numerator); - } + // Compute the running local and next permutation sums. + let perm_width = grouped_widths.get(&scope).unwrap(); + let sum_local = + perm_local[..perm_width - 1].iter().map(|x| (*x).into()).sum::(); + let sum_next = perm_next[..perm_width - 1].iter().map(|x| (*x).into()).sum::(); + let phi_local: AB::ExprEF = (*perm_local.last().unwrap()).into(); + let phi_next: AB::ExprEF = (*perm_next.last().unwrap()).into(); - // Compute the running local and next permutation sums. - let cumulative_sum = builder.cumulative_sum(); - let sum_local = perm_local[..perm_width - 1].iter().map(|x| (*x).into()).sum::(); - let sum_next = perm_next[..perm_width - 1].iter().map(|x| (*x).into()).sum::(); - let phi_local: AB::ExprEF = (*perm_local.last().unwrap()).into(); - let phi_next: AB::ExprEF = (*perm_next.last().unwrap()).into(); + // Assert that cumulative sum is initialized to `phi_local` on the first row. + builder.when_first_row().assert_eq_ext(phi_local.clone(), sum_local); - // Assert that cumulative sum is initialized to `phi_local` on the first row. 
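Editor's note: the batch constraint above enforces, per permutation-trace entry, the cleared-denominator form of `entry = sum_i m_i / rlc_i`, i.e. `entry * prod_i rlc_i = sum_i m_i * prod_{j != i} rlc_j`. The following numeric check illustrates the identity over a toy prime field (p = 101); the multiplicities and RLC values are arbitrary examples, not real trace data.

```rust
// Numeric check of the batched LogUp identity enforced by the constraint above.
const P: u64 = 101;

fn pow(mut base: u64, mut exp: u64) -> u64 {
    let mut acc = 1;
    base %= P;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % P;
        }
        base = base * base % P;
        exp >>= 1;
    }
    acc
}

fn inv(a: u64) -> u64 {
    // Fermat inverse in the toy field.
    pow(a, P - 2)
}

fn main() {
    // One batch of two interactions: a send with multiplicity 3 and a receive
    // (negated multiplicity 1), each with its RLC fingerprint.
    let multiplicities = [3u64, P - 1];
    let rlcs = [17u64, 29];

    // entry = sum_i m_i / rlc_i.
    let entry: u64 = multiplicities
        .iter()
        .zip(rlcs.iter())
        .map(|(m, r)| m * inv(*r) % P)
        .sum::<u64>()
        % P;

    // Left side: entry * prod_i rlc_i.
    let product = rlcs.iter().fold(1u64, |acc, r| acc * r % P);
    let lhs = entry * product % P;

    // Right side: sum_i m_i * prod_{j != i} rlc_j.
    let rhs = multiplicities
        .iter()
        .enumerate()
        .map(|(i, m)| {
            let all_but_i = rlcs
                .iter()
                .enumerate()
                .filter(|(j, _)| *j != i)
                .fold(1u64, |acc, (_, r)| acc * r % P);
            m * all_but_i % P
        })
        .sum::<u64>()
        % P;

    assert_eq!(lhs, rhs);
}
```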
- builder.when_first_row().assert_eq_ext(phi_local.clone(), sum_local); + // Assert that the cumulative sum is constrained to `phi_next - phi_local` on the transition + // rows. + builder.when_transition().assert_eq_ext(phi_next - phi_local.clone(), sum_next); - // Assert that the cumulative sum is constrained to `phi_next - phi_local` on the transition - // rows. - builder.when_transition().assert_eq_ext(phi_next - phi_local.clone(), sum_next); + // Assert that the cumulative sum is constrained to `phi_local` on the last row. + let cumulative_sum = match scope { + InteractionScope::Global => &cumulative_sums[0], + InteractionScope::Local => &cumulative_sums[1], + }; - // Assert that the cumulative sum is constrained to `phi_local` on the last row. - builder.when_last_row().assert_eq_ext(*perm_local.last().unwrap(), cumulative_sum); + builder.when_last_row().assert_eq_ext(*perm_local.last().unwrap(), cumulative_sum.clone()); + } } diff --git a/crates/stark/src/prover.rs b/crates/stark/src/prover.rs index 7fca60cefe..1216acb186 100644 --- a/crates/stark/src/prover.rs +++ b/crates/stark/src/prover.rs @@ -1,9 +1,10 @@ use core::fmt::Display; +use hashbrown::HashMap; use itertools::Itertools; use serde::{de::DeserializeOwned, Serialize}; -use std::{cmp::Reverse, error::Error, time::Instant}; +use std::{array, cmp::Reverse, error::Error, time::Instant}; -use crate::{AirOpenedValues, ChipOpenedValues, ShardOpenedValues}; +use crate::{air::InteractionScope, AirOpenedValues, ChipOpenedValues, ShardOpenedValues}; use p3_air::Air; use p3_challenger::{CanObserve, FieldChallenger}; use p3_commit::{Pcs, PolynomialSpace}; @@ -17,21 +18,33 @@ use super::{ VerifierConstraintFolder, }; use crate::{ - air::MachineAir, lookup::InteractionBuilder, opts::SP1CoreOpts, record::MachineRecord, - DebugConstraintBuilder, MachineChip, MachineProof, PackedChallenge, PcsProverData, - ProverConstraintFolder, ShardCommitment, ShardMainData, ShardProof, StarkVerifyingKey, + air::MachineAir, config::ZeroCommitment, lookup::InteractionBuilder, opts::SP1CoreOpts, + record::MachineRecord, Challenger, DebugConstraintBuilder, MachineChip, MachineProof, + PackedChallenge, PcsProverData, ProverConstraintFolder, ShardCommitment, ShardMainData, + ShardProof, StarkVerifyingKey, }; +/// A merged prover data item from the global and local prover data. +pub struct MergedProverDataItem<'a, M> { + /// The trace. + pub trace: &'a M, + /// The main data index. + pub main_data_idx: usize, +} + /// An algorithmic & hardware independent prover implementation for any [`MachineAir`]. pub trait MachineProver>: 'static + Send + Sync { /// The type used to store the traces. - type DeviceMatrix; + type DeviceMatrix: Matrix; /// The type used to store the polynomial commitment schemes data. type DeviceProverData; + /// The type used to store the proving key. + type DeviceProvingKey: MachineProvingKey; + /// The type used for error handling. type Error: Error + Send + Sync; @@ -42,19 +55,25 @@ pub trait MachineProver>: fn machine(&self) -> &StarkMachine; /// Setup the preprocessed data into a proving and verifying key. - fn setup(&self, program: &A::Program) -> (StarkProvingKey, StarkVerifyingKey) { - self.machine().setup(program) - } + fn setup(&self, program: &A::Program) -> (Self::DeviceProvingKey, StarkVerifyingKey); /// Generate the main traces. - fn generate_traces(&self, record: &A::Record) -> Vec<(String, RowMajorMatrix>)> { - // Filter the chips based on what is used. 
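Editor's note: stepping back to the permutation argument for a moment, the first-row, transition, and last-row constraints applied per scope above amount to checking that the running-sum column is a prefix sum of the per-row batch sums and that it ends at the claimed cumulative sum. A plain-integer sketch with arbitrary example row sums:

```rust
// Sketch of the running-sum ("phi") column constrained per scope above:
//   phi[0] == row_sum[0],
//   phi[i+1] - phi[i] == row_sum[i+1] on transitions,
//   phi[last] == cumulative_sum.
fn main() {
    let row_sums = [5i64, -2, 7, 0, -3];

    // The prover materializes phi as a prefix sum of the per-row batch sums.
    let mut phi = Vec::with_capacity(row_sums.len());
    let mut acc = 0i64;
    for s in row_sums {
        acc += s;
        phi.push(acc);
    }

    // First-row constraint.
    assert_eq!(phi[0], row_sums[0]);
    // Transition constraint on every consecutive pair of rows.
    for i in 0..phi.len() - 1 {
        assert_eq!(phi[i + 1] - phi[i], row_sums[i + 1]);
    }
    // Last-row constraint: phi ends at the claimed cumulative sum for this scope.
    let cumulative_sum: i64 = row_sums.iter().sum();
    assert_eq!(*phi.last().unwrap(), cumulative_sum);
}
```

The local cumulative sum must vanish within each shard, while the global ones only need to cancel once summed across all shard proofs, which is exactly what the verifier changes earlier in this diff check.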
+ fn generate_traces( + &self, + record: &A::Record, + interaction_scope: InteractionScope, + ) -> Vec<(String, RowMajorMatrix>)> { let shard_chips = self.shard_chips(record).collect::>(); + let chips = shard_chips + .iter() + .filter(|chip| chip.commit_scope() == interaction_scope) + .collect::>(); + assert!(!chips.is_empty()); // For each chip, generate the trace. let parent_span = tracing::debug_span!("generate traces for shard"); parent_span.in_scope(|| { - shard_chips + chips .par_iter() .map(|chip| { let chip_name = chip.name(); @@ -75,7 +94,7 @@ pub trait MachineProver>: /// Commit to the main traces. fn commit( &self, - record: A::Record, + record: &A::Record, traces: Vec<(String, RowMajorMatrix>)>, ) -> ShardMainData; @@ -96,15 +115,17 @@ pub trait MachineProver>: /// Compute the openings of the traces. fn open( &self, - pk: &StarkProvingKey, - data: ShardMainData, + pk: &Self::DeviceProvingKey, + global_data: Option>, + local_data: ShardMainData, challenger: &mut SC::Challenger, + global_permutation_challenges: &[SC::Challenge], ) -> Result, Self::Error>; /// Generate a proof for the given records. fn prove( &self, - pk: &StarkProvingKey, + pk: &Self::DeviceProvingKey, records: Vec, challenger: &mut SC::Challenger, opts: ::Config, @@ -146,6 +167,120 @@ pub trait MachineProver>: { self.machine().debug_constraints(pk, records, challenger); } + + /// Merge the global and local chips' sorted traces. + #[allow(clippy::type_complexity)] + fn merge_shard_traces<'a, 'b>( + &'a self, + global_traces: &'b [Self::DeviceMatrix], + global_chip_ordering: &'b HashMap, + local_traces: &'b [Self::DeviceMatrix], + local_chip_ordering: &'b HashMap, + ) -> ( + HashMap, + Vec, + Vec>, + ) + where + 'a: 'b, + { + // Get the sort order of the chips. + let global_chips = global_chip_ordering + .iter() + .sorted_by_key(|(_, &i)| i) + .map(|chip| chip.0.clone()) + .collect::>(); + let local_chips = local_chip_ordering + .iter() + .sorted_by_key(|(_, &i)| i) + .map(|chip| chip.0.clone()) + .collect::>(); + + let mut merged_chips = Vec::with_capacity(global_traces.len() + local_traces.len()); + let mut merged_prover_data = Vec::with_capacity(global_chips.len() + local_chips.len()); + + assert!(global_traces.len() == global_chips.len()); + let mut global_iter = global_traces.iter().zip(global_chips.iter()).enumerate(); + assert!(local_traces.len() == local_chips.len()); + let mut local_iter = local_traces.iter().zip(local_chips.iter()).enumerate(); + + let mut global_next = global_iter.next(); + let mut local_next = local_iter.next(); + + let mut chip_scopes = Vec::new(); + + while global_next.is_some() || local_next.is_some() { + match (global_next, local_next) { + (Some(global), Some(local)) => { + let (global_prover_data_idx, (global_trace, global_chip)) = global; + let (local_prover_data_idx, (local_trace, local_chip)) = local; + if (Reverse(global_trace.height()), global_chip) + < (Reverse(local_trace.height()), local_chip) + { + merged_chips.push(global_chip.clone()); + chip_scopes.push(InteractionScope::Global); + merged_prover_data.push(MergedProverDataItem { + trace: global_trace, + main_data_idx: global_prover_data_idx, + }); + global_next = global_iter.next(); + } else { + merged_chips.push(local_chip.clone()); + chip_scopes.push(InteractionScope::Local); + merged_prover_data.push(MergedProverDataItem { + trace: local_trace, + main_data_idx: local_prover_data_idx, + }); + local_next = local_iter.next(); + } + } + (Some(global), None) => { + let (global_prover_data_idx, (global_trace, global_chip)) 
= global; + merged_chips.push(global_chip.clone()); + chip_scopes.push(InteractionScope::Global); + merged_prover_data.push(MergedProverDataItem { + trace: global_trace, + main_data_idx: global_prover_data_idx, + }); + global_next = global_iter.next(); + } + (None, Some(local)) => { + let (local_prover_data_idx, (local_trace, local_chip)) = local; + merged_chips.push(local_chip.clone()); + chip_scopes.push(InteractionScope::Local); + merged_prover_data.push(MergedProverDataItem { + trace: local_trace, + main_data_idx: local_prover_data_idx, + }); + local_next = local_iter.next(); + } + (None, None) => break, + } + } + + let chip_ordering = + merged_chips.iter().enumerate().map(|(i, name)| (name.clone(), i)).collect(); + + (chip_ordering, chip_scopes, merged_prover_data) + } +} + +/// A proving key for any [`MachineAir`] that is agnostic to hardware. +pub trait MachineProvingKey: Send + Sync { + /// The main commitment. + fn preprocessed_commit(&self) -> Com; + + /// The start pc. + fn pc_start(&self) -> Val; + + /// The proving key on the host. + fn to_host(&self) -> StarkProvingKey; + + /// The proving key on the device. + fn from_host(host: &StarkProvingKey) -> Self; + + /// Observe itself in the challenger. + fn observe_into(&self, challenger: &mut Challenger); } /// A prover implementation based on x86 and ARM CPUs. @@ -173,6 +308,7 @@ where { type DeviceMatrix = RowMajorMatrix>; type DeviceProverData = PcsProverData; + type DeviceProvingKey = StarkProvingKey; type Error = CpuProverError; fn new(machine: StarkMachine) -> Self { @@ -183,13 +319,17 @@ where &self.machine } + fn setup(&self, program: &A::Program) -> (Self::DeviceProvingKey, StarkVerifyingKey) { + self.machine().setup(program) + } + fn commit( &self, - record: A::Record, + record: &A::Record, mut named_traces: Vec<(String, RowMajorMatrix>)>, ) -> ShardMainData { // Order the chips and traces by trace size (biggest first), and get the ordering map. - named_traces.sort_by_key(|(_, trace)| Reverse(trace.height())); + named_traces.sort_by_key(|(name, trace)| (Reverse(trace.height()), name.clone())); let pcs = self.config().pcs(); @@ -226,15 +366,49 @@ where fn open( &self, pk: &StarkProvingKey, - mut data: ShardMainData, + global_data: Option>, + local_data: ShardMainData, challenger: &mut ::Challenger, + global_permutation_challenges: &[SC::Challenge], ) -> Result, Self::Error> { - let chips = self.machine().shard_chips_ordered(&data.chip_ordering).collect::>(); + let (global_traces, global_main_commit, global_main_data, global_chip_ordering) = + if let Some(global_data) = global_data { + let ShardMainData { + traces: global_traces, + main_commit: global_main_commit, + main_data: global_main_data, + chip_ordering: global_chip_ordering, + public_values: _, + } = global_data; + (global_traces, global_main_commit, Some(global_main_data), global_chip_ordering) + } else { + (vec![], self.config().pcs().zero_commitment(), None, HashMap::new()) + }; + + let ShardMainData { + traces: local_traces, + main_commit: local_main_commit, + main_data: local_main_data, + chip_ordering: local_chip_ordering, + public_values: local_public_values, + } = local_data; + + // Merge the chip ordering and traces from the global and local data. 
+ let (all_chips_ordering, all_chip_scopes, all_shard_data) = self.merge_shard_traces( + &global_traces, + &global_chip_ordering, + &local_traces, + &local_chip_ordering, + ); + + let chips = self.machine().shard_chips_ordered(&all_chips_ordering).collect::>(); + + assert!(chips.len() == all_shard_data.len()); + let config = self.machine().config(); - // Get the traces. - let traces = &mut data.traces; - let degrees = traces.iter().map(|trace| trace.height()).collect::>(); + let degrees = + all_shard_data.iter().map(|shard_data| shard_data.trace.height()).collect::>(); let log_degrees = degrees.iter().map(|degree| log2_strict_usize(*degree)).collect::>(); @@ -246,13 +420,24 @@ where let trace_domains = degrees.iter().map(|degree| pcs.natural_domain_for_degree(*degree)).collect::>(); - // Obtain the challenges used for the permutation argument. - let mut permutation_challenges: Vec = Vec::new(); + // Observe the main commitment. + challenger.observe(local_main_commit.clone()); + + // Obtain the challenges used for the local permutation argument. + let mut local_permutation_challenges: Vec = Vec::new(); for _ in 0..2 { - permutation_challenges.push(challenger.sample_ext_element()); + local_permutation_challenges.push(challenger.sample_ext_element()); } + + let permutation_challenges = global_permutation_challenges + .iter() + .chain(local_permutation_challenges.iter()) + .copied() + .collect::>(); + let packed_perm_challenges = permutation_challenges .iter() + .chain(local_permutation_challenges.iter()) .map(|c| PackedChallenge::::from_f(*c)) .collect::>(); @@ -261,25 +446,24 @@ where tracing::debug_span!("generate permutation traces").in_scope(|| { chips .par_iter() - .zip(traces.par_iter_mut()) - .map(|(chip, main_trace): (&&MachineChip, _)| { + .zip(all_shard_data.par_iter()) + .map(|(chip, shard_data)| { let preprocessed_trace = pk.chip_ordering.get(&chip.name()).map(|&index| &pk.traces[index]); - let perm_trace = chip.generate_permutation_trace( + let (perm_trace, global_sum, local_sum) = chip.generate_permutation_trace( preprocessed_trace, - main_trace, + shard_data.trace, &permutation_challenges, ); - let cumulative_sum = - perm_trace.row_slice(main_trace.height() - 1).last().copied().unwrap(); - ((perm_trace, preprocessed_trace), cumulative_sum) + ((perm_trace, preprocessed_trace), [global_sum, local_sum]) }) .unzip() }); // Compute some statistics. for i in 0..chips.len() { - let trace_width = traces[i].width(); + let trace_width = all_shard_data[i].trace.width(); + let trace_height = all_shard_data[i].trace.height(); let prep_width = prep_traces[i].map_or(0, |x| x.width()); let permutation_width = permutation_traces[i].width(); let total_width = trace_width @@ -291,8 +475,8 @@ where trace_width, prep_width, permutation_width * >::D, - traces[i].height(), - total_width * traces[i].height(), + trace_height, + total_width * trace_height, ); } @@ -313,7 +497,13 @@ where let (permutation_commit, permutation_data) = tracing::debug_span!("commit to permutation traces") .in_scope(|| pcs.commit(domains_and_perm_traces)); + + // Observe the permutation commitment and cumulative sums. challenger.observe(permutation_commit.clone()); + for [global_sum, local_sum] in cumulative_sums.iter() { + challenger.observe_slice(global_sum.as_base_slice()); + challenger.observe_slice(local_sum.as_base_slice()); + } // Compute the quotient polynomial for all chips. 
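Editor's note: `merge_shard_traces` above is a two-pointer merge of the globally and locally committed trace lists, both already sorted by `(Reverse(height), name)`, while remembering which commitment each chip came from. A runnable sketch with hypothetical chip names and heights:

```rust
// Sketch of the two-pointer merge performed by `merge_shard_traces` above.
use std::cmp::Reverse;

#[derive(Debug, PartialEq)]
enum Scope {
    Global,
    Local,
}

fn main() {
    // Both lists are pre-sorted by (Reverse(height), name).
    let global = vec![("MemoryGlobalInit".to_string(), 1usize << 18), ("Syscall".to_string(), 1 << 10)];
    let local = vec![("Cpu".to_string(), 1usize << 20), ("Add".to_string(), 1 << 18)];

    let mut merged: Vec<(String, usize, Scope)> = Vec::new();
    let (mut gi, mut li) = (0, 0);
    while gi < global.len() || li < local.len() {
        let take_global = match (global.get(gi), local.get(li)) {
            (Some((gn, gh)), Some((ln, lh))) => (Reverse(gh), gn) < (Reverse(lh), ln),
            (Some(_), None) => true,
            (None, _) => false,
        };
        if take_global {
            let (name, height) = &global[gi];
            merged.push((name.clone(), *height, Scope::Global));
            gi += 1;
        } else {
            let (name, height) = &local[li];
            merged.push((name.clone(), *height, Scope::Local));
            li += 1;
        }
    }

    let order: Vec<&str> = merged.iter().map(|(n, _, _)| n.as_str()).collect();
    // Tallest traces first; the equal-height pair is tie-broken by name.
    assert_eq!(order, vec!["Cpu", "Add", "MemoryGlobalInit", "Syscall"]);
}
```

Because the merged order matches what the verifier reconstructs from `chip_ordering`, the opening rounds for the global and local main commitments can later be split back apart without ambiguity.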
@@ -340,13 +530,24 @@ where pk.chip_ordering.get(&chips[i].name()).map(|&index| { pcs.get_evaluations_on_domain(&pk.data, index, *quotient_domain) }); - let main_trace_on_quotient_domains = - pcs.get_evaluations_on_domain(&data.main_data, i, *quotient_domain); + let scope = all_chip_scopes[i]; + let main_data = if scope == InteractionScope::Global { + global_main_data + .as_ref() + .expect("Expected global_main_data to be Some") + } else { + &local_main_data + }; + let main_trace_on_quotient_domains = pcs.get_evaluations_on_domain( + main_data, + all_shard_data[i].main_data_idx, + *quotient_domain, + ); let permutation_trace_on_quotient_domains = pcs .get_evaluations_on_domain(&permutation_data, i, *quotient_domain); quotient_values( chips[i], - cumulative_sums[i], + &cumulative_sums[i], trace_domains[i], *quotient_domain, preprocessed_trace_on_quotient_domains, @@ -354,7 +555,7 @@ where permutation_trace_on_quotient_domains, &packed_perm_challenges, alpha, - &data.public_values, + &local_public_values, ) }) }) @@ -411,22 +612,61 @@ where let quotient_opening_points = (0..num_quotient_chunks).map(|_| vec![zeta]).collect::>(); - let (openings, opening_proof) = tracing::debug_span!("open multi batches").in_scope(|| { - pcs.open( - vec![ - (&pk.data, preprocessed_opening_points), - (&data.main_data, trace_opening_points.clone()), - (&permutation_data, trace_opening_points), - ("ient_data, quotient_opening_points), - ], - challenger, - ) - }); + // Split the trace_opening_points to the global and local chips. + let mut global_trace_opening_points = Vec::with_capacity(global_chip_ordering.len()); + let mut local_trace_opening_points = Vec::with_capacity(local_chip_ordering.len()); + for (i, trace_opening_point) in trace_opening_points.clone().into_iter().enumerate() { + let scope = all_chip_scopes[i]; + if scope == InteractionScope::Global { + global_trace_opening_points.push(trace_opening_point); + } else { + local_trace_opening_points.push(trace_opening_point); + } + } + + let rounds = if let Some(global_main_data) = global_main_data.as_ref() { + vec![ + (&pk.data, preprocessed_opening_points), + (global_main_data, global_trace_opening_points), + (&local_main_data, local_trace_opening_points), + (&permutation_data, trace_opening_points), + ("ient_data, quotient_opening_points), + ] + } else { + vec![ + (&pk.data, preprocessed_opening_points), + (&local_main_data, local_trace_opening_points), + (&permutation_data, trace_opening_points), + ("ient_data, quotient_opening_points), + ] + }; + + let (openings, opening_proof) = + tracing::debug_span!("open multi batches").in_scope(|| pcs.open(rounds, challenger)); // Collect the opened values for each chip. 
- let [preprocessed_values, main_values, permutation_values, mut quotient_values] = - openings.try_into().unwrap(); - assert!(main_values.len() == chips.len()); + let ( + preprocessed_values, + global_main_values, + local_main_values, + permutation_values, + mut quotient_values, + ) = if global_main_data.is_some() { + let [preprocessed_values, global_main_values, local_main_values, permutation_values, quotient_values] = + openings.try_into().unwrap(); + ( + preprocessed_values, + Some(global_main_values), + local_main_values, + permutation_values, + quotient_values, + ) + } else { + let [preprocessed_values, local_main_values, permutation_values, quotient_values] = + openings.try_into().unwrap(); + (preprocessed_values, None, local_main_values, permutation_values, quotient_values) + }; + let preprocessed_opened_values = preprocessed_values .into_iter() .map(|op| { @@ -435,6 +675,26 @@ where }) .collect::>(); + // Merge the global and local main values. + let mut main_values = + Vec::with_capacity(global_chip_ordering.len() + local_chip_ordering.len()); + for chip in chips.iter() { + let global_order = global_chip_ordering.get(&chip.name()); + let local_order = local_chip_ordering.get(&chip.name()); + match (global_order, local_order) { + (Some(&global_order), None) => { + let global_main_values = + global_main_values.as_ref().expect("Global main values should be Some"); + main_values.push(global_main_values[global_order].clone()); + } + (None, Some(&local_order)) => { + main_values.push(local_main_values[local_order].clone()); + } + _ => unreachable!(), + } + } + assert!(main_values.len() == chips.len()); + let main_opened_values = main_values .into_iter() .map(|op| { @@ -449,6 +709,7 @@ where AirOpenedValues { local, next } }) .collect::>(); + let mut quotient_opened_values = Vec::with_capacity(log_quotient_degrees.len()); for log_quotient_degree in log_quotient_degrees.iter() { let degree = 1 << *log_quotient_degree; @@ -463,7 +724,7 @@ where .zip_eq(cumulative_sums) .zip_eq(log_degrees.iter()) .enumerate() - .map(|(i, ((((main, permutation), quotient), cumulative_sum), log_degree))| { + .map(|(i, ((((main, permutation), quotient), cumulative_sums), log_degree))| { let preprocessed = pk .chip_ordering .get(&chips[i].name()) @@ -474,7 +735,8 @@ where main, permutation, quotient, - cumulative_sum, + global_cumulative_sum: cumulative_sums[0], + local_cumulative_sum: cumulative_sums[1], log_degree: *log_degree, } }) @@ -482,14 +744,15 @@ where Ok(ShardProof:: { commitment: ShardCommitment { - main_commit: data.main_commit.clone(), + global_main_commit, + local_main_commit, permutation_commit, quotient_commit, }, opened_values: ShardOpenedValues { chips: opened_values }, opening_proof, - chip_ordering: data.chip_ordering, - public_values: data.public_values, + chip_ordering: all_chips_ordering, + public_values: local_public_values, }) } @@ -508,33 +771,70 @@ where where A: for<'a> Air, SC::Challenge>>, { - // Generate dependencies. - self.machine().generate_dependencies(&mut records, &opts); - // Observe the preprocessed commitment. pk.observe_into(challenger); - // Generate and commit the traces for each shard. - let shard_data = records - .into_par_iter() + let contains_global_bus = self.machine().contains_global_bus(); + + if contains_global_bus { + // Generate dependencies. + self.machine().generate_dependencies(&mut records, &opts, None); + } + + // Generate and commit the global traces for each shard. 
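Editor's note: both the prover here and the verifier earlier sample the two global permutation challenges only when the machine actually has a global bus, and pin them to zero otherwise so the Fiat-Shamir transcripts stay aligned. A minimal illustration; the counter-based "challenger" below is a stand-in for the real transcript, not SP1's API:

```rust
// Illustration of the conditional global-challenge sampling used above.
use std::array;

fn sample_global_challenges(contains_global_bus: bool, mut next: impl FnMut() -> u64) -> [u64; 2] {
    array::from_fn(|_| if contains_global_bus { next() } else { 0 })
}

fn main() {
    let mut counter = 0u64;
    let mut fake_challenger = || {
        counter += 1;
        counter
    };

    // With a global bus, two fresh challenges are drawn from the transcript.
    assert_eq!(sample_global_challenges(true, &mut fake_challenger), [1, 2]);
    // Without one, the challenges are fixed to zero and the transcript is untouched.
    assert_eq!(sample_global_challenges(false, &mut fake_challenger), [0, 0]);
}
```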
+ let global_data = records + .par_iter() .map(|record| { - let named_traces = self.generate_traces(&record); - self.commit(record, named_traces) + if contains_global_bus { + let global_named_traces = + self.generate_traces(record, InteractionScope::Global); + Some(self.commit(record, global_named_traces)) + } else { + None + } }) .collect::>(); // Observe the challenges for each segment. tracing::debug_span!("observing all challenges").in_scope(|| { - shard_data.iter().for_each(|data| { - challenger.observe(data.main_commit.clone()); - challenger.observe_slice(&data.public_values[0..self.num_pv_elts()]); + global_data.iter().zip_eq(records.iter()).for_each(|(global_data, record)| { + if contains_global_bus { + challenger.observe( + global_data + .as_ref() + .expect("must have a global commitment") + .main_commit + .clone(), + ); + } + challenger.observe_slice(&record.public_values::()[0..self.num_pv_elts()]); }); }); + // Obtain the challenges used for the global permutation argument. + let global_permutation_challenges: [SC::Challenge; 2] = array::from_fn(|_| { + if contains_global_bus { + challenger.sample_ext_element() + } else { + SC::Challenge::zero() + } + }); + let shard_proofs = tracing::info_span!("prove_shards").in_scope(|| { - shard_data + global_data .into_par_iter() - .map(|data| self.open(pk, data, &mut challenger.clone())) + .zip_eq(records.par_iter()) + .map(|(global_shard_data, record)| { + let local_named_traces = self.generate_traces(record, InteractionScope::Local); + let local_shard_data = self.commit(record, local_named_traces); + self.open( + pk, + global_shard_data, + local_shard_data, + &mut challenger.clone(), + &global_permutation_challenges, + ) + }) .collect::, _>>() })?; @@ -542,6 +842,38 @@ where } } +impl MachineProvingKey for StarkProvingKey +where + SC: 'static + StarkGenericConfig + Send + Sync, + PcsProverData: Send + Sync + Serialize + DeserializeOwned, + Com: Send + Sync, +{ + fn preprocessed_commit(&self) -> Com { + self.commit.clone() + } + + fn pc_start(&self) -> Val { + self.pc_start + } + + fn to_host(&self) -> StarkProvingKey { + self.clone() + } + + fn from_host(host: &StarkProvingKey) -> Self { + host.clone() + } + + fn observe_into(&self, challenger: &mut Challenger) { + challenger.observe(self.commit.clone()); + challenger.observe(self.pc_start); + let zero = Val::::zero(); + for _ in 0..7 { + challenger.observe(zero); + } + } +} + impl Display for CpuProverError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "DefaultProverError") diff --git a/crates/stark/src/quotient.rs b/crates/stark/src/quotient.rs index 3ce49ba610..ee98d5e7c2 100644 --- a/crates/stark/src/quotient.rs +++ b/crates/stark/src/quotient.rs @@ -18,7 +18,7 @@ use super::{ #[allow(clippy::too_many_lines)] pub fn quotient_values( chip: &Chip, A>, - cumulative_sum: SC::Challenge, + cumulative_sums: &[SC::Challenge], trace_domain: Domain, quotient_domain: Domain, preprocessed_trace_on_quotient_domain: Option, @@ -126,6 +126,12 @@ where .collect(); let accumulator = PackedChallenge::::zero(); + + let packed_cumulative_sums = cumulative_sums + .iter() + .map(|c| PackedChallenge::::from_f(*c)) + .collect::>(); + let mut folder = ProverConstraintFolder { preprocessed: VerticalPair::new( RowMajorMatrixView::new_row(&prep_local), @@ -140,7 +146,7 @@ where RowMajorMatrixView::new_row(&perm_next), ), perm_challenges, - cumulative_sum, + cumulative_sums: &packed_cumulative_sums, is_first_row, is_last_row, is_transition, diff --git 
a/crates/stark/src/types.rs b/crates/stark/src/types.rs index 822a7a6268..533a8006da 100644 --- a/crates/stark/src/types.rs +++ b/crates/stark/src/types.rs @@ -1,12 +1,19 @@ #![allow(missing_docs)] -use std::fmt::Debug; +use core::fmt; +use std::{cmp::Reverse, collections::BTreeSet, fmt::Debug}; use hashbrown::HashMap; -use p3_matrix::{dense::RowMajorMatrixView, stack::VerticalPair}; +use itertools::Itertools; +use p3_matrix::{ + dense::{RowMajorMatrix, RowMajorMatrixView}, + stack::VerticalPair, + Matrix, +}; use serde::{Deserialize, Serialize}; use super::{Challenge, Com, OpeningProof, StarkGenericConfig, Val}; +use crate::air::InteractionScope; pub type QuotientOpenedValues = Vec; @@ -32,36 +39,42 @@ impl ShardMainData { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ShardCommitment { - pub main_commit: C, + pub global_main_commit: C, + pub local_main_commit: C, pub permutation_commit: C, pub quotient_commit: C, } #[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(bound(serialize = "T: Serialize"))] +#[serde(bound(deserialize = "T: Deserialize<'de>"))] pub struct AirOpenedValues { pub local: Vec, pub next: Vec, } #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ChipOpenedValues { +#[serde(bound(serialize = "T: Serialize"))] +#[serde(bound(deserialize = "T: Deserialize<'de>"))] +pub struct ChipOpenedValues { pub preprocessed: AirOpenedValues, pub main: AirOpenedValues, pub permutation: AirOpenedValues, pub quotient: Vec>, - pub cumulative_sum: T, + pub global_cumulative_sum: T, + pub local_cumulative_sum: T, pub log_degree: usize, } #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ShardOpenedValues { +pub struct ShardOpenedValues { pub chips: Vec>, } /// The maximum number of elements that can be stored in the public values vec. Both SP1 and /// recursive proofs need to pad their public values vec to this length. This is required since the /// recursion verification program expects the public values vec to be fixed length. 
-pub const PROOF_MAX_NUM_PVS: usize = 370; +pub const PROOF_MAX_NUM_PVS: usize = 371; #[derive(Serialize, Deserialize, Clone)] #[serde(bound = "")] @@ -73,6 +86,27 @@ pub struct ShardProof { pub public_values: Vec>, } +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, PartialOrd, Ord, Eq, Hash)] +pub struct ProofShape { + pub chip_information: Vec<(String, usize)>, +} + +impl ProofShape { + #[must_use] + pub fn from_traces( + global_traces: Option<&[(String, RowMajorMatrix)]>, + local_traces: &[(String, RowMajorMatrix)], + ) -> Self { + global_traces + .into_iter() + .flatten() + .chain(local_traces.iter()) + .map(|(name, trace)| (name.clone(), trace.height().ilog2() as usize)) + .sorted_by_key(|(_, height)| *height) + .collect() + } +} + impl Debug for ShardProof { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ShardProof").finish() @@ -89,8 +123,15 @@ impl AirOpenedValues { } impl ShardProof { - pub fn cumulative_sum(&self) -> Challenge { - self.opened_values.chips.iter().map(|c| c.cumulative_sum).sum() + pub fn cumulative_sum(&self, scope: InteractionScope) -> Challenge { + self.opened_values + .chips + .iter() + .map(|c| match scope { + InteractionScope::Global => c.global_cumulative_sum, + InteractionScope::Local => c.local_cumulative_sum, + }) + .sum() } pub fn log_degree_cpu(&self) -> usize { @@ -102,12 +143,12 @@ impl ShardProof { self.chip_ordering.contains_key("CPU") } - pub fn contains_memory_init(&self) -> bool { - self.chip_ordering.contains_key("MemoryInit") + pub fn contains_global_memory_init(&self) -> bool { + self.chip_ordering.contains_key("MemoryGlobalInit") } - pub fn contains_memory_finalize(&self) -> bool { - self.chip_ordering.contains_key("MemoryFinalize") + pub fn contains_global_memory_finalize(&self) -> bool { + self.chip_ordering.contains_key("MemoryGlobalFinalize") } } @@ -148,3 +189,53 @@ impl From<[u32; 8]> for DeferredDigest { DeferredDigest(bytes) } } + +impl ShardProof { + pub fn shape(&self) -> ProofShape { + ProofShape { + chip_information: self + .chip_ordering + .iter() + .sorted_by_key(|(_, idx)| *idx) + .zip(self.opened_values.chips.iter()) + .map(|((name, _), values)| (name.to_owned(), values.log_degree)) + .collect(), + } + } +} + +impl FromIterator<(String, usize)> for ProofShape { + fn from_iter>(iter: T) -> Self { + let set = iter + .into_iter() + .map(|(name, log_degree)| (Reverse(log_degree), name)) + .collect::>(); + Self { + chip_information: set + .into_iter() + .map(|(Reverse(log_degree), name)| (name, log_degree)) + .collect(), + } + } +} + +impl IntoIterator for ProofShape { + type Item = (String, usize); + + type IntoIter = as IntoIterator>::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.chip_information.into_iter() + } +} + +impl fmt::Display for ProofShape { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Print the proof shapes in a human-readable format + writeln!(f, "Proofshape:")?; + for (name, log_degree) in &self.chip_information { + writeln!(f, "{name}: {}", 1 << log_degree)?; + } + Ok(()) + } +} diff --git a/crates/stark/src/verifier.rs b/crates/stark/src/verifier.rs index 43bbf61798..e688cc89d7 100644 --- a/crates/stark/src/verifier.rs +++ b/crates/stark/src/verifier.rs @@ -5,17 +5,21 @@ use std::{ }; use itertools::Itertools; +use num_traits::cast::ToPrimitive; use p3_air::{Air, BaseAir}; use p3_challenger::{CanObserve, FieldChallenger}; use p3_commit::{LagrangeSelectors, Pcs, PolynomialSpace}; -use p3_field::{AbstractExtensionField, AbstractField}; 
+use p3_field::{AbstractExtensionField, AbstractField, Field}; use super::{ folder::VerifierConstraintFolder, types::{AirOpenedValues, ChipOpenedValues, ShardCommitment, ShardProof}, Domain, OpeningError, StarkGenericConfig, StarkVerifyingKey, Val, }; -use crate::{air::MachineAir, MachineChip}; +use crate::{ + air::{InteractionScope, MachineAir}, + MachineChip, +}; /// A verifier for a collection of air chips. pub struct Verifier(PhantomData, PhantomData); @@ -29,6 +33,7 @@ impl>> Verifier { chips: &[&MachineChip], challenger: &mut SC::Challenger, proof: &ShardProof, + global_permutation_challenges: &[SC::Challenge], ) -> Result<(), VerificationError> where A: for<'a> Air>, @@ -50,6 +55,25 @@ impl>> Verifier { return Err(VerificationError::ChipOpeningLengthMismatch); } + let chip_scopes = chips.iter().map(|chip| chip.commit_scope()).collect::>(); + + // Assert that the byte multiplicities don't overflow. + let mut max_byte_lookup_mult = 0u64; + chips.iter().zip(opened_values.chips.iter()).for_each(|(chip, val)| { + max_byte_lookup_mult = max_byte_lookup_mult + .checked_add( + (chip.num_sent_byte_lookups() as u64) + .checked_mul(1u64.checked_shl(val.log_degree as u32).unwrap()) + .unwrap(), + ) + .unwrap(); + }); + + assert!( + max_byte_lookup_mult <= SC::Val::order().to_u64().unwrap(), + "Byte multiplicities overflow" + ); + let log_degrees = opened_values.chips.iter().map(|val| val.log_degree).collect::>(); let log_quotient_degrees = @@ -60,12 +84,48 @@ impl>> Verifier { .map(|log_degree| pcs.natural_domain_for_degree(1 << log_degree)) .collect::>(); - let ShardCommitment { main_commit, permutation_commit, quotient_commit } = commitment; + let ShardCommitment { + global_main_commit, + local_main_commit, + permutation_commit, + quotient_commit, + } = commitment; - let permutation_challenges = + challenger.observe(local_main_commit.clone()); + + let local_permutation_challenges = (0..2).map(|_| challenger.sample_ext_element::()).collect::>(); challenger.observe(permutation_commit.clone()); + // Observe the cumulative sums and constrain any sum without a corresponding scope to be + // zero. + for (opening, chip) in opened_values.chips.iter().zip_eq(chips.iter()) { + let global_sum = opening.global_cumulative_sum; + let local_sum = opening.local_cumulative_sum; + challenger.observe_slice(global_sum.as_base_slice()); + challenger.observe_slice(local_sum.as_base_slice()); + + let has_global_interactions = chip + .sends() + .iter() + .chain(chip.receives()) + .any(|i| i.scope == InteractionScope::Global); + if !has_global_interactions && !global_sum.is_zero() { + return Err(VerificationError::CumulativeSumsError( + "global cumulative sum is non-zero, but no global interactions", + )); + } + let has_local_interactions = chip + .sends() + .iter() + .chain(chip.receives()) + .any(|i| i.scope == InteractionScope::Local); + if !has_local_interactions && !local_sum.is_zero() { + return Err(VerificationError::CumulativeSumsError( + "local cumulative sum is non-zero, but no local interactions", + )); + } + } let alpha = challenger.sample_ext_element::(); @@ -141,20 +201,48 @@ impl>> Verifier { }) .collect::>(); + // Split the main_domains_points_and_opens to the global and local chips. 
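Editor's note: the verifier guard above bounds the total byte-table lookup multiplicity a shard can send, `sum_chip num_sent_byte_lookups * 2^log_degree`, by the field's modulus so multiplicities cannot wrap around. A worked sketch; the chip tuples are illustrative, and the modulus shown is BabyBear's `2^31 - 2^27 + 1` as an assumed example field:

```rust
// Worked version of the byte-multiplicity overflow guard above.
fn main() {
    let field_order: u64 = (1 << 31) - (1 << 27) + 1;

    // (num_sent_byte_lookups, log_degree) per chip in one shard.
    let chips = [(12u64, 20u32), (4, 22), (1, 16)];

    let mut max_byte_lookup_mult = 0u64;
    for (sent, log_degree) in chips {
        max_byte_lookup_mult = max_byte_lookup_mult
            .checked_add(sent.checked_mul(1u64.checked_shl(log_degree).unwrap()).unwrap())
            .unwrap();
    }

    assert!(max_byte_lookup_mult <= field_order, "Byte multiplicities overflow");
}
```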
+ let mut global_trace_points_and_openings = Vec::new(); + let mut local_trace_points_and_openings = Vec::new(); + for (i, points_and_openings) in + main_domains_points_and_opens.clone().into_iter().enumerate() + { + let scope = chip_scopes[i]; + if scope == InteractionScope::Global { + global_trace_points_and_openings.push(points_and_openings); + } else { + local_trace_points_and_openings.push(points_and_openings); + } + } + + let rounds = if !global_trace_points_and_openings.is_empty() { + vec![ + (vk.commit.clone(), preprocessed_domains_points_and_opens), + (global_main_commit.clone(), global_trace_points_and_openings), + (local_main_commit.clone(), local_trace_points_and_openings), + (permutation_commit.clone(), perm_domains_points_and_opens), + (quotient_commit.clone(), quotient_domains_points_and_opens), + ] + } else { + vec![ + (vk.commit.clone(), preprocessed_domains_points_and_opens), + (local_main_commit.clone(), local_trace_points_and_openings), + (permutation_commit.clone(), perm_domains_points_and_opens), + (quotient_commit.clone(), quotient_domains_points_and_opens), + ] + }; + config .pcs() - .verify( - vec![ - (vk.commit.clone(), preprocessed_domains_points_and_opens), - (main_commit.clone(), main_domains_points_and_opens), - (permutation_commit.clone(), perm_domains_points_and_opens), - (quotient_commit.clone(), quotient_domains_points_and_opens), - ], - opening_proof, - challenger, - ) + .verify(rounds, opening_proof, challenger) .map_err(|e| VerificationError::InvalidopeningArgument(e))?; + let permutation_challenges = global_permutation_challenges + .iter() + .chain(local_permutation_challenges.iter()) + .copied() + .collect::>(); + // Verify the constrtaint evaluations. for (chip, trace_domain, qc_domains, values) in izip!(chips.iter(), trace_domains, quotient_chunk_domains, opened_values.chips.iter(),) @@ -175,6 +263,11 @@ impl>> Verifier { ) .map_err(|_| VerificationError::OodEvaluationMismatch(chip.name()))?; } + // Verify that the local cumulative sum is zero. + let local_cumulative_sum = proof.cumulative_sum(InteractionScope::Local); + if local_cumulative_sum != SC::Challenge::zero() { + return Err(VerificationError::CumulativeSumsError("local cumulative sum is not zero")); + } Ok(()) } @@ -310,12 +403,14 @@ impl>> Verifier { next: unflatten(&opening.permutation.next), }; + let cumulative_sums = [opening.global_cumulative_sum, opening.local_cumulative_sum]; + let cumulative_sums = cumulative_sums.as_slice(); let mut folder = VerifierConstraintFolder:: { preprocessed: opening.preprocessed.view(), main: opening.main.view(), perm: perm_opening.view(), perm_challenges: permutation_challenges, - cumulative_sum: opening.cumulative_sum, + cumulative_sums, is_first_row: selectors.is_first_row, is_last_row: selectors.is_last_row, is_transition: selectors.is_transition, @@ -400,6 +495,8 @@ pub enum VerificationError { MissingCpuChip, /// The length of the chip opening does not match the expected length. 
ChipOpeningLengthMismatch, + /// Cumulative sums error + CumulativeSumsError(&'static str), } impl Debug for OpeningShapeError { @@ -450,6 +547,7 @@ impl Debug for VerificationError { VerificationError::ChipOpeningLengthMismatch => { write!(f, "Chip opening length mismatch") } + VerificationError::CumulativeSumsError(s) => write!(f, "cumulative sums error: {}", s), } } } @@ -473,6 +571,7 @@ impl Display for VerificationError { VerificationError::ChipOpeningLengthMismatch => { write!(f, "Chip opening length mismatch") } + VerificationError::CumulativeSumsError(s) => write!(f, "cumulative sums error: {}", s), } } } diff --git a/crates/zkvm/entrypoint/Cargo.toml b/crates/zkvm/entrypoint/Cargo.toml index e24ef6b10c..320340a7ea 100644 --- a/crates/zkvm/entrypoint/Cargo.toml +++ b/crates/zkvm/entrypoint/Cargo.toml @@ -10,12 +10,9 @@ keywords = { workspace = true } categories = { workspace = true } [dependencies] -bincode = "1.3.3" cfg-if = "1.0.0" getrandom = { version = "0.2.15", features = ["custom"] } -once_cell = "1.19.0" rand = "0.8.5" -serde = { version = "1.0.204", features = ["derive"] } libm = { version = "0.2.8", optional = true } sha2 = { version = "0.10.8" } lazy_static = "1.5.0" diff --git a/crates/zkvm/entrypoint/src/libm.rs b/crates/zkvm/entrypoint/src/libm.rs index 4c8e6c3c52..304624d008 100644 --- a/crates/zkvm/entrypoint/src/libm.rs +++ b/crates/zkvm/entrypoint/src/libm.rs @@ -423,16 +423,6 @@ pub fn remquo(x: f64, y: f64) -> (f64, i32) { libm::remquo(x, y) } -#[no_mangle] -pub extern "C" fn rint(arg: f64) -> f64 { - libm::rint(arg) -} - -#[no_mangle] -pub extern "C" fn rintf(arg: f32) -> f32 { - libm::rintf(arg) -} - #[no_mangle] pub fn remquof(x: f32, y: f32) -> (f32, i32) { libm::remquof(x, y) diff --git a/crates/zkvm/entrypoint/src/syscalls/secp256k1.rs b/crates/zkvm/entrypoint/src/syscalls/secp256k1.rs index 7cfe22b405..4a6d0a55b7 100644 --- a/crates/zkvm/entrypoint/src/syscalls/secp256k1.rs +++ b/crates/zkvm/entrypoint/src/syscalls/secp256k1.rs @@ -8,7 +8,8 @@ use core::arch::asm; /// ### Safety /// /// The caller must ensure that `p` and `q` are valid pointers to data that is aligned along a four -/// byte boundary. +/// byte boundary. Additionally, the caller must ensure that `p` and `q` are valid points on the +/// secp256k1 curve, and that `p` and `q` are not equal to each other. #[allow(unused_variables)] #[no_mangle] pub extern "C" fn syscall_secp256k1_add(p: *mut [u32; 16], q: *mut [u32; 16]) { diff --git a/crates/zkvm/lib/Cargo.toml b/crates/zkvm/lib/Cargo.toml index 86b4aacd00..368c52faf1 100644 --- a/crates/zkvm/lib/Cargo.toml +++ b/crates/zkvm/lib/Cargo.toml @@ -10,14 +10,8 @@ keywords = { workspace = true } categories = { workspace = true } [dependencies] -anyhow = "1.0.83" bincode = "1.3.3" -cfg-if = "1.0.0" serde = { version = "1.0.204", features = ["derive"] } -amcl = { package = "snowbridge-amcl", version = "1.0.2", default-features = false, features = [ - "bls381", -] } -hex = "0.4.3" [features] default = [] diff --git a/crates/zkvm/lib/src/bls12381.rs b/crates/zkvm/lib/src/bls12381.rs index 4bf0a4d3e9..4938cee595 100644 --- a/crates/zkvm/lib/src/bls12381.rs +++ b/crates/zkvm/lib/src/bls12381.rs @@ -1,18 +1,29 @@ use std::io::ErrorKind; use crate::{ - syscall_bls12381_add, syscall_bls12381_decompress, syscall_bls12381_double, utils::AffinePoint, + syscall_bls12381_add, syscall_bls12381_decompress, syscall_bls12381_double, + utils::{AffinePoint, WeierstrassAffinePoint, WeierstrassPoint}, }; /// The number of limbs in [Bls12381AffinePoint]. 
pub const N: usize = 24; -/// An affine point on the BLS12-381 curve. +/// A point on the BLS12-381 curve. #[derive(Copy, Clone)] #[repr(align(4))] -pub struct Bls12381AffinePoint(pub [u32; N]); +pub struct Bls12381Point(pub WeierstrassPoint); -impl AffinePoint for Bls12381AffinePoint { +impl WeierstrassAffinePoint for Bls12381Point { + fn infinity() -> Self { + Self(WeierstrassPoint::Infinity) + } + + fn is_infinity(&self) -> bool { + matches!(self.0, WeierstrassPoint::Infinity) + } +} + +impl AffinePoint for Bls12381Point { /// The generator was taken from "py_ecc" python library by the Ethereum Foundation: /// /// https://github.com/ethereum/py_ecc/blob/7b9e1b3/py_ecc/bls12_381/bls12_381_curve.py#L38-L45 @@ -24,15 +35,25 @@ impl AffinePoint for Bls12381AffinePoint { ]; fn new(limbs: [u32; N]) -> Self { - Self(limbs) + Self(WeierstrassPoint::Affine(limbs)) } fn limbs_ref(&self) -> &[u32; N] { - &self.0 + match &self.0 { + WeierstrassPoint::Infinity => panic!("Infinity point has no limbs"), + WeierstrassPoint::Affine(limbs) => limbs, + } } fn limbs_mut(&mut self) -> &mut [u32; N] { - &mut self.0 + match &mut self.0 { + WeierstrassPoint::Infinity => panic!("Infinity point has no limbs"), + WeierstrassPoint::Affine(limbs) => limbs, + } + } + + fn complete_add_assign(&mut self, other: &Self) { + self.weierstrass_add_assign(other); } fn add_assign(&mut self, other: &Self) { diff --git a/crates/zkvm/lib/src/bn254.rs b/crates/zkvm/lib/src/bn254.rs index f3635f3f63..a541bc8663 100644 --- a/crates/zkvm/lib/src/bn254.rs +++ b/crates/zkvm/lib/src/bn254.rs @@ -1,29 +1,52 @@ -use crate::{syscall_bn254_add, syscall_bn254_double, utils::AffinePoint}; +use crate::{ + syscall_bn254_add, syscall_bn254_double, + utils::{AffinePoint, WeierstrassAffinePoint, WeierstrassPoint}, +}; /// The number of limbs in [Bn254AffinePoint]. pub const N: usize = 16; -/// An affine point on the Bn254 curve. +/// A point on the Bn254 curve. 
#[derive(Copy, Clone)] #[repr(align(4))] -pub struct Bn254AffinePoint(pub [u32; N]); +pub struct Bn254Point(pub WeierstrassPoint); -impl AffinePoint for Bn254AffinePoint { +impl WeierstrassAffinePoint for Bn254Point { + fn infinity() -> Self { + Self(WeierstrassPoint::Infinity) + } + + fn is_infinity(&self) -> bool { + matches!(self.0, WeierstrassPoint::Infinity) + } +} + +impl AffinePoint for Bn254Point { /// The generator has been taken from py_pairing python library by the Ethereum Foundation: /// /// https://github.com/ethereum/py_pairing/blob/5f609da/py_ecc/bn128/bn128_field_elements.py const GENERATOR: [u32; N] = [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]; fn new(limbs: [u32; N]) -> Self { - Self(limbs) + Self(WeierstrassPoint::Affine(limbs)) } fn limbs_ref(&self) -> &[u32; N] { - &self.0 + match &self.0 { + WeierstrassPoint::Infinity => panic!("Infinity point has no limbs"), + WeierstrassPoint::Affine(limbs) => limbs, + } } fn limbs_mut(&mut self) -> &mut [u32; N] { - &mut self.0 + match &mut self.0 { + WeierstrassPoint::Infinity => panic!("Infinity point has no limbs"), + WeierstrassPoint::Affine(limbs) => limbs, + } + } + + fn complete_add_assign(&mut self, other: &Self) { + self.weierstrass_add_assign(other); } fn add_assign(&mut self, other: &Self) { diff --git a/crates/zkvm/lib/src/secp256k1.rs b/crates/zkvm/lib/src/secp256k1.rs index 9106bd2ac3..0c5f65ed44 100644 --- a/crates/zkvm/lib/src/secp256k1.rs +++ b/crates/zkvm/lib/src/secp256k1.rs @@ -1,14 +1,27 @@ -use crate::{syscall_secp256k1_add, syscall_secp256k1_double, utils::AffinePoint}; +use crate::{ + syscall_secp256k1_add, syscall_secp256k1_double, + utils::{AffinePoint, WeierstrassAffinePoint, WeierstrassPoint}, +}; -/// The number of limbs in [Secp256k1AffinePoint]. +/// The number of limbs in [Secp256k1Point]. pub const N: usize = 16; /// An affine point on the Secp256k1 curve. #[derive(Copy, Clone)] #[repr(align(4))] -pub struct Secp256k1AffinePoint(pub [u32; N]); +pub struct Secp256k1Point(pub WeierstrassPoint); -impl AffinePoint for Secp256k1AffinePoint { +impl WeierstrassAffinePoint for Secp256k1Point { + fn infinity() -> Self { + Self(WeierstrassPoint::Infinity) + } + + fn is_infinity(&self) -> bool { + matches!(self.0, WeierstrassPoint::Infinity) + } +} + +impl AffinePoint for Secp256k1Point { /// The values are taken from https://en.bitcoin.it/wiki/Secp256k1. 
const GENERATOR: [u32; N] = [ 385357720, 1509065051, 768485593, 43777243, 3464956679, 1436574357, 4191992748, 2042521214, @@ -17,15 +30,25 @@ impl AffinePoint for Secp256k1AffinePoint { ]; fn new(limbs: [u32; N]) -> Self { - Self(limbs) + Self(WeierstrassPoint::Affine(limbs)) } fn limbs_ref(&self) -> &[u32; N] { - &self.0 + match &self.0 { + WeierstrassPoint::Infinity => panic!("Infinity point has no limbs"), + WeierstrassPoint::Affine(limbs) => limbs, + } } fn limbs_mut(&mut self) -> &mut [u32; N] { - &mut self.0 + match &mut self.0 { + WeierstrassPoint::Infinity => panic!("Infinity point has no limbs"), + WeierstrassPoint::Affine(limbs) => limbs, + } + } + + fn complete_add_assign(&mut self, other: &Self) { + self.weierstrass_add_assign(other); } fn add_assign(&mut self, other: &Self) { @@ -37,9 +60,11 @@ impl AffinePoint for Secp256k1AffinePoint { } fn double(&mut self) { - let a = self.limbs_mut(); - unsafe { - syscall_secp256k1_double(a); + match &mut self.0 { + WeierstrassPoint::Infinity => (), + WeierstrassPoint::Affine(limbs) => unsafe { + syscall_secp256k1_double(limbs); + }, } } } diff --git a/crates/zkvm/lib/src/utils.rs b/crates/zkvm/lib/src/utils.rs index b0b3193187..8fcf34fa7e 100644 --- a/crates/zkvm/lib/src/utils.rs +++ b/crates/zkvm/lib/src/utils.rs @@ -8,7 +8,7 @@ pub trait AffinePoint: Clone + Sized { /// Returns a reference to the limbs. fn limbs_ref(&self) -> &[u32; N]; - /// Returns a mutable reference to the limbs. + /// Returns a mutable reference to the limbs. If the point is the infinity point, this will panic. fn limbs_mut(&mut self) -> &mut [u32; N]; /// Creates a new [`AffinePoint`] from the given x and y coordinates. @@ -47,6 +47,12 @@ pub trait AffinePoint: Clone + Sized { /// Adds the given [`AffinePoint`] to `self`. fn add_assign(&mut self, other: &Self); + /// Adds the given [`AffinePoint`] to `self`. Can be optionally overriden to use a different + /// implementation of addition in multi-scalar multiplication, which is used in secp256k1 recovery. + fn complete_add_assign(&mut self, other: &Self) { + self.add_assign(other); + } + /// Doubles `self`. fn double(&mut self); @@ -87,20 +93,23 @@ pub trait AffinePoint: Clone + Sized { b_bits_le: &[bool], b: Self, ) -> Option { + // The length of the bit vectors must be the same. + debug_assert!(a_bits_le.len() == b_bits_le.len()); + let mut res: Option = None; let mut temp_a = a.clone(); let mut temp_b = b.clone(); for (a_bit, b_bit) in a_bits_le.iter().zip(b_bits_le.iter()) { if *a_bit { match res.as_mut() { - Some(res) => res.add_assign(&temp_a), + Some(res) => res.complete_add_assign(&temp_a), None => res = Some(temp_a.clone()), }; } if *b_bit { match res.as_mut() { - Some(res) => res.add_assign(&temp_b), + Some(res) => res.complete_add_assign(&temp_b), None => res = Some(temp_b.clone()), }; } @@ -130,3 +139,65 @@ pub fn bytes_to_words_le(bytes: &[u8]) -> Vec { .map(|chunk| u32::from_le_bytes(chunk.try_into().unwrap())) .collect::>() } + +#[derive(Copy, Clone)] +/// A representation of a point on a Weierstrass curve. +pub enum WeierstrassPoint { + Infinity, + Affine([u32; N]), +} + +/// A trait for affine points on Weierstrass curves. +pub trait WeierstrassAffinePoint: AffinePoint { + /// The infinity point representation of the Weierstrass curve. Typically an enum variant. + fn infinity() -> Self; + + /// Returns true if the point is the infinity point. + fn is_infinity(&self) -> bool; + + /// Performs the complete addition of two [`AffinePoint`]'s on a Weierstrass curve. 
+ /// For an addition of two points P1 and P2, the cases are: + /// 1. P1 is infinity + /// 2. P2 is infinity + /// 3. P1 equals P2 + /// 4. P1 is the negation of P2 + /// 5. Default addition. + /// + /// Implements the complete addition cases according to the + /// [Zcash complete addition spec](https://zcash.github.io/halo2/design/gadgets/ecc/addition.html#complete-addition). + fn weierstrass_add_assign(&mut self, other: &Self) { + // Case 1: p1 is infinity. + if self.is_infinity() { + *self = other.clone(); + return; + } + + // Case 2: p2 is infinity. + if other.is_infinity() { + return; + } + + // Once it's known the points are not infinity, their limbs can be safely used. + let p1 = self.limbs_mut(); + let p2 = other.limbs_ref(); + + // Case 3: p1 equals p2. + if p1 == p2 { + self.double(); + return; + } + + // Case 4: p1 is the negation of p2. + // Note: If p1 and p2 are valid elliptic curve points, and p1.x == p2.x, that means that + // either p1.y == p2.y or p1.y + p2.y == p. Because we are past Case 3, we know that p1.y != + // p2.y, so we can just check if p1.x == p2.x. Therefore, this implicitly checks that + // p1.x == p2.x AND p1.y + p2.y == p without modular negation. + if p1[..N / 2] == p2[..N / 2] { + *self = Self::infinity(); + return; + } + + // Case 5: Default addition. + self.add_assign(other); + } +} diff --git a/examples/Cargo.lock b/examples/Cargo.lock deleted file mode 100644 index c4f8741dde..0000000000 --- a/examples/Cargo.lock +++ /dev/null @@ -1,8784 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" -dependencies = [ - "lazy_static", - "regex", -] - -[[package]] -name = "addchain" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2e69442aa5628ea6951fa33e24efe8313f4321a91bd729fc2f75bdfc858570" -dependencies = [ - "num-bigint 0.3.3", - "num-integer", - "num-traits", -] - -[[package]] -name = "addr2line" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler2" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" - -[[package]] -name = "aes" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" -dependencies = [ - "cfg-if", - "cipher", - "cpufeatures", -] - -[[package]] -name = "aggregation-program" -version = "1.1.0" -dependencies = [ - "hex", - "sha2 0.10.8", - "sp1-zkvm", -] - -[[package]] -name = "aggregation-script" -version = "1.1.0" -dependencies = [ - "hex", - "sp1-build", - "sp1-sdk", - "tracing", -] - -[[package]] -name = "ahash" -version = "0.8.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" -dependencies = [ - "cfg-if", - "once_cell", - "version_check", - "zerocopy", -] - -[[package]] -name = "aho-corasick" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum =
"8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" -dependencies = [ - "memchr", -] - -[[package]] -name = "allocator-api2" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" - -[[package]] -name = "alloy-chains" -version = "0.1.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8158b4878c67837e5413721cc44298e6a2d88d39203175ea025e51892a16ba4c" -dependencies = [ - "alloy-rlp", - "num_enum 0.7.3", - "serde", - "strum", -] - -[[package]] -name = "alloy-consensus" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "629b62e38d471cc15fea534eb7283d2f8a4e8bdb1811bcc5d66dda6cfce6fae1" -dependencies = [ - "alloy-eips", - "alloy-primitives 0.8.5", - "alloy-rlp", - "alloy-serde", - "c-kzg", - "serde", -] - -[[package]] -name = "alloy-eip2930" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" -dependencies = [ - "alloy-primitives 0.8.5", - "alloy-rlp", - "serde", -] - -[[package]] -name = "alloy-eip7702" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04" -dependencies = [ - "alloy-primitives 0.8.5", - "alloy-rlp", - "k256", - "serde", -] - -[[package]] -name = "alloy-eips" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f923dd5fca5f67a43d81ed3ebad0880bd41f6dd0ada930030353ac356c54cd0f" -dependencies = [ - "alloy-eip2930", - "alloy-eip7702", - "alloy-primitives 0.8.5", - "alloy-rlp", - "alloy-serde", - "c-kzg", - "derive_more 1.0.0", - "once_cell", - "serde", - "sha2 0.10.8", -] - -[[package]] -name = "alloy-genesis" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a7a18afb0b318616b6b2b0e2e7ac5529d32a966c673b48091c9919e284e6aca" -dependencies = [ - "alloy-primitives 0.8.5", - "alloy-serde", - "serde", -] - -[[package]] -name = "alloy-json-abi" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a438d4486b5d525df3b3004188f9d5cd1d65cd30ecc41e5a3ccef6f6342e8af9" -dependencies = [ - "alloy-primitives 0.8.5", - "alloy-sol-type-parser", - "serde", - "serde_json", -] - -[[package]] -name = "alloy-json-rpc" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3c717b5298fad078cd3a418335b266eba91b511383ca9bd497f742d5975d5ab" -dependencies = [ - "alloy-primitives 0.8.5", - "alloy-sol-types 0.8.5", - "serde", - "serde_json", - "thiserror", - "tracing", -] - -[[package]] -name = "alloy-network" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb3705ce7d8602132bcf5ac7a1dd293a42adc2f183abf5907c30ac535ceca049" -dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-json-rpc", - "alloy-network-primitives", - "alloy-primitives 0.8.5", - "alloy-rpc-types-eth", - "alloy-serde", - "alloy-signer", - "alloy-sol-types 0.8.5", - "async-trait", - "auto_impl", - "futures-utils-wasm", - "thiserror", -] - -[[package]] -name = "alloy-network-primitives" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94ad40869867ed2d9cd3842b1e800889e5b49e6b92da346e93862b4a741bedf3" -dependencies = [ - "alloy-eips", - 
"alloy-primitives 0.8.5", - "alloy-serde", - "serde", -] - -[[package]] -name = "alloy-primitives" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "600d34d8de81e23b6d909c094e23b3d357e01ca36b78a8c5424c501eedbe86f0" -dependencies = [ - "alloy-rlp", - "bytes", - "cfg-if", - "const-hex", - "derive_more 0.99.18", - "hex-literal", - "itoa", - "k256", - "keccak-asm", - "proptest", - "rand 0.8.5", - "ruint", - "serde", - "tiny-keccak", -] - -[[package]] -name = "alloy-primitives" -version = "0.7.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccb3ead547f4532bc8af961649942f0b9c16ee9226e26caa3f38420651cc0bf4" -dependencies = [ - "alloy-rlp", - "bytes", - "cfg-if", - "const-hex", - "derive_more 0.99.18", - "hex-literal", - "itoa", - "k256", - "keccak-asm", - "proptest", - "rand 0.8.5", - "ruint", - "serde", - "tiny-keccak", -] - -[[package]] -name = "alloy-primitives" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "260d3ff3bff0bb84599f032a2f2c6828180b0ea0cd41fdaf44f39cef3ba41861" -dependencies = [ - "alloy-rlp", - "bytes", - "cfg-if", - "const-hex", - "derive_more 1.0.0", - "getrandom", - "hashbrown 0.14.5", - "hex-literal", - "indexmap 2.5.0", - "itoa", - "k256", - "keccak-asm", - "paste", - "proptest", - "rand 0.8.5", - "ruint", - "rustc-hash 2.0.0", - "serde", - "sha3", - "tiny-keccak", -] - -[[package]] -name = "alloy-provider" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927f708dd457ed63420400ee5f06945df9632d5d101851952056840426a10dc5" -dependencies = [ - "alloy-chains", - "alloy-consensus", - "alloy-eips", - "alloy-json-rpc", - "alloy-network", - "alloy-network-primitives", - "alloy-primitives 0.8.5", - "alloy-rpc-client", - "alloy-rpc-types-eth", - "alloy-transport", - "alloy-transport-http", - "async-stream", - "async-trait", - "auto_impl", - "dashmap", - "futures", - "futures-utils-wasm", - "lru", - "pin-project", - "reqwest 0.12.8", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", - "url", -] - -[[package]] -name = "alloy-rlp" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26154390b1d205a4a7ac7352aa2eb4f81f391399d4e2f546fb81a2f8bb383f62" -dependencies = [ - "alloy-rlp-derive", - "arrayvec 0.7.6", - "bytes", -] - -[[package]] -name = "alloy-rlp-derive" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "alloy-rpc-client" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d82952dca71173813d4e5733e2c986d8b04aea9e0f3b0a576664c232ad050a5" -dependencies = [ - "alloy-json-rpc", - "alloy-transport", - "alloy-transport-http", - "futures", - "pin-project", - "reqwest 0.12.8", - "serde", - "serde_json", - "tokio", - "tokio-stream", - "tower", - "tracing", - "url", -] - -[[package]] -name = "alloy-rpc-types" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64333d639f2a0cf73491813c629a405744e16343a4bc5640931be707c345ecc5" -dependencies = [ - "alloy-rpc-types-eth", - "alloy-serde", - "serde", -] - -[[package]] -name = "alloy-rpc-types-eth" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"83aa984386deda02482660aa31cb8ca1e63d533f1c31a52d7d181ac5ec68e9b8" -dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-network-primitives", - "alloy-primitives 0.8.5", - "alloy-rlp", - "alloy-serde", - "alloy-sol-types 0.8.5", - "cfg-if", - "derive_more 1.0.0", - "hashbrown 0.14.5", - "itertools 0.13.0", - "serde", - "serde_json", -] - -[[package]] -name = "alloy-serde" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "731f75ec5d383107fd745d781619bd9cedf145836c51ecb991623d41278e71fa" -dependencies = [ - "alloy-primitives 0.8.5", - "serde", - "serde_json", -] - -[[package]] -name = "alloy-signer" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307324cca94354cd654d6713629f0383ec037e1ff9e3e3d547212471209860c0" -dependencies = [ - "alloy-primitives 0.8.5", - "async-trait", - "auto_impl", - "elliptic-curve", - "k256", - "thiserror", -] - -[[package]] -name = "alloy-sol-macro" -version = "0.7.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b40397ddcdcc266f59f959770f601ce1280e699a91fc1862f29cef91707cd09" -dependencies = [ - "alloy-sol-macro-expander 0.7.7", - "alloy-sol-macro-input 0.7.7", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "alloy-sol-macro" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68e7f6e8fe5b443f82b3f1e15abfa191128f71569148428e49449d01f6f49e8b" -dependencies = [ - "alloy-sol-macro-expander 0.8.5", - "alloy-sol-macro-input 0.8.5", - "proc-macro-error2", - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "alloy-sol-macro-expander" -version = "0.7.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "867a5469d61480fea08c7333ffeca52d5b621f5ca2e44f271b117ec1fc9a0525" -dependencies = [ - "alloy-sol-macro-input 0.7.7", - "const-hex", - "heck", - "indexmap 2.5.0", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 2.0.79", - "syn-solidity 0.7.7", - "tiny-keccak", -] - -[[package]] -name = "alloy-sol-macro-expander" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b96ce28d2fde09abb6135f410c41fad670a3a770b6776869bd852f1df102e6f" -dependencies = [ - "alloy-sol-macro-input 0.8.5", - "const-hex", - "heck", - "indexmap 2.5.0", - "proc-macro-error2", - "proc-macro2", - "quote", - "syn 2.0.79", - "syn-solidity 0.8.5", - "tiny-keccak", -] - -[[package]] -name = "alloy-sol-macro-input" -version = "0.7.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e482dc33a32b6fadbc0f599adea520bd3aaa585c141a80b404d0a3e3fa72528" -dependencies = [ - "const-hex", - "dunce", - "heck", - "proc-macro2", - "quote", - "syn 2.0.79", - "syn-solidity 0.7.7", -] - -[[package]] -name = "alloy-sol-macro-input" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "906746396a8296537745711630d9185746c0b50c033d5e9d18b0a6eba3d53f90" -dependencies = [ - "const-hex", - "dunce", - "heck", - "proc-macro2", - "quote", - "syn 2.0.79", - "syn-solidity 0.8.5", -] - -[[package]] -name = "alloy-sol-type-parser" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc85178909a49c8827ffccfc9103a7ce1767ae66a801b69bdc326913870bf8e6" -dependencies = [ - "serde", - "winnow 0.6.20", -] - -[[package]] -name = "alloy-sol-types" -version = "0.7.7" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a91ca40fa20793ae9c3841b83e74569d1cc9af29a2f5237314fd3452d51e38c7" -dependencies = [ - "alloy-primitives 0.7.7", - "alloy-sol-macro 0.7.7", - "const-hex", - "serde", -] - -[[package]] -name = "alloy-sol-types" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d86a533ce22525969661b25dfe296c112d35eb6861f188fd284f8bd4bb3842ae" -dependencies = [ - "alloy-json-abi", - "alloy-primitives 0.8.5", - "alloy-sol-macro 0.8.5", - "const-hex", - "serde", -] - -[[package]] -name = "alloy-transport" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33616b2edf7454302a1d48084db185e52c309f73f6c10be99b0fe39354b3f1e9" -dependencies = [ - "alloy-json-rpc", - "base64 0.22.1", - "futures-util", - "futures-utils-wasm", - "serde", - "serde_json", - "thiserror", - "tokio", - "tower", - "tracing", - "url", -] - -[[package]] -name = "alloy-transport-http" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a944f5310c690b62bbb3e7e5ce34527cbd36b2d18532a797af123271ce595a49" -dependencies = [ - "alloy-json-rpc", - "alloy-transport", - "reqwest 0.12.8", - "serde_json", - "tower", - "tracing", - "url", -] - -[[package]] -name = "alloy-trie" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a46c9c4fdccda7982e7928904bd85fe235a0404ee3d7e197fff13d61eac8b4f" -dependencies = [ - "alloy-primitives 0.8.5", - "alloy-rlp", - "derive_more 1.0.0", - "hashbrown 0.14.5", - "nybbles", - "serde", - "smallvec", - "tracing", -] - -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - -[[package]] -name = "android_system_properties" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" -dependencies = [ - "libc", -] - -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - -[[package]] -name = "anstream" -version = "0.6.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" -dependencies = [ - "anstyle", - "anstyle-parse", - "anstyle-query", - "anstyle-wincon", - "colorchoice", - "is_terminal_polyfill", - "utf8parse", -] - -[[package]] -name = "anstyle" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" - -[[package]] -name = "anstyle-parse" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" -dependencies = [ - "utf8parse", -] - -[[package]] -name = "anstyle-query" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" -dependencies = [ - "windows-sys 0.52.0", -] - -[[package]] -name = "anstyle-wincon" -version = "3.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" -dependencies = [ - "anstyle", - "windows-sys 0.52.0", -] - -[[package]] -name = "anyhow" -version = "1.0.89" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" - -[[package]] -name = "ark-ff" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" -dependencies = [ - "ark-ff-asm 0.3.0", - "ark-ff-macros 0.3.0", - "ark-serialize 0.3.0", - "ark-std 0.3.0", - "derivative", - "num-bigint 0.4.6", - "num-traits", - "paste", - "rustc_version 0.3.3", - "zeroize", -] - -[[package]] -name = "ark-ff" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" -dependencies = [ - "ark-ff-asm 0.4.2", - "ark-ff-macros 0.4.2", - "ark-serialize 0.4.2", - "ark-std 0.4.0", - "derivative", - "digest 0.10.7", - "itertools 0.10.5", - "num-bigint 0.4.6", - "num-traits", - "paste", - "rustc_version 0.4.1", - "zeroize", -] - -[[package]] -name = "ark-ff-asm" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" -dependencies = [ - "quote", - "syn 1.0.109", -] - -[[package]] -name = "ark-ff-asm" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" -dependencies = [ - "quote", - "syn 1.0.109", -] - -[[package]] -name = "ark-ff-macros" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" -dependencies = [ - "num-bigint 0.4.6", - "num-traits", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "ark-ff-macros" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" -dependencies = [ - "num-bigint 0.4.6", - "num-traits", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "ark-serialize" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" -dependencies = [ - "ark-std 0.3.0", - "digest 0.9.0", -] - -[[package]] -name = "ark-serialize" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" -dependencies = [ - "ark-std 0.4.0", - "digest 0.10.7", - "num-bigint 0.4.6", -] - -[[package]] -name = "ark-std" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" -dependencies = [ - "num-traits", - "rand 0.8.5", -] - -[[package]] -name = "ark-std" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" -dependencies = [ - "num-traits", - "rand 0.8.5", -] - -[[package]] -name = "arrayref" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" - -[[package]] -name = 
"arrayvec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" - -[[package]] -name = "arrayvec" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" - -[[package]] -name = "async-stream" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" -dependencies = [ - "async-stream-impl", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "async-trait" -version = "0.1.83" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "async_io_stream" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" -dependencies = [ - "futures", - "pharos", - "rustc_version 0.4.1", -] - -[[package]] -name = "atomic-waker" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" - -[[package]] -name = "aurora-engine-modexp" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aef7712851e524f35fbbb74fa6599c5cd8692056a1c36f9ca0d2001b670e7e5" -dependencies = [ - "hex", - "num", -] - -[[package]] -name = "auto_impl" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "autocfg" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" - -[[package]] -name = "axum" -version = "0.7.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" -dependencies = [ - "async-trait", - "axum-core", - "bytes", - "futures-util", - "http 1.1.0", - "http-body 1.0.1", - "http-body-util", - "hyper 1.4.1", - "hyper-util", - "itoa", - "matchit", - "memchr", - "mime", - "percent-encoding", - "pin-project-lite", - "rustversion", - "serde", - "serde_json", - "serde_path_to_error", - "serde_urlencoded", - "sync_wrapper 1.0.1", - "tokio", - "tower", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "axum-core" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" -dependencies = [ - "async-trait", - "bytes", - "futures-util", - "http 1.1.0", - "http-body 1.0.1", - "http-body-util", - "mime", - "pin-project-lite", - "rustversion", - "sync_wrapper 1.0.1", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "backtrace" -version = "0.3.74" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "serde", - "windows-targets 0.52.6", -] - -[[package]] -name = "base16ct" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" - -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "base64" -version = "0.21.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" - -[[package]] -name = "base64" -version = "0.22.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" - -[[package]] -name = "base64ct" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" - -[[package]] -name = "bech32" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" - -[[package]] -name = "bincode" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" -dependencies = [ - "serde", -] - -[[package]] -name = "bindgen" -version = "0.69.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" -dependencies = [ - "bitflags 2.6.0", - "cexpr", - "clang-sys", - "itertools 0.12.1", - "lazy_static", - "lazycell", - "log", - "prettyplease", - "proc-macro2", - "quote", - "regex", - "rustc-hash 1.1.0", - "shlex", - "syn 2.0.79", - "which", -] - -[[package]] -name = "bit-set" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" -dependencies = [ - "bit-vec", -] - -[[package]] -name = "bit-vec" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" -dependencies = [ - "serde", -] - -[[package]] -name = "bitvec" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" -dependencies = [ - "funty", - "radium", - "serde", - "tap", - "wyz", -] - -[[package]] -name = "blake2" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" -dependencies = [ - "digest 0.10.7", -] - -[[package]] -name = "blake2b_simd" -version = 
"1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780" -dependencies = [ - "arrayref", - "arrayvec 0.7.6", - "constant_time_eq", -] - -[[package]] -name = "blake3" -version = "1.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d82033247fd8e890df8f740e407ad4d038debb9eb1f40533fffb32e7d17dc6f7" -dependencies = [ - "arrayref", - "arrayvec 0.7.6", - "cc", - "cfg-if", - "constant_time_eq", - "rayon-core", -] - -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array 0.14.7", -] - -[[package]] -name = "block-buffer" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array 0.14.7", -] - -[[package]] -name = "bls12381-program" -version = "1.1.0" -dependencies = [ - "bls12_381 0.8.0", - "ff 0.13.0", - "group 0.13.0", - "num", - "rand 0.8.5", - "serde_yaml", - "sp1-zkvm", -] - -[[package]] -name = "bls12381-script" -version = "0.1.0" -dependencies = [ - "sp1-build", - "sp1-sdk", -] - -[[package]] -name = "bls12_381" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3c196a77437e7cc2fb515ce413a6401291578b5afc8ecb29a3c7ab957f05941" -dependencies = [ - "ff 0.12.1", - "group 0.12.1", - "pairing 0.22.0", - "rand_core 0.6.4", - "subtle", -] - -[[package]] -name = "bls12_381" -version = "0.8.0" -source = "git+https://github.com/sp1-patches/bls12_381?branch=patch-v0.8.0#1519853234a532d066ad652687270dff8f1ec6a3" -dependencies = [ - "cfg-if", - "ff 0.13.0", - "group 0.13.0", - "pairing 0.23.0", - "rand_core 0.6.4", - "sp1-lib 1.2.0", - "subtle", -] - -[[package]] -name = "blst" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4378725facc195f1a538864863f6de233b500a8862747e7f165078a419d5e874" -dependencies = [ - "cc", - "glob", - "threadpool", - "zeroize", -] - -[[package]] -name = "bn254-program" -version = "1.1.0" -dependencies = [ - "num", - "rand 0.8.5", - "serde_yaml", - "sp1-zkvm", - "substrate-bn 0.6.0 (git+https://github.com/sp1-patches/bn?rev=43d854d45b5727b1ff2b9f346d728e785bb8395c)", -] - -[[package]] -name = "bn254-script" -version = "0.1.0" -dependencies = [ - "sp1-build", - "sp1-sdk", -] - -[[package]] -name = "bs58" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" -dependencies = [ - "sha2 0.10.8", - "tinyvec", -] - -[[package]] -name = "bumpalo" -version = "3.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" - -[[package]] -name = "byte-slice-cast" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" - -[[package]] -name = "bytemuck" -version = "1.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" - -[[package]] -name = "byteorder" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" - -[[package]] -name = "bytes" -version = "1.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" -dependencies = [ - "serde", -] - -[[package]] -name = "c-kzg" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0307f72feab3300336fb803a57134159f6e20139af1357f36c54cb90d8e8928" -dependencies = [ - "blst", - "cc", - "glob", - "hex", - "libc", - "once_cell", - "serde", -] - -[[package]] -name = "camino" -version = "1.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" -dependencies = [ - "serde", -] - -[[package]] -name = "cargo-platform" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" -dependencies = [ - "serde", -] - -[[package]] -name = "cargo_metadata" -version = "0.18.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" -dependencies = [ - "camino", - "cargo-platform", - "semver 1.0.23", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "cc" -version = "1.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "812acba72f0a070b003d3697490d2b55b837230ae7c6c6497f05cc2ddbb8d938" -dependencies = [ - "jobserver", - "libc", - "shlex", -] - -[[package]] -name = "cexpr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" -dependencies = [ - "nom", -] - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "cfg_aliases" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" - -[[package]] -name = "chess" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ed299b171ec34f372945ad6726f7bc1d2afd5f59fb8380f64f48e2bab2f0ec8" -dependencies = [ - "arrayvec 0.5.2", - "failure", - "nodrop", - "rand 0.7.3", -] - -[[package]] -name = "chess-program" -version = "1.1.0" -dependencies = [ - "chess", - "sp1-zkvm", -] - -[[package]] -name = "chess-script" -version = "1.1.0" -dependencies = [ - "sp1-build", - "sp1-sdk", -] - -[[package]] -name = "chrono" -version = "0.4.38" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" -dependencies = [ - "android-tzdata", - "iana-time-zone", - "num-traits", - "serde", - "windows-targets 0.52.6", -] - -[[package]] -name = "cipher" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" -dependencies = [ - "crypto-common", - "inout", -] - -[[package]] -name = "clang-sys" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" -dependencies = [ - "glob", - "libc", - "libloading", -] - 
-[[package]] -name = "clap" -version = "4.5.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" -dependencies = [ - "clap_builder", - "clap_derive", -] - -[[package]] -name = "clap_builder" -version = "4.5.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" -dependencies = [ - "anstream", - "anstyle", - "clap_lex", - "strsim", -] - -[[package]] -name = "clap_derive" -version = "4.5.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "clap_lex" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" - -[[package]] -name = "cobs" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" - -[[package]] -name = "coins-bip32" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b6be4a5df2098cd811f3194f64ddb96c267606bffd9689ac7b0160097b01ad3" -dependencies = [ - "bs58", - "coins-core", - "digest 0.10.7", - "hmac", - "k256", - "serde", - "sha2 0.10.8", - "thiserror", -] - -[[package]] -name = "coins-bip39" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3db8fba409ce3dc04f7d804074039eb68b960b0829161f8e06c95fea3f122528" -dependencies = [ - "bitvec", - "coins-bip32", - "hmac", - "once_cell", - "pbkdf2 0.12.2", - "rand 0.8.5", - "sha2 0.10.8", - "thiserror", -] - -[[package]] -name = "coins-core" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5286a0843c21f8367f7be734f89df9b822e0321d8bcce8d6e735aadff7d74979" -dependencies = [ - "base64 0.21.7", - "bech32", - "bs58", - "digest 0.10.7", - "generic-array 0.14.7", - "hex", - "ripemd", - "serde", - "serde_derive", - "sha2 0.10.8", - "sha3", - "thiserror", -] - -[[package]] -name = "colorchoice" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" - -[[package]] -name = "console" -version = "0.15.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" -dependencies = [ - "encode_unicode", - "lazy_static", - "libc", - "unicode-width", - "windows-sys 0.52.0", -] - -[[package]] -name = "const-hex" -version = "1.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0121754e84117e65f9d90648ee6aa4882a6e63110307ab73967a4c5e7e69e586" -dependencies = [ - "cfg-if", - "cpufeatures", - "hex", - "proptest", - "serde", -] - -[[package]] -name = "const-oid" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" - -[[package]] -name = "const-oid" -version = "0.9.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" - -[[package]] -name = "constant_time_eq" -version = "0.3.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" - -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - -[[package]] -name = "convert_case" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" -dependencies = [ - "unicode-segmentation", -] - -[[package]] -name = "core-foundation" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" - -[[package]] -name = "cpufeatures" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" -dependencies = [ - "libc", -] - -[[package]] -name = "crc" -version = "3.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" -dependencies = [ - "crc-catalog", -] - -[[package]] -name = "crc-catalog" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" - -[[package]] -name = "crossbeam-deque" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" - -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - -[[package]] -name = "crypto-bigint" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" -dependencies = [ - "generic-array 0.14.7", - "subtle", -] - -[[package]] -name = "crypto-bigint" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" -dependencies = [ - "generic-array 0.14.7", - "rand_core 0.6.4", - "subtle", - "zeroize", -] - -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array 0.14.7", - "typenum", -] - -[[package]] -name = "ctr" -version = "0.9.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" -dependencies = [ - "cipher", -] - -[[package]] -name = "ctrlc" -version = "3.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90eeab0aa92f3f9b4e87f258c72b139c207d251f9cbc1080a0086b86a8870dd3" -dependencies = [ - "nix", - "windows-sys 0.59.0", -] - -[[package]] -name = "curve25519-dalek" -version = "4.1.3" -source = "git+https://github.com/sp1-patches/curve25519-dalek?branch=patch-curve25519-v4.1.3#1d73fd95f1a76bee8f46643cf78bbccc1fb06ede" -dependencies = [ - "anyhow", - "cfg-if", - "cpufeatures", - "curve25519-dalek-derive", - "digest 0.10.7", - "fiat-crypto", - "rustc_version 0.4.1", - "sp1-lib 1.2.0", - "subtle", - "zeroize", -] - -[[package]] -name = "curve25519-dalek-derive" -version = "0.1.1" -source = "git+https://github.com/sp1-patches/curve25519-dalek?branch=patch-curve25519-v4.1.3#1d73fd95f1a76bee8f46643cf78bbccc1fb06ede" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "curve25519-dalek-ng" -version = "4.1.1" -source = "git+https://github.com/sp1-patches/curve25519-dalek-ng?branch=patch-v4.1.1#8dd77b20f3e78965a0cc57070a04465b9d52c49e" -dependencies = [ - "anyhow", - "byteorder", - "cfg-if", - "digest 0.9.0", - "rand_core 0.6.4", - "sp1-lib 1.2.0", - "subtle-ng", - "zeroize", -] - -[[package]] -name = "cycle-tracking-program" -version = "1.1.0" -dependencies = [ - "sp1-derive", - "sp1-zkvm", -] - -[[package]] -name = "cycle-tracking-script" -version = "1.1.0" -dependencies = [ - "sp1-build", - "sp1-sdk", -] - -[[package]] -name = "darling" -version = "0.20.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" -dependencies = [ - "darling_core", - "darling_macro", -] - -[[package]] -name = "darling_core" -version = "0.20.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim", - "syn 2.0.79", -] - -[[package]] -name = "darling_macro" -version = "0.20.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" -dependencies = [ - "darling_core", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "dashmap" -version = "6.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" -dependencies = [ - "cfg-if", - "crossbeam-utils", - "hashbrown 0.14.5", - "lock_api", - "once_cell", - "parking_lot_core", -] - -[[package]] -name = "dashu" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b3e5ac1e23ff1995ef05b912e2b012a8784506987a2651552db2c73fb3d7e0" -dependencies = [ - "dashu-base", - "dashu-float", - "dashu-int", - "dashu-macros", - "dashu-ratio", - "rustversion", -] - -[[package]] -name = "dashu-base" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0b80bf6b85aa68c58ffea2ddb040109943049ce3fbdf4385d0380aef08ef289" - -[[package]] -name = "dashu-float" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85078445a8dbd2e1bd21f04a816f352db8d333643f0c9b78ca7c3d1df71063e7" -dependencies = [ - 
"dashu-base", - "dashu-int", - "num-modular", - "num-order", - "rustversion", - "static_assertions", -] - -[[package]] -name = "dashu-int" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee99d08031ca34a4d044efbbb21dff9b8c54bb9d8c82a189187c0651ffdb9fbf" -dependencies = [ - "cfg-if", - "dashu-base", - "num-modular", - "num-order", - "rustversion", - "static_assertions", -] - -[[package]] -name = "dashu-macros" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93381c3ef6366766f6e9ed9cf09e4ef9dec69499baf04f0c60e70d653cf0ab10" -dependencies = [ - "dashu-base", - "dashu-float", - "dashu-int", - "dashu-ratio", - "paste", - "proc-macro2", - "quote", - "rustversion", -] - -[[package]] -name = "dashu-ratio" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47e33b04dd7ce1ccf8a02a69d3419e354f2bbfdf4eb911a0b7465487248764c9" -dependencies = [ - "dashu-base", - "dashu-float", - "dashu-int", - "num-modular", - "num-order", - "rustversion", -] - -[[package]] -name = "der" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" -dependencies = [ - "const-oid 0.7.1", - "crypto-bigint 0.3.2", - "pem-rfc7468 0.3.1", -] - -[[package]] -name = "der" -version = "0.7.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" -dependencies = [ - "const-oid 0.9.6", - "pem-rfc7468 0.7.0", - "zeroize", -] - -[[package]] -name = "deranged" -version = "0.3.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" -dependencies = [ - "powerfmt", - "serde", -] - -[[package]] -name = "derivative" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "derive_more" -version = "0.99.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" -dependencies = [ - "convert_case 0.4.0", - "proc-macro2", - "quote", - "rustc_version 0.4.1", - "syn 2.0.79", -] - -[[package]] -name = "derive_more" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" -dependencies = [ - "derive_more-impl", -] - -[[package]] -name = "derive_more-impl" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" -dependencies = [ - "convert_case 0.6.0", - "proc-macro2", - "quote", - "syn 2.0.79", - "unicode-xid", -] - -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array 0.14.7", -] - -[[package]] -name = "digest" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" -dependencies = [ - "block-buffer 0.10.4", - "const-oid 0.9.6", - "crypto-common", - 
"subtle", -] - -[[package]] -name = "dirs" -version = "5.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" -dependencies = [ - "dirs-sys", -] - -[[package]] -name = "dirs-sys" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" -dependencies = [ - "libc", - "option-ext", - "redox_users", - "windows-sys 0.48.0", -] - -[[package]] -name = "downcast-rs" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" - -[[package]] -name = "dunce" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" - -[[package]] -name = "dyn-clone" -version = "1.0.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" - -[[package]] -name = "ecdsa" -version = "0.16.9" -source = "git+https://github.com/sp1-patches/signatures?branch=patch-ecdsa-v0.16.9#de1b108e8140dcb86ecf56f1215ac4b7fab6fcd8" -dependencies = [ - "anyhow", - "cfg-if", - "der 0.7.9", - "digest 0.10.7", - "elliptic-curve", - "hex-literal", - "rfc6979", - "signature", - "sp1-lib 1.2.0", - "spki 0.7.3", -] - -[[package]] -name = "ed25519" -version = "2.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" -dependencies = [ - "pkcs8 0.10.2", - "signature", -] - -[[package]] -name = "ed25519-consensus" -version = "2.1.0" -source = "git+https://github.com/sp1-patches/ed25519-consensus?branch=patch-v2.1.0#2b2c4b43344bc4daf5b1326f367f2d9d661eeabb" -dependencies = [ - "curve25519-dalek-ng", - "hex", - "rand_core 0.6.4", - "serde", - "sha2 0.9.9", - "thiserror", - "zeroize", -] - -[[package]] -name = "ed25519-dalek" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" -dependencies = [ - "curve25519-dalek", - "ed25519", - "serde", - "sha2 0.10.8", - "subtle", - "zeroize", -] - -[[package]] -name = "either" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" - -[[package]] -name = "elf" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4445909572dbd556c457c849c4ca58623d84b27c8fff1e74b0b4227d8b90d17b" - -[[package]] -name = "elliptic-curve" -version = "0.13.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" -dependencies = [ - "base16ct", - "crypto-bigint 0.5.5", - "digest 0.10.7", - "ff 0.13.0", - "generic-array 0.14.7", - "group 0.13.0", - "pkcs8 0.10.2", - "rand_core 0.6.4", - "sec1", - "subtle", - "zeroize", -] - -[[package]] -name = "embedded-io" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" - -[[package]] -name = "embedded-io" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" - -[[package]] -name = "encode_unicode" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" - -[[package]] -name = "encoding_rs" -version = "0.8.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "enr" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a3d8dc56e02f954cac8eb489772c552c473346fc34f67412bb6244fd647f7e4" -dependencies = [ - "base64 0.21.7", - "bytes", - "hex", - "k256", - "log", - "rand 0.8.5", - "rlp", - "serde", - "sha3", - "zeroize", -] - -[[package]] -name = "enr" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "972070166c68827e64bd1ebc8159dd8e32d9bc2da7ebe8f20b61308f7974ad30" -dependencies = [ - "alloy-rlp", - "base64 0.21.7", - "bytes", - "hex", - "log", - "rand 0.8.5", - "sha3", - "zeroize", -] - -[[package]] -name = "enum-map" -version = "2.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6866f3bfdf8207509a033af1a75a7b08abda06bbaaeae6669323fd5a097df2e9" -dependencies = [ - "enum-map-derive", - "serde", -] - -[[package]] -name = "enum-map-derive" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f282cfdfe92516eb26c2af8589c274c7c17681f5ecc03c18255fe741c6aa64eb" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "enumn" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "equivalent" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" - -[[package]] -name = "errno" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" -dependencies = [ - "libc", - "windows-sys 0.52.0", -] - -[[package]] -name = "eth-keystore" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" -dependencies = [ - "aes", - "ctr", - "digest 0.10.7", - "hex", - "hmac", - "pbkdf2 0.11.0", - "rand 0.8.5", - "scrypt", - "serde", - "serde_json", - "sha2 0.10.8", - "sha3", - "thiserror", - "uuid", -] - -[[package]] -name = "ethabi" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898" -dependencies = [ - "ethereum-types", - "hex", - "once_cell", - "regex", - "serde", - "serde_json", - "sha3", - "thiserror", - "uint", -] - -[[package]] -name = "ethbloom" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "scale-info", - "tiny-keccak", -] - -[[package]] -name = "ethereum-types" -version = "0.14.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" -dependencies = [ - "ethbloom", - "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "primitive-types", - "scale-info", - "uint", -] - -[[package]] -name = "ethers" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "816841ea989f0c69e459af1cf23a6b0033b19a55424a1ea3a30099becdb8dec0" -dependencies = [ - "ethers-addressbook", - "ethers-contract", - "ethers-core", - "ethers-middleware", - "ethers-providers", - "ethers-signers", -] - -[[package]] -name = "ethers-addressbook" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5495afd16b4faa556c3bba1f21b98b4983e53c1755022377051a975c3b021759" -dependencies = [ - "ethers-core", - "once_cell", - "serde", - "serde_json", -] - -[[package]] -name = "ethers-contract" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fceafa3578c836eeb874af87abacfb041f92b4da0a78a5edd042564b8ecdaaa" -dependencies = [ - "const-hex", - "ethers-contract-abigen", - "ethers-contract-derive", - "ethers-core", - "ethers-providers", - "futures-util", - "once_cell", - "pin-project", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "ethers-contract-abigen" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04ba01fbc2331a38c429eb95d4a570166781f14290ef9fdb144278a90b5a739b" -dependencies = [ - "Inflector", - "const-hex", - "dunce", - "ethers-core", - "eyre", - "prettyplease", - "proc-macro2", - "quote", - "regex", - "serde", - "serde_json", - "syn 2.0.79", - "toml", - "walkdir", -] - -[[package]] -name = "ethers-contract-derive" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87689dcabc0051cde10caaade298f9e9093d65f6125c14575db3fd8c669a168f" -dependencies = [ - "Inflector", - "const-hex", - "ethers-contract-abigen", - "ethers-core", - "proc-macro2", - "quote", - "serde_json", - "syn 2.0.79", -] - -[[package]] -name = "ethers-core" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82d80cc6ad30b14a48ab786523af33b37f28a8623fc06afd55324816ef18fb1f" -dependencies = [ - "arrayvec 0.7.6", - "bytes", - "cargo_metadata", - "chrono", - "const-hex", - "elliptic-curve", - "ethabi", - "generic-array 0.14.7", - "k256", - "num_enum 0.7.3", - "once_cell", - "open-fastrlp", - "rand 0.8.5", - "rlp", - "serde", - "serde_json", - "strum", - "syn 2.0.79", - "tempfile", - "thiserror", - "tiny-keccak", - "unicode-xid", -] - -[[package]] -name = "ethers-middleware" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48f9fdf09aec667c099909d91908d5eaf9be1bd0e2500ba4172c1d28bfaa43de" -dependencies = [ - "async-trait", - "auto_impl", - "ethers-contract", - "ethers-core", - "ethers-providers", - "ethers-signers", - "futures-channel", - "futures-locks", - "futures-util", - "instant", - "reqwest 0.11.27", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", - "tracing-futures", - "url", -] - -[[package]] -name = "ethers-providers" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6434c9a33891f1effc9c75472e12666db2fa5a0fec4b29af6221680a6fe83ab2" -dependencies = [ - "async-trait", - "auto_impl", - "base64 0.21.7", - "bytes", - "const-hex", - "enr 0.10.0", - 
"ethers-core", - "futures-core", - "futures-timer", - "futures-util", - "hashers", - "http 0.2.12", - "instant", - "jsonwebtoken", - "once_cell", - "pin-project", - "reqwest 0.11.27", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", - "tracing-futures", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "ws_stream_wasm", -] - -[[package]] -name = "ethers-signers" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "228875491c782ad851773b652dd8ecac62cda8571d3bc32a5853644dd26766c2" -dependencies = [ - "async-trait", - "coins-bip32", - "coins-bip39", - "const-hex", - "elliptic-curve", - "eth-keystore", - "ethers-core", - "rand 0.8.5", - "sha2 0.10.8", - "thiserror", - "tracing", -] - -[[package]] -name = "eyre" -version = "0.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" -dependencies = [ - "indenter", - "once_cell", -] - -[[package]] -name = "failure" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86" -dependencies = [ - "backtrace", - "failure_derive", -] - -[[package]] -name = "failure_derive" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure", -] - -[[package]] -name = "fastrand" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" - -[[package]] -name = "fastrlp" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" -dependencies = [ - "arrayvec 0.7.6", - "auto_impl", - "bytes", -] - -[[package]] -name = "ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "bitvec", - "rand_core 0.6.4", - "subtle", -] - -[[package]] -name = "ff" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" -dependencies = [ - "bitvec", - "byteorder", - "ff_derive", - "rand_core 0.6.4", - "subtle", -] - -[[package]] -name = "ff_derive" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9f54704be45ed286151c5e11531316eaef5b8f5af7d597b806fdb8af108d84a" -dependencies = [ - "addchain", - "cfg-if", - "num-bigint 0.3.3", - "num-integer", - "num-traits", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "fiat-crypto" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" - -[[package]] -name = "fibonacci-program" -version = "1.1.0" -dependencies = [ - "sp1-zkvm", -] - -[[package]] -name = "fibonacci-script" -version = "1.1.0" -dependencies = [ - "hex", - "itertools 0.12.1", - "sha2 0.10.8", - "sp1-build", - "sp1-sdk", -] - -[[package]] -name = "fixed-hash" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" -dependencies = [ - "byteorder", - "rand 0.8.5", - "rustc-hex", - "static_assertions", -] - -[[package]] -name = "flex-error" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c606d892c9de11507fa0dcffc116434f94e105d0bbdc4e405b61519464c49d7b" -dependencies = [ - "paste", -] - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - -[[package]] -name = "form_urlencoded" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" -dependencies = [ - "percent-encoding", -] - -[[package]] -name = "funty" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" - -[[package]] -name = "futures" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" - -[[package]] -name = "futures-executor" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-io" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" - -[[package]] -name = "futures-locks" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45ec6fe3675af967e67c5536c0b9d44e34e6c52f86bedc4ea49c5317b8e94d06" -dependencies = [ - "futures-channel", - "futures-task", -] - -[[package]] -name = "futures-macro" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "futures-sink" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" - -[[package]] -name = 
"futures-task" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" - -[[package]] -name = "futures-timer" -version = "3.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" -dependencies = [ - "gloo-timers", - "send_wrapper 0.4.0", -] - -[[package]] -name = "futures-util" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", -] - -[[package]] -name = "futures-utils-wasm" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" - -[[package]] -name = "fxhash" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" -dependencies = [ - "byteorder", -] - -[[package]] -name = "gcd" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d758ba1b47b00caf47f24925c0074ecb20d6dfcffe7f6d53395c0465674841a" - -[[package]] -name = "generic-array" -version = "0.14.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" -dependencies = [ - "typenum", - "version_check", - "zeroize", -] - -[[package]] -name = "generic-array" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96512db27971c2c3eece70a1e106fbe6c87760234e31e8f7e5634912fe52794a" -dependencies = [ - "serde", - "typenum", -] - -[[package]] -name = "getrandom" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" -dependencies = [ - "cfg-if", - "js-sys", - "libc", - "wasi", - "wasm-bindgen", -] - -[[package]] -name = "gimli" -version = "0.31.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" - -[[package]] -name = "git2" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b903b73e45dc0c6c596f2d37eccece7c1c8bb6e4407b001096387c63d0d93724" -dependencies = [ - "bitflags 2.6.0", - "libc", - "libgit2-sys", - "log", - "url", -] - -[[package]] -name = "glob" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" - -[[package]] -name = "gloo-timers" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" -dependencies = [ - "futures-channel", - "futures-core", - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "group" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff 0.12.1", - "memuse", - "rand_core 0.6.4", - "subtle", -] - -[[package]] -name = "group" -version = 
"0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" -dependencies = [ - "ff 0.13.0", - "rand_core 0.6.4", - "subtle", -] - -[[package]] -name = "h2" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 0.2.12", - "indexmap 2.5.0", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "h2" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" -dependencies = [ - "atomic-waker", - "bytes", - "fnv", - "futures-core", - "futures-sink", - "http 1.1.0", - "indexmap 2.5.0", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "half" -version = "1.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" - -[[package]] -name = "halo2" -version = "0.1.0-beta.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a23c779b38253fe1538102da44ad5bd5378495a61d2c4ee18d64eaa61ae5995" -dependencies = [ - "halo2_proofs", -] - -[[package]] -name = "halo2_proofs" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e925780549adee8364c7f2b685c753f6f3df23bde520c67416e93bf615933760" -dependencies = [ - "blake2b_simd", - "ff 0.12.1", - "group 0.12.1", - "pasta_curves 0.4.1", - "rand_core 0.6.4", - "rayon", -] - -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - -[[package]] -name = "hashbrown" -version = "0.14.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" -dependencies = [ - "ahash", - "allocator-api2", - "serde", -] - -[[package]] -name = "hashers" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2bca93b15ea5a746f220e56587f71e73c6165eab783df9e26590069953e3c30" -dependencies = [ - "fxhash", -] - -[[package]] -name = "heck" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" - -[[package]] -name = "hermit-abi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" -dependencies = [ - "serde", -] - -[[package]] -name = "hex-literal" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" - -[[package]] -name = "hmac" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" -dependencies = [ - "digest 0.10.7", -] - -[[package]] -name = "home" -version = "0.5.9" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" -dependencies = [ - "windows-sys 0.52.0", -] - -[[package]] -name = "http" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" -dependencies = [ - "bytes", - "http 0.2.12", - "pin-project-lite", -] - -[[package]] -name = "http-body" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" -dependencies = [ - "bytes", - "http 1.1.0", -] - -[[package]] -name = "http-body-util" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" -dependencies = [ - "bytes", - "futures-util", - "http 1.1.0", - "http-body 1.0.1", - "pin-project-lite", -] - -[[package]] -name = "httparse" -version = "1.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" - -[[package]] -name = "httpdate" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" - -[[package]] -name = "hyper" -version = "0.14.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.3.26", - "http 0.2.12", - "http-body 0.4.6", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2", - "tokio", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "hyper" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" -dependencies = [ - "bytes", - "futures-channel", - "futures-util", - "h2 0.4.6", - "http 1.1.0", - "http-body 1.0.1", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "smallvec", - "tokio", - "want", -] - -[[package]] -name = "hyper-rustls" -version = "0.27.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" -dependencies = [ - "futures-util", - "http 1.1.0", - "hyper 1.4.1", - "hyper-util", - "rustls", - "rustls-pki-types", - "tokio", - "tokio-rustls", - "tower-service", - "webpki-roots", -] - -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes", - "hyper 0.14.30", - "native-tls", - "tokio", - "tokio-native-tls", -] - -[[package]] -name = "hyper-tls" -version = "0.6.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" -dependencies = [ - "bytes", - "http-body-util", - "hyper 1.4.1", - "hyper-util", - "native-tls", - "tokio", - "tokio-native-tls", - "tower-service", -] - -[[package]] -name = "hyper-util" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" -dependencies = [ - "bytes", - "futures-channel", - "futures-util", - "http 1.1.0", - "http-body 1.0.1", - "hyper 1.4.1", - "pin-project-lite", - "socket2", - "tokio", - "tower-service", - "tracing", -] - -[[package]] -name = "iana-time-zone" -version = "0.1.61" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" -dependencies = [ - "android_system_properties", - "core-foundation-sys", - "iana-time-zone-haiku", - "js-sys", - "wasm-bindgen", - "windows-core", -] - -[[package]] -name = "iana-time-zone-haiku" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" -dependencies = [ - "cc", -] - -[[package]] -name = "ident_case" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" - -[[package]] -name = "idna" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "impl-codec" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-rlp" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" -dependencies = [ - "rlp", -] - -[[package]] -name = "impl-serde" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" -dependencies = [ - "serde", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "indenter" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" - -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", - "serde", -] - -[[package]] -name = "indexmap" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" -dependencies = [ - "equivalent", - "hashbrown 0.14.5", - "serde", -] - -[[package]] -name = "indicatif" -version = "0.17.8" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "763a5a8f45087d6bcea4222e7b72c291a054edf80e4ef6efd2a4979878c7bea3" -dependencies = [ - "console", - "instant", - "number_prefix", - "portable-atomic", - "unicode-width", -] - -[[package]] -name = "inout" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" -dependencies = [ - "generic-array 0.14.7", -] - -[[package]] -name = "instant" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "io-program" -version = "1.1.0" -dependencies = [ - "serde", - "sp1-zkvm", -] - -[[package]] -name = "io-script" -version = "1.1.0" -dependencies = [ - "serde", - "sp1-build", - "sp1-sdk", -] - -[[package]] -name = "ipnet" -version = "2.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" - -[[package]] -name = "is-prime-program" -version = "1.1.0" -dependencies = [ - "sp1-zkvm", -] - -[[package]] -name = "is-prime-script" -version = "1.1.0" -dependencies = [ - "sp1-build", - "sp1-sdk", -] - -[[package]] -name = "is_terminal_polyfill" -version = "1.70.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" - -[[package]] -name = "itertools" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" -dependencies = [ - "either", -] - -[[package]] -name = "itertools" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" -dependencies = [ - "either", -] - -[[package]] -name = "itertools" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" - -[[package]] -name = "jobserver" -version = "0.1.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" -dependencies = [ - "libc", -] - -[[package]] -name = "js-sys" -version = "0.3.70" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "json-lib" -version = "1.1.0" -dependencies = [ - "serde", -] - -[[package]] -name = "json-program" -version = "1.1.0" -dependencies = [ - "json-lib", - "serde", - "serde_json", - "sp1-zkvm", -] - -[[package]] -name = "json-script" -version = "1.1.0" -dependencies = [ - "json-lib", - "serde", - "serde_json", - "sp1-build", - "sp1-sdk", -] - -[[package]] -name = "jsonwebtoken" -version = "8.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" -dependencies = [ - "base64 0.21.7", - "pem", - "ring 0.16.20", - "serde", - 
"serde_json", - "simple_asn1", -] - -[[package]] -name = "jubjub" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a575df5f985fe1cd5b2b05664ff6accfc46559032b954529fd225a2168d27b0f" -dependencies = [ - "bitvec", - "bls12_381 0.7.1", - "ff 0.12.1", - "group 0.12.1", - "rand_core 0.6.4", - "subtle", -] - -[[package]] -name = "k256" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" -dependencies = [ - "cfg-if", - "ecdsa", - "elliptic-curve", - "once_cell", - "sha2 0.10.8", - "signature", -] - -[[package]] -name = "keccak" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" -dependencies = [ - "cpufeatures", -] - -[[package]] -name = "keccak-asm" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" -dependencies = [ - "digest 0.10.7", - "sha3-asm", -] - -[[package]] -name = "kzg-rs" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0850eb19206463a61bede4f7b7e6b21731807137619044b1f3c287ebcfe2b3b0" -dependencies = [ - "ff 0.13.0", - "hex", - "sha2 0.10.8", - "sp1_bls12_381", - "spin 0.9.8", -] - -[[package]] -name = "lazy_static" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" -dependencies = [ - "spin 0.9.8", -] - -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - -[[package]] -name = "libc" -version = "0.2.159" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" - -[[package]] -name = "libgit2-sys" -version = "0.17.0+1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10472326a8a6477c3c20a64547b0059e4b0d086869eee31e6d7da728a8eb7224" -dependencies = [ - "cc", - "libc", - "libz-sys", - "pkg-config", -] - -[[package]] -name = "libloading" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" -dependencies = [ - "cfg-if", - "windows-targets 0.52.6", -] - -[[package]] -name = "libm" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" - -[[package]] -name = "libredox" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" -dependencies = [ - "bitflags 2.6.0", - "libc", -] - -[[package]] -name = "libz-sys" -version = "1.1.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "linux-raw-sys" -version = "0.4.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" - -[[package]] -name = "lock_api" 
-version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" -dependencies = [ - "autocfg", - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" - -[[package]] -name = "lru" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904" -dependencies = [ - "hashbrown 0.14.5", -] - -[[package]] -name = "matchers" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" -dependencies = [ - "regex-automata 0.1.10", -] - -[[package]] -name = "matchit" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" - -[[package]] -name = "memchr" -version = "2.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" - -[[package]] -name = "memuse" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2145869435ace5ea6ea3d35f59be559317ec9a0d04e1812d5f185a87b6d36f1a" - -[[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - -[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - -[[package]] -name = "miniz_oxide" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" -dependencies = [ - "adler2", -] - -[[package]] -name = "mio" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" -dependencies = [ - "hermit-abi", - "libc", - "wasi", - "windows-sys 0.52.0", -] - -[[package]] -name = "modular-bitfield" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a53d79ba8304ac1c4f9eb3b9d281f21f7be9d4626f72ce7df4ad8fbde4f38a74" -dependencies = [ - "modular-bitfield-impl", - "static_assertions", -] - -[[package]] -name = "modular-bitfield-impl" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a7d5f7076603ebc68de2dc6a650ec331a062a13abaa346975be747bbfa4b789" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "native-tls" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" -dependencies = [ - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - -[[package]] -name = "nix" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" -dependencies = [ - "bitflags 2.6.0", - 
"cfg-if", - "cfg_aliases", - "libc", -] - -[[package]] -name = "nodrop" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" - -[[package]] -name = "nohash-hasher" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" - -[[package]] -name = "nom" -version = "7.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" -dependencies = [ - "memchr", - "minimal-lexical", -] - -[[package]] -name = "ntapi" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" -dependencies = [ - "winapi", -] - -[[package]] -name = "nu-ansi-term" -version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" -dependencies = [ - "overload", - "winapi", -] - -[[package]] -name = "num" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" -dependencies = [ - "num-bigint 0.4.6", - "num-complex", - "num-integer", - "num-iter", - "num-rational", - "num-traits", -] - -[[package]] -name = "num-bigint" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-bigint" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" -dependencies = [ - "num-integer", - "num-traits", -] - -[[package]] -name = "num-bigint-dig" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" -dependencies = [ - "byteorder", - "lazy_static", - "libm", - "num-integer", - "num-iter", - "num-traits", - "rand 0.8.5", - "smallvec", - "zeroize", -] - -[[package]] -name = "num-complex" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" -dependencies = [ - "num-traits", -] - -[[package]] -name = "num-conv" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" - -[[package]] -name = "num-derive" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "num-integer" -version = "0.1.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" -dependencies = [ - "num-traits", -] - -[[package]] -name = "num-iter" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" -dependencies = [ - "autocfg", - "num-integer", - 
"num-traits", -] - -[[package]] -name = "num-modular" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17bb261bf36fa7d83f4c294f834e91256769097b3cb505d44831e0a179ac647f" - -[[package]] -name = "num-order" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "537b596b97c40fcf8056d153049eb22f481c17ebce72a513ec9286e4986d1bb6" -dependencies = [ - "num-modular", -] - -[[package]] -name = "num-rational" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" -dependencies = [ - "num-bigint 0.4.6", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" -dependencies = [ - "autocfg", - "libm", -] - -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "num_enum" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9" -dependencies = [ - "num_enum_derive 0.5.11", -] - -[[package]] -name = "num_enum" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" -dependencies = [ - "num_enum_derive 0.7.3", -] - -[[package]] -name = "num_enum_derive" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" -dependencies = [ - "proc-macro-crate 1.3.1", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "num_enum_derive" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" -dependencies = [ - "proc-macro-crate 3.2.0", - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "num_threads" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" -dependencies = [ - "libc", -] - -[[package]] -name = "number_prefix" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" - -[[package]] -name = "nybbles" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95f06be0417d97f81fe4e5c86d7d01b392655a9cac9c19a848aa033e18937b23" -dependencies = [ - "alloy-rlp", - "const-hex", - "proptest", - "serde", - "smallvec", -] - -[[package]] -name = "object" -version = "0.36.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" -dependencies = [ - "memchr", -] - -[[package]] -name = "once_cell" -version = "1.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82881c4be219ab5faaf2ad5e5e5ecdff8c66bd7402ca3160975c93b24961afd1" -dependencies = [ - "portable-atomic", -] - -[[package]] -name = "oneshot" -version = 
"0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e296cf87e61c9cfc1a61c3c63a0f7f286ed4554e0e22be84e8a38e1d264a2a29" - -[[package]] -name = "op-alloy-consensus" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21aad1fbf80d2bcd7406880efc7ba109365f44bbb72896758ddcbfa46bf1592c" -dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-primitives 0.8.5", - "alloy-rlp", - "alloy-serde", - "derive_more 1.0.0", - "serde", - "spin 0.9.8", -] - -[[package]] -name = "op-alloy-rpc-types" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e281fbfc2198b7c0c16457d6524f83d192662bc9f3df70f24c3038d4521616df" -dependencies = [ - "alloy-eips", - "alloy-network-primitives", - "alloy-primitives 0.8.5", - "alloy-rpc-types-eth", - "alloy-serde", - "cfg-if", - "hashbrown 0.14.5", - "op-alloy-consensus", - "serde", - "serde_json", -] - -[[package]] -name = "opaque-debug" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" - -[[package]] -name = "open-fastrlp" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "786393f80485445794f6043fd3138854dd109cc6c4bd1a6383db304c9ce9b9ce" -dependencies = [ - "arrayvec 0.7.6", - "auto_impl", - "bytes", - "ethereum-types", - "open-fastrlp-derive", -] - -[[package]] -name = "open-fastrlp-derive" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" -dependencies = [ - "bytes", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "openssl" -version = "0.10.66" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" -dependencies = [ - "bitflags 2.6.0", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - -[[package]] -name = "openssl-sys" -version = "0.9.103" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "option-ext" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" - -[[package]] -name = "overload" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - -[[package]] -name = "p256" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" -dependencies = [ - "ecdsa", - "elliptic-curve", - "primeorder", - "sha2 0.10.8", -] - -[[package]] -name = "p3-air" 
-version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45e909ef66fa5d77ff0fd3cb5af4b33b27fa6fb68d02b9b1e70edbc29383e565" -dependencies = [ - "p3-field", - "p3-matrix", -] - -[[package]] -name = "p3-baby-bear" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46965470aac1cddfe52f535424b59d52f2fffef0fdeb9dbed19da39b1d8f048a" -dependencies = [ - "num-bigint 0.4.6", - "p3-field", - "p3-mds", - "p3-poseidon2", - "p3-symmetric", - "rand 0.8.5", - "serde", -] - -[[package]] -name = "p3-blake3" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36ef32d6ea21dd5cf9fec8a31bf0c64e6ceee8901dbf50966b83a443093c2aba" -dependencies = [ - "blake3", - "p3-symmetric", -] - -[[package]] -name = "p3-bn254-fr" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e3edfca6be3b3109adf8e3330baec30c3fc5f9f4d63d27aaec1b471ca51ed67" -dependencies = [ - "ff 0.13.0", - "num-bigint 0.4.6", - "p3-field", - "p3-poseidon2", - "p3-symmetric", - "rand 0.8.5", - "serde", -] - -[[package]] -name = "p3-challenger" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6662ea899a5d848b60c699944491d72757873b5e1fd46798e4712f90a03a4e9" -dependencies = [ - "p3-field", - "p3-maybe-rayon", - "p3-symmetric", - "p3-util", - "tracing", -] - -[[package]] -name = "p3-commit" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc3563918b5cc44ef5280bf9b51753e70dc78802de25e3fb81ed6c94617ccb6e" -dependencies = [ - "itertools 0.12.1", - "p3-challenger", - "p3-field", - "p3-matrix", - "p3-util", - "serde", -] - -[[package]] -name = "p3-dft" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510095701819d83c9509fe825bbf1ebfe50426ae75149df5fe1dcfd18261323a" -dependencies = [ - "p3-field", - "p3-matrix", - "p3-maybe-rayon", - "p3-util", - "tracing", -] - -[[package]] -name = "p3-field" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61f1977a0a65789f719aa824119c332c4676b000bdbfe94d312fb6244a70d601" -dependencies = [ - "itertools 0.12.1", - "num-bigint 0.4.6", - "num-traits", - "p3-util", - "rand 0.8.5", - "serde", -] - -[[package]] -name = "p3-fri" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c22ddb958f200d9289cc73ff68847b0167ca0c14557b791dd9e318f98c2d1b28" -dependencies = [ - "itertools 0.12.1", - "p3-challenger", - "p3-commit", - "p3-dft", - "p3-field", - "p3-interpolation", - "p3-matrix", - "p3-maybe-rayon", - "p3-util", - "serde", - "tracing", -] - -[[package]] -name = "p3-interpolation" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d032cda212f6b408d7d5b0b9a8270a9455acb93742fe55a0880d82be8e90e500" -dependencies = [ - "p3-field", - "p3-matrix", - "p3-util", -] - -[[package]] -name = "p3-keccak" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c56abdd5a8a780049d2f8e92cea1df57b55a2ef50a40d1103f2732f7a00e4b1" -dependencies = [ - "p3-symmetric", - "tiny-keccak", -] - -[[package]] -name = "p3-keccak-air" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e8398f1694ccc38513df0b8cab5f9ef7325423f27cd9e4fa20bdc77d5079cf1b" -dependencies = [ - "p3-air", - "p3-field", - "p3-matrix", - "p3-maybe-rayon", - "p3-util", - "tracing", - "tracing-forest", - "tracing-subscriber", -] - -[[package]] -name = "p3-matrix" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d548ee0b834f8e2ebc5037073acd101a3b0ca41a2d1d28a15ba0ccd9059495b0" -dependencies = [ - "itertools 0.12.1", - "p3-field", - "p3-maybe-rayon", - "p3-util", - "rand 0.8.5", - "serde", - "tracing", -] - -[[package]] -name = "p3-maybe-rayon" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55f5575d3d61bedb3e05681abb0f36b8bb339d65aa395d50756bfa64e9cd3f46" -dependencies = [ - "rayon", -] - -[[package]] -name = "p3-mds" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6e57ed310d59245f93e24ee805ea7aa16fc9c505551b76a15f5e50f29d177e" -dependencies = [ - "itertools 0.12.1", - "p3-dft", - "p3-field", - "p3-matrix", - "p3-symmetric", - "p3-util", - "rand 0.8.5", -] - -[[package]] -name = "p3-merkle-tree" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af46b41cba75d483ec8a553cbab1d2d794935ae3403d75394acfa4fb2c977cce" -dependencies = [ - "itertools 0.12.1", - "p3-commit", - "p3-field", - "p3-matrix", - "p3-maybe-rayon", - "p3-symmetric", - "p3-util", - "serde", - "tracing", -] - -[[package]] -name = "p3-poseidon2" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adaba6f14c197203530e233badce0ca1126ba3bf3c9ff766505b497bdad0bee1" -dependencies = [ - "gcd", - "p3-field", - "p3-mds", - "p3-symmetric", - "rand 0.8.5", -] - -[[package]] -name = "p3-symmetric" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ecc4282566eb14f48be7707f6745c4dff6be664984d59ec0fb1849cd82b5c2" -dependencies = [ - "itertools 0.12.1", - "p3-field", - "serde", -] - -[[package]] -name = "p3-uni-stark" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1af5c038b22b058bf1d49fb1ea3dd6c240a3e46c3278fde5c444e0034f7ffe37" -dependencies = [ - "itertools 0.12.1", - "p3-air", - "p3-challenger", - "p3-commit", - "p3-dft", - "p3-field", - "p3-matrix", - "p3-maybe-rayon", - "p3-util", - "postcard", - "serde", - "tracing", - "tracing-forest", - "tracing-subscriber", -] - -[[package]] -name = "p3-util" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79f3fef0e00d9d7246385e758c4cd39b4efcbbcea31752471491ab502631385e" -dependencies = [ - "serde", -] - -[[package]] -name = "pairing" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "135590d8bdba2b31346f9cd1fb2a912329f5135e832a4f422942eb6ead8b6b3b" -dependencies = [ - "group 0.12.1", -] - -[[package]] -name = "pairing" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" -dependencies = [ - "group 0.13.0", -] - -[[package]] -name = "parity-scale-codec" -version = "3.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" -dependencies = [ - "arrayvec 0.7.6", - "bitvec", - "byte-slice-cast", - 
"impl-trait-for-tuples", - "parity-scale-codec-derive", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "3.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" -dependencies = [ - "proc-macro-crate 3.2.0", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "parking_lot" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-targets 0.52.6", -] - -[[package]] -name = "pasta_curves" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc65faf8e7313b4b1fbaa9f7ca917a0eed499a9663be71477f87993604341d8" -dependencies = [ - "blake2b_simd", - "ff 0.12.1", - "group 0.12.1", - "lazy_static", - "rand 0.8.5", - "static_assertions", - "subtle", -] - -[[package]] -name = "pasta_curves" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3e57598f73cc7e1b2ac63c79c517b31a0877cd7c402cdcaa311b5208de7a095" -dependencies = [ - "blake2b_simd", - "ff 0.13.0", - "group 0.13.0", - "lazy_static", - "rand 0.8.5", - "static_assertions", - "subtle", -] - -[[package]] -name = "paste" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" - -[[package]] -name = "patch-testing-program" -version = "1.1.0" -dependencies = [ - "alloy-primitives 0.7.7", - "curve25519-dalek", - "curve25519-dalek-ng", - "ed25519-consensus", - "ed25519-dalek", - "k256", - "secp256k1", - "sha2 0.10.8", - "sha2 0.9.9", - "sp1-zkvm", - "tiny-keccak", -] - -[[package]] -name = "patch-testing-script" -version = "1.1.0" -dependencies = [ - "sp1-build", - "sp1-core-executor", - "sp1-core-machine", - "sp1-sdk", -] - -[[package]] -name = "pbkdf2" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" -dependencies = [ - "digest 0.10.7", -] - -[[package]] -name = "pbkdf2" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" -dependencies = [ - "digest 0.10.7", - "hmac", -] - -[[package]] -name = "pem" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" -dependencies = [ - "base64 0.13.1", -] - -[[package]] -name = "pem-rfc7468" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01de5d978f34aa4b2296576379fcc416034702fd94117c56ffd8a1a767cefb30" -dependencies = [ - "base64ct", -] - -[[package]] -name = "pem-rfc7468" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" -dependencies = [ - "base64ct", -] - -[[package]] -name = "percent-encoding" -version = "2.3.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" - -[[package]] -name = "pest" -version = "2.7.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" -dependencies = [ - "memchr", - "thiserror", - "ucd-trie", -] - -[[package]] -name = "pharos" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" -dependencies = [ - "futures", - "rustc_version 0.4.1", -] - -[[package]] -name = "pin-project" -version = "1.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" -dependencies = [ - "pin-project-internal", -] - -[[package]] -name = "pin-project-internal" -version = "1.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "pin-project-lite" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "pkcs1" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a78f66c04ccc83dd4486fd46c33896f4e17b24a7a3a6400dedc48ed0ddd72320" -dependencies = [ - "der 0.5.1", - "pkcs8 0.8.0", - "zeroize", -] - -[[package]] -name = "pkcs1" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" -dependencies = [ - "der 0.7.9", - "pkcs8 0.10.2", - "spki 0.7.3", -] - -[[package]] -name = "pkcs8" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" -dependencies = [ - "der 0.5.1", - "spki 0.5.4", - "zeroize", -] - -[[package]] -name = "pkcs8" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" -dependencies = [ - "der 0.7.9", - "spki 0.7.3", -] - -[[package]] -name = "pkg-config" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" - -[[package]] -name = "portable-atomic" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" - -[[package]] -name = "postcard" -version = "1.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f7f0a8d620d71c457dd1d47df76bb18960378da56af4527aaa10f515eee732e" -dependencies = [ - "cobs", - "embedded-io 0.4.0", - "embedded-io 0.6.1", - "serde", -] - -[[package]] -name = "powerfmt" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" - -[[package]] -name = "ppv-lite86" -version = 
"0.2.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" -dependencies = [ - "zerocopy", -] - -[[package]] -name = "prettyplease" -version = "0.2.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" -dependencies = [ - "proc-macro2", - "syn 2.0.79", -] - -[[package]] -name = "primeorder" -version = "0.13.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" -dependencies = [ - "elliptic-curve", -] - -[[package]] -name = "primitive-types" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" -dependencies = [ - "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "scale-info", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" -dependencies = [ - "once_cell", - "toml_edit 0.19.15", -] - -[[package]] -name = "proc-macro-crate" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" -dependencies = [ - "toml_edit 0.22.22", -] - -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr2" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" -dependencies = [ - "proc-macro2", - "quote", -] - -[[package]] -name = "proc-macro-error2" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" -dependencies = [ - "proc-macro-error-attr2", - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "proc-macro2" -version = "1.0.86" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "proptest" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" -dependencies = [ - "bit-set", - "bit-vec", - "bitflags 2.6.0", - "lazy_static", - "num-traits", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rand_xorshift", - "regex-syntax 0.8.5", - "rusty-fork", - "tempfile", - "unarray", -] - -[[package]] -name = "prost" -version = "0.12.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" -dependencies = [ 
- "bytes", - "prost-derive 0.12.6", -] - -[[package]] -name = "prost" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" -dependencies = [ - "bytes", - "prost-derive 0.13.3", -] - -[[package]] -name = "prost-derive" -version = "0.12.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" -dependencies = [ - "anyhow", - "itertools 0.12.1", - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "prost-derive" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" -dependencies = [ - "anyhow", - "itertools 0.13.0", - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "prost-types" -version = "0.12.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" -dependencies = [ - "prost 0.12.6", -] - -[[package]] -name = "prost-types" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4759aa0d3a6232fb8dbdb97b61de2c20047c68aca932c7ed76da9d788508d670" -dependencies = [ - "prost 0.13.3", -] - -[[package]] -name = "psm" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa37f80ca58604976033fae9515a8a2989fc13797d953f7c04fb8fa36a11f205" -dependencies = [ - "cc", -] - -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - -[[package]] -name = "quinn" -version = "0.11.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" -dependencies = [ - "bytes", - "pin-project-lite", - "quinn-proto", - "quinn-udp", - "rustc-hash 2.0.0", - "rustls", - "socket2", - "thiserror", - "tokio", - "tracing", -] - -[[package]] -name = "quinn-proto" -version = "0.11.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" -dependencies = [ - "bytes", - "rand 0.8.5", - "ring 0.17.8", - "rustc-hash 2.0.0", - "rustls", - "slab", - "thiserror", - "tinyvec", - "tracing", -] - -[[package]] -name = "quinn-udp" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" -dependencies = [ - "libc", - "once_cell", - "socket2", - "tracing", - "windows-sys 0.59.0", -] - -[[package]] -name = "quote" -version = "1.0.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "radium" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", - "rand_pcg", -] - -[[package]] -name 
= "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", - "serde", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_pcg" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_xorshift" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" -dependencies = [ - "rand_core 0.6.4", -] - -[[package]] -name = "rayon" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" -dependencies = [ - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" -dependencies = [ - "crossbeam-deque", - "crossbeam-utils", -] - -[[package]] -name = "rayon-scan" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f87cc11a0140b4b0da0ffc889885760c61b13672d80a908920b2c0df078fa14" -dependencies = [ - "rayon", -] - -[[package]] -name = "redox_syscall" -version = "0.5.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" -dependencies = [ - "bitflags 2.6.0", -] - -[[package]] -name = "redox_users" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" -dependencies = [ - "getrandom", - "libredox", - "thiserror", -] - -[[package]] -name = "regex" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" -dependencies = [ - "aho-corasick", - "memchr", - "regex-automata 0.4.8", - "regex-syntax 0.8.5", -] - -[[package]] -name = "regex-automata" -version = "0.1.10" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" -dependencies = [ - "regex-syntax 0.6.29", -] - -[[package]] -name = "regex-automata" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax 0.8.5", -] - -[[package]] -name = "regex-program" -version = "1.1.0" -dependencies = [ - "regex", - "sp1-zkvm", -] - -[[package]] -name = "regex-script" -version = "1.1.0" -dependencies = [ - "sp1-build", - "sp1-sdk", -] - -[[package]] -name = "regex-syntax" -version = "0.6.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" - -[[package]] -name = "regex-syntax" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" - -[[package]] -name = "reqwest" -version = "0.11.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" -dependencies = [ - "base64 0.21.7", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2 0.3.26", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.30", - "hyper-tls 0.5.0", - "ipnet", - "js-sys", - "log", - "mime", - "native-tls", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls-pemfile 1.0.4", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 0.1.2", - "system-configuration 0.5.1", - "tokio", - "tokio-native-tls", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg", -] - -[[package]] -name = "reqwest" -version = "0.12.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" -dependencies = [ - "base64 0.22.1", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2 0.4.6", - "http 1.1.0", - "http-body 1.0.1", - "http-body-util", - "hyper 1.4.1", - "hyper-rustls", - "hyper-tls 0.6.0", - "hyper-util", - "ipnet", - "js-sys", - "log", - "mime", - "native-tls", - "once_cell", - "percent-encoding", - "pin-project-lite", - "quinn", - "rustls", - "rustls-pemfile 2.2.0", - "rustls-pki-types", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 1.0.1", - "system-configuration 0.6.1", - "tokio", - "tokio-native-tls", - "tokio-rustls", - "tokio-util", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-streams", - "web-sys", - "webpki-roots", - "windows-registry", -] - -[[package]] -name = "reqwest-middleware" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "562ceb5a604d3f7c885a792d42c199fd8af239d0a51b2fa6a78aafa092452b04" -dependencies = [ - "anyhow", - "async-trait", - "http 1.1.0", - "reqwest 0.12.8", - "serde", - "thiserror", - "tower-service", -] - -[[package]] -name = "reth-blockchain-tree-api" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "reth-consensus", - "reth-execution-errors", - "reth-primitives", - "reth-storage-errors", - "thiserror", -] - -[[package]] -name = "reth-chainspec" -version = "1.0.6" -source = 
"git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "alloy-chains", - "alloy-eips", - "alloy-genesis", - "alloy-primitives 0.8.5", - "alloy-trie", - "auto_impl", - "derive_more 1.0.0", - "once_cell", - "op-alloy-rpc-types", - "reth-ethereum-forks", - "reth-network-peers", - "reth-primitives-traits", - "reth-trie-common", - "serde", - "serde_json", -] - -[[package]] -name = "reth-codecs" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-genesis", - "alloy-primitives 0.8.5", - "alloy-trie", - "bytes", - "modular-bitfield", - "reth-codecs-derive", - "serde", -] - -[[package]] -name = "reth-codecs-derive" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "convert_case 0.6.0", - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "reth-consensus" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "auto_impl", - "derive_more 1.0.0", - "reth-primitives", -] - -[[package]] -name = "reth-consensus-common" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "reth-chainspec", - "reth-consensus", - "reth-primitives", -] - -[[package]] -name = "reth-db-models" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "bytes", - "modular-bitfield", - "reth-codecs", - "reth-primitives", - "serde", -] - -[[package]] -name = "reth-errors" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "reth-blockchain-tree-api", - "reth-consensus", - "reth-execution-errors", - "reth-fs-util", - "reth-storage-errors", - "thiserror", -] - -[[package]] -name = "reth-ethereum-consensus" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "reth-chainspec", - "reth-consensus", - "reth-consensus-common", - "reth-primitives", - "tracing", -] - -[[package]] -name = "reth-ethereum-forks" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "alloy-chains", - "alloy-primitives 0.8.5", - "alloy-rlp", - "auto_impl", - "crc", - "dyn-clone", - "once_cell", - "rustc-hash 2.0.0", - "serde", - "thiserror-no-std", -] - -[[package]] -name = "reth-evm" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "alloy-eips", - "auto_impl", - "futures-util", - "reth-chainspec", - "reth-execution-errors", - "reth-execution-types", - "reth-primitives", - "reth-prune-types", - "reth-storage-errors", - "revm", - "revm-primitives", -] - -[[package]] -name = "reth-evm-ethereum" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "alloy-eips", - "alloy-sol-types 0.8.5", - "reth-chainspec", - "reth-ethereum-consensus", - "reth-ethereum-forks", - "reth-evm", - 
"reth-execution-types", - "reth-primitives", - "reth-prune-types", - "reth-revm", - "revm-primitives", -] - -[[package]] -name = "reth-evm-optimism" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "reth-chainspec", - "reth-ethereum-forks", - "reth-evm", - "reth-execution-errors", - "reth-execution-types", - "reth-optimism-consensus", - "reth-primitives", - "reth-prune-types", - "reth-revm", - "revm", - "revm-primitives", - "thiserror", - "tracing", -] - -[[package]] -name = "reth-execution-errors" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "alloy-eips", - "alloy-primitives 0.8.5", - "alloy-rlp", - "derive_more 1.0.0", - "nybbles", - "reth-consensus", - "reth-prune-types", - "reth-storage-errors", - "revm-primitives", -] - -[[package]] -name = "reth-execution-types" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "reth-chainspec", - "reth-execution-errors", - "reth-primitives", - "reth-trie", - "revm", -] - -[[package]] -name = "reth-fs-util" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "reth-network-peers" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "alloy-primitives 0.8.5", - "alloy-rlp", - "enr 0.12.1", - "serde_with", - "thiserror", - "url", -] - -[[package]] -name = "reth-optimism-chainspec" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "alloy-chains", - "alloy-primitives 0.8.5", - "derive_more 1.0.0", - "once_cell", - "reth-chainspec", - "reth-ethereum-forks", - "reth-primitives-traits", - "serde_json", -] - -[[package]] -name = "reth-optimism-consensus" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "reth-chainspec", - "reth-consensus", - "reth-consensus-common", - "reth-primitives", - "tracing", -] - -[[package]] -name = "reth-primitives" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-genesis", - "alloy-primitives 0.8.5", - "alloy-rlp", - "alloy-rpc-types", - "alloy-serde", - "bytes", - "derive_more 1.0.0", - "k256", - "once_cell", - "op-alloy-rpc-types", - "rayon", - "reth-chainspec", - "reth-ethereum-forks", - "reth-optimism-chainspec", - "reth-primitives-traits", - "reth-static-file-types", - "reth-trie-common", - "revm-primitives", - "secp256k1", - "serde", - "thiserror", -] - -[[package]] -name = "reth-primitives-traits" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-genesis", - "alloy-primitives 0.8.5", - "alloy-rlp", - "alloy-rpc-types-eth", - "byteorder", - "bytes", - "derive_more 1.0.0", - "modular-bitfield", - "reth-codecs", - "revm-primitives", - "roaring", - "serde", -] - -[[package]] -name = "reth-prune-types" 
-version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "alloy-primitives 0.8.5", - "bytes", - "derive_more 1.0.0", - "modular-bitfield", - "reth-codecs", - "serde", - "thiserror", -] - -[[package]] -name = "reth-revm" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "reth-chainspec", - "reth-consensus-common", - "reth-execution-errors", - "reth-primitives", - "reth-prune-types", - "reth-storage-api", - "reth-storage-errors", - "revm", -] - -[[package]] -name = "reth-stages-types" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "alloy-primitives 0.8.5", - "bytes", - "modular-bitfield", - "reth-codecs", - "reth-trie-common", - "serde", -] - -[[package]] -name = "reth-static-file-types" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "alloy-primitives 0.8.5", - "derive_more 1.0.0", - "serde", - "strum", -] - -[[package]] -name = "reth-storage-api" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "auto_impl", - "reth-chainspec", - "reth-db-models", - "reth-execution-types", - "reth-primitives", - "reth-prune-types", - "reth-stages-types", - "reth-storage-errors", - "reth-trie", -] - -[[package]] -name = "reth-storage-errors" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "alloy-rlp", - "derive_more 1.0.0", - "reth-fs-util", - "reth-primitives", -] - -[[package]] -name = "reth-trie" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "alloy-rlp", - "auto_impl", - "derive_more 1.0.0", - "itertools 0.13.0", - "rayon", - "reth-execution-errors", - "reth-primitives", - "reth-stages-types", - "reth-storage-errors", - "reth-trie-common", - "revm", - "tracing", -] - -[[package]] -name = "reth-trie-common" -version = "1.0.6" -source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374182a43a3602aaa953d37aa9217b" -dependencies = [ - "alloy-consensus", - "alloy-genesis", - "alloy-primitives 0.8.5", - "alloy-rlp", - "alloy-trie", - "bytes", - "derive_more 1.0.0", - "itertools 0.13.0", - "nybbles", - "reth-codecs", - "reth-primitives-traits", - "revm-primitives", - "serde", -] - -[[package]] -name = "revm" -version = "14.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f719e28cc6fdd086f8bc481429e587740d20ad89729cec3f5f5dd7b655474df" -dependencies = [ - "auto_impl", - "cfg-if", - "dyn-clone", - "revm-interpreter", - "revm-precompile", - "serde", - "serde_json", -] - -[[package]] -name = "revm-interpreter" -version = "10.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "959ecbc36802de6126852479844737f20194cf8e6718e0c30697d306a2cca916" -dependencies = [ - "revm-primitives", - "serde", -] - -[[package]] -name = "revm-precompile" -version = "11.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e25f604cb9db593ca3013be8c00f310d6790ccb1b7d8fbbdd4660ec8888043a" -dependencies = [ - "aurora-engine-modexp", - 
"c-kzg", - "cfg-if", - "k256", - "kzg-rs", - "once_cell", - "p256", - "revm-primitives", - "ripemd", - "secp256k1", - "sha2 0.10.8", - "substrate-bn 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "revm-primitives" -version = "9.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7a6bff9dbde3370a5ac9555104117f7e6039b3cc76e8d5d9d01899088beca2a" -dependencies = [ - "alloy-eips", - "alloy-primitives 0.8.5", - "auto_impl", - "bitflags 2.6.0", - "bitvec", - "c-kzg", - "cfg-if", - "dyn-clone", - "enumn", - "hashbrown 0.14.5", - "hex", - "kzg-rs", - "serde", -] - -[[package]] -name = "rfc6979" -version = "0.4.0" -source = "git+https://github.com/sp1-patches/signatures?branch=patch-ecdsa-v0.16.9#de1b108e8140dcb86ecf56f1215ac4b7fab6fcd8" -dependencies = [ - "hmac", - "subtle", -] - -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin 0.5.2", - "untrusted 0.7.1", - "web-sys", - "winapi", -] - -[[package]] -name = "ring" -version = "0.17.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" -dependencies = [ - "cc", - "cfg-if", - "getrandom", - "libc", - "spin 0.9.8", - "untrusted 0.9.0", - "windows-sys 0.52.0", -] - -[[package]] -name = "ripemd" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" -dependencies = [ - "digest 0.10.7", -] - -[[package]] -name = "rlp" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" -dependencies = [ - "bytes", - "rlp-derive", - "rustc-hex", -] - -[[package]] -name = "rlp-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "roaring" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4b84ba6e838ceb47b41de5194a60244fac43d9fe03b71dbe8c5a201081d6d1" -dependencies = [ - "bytemuck", - "byteorder", -] - -[[package]] -name = "rrs-succinct" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3372685893a9f67d18e98e792d690017287fd17379a83d798d958e517d380fa9" -dependencies = [ - "downcast-rs", - "num_enum 0.5.11", - "paste", -] - -[[package]] -name = "rsa" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cf22754c49613d2b3b119f0e5d46e34a2c628a937e3024b8762de4e7d8c710b" -dependencies = [ - "byteorder", - "digest 0.10.7", - "num-bigint-dig", - "num-integer", - "num-iter", - "num-traits", - "pkcs1 0.3.3", - "pkcs8 0.8.0", - "rand_core 0.6.4", - "smallvec", - "subtle", - "zeroize", -] - -[[package]] -name = "rsa" -version = "0.9.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d0e5124fcb30e76a7e79bfee683a2746db83784b86289f6251b54b7950a0dfc" -dependencies = [ - "const-oid 0.9.6", - "digest 0.10.7", - "num-bigint-dig", - "num-integer", - "num-traits", - "pkcs1 0.7.5", - "pkcs8 0.10.2", - "rand_core 0.6.4", - "signature", - 
"spki 0.7.3", - "subtle", - "zeroize", -] - -[[package]] -name = "rsa-program" -version = "1.1.0" -dependencies = [ - "digest 0.10.7", - "rand 0.8.5", - "rsa 0.9.6", - "sha2 0.10.8", - "sp1-zkvm", -] - -[[package]] -name = "rsa-script" -version = "1.1.0" -dependencies = [ - "rsa 0.6.1", - "sp1-build", - "sp1-sdk", -] - -[[package]] -name = "rsp-client-executor" -version = "0.1.0" -source = "git+https://github.com/succinctlabs/rsp/?rev=3647076#3647076da6580e30384dd911a3fc50d4bcdb5bc1" -dependencies = [ - "alloy-primitives 0.8.5", - "alloy-rlp", - "eyre", - "futures", - "itertools 0.13.0", - "reth-chainspec", - "reth-errors", - "reth-ethereum-consensus", - "reth-evm", - "reth-evm-ethereum", - "reth-evm-optimism", - "reth-execution-types", - "reth-optimism-consensus", - "reth-primitives", - "reth-revm", - "reth-storage-errors", - "reth-trie", - "revm", - "revm-primitives", - "rsp-mpt", - "rsp-primitives", - "rsp-witness-db", - "serde", - "serde_json", - "tokio", - "url", -] - -[[package]] -name = "rsp-host-executor" -version = "0.1.0" -source = "git+https://github.com/succinctlabs/rsp/?rev=3647076#3647076da6580e30384dd911a3fc50d4bcdb5bc1" -dependencies = [ - "alloy-primitives 0.8.5", - "alloy-provider", - "alloy-rlp", - "alloy-rpc-types", - "alloy-transport", - "eyre", - "futures", - "itertools 0.13.0", - "reth-chainspec", - "reth-codecs", - "reth-errors", - "reth-execution-types", - "reth-primitives", - "reth-storage-errors", - "reth-trie", - "revm", - "revm-primitives", - "rsp-client-executor", - "rsp-mpt", - "rsp-primitives", - "rsp-rpc-db", - "rsp-witness-db", - "serde", - "serde_json", - "tokio", - "tracing", - "url", -] - -[[package]] -name = "rsp-mpt" -version = "0.1.0" -source = "git+https://github.com/succinctlabs/rsp/?rev=3647076#3647076da6580e30384dd911a3fc50d4bcdb5bc1" -dependencies = [ - "alloy-primitives 0.8.5", - "alloy-rlp", - "alloy-rpc-types", - "anyhow", - "eyre", - "itertools 0.13.0", - "reth-execution-types", - "reth-primitives", - "reth-trie", - "revm", - "revm-primitives", - "rlp", - "rsp-primitives", - "serde", - "thiserror", -] - -[[package]] -name = "rsp-primitives" -version = "0.1.0" -source = "git+https://github.com/succinctlabs/rsp/?rev=3647076#3647076da6580e30384dd911a3fc50d4bcdb5bc1" -dependencies = [ - "alloy-rpc-types", - "eyre", - "reth-chainspec", - "reth-optimism-chainspec", - "reth-primitives", - "reth-revm", - "reth-trie", - "revm-interpreter", - "revm-precompile", - "revm-primitives", - "serde", - "serde_json", - "tracing", -] - -[[package]] -name = "rsp-program" -version = "1.1.0" -dependencies = [ - "bincode", - "rsp-client-executor", - "sp1-zkvm", -] - -[[package]] -name = "rsp-rpc-db" -version = "0.1.0" -source = "git+https://github.com/succinctlabs/rsp/?rev=3647076#3647076da6580e30384dd911a3fc50d4bcdb5bc1" -dependencies = [ - "alloy-provider", - "alloy-rlp", - "alloy-rpc-types", - "alloy-transport", - "futures", - "rayon", - "reth-primitives", - "reth-revm", - "reth-storage-errors", - "reth-trie", - "revm-primitives", - "rsp-primitives", - "thiserror", - "tokio", - "tracing", -] - -[[package]] -name = "rsp-script" -version = "0.1.0" -dependencies = [ - "alloy-primitives 0.8.5", - "bincode", - "clap", - "rsp-client-executor", - "rsp-host-executor", - "serde", - "serde_json", - "sp1-build", - "sp1-sdk", -] - -[[package]] -name = "rsp-witness-db" -version = "0.1.0" -source = "git+https://github.com/succinctlabs/rsp/?rev=3647076#3647076da6580e30384dd911a3fc50d4bcdb5bc1" -dependencies = [ - "reth-primitives", - "reth-storage-errors", - "revm-primitives", 
- "rsp-primitives", - "serde", -] - -[[package]] -name = "ruint" -version = "1.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c3cc4c2511671f327125da14133d0c5c5d137f006a1017a16f557bc85b16286" -dependencies = [ - "alloy-rlp", - "ark-ff 0.3.0", - "ark-ff 0.4.2", - "bytes", - "fastrlp", - "num-bigint 0.4.6", - "num-traits", - "parity-scale-codec", - "primitive-types", - "proptest", - "rand 0.8.5", - "rlp", - "ruint-macro", - "serde", - "valuable", - "zeroize", -] - -[[package]] -name = "ruint-macro" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" - -[[package]] -name = "rustc-demangle" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" - -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - -[[package]] -name = "rustc-hash" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" -dependencies = [ - "rand 0.8.5", -] - -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - -[[package]] -name = "rustc_version" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" -dependencies = [ - "semver 0.11.0", -] - -[[package]] -name = "rustc_version" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" -dependencies = [ - "semver 1.0.23", -] - -[[package]] -name = "rustix" -version = "0.38.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" -dependencies = [ - "bitflags 2.6.0", - "errno", - "libc", - "linux-raw-sys", - "windows-sys 0.52.0", -] - -[[package]] -name = "rustls" -version = "0.23.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" -dependencies = [ - "once_cell", - "ring 0.17.8", - "rustls-pki-types", - "rustls-webpki", - "subtle", - "zeroize", -] - -[[package]] -name = "rustls-pemfile" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", -] - -[[package]] -name = "rustls-pemfile" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" -dependencies = [ - "rustls-pki-types", -] - -[[package]] -name = "rustls-pki-types" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" - -[[package]] -name = "rustls-webpki" -version = "0.102.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" 
-dependencies = [ - "ring 0.17.8", - "rustls-pki-types", - "untrusted 0.9.0", -] - -[[package]] -name = "rustversion" -version = "1.0.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" - -[[package]] -name = "rusty-fork" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" -dependencies = [ - "fnv", - "quick-error", - "tempfile", - "wait-timeout", -] - -[[package]] -name = "ryu" -version = "1.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" - -[[package]] -name = "salsa20" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" -dependencies = [ - "cipher", -] - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "scale-info" -version = "2.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" -dependencies = [ - "cfg-if", - "derive_more 0.99.18", - "parity-scale-codec", - "scale-info-derive", -] - -[[package]] -name = "scale-info-derive" -version = "2.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" -dependencies = [ - "proc-macro-crate 3.2.0", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "scc" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836f1e0f4963ef5288b539b643b35e043e76a32d0f4e47e67febf69576527f50" -dependencies = [ - "sdd", -] - -[[package]] -name = "schannel" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" -dependencies = [ - "windows-sys 0.59.0", -] - -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - -[[package]] -name = "scrypt" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f9e24d2b632954ded8ab2ef9fea0a0c769ea56ea98bddbafbad22caeeadf45d" -dependencies = [ - "hmac", - "pbkdf2 0.11.0", - "salsa20", - "sha2 0.10.8", -] - -[[package]] -name = "sdd" -version = "3.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a7b59a5d9b0099720b417b6325d91a52cbf5b3dcb5041d864be53eefa58abc" - -[[package]] -name = "sec1" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" -dependencies = [ - "base16ct", - "der 0.7.9", - "generic-array 0.14.7", - "pkcs8 0.10.2", - "subtle", - "zeroize", -] - -[[package]] -name = "secp256k1" -version = "0.29.0" -source = "git+https://github.com/sp1-patches/rust-secp256k1?branch=patch-secp256k1-v0.29.0#13910d476dbdaf436312a9f096ee312593028557" -dependencies = [ - "cfg-if", - "ecdsa", - "elliptic-curve", - "k256", - "rand 
0.8.5", - "secp256k1-sys", -] - -[[package]] -name = "secp256k1-sys" -version = "0.10.0" -source = "git+https://github.com/sp1-patches/rust-secp256k1?branch=patch-secp256k1-v0.29.0#13910d476dbdaf436312a9f096ee312593028557" -dependencies = [ - "cc", -] - -[[package]] -name = "security-framework" -version = "2.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" -dependencies = [ - "bitflags 2.6.0", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "semver" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver" -version = "1.0.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" -dependencies = [ - "serde", -] - -[[package]] -name = "semver-parser" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" -dependencies = [ - "pest", -] - -[[package]] -name = "send_wrapper" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" - -[[package]] -name = "send_wrapper" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" - -[[package]] -name = "serde" -version = "1.0.210" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_bytes" -version = "0.11.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a" -dependencies = [ - "serde", -] - -[[package]] -name = "serde_cbor" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" -dependencies = [ - "half", - "serde", -] - -[[package]] -name = "serde_derive" -version = "1.0.210" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "serde_json" -version = "1.0.128" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" -dependencies = [ - "indexmap 2.5.0", - "itoa", - "memchr", - "ryu", - "serde", -] - -[[package]] -name = "serde_path_to_error" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" -dependencies = [ - "itoa", - "serde", -] - -[[package]] -name = "serde_repr" -version = "0.1.19" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "serde_spanned" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" -dependencies = [ - "serde", -] - -[[package]] -name = "serde_urlencoded" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" -dependencies = [ - "form_urlencoded", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "serde_with" -version = "3.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" -dependencies = [ - "base64 0.22.1", - "chrono", - "hex", - "indexmap 1.9.3", - "indexmap 2.5.0", - "serde", - "serde_derive", - "serde_json", - "serde_with_macros", - "time", -] - -[[package]] -name = "serde_with_macros" -version = "3.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" -dependencies = [ - "darling", - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "serde_yaml" -version = "0.9.34+deprecated" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" -dependencies = [ - "indexmap 2.5.0", - "itoa", - "ryu", - "serde", - "unsafe-libyaml", -] - -[[package]] -name = "serial_test" -version = "3.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b4b487fe2acf240a021cf57c6b2b4903b1e78ca0ecd862a71b71d2a51fed77d" -dependencies = [ - "futures", - "log", - "once_cell", - "parking_lot", - "scc", - "serial_test_derive", -] - -[[package]] -name = "serial_test_derive" -version = "3.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "sha2" -version = "0.9.8" -source = "git+https://github.com/sp1-patches/RustCrypto-hashes.git?branch=patch-v0.9.8#afdbfb09c325f8a69c01d540ec9a261e3637725d" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "sha2" -version = "0.9.9" -source = "git+https://github.com/sp1-patches/RustCrypto-hashes?branch=patch-sha2-v0.9.9#db82a4848f8d033eab544255e1efa036cc06f054" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "sha2" -version = "0.10.8" -source = "git+https://github.com/sp1-patches/RustCrypto-hashes?branch=patch-v0.10.8#1f224388fdede7cef649bce0d63876d1a9e3f515" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest 0.10.7", -] - -[[package]] -name = "sha3" -version = "0.10.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" -dependencies = [ - "digest 0.10.7", - "keccak", -] - -[[package]] -name = "sha3-asm" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" -dependencies = [ - 
"cc", - "cfg-if", -] - -[[package]] -name = "sharded-slab" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "shlex" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" - -[[package]] -name = "signal-hook-registry" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" -dependencies = [ - "libc", -] - -[[package]] -name = "signature" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" -dependencies = [ - "digest 0.10.7", - "rand_core 0.6.4", -] - -[[package]] -name = "simple_asn1" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" -dependencies = [ - "num-bigint 0.4.6", - "num-traits", - "thiserror", - "time", -] - -[[package]] -name = "size" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fed904c7fb2856d868b92464fc8fa597fce366edea1a9cbfaa8cb5fe080bd6d" - -[[package]] -name = "slab" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] - -[[package]] -name = "smallvec" -version = "1.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" -dependencies = [ - "serde", -] - -[[package]] -name = "snowbridge-amcl" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "460a9ed63cdf03c1b9847e8a12a5f5ba19c4efd5869e4a737e05be25d7c427e5" -dependencies = [ - "parity-scale-codec", - "scale-info", -] - -[[package]] -name = "socket2" -version = "0.5.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" -dependencies = [ - "libc", - "windows-sys 0.52.0", -] - -[[package]] -name = "sp1-build" -version = "2.0.0" -dependencies = [ - "anyhow", - "cargo_metadata", - "chrono", - "clap", - "dirs", -] - -[[package]] -name = "sp1-core-executor" -version = "2.0.0" -dependencies = [ - "bincode", - "bytemuck", - "elf", - "enum-map", - "eyre", - "generic-array 1.1.0", - "hashbrown 0.14.5", - "hex", - "itertools 0.13.0", - "log", - "nohash-hasher", - "num", - "p3-field", - "p3-keccak-air", - "p3-maybe-rayon", - "rand 0.8.5", - "rrs-succinct", - "serde", - "serde_with", - "sp1-curves", - "sp1-derive", - "sp1-primitives", - "sp1-stark", - "strum", - "strum_macros", - "thiserror", - "tiny-keccak", - "tracing", - "typenum", - "vec_map", -] - -[[package]] -name = "sp1-core-machine" -version = "2.0.0" -dependencies = [ - "anyhow", - "arrayref", - "bincode", - "blake3", - "bytemuck", - "cfg-if", - "curve25519-dalek", - "elf", - "elliptic-curve", - "generic-array 1.1.0", - "hashbrown 0.14.5", - "hex", - "itertools 0.13.0", - "k256", - "log", - "nohash-hasher", - "num", - "num_cpus", - "p3-air", - "p3-baby-bear", - "p3-blake3", - "p3-challenger", - "p3-commit", - "p3-dft", - "p3-field", - "p3-fri", 
- "p3-keccak", - "p3-keccak-air", - "p3-matrix", - "p3-maybe-rayon", - "p3-merkle-tree", - "p3-poseidon2", - "p3-symmetric", - "p3-uni-stark", - "p3-util", - "rand 0.8.5", - "rayon-scan", - "rrs-succinct", - "serde", - "serde_with", - "size", - "snowbridge-amcl", - "sp1-core-executor", - "sp1-curves", - "sp1-derive", - "sp1-primitives", - "sp1-stark", - "static_assertions", - "strum", - "strum_macros", - "tempfile", - "thiserror", - "tracing", - "tracing-forest", - "tracing-subscriber", - "typenum", - "web-time", -] - -[[package]] -name = "sp1-cuda" -version = "2.0.0" -dependencies = [ - "bincode", - "ctrlc", - "prost 0.13.3", - "prost-types 0.13.3", - "serde", - "serde_json", - "sp1-core-machine", - "sp1-prover", - "sp1-stark", - "tokio", - "tracing", - "tracing-subscriber", - "twirp-rs", -] - -[[package]] -name = "sp1-curves" -version = "2.0.0" -dependencies = [ - "curve25519-dalek", - "dashu", - "elliptic-curve", - "generic-array 1.1.0", - "itertools 0.13.0", - "k256", - "num", - "p3-field", - "serde", - "snowbridge-amcl", - "sp1-primitives", - "sp1-stark", - "typenum", -] - -[[package]] -name = "sp1-derive" -version = "2.0.0" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "sp1-lib" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bea7811abd2d3a991007fcb284f41152840b8388c171288d0c52c6793956609c" -dependencies = [ - "anyhow", - "bincode", - "cfg-if", - "hex", - "serde", - "snowbridge-amcl", -] - -[[package]] -name = "sp1-lib" -version = "2.0.0" -dependencies = [ - "anyhow", - "bincode", - "cfg-if", - "hex", - "serde", - "snowbridge-amcl", -] - -[[package]] -name = "sp1-lib" -version = "2.0.0" -source = "git+https://github.com/succinctlabs/sp1.git?branch=dev#11b75dae43d268d1071bb28f66aecd4939992bf7" -dependencies = [ - "anyhow", - "bincode", - "cfg-if", - "hex", - "serde", - "snowbridge-amcl", -] - -[[package]] -name = "sp1-primitives" -version = "2.0.0" -dependencies = [ - "bincode", - "hex", - "itertools 0.13.0", - "lazy_static", - "num-bigint 0.4.6", - "p3-baby-bear", - "p3-field", - "p3-poseidon2", - "p3-symmetric", - "serde", - "sha2 0.10.8", -] - -[[package]] -name = "sp1-prover" -version = "2.0.0" -dependencies = [ - "anyhow", - "bincode", - "clap", - "dirs", - "hex", - "itertools 0.13.0", - "num-bigint 0.4.6", - "oneshot", - "p3-baby-bear", - "p3-bn254-fr", - "p3-challenger", - "p3-commit", - "p3-field", - "p3-matrix", - "rayon", - "serde", - "serde_json", - "serial_test", - "sp1-core-executor", - "sp1-core-machine", - "sp1-primitives", - "sp1-recursion-circuit", - "sp1-recursion-compiler", - "sp1-recursion-core", - "sp1-recursion-gnark-ffi", - "sp1-recursion-program", - "sp1-stark", - "subtle-encoding", - "tempfile", - "thiserror", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "sp1-recursion-circuit" -version = "2.0.0" -dependencies = [ - "bincode", - "itertools 0.13.0", - "p3-air", - "p3-baby-bear", - "p3-bn254-fr", - "p3-commit", - "p3-field", - "p3-fri", - "p3-matrix", - "p3-util", - "serde", - "sp1-core-machine", - "sp1-recursion-compiler", - "sp1-recursion-core", - "sp1-recursion-derive", - "sp1-recursion-program", - "sp1-stark", -] - -[[package]] -name = "sp1-recursion-compiler" -version = "2.0.0" -dependencies = [ - "backtrace", - "itertools 0.13.0", - "p3-air", - "p3-baby-bear", - "p3-bn254-fr", - "p3-commit", - "p3-field", - "p3-fri", - "p3-matrix", - "p3-poseidon2", - "p3-symmetric", - "p3-util", - "rayon", - "serde", - "sp1-core-machine", - "sp1-primitives", - 
"sp1-recursion-core", - "sp1-recursion-core-v2", - "sp1-recursion-derive", - "sp1-stark", - "tracing", - "vec_map", -] - -[[package]] -name = "sp1-recursion-core" -version = "2.0.0" -dependencies = [ - "arrayref", - "backtrace", - "ff 0.13.0", - "hashbrown 0.14.5", - "itertools 0.13.0", - "num_cpus", - "p3-air", - "p3-baby-bear", - "p3-bn254-fr", - "p3-challenger", - "p3-commit", - "p3-dft", - "p3-field", - "p3-fri", - "p3-matrix", - "p3-maybe-rayon", - "p3-merkle-tree", - "p3-poseidon2", - "p3-symmetric", - "p3-util", - "serde", - "serde_with", - "sp1-core-executor", - "sp1-core-machine", - "sp1-derive", - "sp1-primitives", - "sp1-stark", - "static_assertions", - "tracing", - "zkhash", -] - -[[package]] -name = "sp1-recursion-core-v2" -version = "2.0.0" -dependencies = [ - "arrayref", - "backtrace", - "ff 0.13.0", - "hashbrown 0.14.5", - "itertools 0.13.0", - "num_cpus", - "p3-air", - "p3-baby-bear", - "p3-bn254-fr", - "p3-challenger", - "p3-commit", - "p3-dft", - "p3-field", - "p3-fri", - "p3-matrix", - "p3-maybe-rayon", - "p3-merkle-tree", - "p3-poseidon2", - "p3-symmetric", - "p3-util", - "serde", - "serde_with", - "sp1-core-executor", - "sp1-core-machine", - "sp1-derive", - "sp1-primitives", - "sp1-recursion-core", - "sp1-stark", - "static_assertions", - "thiserror", - "tracing", - "vec_map", - "zkhash", -] - -[[package]] -name = "sp1-recursion-derive" -version = "2.0.0" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "sp1-recursion-gnark-ffi" -version = "2.0.0" -dependencies = [ - "anyhow", - "bincode", - "bindgen", - "cc", - "cfg-if", - "hex", - "log", - "num-bigint 0.4.6", - "p3-baby-bear", - "p3-field", - "p3-symmetric", - "rand 0.8.5", - "serde", - "serde_json", - "sha2 0.10.8", - "sp1-core-machine", - "sp1-recursion-compiler", - "sp1-stark", - "tempfile", -] - -[[package]] -name = "sp1-recursion-program" -version = "2.0.0" -dependencies = [ - "itertools 0.13.0", - "p3-air", - "p3-baby-bear", - "p3-challenger", - "p3-commit", - "p3-dft", - "p3-field", - "p3-fri", - "p3-matrix", - "p3-maybe-rayon", - "p3-merkle-tree", - "p3-poseidon2", - "p3-symmetric", - "p3-util", - "rand 0.8.5", - "serde", - "sp1-core-executor", - "sp1-core-machine", - "sp1-primitives", - "sp1-recursion-compiler", - "sp1-recursion-core", - "sp1-stark", - "stacker", - "tracing", -] - -[[package]] -name = "sp1-sdk" -version = "2.0.0" -dependencies = [ - "alloy-sol-types 0.7.7", - "anyhow", - "async-trait", - "bincode", - "cfg-if", - "dirs", - "ethers", - "futures", - "getrandom", - "hashbrown 0.14.5", - "hex", - "indicatif", - "itertools 0.13.0", - "log", - "num-bigint 0.4.6", - "p3-baby-bear", - "p3-commit", - "p3-field", - "p3-fri", - "p3-matrix", - "prost 0.13.3", - "reqwest 0.12.8", - "reqwest-middleware", - "serde", - "serde_json", - "sha2 0.10.8", - "sp1-core-executor", - "sp1-core-machine", - "sp1-cuda", - "sp1-primitives", - "sp1-prover", - "sp1-stark", - "strum", - "strum_macros", - "sysinfo", - "tempfile", - "thiserror", - "tokio", - "tracing", - "twirp-rs", - "vergen", -] - -[[package]] -name = "sp1-stark" -version = "2.0.0" -dependencies = [ - "arrayref", - "getrandom", - "hashbrown 0.14.5", - "itertools 0.13.0", - "p3-air", - "p3-baby-bear", - "p3-challenger", - "p3-commit", - "p3-dft", - "p3-field", - "p3-fri", - "p3-matrix", - "p3-maybe-rayon", - "p3-merkle-tree", - "p3-poseidon2", - "p3-symmetric", - "p3-uni-stark", - "p3-util", - "rayon-scan", - "serde", - "sp1-derive", - "sp1-primitives", - "sysinfo", - "tracing", -] - -[[package]] -name = "sp1-zkvm" 
-version = "2.0.0" -dependencies = [ - "bincode", - "cfg-if", - "getrandom", - "lazy_static", - "libm", - "once_cell", - "p3-baby-bear", - "p3-field", - "rand 0.8.5", - "serde", - "sha2 0.10.8", - "sp1-lib 2.0.0", - "sp1-primitives", -] - -[[package]] -name = "sp1_bls12_381" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27c4b8901334dc09099dd82f80a72ddfc76b0046f4b342584c808f1931bed5a" -dependencies = [ - "cfg-if", - "ff 0.13.0", - "group 0.13.0", - "pairing 0.23.0", - "rand_core 0.6.4", - "sp1-lib 1.2.0", - "subtle", -] - -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - -[[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" -dependencies = [ - "lock_api", -] - -[[package]] -name = "spki" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" -dependencies = [ - "base64ct", - "der 0.5.1", -] - -[[package]] -name = "spki" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" -dependencies = [ - "base64ct", - "der 0.7.9", -] - -[[package]] -name = "ssz-withdrawals-program" -version = "1.1.0" -dependencies = [ - "alloy-primitives 0.6.4", - "hex", - "hex-literal", - "serde", - "serde_json", - "serde_with", - "sha2 0.9.8", - "sp1-zkvm", - "ssz_rs", -] - -[[package]] -name = "ssz-withdrawals-script" -version = "1.1.0" -dependencies = [ - "sp1-build", - "sp1-sdk", -] - -[[package]] -name = "ssz_rs" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057291e5631f280978fa9c8009390663ca4613359fc1318e36a8c24c392f6d1f" -dependencies = [ - "bitvec", - "hex", - "num-bigint 0.4.6", - "serde", - "sha2 0.9.9", - "ssz_rs_derive", -] - -[[package]] -name = "ssz_rs_derive" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f07d54c4d01a1713eb363b55ba51595da15f6f1211435b71466460da022aa140" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "stacker" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799c883d55abdb5e98af1a7b3f23b9b6de8ecada0ecac058672d7635eb48ca7b" -dependencies = [ - "cc", - "cfg-if", - "libc", - "psm", - "windows-sys 0.59.0", -] - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "strsim" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" - -[[package]] -name = "strum" -version = "0.26.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.26.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" -dependencies = [ - 
"heck", - "proc-macro2", - "quote", - "rustversion", - "syn 2.0.79", -] - -[[package]] -name = "substrate-bn" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b5bbfa79abbae15dd642ea8176a21a635ff3c00059961d1ea27ad04e5b441c" -dependencies = [ - "byteorder", - "crunchy", - "lazy_static", - "rand 0.8.5", - "rustc-hex", -] - -[[package]] -name = "substrate-bn" -version = "0.6.0" -source = "git+https://github.com/sp1-patches/bn?rev=43d854d45b5727b1ff2b9f346d728e785bb8395c#43d854d45b5727b1ff2b9f346d728e785bb8395c" -dependencies = [ - "bytemuck", - "byteorder", - "cfg-if", - "crunchy", - "lazy_static", - "rand 0.8.5", - "rustc-hex", - "sp1-lib 2.0.0 (git+https://github.com/succinctlabs/sp1.git?branch=dev)", -] - -[[package]] -name = "subtle" -version = "2.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" - -[[package]] -name = "subtle-encoding" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dcb1ed7b8330c5eed5441052651dd7a12c75e2ed88f2ec024ae1fa3a5e59945" -dependencies = [ - "zeroize", -] - -[[package]] -name = "subtle-ng" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn-solidity" -version = "0.7.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c837dc8852cb7074e46b444afb81783140dab12c58867b49fb3898fbafedf7ea" -dependencies = [ - "paste", - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "syn-solidity" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab661c8148c2261222a4d641ad5477fd4bea79406a99056096a0b41b35617a5" -dependencies = [ - "paste", - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "sync_wrapper" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" - -[[package]] -name = "sync_wrapper" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" -dependencies = [ - "futures-core", -] - -[[package]] -name = "synstructure" -version = "0.12.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "unicode-xid", -] - -[[package]] -name = "sysinfo" -version = "0.30.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a5b4ddaee55fb2bea2bf0e5000747e5f5c0de765e5a5ff87f4cd106439f4bb3" -dependencies = [ - "cfg-if", - "core-foundation-sys", - "libc", - "ntapi", - "once_cell", - "rayon", - "windows", -] - -[[package]] -name = 
"system-configuration" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" -dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "system-configuration-sys 0.5.0", -] - -[[package]] -name = "system-configuration" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" -dependencies = [ - "bitflags 2.6.0", - "core-foundation", - "system-configuration-sys 0.6.0", -] - -[[package]] -name = "system-configuration-sys" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "system-configuration-sys" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - -[[package]] -name = "tempfile" -version = "3.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" -dependencies = [ - "cfg-if", - "fastrand", - "once_cell", - "rustix", - "windows-sys 0.59.0", -] - -[[package]] -name = "tendermint" -version = "0.35.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f8a10105d0a7c4af0a242e23ed5a12519afe5cc0e68419da441bb5981a6802" -dependencies = [ - "bytes", - "digest 0.10.7", - "ed25519", - "ed25519-consensus", - "flex-error", - "futures", - "num-traits", - "once_cell", - "prost 0.12.6", - "prost-types 0.12.6", - "serde", - "serde_bytes", - "serde_json", - "serde_repr", - "sha2 0.10.8", - "signature", - "subtle", - "subtle-encoding", - "tendermint-proto", - "time", - "zeroize", -] - -[[package]] -name = "tendermint-light-client-verifier" -version = "0.35.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35678b66e819659617c2e83f9662b8544425694441990c07137904a07872d871" -dependencies = [ - "derive_more 0.99.18", - "flex-error", - "serde", - "tendermint", - "time", -] - -[[package]] -name = "tendermint-program" -version = "1.1.0" -dependencies = [ - "serde", - "serde_cbor", - "serde_json", - "sp1-zkvm", - "tendermint-light-client-verifier", -] - -[[package]] -name = "tendermint-proto" -version = "0.35.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff525d5540a9fc535c38dc0d92a98da3ee36fcdfbda99cecb9f3cce5cd4d41d7" -dependencies = [ - "bytes", - "flex-error", - "num-derive", - "num-traits", - "prost 0.12.6", - "prost-types 0.12.6", - "serde", - "serde_bytes", - "subtle-encoding", - "time", -] - -[[package]] -name = "tendermint-script" -version = "1.1.0" -dependencies = [ - "bincode", - "itertools 0.12.1", - "reqwest 0.11.27", - "serde", - "serde_cbor", - "serde_json", - "sha2 0.10.8", - "sp1-build", - "sp1-core-machine", - "sp1-sdk", - "tendermint", - "tendermint-light-client-verifier", - "tokio", -] - -[[package]] -name = "thiserror" -version = "1.0.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "thiserror-impl-no-std" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e6318948b519ba6dc2b442a6d0b904ebfb8d411a3ad3e07843615a72249758" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "thiserror-no-std" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3ad459d94dd517257cc96add8a43190ee620011bb6e6cdc82dafd97dfafafea" -dependencies = [ - "thiserror-impl-no-std", -] - -[[package]] -name = "thread_local" -version = "1.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" -dependencies = [ - "cfg-if", - "once_cell", -] - -[[package]] -name = "threadpool" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" -dependencies = [ - "num_cpus", -] - -[[package]] -name = "time" -version = "0.3.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" -dependencies = [ - "deranged", - "itoa", - "libc", - "num-conv", - "num_threads", - "powerfmt", - "serde", - "time-core", - "time-macros", -] - -[[package]] -name = "time-core" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" - -[[package]] -name = "time-macros" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" -dependencies = [ - "num-conv", - "time-core", -] - -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "git+https://github.com/sp1-patches/tiny-keccak?branch=patch-v2.0.2#bf0b28f63510a90c7b6c21ac6ff461c93ecd2331" -dependencies = [ - "cfg-if", - "crunchy", -] - -[[package]] -name = "tinyvec" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" - -[[package]] -name = "tokio" -version = "1.40.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" -dependencies = [ - "backtrace", - "bytes", - "libc", - "mio", - "parking_lot", - "pin-project-lite", - "signal-hook-registry", - "socket2", - "tokio-macros", - "windows-sys 0.52.0", -] - -[[package]] -name = "tokio-macros" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "tokio-native-tls" -version = 
"0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", -] - -[[package]] -name = "tokio-rustls" -version = "0.26.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" -dependencies = [ - "rustls", - "rustls-pki-types", - "tokio", -] - -[[package]] -name = "tokio-stream" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" -dependencies = [ - "futures-core", - "pin-project-lite", - "tokio", - "tokio-util", -] - -[[package]] -name = "tokio-util" -version = "0.7.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "toml" -version = "0.8.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit 0.22.22", -] - -[[package]] -name = "toml_datetime" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" -dependencies = [ - "serde", -] - -[[package]] -name = "toml_edit" -version = "0.19.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" -dependencies = [ - "indexmap 2.5.0", - "toml_datetime", - "winnow 0.5.40", -] - -[[package]] -name = "toml_edit" -version = "0.22.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" -dependencies = [ - "indexmap 2.5.0", - "serde", - "serde_spanned", - "toml_datetime", - "winnow 0.6.20", -] - -[[package]] -name = "tower" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" -dependencies = [ - "futures-core", - "futures-util", - "pin-project-lite", - "sync_wrapper 0.1.2", - "tokio", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower-layer" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" - -[[package]] -name = "tower-service" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" - -[[package]] -name = "tracing" -version = "0.1.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" -dependencies = [ - "log", - "pin-project-lite", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "tracing-core" -version = 
"0.1.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" -dependencies = [ - "once_cell", - "valuable", -] - -[[package]] -name = "tracing-forest" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee40835db14ddd1e3ba414292272eddde9dad04d3d4b65509656414d1c42592f" -dependencies = [ - "ansi_term", - "smallvec", - "thiserror", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project", - "tracing", -] - -[[package]] -name = "tracing-log" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" -dependencies = [ - "log", - "once_cell", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version = "0.3.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" -dependencies = [ - "matchers", - "nu-ansi-term", - "once_cell", - "regex", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", -] - -[[package]] -name = "try-lock" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" - -[[package]] -name = "twirp-rs" -version = "0.13.0-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27dfcc06b8d9262bc2d4b8d1847c56af9971a52dd8a0076876de9db763227d0d" -dependencies = [ - "async-trait", - "axum", - "futures", - "http 1.1.0", - "http-body-util", - "hyper 1.4.1", - "prost 0.13.3", - "reqwest 0.12.8", - "serde", - "serde_json", - "thiserror", - "tokio", - "tower", - "url", -] - -[[package]] -name = "typenum" -version = "1.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" - -[[package]] -name = "ucd-trie" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" - -[[package]] -name = "uint" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - -[[package]] -name = "unarray" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" - -[[package]] -name = "unicode-bidi" -version = "0.3.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" - -[[package]] -name = "unicode-ident" -version = "1.0.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" - -[[package]] -name = "unicode-normalization" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" -dependencies = [ - 
"tinyvec", -] - -[[package]] -name = "unicode-segmentation" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" - -[[package]] -name = "unicode-width" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" - -[[package]] -name = "unicode-xid" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" - -[[package]] -name = "unsafe-libyaml" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" - -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - -[[package]] -name = "untrusted" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" - -[[package]] -name = "url" -version = "2.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" -dependencies = [ - "form_urlencoded", - "idna", - "percent-encoding", -] - -[[package]] -name = "utf8parse" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" - -[[package]] -name = "uuid" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" -dependencies = [ - "getrandom", - "serde", -] - -[[package]] -name = "valuable" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" - -[[package]] -name = "vcpkg" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" - -[[package]] -name = "vec_map" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" -dependencies = [ - "serde", -] - -[[package]] -name = "vergen" -version = "8.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2990d9ea5967266ea0ccf413a4aa5c42a93dbcfda9cb49a97de6931726b12566" -dependencies = [ - "anyhow", - "cfg-if", - "git2", - "rustversion", - "time", -] - -[[package]] -name = "version_check" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" - -[[package]] -name = "wait-timeout" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" -dependencies = [ - "libc", -] - -[[package]] -name = "walkdir" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" -dependencies = [ - "same-file", - "winapi-util", -] - -[[package]] -name = 
"want" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" -dependencies = [ - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "wasm-bindgen" -version = "0.2.93" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" -dependencies = [ - "cfg-if", - "once_cell", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.93" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" -dependencies = [ - "bumpalo", - "log", - "once_cell", - "proc-macro2", - "quote", - "syn 2.0.79", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" -dependencies = [ - "cfg-if", - "js-sys", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.93" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.93" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.93" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" - -[[package]] -name = "wasm-streams" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e072d4e72f700fb3443d8fe94a39315df013eef1104903cdb0a2abd322bbecd" -dependencies = [ - "futures-util", - "js-sys", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - -[[package]] -name = "web-sys" -version = "0.3.70" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "web-time" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "webpki-roots" -version = "0.26.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" -dependencies = [ - "rustls-pki-types", -] - -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" -dependencies = [ - "windows-sys 0.59.0", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" -dependencies = [ - "windows-core", - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-core" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-registry" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" -dependencies = [ - "windows-result", - "windows-strings", - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-result" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-strings" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" -dependencies = [ - "windows-result", - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets 0.48.5", -] - -[[package]] -name = "windows-sys" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-targets" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" -dependencies = [ - "windows_aarch64_gnullvm 0.48.5", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm 0.48.5", - "windows_x86_64_msvc 0.48.5", -] - -[[package]] -name = "windows-targets" -version = "0.52.6" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" -dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" - -[[package]] -name = "winnow" -version = "0.5.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" -dependencies = [ - "memchr", -] - -[[package]] -name = "winnow" -version = "0.6.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" -dependencies = [ - "memchr", -] - -[[package]] -name = "winreg" -version = "0.50.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - -[[package]] -name = "ws_stream_wasm" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" -dependencies = [ - "async_io_stream", - "futures", - "js-sys", - "log", - "pharos", - "rustc_version 0.4.1", - "send_wrapper 0.6.0", - "thiserror", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - -[[package]] -name = "wyz" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" -dependencies = [ - "tap", -] - -[[package]] -name = "zerocopy" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" -dependencies = [ - "byteorder", - "zerocopy-derive", -] - -[[package]] -name = "zerocopy-derive" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "zeroize" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" -dependencies = [ - "zeroize_derive", -] - -[[package]] -name = "zeroize_derive" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "zkhash" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4352d1081da6922701401cdd4cbf29a2723feb4cfabb5771f6fee8e9276da1c7" -dependencies = [ - "ark-ff 0.4.2", - "ark-std 0.4.0", - "bitvec", - "blake2", - "bls12_381 0.7.1", - "byteorder", - "cfg-if", - "group 0.12.1", - "group 0.13.0", - "halo2", - "hex", - "jubjub", - "lazy_static", - "pasta_curves 0.5.1", - "rand 0.8.5", - "serde", - "sha2 0.10.8", - "sha3", - "subtle", -] diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 506fcbbc1b..0597d8cb18 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -61,9 +61,13 @@ sp1-zkvm = { path = "../crates/zkvm/entrypoint", default-features = false } [patch.crates-io] curve25519-dalek = { git = "https://github.com/sp1-patches/curve25519-dalek", branch = "patch-curve25519-v4.1.3" } curve25519-dalek-ng = { git = "https://github.com/sp1-patches/curve25519-dalek-ng", branch = "patch-v4.1.1" } -ecdsa-core = { git = "https://github.com/sp1-patches/signatures", package = "ecdsa", branch = "patch-ecdsa-v0.16.9" } +# 
ecdsa-core = { git = "https://github.com/sp1-patches/signatures", package = "ecdsa", branch = "patch-ecdsa-v0.16.8" } +# Note: This branch of ecdsa-core points to SP1 branch ratan/impl-add-assign-fixes +ecdsa-core = { git = "https://github.com/sp1-patches/signatures", package = "ecdsa", branch = "ratan/secp256k1-add-fixes-v0.16.8" } ed25519-consensus = { git = "https://github.com/sp1-patches/ed25519-consensus", branch = "patch-v2.1.0" } secp256k1 = { git = "https://github.com/sp1-patches/rust-secp256k1", branch = "patch-secp256k1-v0.29.0" } sha2-v0-10-8 = { git = "https://github.com/sp1-patches/RustCrypto-hashes", package = "sha2", branch = "patch-v0.10.8" } +sha2-v0-10-6 = { git = "https://github.com/sp1-patches/RustCrypto-hashes", package = "sha2", branch = "patch-sha2-v0.10.6" } sha2-v0-9-9 = { git = "https://github.com/sp1-patches/RustCrypto-hashes", package = "sha2", branch = "patch-sha2-v0.9.9" } -tiny-keccak = { git = "https://github.com/sp1-patches/tiny-keccak", branch = "patch-v2.0.2" } +sha2-v0-9-8 = { git = "https://github.com/sp1-patches/RustCrypto-hashes", package = "sha2", branch = "patch-sha2-v0.9.8" } +tiny-keccak = { git = "https://github.com/sp1-patches/tiny-keccak", branch = "patch-v2.0.2" } diff --git a/examples/aggregation/program/Cargo.toml b/examples/aggregation/program/Cargo.toml index 7f6df8d05c..d506914b92 100644 --- a/examples/aggregation/program/Cargo.toml +++ b/examples/aggregation/program/Cargo.toml @@ -5,6 +5,5 @@ edition = "2021" publish = false [dependencies] -hex = "0.4.3" sha2 = "0.10.8" sp1-zkvm = { path = "../../../crates/zkvm/entrypoint", features = ["verify"] } diff --git a/examples/aggregation/program/elf/riscv32im-succinct-zkvm-elf b/examples/aggregation/program/elf/riscv32im-succinct-zkvm-elf index c572a1a353..6ca187d36d 100755 Binary files a/examples/aggregation/program/elf/riscv32im-succinct-zkvm-elf and b/examples/aggregation/program/elf/riscv32im-succinct-zkvm-elf differ diff --git a/examples/aggregation/script/Cargo.toml b/examples/aggregation/script/Cargo.toml index 7b3e3e3aee..069c8afd18 100644 --- a/examples/aggregation/script/Cargo.toml +++ b/examples/aggregation/script/Cargo.toml @@ -5,7 +5,6 @@ edition = { workspace = true } publish = false [dependencies] -hex = "0.4.3" sp1-sdk = { workspace = true } tracing = "0.1.40" diff --git a/examples/aggregation/script/src/main.rs b/examples/aggregation/script/src/main.rs index 5eb4bdc4fc..9015cafcc2 100644 --- a/examples/aggregation/script/src/main.rs +++ b/examples/aggregation/script/src/main.rs @@ -72,7 +72,7 @@ fn main() { // witnessed by the prover during the recursive aggregation process inside SP1 itself. for input in inputs { let SP1Proof::Compressed(proof) = input.proof.proof else { panic!() }; - stdin.write_proof(proof, input.vk.vk); + stdin.write_proof(*proof, input.vk.vk); } // Generate the plonk bn254 proof. 
diff --git a/examples/bls12381/program/Cargo.toml b/examples/bls12381/program/Cargo.toml index 20688d229d..0044235397 100644 --- a/examples/bls12381/program/Cargo.toml +++ b/examples/bls12381/program/Cargo.toml @@ -6,8 +6,6 @@ publish = false [dependencies] sp1-zkvm = { path = "../../../crates/zkvm/entrypoint" } -num = { version = "0.4.1", default-features = false } -serde_yaml = "0.9.34" bls12_381 = { git = "https://github.com/sp1-patches/bls12_381", branch = "patch-v0.8.0" } ff = "0.13.0" rand = "0.8.5" diff --git a/examples/bls12381/program/elf/riscv32im-succinct-zkvm-elf b/examples/bls12381/program/elf/riscv32im-succinct-zkvm-elf index 9b86166a26..6c65387fea 100755 Binary files a/examples/bls12381/program/elf/riscv32im-succinct-zkvm-elf and b/examples/bls12381/program/elf/riscv32im-succinct-zkvm-elf differ diff --git a/examples/bn254/program/Cargo.toml b/examples/bn254/program/Cargo.toml index 0811dd47e1..9e0976cc00 100644 --- a/examples/bn254/program/Cargo.toml +++ b/examples/bn254/program/Cargo.toml @@ -6,8 +6,6 @@ publish = false [dependencies] sp1-zkvm = { path = "../../../crates/zkvm/entrypoint" } -num = { version = "0.4.1", default-features = false } -serde_yaml = "0.9.34" bn = { git = "https://github.com/sp1-patches/bn", package = "substrate-bn", rev = "43d854d45b5727b1ff2b9f346d728e785bb8395c"} # bn = { git = "https://github.com/sp1-patches/bn", package = "substrate-bn", branch = "patch-v0.6.0"} rand = "0.8.5" diff --git a/examples/bn254/program/elf/riscv32im-succinct-zkvm-elf b/examples/bn254/program/elf/riscv32im-succinct-zkvm-elf index 4a25479725..0c62975fc4 100755 Binary files a/examples/bn254/program/elf/riscv32im-succinct-zkvm-elf and b/examples/bn254/program/elf/riscv32im-succinct-zkvm-elf differ diff --git a/examples/chess/program/elf/riscv32im-succinct-zkvm-elf b/examples/chess/program/elf/riscv32im-succinct-zkvm-elf index 01a06d479f..6dd5d47402 100755 Binary files a/examples/chess/program/elf/riscv32im-succinct-zkvm-elf and b/examples/chess/program/elf/riscv32im-succinct-zkvm-elf differ diff --git a/examples/cycle-tracking/program/elf/normal b/examples/cycle-tracking/program/elf/normal index 4d5fe97df1..1f6e0e65b9 100755 Binary files a/examples/cycle-tracking/program/elf/normal and b/examples/cycle-tracking/program/elf/normal differ diff --git a/examples/cycle-tracking/program/elf/report b/examples/cycle-tracking/program/elf/report index a99d47fcb0..23173c4284 100755 Binary files a/examples/cycle-tracking/program/elf/report and b/examples/cycle-tracking/program/elf/report differ diff --git a/examples/fibonacci/program/elf/riscv32im-succinct-zkvm-elf b/examples/fibonacci/program/elf/riscv32im-succinct-zkvm-elf index 017feaa1a2..61e8973390 100755 Binary files a/examples/fibonacci/program/elf/riscv32im-succinct-zkvm-elf and b/examples/fibonacci/program/elf/riscv32im-succinct-zkvm-elf differ diff --git a/examples/fibonacci/script/Cargo.toml b/examples/fibonacci/script/Cargo.toml index 346575c922..021aaf5fcc 100644 --- a/examples/fibonacci/script/Cargo.toml +++ b/examples/fibonacci/script/Cargo.toml @@ -7,9 +7,7 @@ publish = false [dependencies] hex = "0.4.3" -itertools = "0.12.1" sp1-sdk = { workspace = true } -sha2 = "0.10.8" [build-dependencies] sp1-build = { workspace = true } diff --git a/examples/io/program/elf/riscv32im-succinct-zkvm-elf b/examples/io/program/elf/riscv32im-succinct-zkvm-elf index 3429f4bee4..30a53c5a44 100755 Binary files a/examples/io/program/elf/riscv32im-succinct-zkvm-elf and b/examples/io/program/elf/riscv32im-succinct-zkvm-elf differ diff --git 
a/examples/is-prime/program/elf/riscv32im-succinct-zkvm-elf b/examples/is-prime/program/elf/riscv32im-succinct-zkvm-elf index 69a81599d1..8185dc3dff 100755 Binary files a/examples/is-prime/program/elf/riscv32im-succinct-zkvm-elf and b/examples/is-prime/program/elf/riscv32im-succinct-zkvm-elf differ diff --git a/examples/json/program/Cargo.toml b/examples/json/program/Cargo.toml index 0a4582d54c..a357b8a03c 100644 --- a/examples/json/program/Cargo.toml +++ b/examples/json/program/Cargo.toml @@ -7,5 +7,4 @@ publish = false [dependencies] sp1-zkvm = { path = "../../../crates/zkvm/entrypoint" } serde_json = "1.0.113" -serde = "1.0.197" lib = { path = "../lib", package = "json-lib" } diff --git a/examples/json/program/elf/riscv32im-succinct-zkvm-elf b/examples/json/program/elf/riscv32im-succinct-zkvm-elf index 1a74fe1a1c..799030dbca 100755 Binary files a/examples/json/program/elf/riscv32im-succinct-zkvm-elf and b/examples/json/program/elf/riscv32im-succinct-zkvm-elf differ diff --git a/examples/json/script/Cargo.toml b/examples/json/script/Cargo.toml index d0bedda9e5..8b6eb32a33 100644 --- a/examples/json/script/Cargo.toml +++ b/examples/json/script/Cargo.toml @@ -5,7 +5,6 @@ edition = { workspace = true } publish = false [dependencies] -serde = "1.0.197" serde_json = "1.0.114" sp1-sdk = { workspace = true } lib = { path = "../lib", package = "json-lib" } diff --git a/examples/patch-testing/program/Cargo.toml b/examples/patch-testing/program/Cargo.toml index 56921d4bb5..93b9967e0e 100644 --- a/examples/patch-testing/program/Cargo.toml +++ b/examples/patch-testing/program/Cargo.toml @@ -17,6 +17,7 @@ ed25519-dalek = "2.1.0" tiny-keccak = { version = "2.0.2", features = ["keccak"] } curve25519-dalek = { version = "4.1.3", default-features = false, features = ["alloc"] } curve25519-dalek-ng = { version = "4.1", default-features = false, features = ["u32_backend", "alloc"] } -k256 = { version = "0.13", default-features = false, features = ["ecdsa"] } -alloy-primitives = { version = "0.7", features = ["k256"] } -secp256k1 = { version = "0.29", features = ["recovery", "global-context"]} +alloy-primitives = { version = "0.8", features = ["k256"] } +secp256k1 = { version = "0.29", features = ["recovery", "global-context"] } + +revm-precompile = { version = "11.0.1", default-features = false, features = ["kzg-rs"] } diff --git a/examples/patch-testing/program/elf/riscv32im-succinct-zkvm-elf b/examples/patch-testing/program/elf/riscv32im-succinct-zkvm-elf index 78774e8713..a96b1c119c 100755 Binary files a/examples/patch-testing/program/elf/riscv32im-succinct-zkvm-elf and b/examples/patch-testing/program/elf/riscv32im-succinct-zkvm-elf differ diff --git a/examples/patch-testing/program/src/main.rs b/examples/patch-testing/program/src/main.rs index efd60f15db..8a062717d7 100644 --- a/examples/patch-testing/program/src/main.rs +++ b/examples/patch-testing/program/src/main.rs @@ -1,7 +1,9 @@ #![no_main] sp1_zkvm::entrypoint!(main); -use alloy_primitives::{address, hex, Signature}; +use alloy_primitives::Bytes; +use alloy_primitives::{address, bytes, hex}; +use alloy_primitives::{B256, B512}; use curve25519_dalek::edwards::CompressedEdwardsY as CompressedEdwardsY_dalek; use curve25519_dalek_ng::edwards::CompressedEdwardsY as CompressedEdwardsY_dalek_ng; use ed25519_consensus::{ @@ -14,7 +16,6 @@ use ed25519_dalek::{ use sha2_v0_10_6::{Digest as Digest_10_6, Sha256 as Sha256_10_6}; // use sha2_v0_10_8::{Digest as Digest_10_8, Sha256 as Sha256_10_8}; use sha2_v0_9_8::{Digest as Digest_9_8, Sha256 as 
Sha256_9_8}; -use std::str::FromStr; use tiny_keccak::{Hasher, Keccak}; use secp256k1::{ @@ -124,17 +125,35 @@ fn test_sha256() { /// Emits SECP256K1_ADD, SECP256K1_DOUBLE, and SECP256K1_DECOMPRESS syscalls. /// Source: https://github.com/alloy-rs/core/blob/adcf7adfa1f35c56e6331bab85b8c56d32a465f1/crates/primitives/src/signature/sig.rs#L620-L631 fn test_k256_patch() { - let sig = Signature::from_str( - "b91467e570a6466aa9e9876cbcd013baba02900b8979d43fe208a4a4f339f5fd6007e74cd82e037b800186422fc2da167c747ef045e5d18a5f5d4300f8e1a0291c" - ).expect("could not parse signature"); - let expected = address!("2c7536E3605D9C16a7a3D7b1898e529396a65c23"); + // A valid signature. + let precompile_input = bytes!("a79c77e94d0cd778e606e61130d9065e718eced9408e63df3a71919d5830d82d000000000000000000000000000000000000000000000000000000000000001cd685e79fb0b7ff849cbc6283dd1174b4a06f2aa556f019169a99396fc052b42e2c0ff35d08662f2685929c20ce8eaab568a404d61cf2aa837f1f431e2aef6211"); + + let msg = <&B256>::try_from(&precompile_input[0..32]).unwrap(); + let recid = precompile_input[63] - 27; + let sig = <&B512>::try_from(&precompile_input[64..128]).unwrap(); println!("cycle-tracker-start: k256 verify"); - let recovered_address = - sig.recover_address_from_msg("Some data").expect("could not recover address"); + let _: Bytes = revm_precompile::secp256k1::ecrecover(sig, recid, msg) + .map(|o| o.to_vec().into()) + .unwrap_or_default(); println!("cycle-tracker-end: k256 verify"); - assert_eq!(recovered_address, expected); + // Signature by the 0x1 private key. Confirms that multi_scalar_multiplication works as intended. + let precompile_input = bytes!("15499a876f0d57fdc360c760aec98245eba1902610140c14d5f0c3c0284e28a7000000000000000000000000000000000000000000000000000000000000001c2106219ec2e5ef9f7d5ffb303fac05c4066e66db6d501d2e5b1626f2cc8fbe1c316d4e90b09819db9c261017f18e1b5b105855922ec962fd58e83c943e4c4ba3"); + + let msg = <&B256>::try_from(&precompile_input[0..32]).unwrap(); + let recid = precompile_input[63] - 27; + let sig = <&B512>::try_from(&precompile_input[64..128]).unwrap(); + + println!("cycle-tracker-start: k256 verify"); + let recovered_address: Bytes = revm_precompile::secp256k1::ecrecover(sig, recid, msg) + .map(|o| o.to_vec().into()) + .unwrap_or_default(); + println!("cycle-tracker-end: k256 verify"); + + println!("recovered_address: {:?}", recovered_address); + + let _ = address!("ea532f4122fb1152b506b545c67e110d276e3448"); } /// Emits SECP256K1_ADD, SECP256K1_DOUBLE, and SECP256K1_DECOMPRESS syscalls. 
diff --git a/examples/patch-testing/script/Cargo.toml b/examples/patch-testing/script/Cargo.toml index 7fe41b0941..2b7474695e 100644 --- a/examples/patch-testing/script/Cargo.toml +++ b/examples/patch-testing/script/Cargo.toml @@ -5,7 +5,6 @@ edition = { workspace = true } publish = false [dependencies] -sp1-core-machine = { workspace = true, features = ["neon"] } sp1-core-executor = { workspace = true } sp1-sdk = { workspace = true } diff --git a/examples/regex/program/elf/riscv32im-succinct-zkvm-elf b/examples/regex/program/elf/riscv32im-succinct-zkvm-elf index 5793baa3e3..a6dae1343f 100755 Binary files a/examples/regex/program/elf/riscv32im-succinct-zkvm-elf and b/examples/regex/program/elf/riscv32im-succinct-zkvm-elf differ diff --git a/examples/rsa/program/Cargo.toml b/examples/rsa/program/Cargo.toml index ca7006cf0f..b7fd54c620 100644 --- a/examples/rsa/program/Cargo.toml +++ b/examples/rsa/program/Cargo.toml @@ -6,7 +6,5 @@ publish = false [dependencies] sp1-zkvm = { path = "../../../crates/zkvm/entrypoint" } -digest = "0.10.7" -rand = "0.8.5" rsa = "0.9.6" # Check for the latest version sha2 = {version = "0.10.8",package = "sha2", features = ["oid"]} # Check for the latest version diff --git a/examples/rsa/program/elf/riscv32im-succinct-zkvm-elf b/examples/rsa/program/elf/riscv32im-succinct-zkvm-elf index 3fde9f4d1d..514e936859 100755 Binary files a/examples/rsa/program/elf/riscv32im-succinct-zkvm-elf and b/examples/rsa/program/elf/riscv32im-succinct-zkvm-elf differ diff --git a/examples/rsp/program/elf/riscv32im-succinct-zkvm-elf b/examples/rsp/program/elf/riscv32im-succinct-zkvm-elf new file mode 100755 index 0000000000..6c43fe0065 Binary files /dev/null and b/examples/rsp/program/elf/riscv32im-succinct-zkvm-elf differ diff --git a/examples/rsp/script/Cargo.toml b/examples/rsp/script/Cargo.toml index 435a8d681a..a1c98cff2f 100644 --- a/examples/rsp/script/Cargo.toml +++ b/examples/rsp/script/Cargo.toml @@ -5,13 +5,10 @@ edition = "2021" [dependencies] alloy-primitives = "0.8.5" -serde_json = "1.0.94" -serde = { version = "1.0", default-features = false, features = ["derive"] } bincode = "1.3.3" clap = { version = "4.5.7", features = ["derive", "env"] } # rsp -rsp-host-executor = { git = "https://github.com/succinctlabs/rsp/", rev = "3647076" } rsp-client-executor = { git = "https://github.com/succinctlabs/rsp/", rev = "3647076" } # sp1 diff --git a/examples/ssz-withdrawals/program/Cargo.toml b/examples/ssz-withdrawals/program/Cargo.toml index 54cdca55e1..bb6195d015 100644 --- a/examples/ssz-withdrawals/program/Cargo.toml +++ b/examples/ssz-withdrawals/program/Cargo.toml @@ -8,7 +8,6 @@ publish = false sp1-zkvm = { path = "../../../crates/zkvm/entrypoint" } hex-literal = "0.4.1" ssz_rs = { version = "0.9.0", features = ["serde"] } -serde_json = "1.0.111" hex = "0.4.3" serde_with = { version = "3.4.0", features = ["hex"] } serde = { version = "1.0.195", features = ["derive"] } diff --git a/examples/ssz-withdrawals/program/elf/riscv32im-succinct-zkvm-elf b/examples/ssz-withdrawals/program/elf/riscv32im-succinct-zkvm-elf index a39c9cfd70..10b1987383 100755 Binary files a/examples/ssz-withdrawals/program/elf/riscv32im-succinct-zkvm-elf and b/examples/ssz-withdrawals/program/elf/riscv32im-succinct-zkvm-elf differ diff --git a/examples/tendermint/program/Cargo.toml b/examples/tendermint/program/Cargo.toml index 4053fe55c9..f764d2813d 100644 --- a/examples/tendermint/program/Cargo.toml +++ b/examples/tendermint/program/Cargo.toml @@ -6,8 +6,6 @@ publish = false [dependencies] sp1-zkvm = 
{ path = "../../../crates/zkvm/entrypoint" } -serde_json = { version = "1.0", default-features = false, features = ["alloc"] } -serde = { version = "1.0", default-features = false, features = ["derive"] } tendermint-light-client-verifier = { version = "0.35.0", default-features = false, features = [ "rust-crypto", ] } diff --git a/examples/tendermint/program/elf/riscv32im-succinct-zkvm-elf b/examples/tendermint/program/elf/riscv32im-succinct-zkvm-elf index c20d62e67c..be2d7b0e48 100755 Binary files a/examples/tendermint/program/elf/riscv32im-succinct-zkvm-elf and b/examples/tendermint/program/elf/riscv32im-succinct-zkvm-elf differ diff --git a/examples/tendermint/script/Cargo.toml b/examples/tendermint/script/Cargo.toml index e5c6ff4ff4..af778202d0 100644 --- a/examples/tendermint/script/Cargo.toml +++ b/examples/tendermint/script/Cargo.toml @@ -5,20 +5,13 @@ edition = { workspace = true } publish = false [dependencies] -sp1-core-machine = { workspace = true, features = ["neon"] } sp1-sdk = { workspace = true } -reqwest = { version = "0.11", features = ["json"] } -tokio = { version = "1", features = ["full"] } -serde_json = { version = "1.0", default-features = false, features = ["alloc"] } serde = { version = "1.0", default-features = false, features = ["derive"] } -tendermint = { version = "0.35.0", default-features = false } +serde_json = { version = "1.0", default-features = false, features = ["alloc"] } tendermint-light-client-verifier = { version = "0.35.0", default-features = false, features = [ "rust-crypto", ] } -bincode = "1.3.3" -itertools = "0.12.1" serde_cbor = "0.11.2" -sha2 = "0.10.8" [build-dependencies] sp1-build = { workspace = true } diff --git a/rustfmt.toml b/rustfmt.toml index 66b2930ca9..21efa865c0 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,11 +1,11 @@ reorder_imports = true -imports_granularity = "Crate" +# imports_granularity = "Crate" use_small_heuristics = "Max" -comment_width = 100 -wrap_comments = true -binop_separator = "Back" -trailing_comma = "Vertical" -trailing_semicolon = false +# comment_width = 100 +# wrap_comments = true +# binop_separator = "Back" +# trailing_comma = "Vertical" +# trailing_semicolon = false use_field_init_shorthand = true -format_code_in_doc_comments = true -doc_comment_code_block_width = 100 \ No newline at end of file +# format_code_in_doc_comments = true +# doc_comment_code_block_width = 100 diff --git a/tests/Cargo.lock b/tests/Cargo.lock index 7477dcdfee..065277d8c4 100644 --- a/tests/Cargo.lock +++ b/tests/Cargo.lock @@ -2,12 +2,36 @@ # It is not intended for manual editing. 
version = 3 +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "allocator-api2" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" + [[package]] name = "anyhow" version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + [[package]] name = "arrayvec" version = "0.7.6" @@ -20,6 +44,12 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + [[package]] name = "base64ct" version = "1.6.0" @@ -52,7 +82,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array", + "generic-array 0.14.7", ] [[package]] @@ -61,7 +91,7 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array", + "generic-array 0.14.7", ] [[package]] @@ -75,7 +105,9 @@ dependencies = [ name = "bls12381-add-test" version = "1.1.0" dependencies = [ - "num", + "common-test-utils", + "sp1-curves", + "sp1-lib 2.0.0", "sp1-zkvm", ] @@ -93,7 +125,6 @@ version = "1.1.1" dependencies = [ "num-bigint", "rand", - "sp1-derive", "sp1-zkvm", ] @@ -103,7 +134,6 @@ version = "1.1.1" dependencies = [ "num-bigint", "rand", - "sp1-derive", "sp1-zkvm", ] @@ -113,7 +143,6 @@ version = "1.1.1" dependencies = [ "num-bigint", "rand", - "sp1-derive", "sp1-zkvm", ] @@ -122,6 +151,7 @@ name = "bls12381-mul-test" version = "1.1.0" dependencies = [ "sp1-derive", + "sp1-lib 2.0.0", "sp1-zkvm", ] @@ -129,7 +159,9 @@ dependencies = [ name = "bn254-add-test" version = "1.1.0" dependencies = [ - "num", + "common-test-utils", + "sp1-curves", + "sp1-lib 2.0.0", "sp1-zkvm", ] @@ -137,8 +169,6 @@ dependencies = [ name = "bn254-double-test" version = "1.1.0" dependencies = [ - "hex-literal", - "num", "sp1-zkvm", ] @@ -148,7 +178,6 @@ version = "1.1.1" dependencies = [ "num-bigint", "rand", - "sp1-derive", "sp1-zkvm", ] @@ -158,7 +187,6 @@ version = "1.1.1" dependencies = [ "num-bigint", "rand", - "sp1-derive", "sp1-zkvm", ] @@ -168,7 +196,6 @@ version = "1.1.1" dependencies = [ "num-bigint", "rand", - "sp1-derive", "sp1-zkvm", ] @@ -177,6 +204,7 @@ name = "bn254-mul-test" version = "1.1.0" dependencies = [ "sp1-derive", + "sp1-lib 2.0.0", "sp1-zkvm", ] @@ -213,12 +241,26 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "common-test-utils" +version = "1.1.0" +dependencies = [ + "num-bigint", + "sp1-lib 2.0.0", 
+] + [[package]] name = "const-oid" version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + [[package]] name = "cpufeatures" version = "0.2.14" @@ -228,12 +270,49 @@ dependencies = [ "libc", ] +[[package]] +name = "crossbeam-deque" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" + [[package]] name = "crunchy" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array 0.14.7", + "rand_core", + "subtle", + "zeroize", +] + [[package]] name = "crypto-bigint" version = "0.6.0-rc.5" @@ -251,10 +330,25 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array", + "generic-array 0.14.7", "typenum", ] +[[package]] +name = "curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "fiat-crypto", + "rustc_version", + "subtle", + "zeroize", +] + [[package]] name = "curve25519-dalek" version = "4.1.3" @@ -263,7 +357,7 @@ dependencies = [ "anyhow", "cfg-if", "cpufeatures", - "curve25519-dalek-derive", + "curve25519-dalek-derive 0.1.1 (git+https://github.com/sp1-patches/curve25519-dalek?branch=patch-curve25519-v4.1.3)", "digest 0.10.7", "fiat-crypto", "rustc_version", @@ -272,6 +366,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "curve25519-dalek-derive" version = "0.1.1" @@ -305,6 +410,84 @@ dependencies = [ "sp1-zkvm", ] +[[package]] +name = "dashu" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85b3e5ac1e23ff1995ef05b912e2b012a8784506987a2651552db2c73fb3d7e0" +dependencies = [ + "dashu-base", + "dashu-float", + "dashu-int", + "dashu-macros", + "dashu-ratio", + "rustversion", +] + +[[package]] +name = "dashu-base" 
+version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0b80bf6b85aa68c58ffea2ddb040109943049ce3fbdf4385d0380aef08ef289" + +[[package]] +name = "dashu-float" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85078445a8dbd2e1bd21f04a816f352db8d333643f0c9b78ca7c3d1df71063e7" +dependencies = [ + "dashu-base", + "dashu-int", + "num-modular", + "num-order", + "rustversion", + "static_assertions", +] + +[[package]] +name = "dashu-int" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee99d08031ca34a4d044efbbb21dff9b8c54bb9d8c82a189187c0651ffdb9fbf" +dependencies = [ + "cfg-if", + "dashu-base", + "num-modular", + "num-order", + "rustversion", + "static_assertions", +] + +[[package]] +name = "dashu-macros" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93381c3ef6366766f6e9ed9cf09e4ef9dec69499baf04f0c60e70d653cf0ab10" +dependencies = [ + "dashu-base", + "dashu-float", + "dashu-int", + "dashu-ratio", + "paste", + "proc-macro2", + "quote", + "rustversion", +] + +[[package]] +name = "dashu-ratio" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e33b04dd7ce1ccf8a02a69d3419e354f2bbfdf4eb911a0b7465487248764c9" +dependencies = [ + "dashu-base", + "dashu-float", + "dashu-int", + "num-modular", + "num-order", + "rustversion", +] + [[package]] name = "der" version = "0.7.9" @@ -341,7 +524,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array", + "generic-array 0.14.7", ] [[package]] @@ -351,7 +534,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", + "const-oid", "crypto-common", + "subtle", +] + +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest 0.10.7", + "elliptic-curve", + "rfc6979", + "signature", + "spki", ] [[package]] @@ -398,7 +597,7 @@ name = "ed25519-dalek" version = "2.1.1" source = "git+https://github.com/sp1-patches/curve25519-dalek?branch=patch-curve25519-v4.1.3#1d73fd95f1a76bee8f46643cf78bbccc1fb06ede" dependencies = [ - "curve25519-dalek", + "curve25519-dalek 4.1.3 (git+https://github.com/sp1-patches/curve25519-dalek?branch=patch-curve25519-v4.1.3)", "ed25519", "serde", "sha2 0.10.8", @@ -421,12 +620,41 @@ version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint 0.5.5", + "digest 0.10.7", + "ff", + "generic-array 0.14.7", + "group", + "pkcs8", + "rand_core", + "sec1", + "subtle", + "zeroize", +] + [[package]] name = "equivalent" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +[[package]] +name = "ff" +version = "0.13.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +dependencies = [ + "rand_core", + "subtle", +] + [[package]] name = "fiat-crypto" version = "0.2.9" @@ -524,6 +752,17 @@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", + "zeroize", +] + +[[package]] +name = "generic-array" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96512db27971c2c3eece70a1e106fbe6c87760234e31e8f7e5634912fe52794a" +dependencies = [ + "serde", + "typenum", ] [[package]] @@ -537,12 +776,40 @@ dependencies = [ "wasi", ] +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core", + "subtle", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", + "allocator-api2", + "serde", +] + [[package]] name = "hashbrown" version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = "hex" version = "0.4.3" @@ -562,6 +829,15 @@ dependencies = [ "sp1-zkvm", ] +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "impl-trait-for-tuples" version = "0.2.2" @@ -580,7 +856,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown", + "hashbrown 0.15.0", ] [[package]] @@ -607,6 +883,20 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +[[package]] +name = "k256" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" +dependencies = [ + "cfg-if", + "ecdsa", + "elliptic-curve", + "once_cell", + "sha2 0.10.8", + "signature", +] + [[package]] name = "keccak-permute-test" version = "1.1.0" @@ -646,6 +936,15 @@ version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +[[package]] +name = "ntapi" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" +dependencies = [ + "winapi", +] + [[package]] name = "num" version = "0.4.3" @@ -716,6 +1015,21 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-modular" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17bb261bf36fa7d83f4c294f834e91256769097b3cb505d44831e0a179ac647f" + +[[package]] +name = "num-order" +version = "1.2.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "537b596b97c40fcf8056d153049eb22f481c17ebce72a513ec9286e4986d1bb6" +dependencies = [ + "num-modular", +] + [[package]] name = "num-rational" version = "0.4.2" @@ -751,11 +1065,19 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" +[[package]] +name = "p3-air" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v3#0f8d103e67dbec3e84bac1de6d00bf0f2fb80de0" +dependencies = [ + "p3-field", + "p3-matrix", +] + [[package]] name = "p3-baby-bear" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46965470aac1cddfe52f535424b59d52f2fffef0fdeb9dbed19da39b1d8f048a" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v3#0f8d103e67dbec3e84bac1de6d00bf0f2fb80de0" dependencies = [ "num-bigint", "p3-field", @@ -766,11 +1088,36 @@ dependencies = [ "serde", ] +[[package]] +name = "p3-challenger" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v3#0f8d103e67dbec3e84bac1de6d00bf0f2fb80de0" +dependencies = [ + "p3-field", + "p3-maybe-rayon", + "p3-symmetric", + "p3-util", + "serde", + "tracing", +] + +[[package]] +name = "p3-commit" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v3#0f8d103e67dbec3e84bac1de6d00bf0f2fb80de0" +dependencies = [ + "itertools 0.12.1", + "p3-challenger", + "p3-field", + "p3-matrix", + "p3-util", + "serde", +] + [[package]] name = "p3-dft" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510095701819d83c9509fe825bbf1ebfe50426ae75149df5fe1dcfd18261323a" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v3#0f8d103e67dbec3e84bac1de6d00bf0f2fb80de0" dependencies = [ "p3-field", "p3-matrix", @@ -781,9 +1128,8 @@ dependencies = [ [[package]] name = "p3-field" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61f1977a0a65789f719aa824119c332c4676b000bdbfe94d312fb6244a70d601" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v3#0f8d103e67dbec3e84bac1de6d00bf0f2fb80de0" dependencies = [ "itertools 0.12.1", "num-bigint", @@ -793,11 +1139,38 @@ dependencies = [ "serde", ] +[[package]] +name = "p3-fri" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v3#0f8d103e67dbec3e84bac1de6d00bf0f2fb80de0" +dependencies = [ + "itertools 0.12.1", + "p3-challenger", + "p3-commit", + "p3-dft", + "p3-field", + "p3-interpolation", + "p3-matrix", + "p3-maybe-rayon", + "p3-util", + "serde", + "tracing", +] + +[[package]] +name = "p3-interpolation" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v3#0f8d103e67dbec3e84bac1de6d00bf0f2fb80de0" +dependencies = [ + "p3-field", + "p3-matrix", + "p3-util", +] + [[package]] name = "p3-matrix" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d548ee0b834f8e2ebc5037073acd101a3b0ca41a2d1d28a15ba0ccd9059495b0" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v3#0f8d103e67dbec3e84bac1de6d00bf0f2fb80de0" dependencies = [ "itertools 0.12.1", "p3-field", @@ -810,15 +1183,16 @@ dependencies = [ [[package]] name = "p3-maybe-rayon" -version = "0.1.3-succinct" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "55f5575d3d61bedb3e05681abb0f36b8bb339d65aa395d50756bfa64e9cd3f46" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v3#0f8d103e67dbec3e84bac1de6d00bf0f2fb80de0" +dependencies = [ + "rayon", +] [[package]] name = "p3-mds" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6e57ed310d59245f93e24ee805ea7aa16fc9c505551b76a15f5e50f29d177e" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v3#0f8d103e67dbec3e84bac1de6d00bf0f2fb80de0" dependencies = [ "itertools 0.12.1", "p3-dft", @@ -829,35 +1203,67 @@ dependencies = [ "rand", ] +[[package]] +name = "p3-merkle-tree" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v3#0f8d103e67dbec3e84bac1de6d00bf0f2fb80de0" +dependencies = [ + "itertools 0.12.1", + "p3-commit", + "p3-field", + "p3-matrix", + "p3-maybe-rayon", + "p3-symmetric", + "p3-util", + "serde", + "tracing", +] + [[package]] name = "p3-poseidon2" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adaba6f14c197203530e233badce0ca1126ba3bf3c9ff766505b497bdad0bee1" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v3#0f8d103e67dbec3e84bac1de6d00bf0f2fb80de0" dependencies = [ "gcd", "p3-field", "p3-mds", "p3-symmetric", "rand", + "serde", ] [[package]] name = "p3-symmetric" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ecc4282566eb14f48be7707f6745c4dff6be664984d59ec0fb1849cd82b5c2" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v3#0f8d103e67dbec3e84bac1de6d00bf0f2fb80de0" dependencies = [ "itertools 0.12.1", "p3-field", "serde", ] +[[package]] +name = "p3-uni-stark" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v3#0f8d103e67dbec3e84bac1de6d00bf0f2fb80de0" +dependencies = [ + "itertools 0.12.1", + "p3-air", + "p3-challenger", + "p3-commit", + "p3-dft", + "p3-field", + "p3-matrix", + "p3-maybe-rayon", + "p3-util", + "serde", + "tracing", +] + [[package]] name = "p3-util" -version = "0.1.3-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79f3fef0e00d9d7246385e758c4cd39b4efcbbcea31752471491ab502631385e" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v3#0f8d103e67dbec3e84bac1de6d00bf0f2fb80de0" dependencies = [ "serde", ] @@ -866,7 +1272,6 @@ dependencies = [ name = "panic-test" version = "1.1.0" dependencies = [ - "sp1-derive", "sp1-zkvm", ] @@ -1041,6 +1446,45 @@ dependencies = [ "getrandom", ] +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "rayon-scan" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f87cc11a0140b4b0da0ffc889885760c61b13672d80a908920b2c0df078fa14" +dependencies = [ + "rayon", +] + +[[package]] +name = "rfc6979" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + [[package]] name = "rustc_version" version = "0.4.1" @@ -1050,6 +1494,12 @@ dependencies = [ "semver", ] +[[package]] +name = "rustversion" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" + [[package]] name = "ryu" version = "1.0.18" @@ -1080,12 +1530,27 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array 0.14.7", + "pkcs8", + "subtle", + "zeroize", +] + [[package]] name = "secp256k1-add-test" version = "1.1.0" dependencies = [ - "hex-literal", - "num", + "common-test-utils", + "sp1-curves", + "sp1-lib 2.0.0", "sp1-zkvm", ] @@ -1100,8 +1565,6 @@ dependencies = [ name = "secp256k1-double-test" version = "1.1.0" dependencies = [ - "hex-literal", - "num", "sp1-zkvm", ] @@ -1234,6 +1697,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ + "digest 0.10.7", "rand_core", ] @@ -1247,11 +1711,30 @@ dependencies = [ "scale-info", ] +[[package]] +name = "sp1-curves" +version = "2.0.0" +dependencies = [ + "cfg-if", + "curve25519-dalek 4.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "dashu", + "elliptic-curve", + "generic-array 1.1.0", + "itertools 0.13.0", + "k256", + "num", + "p3-field", + "serde", + "snowbridge-amcl", + "sp1-primitives", + "sp1-stark", + "typenum", +] + [[package]] name = "sp1-derive" version = "2.0.0" dependencies = [ - "proc-macro2", "quote", "syn 1.0.109", ] @@ -1274,12 +1757,8 @@ dependencies = [ name = "sp1-lib" version = "2.0.0" dependencies = [ - "anyhow", "bincode", - "cfg-if", - "hex", "serde", - "snowbridge-amcl", ] [[package]] @@ -1288,7 +1767,6 @@ version = "2.0.0" dependencies = [ "bincode", "hex", - "itertools 0.13.0", "lazy_static", "num-bigint", "p3-baby-bear", @@ -1299,20 +1777,51 @@ dependencies = [ "sha2 0.10.8", ] +[[package]] +name = "sp1-stark" +version = "2.0.0" +dependencies = [ + "arrayref", + "getrandom", + "hashbrown 0.14.5", + "itertools 0.13.0", + "num-traits", + "p3-air", + "p3-baby-bear", + "p3-challenger", + "p3-commit", + "p3-dft", + "p3-field", + "p3-fri", + "p3-matrix", + "p3-maybe-rayon", + "p3-merkle-tree", + "p3-poseidon2", + "p3-symmetric", + "p3-uni-stark", + "p3-util", + "rayon-scan", + "serde", + "sp1-derive", + "sp1-primitives", + "strum", + "strum_macros", + "sysinfo", + "thiserror", + "tracing", +] + [[package]] name = "sp1-zkvm" version = "2.0.0" dependencies = [ - "bincode", "cfg-if", "getrandom", "lazy_static", "libm", - "once_cell", "p3-baby-bear", "p3-field", "rand", - "serde", "sha2 0.10.8", "sp1-lib 2.0.0", "sp1-primitives", @@ -1328,6 +1837,31 @@ dependencies = [ "der", ] +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strum" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" + +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.79", +] + [[package]] name = "subtle" version = "2.6.1" @@ -1371,6 +1905,21 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sysinfo" +version = "0.30.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a5b4ddaee55fb2bea2bf0e5000747e5f5c0de765e5a5ff87f4cd106439f4bb3" +dependencies = [ + "cfg-if", + "core-foundation-sys", + "libc", + "ntapi", + "once_cell", + "rayon", + "windows", +] + [[package]] name = "tendermint" version = "0.34.1" @@ -1442,6 +1991,26 @@ dependencies = [ "time", ] +[[package]] +name = "thiserror" +version = "1.0.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "time" version = "0.3.36" @@ -1538,7 +2107,7 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" name = "uint256-arith-program" version = "1.1.0" dependencies = [ - "crypto-bigint", + "crypto-bigint 0.6.0-rc.5", "sp1-derive", "sp1-zkvm", ] @@ -1554,7 +2123,6 @@ name = "verify-proof" version = "1.1.0" dependencies = [ "hex", - "hex-literal", "sha2 0.10.8", "sp1-zkvm", ] @@ -1571,6 +2139,111 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" +dependencies = [ + "windows-core", + "windows-targets", +] + +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + 
"windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + [[package]] name = "winnow" version = "0.6.20" diff --git a/tests/Cargo.toml b/tests/Cargo.toml index b1f20963c8..277a393152 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -1,5 +1,6 @@ [workspace] members = [ + "common", "bls12381-add", "bls12381-decompress", "bls12381-double", diff --git a/tests/Makefile b/tests/Makefile index 045cd48e83..4b4531e9f1 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -2,7 +2,7 @@ SHELL := /bin/bash all: @for dir in */ ; do \ - if [ "$${dir}" == "target/" ]; then \ + if [ "$${dir}" == "target/" ] || [ "$${dir}" == "common/" ]; then \ continue; \ fi; \ echo "Building in $${dir}..."; \ diff --git a/tests/bls12381-add/Cargo.toml b/tests/bls12381-add/Cargo.toml index 5b2527dfd5..4f7134660a 100644 --- a/tests/bls12381-add/Cargo.toml +++ b/tests/bls12381-add/Cargo.toml @@ -5,5 +5,7 @@ edition = "2021" publish = false [dependencies] +common-test-utils = { path = "../common" } sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } -num = { version = "0.4.1", default-features = false } +sp1-lib = { path = "../../crates/zkvm/lib" } +sp1-curves = { path = "../../crates/curves" } diff --git a/tests/bls12381-add/elf/riscv32im-succinct-zkvm-elf b/tests/bls12381-add/elf/riscv32im-succinct-zkvm-elf index add0efd030..8338541b89 100755 Binary files a/tests/bls12381-add/elf/riscv32im-succinct-zkvm-elf and b/tests/bls12381-add/elf/riscv32im-succinct-zkvm-elf differ diff --git a/tests/bls12381-add/src/main.rs b/tests/bls12381-add/src/main.rs index 26527dd744..710e6394a2 100644 --- a/tests/bls12381-add/src/main.rs +++ b/tests/bls12381-add/src/main.rs @@ -1,50 +1,45 @@ #![no_main] -use sp1_zkvm::syscalls::syscall_bls12381_add; - +use sp1_curves::params::FieldParameters; +use 
sp1_lib::bls12381::Bls12381Point; sp1_zkvm::entrypoint!(main); -pub fn main() { - for _ in 0..4 { - // generator. - // 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507 - // 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569 - let mut a: [u8; 96] = [ - 187, 198, 34, 219, 10, 240, 58, 251, 239, 26, 122, 249, 63, 232, 85, 108, 88, 172, 27, - 23, 63, 58, 78, 161, 5, 185, 116, 151, 79, 140, 104, 195, 15, 172, 169, 79, 140, 99, - 149, 38, 148, 215, 151, 49, 167, 211, 241, 23, 225, 231, 197, 70, 41, 35, 170, 12, 228, - 138, 136, 162, 68, 199, 60, 208, 237, 179, 4, 44, 203, 24, 219, 0, 246, 10, 208, 213, - 149, 224, 245, 252, 228, 138, 29, 116, 237, 48, 158, 160, 241, 160, 170, 227, 129, 244, - 179, 8, - ]; - - // 2 * generator. - // 838589206289216005799424730305866328161735431124665289961769162861615689790485775997575391185127590486775437397838 - // 3450209970729243429733164009999191867485184320918914219895632678707687208996709678363578245114137957452475385814312 - let b: [u8; 96] = [ - 78, 15, 191, 41, 85, 140, 154, 195, 66, 124, 28, 143, 187, 117, 143, 226, 42, 166, 88, - 195, 10, 45, 144, 67, 37, 1, 40, 145, 48, 219, 33, 151, 12, 69, 169, 80, 235, 200, 8, - 136, 70, 103, 77, 144, 234, 203, 114, 5, 40, 157, 116, 121, 25, 136, 134, 186, 27, 189, - 22, 205, 212, 217, 86, 76, 106, 215, 95, 29, 2, 185, 59, 247, 97, 228, 112, 134, 203, - 62, 186, 34, 56, 142, 157, 119, 115, 166, 253, 34, 163, 115, 198, 171, 140, 157, 106, - 22, - ]; - - syscall_bls12381_add(a.as_mut_ptr() as *mut [u32; 24], b.as_ptr() as *const [u32; 24]); +// generator. +// 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507 +// 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569 +const A: [u8; 96] = [ + 187, 198, 34, 219, 10, 240, 58, 251, 239, 26, 122, 249, 63, 232, 85, 108, 88, 172, 27, 23, 63, + 58, 78, 161, 5, 185, 116, 151, 79, 140, 104, 195, 15, 172, 169, 79, 140, 99, 149, 38, 148, 215, + 151, 49, 167, 211, 241, 23, 225, 231, 197, 70, 41, 35, 170, 12, 228, 138, 136, 162, 68, 199, + 60, 208, 237, 179, 4, 44, 203, 24, 219, 0, 246, 10, 208, 213, 149, 224, 245, 252, 228, 138, 29, + 116, 237, 48, 158, 160, 241, 160, 170, 227, 129, 244, 179, 8, +]; + +// 2 * generator. +// 838589206289216005799424730305866328161735431124665289961769162861615689790485775997575391185127590486775437397838 +// 3450209970729243429733164009999191867485184320918914219895632678707687208996709678363578245114137957452475385814312 +const B: [u8; 96] = [ + 78, 15, 191, 41, 85, 140, 154, 195, 66, 124, 28, 143, 187, 117, 143, 226, 42, 166, 88, 195, 10, + 45, 144, 67, 37, 1, 40, 145, 48, 219, 33, 151, 12, 69, 169, 80, 235, 200, 8, 136, 70, 103, 77, + 144, 234, 203, 114, 5, 40, 157, 116, 121, 25, 136, 134, 186, 27, 189, 22, 205, 212, 217, 86, + 76, 106, 215, 95, 29, 2, 185, 59, 247, 97, 228, 112, 134, 203, 62, 186, 34, 56, 142, 157, 119, + 115, 166, 253, 34, 163, 115, 198, 171, 140, 157, 106, 22, +]; + +// 3 * generator. 
+// 1527649530533633684281386512094328299672026648504329745640827351945739272160755686119065091946435084697047221031460 +// 487897572011753812113448064805964756454529228648704488481988876974355015977479905373670519228592356747638779818193 +const C: [u8; 96] = [ + 36, 82, 78, 2, 201, 192, 210, 150, 155, 23, 162, 44, 11, 122, 116, 129, 249, 63, 91, 51, 81, + 10, 120, 243, 241, 165, 233, 155, 31, 214, 18, 177, 151, 150, 169, 236, 45, 33, 101, 23, 19, + 240, 209, 249, 8, 227, 236, 9, 209, 48, 174, 144, 5, 59, 71, 163, 92, 244, 74, 99, 108, 37, 69, + 231, 230, 59, 212, 15, 49, 39, 156, 157, 127, 9, 195, 171, 221, 12, 154, 166, 12, 248, 197, + 137, 51, 98, 132, 138, 159, 176, 245, 166, 211, 128, 43, 3, +]; - // 3 * generator. - // 1527649530533633684281386512094328299672026648504329745640827351945739272160755686119065091946435084697047221031460 - // 487897572011753812113448064805964756454529228648704488481988876974355015977479905373670519228592356747638779818193 - let c: [u8; 96] = [ - 36, 82, 78, 2, 201, 192, 210, 150, 155, 23, 162, 44, 11, 122, 116, 129, 249, 63, 91, - 51, 81, 10, 120, 243, 241, 165, 233, 155, 31, 214, 18, 177, 151, 150, 169, 236, 45, 33, - 101, 23, 19, 240, 209, 249, 8, 227, 236, 9, 209, 48, 174, 144, 5, 59, 71, 163, 92, 244, - 74, 99, 108, 37, 69, 231, 230, 59, 212, 15, 49, 39, 156, 157, 127, 9, 195, 171, 221, - 12, 154, 166, 12, 248, 197, 137, 51, 98, 132, 138, 159, 176, 245, 166, 211, 128, 43, 3, - ]; - - assert_eq!(a, c); - } - - println!("done"); +pub fn main() { + common_test_utils::weierstrass_add::test_weierstrass_add::< + Bls12381Point, + { sp1_lib::bls12381::N }, + >(&A, &B, &C, sp1_curves::weierstrass::bls12_381::Bls12381BaseField::MODULUS); } diff --git a/tests/bls12381-decompress/elf/riscv32im-succinct-zkvm-elf b/tests/bls12381-decompress/elf/riscv32im-succinct-zkvm-elf index 6891d2cfc2..338b1294f5 100755 Binary files a/tests/bls12381-decompress/elf/riscv32im-succinct-zkvm-elf and b/tests/bls12381-decompress/elf/riscv32im-succinct-zkvm-elf differ diff --git a/tests/bls12381-double/elf/riscv32im-succinct-zkvm-elf b/tests/bls12381-double/elf/riscv32im-succinct-zkvm-elf index 649ee6fc26..e6069d00d7 100755 Binary files a/tests/bls12381-double/elf/riscv32im-succinct-zkvm-elf and b/tests/bls12381-double/elf/riscv32im-succinct-zkvm-elf differ diff --git a/tests/bls12381-fp/Cargo.toml b/tests/bls12381-fp/Cargo.toml index 485560bb45..1fe608fc9e 100644 --- a/tests/bls12381-fp/Cargo.toml +++ b/tests/bls12381-fp/Cargo.toml @@ -7,6 +7,5 @@ publish = false [dependencies] sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } -sp1-derive = { path = "../../crates/derive" } num-bigint = "0.4.6" rand = "0.8.5" diff --git a/tests/bls12381-fp/elf/riscv32im-succinct-zkvm-elf b/tests/bls12381-fp/elf/riscv32im-succinct-zkvm-elf index 0f499c13e9..0dc6cb73de 100755 Binary files a/tests/bls12381-fp/elf/riscv32im-succinct-zkvm-elf and b/tests/bls12381-fp/elf/riscv32im-succinct-zkvm-elf differ diff --git a/tests/bls12381-fp2-addsub/Cargo.toml b/tests/bls12381-fp2-addsub/Cargo.toml index 4dc5a5bd4d..2d4d21f905 100644 --- a/tests/bls12381-fp2-addsub/Cargo.toml +++ b/tests/bls12381-fp2-addsub/Cargo.toml @@ -7,6 +7,5 @@ publish = false [dependencies] sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } -sp1-derive = { path = "../../crates/derive" } num-bigint = "0.4.6" rand = "0.8.5" diff --git a/tests/bls12381-fp2-addsub/elf/riscv32im-succinct-zkvm-elf b/tests/bls12381-fp2-addsub/elf/riscv32im-succinct-zkvm-elf index 7602dd1d51..0528a0304d 100755 Binary files 
a/tests/bls12381-fp2-addsub/elf/riscv32im-succinct-zkvm-elf and b/tests/bls12381-fp2-addsub/elf/riscv32im-succinct-zkvm-elf differ diff --git a/tests/bls12381-fp2-mul/Cargo.toml b/tests/bls12381-fp2-mul/Cargo.toml index 957c2dc490..dee3919614 100644 --- a/tests/bls12381-fp2-mul/Cargo.toml +++ b/tests/bls12381-fp2-mul/Cargo.toml @@ -7,6 +7,5 @@ publish = false [dependencies] sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } -sp1-derive = { path = "../../crates/derive" } num-bigint = "0.4.6" rand = "0.8.5" diff --git a/tests/bls12381-fp2-mul/elf/riscv32im-succinct-zkvm-elf b/tests/bls12381-fp2-mul/elf/riscv32im-succinct-zkvm-elf index 95247d98ad..aefd01db8f 100755 Binary files a/tests/bls12381-fp2-mul/elf/riscv32im-succinct-zkvm-elf and b/tests/bls12381-fp2-mul/elf/riscv32im-succinct-zkvm-elf differ diff --git a/tests/bls12381-mul/Cargo.toml b/tests/bls12381-mul/Cargo.toml index 3621ac12e0..6ebaf41bd7 100644 --- a/tests/bls12381-mul/Cargo.toml +++ b/tests/bls12381-mul/Cargo.toml @@ -6,4 +6,5 @@ publish = false [dependencies] sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-lib = { path = "../../crates/zkvm/lib" } sp1-derive = { path = "../../crates/derive" } diff --git a/tests/bls12381-mul/elf/riscv32im-succinct-zkvm-elf b/tests/bls12381-mul/elf/riscv32im-succinct-zkvm-elf index f2088e19bd..5102c5ce76 100755 Binary files a/tests/bls12381-mul/elf/riscv32im-succinct-zkvm-elf and b/tests/bls12381-mul/elf/riscv32im-succinct-zkvm-elf differ diff --git a/tests/bls12381-mul/src/main.rs b/tests/bls12381-mul/src/main.rs index a52b908c2f..003061e195 100644 --- a/tests/bls12381-mul/src/main.rs +++ b/tests/bls12381-mul/src/main.rs @@ -1,8 +1,8 @@ #![no_main] sp1_zkvm::entrypoint!(main); -use sp1_zkvm::lib::bls12381::Bls12381AffinePoint; -use sp1_zkvm::lib::utils::AffinePoint; +use sp1_lib::bls12381::Bls12381Point; +use sp1_lib::utils::AffinePoint; #[sp1_derive::cycle_tracker] pub fn main() { @@ -19,15 +19,15 @@ pub fn main() { 179, 8, ]; - let mut a_point = Bls12381AffinePoint::from_le_bytes(&a); + let mut a_point = Bls12381Point::from_le_bytes(&a); // scalar. // 3 let scalar: [u32; 12] = [3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; println!("cycle-tracker-start: bls12381_mul"); a_point.mul_assign(&scalar).unwrap(); println!("cycle-tracker-end: bls12381_mul"); // 3 * generator.
// 1527649530533633684281386512094328299672026648504329745640827351945739272160755686119065091946435084697047221031460 diff --git a/tests/bn254-add/Cargo.toml b/tests/bn254-add/Cargo.toml index b77bc102d9..e22a7cf7fa 100644 --- a/tests/bn254-add/Cargo.toml +++ b/tests/bn254-add/Cargo.toml @@ -5,5 +5,7 @@ edition = "2021" publish = false [dependencies] +common-test-utils = { path = "../common" } +sp1-lib = { path = "../../crates/zkvm/lib" } sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } -num = { version = "0.4.1", default-features = false } +sp1-curves = { path = "../../crates/curves" } diff --git a/tests/bn254-add/elf/riscv32im-succinct-zkvm-elf b/tests/bn254-add/elf/riscv32im-succinct-zkvm-elf index c11e7cfa19..bf87b4456c 100755 Binary files a/tests/bn254-add/elf/riscv32im-succinct-zkvm-elf and b/tests/bn254-add/elf/riscv32im-succinct-zkvm-elf differ diff --git a/tests/bn254-add/src/main.rs b/tests/bn254-add/src/main.rs index 4343d67a2d..e7021daf82 100644 --- a/tests/bn254-add/src/main.rs +++ b/tests/bn254-add/src/main.rs @@ -1,44 +1,42 @@ #![no_main] -use sp1_zkvm::syscalls::syscall_bn254_add; - +use sp1_curves::params::FieldParameters; +use sp1_lib::bn254::Bn254Point; sp1_zkvm::entrypoint!(main); -pub fn main() { - for _ in 0..4 { - // generator. - // 1 - // 2 - let mut a: [u8; 64] = [ - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, - ]; - - // 2 * generator. - // 1368015179489954701390400359078579693043519447331113978918064868415326638035 - // 9918110051302171585080402603319702774565515993150576347155970296011118125764 - let b: [u8; 64] = [ - 211, 207, 135, 109, 193, 8, 194, 211, 168, 28, 135, 22, 169, 22, 120, 217, 133, 21, 24, - 104, 91, 4, 133, 155, 2, 26, 19, 46, 231, 68, 6, 3, 196, 162, 24, 90, 122, 191, 62, - 255, 199, 143, 83, 227, 73, 164, 166, 104, 10, 156, 174, 178, 150, 95, 132, 231, 146, - 124, 10, 14, 140, 115, 237, 21, - ]; - - syscall_bn254_add(a.as_mut_ptr() as *mut [u32; 16], b.as_ptr() as *const [u32; 16]); +// generator. +// 1 +// 2 +const A: [u8; 64] = [ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +]; + +// 2 * generator. +// 1368015179489954701390400359078579693043519447331113978918064868415326638035 +// 9918110051302171585080402603319702774565515993150576347155970296011118125764 +const B: [u8; 64] = [ + 211, 207, 135, 109, 193, 8, 194, 211, 168, 28, 135, 22, 169, 22, 120, 217, 133, 21, 24, 104, + 91, 4, 133, 155, 2, 26, 19, 46, 231, 68, 6, 3, 196, 162, 24, 90, 122, 191, 62, 255, 199, 143, + 83, 227, 73, 164, 166, 104, 10, 156, 174, 178, 150, 95, 132, 231, 146, 124, 10, 14, 140, 115, + 237, 21, +]; + +// 3 * generator. +// 3353031288059533942658390886683067124040920775575537747144343083137631628272 +// 19321533766552368860946552437480515441416830039777911637913418824951667761761 +const C: [u8; 64] = [ + 240, 171, 21, 25, 150, 85, 211, 242, 121, 230, 184, 21, 71, 216, 21, 147, 21, 189, 182, 177, + 188, 50, 2, 244, 63, 234, 107, 197, 154, 191, 105, 7, 97, 34, 254, 217, 61, 255, 241, 205, 87, + 91, 156, 11, 180, 99, 158, 49, 117, 100, 8, 141, 124, 219, 79, 85, 41, 148, 72, 224, 190, 153, + 183, 42, +]; - // 3 * generator. 
- // 3353031288059533942658390886683067124040920775575537747144343083137631628272 - // 19321533766552368860946552437480515441416830039777911637913418824951667761761 - let c: [u8; 64] = [ - 240, 171, 21, 25, 150, 85, 211, 242, 121, 230, 184, 21, 71, 216, 21, 147, 21, 189, 182, - 177, 188, 50, 2, 244, 63, 234, 107, 197, 154, 191, 105, 7, 97, 34, 254, 217, 61, 255, - 241, 205, 87, 91, 156, 11, 180, 99, 158, 49, 117, 100, 8, 141, 124, 219, 79, 85, 41, - 148, 72, 224, 190, 153, 183, 42, - ]; - - assert_eq!(a, c); - } - - println!("done"); +pub fn main() { + common_test_utils::weierstrass_add::test_weierstrass_add::( + &A, + &B, + &C, + sp1_curves::weierstrass::bn254::Bn254BaseField::MODULUS, + ); } diff --git a/tests/bn254-double/Cargo.toml b/tests/bn254-double/Cargo.toml index 3e99b0a023..43b6de30b9 100644 --- a/tests/bn254-double/Cargo.toml +++ b/tests/bn254-double/Cargo.toml @@ -6,5 +6,3 @@ publish = false [dependencies] sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } -hex-literal = "0.4.1" -num = { version = "0.4.1", default-features = false } diff --git a/tests/bn254-double/elf/riscv32im-succinct-zkvm-elf b/tests/bn254-double/elf/riscv32im-succinct-zkvm-elf index fa1f465b82..77f9a29d7c 100755 Binary files a/tests/bn254-double/elf/riscv32im-succinct-zkvm-elf and b/tests/bn254-double/elf/riscv32im-succinct-zkvm-elf differ diff --git a/tests/bn254-fp/Cargo.toml b/tests/bn254-fp/Cargo.toml index b1d053a215..9959fe0357 100644 --- a/tests/bn254-fp/Cargo.toml +++ b/tests/bn254-fp/Cargo.toml @@ -7,6 +7,5 @@ publish = false [dependencies] sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } -sp1-derive = { path = "../../crates/derive" } num-bigint = "0.4.6" rand = "0.8.5" diff --git a/tests/bn254-fp/elf/riscv32im-succinct-zkvm-elf b/tests/bn254-fp/elf/riscv32im-succinct-zkvm-elf index b7d31090cb..487f198d10 100755 Binary files a/tests/bn254-fp/elf/riscv32im-succinct-zkvm-elf and b/tests/bn254-fp/elf/riscv32im-succinct-zkvm-elf differ diff --git a/tests/bn254-fp2-addsub/Cargo.toml b/tests/bn254-fp2-addsub/Cargo.toml index 5ea9bfe1bd..3f1b8a399c 100644 --- a/tests/bn254-fp2-addsub/Cargo.toml +++ b/tests/bn254-fp2-addsub/Cargo.toml @@ -7,6 +7,5 @@ publish = false [dependencies] sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } -sp1-derive = { path = "../../crates/derive" } num-bigint = "0.4.6" rand = "0.8.5" diff --git a/tests/bn254-fp2-addsub/elf/riscv32im-succinct-zkvm-elf b/tests/bn254-fp2-addsub/elf/riscv32im-succinct-zkvm-elf index 445a7aeed3..5af2f91a1a 100755 Binary files a/tests/bn254-fp2-addsub/elf/riscv32im-succinct-zkvm-elf and b/tests/bn254-fp2-addsub/elf/riscv32im-succinct-zkvm-elf differ diff --git a/tests/bn254-fp2-mul/Cargo.toml b/tests/bn254-fp2-mul/Cargo.toml index 2c39cba2a4..aa5d276fd7 100644 --- a/tests/bn254-fp2-mul/Cargo.toml +++ b/tests/bn254-fp2-mul/Cargo.toml @@ -7,6 +7,5 @@ publish = false [dependencies] sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } -sp1-derive = { path = "../../crates/derive" } num-bigint = "0.4.6" rand = "0.8.5" diff --git a/tests/bn254-fp2-mul/elf/riscv32im-succinct-zkvm-elf b/tests/bn254-fp2-mul/elf/riscv32im-succinct-zkvm-elf index 6e08b1d49a..8ed09da22f 100755 Binary files a/tests/bn254-fp2-mul/elf/riscv32im-succinct-zkvm-elf and b/tests/bn254-fp2-mul/elf/riscv32im-succinct-zkvm-elf differ diff --git a/tests/bn254-mul/Cargo.toml b/tests/bn254-mul/Cargo.toml index 1d61e50e75..7853b90a68 100644 --- a/tests/bn254-mul/Cargo.toml +++ b/tests/bn254-mul/Cargo.toml @@ -6,4 +6,5 @@ publish = false [dependencies] sp1-zkvm = { path = 
"../../crates/zkvm/entrypoint" } +sp1-lib = { path = "../../crates/zkvm/lib" } sp1-derive = { path = "../../crates/derive" } diff --git a/tests/bn254-mul/elf/riscv32im-succinct-zkvm-elf b/tests/bn254-mul/elf/riscv32im-succinct-zkvm-elf index 027b403a89..48720a7225 100755 Binary files a/tests/bn254-mul/elf/riscv32im-succinct-zkvm-elf and b/tests/bn254-mul/elf/riscv32im-succinct-zkvm-elf differ diff --git a/tests/bn254-mul/src/main.rs b/tests/bn254-mul/src/main.rs index 751df090f8..1471b3c46a 100644 --- a/tests/bn254-mul/src/main.rs +++ b/tests/bn254-mul/src/main.rs @@ -1,8 +1,8 @@ #![no_main] sp1_zkvm::entrypoint!(main); -use sp1_zkvm::lib::bn254::Bn254AffinePoint; -use sp1_zkvm::lib::utils::AffinePoint; +use sp1_lib::bn254::Bn254Point; +use sp1_lib::utils::AffinePoint; #[sp1_derive::cycle_tracker] pub fn main() { @@ -16,7 +16,7 @@ pub fn main() { 0, 0, 0, 0, 0, 0, ]; - let mut a_point = Bn254AffinePoint::from_le_bytes(&a); + let mut a_point = Bn254Point::from_le_bytes(&a); // scalar. // 3 diff --git a/tests/common/Cargo.toml b/tests/common/Cargo.toml new file mode 100644 index 0000000000..e320ad9f2b --- /dev/null +++ b/tests/common/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "common-test-utils" +version = "1.1.0" +edition = "2021" +publish = false + +[dependencies] +sp1-lib = { path = "../../crates/zkvm/lib" } +num-bigint = "0.4" diff --git a/tests/common/src/lib.rs b/tests/common/src/lib.rs new file mode 100644 index 0000000000..f4e4e6a98b --- /dev/null +++ b/tests/common/src/lib.rs @@ -0,0 +1 @@ +pub mod weierstrass_add; diff --git a/tests/common/src/weierstrass_add.rs b/tests/common/src/weierstrass_add.rs new file mode 100644 index 0000000000..3cf62a0c30 --- /dev/null +++ b/tests/common/src/weierstrass_add.rs @@ -0,0 +1,85 @@ +use num_bigint::BigUint; +use sp1_lib::utils::{AffinePoint, WeierstrassAffinePoint}; + +/// Test all of the potential special cases for addition for Weierstrass elliptic curves. +pub fn test_weierstrass_add + WeierstrassAffinePoint, const N: usize>( + a: &[u8], + b: &[u8], + c: &[u8], + modulus: &[u8], +) { + // Validate that add_assign works. + let mut a_point = P::from_le_bytes(a); + let b_point = P::from_le_bytes(b); + a_point.add_assign(&b_point); + assert_eq!(a_point.to_le_bytes(), *c); + + // Validate that complete_add_assign works. Handles all of the potential special cases. + // Test all of the potential cases for addition. 
+ let a_point = P::from_le_bytes(a); + let b_point = P::from_le_bytes(b); + + // Case 1: Both points are infinity + let orig_infinity = P::infinity(); + let mut b = orig_infinity.clone(); + let b2 = orig_infinity.clone(); + b.complete_add_assign(&b2); + assert!(b.is_infinity(), "Adding two infinity points should result in infinity"); + + // Case 2: First point is infinity + let mut b = orig_infinity.clone(); + b.complete_add_assign(&a_point); + assert_eq!( + b.limbs_ref(), + a_point.limbs_ref(), + "Adding infinity to a point should result in that point" + ); + + // Case 3: Second point is infinity + let mut a_point_clone = a_point.clone(); + let b = orig_infinity.clone(); + a_point_clone.complete_add_assign(&b); + assert_eq!( + a_point_clone.limbs_ref(), + a_point.limbs_ref(), + "Adding a point to infinity should result in that point" + ); + + // Case 4: Points are equal (point doubling, already covered by the main loop) + let mut a_point_clone = a_point.clone(); + let a_point_clone2 = a_point.clone(); + let mut a_point_clone3 = a_point.clone(); + a_point_clone.complete_add_assign(&a_point_clone2); + a_point_clone3.double(); + assert_eq!( + a_point_clone.limbs_ref(), + a_point_clone3.limbs_ref(), + "Adding a point to itself should double the point" + ); + + // Case 5: Points are negations of each other. + // Create a point that is the negation of a_point. + let a_point_le_bytes = a_point.to_le_bytes(); + let y_biguint = BigUint::from_bytes_le(&a_point_le_bytes[N * 2..]); + let modulus_biguint = BigUint::from_bytes_le(modulus); + + // Negate y. + let negated_y_biguint = (&modulus_biguint - &y_biguint) % &modulus_biguint; + + // Create a point using the negated y. + let mut combined_negation_point_bytes = a_point_le_bytes[..N * 2].to_vec(); + combined_negation_point_bytes.extend_from_slice(&negated_y_biguint.to_bytes_le()); + let negation_point = P::from_le_bytes(&combined_negation_point_bytes); + + let mut a_point_clone = a_point.clone(); + a_point_clone.complete_add_assign(&negation_point); + assert!( + a_point_clone.is_infinity(), + "Adding a point to its negation should result in infinity" + ); + + // Case 6: Default addition + let mut a_point_clone = a_point.clone(); + a_point_clone.complete_add_assign(&b_point); + assert_eq!(a_point_clone.to_le_bytes(), *c); +} diff --git a/tests/cycle-tracker/elf/riscv32im-succinct-zkvm-elf b/tests/cycle-tracker/elf/riscv32im-succinct-zkvm-elf index cb9be2c075..cfcd052579 100755 Binary files a/tests/cycle-tracker/elf/riscv32im-succinct-zkvm-elf and b/tests/cycle-tracker/elf/riscv32im-succinct-zkvm-elf differ diff --git a/tests/ed-add/elf/riscv32im-succinct-zkvm-elf b/tests/ed-add/elf/riscv32im-succinct-zkvm-elf index e8bcfa625a..2522026284 100755 Binary files a/tests/ed-add/elf/riscv32im-succinct-zkvm-elf and b/tests/ed-add/elf/riscv32im-succinct-zkvm-elf differ diff --git a/tests/ed-decompress/elf/riscv32im-succinct-zkvm-elf b/tests/ed-decompress/elf/riscv32im-succinct-zkvm-elf index cbdfc6e541..52cd651f37 100755 Binary files a/tests/ed-decompress/elf/riscv32im-succinct-zkvm-elf and b/tests/ed-decompress/elf/riscv32im-succinct-zkvm-elf differ diff --git a/tests/ed25519/elf/riscv32im-succinct-zkvm-elf b/tests/ed25519/elf/riscv32im-succinct-zkvm-elf index 6c402f62bf..9b4414d93c 100755 Binary files a/tests/ed25519/elf/riscv32im-succinct-zkvm-elf and b/tests/ed25519/elf/riscv32im-succinct-zkvm-elf differ diff --git a/tests/fibonacci/elf/riscv32im-succinct-zkvm-elf b/tests/fibonacci/elf/riscv32im-succinct-zkvm-elf index 9f1aa66747..2a1154f08c 100755 
Binary files a/tests/fibonacci/elf/riscv32im-succinct-zkvm-elf and b/tests/fibonacci/elf/riscv32im-succinct-zkvm-elf differ
diff --git a/tests/hint-io/elf/riscv32im-succinct-zkvm-elf b/tests/hint-io/elf/riscv32im-succinct-zkvm-elf
index 51074260c8..3ce153925a 100755
Binary files a/tests/hint-io/elf/riscv32im-succinct-zkvm-elf and b/tests/hint-io/elf/riscv32im-succinct-zkvm-elf differ
diff --git a/tests/keccak-permute/elf/riscv32im-succinct-zkvm-elf b/tests/keccak-permute/elf/riscv32im-succinct-zkvm-elf
index 598aee937d..f4d95ffeee 100755
Binary files a/tests/keccak-permute/elf/riscv32im-succinct-zkvm-elf and b/tests/keccak-permute/elf/riscv32im-succinct-zkvm-elf differ
diff --git a/tests/keccak256/elf/riscv32im-succinct-zkvm-elf b/tests/keccak256/elf/riscv32im-succinct-zkvm-elf
index 5986157666..94db343707 100755
Binary files a/tests/keccak256/elf/riscv32im-succinct-zkvm-elf and b/tests/keccak256/elf/riscv32im-succinct-zkvm-elf differ
diff --git a/tests/panic/Cargo.toml b/tests/panic/Cargo.toml
index 578f0ce170..6b6fbb65f0 100644
--- a/tests/panic/Cargo.toml
+++ b/tests/panic/Cargo.toml
@@ -6,4 +6,3 @@ publish = false
 [dependencies]
 sp1-zkvm = { path = "../../crates/zkvm/entrypoint" }
-sp1-derive = { path = "../../crates/derive" }
diff --git a/tests/panic/elf/riscv32im-succinct-zkvm-elf b/tests/panic/elf/riscv32im-succinct-zkvm-elf
index befb17540d..11c6c7cba4 100755
Binary files a/tests/panic/elf/riscv32im-succinct-zkvm-elf and b/tests/panic/elf/riscv32im-succinct-zkvm-elf differ
diff --git a/tests/rand/elf/riscv32im-succinct-zkvm-elf b/tests/rand/elf/riscv32im-succinct-zkvm-elf
index 54fc4a8714..1ce9664bcb 100755
Binary files a/tests/rand/elf/riscv32im-succinct-zkvm-elf and b/tests/rand/elf/riscv32im-succinct-zkvm-elf differ
diff --git a/tests/secp256k1-add/Cargo.toml b/tests/secp256k1-add/Cargo.toml
index d0ff253cba..8133e8ea20 100644
--- a/tests/secp256k1-add/Cargo.toml
+++ b/tests/secp256k1-add/Cargo.toml
@@ -6,5 +6,6 @@ publish = false
 [dependencies]
 sp1-zkvm = { path = "../../crates/zkvm/entrypoint" }
-hex-literal = "0.4.1"
-num = { version = "0.4.1", default-features = false }
+sp1-lib = { path = "../../crates/zkvm/lib" }
+sp1-curves = { path = "../../crates/curves" }
+common-test-utils = { path = "../common" }
diff --git a/tests/secp256k1-add/elf/riscv32im-succinct-zkvm-elf b/tests/secp256k1-add/elf/riscv32im-succinct-zkvm-elf
index 748073f46c..eba9ea3bab 100755
Binary files a/tests/secp256k1-add/elf/riscv32im-succinct-zkvm-elf and b/tests/secp256k1-add/elf/riscv32im-succinct-zkvm-elf differ
diff --git a/tests/secp256k1-add/src/main.rs b/tests/secp256k1-add/src/main.rs
index 856aa83f1f..7efdcde5d4 100644
--- a/tests/secp256k1-add/src/main.rs
+++ b/tests/secp256k1-add/src/main.rs
@@ -1,45 +1,37 @@
 #![no_main]
-use sp1_zkvm::syscalls::syscall_secp256k1_add;
-
+use sp1_curves::params::FieldParameters;
+use sp1_zkvm::lib::secp256k1::Secp256k1Point;
 sp1_zkvm::entrypoint!(main);
-pub fn main() {
-    for _ in 0..4 {
-        // generator.
-        // 55066263022277343669578718895168534326250603453777594175500187360389116729240
-        // 32670510020758816978083085130507043184471273380659243275938904335757337482424
-        let mut a: [u8; 64] = [
-            152, 23, 248, 22, 91, 129, 242, 89, 217, 40, 206, 45, 219, 252, 155, 2, 7, 11, 135,
-            206, 149, 98, 160, 85, 172, 187, 220, 249, 126, 102, 190, 121, 184, 212, 16, 251, 143,
-            208, 71, 156, 25, 84, 133, 166, 72, 180, 23, 253, 168, 8, 17, 14, 252, 251, 164, 93,
-            101, 196, 163, 38, 119, 218, 58, 72,
-        ];
-
-        // 2 * generator.
-        // 89565891926547004231252920425935692360644145829622209833684329913297188986597
-        // 12158399299693830322967808612713398636155367887041628176798871954788371653930
-        let b: [u8; 64] = [
-            229, 158, 112, 92, 185, 9, 172, 171, 167, 60, 239, 140, 75, 142, 119, 92, 216, 124,
-            192, 149, 110, 64, 69, 48, 109, 125, 237, 65, 148, 127, 4, 198, 42, 229, 207, 80, 169,
-            49, 100, 35, 225, 208, 102, 50, 101, 50, 246, 247, 238, 234, 108, 70, 25, 132, 197,
-            163, 57, 195, 61, 166, 254, 104, 225, 26,
-        ];
-
-        syscall_secp256k1_add(a.as_mut_ptr() as *mut [u32; 16], b.as_ptr() as *mut [u32; 16]);
+const A: [u8; 64] = [
+    152, 23, 248, 22, 91, 129, 242, 89, 217, 40, 206, 45, 219, 252, 155, 2, 7, 11, 135, 206, 149,
+    98, 160, 85, 172, 187, 220, 249, 126, 102, 190, 121, 184, 212, 16, 251, 143, 208, 71, 156, 25,
+    84, 133, 166, 72, 180, 23, 253, 168, 8, 17, 14, 252, 251, 164, 93, 101, 196, 163, 38, 119, 218,
+    58, 72,
+];
+// 2 * generator.
+// 89565891926547004231252920425935692360644145829622209833684329913297188986597
+// 12158399299693830322967808612713398636155367887041628176798871954788371653930
+const B: [u8; 64] = [
+    229, 158, 112, 92, 185, 9, 172, 171, 167, 60, 239, 140, 75, 142, 119, 92, 216, 124, 192, 149,
+    110, 64, 69, 48, 109, 125, 237, 65, 148, 127, 4, 198, 42, 229, 207, 80, 169, 49, 100, 35, 225,
+    208, 102, 50, 101, 50, 246, 247, 238, 234, 108, 70, 25, 132, 197, 163, 57, 195, 61, 166, 254,
+    104, 225, 26,
+];
+// 3 * generator.
+// 112711660439710606056748659173929673102114977341539408544630613555209775888121
+// 25583027980570883691656905877401976406448868254816295069919888960541586679410
+const C: [u8; 64] = [
+    249, 54, 224, 188, 19, 241, 1, 134, 176, 153, 111, 131, 69, 200, 49, 181, 41, 82, 157, 248,
+    133, 79, 52, 73, 16, 195, 88, 146, 1, 138, 48, 249, 114, 230, 184, 132, 117, 253, 185, 108, 27,
+    35, 194, 52, 153, 169, 0, 101, 86, 243, 55, 42, 230, 55, 227, 15, 20, 232, 45, 99, 15, 123,
+    143, 56,
+];
-
-        // 3 * generator.
-        // 112711660439710606056748659173929673102114977341539408544630613555209775888121
-        // 25583027980570883691656905877401976406448868254816295069919888960541586679410
-        let c: [u8; 64] = [
-            249, 54, 224, 188, 19, 241, 1, 134, 176, 153, 111, 131, 69, 200, 49, 181, 41, 82, 157,
-            248, 133, 79, 52, 73, 16, 195, 88, 146, 1, 138, 48, 249, 114, 230, 184, 132, 117, 253,
-            185, 108, 27, 35, 194, 52, 153, 169, 0, 101, 86, 243, 55, 42, 230, 55, 227, 15, 20,
-            232, 45, 99, 15, 123, 143, 56,
-        ];
-
-        assert_eq!(a, c);
-    }
-
-    println!("done");
+pub fn main() {
+    common_test_utils::weierstrass_add::test_weierstrass_add::<
+        Secp256k1Point,
+        { sp1_lib::secp256k1::N },
+    >(&A, &B, &C, sp1_curves::weierstrass::secp256k1::Secp256k1BaseField::MODULUS);
 }
diff --git a/tests/secp256k1-decompress/elf/riscv32im-succinct-zkvm-elf b/tests/secp256k1-decompress/elf/riscv32im-succinct-zkvm-elf
index 81a022b875..89751b5437 100755
Binary files a/tests/secp256k1-decompress/elf/riscv32im-succinct-zkvm-elf and b/tests/secp256k1-decompress/elf/riscv32im-succinct-zkvm-elf differ
diff --git a/tests/secp256k1-double/Cargo.toml b/tests/secp256k1-double/Cargo.toml
index d352c2cbf4..72f332f741 100644
--- a/tests/secp256k1-double/Cargo.toml
+++ b/tests/secp256k1-double/Cargo.toml
@@ -6,5 +6,3 @@ publish = false
 [dependencies]
 sp1-zkvm = { path = "../../crates/zkvm/entrypoint" }
-hex-literal = "0.4.1"
-num = { version = "0.4.1", default-features = false }
diff --git a/tests/secp256k1-double/elf/riscv32im-succinct-zkvm-elf b/tests/secp256k1-double/elf/riscv32im-succinct-zkvm-elf
index 30a869476d..2fb2a017c3 100755
Binary files a/tests/secp256k1-double/elf/riscv32im-succinct-zkvm-elf and b/tests/secp256k1-double/elf/riscv32im-succinct-zkvm-elf differ
diff --git a/tests/secp256k1-mul/elf/riscv32im-succinct-zkvm-elf b/tests/secp256k1-mul/elf/riscv32im-succinct-zkvm-elf
index 14bc9e609f..cb8333c2ae 100755
Binary files a/tests/secp256k1-mul/elf/riscv32im-succinct-zkvm-elf and b/tests/secp256k1-mul/elf/riscv32im-succinct-zkvm-elf differ
diff --git a/tests/secp256k1-mul/src/main.rs b/tests/secp256k1-mul/src/main.rs
index 15281c89f1..63defb7ec5 100644
--- a/tests/secp256k1-mul/src/main.rs
+++ b/tests/secp256k1-mul/src/main.rs
@@ -1,7 +1,7 @@
 #![no_main]
 sp1_zkvm::entrypoint!(main);
-use sp1_zkvm::lib::secp256k1::Secp256k1AffinePoint;
+use sp1_zkvm::lib::secp256k1::Secp256k1Point;
 use sp1_zkvm::lib::utils::AffinePoint;
 #[sp1_derive::cycle_tracker]
@@ -17,7 +17,7 @@ pub fn main() {
         101, 196, 163, 38, 119, 218, 58, 72,
     ];
-    let mut a_point = Secp256k1AffinePoint::from_le_bytes(&a);
+    let mut a_point = Secp256k1Point::from_le_bytes(&a);
     // scalar.
     // 3
diff --git a/tests/sha-compress/elf/riscv32im-succinct-zkvm-elf b/tests/sha-compress/elf/riscv32im-succinct-zkvm-elf
index 7c6e8810dc..e7043fbe59 100755
Binary files a/tests/sha-compress/elf/riscv32im-succinct-zkvm-elf and b/tests/sha-compress/elf/riscv32im-succinct-zkvm-elf differ
diff --git a/tests/sha-extend/elf/riscv32im-succinct-zkvm-elf b/tests/sha-extend/elf/riscv32im-succinct-zkvm-elf
index 0a56930303..e90da84120 100755
Binary files a/tests/sha-extend/elf/riscv32im-succinct-zkvm-elf and b/tests/sha-extend/elf/riscv32im-succinct-zkvm-elf differ
diff --git a/tests/sha2/elf/riscv32im-succinct-zkvm-elf b/tests/sha2/elf/riscv32im-succinct-zkvm-elf
index 0a1c7f5597..104609fa19 100755
Binary files a/tests/sha2/elf/riscv32im-succinct-zkvm-elf and b/tests/sha2/elf/riscv32im-succinct-zkvm-elf differ
diff --git a/tests/tendermint-benchmark/elf/riscv32im-succinct-zkvm-elf b/tests/tendermint-benchmark/elf/riscv32im-succinct-zkvm-elf
index 4f56f31eba..f16503cb07 100755
Binary files a/tests/tendermint-benchmark/elf/riscv32im-succinct-zkvm-elf and b/tests/tendermint-benchmark/elf/riscv32im-succinct-zkvm-elf differ
diff --git a/tests/uint256-arith/elf/riscv32im-succinct-zkvm-elf b/tests/uint256-arith/elf/riscv32im-succinct-zkvm-elf
index fa251d3011..b8e6cb0d8b 100755
Binary files a/tests/uint256-arith/elf/riscv32im-succinct-zkvm-elf and b/tests/uint256-arith/elf/riscv32im-succinct-zkvm-elf differ
diff --git a/tests/uint256-mul/elf/riscv32im-succinct-zkvm-elf b/tests/uint256-mul/elf/riscv32im-succinct-zkvm-elf
index 2bac42edfc..f9862d8d20 100755
Binary files a/tests/uint256-mul/elf/riscv32im-succinct-zkvm-elf and b/tests/uint256-mul/elf/riscv32im-succinct-zkvm-elf differ
diff --git a/tests/verify-proof/Cargo.toml b/tests/verify-proof/Cargo.toml
index 4ba53f6d52..7551bd81f3 100644
--- a/tests/verify-proof/Cargo.toml
+++ b/tests/verify-proof/Cargo.toml
@@ -6,6 +6,5 @@ publish = false
 [dependencies]
 sp1-zkvm = { path = "../../crates/zkvm/entrypoint", features = ["verify"] }
-hex-literal = "0.4.1"
 hex = "0.4.3"
 sha2 = "0.10.8"
diff --git a/tests/verify-proof/elf/riscv32im-succinct-zkvm-elf b/tests/verify-proof/elf/riscv32im-succinct-zkvm-elf
index 044250fe03..140253849b 100755
Binary files a/tests/verify-proof/elf/riscv32im-succinct-zkvm-elf and b/tests/verify-proof/elf/riscv32im-succinct-zkvm-elf differ
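
Note on the shared Weierstrass addition helper added above (tests/common/src/weierstrass_add.rs): Case 5 builds the negation of a point by keeping x and replacing y with (p - y) mod p, where p is the base-field modulus passed to test_weierstrass_add as little-endian bytes. The standalone sketch below is not part of the diff; it only assumes the num-bigint crate (already a dependency of common-test-utils) and reuses the secp256k1 generator y-coordinate quoted in the removed comments to show the same modular-negation arithmetic on its own:

use num_bigint::BigUint;

fn main() {
    // secp256k1 base-field prime: p = 2^256 - 2^32 - 977.
    let p = (BigUint::from(1u8) << 256usize)
        - (BigUint::from(1u8) << 32usize)
        - BigUint::from(977u32);

    // y-coordinate of the secp256k1 generator, as quoted in the test comments.
    let y: BigUint =
        "32670510020758816978083085130507043184471273380659243275938904335757337482424"
            .parse()
            .unwrap();

    // Negate y modulo p, exactly as Case 5 does before rebuilding the point bytes.
    let neg_y = (&p - &y) % &p;

    // P and -P share an x-coordinate, so their sum is the point at infinity; at the
    // coordinate level the two y values must cancel modulo p.
    assert_eq!((&y + &neg_y) % &p, BigUint::from(0u8));
    println!("negated y = {neg_y}");
}

Feeding the little-endian bytes of neg_y back through P::from_le_bytes, as the helper does when it rebuilds the negated point, is what lets it assert that complete_add_assign lands on the point at infinity.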