pine: Compute the squared norm just once
Split the existing FLP circuit into two:

1. One that computes the squared norm and checks that it is equal
   to the claimed value

2. Another for everything else, including checking that the claimed
   squared norm is within the norm bound and that the wraparound tests
   succeeded

The first circuit does not require joint randomness to compute, which
means we can safely run it once without opening ourselves to offline
attacks. It is also the most expensive part of the computation.
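
To illustrate the first half of the split, here is a minimal sketch of
proving and verifying the squared-norm circuit on its own, mirroring the
benchmark change below. The module paths (`daphne::pine::Pine`,
`prio::field::random_vector`, `prio::flp::Type`, `prio::vdaf::xof::Seed`),
the `decide` call, and the helper name `verify_squared_norm` are assumptions
for the sketch; the remaining calls and parameters are taken from the
benchmark. The empty slice `&[]` stands in for the joint randomness this
circuit does not need.

```rust
// Sketch only: module paths and the `decide` call are assumptions; the rest
// mirrors crates/daphne/benches/pine.rs below.
use daphne::pine::Pine;
use prio::{field::random_vector, flp::Type, vdaf::xof::Seed};

fn verify_squared_norm() {
    let dimension = 1_000;
    // Same parameters as the first benchmark case below.
    let pine = Pine::new_64(1 << 15, dimension, 15, 150, 32 * 8).unwrap();
    let measurement = vec![0.0; dimension];
    let wr_joint_rand_seed = Seed::generate().unwrap();

    // Encode the gradient and append the wraparound-test results.
    let (mut input, wr_test_results) = pine
        .flp
        .encode_with_wr_joint_rand(measurement.iter().copied(), &wr_joint_rand_seed)
        .unwrap();
    input.extend_from_slice(&wr_test_results);

    // The squared-norm circuit takes no joint randomness, so the proof can be
    // generated and checked once, up front.
    let prove_rand = random_vector(pine.flp_sq_norm_equal.prove_rand_len()).unwrap();
    let proof = pine
        .flp_sq_norm_equal
        .prove(&input, &prove_rand, &[])
        .unwrap();

    let query_rand = random_vector(pine.flp_sq_norm_equal.query_rand_len()).unwrap();
    let verifier = pine
        .flp_sq_norm_equal
        .query(&input, &proof, &query_rand, &[], 1)
        .unwrap();
    assert!(pine.flp_sq_norm_equal.decide(&verifier).unwrap());
}
```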

This change aligns our implementation with a planned change for the next
version of PINE:

junyechen1996/draft-chen-cfrg-vdaf-pine#92
junyechen1996/draft-chen-cfrg-vdaf-pine#94
cjpatton committed Aug 14, 2024
1 parent 5261a1f commit 08aca1a
Showing 9 changed files with 386 additions and 184 deletions.
67 changes: 37 additions & 30 deletions crates/daphne/benches/pine.rs
@@ -10,14 +10,13 @@ use prio::{
 };
 
 fn pine(c: &mut Criterion) {
-    for (dimension, chunk_len) in [
+    for (dimension, chunk_len_sq_norm_equal) in [
         // dimension, sqrt(dimension) * some multiplier
         (1_000, 32 * 8),
         (10_000, 100 * 2),
         (100_000, 320 * 6),
         (1_000_000, 1_000 * 8),
     ] {
-        let pine = Pine::new_64(1 << 15, dimension, 15, chunk_len).unwrap();
+        let pine = Pine::new_64(1 << 15, dimension, 15, 150, chunk_len_sq_norm_equal).unwrap();
         let measurement = vec![0.0; dimension];
         let wr_joint_rand_seed = Seed::generate().unwrap();
         let nonce = [0; 16];
@@ -35,36 +34,44 @@ fn pine(c: &mut Criterion) {
             },
         );
 
-        let (mut input, wr_test_results) = pine
-            .flp
-            .encode_with_wr_joint_rand(measurement.iter().copied(), &wr_joint_rand_seed)
-            .unwrap();
-        input.extend_from_slice(&wr_test_results);
-        let joint_rand = random_vector(pine.flp.joint_rand_len()).unwrap();
-        let prove_rand = random_vector(pine.flp.prove_rand_len()).unwrap();
+        {
+            let (mut input, wr_test_results) = pine
+                .flp
+                .encode_with_wr_joint_rand(measurement.iter().copied(), &wr_joint_rand_seed)
+                .unwrap();
+            input.extend_from_slice(&wr_test_results);
+            let prove_rand = random_vector(pine.flp_sq_norm_equal.prove_rand_len()).unwrap();
 
-        c.bench_with_input(
-            BenchmarkId::new("pine/prove", dimension),
-            &dimension,
-            |b, &_d| {
-                b.iter(|| pine.flp.prove(&input, &prove_rand, &joint_rand).unwrap());
-            },
-        );
+            c.bench_with_input(
+                BenchmarkId::new("pine/prove", dimension),
+                &dimension,
+                |b, &_d| {
+                    b.iter(|| {
+                        pine.flp_sq_norm_equal
+                            .prove(&input, &prove_rand, &[])
+                            .unwrap()
+                    });
+                },
+            );
 
-        let query_rand = random_vector(pine.flp.query_rand_len()).unwrap();
-        let proof = pine.flp.prove(&input, &prove_rand, &joint_rand).unwrap();
+            let query_rand = random_vector(pine.flp_sq_norm_equal.query_rand_len()).unwrap();
+            let proof = pine
+                .flp_sq_norm_equal
+                .prove(&input, &prove_rand, &[])
+                .unwrap();
 
-        c.bench_with_input(
-            BenchmarkId::new("pine/query", dimension),
-            &dimension,
-            |b, &_d| {
-                b.iter(|| {
-                    pine.flp
-                        .query(&input, &proof, &query_rand, &joint_rand, 1)
-                        .unwrap()
-                });
-            },
-        );
+            c.bench_with_input(
+                BenchmarkId::new("pine/query", dimension),
+                &dimension,
+                |b, &_d| {
+                    b.iter(|| {
+                        pine.flp_sq_norm_equal
+                            .query(&input, &proof, &query_rand, &[], 1)
+                            .unwrap()
+                    });
+                },
+            );
+        }
 
         c.bench_with_input(
             BenchmarkId::new("pine/shard", dimension),
