
Commit

Maintenance (#381)
* chore: bump rust version

* Improve sparse polynomial evaluation algorithm (#317)

* time-optimal algorithm for sparse polynomial evaluation

* update version

---------

Co-authored-by: Srinath Setty <srinath@microsoft.com>
huitseeker and srinathsetty authored May 2, 2024
1 parent 5268c20 commit 53deada
Showing 8 changed files with 55 additions and 88 deletions.
2 changes: 1 addition & 1 deletion rust-toolchain.toml
@@ -1,6 +1,6 @@
[toolchain]
# The default profile includes rustc, rust-std, cargo, rust-docs, rustfmt and clippy.
profile = "default"
channel = "1.76.0"
channel = "1.77"
targets = [ "wasm32-unknown-unknown" ]

1 change: 1 addition & 0 deletions src/bellpepper/test_shape_cs.rs
@@ -14,6 +14,7 @@ use ff::{Field, PrimeField};
#[derive(Clone, Copy)]
struct OrderedVariable(Variable);

#[allow(dead_code)]
#[derive(Debug)]
enum NamedObject {
Constraint(usize),
12 changes: 1 addition & 11 deletions src/spartan/batched.rs
@@ -490,17 +490,7 @@ impl<E: Engine, EE: EvaluationEngineTrait<E>> BatchedRelaxedR1CSSNARKTrait<E>
let evals_Z = zip_with!(iter, (self.evals_W, U, r_y), |eval_W, U, r_y| {
let eval_X = {
// constant term
let poly_X = iter::once((0, U.u))
.chain(
//remaining inputs
U.X
.iter()
.enumerate()
// filter_map uses the sparsity of the polynomial, if irrelevant
// we should replace by UniPoly
.filter_map(|(i, x_i)| (!x_i.is_zero_vartime()).then_some((i + 1, *x_i))),
)
.collect();
let poly_X = iter::once(U.u).chain(U.X.iter().cloned()).collect();
SparsePolynomial::new(r_y.len() - 1, poly_X).evaluate(&r_y[1..])
};
(E::Scalar::ONE - r_y[0]) * eval_W + r_y[0] * eval_X
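An explanatory note, not part of the diff: in the batched verifier above (and in snark.rs below), the claimed witness evaluation is reassembled as eval_Z = (E::Scalar::ONE - r_y[0]) * eval_W + r_y[0] * eval_X, where eval_X is the multilinear extension of the public IO vector (u, X) padded with zeros, evaluated at r_y[1..]; the ppsnark variants combine eval_W and eval_X analogously. Because (u, X) sits at the low-order indices of the Boolean hypercube, the padded vector is exactly the dense-prefix shape the reworked SparsePolynomial now stores, which is why the old (index, value) tuples and the per-entry is_zero_vartime() filtering can be dropped here and in the files below.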
10 changes: 1 addition & 9 deletions src/spartan/batched_ppsnark.rs
@@ -927,15 +927,7 @@ impl<E: Engine, EE: EvaluationEngineTrait<E>> BatchedRelaxedR1CSSNARKTrait<E>

let X = {
// constant term
let poly_X = std::iter::once((0, U.u))
.chain(
//remaining inputs
(0..U.X.len())
// filter_map uses the sparsity of the polynomial, if irrelevant
// we should replace by UniPoly
.filter_map(|i| (!U.X[i].is_zero_vartime()).then_some((i + 1, U.X[i]))),
)
.collect();
let poly_X = std::iter::once(U.u).chain(U.X.iter().cloned()).collect();
SparsePolynomial::new(num_vars_log, poly_X).evaluate(&rand_sc_unpad[1..])
};

1 change: 0 additions & 1 deletion src/spartan/mod.rs
@@ -23,7 +23,6 @@ use crate::{
};
use ff::Field;
use itertools::Itertools as _;
use polys::multilinear::SparsePolynomial;
use rayon::{iter::IntoParallelRefIterator, prelude::*};
use rayon_scan::ScanParallelIterator as _;
use ref_cast::RefCast;
74 changes: 33 additions & 41 deletions src/spartan/polys/multilinear.rs
@@ -8,8 +8,7 @@ use ff::PrimeField;
use itertools::Itertools as _;
use rand_core::{CryptoRng, RngCore};
use rayon::prelude::{
IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator,
IntoParallelRefMutIterator, ParallelIterator,
IndexedParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator, ParallelIterator,
};
use serde::{Deserialize, Serialize};

@@ -130,47 +129,37 @@ impl<Scalar: PrimeField> Index<usize> for MultilinearPolynomial<Scalar> {
}

/// Sparse multilinear polynomial, meaning that $Z(\cdot)$ is zero at most points.
/// So we do not have to store every evaluation of $Z(\cdot)$; we store only the non-zero points.
///
/// For example, if the evaluations are [0, 0, 0, 1, 0, 1, 0, 2],
/// the sparse polynomial stores only the non-zero values, [(3, 1), (5, 1), (7, 2)].
/// In each tuple, the first element is the index and the second is the value.
/// In our context, sparse polynomials are non-zero over the hypercube only at locations that map to "small" integers.
/// We exploit this property to implement a time-optimal evaluation algorithm.
pub(crate) struct SparsePolynomial<Scalar> {
num_vars: usize,
Z: Vec<(usize, Scalar)>,
Z: Vec<Scalar>,
}

impl<Scalar: PrimeField> SparsePolynomial<Scalar> {
pub fn new(num_vars: usize, Z: Vec<(usize, Scalar)>) -> Self {
pub fn new(num_vars: usize, Z: Vec<Scalar>) -> Self {
Self { num_vars, Z }
}

/// Computes the $\tilde{eq}$ extension polynomial.
/// return 1 when a == r, otherwise return 0.
fn compute_chi(a: &[bool], r: &[Scalar]) -> Scalar {
assert_eq!(a.len(), r.len());
let mut chi_i = Scalar::ONE;
for j in 0..r.len() {
if a[j] {
chi_i *= r[j];
} else {
chi_i *= Scalar::ONE - r[j];
}
}
chi_i
}

// Takes O(m log n) where m is the number of non-zero evaluations and n is the number of variables.
// a time-optimal algorithm to evaluate sparse polynomials
pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
assert_eq!(self.num_vars, r.len());

(0..self.Z.len())
.into_par_iter()
.map(|i| {
let bits = (self.Z[i].0).get_bits(r.len());
Self::compute_chi(&bits, r) * self.Z[i].1
})
.sum()
let num_vars_z = self.Z.len().next_power_of_two().log_2();
let chis = EqPolynomial::evals_from_points(&r[self.num_vars - 1 - num_vars_z..]);
#[allow(clippy::disallowed_methods)]
let eval_partial: Scalar = self
.Z
.iter()
.zip(chis.iter())
.map(|(z, chi)| *z * *chi)
.sum();

let common = (0..self.num_vars - 1 - num_vars_z)
.map(|i| (Scalar::ONE - r[i]))
.product::<Scalar>();

common * eval_partial
}
}

@@ -232,18 +221,21 @@ mod tests {
}

fn test_sparse_polynomial_with<F: PrimeField>() {
// Let the polynomial have 3 variables, p(x_1, x_2, x_3) = (x_1 + x_2) * x_3
// Evaluations of the polynomial at boolean cube are [0, 0, 0, 1, 0, 1, 0, 2].
// Let the polynomial have 4 variables; it is non-zero at only 3 locations (out of 2^4 = 16) over the hypercube
let mut Z = vec![F::ONE, F::ONE, F::from(2)];
let m_poly = SparsePolynomial::<F>::new(4, Z.clone());

let TWO = F::from(2);
let Z = vec![(3, F::ONE), (5, F::ONE), (7, TWO)];
let m_poly = SparsePolynomial::<F>::new(3, Z);
Z.resize(16, F::ZERO); // append with zeros to make it a dense polynomial
let m_poly_dense = MultilinearPolynomial::new(Z);

let x = vec![F::ONE, F::ONE, F::ONE];
assert_eq!(m_poly.evaluate(x.as_slice()), TWO);
// evaluation point
let x = vec![F::from(5), F::from(8), F::from(5), F::from(3)];

let x = vec![F::ONE, F::ZERO, F::ONE];
assert_eq!(m_poly.evaluate(x.as_slice()), F::ONE);
// check evaluations
assert_eq!(
m_poly.evaluate(x.as_slice()),
m_poly_dense.evaluate(x.as_slice())
);
}

#[test]
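For readers of the multilinear.rs change above: the previous code decomposed each stored index into bits and recomputed eq(bits(i), r) per non-zero entry, while the new code builds one eq table over the low-order variables, takes an inner product with the stored non-zero prefix, and scales by the product of (1 - r_i) over the remaining high-order variables. Below is a minimal, self-contained sketch of the same idea (illustration only: u64 arithmetic modulo a prime stands in for the crate's field types, the eq table is rebuilt by hand instead of via EqPolynomial, and the variable split is the simplest one rather than the exact indexing used in the crate). It checks the prefix-only evaluation against a dense multilinear evaluation, mirroring the new unit test.

// Illustrative sketch only; not the crate's API.
const P: u64 = 2305843009213693951; // 2^61 - 1, used as a toy prime modulus

fn add(a: u64, b: u64) -> u64 { ((a as u128 + b as u128) % P as u128) as u64 }
fn sub(a: u64, b: u64) -> u64 { ((a as u128 + (P - b % P) as u128) % P as u128) as u64 }
fn mul(a: u64, b: u64) -> u64 { ((a as u128 * b as u128) % P as u128) as u64 }

// Evaluations of eq(x, r) for all x in {0,1}^n, with r[0] as the most significant bit of the index.
fn eq_evals(r: &[u64]) -> Vec<u64> {
  let mut evals = vec![1u64];
  for &ri in r {
    let mut next = Vec::with_capacity(evals.len() * 2);
    for &e in &evals {
      next.push(mul(e, sub(1, ri))); // this variable's bit is 0
      next.push(mul(e, ri));         // this variable's bit is 1
    }
    evals = next;
  }
  evals
}

// Dense evaluation: inner product of the full evaluation table with the eq table.
fn eval_dense(z: &[u64], r: &[u64]) -> u64 {
  eq_evals(r).iter().zip(z).fold(0, |acc, (chi, zi)| add(acc, mul(*chi, *zi)))
}

// "Sparse" evaluation: Z is non-zero only on a short prefix of the hypercube, so build the
// eq table over the low-order variables only and account for the high-order variables
// (which are all 0 on that prefix) with a single product of (1 - r_i).
fn eval_sparse(z: &[u64], num_vars: usize, r: &[u64]) -> u64 {
  let low_vars = z.len().next_power_of_two().trailing_zeros() as usize;
  let high_vars = num_vars - low_vars;
  let chis = eq_evals(&r[high_vars..]);
  let partial = chis.iter().zip(z).fold(0, |acc, (chi, zi)| add(acc, mul(*chi, *zi)));
  let common = r[..high_vars].iter().fold(1, |acc, &ri| mul(acc, sub(1, ri)));
  mul(common, partial)
}

fn main() {
  let num_vars = 4;
  let mut z = vec![1, 1, 2]; // same non-zero prefix as the new unit test
  let r = vec![5, 8, 5, 3];  // same evaluation point as the new unit test
  let sparse = eval_sparse(&z, num_vars, &r);
  z.resize(1 << num_vars, 0); // pad with zeros to a dense table of 2^4 entries
  assert_eq!(eval_dense(&z, &r), sparse);
  println!("sparse and dense evaluations agree: {sparse}");
}

The point of the split is that the non-zero prefix pins the high-order variables to 0, so their contribution collapses to one scalar factor and the work is proportional to the prefix length (plus one pass over the remaining variables) rather than to the full 2^num_vars table.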
24 changes: 11 additions & 13 deletions src/spartan/ppsnark.rs
@@ -24,7 +24,7 @@ use crate::{
},
SumcheckProof,
},
PolyEvalInstance, PolyEvalWitness, SparsePolynomial,
PolyEvalInstance, PolyEvalWitness,
},
traits::{
commitment::{CommitmentEngineTrait, CommitmentTrait, Len},
@@ -42,7 +42,7 @@ use rayon::prelude::*;
use serde::{Deserialize, Serialize};
use std::sync::Arc;

use super::polys::masked_eq::MaskedEqPolynomial;
use super::polys::{masked_eq::MaskedEqPolynomial, multilinear::SparsePolynomial};

fn padded<E: Engine>(v: &[E::Scalar], n: usize, e: &E::Scalar) -> Vec<E::Scalar> {
let mut v_padded = vec![*e; n];
@@ -930,17 +930,15 @@ impl<E: Engine, EE: EvaluationEngineTrait<E>> RelaxedR1CSSNARKTrait<E> for Relax
};

let eval_X = {
// constant term
let poly_X = std::iter::once((0, U.u))
.chain(
//remaining inputs
(0..U.X.len())
// filter_map uses the sparsity of the polynomial, if irrelevant
// we should replace by UniPoly
.filter_map(|i| (!U.X[i].is_zero_vartime()).then_some((i + 1, U.X[i]))),
)
.collect();
SparsePolynomial::new(vk.num_vars.log_2(), poly_X).evaluate(&rand_sc_unpad[1..])
// public IO is (u, X)
let X = vec![U.u]
.into_iter()
.chain(U.X.iter().cloned())
.collect::<Vec<E::Scalar>>();

// evaluate the sparse polynomial at rand_sc_unpad[1..]
let poly_X = SparsePolynomial::new(rand_sc_unpad.len() - 1, X);
poly_X.evaluate(&rand_sc_unpad[1..])
};

self.eval_W + factor * rand_sc_unpad[0] * eval_X
19 changes: 7 additions & 12 deletions src/spartan/snark.rs
@@ -32,7 +32,7 @@ use itertools::Itertools as _;
use once_cell::sync::OnceCell;
use rayon::prelude::*;
use serde::{Deserialize, Serialize};
use std::{iter, sync::Arc};
use std::sync::Arc;

/// A type that represents the prover's key
#[derive(Debug, Clone)]
@@ -328,17 +328,12 @@ impl<E: Engine, EE: EvaluationEngineTrait<E>> RelaxedR1CSSNARKTrait<E> for Relax
// verify claim_inner_final
let eval_Z = {
let eval_X = {
// constant term
let poly_X = iter::once((0, U.u))
.chain(
//remaining inputs
(0..U.X.len())
// filter_map uses the sparsity of the polynomial, if irrelevant
// we should replace by UniPoly
.filter_map(|i| (!U.X[i].is_zero_vartime()).then_some((i + 1, U.X[i]))),
)
.collect();
SparsePolynomial::new(usize::try_from(vk.S.num_vars.ilog2()).unwrap(), poly_X)
// public IO is (u, X)
let X = vec![U.u]
.into_iter()
.chain(U.X.iter().cloned())
.collect::<Vec<E::Scalar>>();
SparsePolynomial::new(usize::try_from(vk.S.num_vars.ilog2()).unwrap(), X)
.evaluate(&r_y[1..])
};
(E::Scalar::ONE - r_y[0]) * self.eval_W + r_y[0] * eval_X

1 comment on commit 53deada

@github-actions
Contributor


Benchmarks


Overview

This benchmark report shows the Arecibo GPU benchmarks.
NVIDIA L4
Intel(R) Xeon(R) CPU @ 2.20GHz
32 vCPUs
125 GB RAM
Workflow run: https://github.com/lurk-lab/arecibo/actions/runs/8920491490

Benchmark Results

RecursiveSNARK-NIVC-2

Benchmark              ref=5268c20              ref=53deada
Prove-NumCons-6540 47.25 ms (✅ 1.00x) 47.38 ms (✅ 1.00x slower)
Verify-NumCons-6540 35.39 ms (✅ 1.00x) 35.66 ms (✅ 1.01x slower)
Prove-NumCons-1028888 343.35 ms (✅ 1.00x) 344.89 ms (✅ 1.00x slower)
Verify-NumCons-1028888 257.10 ms (✅ 1.00x) 254.59 ms (✅ 1.01x faster)

CompressedSNARK-NIVC-Commitments-2

Benchmark              ref=5268c20              ref=53deada
Prove-NumCons-6540 13.58 s (✅ 1.00x) 13.59 s (✅ 1.00x slower)
Verify-NumCons-6540 62.92 ms (✅ 1.00x) 64.24 ms (✅ 1.02x slower)
Prove-NumCons-1028888 58.52 s (✅ 1.00x) 57.60 s (✅ 1.02x faster)
Verify-NumCons-1028888 62.53 ms (✅ 1.00x) 65.04 ms (✅ 1.04x slower)

Made with criterion-table
