diff --git a/README.md b/README.md index e37d60a7..e851ee1e 100644 --- a/README.md +++ b/README.md @@ -183,6 +183,7 @@ Unless you explicitly state otherwise, any contribution that you submit to this [aurora-light]: https://ia.cr/2019/601 [pcd-acc]: https://ia.cr/2020/499 [pst]: https://ia.cr/2011/587 +[brakedown]: https://ia.cr/2021/1043 [ligero]: https://ia.cr/2022/1608 [hyrax]: https://eprint.iacr.org/2017/1132 @@ -220,6 +221,10 @@ CCS 2017 Riad S. Wahby, Ioanna Tzialla, abhi shelat, Justin Thaler, Michael Walfish 2018 IEEE Symposium on Security and Privacy +[Brakedown: Linear-time and field-agnostic SNARKs for R1CS][brakedown] +Alexander Golovnev, Jonathan Lee, Srinath Setty, Justin Thaler, Riad S. Wahby +CRYPTO 2023 + ## Acknowledgements This work was supported by: an Engineering and Physical Sciences Research Council grant; a Google Faculty Award; the RISELab at UC Berkeley; and donations from the Ethereum Foundation and the Interchain Foundation. diff --git a/poly-commit/Cargo.toml b/poly-commit/Cargo.toml index 25ef2e6a..024f6f62 100644 --- a/poly-commit/Cargo.toml +++ b/poly-commit/Cargo.toml @@ -33,12 +33,16 @@ name = "ipa_times" path = "benches/ipa_times.rs" harness = false +[[bench]] +name = "brakedown_times" +path = "benches/brakedown_ml_times.rs" +harness = false + [[bench]] name = "ligero_ml_times" path = "benches/ligero_ml_times.rs" harness = false - [[bench]] name = "hyrax_times" path = "benches/hyrax_times.rs" diff --git a/poly-commit/benches/brakedown_ml_times.rs b/poly-commit/benches/brakedown_ml_times.rs new file mode 100644 index 00000000..85e8750c --- /dev/null +++ b/poly-commit/benches/brakedown_ml_times.rs @@ -0,0 +1,59 @@ +use ark_crypto_primitives::{ + crh::{sha256::Sha256, CRHScheme, TwoToOneCRHScheme}, + merkle_tree::{ByteDigestConverter, Config}, +}; +use ark_pcs_bench_templates::*; +use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; + +use ark_bn254::Fr; +use ark_ff::PrimeField; + +use ark_poly_commit::linear_codes::{LinearCodePCS, MultilinearBrakedown}; +use blake2::Blake2s256; +use rand_chacha::ChaCha20Rng; + +// Brakedown PCS over BN254 +struct MerkleTreeParams; +type LeafH = LeafIdentityHasher; +type CompressH = Sha256; +impl Config for MerkleTreeParams { + type Leaf = Vec; + + type LeafDigest = ::Output; + type LeafInnerDigestConverter = ByteDigestConverter; + type InnerDigest = ::Output; + + type LeafHash = LeafH; + type TwoToOneHash = CompressH; +} + +pub type MLE = DenseMultilinearExtension; +type MTConfig = MerkleTreeParams; +type ColHasher = FieldToBytesColHasher; +type Brakedown = LinearCodePCS< + MultilinearBrakedown, ColHasher>, + F, + MLE, + MTConfig, + ColHasher, +>; + +fn rand_poly_brakedown_ml( + num_vars: usize, + rng: &mut ChaCha20Rng, +) -> DenseMultilinearExtension { + DenseMultilinearExtension::rand(num_vars, rng) +} + +fn rand_point_brakedown_ml(num_vars: usize, rng: &mut ChaCha20Rng) -> Vec { + (0..num_vars).map(|_| F::rand(rng)).collect() +} + +const MIN_NUM_VARS: usize = 12; +const MAX_NUM_VARS: usize = 22; + +bench!( + Brakedown, + rand_poly_brakedown_ml, + rand_point_brakedown_ml +); diff --git a/poly-commit/src/error.rs b/poly-commit/src/error.rs index 15eee6a1..ec0bdad4 100644 --- a/poly-commit/src/error.rs +++ b/poly-commit/src/error.rs @@ -119,6 +119,9 @@ pub enum Error { /// Error resulting from hashing in linear code - based PCS. 
HashingError, + /// Shows that encoding is not feasible + EncodingError, + /// This means a commitment with a certain label was matched with a /// a polynomial which has a different label - which shouldn't happen MismatchedLabels { @@ -235,6 +238,7 @@ impl core::fmt::Display for Error { Error::TranscriptError => write!(f, "Incorrect transcript manipulation"), Error::InvalidParameters(err) => write!(f, "{}", err), Error::HashingError => write!(f, "Error resulting from hashing"), + Error::EncodingError => write!(f, "Encoding failed"), Error::MismatchedLabels { commitment_label, polynomial_label } => write!(f, "Mismatched labels: commitment label: {}, polynomial label: {}", commitment_label, diff --git a/poly-commit/src/lib.rs b/poly-commit/src/lib.rs index e234f875..4e3cb498 100644 --- a/poly-commit/src/lib.rs +++ b/poly-commit/src/lib.rs @@ -131,6 +131,7 @@ pub mod streaming_kzg; /// Scheme based on the Ligero construction in [[Ligero]][ligero]. /// /// [ligero]: https://eprint.iacr.org/2022/1608 +/// [brakedown]: https://eprint.iacr.org/2021/1043.pdf pub mod linear_codes; /// A polynomial commitment scheme based on the hardness of the diff --git a/poly-commit/src/linear_codes/brakedown.rs b/poly-commit/src/linear_codes/brakedown.rs new file mode 100644 index 00000000..722e1338 --- /dev/null +++ b/poly-commit/src/linear_codes/brakedown.rs @@ -0,0 +1,353 @@ +use super::utils::SprsMat; +use super::BrakedownPCParams; +use super::LinCodeParametersInfo; +use crate::linear_codes::utils::calculate_t; +use crate::utils::ceil_div; +use crate::utils::{ceil_mul, ent}; +use crate::{PCCommitterKey, PCUniversalParams, PCVerifierKey}; + +use ark_crypto_primitives::crh::{CRHScheme, TwoToOneCRHScheme}; +use ark_crypto_primitives::merkle_tree::{Config, LeafParam, TwoToOneParam}; +use ark_ff::PrimeField; +use ark_std::log2; +use ark_std::rand::RngCore; +use ark_std::vec::Vec; +#[cfg(all(not(feature = "std"), target_arch = "aarch64"))] +use num_traits::Float; + +impl PCUniversalParams for BrakedownPCParams +where + F: PrimeField, + C: Config, + H: CRHScheme, +{ + fn max_degree(&self) -> usize { + usize::MAX + } +} + +impl PCCommitterKey for BrakedownPCParams +where + F: PrimeField, + C: Config, + H: CRHScheme, +{ + fn max_degree(&self) -> usize { + usize::MAX + } + + fn supported_degree(&self) -> usize { + as PCCommitterKey>::max_degree(self) + } +} + +impl PCVerifierKey for BrakedownPCParams +where + F: PrimeField, + C: Config, + H: CRHScheme, +{ + fn max_degree(&self) -> usize { + usize::MAX + } + + fn supported_degree(&self) -> usize { + as PCVerifierKey>::max_degree(self) + } +} + +impl LinCodeParametersInfo for BrakedownPCParams +where + F: PrimeField, + C: Config, + H: CRHScheme, +{ + fn check_well_formedness(&self) -> bool { + self.check_well_formedness + } + + fn distance(&self) -> (usize, usize) { + (self.rho_inv.1 * self.beta.0, self.rho_inv.0 * self.beta.1) + } + + fn sec_param(&self) -> usize { + self.sec_param + } + + fn compute_dimensions(&self, _n: usize) -> (usize, usize) { + (self.n, self.m) + } + + fn leaf_hash_param(&self) -> &<::LeafHash as CRHScheme>::Parameters { + &self.leaf_hash_param + } + + fn two_to_one_hash_param( + &self, + ) -> &<::TwoToOneHash as TwoToOneCRHScheme>::Parameters { + &self.two_to_one_hash_param + } + + fn col_hash_params(&self) -> &::Parameters { + &self.col_hash_params + } +} + +impl BrakedownPCParams +where + F: PrimeField, + C: Config, + H: CRHScheme, +{ + /// Create a default UniversalParams, with the values from Fig. 2 from the paper. 
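+    ///
+    /// A hedged usage sketch (not taken verbatim from the PR): `Fr`, `MerkleTreeParams`,
+    /// `ColHasher`, and the `*_param` variables stand for the caller's field, Merkle-tree
+    /// `Config`, column hasher, and hash parameters, set up as in the Brakedown benches/tests
+    /// added in this PR.
+    /// ```ignore
+    /// let pp = BrakedownPCParams::<Fr, MerkleTreeParams, ColHasher<Fr, Blake2s256>>::default(
+    ///     rng,
+    ///     1 << num_vars,        // number of evaluations of the multilinear polynomial
+    ///     true,                 // enable the well-formedness check
+    ///     leaf_hash_param,
+    ///     two_to_one_hash_param,
+    ///     col_hash_params,
+    /// );
+    /// ```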
+ pub fn default( + rng: &mut R, + poly_len: usize, + check_well_formedness: bool, + leaf_hash_param: LeafParam, + two_to_one_hash_param: TwoToOneParam, + col_hash_params: H::Parameters, + ) -> Self { + let sec_param = 128; + let a = (178, 1000); + let b = (61, 1000); + let r = (1521, 1000); + let base_len = 30; + let t = calculate_t::(sec_param, (b.0 * r.1, b.1 * r.0), poly_len).unwrap(); // we want to get a rough idea what t is + let n = 1 << log2((ceil_div(2 * poly_len, t) as f64).sqrt().ceil() as usize); + let m = ceil_div(poly_len, n); + let c = Self::cn_const(a, b); + let d = Self::dn_const(a, b, r); + let ct = Constants { a, b, r, c, d }; + let (a_dims, b_dims) = Self::mat_size(m, base_len, &ct); + let a_mats = Self::make_all(rng, &a_dims); + let b_mats = Self::make_all(rng, &b_dims); + + Self::new( + sec_param, + a, + b, + r, + base_len, + n, + m, + a_dims, + b_dims, + a_mats, + b_mats, + check_well_formedness, + leaf_hash_param, + two_to_one_hash_param, + col_hash_params, + ) + } + + /// This function creates a UniversalParams. It does not check if the paramters are consistent/correct. + pub fn new( + sec_param: usize, + a: (usize, usize), + b: (usize, usize), + r: (usize, usize), + base_len: usize, + n: usize, + m: usize, + a_dims: Vec<(usize, usize, usize)>, + b_dims: Vec<(usize, usize, usize)>, + a_mats: Vec>, + b_mats: Vec>, + check_well_formedness: bool, + leaf_hash_param: LeafParam, + two_to_one_hash_param: TwoToOneParam, + col_hash_params: H::Parameters, + ) -> Self { + let m_ext = if a_dims.is_empty() { + ceil_mul(m, r) + } else { + Self::codeword_len(&a_dims, &b_dims) + }; + let start = a_dims + .iter() + .scan(0, |acc, &(row, _, _)| { + *acc += row; + Some(*acc) + }) + .collect::>(); + let end = b_dims + .iter() + .scan(m_ext, |acc, &(_, col, _)| { + *acc -= col; + Some(*acc) + }) + .collect::>(); + + Self { + sec_param, + alpha: a, + beta: b, + rho_inv: r, + base_len, + n, + m, + m_ext, + a_dims, + b_dims, + start, + end, + a_mats, + b_mats, + check_well_formedness, + leaf_hash_param, + two_to_one_hash_param, + col_hash_params, + } + } + /// mu = rho_inv - 1 - rho_inv * alpha + fn mu(a: (usize, usize), r: (usize, usize)) -> f64 { + let nom = r.0 * (a.1 - a.0) - r.1 * a.1; + let den = r.1 * a.1; + nom as f64 / den as f64 + } + /// nu = beta + alpha * beta + 0.03 + fn nu(a: (usize, usize), b: (usize, usize)) -> f64 { + let c = (3usize, 100usize); + let nom = b.0 * (a.1 + a.0) * c.1 + c.0 * b.1 * a.1; + let den = b.1 * a.1 * c.1; + nom as f64 / den as f64 + } + /// cn_const + fn cn_const(a: (usize, usize), b: (usize, usize)) -> (f64, f64) { + let a = div(a); + let b = div(b); + let arg = 1.28 * b / a; + let nom = ent(b) + a * ent(arg); + let den = -b * arg.log2(); + (nom, den) + } + /// cn + fn cn(n: usize, ct: &Constants) -> usize { + use ark_std::cmp::{max, min}; + let b = ct.b; + let c = ct.c; + min( + max(ceil_mul(n, (32 * b.0, 25 * b.1)), 4 + ceil_mul(n, b)), + ((110f64 / (n as f64) + c.0) / c.1).ceil() as usize, + ) + } + /// dn_const + fn dn_const(a: (usize, usize), b: (usize, usize), r: (usize, usize)) -> (f64, f64) { + let m = Self::mu(a, r); + let n = Self::nu(a, b); + let a = div(a); + let b = div(b); + let r = div(r); + let nm = n / m; + let nom = r * a * ent(b / r) + m * ent(nm); + let den = -a * b * nm.log2(); + (nom, den) + } + /// dn + fn dn(n: usize, ct: &Constants) -> usize { + use ark_std::cmp::min; + let b = ct.b; + let r = ct.r; + let d = ct.d; + min( + ceil_mul(n, (2 * b.0, b.1)) + + ((ceil_mul(n, r) - n + 110) as f64 / F::MODULUS_BIT_SIZE as 
f64).ceil() as usize, // 2 * beta * n + n * (r - 1 + 110/n) + ((110f64 / (n as f64) + d.0) / d.1).ceil() as usize, + ) + } + fn mat_size( + mut n: usize, + base_len: usize, + ct: &Constants, + ) -> (Vec<(usize, usize, usize)>, Vec<(usize, usize, usize)>) { + let mut a_dims: Vec<(usize, usize, usize)> = Vec::default(); + let a = ct.a; + let r = ct.r; + + while n >= base_len { + let m = ceil_mul(n, a); + let cn = Self::cn(n, ct); + let cn = if cn < m { cn } else { m }; // can't generate more nonzero entries than there are columns + a_dims.push((n, m, cn)); + n = m; + } + + let b_dims = a_dims + .iter() + .map(|&(an, am, _)| { + let n = ceil_mul(am, r); + let m = ceil_mul(an, r) - an - n; + let dn = Self::dn(n, ct); + let dn = if dn < m { dn } else { m }; // can't generate more nonzero entries than there are columns + (n, m, dn) + }) + .collect::>(); + (a_dims, b_dims) + } + + /// This function computes the codeword length + /// Notice that it assumes the input is bigger than base_len (i.e., a_dim is not empty) + pub(crate) fn codeword_len( + a_dims: &[(usize, usize, usize)], + b_dims: &[(usize, usize, usize)], + ) -> usize { + b_dims.iter().map(|(_, col, _)| col).sum::() + // Output v of the recursive encoding + a_dims.iter().map(|(row, _, _)| row).sum::() + // Input x to the recursive encoding + b_dims.last().unwrap().0 // Output z of the last step of recursion + } + + /// Create a matrix with `n` rows and `m` columns and `d` non-zero entries in each row. + /// This function creates a list for entries of each columns and calls the constructor + /// from `SprsMat`. It leverages Fisher–Yates shuffle for choosing `d` indices in each + /// row. + fn make_mat(n: usize, m: usize, d: usize, rng: &mut R) -> SprsMat { + let mut tmp: Vec = (0..m).collect(); + let mut mat: Vec> = vec![vec![]; m]; + for i in 0..n { + // Fisher–Yates shuffle algorithm + let idxs = { + (0..d) + .map(|j| { + let r = rng.next_u64() as usize % (m - j); + tmp.swap(r, m - 1 - j); + tmp[m - 1 - j] + }) + .collect::>() + }; + // Sampling values for each non-zero entry + for j in idxs { + mat[j].push(( + i, + loop { + let r = F::rand(rng); + if r != F::zero() { + break r; + } + }, + )) + } + } + SprsMat::::new_from_columns(n, m, d, &mat) + } + + fn make_all(rng: &mut R, dims: &[(usize, usize, usize)]) -> Vec> { + dims.iter() + .map(|(n, m, d)| Self::make_mat(*n, *m, *d, rng)) + .collect::>() + } +} + +#[inline] +fn div(a: (usize, usize)) -> f64 { + a.0 as f64 / a.1 as f64 +} + +struct Constants { + a: (usize, usize), + b: (usize, usize), + r: (usize, usize), + c: (f64, f64), + d: (f64, f64), +} diff --git a/poly-commit/src/linear_codes/data_structures.rs b/poly-commit/src/linear_codes/data_structures.rs index 60960ae8..609a5b34 100644 --- a/poly-commit/src/linear_codes/data_structures.rs +++ b/poly-commit/src/linear_codes/data_structures.rs @@ -1,4 +1,4 @@ -use crate::{utils::Matrix, PCCommitment, PCCommitmentState}; +use crate::{linear_codes::utils::SprsMat, utils::Matrix, PCCommitment, PCCommitmentState}; use ark_crypto_primitives::{ crh::CRHScheme, merkle_tree::{Config, LeafParam, Path, TwoToOneParam}, @@ -9,6 +9,57 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::vec::Vec; use ark_std::{marker::PhantomData, rand::RngCore}; +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Clone(bound = ""), Debug(bound = ""))] +/// The public parameters for Brakedown PCS. 
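+///
+/// Informal sketch of the codeword layout these parameters describe (inferred from
+/// `codeword_len` and `MultilinearBrakedown::encode` in this PR; exact chunk sizes are
+/// given by `a_dims`/`b_dims`, so treat this as an aid, not a specification):
+///
+/// ```text
+/// | x_0 = msg | x_1 = A_0*x_0 | ... | x_{k-1} | RS(A_{k-1}*x_{k-1}) | v_{k-1} | ... | v_1 | v_0 |
+/// ```
+///
+/// where `v_i = B_i * c_i` and `c_i` is the part of the codeword between `start[i]` and
+/// `end[i]`, i.e. the (already written) recursive encoding of `x_{i+1}`. The `start`/`end`
+/// vectors below delimit exactly these chunks.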
+pub struct BrakedownPCParams<F: PrimeField, C: Config, H: CRHScheme> {
+    /// The security parameter
+    pub(crate) sec_param: usize,
+    /// alpha in the paper
+    pub(crate) alpha: (usize, usize),
+    /// beta in the paper
+    pub(crate) beta: (usize, usize),
+    /// The inverse of the code rate.
+    pub(crate) rho_inv: (usize, usize),
+    /// Threshold of the base case to encode with RS
+    pub(crate) base_len: usize,
+    /// Length of each column in the matrix that represents the polynomials
+    pub(crate) n: usize,
+    /// Length of each row in the matrix that represents the polynomials
+    pub(crate) m: usize,
+    /// Length of each row in the matrix that represents the polynomials, **after encoding**
+    pub(crate) m_ext: usize,
+    /// Constraints on the A matrices. `a_dims[i]` is `(n, m, c)`, where `n` is
+    /// the number of rows, `m` is the number of columns, and `c` is the number of
+    /// non-zero elements in each row, for the matrix A in the `i`th step of
+    /// the encoding.
+    pub(crate) a_dims: Vec<(usize, usize, usize)>,
+    /// Same as `a_dims`, but for the B matrices.
+    pub(crate) b_dims: Vec<(usize, usize, usize)>,
+    /// From `a_dims` and `b_dims`, we compute a vector of indices that
+    /// specifies where each sub-chunk that we need to encode during the
+    /// recursive encoding begins. Notice that we do not recurse in this
+    /// implementation; instead, we do it iteratively.
+    pub(crate) start: Vec<usize>,
+    /// Same as `start`, but stores the end index of those chunks.
+    pub(crate) end: Vec<usize>,
+    /// A vector of all A matrices we need for encoding.
+    pub(crate) a_mats: Vec<SprsMat<F>>,
+    /// A vector of all B matrices we need for encoding.
+    pub(crate) b_mats: Vec<SprsMat<F>>,
+    /// A flag which determines whether the well-formedness check (a random
+    /// linear combination of the rows) is carried out.
+    pub(crate) check_well_formedness: bool,
+    /// Parameters for the hash function of Merkle tree leaves
+    #[derivative(Debug = "ignore")]
+    pub(crate) leaf_hash_param: LeafParam<C>,
+    /// Parameters for the hash function of the Merkle tree that combines two nodes into one
+    #[derivative(Debug = "ignore")]
+    pub(crate) two_to_one_hash_param: TwoToOneParam<C>,
+    /// Parameters for obtaining a leaf digest from a leaf value.
+    #[derivative(Debug = "ignore")]
+    pub(crate) col_hash_params: H::Parameters,
+}
+
 #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)]
 #[derivative(Clone(bound = ""), Debug(bound = ""))]
 /// The public parameters for Ligero PCS.
diff --git a/poly-commit/src/linear_codes/ligero.rs b/poly-commit/src/linear_codes/ligero.rs index 41dddf15..0900b370 100644 --- a/poly-commit/src/linear_codes/ligero.rs +++ b/poly-commit/src/linear_codes/ligero.rs @@ -9,7 +9,7 @@ use ark_crypto_primitives::{ }; use ark_ff::PrimeField; use ark_std::{log2, marker::PhantomData}; -#[cfg(not(feature = "std"))] +#[cfg(all(not(feature = "std"), target_arch = "aarch64"))] use num_traits::Float; impl LigeroPCParams diff --git a/poly-commit/src/linear_codes/mod.rs b/poly-commit/src/linear_codes/mod.rs index ce3fa394..b8478f5f 100644 --- a/poly-commit/src/linear_codes/mod.rs +++ b/poly-commit/src/linear_codes/mod.rs @@ -1,4 +1,8 @@ use crate::{ + linear_codes::{ + data_structures::*, + utils::{calculate_t, get_indices_from_sponge}, + }, to_bytes, utils::{inner_product, Matrix}, Error, LabeledCommitment, LabeledPolynomial, PCCommitterKey, PCUniversalParams, PCVerifierKey, @@ -18,22 +22,23 @@ use ark_std::{string::ToString, vec::Vec}; #[cfg(feature = "parallel")] use rayon::iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}; +mod data_structures; mod utils; +mod brakedown; + +mod ligero; + +mod multilinear_brakedown; + mod multilinear_ligero; mod univariate_ligero; +pub use data_structures::{BrakedownPCParams, LigeroPCParams, LinCodePCProof}; +pub use multilinear_brakedown::MultilinearBrakedown; pub use multilinear_ligero::MultilinearLigero; pub use univariate_ligero::UnivariateLigero; -mod data_structures; -mod ligero; -use data_structures::*; - -pub use data_structures::{LigeroPCParams, LinCodePCProof}; - -use utils::{calculate_t, get_indices_from_sponge}; - const FIELD_SIZE_ERROR: &str = "This field is not suitable for the proposed parameters"; /// For linear code PC schemes, the universal paramters, committer key @@ -97,7 +102,7 @@ where /// Encode a message, which is interpreted as a vector of coefficients /// of a polynomial of degree m - 1. - fn encode(msg: &[F], param: &Self::LinCodePCParams) -> Vec; + fn encode(msg: &[F], param: &Self::LinCodePCParams) -> Result, Error>; /// Represent the polynomial as either coefficients, /// in the univariate case, or evaluations over @@ -123,8 +128,11 @@ where // 2. Apply encoding row-wise let rows = mat.rows(); - let ext_mat = - Matrix::new_from_rows(cfg_iter!(rows).map(|r| Self::encode(r, param)).collect()); + let ext_mat = Matrix::new_from_rows( + cfg_iter!(rows) + .map(|r| Self::encode(r, param).unwrap()) + .collect(), + ); (mat, ext_mat) } @@ -179,7 +187,7 @@ where /// This is only a default setup with reasonable parameters. /// To create your own public parameters (from which vk/ck can be derived by `trim`), - /// see the documentation for `LigeroPCUniversalParams`. + /// see the documentation for `BrakedownPCUniversalParams` or `LigeroPCUniversalParams`. fn setup( max_degree: usize, num_vars: Option, @@ -408,6 +416,7 @@ where }; // 1. Seed the transcript with the point and the recieved vector + // TODO Consider removing the evaluation point from the transcript. let point_vec = L::point_to_vec(point.clone()); sponge.absorb(&point_vec); sponge.absorb(&proof.opening.v); @@ -416,14 +425,17 @@ where let indices = get_indices_from_sponge(n_ext_cols, t, sponge)?; // 3. Hash the received columns into leaf hashes. 
- let mut col_hashes: Vec = Vec::new(); - - for c in proof.opening.columns.iter() { - match H::evaluate(vk.col_hash_params(), c.clone()) { - Ok(a) => col_hashes.push(a.into()), - Err(_) => return Err(Error::HashingError), - } - } + let col_hashes: Vec = proof + .opening + .columns + .iter() + .map(|c| { + H::evaluate(vk.col_hash_params(), c.clone()) + .map_err(|_| Error::HashingError) + .unwrap() + .into() + }) + .collect(); // 4. Verify the paths for each of the leaf hashes - this is only run once, // even if we have a well-formedness check (i.e., we save sending and checking the columns). @@ -434,16 +446,21 @@ where return Err(Error::InvalidCommitment); } - if !path - .verify(leaf_hash_param, two_to_one_hash_param, root, leaf.clone()) - .map_err(|_| Error::InvalidCommitment)? - { - return Ok(false); - } + path.verify(leaf_hash_param, two_to_one_hash_param, root, leaf.clone()) + .map_err(|_| Error::InvalidCommitment)?; } + // Helper closure: checks if a.b = c. + let check_inner_product = |a, b, c| -> Result<(), Error> { + if inner_product(a, b) != c { + return Err(Error::InvalidCommitment); + } + + Ok(()) + }; + // 5. Compute the encoding w = E(v). - let w = L::encode(&proof.opening.v, vk); + let w = L::encode(&proof.opening.v, vk)?; // 6. Compute `a`, `b` to right- and left- multiply with the matrix `M`. let (a, b) = L::tensor(point, n_cols, n_rows); @@ -452,23 +469,26 @@ where // matches with what the verifier computed for himself. // Note: we sacrifice some code repetition in order not to repeat execution. if let (Some(well_formedness), Some(r)) = out { - let w_well_formedness = L::encode(well_formedness, vk); + let w_well_formedness = L::encode(well_formedness, vk)?; for (transcript_index, matrix_index) in indices.iter().enumerate() { - if inner_product(&r, &proof.opening.columns[transcript_index]) - != w_well_formedness[*matrix_index] - || inner_product(&b, &proof.opening.columns[transcript_index]) - != w[*matrix_index] - { - return Err(Error::InvalidCommitment); - } + check_inner_product( + &r, + &proof.opening.columns[transcript_index], + w_well_formedness[*matrix_index], + )?; + check_inner_product( + &b, + &proof.opening.columns[transcript_index], + w[*matrix_index], + )?; } } else { for (transcript_index, matrix_index) in indices.iter().enumerate() { - if inner_product(&b, &proof.opening.columns[transcript_index]) - != w[*matrix_index] - { - return Err(Error::InvalidCommitment); - } + check_inner_product( + &b, + &proof.opening.columns[transcript_index], + w[*matrix_index], + )?; } } diff --git a/poly-commit/src/linear_codes/multilinear_brakedown/mod.rs b/poly-commit/src/linear_codes/multilinear_brakedown/mod.rs new file mode 100644 index 00000000..921d932d --- /dev/null +++ b/poly-commit/src/linear_codes/multilinear_brakedown/mod.rs @@ -0,0 +1,122 @@ +use crate::Error; + +use super::utils::tensor_vec; +use super::{BrakedownPCParams, LinearEncode}; +use ark_crypto_primitives::{ + crh::{CRHScheme, TwoToOneCRHScheme}, + merkle_tree::Config, +}; +use ark_ff::{Field, PrimeField}; +use ark_poly::{MultilinearExtension, Polynomial}; +#[cfg(not(feature = "std"))] +use ark_std::vec::Vec; +use ark_std::{log2, marker::PhantomData, rand::RngCore}; + +mod tests; + +/// The multilinear Brakedown polynomial commitment scheme based on [[Brakedown]][bd]. +/// The scheme defaults to the naive batching strategy. +/// +/// Note: The scheme currently does not support hiding. 
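+///
+/// A hedged end-to-end sketch mirroring the tests added in this PR (the `BrakedownPCS<Fr>`
+/// alias, `pp`, `labeled_poly`, `point`, `value`, and the sponge are assumed to be set up
+/// as in those tests):
+/// ```ignore
+/// let (ck, vk) = BrakedownPCS::<Fr>::trim(&pp, 0, 0, None)?;
+/// let (comms, states) = BrakedownPCS::<Fr>::commit(&ck, &[labeled_poly.clone()], None)?;
+/// let proof = BrakedownPCS::<Fr>::open(
+///     &ck, &[labeled_poly], &comms, &point, &mut sponge.clone(), &states, None,
+/// )?;
+/// assert!(BrakedownPCS::<Fr>::check(
+///     &vk, &comms, &point, [value], &proof, &mut sponge, None,
+/// )?);
+/// ```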
+///
+/// [bd]: https://eprint.iacr.org/2021/1043.pdf
+pub struct MultilinearBrakedown<F: PrimeField, C: Config, P: MultilinearExtension<F>, H: CRHScheme>
+{
+    _phantom: PhantomData<(F, C, P, H)>,
+}
+
+impl<F, C, P, H> LinearEncode<F, C, P, H> for MultilinearBrakedown<F, C, P, H>
+where
+    F: PrimeField,
+    C: Config,
+    P: MultilinearExtension<F>,
+    <P as Polynomial<F>>::Point: Into<Vec<F>>,
+    H: CRHScheme,
+{
+    type LinCodePCParams = BrakedownPCParams<F, C, H>;
+
+    fn setup<R: RngCore>(
+        _max_degree: usize,
+        num_vars: Option<usize>,
+        rng: &mut R,
+        leaf_hash_param: <<C as Config>::LeafHash as CRHScheme>::Parameters,
+        two_to_one_hash_param: <<C as Config>::TwoToOneHash as TwoToOneCRHScheme>::Parameters,
+        col_hash_params: H::Parameters,
+    ) -> Self::LinCodePCParams {
+        Self::LinCodePCParams::default(
+            rng,
+            1 << num_vars.unwrap(),
+            true,
+            leaf_hash_param,
+            two_to_one_hash_param,
+            col_hash_params,
+        )
+    }
+
+    fn encode(msg: &[F], pp: &Self::LinCodePCParams) -> Result<Vec<F>, Error> {
+        if msg.len() != pp.m {
+            return Err(Error::EncodingError);
+        }
+        let cw_len = pp.m_ext;
+        let mut cw = Vec::with_capacity(cw_len);
+        cw.extend_from_slice(msg);
+
+        // Multiply by the matrices A
+        for (i, &s) in pp.start.iter().enumerate() {
+            let mut src = pp.a_mats[i].row_mul(&cw[s - pp.a_dims[i].0..s]);
+            cw.append(&mut src);
+        }
+
+        // Later we don't necessarily mutate in order, so we need the full vector now.
+        cw.resize(cw_len, F::zero());
+        // Reed-Solomon-encode the last chunk
+        let rss = *pp.start.last().unwrap_or(&0);
+        let rsie = rss + pp.a_dims.last().unwrap_or(&(0, pp.m, 0)).1;
+        let rsoe = *pp.end.last().unwrap_or(&cw_len);
+        naive_reed_solomon(&mut cw, rss, rsie, rsoe);
+
+        // Walk back up: multiply by the matrices B, writing each output chunk at `end[i]`.
+        for (i, (&s, &e)) in pp.start.iter().zip(&pp.end).enumerate() {
+            let src = &pp.b_mats[i].row_mul(&cw[s..e]);
+            cw[e..e + pp.b_dims[i].1].copy_from_slice(src);
+        }
+        Ok(cw.to_vec())
+    }
+
+    fn poly_to_vec(polynomial: &P) -> Vec<F> {
+        polynomial.to_evaluations()
+    }
+
+    fn point_to_vec(point: <P as Polynomial<F>>::Point) -> Vec<F> {
+        point
+    }
+
+    /// For a multilinear polynomial in n+m variables it returns a tuple for k={n,m}:
+    /// ((1-z_1)*(1-z_2)*...*(1-z_k), z_1*(1-z_2)*...*(1-z_k), ..., z_1*z_2*...*z_k)
+    fn tensor(
+        point: &<P as Polynomial<F>

>::Point, + left_len: usize, + _right_len: usize, + ) -> (Vec, Vec) { + let point: Vec = Self::point_to_vec(point.clone()); + + let split = log2(left_len) as usize; + let left = &point[..split]; + let right = &point[split..]; + (tensor_vec(left), tensor_vec(right)) + } +} + +// This RS encoding is on points 1, ..., oe - s without relying on FFTs +fn naive_reed_solomon(cw: &mut [F], s: usize, ie: usize, oe: usize) { + let mut res = vec![F::zero(); oe - s]; + let mut x = F::one(); + for r in res.iter_mut() { + for j in (s..ie).rev() { + *r *= x; + *r += cw[j]; + } + x += F::one(); + } + cw[s..oe].copy_from_slice(&res); +} diff --git a/poly-commit/src/linear_codes/multilinear_brakedown/tests.rs b/poly-commit/src/linear_codes/multilinear_brakedown/tests.rs new file mode 100644 index 00000000..be8edecf --- /dev/null +++ b/poly-commit/src/linear_codes/multilinear_brakedown/tests.rs @@ -0,0 +1,263 @@ +#[cfg(test)] +mod tests { + + use crate::linear_codes::LinearCodePCS; + use crate::utils::test_sponge; + use crate::{ + linear_codes::{utils::*, BrakedownPCParams, MultilinearBrakedown, PolynomialCommitment}, + LabeledPolynomial, + }; + use ark_bls12_377::Fr; + use ark_bls12_381::Fr as Fr381; + use ark_crypto_primitives::{ + crh::{sha256::Sha256, CRHScheme, TwoToOneCRHScheme}, + merkle_tree::{ByteDigestConverter, Config}, + }; + use ark_ff::{Field, PrimeField}; + use ark_poly::evaluations::multivariate::{MultilinearExtension, SparseMultilinearExtension}; + use ark_std::test_rng; + use blake2::Blake2s256; + use rand_chacha::{rand_core::SeedableRng, ChaCha20Rng}; + + type LeafH = LeafIdentityHasher; + type CompressH = Sha256; + type ColHasher = FieldToBytesColHasher; + + struct MerkleTreeParams; + + impl Config for MerkleTreeParams { + type Leaf = Vec; + + type LeafDigest = ::Output; + type LeafInnerDigestConverter = ByteDigestConverter; + type InnerDigest = ::Output; + + type LeafHash = LeafH; + type TwoToOneHash = CompressH; + } + + type MTConfig = MerkleTreeParams; + + type BrakedownPCS = LinearCodePCS< + MultilinearBrakedown, ColHasher>, + F, + SparseMultilinearExtension, + MTConfig, + ColHasher, + >; + + fn rand_poly( + _: usize, + num_vars: Option, + rng: &mut ChaCha20Rng, + ) -> SparseMultilinearExtension { + match num_vars { + Some(n) => SparseMultilinearExtension::rand(n, rng), + None => unimplemented!(), // should not happen in ML case! + } + } + + fn constant_poly( + _: usize, + num_vars: Option, + rng: &mut ChaCha20Rng, + ) -> SparseMultilinearExtension { + match num_vars { + Some(n) => { + let points = vec![(1, Fr::rand(rng))]; + SparseMultilinearExtension::from_evaluations(n, &points) + } + None => unimplemented!(), // should not happen in ML case! 
+ } + } + + #[test] + fn test_construction() { + let mut rng = &mut test_rng(); + let num_vars = 11; + // just to make sure we have the right degree given the FFT domain for our field + let leaf_hash_param = ::setup(&mut rng).unwrap(); + let two_to_one_hash_param = ::setup(&mut rng) + .unwrap() + .clone(); + let col_hash_params = as CRHScheme>::setup(&mut rng).unwrap(); + let check_well_formedness = true; + + let pp: BrakedownPCParams> = + BrakedownPCParams::default( + rng, + 1 << num_vars, + check_well_formedness, + leaf_hash_param, + two_to_one_hash_param, + col_hash_params, + ); + + let (ck, vk) = BrakedownPCS::::trim(&pp, 0, 0, None).unwrap(); + + let rand_chacha = &mut ChaCha20Rng::from_rng(test_rng()).unwrap(); + let labeled_poly = LabeledPolynomial::new( + "test".to_string(), + rand_poly(1, Some(num_vars), rand_chacha), + Some(num_vars), + Some(num_vars), + ); + + let mut test_sponge = test_sponge::(); + let (c, states) = BrakedownPCS::::commit(&ck, &[labeled_poly.clone()], None).unwrap(); + + let point = rand_point(Some(num_vars), rand_chacha); + + let value = labeled_poly.evaluate(&point); + + let proof = BrakedownPCS::::open( + &ck, + &[labeled_poly], + &c, + &point, + &mut (test_sponge.clone()), + &states, + None, + ) + .unwrap(); + assert!(BrakedownPCS::::check( + &vk, + &c, + &point, + [value], + &proof, + &mut test_sponge, + None + ) + .unwrap()); + } + + fn rand_point(num_vars: Option, rng: &mut ChaCha20Rng) -> Vec { + match num_vars { + Some(n) => (0..n).map(|_| F::rand(rng)).collect(), + None => unimplemented!(), // should not happen! + } + } + + #[test] + fn single_poly_test() { + use crate::tests::*; + single_poly_test::<_, _, BrakedownPCS, _>( + Some(5), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + single_poly_test::<_, _, BrakedownPCS, _>( + Some(10), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn constant_poly_test() { + use crate::tests::*; + single_poly_test::<_, _, BrakedownPCS, _>( + Some(10), + constant_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + single_poly_test::<_, _, BrakedownPCS, _>( + Some(5), + constant_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn full_end_to_end_test() { + use crate::tests::*; + full_end_to_end_test::<_, _, BrakedownPCS, _>( + Some(8), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + full_end_to_end_test::<_, _, BrakedownPCS, _>( + Some(9), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + fn single_equation_test() { + use crate::tests::*; + single_equation_test::<_, _, BrakedownPCS, _>( + Some(10), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + single_equation_test::<_, _, BrakedownPCS, _>( + Some(5), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + fn two_equation_test() { + use crate::tests::*; + two_equation_test::<_, _, BrakedownPCS, _>( + Some(5), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + println!("Finished 
bls12-377"); + two_equation_test::<_, _, BrakedownPCS, _>( + Some(10), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + fn full_end_to_end_equation_test() { + use crate::tests::*; + full_end_to_end_equation_test::<_, _, BrakedownPCS, _>( + Some(5), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + full_end_to_end_equation_test::<_, _, BrakedownPCS, _>( + Some(8), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } +} diff --git a/poly-commit/src/linear_codes/multilinear_ligero/mod.rs b/poly-commit/src/linear_codes/multilinear_ligero/mod.rs index 4d8c8b86..b119937a 100644 --- a/poly-commit/src/linear_codes/multilinear_ligero/mod.rs +++ b/poly-commit/src/linear_codes/multilinear_ligero/mod.rs @@ -1,6 +1,9 @@ -use super::{ - utils::{reed_solomon, tensor_vec}, - LigeroPCParams, LinearEncode, +use crate::{ + linear_codes::{ + utils::{reed_solomon, tensor_vec}, + LigeroPCParams, LinearEncode, + }, + Error, }; use ark_crypto_primitives::{ crh::{CRHScheme, TwoToOneCRHScheme}, @@ -52,8 +55,8 @@ where ) } - fn encode(msg: &[F], param: &Self::LinCodePCParams) -> Vec { - reed_solomon(msg, param.rho_inv) + fn encode(msg: &[F], param: &Self::LinCodePCParams) -> Result, Error> { + Ok(reed_solomon(msg, param.rho_inv)) } fn poly_to_vec(polynomial: &P) -> Vec { diff --git a/poly-commit/src/linear_codes/univariate_ligero/mod.rs b/poly-commit/src/linear_codes/univariate_ligero/mod.rs index 6ea7b133..86daf462 100644 --- a/poly-commit/src/linear_codes/univariate_ligero/mod.rs +++ b/poly-commit/src/linear_codes/univariate_ligero/mod.rs @@ -1,4 +1,7 @@ -use super::{utils::reed_solomon, LigeroPCParams, LinearEncode}; +use crate::{ + linear_codes::{utils::reed_solomon, LigeroPCParams, LinearEncode}, + Error, +}; use ark_crypto_primitives::{ crh::{CRHScheme, TwoToOneCRHScheme}, merkle_tree::Config, @@ -49,8 +52,8 @@ where ) } - fn encode(msg: &[F], param: &Self::LinCodePCParams) -> Vec { - reed_solomon(msg, param.rho_inv) + fn encode(msg: &[F], param: &Self::LinCodePCParams) -> Result, Error> { + Ok(reed_solomon(msg, param.rho_inv)) } /// For a univariate polynomial, we simply return the list of coefficients. 
diff --git a/poly-commit/src/linear_codes/utils.rs b/poly-commit/src/linear_codes/utils.rs index dc4fe91a..284a91f4 100644 --- a/poly-commit/src/linear_codes/utils.rs +++ b/poly-commit/src/linear_codes/utils.rs @@ -1,12 +1,111 @@ use crate::{utils::ceil_div, Error}; use ark_crypto_primitives::sponge::CryptographicSponge; -use ark_ff::{FftField, PrimeField}; +use ark_ff::{FftField, Field, PrimeField}; use ark_poly::{EvaluationDomain, GeneralEvaluationDomain}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; #[cfg(not(feature = "std"))] use ark_std::{string::ToString, vec::Vec}; -#[cfg(not(feature = "std"))] + +#[cfg(all(not(feature = "std"), target_arch = "aarch64"))] use num_traits::Float; +#[cfg(test)] +use { + crate::to_bytes, + ark_crypto_primitives::crh::CRHScheme, + ark_std::{borrow::Borrow, marker::PhantomData, rand::RngCore}, + digest::Digest, +}; + +/// This is CSC format +/// https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_column_(CSC_or_CCS) +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Clone(bound = ""), Debug(bound = ""))] +pub struct SprsMat { + /// Number of rows. + pub(crate) n: usize, + /// Number of columns. + pub(crate) m: usize, + /// Number of non-zero entries in each row. + pub(crate) d: usize, + /// Numbers of non-zero elements in each columns. + ind_ptr: Vec, + /// The indices in each columns where exists a non-zero element. + col_ind: Vec, + // The values of non-zero entries. + val: Vec, +} + +impl SprsMat { + /// Calulates v.M + pub(crate) fn row_mul(&self, v: &[F]) -> Vec { + (0..self.m) + .map(|j| { + let ij = self.ind_ptr[j]..self.ind_ptr[j + 1]; + self.col_ind[ij.clone()] + .iter() + .zip(&self.val[ij]) + .map(|(&idx, x)| v[idx] * x) + .sum::() + }) + .collect::>() + } + /// Create a new `SprsMat` from list of elements that represents the + /// matrix in column major order. `n` is the number of rows, `m` is + /// the number of columns, and `d` is NNZ in each row. + pub fn new_from_flat(n: usize, m: usize, d: usize, list: &[F]) -> Self { + let nnz = d * n; + let mut ind_ptr = vec![0; m + 1]; + let mut col_ind = Vec::::with_capacity(nnz); + let mut val = Vec::::with_capacity(nnz); + assert!(list.len() == m * n, "The dimension is incorrect."); + for i in 0..m { + for (c, &v) in list[i * n..(i + 1) * n].iter().enumerate() { + if v != F::zero() { + ind_ptr[i + 1] += 1; + col_ind.push(c); + val.push(v); + } + } + ind_ptr[i + 1] += ind_ptr[i]; + } + assert!(ind_ptr[m] <= nnz, "The dimension or NNZ is incorrect."); + Self { + n, + m, + d, + ind_ptr, + col_ind, + val, + } + } + pub fn new_from_columns(n: usize, m: usize, d: usize, list: &[Vec<(usize, F)>]) -> Self { + let nnz = d * n; + let mut ind_ptr = vec![0; m + 1]; + let mut col_ind = Vec::::with_capacity(nnz); + let mut val = Vec::::with_capacity(nnz); + assert!(list.len() == m, "The dimension is incorrect."); + for j in 0..m { + for (i, v) in list[j].iter() { + ind_ptr[j + 1] += 1; + col_ind.push(*i); + val.push(*v); + } + assert!(list[j].len() <= n, "The dimension is incorrect."); + ind_ptr[j + 1] += ind_ptr[j]; + } + assert!(ind_ptr[m] <= nnz, "The dimension or NNZ is incorrect."); + Self { + n, + m, + d, + ind_ptr, + col_ind, + val, + } + } +} + /// Apply reed-solomon encoding to msg. /// Assumes msg.len() is equal to the order of some FFT domain in F. /// Returns a vector of length equal to the smallest FFT domain of size at least msg.len() * RHO_INV. 
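+/// (Illustrative example, assuming a field with a radix-2 `GeneralEvaluationDomain`:
+/// with `rho_inv = 3` and `msg.len() = 4`, the target length is 4 * 3 = 12, so the
+/// returned codeword has length 16, the smallest power-of-two domain of size at least 12.)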
@@ -84,6 +183,60 @@ pub(crate) fn calculate_t( Ok(if t < codeword_len { t } else { codeword_len }) } +#[cfg(test)] +pub(crate) struct LeafIdentityHasher; + +#[cfg(test)] +impl CRHScheme for LeafIdentityHasher { + type Input = Vec; + type Output = Vec; + type Parameters = (); + + fn setup(_: &mut R) -> Result { + Ok(()) + } + + fn evaluate>( + _: &Self::Parameters, + input: T, + ) -> Result { + Ok(input.borrow().to_vec().into()) + } +} + +#[cfg(test)] +pub(crate) struct FieldToBytesColHasher +where + F: PrimeField + CanonicalSerialize, + D: Digest, +{ + _phantom: PhantomData<(F, D)>, +} + +#[cfg(test)] +impl CRHScheme for FieldToBytesColHasher +where + F: PrimeField + CanonicalSerialize, + D: Digest, +{ + type Input = Vec; + type Output = Vec; + type Parameters = (); + + fn setup(_rng: &mut R) -> Result { + Ok(()) + } + + fn evaluate>( + _parameters: &Self::Parameters, + input: T, + ) -> Result { + let mut dig = D::new(); + dig.update(to_bytes!(input.borrow()).unwrap()); + Ok(dig.finalize().to_vec()) + } +} + pub(crate) fn tensor_vec(values: &[F]) -> Vec { let one = F::one(); let anti_values: Vec = values.iter().map(|v| one - *v).collect(); @@ -106,18 +259,47 @@ pub(crate) fn tensor_vec(values: &[F]) -> Vec { #[cfg(test)] pub(crate) mod tests { - - use super::*; - + use crate::linear_codes::utils::{calculate_t, get_num_bytes, reed_solomon, SprsMat}; + use crate::utils::to_field; use ark_bls12_377::Fq; use ark_bls12_377::Fr; + use ark_ff::PrimeField; use ark_poly::{ domain::general::GeneralEvaluationDomain, univariate::DensePolynomial, DenseUVPolynomial, - Polynomial, + EvaluationDomain, Polynomial, }; use ark_std::test_rng; use rand_chacha::{rand_core::SeedableRng, ChaCha20Rng}; + #[test] + fn test_sprs_row_mul() { + // The columns major representation of a matrix. + let mat: Vec = to_field(vec![10, 23, 55, 100, 1, 58, 4, 0, 9]); + + let mat = SprsMat::new_from_flat(3, 3, 3, &mat); + let v: Vec = to_field(vec![12, 41, 55]); + // by giving the result in the integers and then converting to Fr + // we ensure the test will still pass even if Fr changes + assert_eq!(mat.row_mul(&v), to_field::(vec![4088, 4431, 543])); + } + + #[test] + fn test_sprs_row_mul_sparse_mat() { + // The columns major representation of a matrix. + let mat: Vec = to_field(vec![10, 23, 55, 100, 1, 58, 4, 0, 9]); + let mat = vec![ + vec![(0usize, mat[0]), (1usize, mat[1]), (2usize, mat[2])], + vec![(0usize, mat[3]), (1usize, mat[4]), (2usize, mat[5])], + vec![(0usize, mat[6]), (1usize, mat[7]), (2usize, mat[8])], + ]; + + let mat = SprsMat::new_from_columns(3, 3, 3, &mat); + let v: Vec = to_field(vec![12, 41, 55]); + // by giving the result in the integers and then converting to Fr + // we ensure the test will still pass even if Fr changes + assert_eq!(mat.row_mul(&v), to_field::(vec![4088, 4431, 543])); + } + #[test] fn test_reed_solomon() { let rho_inv = 3; diff --git a/poly-commit/src/utils.rs b/poly-commit/src/utils.rs index a542927e..a76cd005 100644 --- a/poly-commit/src/utils.rs +++ b/poly-commit/src/utils.rs @@ -1,7 +1,9 @@ use ark_ff::Field; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +#[cfg(all(not(feature = "std")))] use ark_std::vec::Vec; - +#[cfg(all(not(feature = "std"), target_arch = "aarch64"))] +use num_traits::Float; #[cfg(feature = "parallel")] use rayon::{ iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}, @@ -20,6 +22,22 @@ macro_rules! 
to_bytes { }}; } +/// Entropy function +pub(crate) fn ent(x: f64) -> f64 { + assert!(0f64 <= x && x <= 1f64); + if x == 0f64 || x == 1f64 { + 0f64 + } else { + -x * x.log2() - (1.0 - x) * (1.0 - x).log2() + } +} + +/// ceil of a * b, where a is integer and b is a rational number +#[inline] +pub(crate) fn ceil_mul(a: usize, b: (usize, usize)) -> usize { + (a * b.0 + b.1 - 1) / b.1 +} + /// Return ceil(x / y). pub(crate) fn ceil_div(x: usize, y: usize) -> usize { // XXX. warning: this expression can overflow.