From b69e3a7a8c568e63ddc42ab87a69f51626350b13 Mon Sep 17 00:00:00 2001
From: Artem Storozhuk
Date: Thu, 7 Mar 2024 18:03:20 +0200
Subject: [PATCH 1/2] templating: More accurate representing of CompressedCommitment

---
 src/provider/tests/ipa_pc.rs | 15 ++++++++-------
 src/provider/tests/mod.rs    | 25 +++++++++++++++++++++++++
 2 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/src/provider/tests/ipa_pc.rs b/src/provider/tests/ipa_pc.rs
index bc61206dd..3c41dafb8 100644
--- a/src/provider/tests/ipa_pc.rs
+++ b/src/provider/tests/ipa_pc.rs
@@ -2,7 +2,8 @@ mod test {
   use crate::provider::ipa_pc::EvaluationEngine;
   use crate::provider::tests::solidity_compatibility_utils::{
-    ec_points_to_json, field_elements_to_json, generate_pcs_solidity_unit_test_data,
+    compressed_commitment_to_json, ec_points_to_json, field_elements_to_json,
+    generate_pcs_solidity_unit_test_data,
   };
 
   use crate::provider::GrumpkinEngine;
 
@@ -33,11 +34,11 @@ Grumpkin.GrumpkinAffinePoint[] memory ck_s = new Grumpkin.GrumpkinAffinePoint[](
 uint256[] memory point = new uint256[]({{ len point }});
 {{ #each point }} point[{{ i }}]={{ val }};\n {{ /each }}
 
-Grumpkin.GrumpkinAffinePoint[] memory L_vec = new Grumpkin.GrumpkinAffinePoint[]({{ len L_vec }});
-{{ #each L_vec }} L_vec[{{ i }}]=Grumpkin.GrumpkinAffinePoint({{ x }}, {{y}});\n {{ /each }}
+uint256[] memory L_vec = new uint256[]({{ len L_vec }});
+{{ #each L_vec }} L_vec[{{ i }}]={{ compressed }};\n {{ /each }}
 
-Grumpkin.GrumpkinAffinePoint[] memory R_vec = new Grumpkin.GrumpkinAffinePoint[]({{ len R_vec }});
-{{ #each R_vec }} R_vec[{{ i }}]=Grumpkin.GrumpkinAffinePoint({{ x }}, {{y}});\n {{ /each }}
+uint256[] memory R_vec = new uint256[]({{ len R_vec }});
+{{ #each R_vec }} R_vec[{{ i }}]={{ compressed }};\n {{ /each }}
 
 uint256 a_hat = {{ a_hat }};
 
@@ -94,8 +95,8 @@ return keccak_transcript;
     let l_vec = CommitmentKey::<GrumpkinEngine>::reinterpret_commitments_as_ck(&proof.L_vec)
      .expect("can't reinterpred L_vec");
 
-    let r_vec_array = ec_points_to_json::<GrumpkinEngine>(&r_vec.ck);
-    let l_vec_array = ec_points_to_json::<GrumpkinEngine>(&l_vec.ck);
+    let r_vec_array = compressed_commitment_to_json::<GrumpkinEngine>(&r_vec.ck);
+    let l_vec_array = compressed_commitment_to_json::<GrumpkinEngine>(&l_vec.ck);
     let point_array = field_elements_to_json::<GrumpkinEngine>(&point);
     let ckv_array = ec_points_to_json::<GrumpkinEngine>(&vk.ck_v.ck);
     let cks_array = ec_points_to_json::<GrumpkinEngine>(&vk.ck_s.ck);
diff --git a/src/provider/tests/mod.rs b/src/provider/tests/mod.rs
index 39fafa52a..6f2858161 100644
--- a/src/provider/tests/mod.rs
+++ b/src/provider/tests/mod.rs
@@ -9,6 +9,7 @@ pub mod solidity_compatibility_utils {
   };
   use group::prime::PrimeCurve;
   use group::prime::PrimeCurveAffine;
+  use group::GroupEncoding;
   use rand::rngs::StdRng;
   use serde_json::{Map, Value};
   use std::sync::Arc;
@@ -121,4 +122,28 @@ pub mod solidity_compatibility_utils {
     });
     value_vector
   }
+
+  pub(crate) fn compressed_commitment_to_json<E>(
+    ec_points: &[<E::GE as PrimeCurve>::Affine],
+  ) -> Vec<Value>
+  where
+    E: Engine,
+    E::GE: DlogGroup,
+  {
+    let mut value_vector = vec![];
+    ec_points.iter().enumerate().for_each(|(i, ec_point)| {
+      let mut value = Map::new();
+      let compressed_commitment_info = ec_point.to_curve().to_bytes();
+      let mut data = compressed_commitment_info.as_ref().to_vec();
+      data.reverse();
+
+      value.insert("i".to_string(), Value::String(i.to_string()));
+      value.insert(
+        "compressed".to_string(),
+        Value::String(format!("0x{}", hex::encode(data))),
+      );
+      value_vector.push(Value::Object(value));
+    });
+    value_vector
+  }
 }

From e67b91aa1b4734fdce2e192a2049961c2a63f7f5 Mon Sep 17 00:00:00 2001
From: Artem Storozhuk
Date: Thu, 7 Mar 2024 18:04:51 +0200
Subject: [PATCH 2/2] chore: Fix CI failure on docs generating

---
 src/provider/hyperkzg.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/provider/hyperkzg.rs b/src/provider/hyperkzg.rs
index 0bcf80f2f..91b6111cd 100644
--- a/src/provider/hyperkzg.rs
+++ b/src/provider/hyperkzg.rs
@@ -5,7 +5,7 @@
 //! This means that Spartan's polynomial IOP can use commit to its polynomials as-is without incurring any interpolations or FFTs.
 //! (2) HyperKZG is specialized to use KZG as the univariate commitment scheme, so it includes several optimizations (both during the transformation of multilinear-to-univariate claims
 //! and within the KZG commitment scheme implementation itself).
-//! (3) HyperKZG also includes optimisation based on so called Shplonk/HaloInfinite technique (https://hackmd.io/@adrian-aztec/BJxoyeCqj#Phase-2-Gemini).
+//! (3) HyperKZG also includes optimisation based on so called Shplonk/HaloInfinite technique (`https://hackmd.io/@adrian-aztec/BJxoyeCqj#Phase-2-Gemini`).
 //! Compared to pure HyperKZG, this optimisation in theory improves prover (at cost of using 1 fixed KZG opening) and verifier (at cost of eliminating MSM)
 //!
 #![allow(non_snake_case)]
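
For reference, the 0x-prefixed string stored under `compressed` by the new helper is exactly what the Handlebars template in the first patch splices into `L_vec[{{ i }}]` and `R_vec[{{ i }}]` as a Solidity uint256 literal. Below is a minimal standalone sketch of that byte handling, independent of the patches and assuming only the `hex` crate the helper already uses; the input bytes are placeholders rather than a real Grumpkin commitment, and the byte reversal is read here as flipping the encoding into the most-significant-byte-first order a uint256 literal is written in.

  // Minimal sketch (not from the repository): mirrors the `data.reverse()` and
  // `hex::encode` steps of `compressed_commitment_to_json`, turning a compressed
  // commitment's bytes into a `0x...` literal for the Solidity template.
  fn compressed_bytes_to_uint256_literal(compressed: &[u8]) -> String {
    // Reverse the byte order before hex-encoding, so the last byte of the
    // encoding is printed first in the literal.
    let mut data = compressed.to_vec();
    data.reverse();
    format!("0x{}", hex::encode(data))
  }

  fn main() {
    // 32 placeholder bytes standing in for `ec_point.to_curve().to_bytes()`.
    let fake_compressed = [1u8; 32];
    // With real data, this string is what lands in `L_vec[i]` / `R_vec[i]`.
    println!("{}", compressed_bytes_to_uint256_literal(&fake_compressed));
  }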