diff --git a/Cargo.lock b/Cargo.lock index 4bb075d06b..2b1d4545ef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -123,9 +123,9 @@ checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" [[package]] name = "anymap" -version = "0.12.1" +version = "1.0.0-beta.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33954243bd79057c2de7338850b85983a44588021f8a5fee574a8888c6de4344" +checksum = "8f1f8f5a6f3d50d89e3797d7593a50f96bb2aaa20ca0cc7be1fb673232c91d72" [[package]] name = "arrayref" @@ -506,7 +506,7 @@ checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" dependencies = [ "atty", "bitflags 1.3.2", - "clap_derive", + "clap_derive 3.2.25", "clap_lex 0.2.4", "indexmap", "once_cell", @@ -522,6 +522,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1640e5cc7fb47dbb8338fd471b105e7ed6c3cb2aeb00c2e067127ffd3764a05d" dependencies = [ "clap_builder", + "clap_derive 4.3.2", + "once_cell", ] [[package]] @@ -559,6 +561,18 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "clap_derive" +version = "4.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" +dependencies = [ + "heck 0.4.1", + "proc-macro2 1.0.63", + "quote 1.0.29", + "syn 2.0.23", +] + [[package]] name = "clap_lex" version = "0.2.4" @@ -593,6 +607,7 @@ dependencies = [ "assert_cmd", "blstrs", "clap 4.3.11", + "fcomm", "ff", "lurk", "pasta_curves", @@ -934,9 +949,11 @@ dependencies = [ name = "fcomm" version = "0.1.1" dependencies = [ + "anyhow", "assert_cmd", "base64", "bellperson", + "bincode", "blstrs", "camino", "clap 3.2.25", @@ -951,6 +968,8 @@ dependencies = [ "pasta_curves", "predicates 2.1.5", "pretty_env_logger", + "proptest", + "proptest-derive", "rand", "serde", "serde_json", @@ -1435,10 +1454,14 @@ dependencies = [ name = "lurk-macros" version = "0.1.0" dependencies = [ + "bincode", "lurk", "pasta_curves", "proc-macro2 1.0.63", + "proptest", + "proptest-derive", "quote 1.0.29", + "serde", "syn 1.0.109", ] @@ -2056,9 +2079,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89089e897c013b3deb627116ae56a6955a72b8bed395c9526af31c9fe528b484" +checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" dependencies = [ "aho-corasick", "memchr", @@ -2068,9 +2091,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa250384981ea14565685dea16a9ccc4d1c541a13f82b9c168572264d1df8c56" +checksum = "e9aaecc05d5c4b5f7da074b9a0d1a0867e71fd36e7fc0482d8bcfe8e8fc56290" dependencies = [ "aho-corasick", "memchr", @@ -2346,8 +2369,7 @@ dependencies = [ [[package]] name = "sppark" version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfb9486baeb35ca1197c4df27451d4df5bd321e15da94c1ddb89670f9e94896a" +source = "git+https://github.com/supranational/sppark?rev=5fea26f43cc5d12a77776c70815e7c722fd1f8a7#5fea26f43cc5d12a77776c70815e7c722fd1f8a7" dependencies = [ "cc", "which", diff --git a/Cargo.toml b/Cargo.toml index 6ac23645e9..43112c7652 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,14 +12,14 @@ rust-version = "1.68.2" [dependencies] ahash = "0.7.6" anyhow = { workspace = true } -anymap = "0.12.1" +anymap = "1.0.0-beta.2" base32ct = { version = "0.2.0", features = 
["std"] } base64 = { workspace = true } base-x = "0.2.11" bellperson = { workspace = true } -bincode = "1.3.3" +bincode = { workspace = true } blstrs = { workspace = true } -clap = "4.1.8" +clap = { version = "4.3.10", features = ["derive"] } dashmap = "5.4.0" ff = { workspace = true } generic-array = "0.14.6" @@ -95,6 +95,7 @@ members = ["clutch", anyhow = "1.0.69" base64 = "0.13.1" bellperson = "0.25" +bincode = "1.3.3" blstrs = "0.7.0" # TODO: clap ff = "0.13" @@ -144,3 +145,6 @@ harness = false [[bench]] name = "public_params" harness = false + +[patch.crates-io] +sppark = { git = "https://github.com/supranational/sppark", rev="5fea26f43cc5d12a77776c70815e7c722fd1f8a7" } diff --git a/README.md b/README.md index f4f2fb807d..453b08b04c 100644 --- a/README.md +++ b/README.md @@ -96,7 +96,7 @@ Set the environment variable `LURK_FIELD` to specify the scalar field of the Lur ``` ➜ lurk-rs ✗ bin/lurk Finished release [optimized] target(s) in 0.06s - Running `target/release/examples/repl` + Running `target/release/lurk` Lurk REPL welcomes you. > (let ((square (lambda (x) (* x x)))) (square 8)) [9 iterations] => 64 @@ -107,49 +107,59 @@ Or enable `info` log-level for a trace of reduction frames: ``` ➜ lurk-rs ✗ RUST_LOG=info bin/lurk Finished release [optimized] target(s) in 0.05s - Running `target/release/examples/repl` + Running `target/release/lurk` Lurk REPL welcomes you. > (let ((square (lambda (x) (* x x)))) (square 8)) -INFO lurk::eval > Frame: 0 - Expr: (LET ((SQUARE (LAMBDA (X) (* X X)))) (SQUARE 8)) - Env: NIL - Cont: Outermost -INFO lurk::eval > Frame: 1 - Expr: (LAMBDA (X) (* X X)) - Env: NIL - Cont: Let{ var: SQUARE, body: (SQUARE 8), saved_env: NIL, continuation: Outermost } -INFO lurk::eval > Frame: 2 - Expr: (SQUARE 8) - Env: ((SQUARE . )) - Cont: Tail{ saved_env: NIL, continuation: Outermost } -INFO lurk::eval > Frame: 3 - Expr: SQUARE - Env: ((SQUARE . )) - Cont: Call{ unevaled_arg: 8, saved_env: ((SQUARE . )), continuation: Tail{ saved_env: NIL, continuation: Outermost } } -INFO lurk::eval > Frame: 4 - Expr: 8 - Env: ((SQUARE . )) - Cont: Call2{ function: , saved_env: ((SQUARE . )), continuation: Tail{ saved_env: NIL, continuation: Outermost } } -INFO lurk::eval > Frame: 5 - Expr: (* X X) - Env: ((X . 8)) - Cont: Tail{ saved_env: NIL, continuation: Outermost } -INFO lurk::eval > Frame: 6 - Expr: X - Env: ((X . 8)) - Cont: Binop{ operator: Product, unevaled_args: (X), saved_env: ((X . 8)), continuation: Tail{ saved_env: NIL, continuation: Outermost } } -INFO lurk::eval > Frame: 7 - Expr: X - Env: ((X . 8)) - Cont: Binop2{ operator: Product, evaled_arg: 8, continuation: Tail{ saved_env: NIL, continuation: Outermost } } -INFO lurk::eval > Frame: 8 - Expr: Thunk{ value: 64 => cont: Outermost} - Env: NIL - Cont: Dummy -INFO lurk::eval > Frame: 9 - Expr: 64 - Env: NIL - Cont: Terminal + INFO lurk::eval > Frame: 0 + Expr: (let ((square (lambda (x) (* x x)))) (square 8)) + Env: nil + Cont: Outermost + + INFO lurk::eval > Frame: 1 + Expr: (lambda (x) (* x x)) + Env: nil + Cont: Let{ var: square, body: (square 8), saved_env: nil, continuation: Outermost } + + INFO lurk::eval > Frame: 2 + Expr: (square 8) + Env: ((square . )) + Cont: Tail{ saved_env: nil, continuation: Outermost } + + INFO lurk::eval > Frame: 3 + Expr: square + Env: ((square . )) + Cont: Call{ unevaled_arg: 8, saved_env: ((square . )), continuation: Tail{ saved_env: nil, continuation: Outermost } } + + INFO lurk::eval > Frame: 4 + Expr: 8 + Env: ((square . )) + Cont: Call2{ function: , saved_env: ((square . 
)), continuation: Tail{ saved_env: nil, continuation: Outermost } } + + INFO lurk::eval > Frame: 5 + Expr: (* x x) + Env: ((x . 8)) + Cont: Tail{ saved_env: nil, continuation: Outermost } + + INFO lurk::eval > Frame: 6 + Expr: x + Env: ((x . 8)) + Cont: Binop{ operator: product#, unevaled_args: (x), saved_env: ((x . 8)), continuation: Tail{ saved_env: nil, continuation: Outermost } } + + INFO lurk::eval > Frame: 7 + Expr: x + Env: ((x . 8)) + Cont: Binop2{ operator: product#, evaled_arg: 8, continuation: Tail{ saved_env: nil, continuation: Outermost } } + + INFO lurk::eval > Frame: 8 + Expr: Thunk{ value: 64 => cont: Outermost} + Env: nil + Cont: Dummy + + INFO lurk::eval > Frame: 9 + Expr: 64 + Env: nil + Cont: Terminal + [9 iterations] => 64 > ``` diff --git a/clutch/Cargo.toml b/clutch/Cargo.toml index 02eaaf3502..09552b139c 100644 --- a/clutch/Cargo.toml +++ b/clutch/Cargo.toml @@ -12,6 +12,7 @@ repository = "https://github.com/lurk-lab/lurk-rs" anyhow = { workspace = true } blstrs = { workspace = true } clap = "4.1.8" +fcomm = { path = "../fcomm" } ff = "0.13" lurk = { path = "../" } pasta_curves = { workspace = true, features = ["repr-c", "serde"] } diff --git a/clutch/src/lib.rs b/clutch/src/lib.rs index df329564ce..0c2915eb6a 100644 --- a/clutch/src/lib.rs +++ b/clutch/src/lib.rs @@ -2,10 +2,12 @@ use anyhow::{anyhow, bail, Context, Error, Result}; use clap::{Arg, ArgAction, Command}; -use lurk::public_parameters::{ - public_params, Claim, Commitment, CommittedExpression, CommittedExpressionMap, LurkCont, - LurkPtr, NovaProofCache, Opening, Proof, PtrEvaluation, +use fcomm::{ + committed_expression_store, nova_proof_cache, Claim, Commitment, CommittedExpression, + CommittedExpressionMap, LurkCont, LurkPtr, NovaProofCache, Opening, Proof, PtrEvaluation, }; +use lurk::public_parameters::public_params; + use pasta_curves::pallas; use lurk::coprocessor::Coprocessor; @@ -121,8 +123,8 @@ impl ReplTrait> for ClutchState> { ) -> Self { let reduction_count = DEFAULT_REDUCTION_COUNT; - let proof_map = lurk::public_parameters::nova_proof_cache(reduction_count); - let expression_map = lurk::public_parameters::committed_expression_store(); + let proof_map = nova_proof_cache(reduction_count); + let expression_map = committed_expression_store(); let demo = command.clone().and_then(|c| { let l = Self::base_prompt().trim_start_matches('\n').len(); diff --git a/fcomm/Cargo.toml b/fcomm/Cargo.toml index fcc0219d4c..b943e15ad9 100644 --- a/fcomm/Cargo.toml +++ b/fcomm/Cargo.toml @@ -12,8 +12,10 @@ name = "fcomm" path = "src/bin/fcomm.rs" [dependencies] +anyhow = { workspace = true } base64 = { workspace = true } bellperson = { workspace = true } +bincode = { workspace = true } blstrs = { workspace = true } clap = { version = "3.2", features = ["derive"] } clap-verbosity-flag = "1.0" @@ -25,6 +27,8 @@ once_cell = { workspace = true } pairing = { workspace = true } pasta_curves = { workspace = true, features = ["repr-c", "serde"] } pretty_env_logger = { workspace = true } +proptest = "1.1.0" +proptest-derive = "0.3.0" rand = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } diff --git a/fcomm/src/bin/fcomm.rs b/fcomm/src/bin/fcomm.rs index ba86332354..0291203c45 100644 --- a/fcomm/src/bin/fcomm.rs +++ b/fcomm/src/bin/fcomm.rs @@ -17,17 +17,19 @@ use lurk::eval::{ use lurk::field::LurkField; use lurk::proof::{nova::NovaProver, Prover}; use lurk::ptr::{Ptr, TypePredicates}; +use lurk::public_parameters::error; use lurk::store::Store; use 
clap::{AppSettings, Args, Parser, Subcommand};
 use clap_verbosity_flag::{Verbosity, WarnLevel};
 
-use lurk::public_parameters::{
-    committed_expression_store, error::Error, evaluate, public_params, Claim, Commitment,
-    CommittedExpression, Evaluation, Expression, FileStore, LurkPtr, Opening, OpeningRequest,
-    Proof, ReductionCount, S1,
+use fcomm::{
+    committed_expression_store, error::Error, evaluate, Claim, Commitment, CommittedExpression,
+    Evaluation, Expression, LurkPtr, Opening, OpeningRequest, Proof, ReductionCount, S1,
 };
+use lurk::public_parameters::{public_params, FileStore};
+
 /// Functional commitments
 #[derive(Parser, Debug)]
 #[clap(version, about, long_about = None)]
@@ -485,12 +487,14 @@ fn expression<P: AsRef<Path>, F: LurkField + Serialize + DeserializeOwned>(
 fn opening_request<P: AsRef<Path>, F: LurkField + Serialize + DeserializeOwned>(
     request_path: P,
-) -> Result<OpeningRequest<F>, Error> {
-    OpeningRequest::read_from_json_path(request_path)
+) -> Result<OpeningRequest<F>, error::Error> {
+    OpeningRequest::read_from_path(request_path)
 }
 
 // Get proof from supplied path or else from stdin.
-fn proof<'a, P: AsRef<Path>, F: LurkField>(proof_path: Option<P>) -> Result<Proof<'a, S1>, Error>
+fn proof<'a, P: AsRef<Path>, F: LurkField>(
+    proof_path: Option<P>
, +) -> Result, error::Error> where F: Serialize + for<'de> Deserialize<'de>, { diff --git a/fcomm/src/error.rs b/fcomm/src/error.rs new file mode 100644 index 0000000000..b41e2f3f13 --- /dev/null +++ b/fcomm/src/error.rs @@ -0,0 +1,35 @@ +use anyhow; +use bellperson::SynthesisError; +use lurk::error::ReductionError; +use lurk::public_parameters::error; +use lurk::store; +use std::io; +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum Error { + #[error("Verification error: {0}")] + VerificationError(String), + #[error("Unsupported reduction count: {0}")] + UnsupportedReductionCount(usize), + #[error("IO error: {0}")] + IOError(#[from] io::Error), + #[error("Synthesis error: {0}")] + SynthesisError(#[from] SynthesisError), + #[error("Commitment parser error: {0}")] + CommitmentParseError(#[from] hex::FromHexError), + #[error("Unknown commitment")] + UnknownCommitment, + #[error("Opening Failure: {0}")] + OpeningFailure(String), + #[error("Evaluation Failure")] + EvaluationFailure(ReductionError), + #[error("Store error: {0}")] + StoreError(#[from] store::Error), + #[error("Serde error: {0}")] + SerdeError(#[from] lurk::z_data::serde::SerdeError), + #[error("Anyhow error: {0}")] + AnyhowError(#[from] anyhow::Error), + #[error("Cache error: {0}")] + CacheError(#[from] error::Error), +} diff --git a/fcomm/src/lib.rs b/fcomm/src/lib.rs new file mode 100644 index 0000000000..07b4caa5e8 --- /dev/null +++ b/fcomm/src/lib.rs @@ -0,0 +1,1317 @@ +use log::info; +use std::convert::TryFrom; +use std::sync::Arc; + +#[cfg(not(target_arch = "wasm32"))] +use proptest::prelude::*; +#[cfg(not(target_arch = "wasm32"))] +use proptest_derive::Arbitrary; + +use ff::PrimeField; +use hex::FromHex; +use lurk::error::ReductionError; +#[cfg(not(target_arch = "wasm32"))] +use lurk::field::FWrap; +use lurk::{ + circuit::ToInputs, + eval::{ + empty_sym_env, + lang::{Coproc, Lang}, + Evaluable, Evaluator, Status, Witness, IO, + }, + field::LurkField, + hash::PoseidonCache, + proof::nova::{self, NovaProver, PublicParams}, + proof::Prover, + ptr::{ContPtr, Ptr}, + store::Store, + tag::ExprTag, + writer::Write, + z_expr::ZExpr, + z_ptr::ZExprPtr, + z_store::ZStore, +}; +use once_cell::sync::OnceCell; +use pasta_curves::pallas; +use rand::rngs::OsRng; +use serde::de::DeserializeOwned; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +pub mod error; + +use error::Error; +use lurk::public_parameters::file_map::FileMap; + +pub const DEFAULT_REDUCTION_COUNT: ReductionCount = ReductionCount::Ten; +pub static VERBOSE: OnceCell = OnceCell::new(); + +pub type S1 = pallas::Scalar; + +mod base64 { + use serde::{Deserialize, Serialize}; + use serde::{Deserializer, Serializer}; + + pub(crate) fn serialize(v: &Vec, s: S) -> Result { + let base64 = base64::encode(v); + String::serialize(&base64, s) + } + + pub(crate) fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result, D::Error> { + let base64 = String::deserialize(d)?; + base64::decode(base64.as_bytes()).map_err(serde::de::Error::custom) + } +} + +pub type NovaProofCache = FileMap>; +pub fn nova_proof_cache(reduction_count: usize) -> NovaProofCache { + FileMap::>::new(format!("nova_proofs.{}", reduction_count)).unwrap() +} + +pub type CommittedExpressionMap = FileMap, CommittedExpression>; +pub fn committed_expression_store() -> CommittedExpressionMap { + FileMap::, CommittedExpression>::new("committed_expressions").unwrap() +} + +// Number of circuit reductions per step, equivalent to `chunk_frame_count` +#[derive(Clone, Copy, Hash, PartialEq, Eq, 
Serialize, Deserialize)] +pub enum ReductionCount { + One, + Five, + Ten, + OneHundred, +} + +#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)] +pub struct Evaluation { + pub expr: String, + pub env: String, + pub cont: String, + pub expr_out: String, + pub env_out: String, + pub cont_out: String, + pub status: Status, + pub iterations: Option, +} + +#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] +#[cfg_attr(not(target_arch = "wasm32"), proptest(no_bound))] +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)] +pub struct PtrEvaluation { + pub expr: LurkPtr, + pub env: LurkPtr, + pub cont: LurkCont, + pub expr_out: LurkPtr, + pub env_out: LurkPtr, + pub cont_out: LurkCont, + pub status: Status, + pub iterations: Option, +} + +#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct Commitment { + #[cfg_attr( + not(target_arch = "wasm32"), + proptest(strategy = "any::>().prop_map(|x| x.0)") + )] + pub comm: F, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct OpeningRequest { + pub commitment: Commitment, + pub input: Expression, + pub chain: bool, +} + +impl ToString for Commitment { + fn to_string(&self) -> String { + let s = serde_json::to_string(&self).unwrap(); + // Remove quotation marks. Yes, dumb hacks are happening. + s[1..s.len() - 1].to_string() + } +} + +impl Serialize for Commitment { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + // Use be_bytes for consistency with PrimeField printed representation. + let be_bytes: Vec = self + .comm + .to_repr() + .as_ref() + .iter() + .rev() + .map(|x| x.to_owned()) + .collect(); + + hex::serde::serialize(be_bytes, serializer) + } +} + +impl<'de, F: LurkField> Deserialize<'de> for Commitment { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + hex::serde::deserialize(deserializer) + } +} + +impl FromHex for Commitment { + type Error = hex::FromHexError; + + fn from_hex(s: T) -> Result::Error> + where + T: AsRef<[u8]>, + { + let mut v = Vec::from_hex(s)?; + v.reverse(); + let mut repr = ::Repr::default(); + repr.as_mut()[..32].copy_from_slice(&v[..]); + + Ok(Commitment { + comm: F::from_repr(repr).unwrap(), + }) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +pub struct Expression { + pub expr: LurkPtr, +} + +#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] +#[cfg_attr(not(target_arch = "wasm32"), proptest(no_bound))] +#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] +pub struct Opening { + pub input: String, + pub output: String, + pub status: Status, + pub commitment: Commitment, + pub new_commitment: Option>, +} + +#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] +#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq)] +pub struct ZBytes { + #[serde(with = "base64")] + z_store: Vec, + #[serde(with = "base64")] + z_ptr: Vec, // can also be a scalar_cont_ptr +} + +#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] +#[cfg_attr(not(target_arch = "wasm32"), proptest(no_bound))] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ZStorePtr { + z_store: ZStore, + z_ptr: ZExprPtr, +} + +#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] +#[cfg_attr(not(target_arch = "wasm32"), proptest(no_bound))] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum LurkPtr { + 
Source(String), + ZStorePtr(ZStorePtr), +} + +#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)] +pub enum LurkCont { + #[default] + Outermost, + Terminal, + Error, +} + +impl Default for LurkPtr { + fn default() -> Self { + Self::Source("nil".to_string()) + } +} + +impl Eq for LurkPtr {} + +#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] +#[cfg_attr(not(target_arch = "wasm32"), proptest(no_bound))] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct CommittedExpression { + pub expr: LurkPtr, + #[cfg_attr( + not(target_arch = "wasm32"), + proptest(strategy = "any::>().prop_map(|x| Some(x.0))") + )] + pub secret: Option, + pub commitment: Option>, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct VerificationResult { + pub verified: bool, +} + +#[derive(Serialize, Deserialize)] +pub struct Proof<'a, F: LurkField> { + pub claim: Claim, + pub proof: nova::Proof<'a, Coproc>, + pub num_steps: usize, + pub reduction_count: ReductionCount, +} + +#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] +#[cfg_attr(not(target_arch = "wasm32"), proptest(no_bound))] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub enum Claim { + Evaluation(Evaluation), + // TODO: Add Expression type + PtrEvaluation(PtrEvaluation), + Opening(Opening), +} + +impl Deserialize<'de>> Claim { + // Returns the ZPtr corresponding to the claim + pub fn proof_key(&self) -> Result, Error> { + match self { + Claim::Evaluation(eval) => { + // Only keying on input and output for now + let expr_in = ZExprPtr::::from_lurk_str(&eval.expr)?; + let expr_out = ZExprPtr::::from_lurk_str(&eval.expr_out)?; + let expr = ZExpr::Cons(expr_in, expr_out); + Ok(expr.z_ptr(&PoseidonCache::default())) + } + Claim::PtrEvaluation(ptr_eval) => { + let expr_in: ZExprPtr = match &ptr_eval.expr { + LurkPtr::Source(source) => ZExprPtr::::from_lurk_str(source)?, + LurkPtr::ZStorePtr(zsp) => zsp.z_ptr, + }; + let expr_out = match &ptr_eval.expr_out { + LurkPtr::Source(source) => ZExprPtr::::from_lurk_str(source)?, + LurkPtr::ZStorePtr(zsp) => zsp.z_ptr, + }; + let expr = ZExpr::Cons(expr_in, expr_out); + Ok(expr.z_ptr(&PoseidonCache::default())) + } + // TODO: Is this an appropriate key for commitments? + Claim::Opening(open) => { + let expr_in = ZExprPtr::::from_lurk_str(&open.input)?; + let expr_out = ZExprPtr::::from_lurk_str(&open.output)?; + let expr = ZExpr::Cons(expr_in, expr_out); + Ok(expr.z_ptr(&PoseidonCache::default())) + } + } + } +} + +// This is just a rough idea, mostly here so we can plumb it elsewhere. The idea is that a verifier can sign an +// attestation that a given claim's proof was verified. It motivates the use of an online verifier for demo purposes. +// Although real proofs should be fast to verify, they will still be large relative to a small (auditable) bundle like +// this. Even if not entirely realistic, something with this general *shape* is likely to play a role in a recursive +// system where the ability to aggregate proof verification more soundly is possible. 
+//#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +pub struct Cert { + pub claim_cid: ZExprPtr, + pub proof_cid: ZExprPtr, + pub verified: bool, + pub verifier_id: String, + pub signature: String, +} + +#[allow(dead_code)] +impl Claim { + pub fn is_evaluation(&self) -> bool { + self.evaluation().is_some() + } + pub fn is_opening(&self) -> bool { + self.opening().is_some() + } + pub fn evaluation(&self) -> Option { + match self { + Self::Evaluation(e) => Some(e.clone()), + _ => None, + } + } + pub fn ptr_evaluation(&self) -> Option> { + match self { + Self::PtrEvaluation(e) => Some(e.clone()), + _ => None, + } + } + pub fn opening(&self) -> Option> { + match self { + Self::Opening(o) => Some(o.clone()), + _ => None, + } + } +} + +type E = Error; +impl TryFrom for ReductionCount { + type Error = E; + + fn try_from(count: usize) -> Result>::Error> { + match count { + 1 => Ok(ReductionCount::One), + 5 => Ok(ReductionCount::Five), + 10 => Ok(ReductionCount::Ten), + 100 => Ok(ReductionCount::OneHundred), + c => Err(Error::UnsupportedReductionCount(c)), + } + } +} +impl ReductionCount { + pub fn count(&self) -> usize { + match self { + Self::One => 1, + Self::Five => 5, + Self::Ten => 10, + Self::OneHundred => 100, + } + } +} + +impl Evaluation { + fn new( + s: &mut Store, + input: IO, + output: IO, + iterations: Option, // This might be padded, so is not quite 'iterations' in the sense of number of actual reduction steps required + // to evaluate. + ) -> Self { + let status: Status = output.cont.into(); + let terminal = status.is_terminal(); + + // For now, conservatively hide all outputs unless output is terminal. TODO: let evaluator configure this in a + // more fine-grained way, including no hiding. + // NOTE: If anything is hidden, a proof won't be possible. + macro_rules! maybe_hide { + ($x:expr) => { + if terminal { + $x + } else { + "".to_string() + } + }; + } + + let expr = input.expr.fmt_to_string(s); + let env = input.env.fmt_to_string(s); + let cont = input.cont.fmt_to_string(s); + + let expr_out = maybe_hide!(output.expr.fmt_to_string(s)); + let env_out = maybe_hide!(output.env.fmt_to_string(s)); + let cont_out = maybe_hide!(output.cont.fmt_to_string(s)); + + Self { + expr, + env, + cont, + expr_out, + env_out, + cont_out, + status, + iterations, + } + } + + pub fn eval( + store: &mut Store, + expr: Ptr, + limit: usize, + ) -> Result { + let env = empty_sym_env(store); + let lang = &Lang::>::new(); + let mut evaluator = Evaluator::new(expr, env, store, limit, lang); + + let input = evaluator.initial(); + + let (output, iterations, _) = evaluator.eval().map_err(Error::EvaluationFailure)?; + + Ok(Self::new(store, input, output, Some(iterations))) + } +} + +impl PtrEvaluation { + fn new( + s: &mut Store, + input: IO, + output: IO, + iterations: Option, // This might be padded, so is not quite 'iterations' in the sense of number of actual reduction steps required + // to evaluate. + ) -> Self { + let status: Status = output.cont.into(); + + // NOTE: We do not implement the `maybe_hide!` logic found in `Evaluation::new()`. That was a speculative design + // unsupported by this patch. In ny case, `Evaluation` and `PtrEvaluation` should be unified in the future, and + // an appropriate hiding mechanism/configuration can be added then. 
+ Self { + expr: LurkPtr::from_ptr(s, &input.expr), + env: LurkPtr::from_ptr(s, &input.env), + cont: LurkCont::from_cont_ptr(s, &input.cont), + expr_out: LurkPtr::from_ptr(s, &output.expr), + env_out: LurkPtr::from_ptr(s, &output.env), + cont_out: LurkCont::from_cont_ptr(s, &output.cont), + status, + iterations, + } + } +} + +impl Commitment { + pub fn from_comm(s: &mut Store, ptr: &Ptr) -> Result { + assert_eq!(ExprTag::Comm, ptr.tag); + + let digest = *s + .hash_expr(ptr) + .ok_or_else(|| Error::UnknownCommitment)? + .value(); + + Ok(Commitment { comm: digest }) + } + + pub fn ptr(&self, s: &mut Store) -> Ptr { + s.intern_opaque_comm(self.comm) + } + + pub fn from_ptr_with_hiding(s: &mut Store, ptr: &Ptr) -> Result<(Self, F), Error> { + let secret = F::random(OsRng); + + let commitment = Self::from_ptr_and_secret(s, ptr, secret)?; + + Ok((commitment, secret)) + } + + pub fn from_ptr_and_secret(s: &mut Store, ptr: &Ptr, secret: F) -> Result { + let hidden = s.hide(secret, *ptr); + + Self::from_comm(s, &hidden) + } + + // Importantly, this ensures the function and secret are in the Store, s. + fn construct_with_fun_application( + s: &mut Store, + function: CommittedExpression, + input: Ptr, + limit: usize, + lang: &Lang>, + ) -> Result<(Self, Ptr), Error> { + let fun_ptr = function.expr_ptr(s, limit, lang)?; + let secret = function.secret.expect("CommittedExpression secret missing"); + + let commitment = Self::from_ptr_and_secret(s, &fun_ptr, secret)?; + + let open = s.lurk_sym("open"); + let comm_ptr = s.hide(secret, fun_ptr); + + // (open ) + let fun_expr = s.list(&[open, comm_ptr]); + + // ((open ) input) + let expression = s.list(&[fun_expr, input]); + + Ok((commitment, expression)) + } + + fn fun_application(&self, s: &mut Store, input: Ptr) -> Ptr { + let open = s.lurk_sym("open"); + let comm_ptr = self.ptr(s); + + // (open ) + let fun_expr = s.list(&[open, comm_ptr]); + + // ((open commitment) input) + s.list(&[fun_expr, input]) + } +} + +impl CommittedExpression { + pub fn expr_ptr( + &self, + s: &mut Store, + limit: usize, + lang: &Lang>, + ) -> Result, Error> { + let source_ptr = self.expr.ptr(s, limit, lang); + + Ok(source_ptr) + } +} + +impl LurkPtr { + pub fn ptr(&self, s: &mut Store, limit: usize, lang: &Lang>) -> Ptr { + match self { + LurkPtr::Source(source) => { + let ptr = s.read(source).expect("could not read source"); + assert!(!ptr.raw.is_opaque()); + let (out, _) = evaluate(s, ptr, None, limit, lang).unwrap(); + + out.expr + } + LurkPtr::ZStorePtr(z_store_ptr) => { + let z_store = &z_store_ptr.z_store; + let z_ptr = z_store_ptr.z_ptr; + s.intern_z_expr_ptr(z_ptr, z_store) + .expect("failed to intern z_ptr") + } + } + } + + pub fn from_ptr(s: &mut Store, ptr: &Ptr) -> Self { + let (z_store, z_ptr) = ZStore::new_with_expr(s, ptr); + let z_ptr = z_ptr.unwrap(); + Self::ZStorePtr(ZStorePtr { z_store, z_ptr }) + } +} + +impl LurkCont { + pub fn cont_ptr( + &self, + s: &mut Store, + ) -> ContPtr { + match self { + Self::Outermost => s.get_cont_outermost(), + Self::Terminal => s.get_cont_terminal(), + Self::Error => s.get_cont_error(), + } + } + + pub fn from_cont_ptr( + _s: &mut Store, + cont_ptr: &ContPtr, + ) -> Self { + use lurk::tag::ContTag; + + match cont_ptr.tag { + ContTag::Outermost => Self::Outermost, + ContTag::Terminal => Self::Terminal, + ContTag::Error => Self::Error, + _ => panic!("unsupported continuation"), + } + } +} + +impl Expression { + pub fn eval( + &self, + s: &mut Store, + limit: usize, + lang: &Lang>, + ) -> Result, Error> { + let expr = 
self.expr.ptr(s, limit, lang); + let (io, _iterations) = evaluate(s, expr, None, limit, lang)?; + + Ok(io.expr) + } +} + +impl<'a> Opening { + #[allow(clippy::too_many_arguments)] + pub fn apply_and_prove( + s: &'a mut Store, + input: Ptr, + function: CommittedExpression, + limit: usize, + chain: bool, + only_use_cached_proofs: bool, + nova_prover: &'a NovaProver>, + pp: &'a PublicParams<'_, Coproc>, + lang: Arc>>, + ) -> Result, Error> { + let claim = Self::apply(s, input, function, limit, chain, &lang)?; + Proof::prove_claim( + s, + &claim, + limit, + only_use_cached_proofs, + nova_prover, + pp, + lang, + ) + } + + pub fn open_and_prove( + s: &'a mut Store, + request: OpeningRequest, + limit: usize, + only_use_cached_proofs: bool, + nova_prover: &'a NovaProver>, + pp: &'a PublicParams<'_, Coproc>, + lang: Arc>>, + ) -> Result, Error> { + let input = request.input.expr.ptr(s, limit, &lang); + let commitment = request.commitment; + + let function_map = committed_expression_store(); + let function = function_map + .get(&commitment) + .ok_or(Error::UnknownCommitment)?; + + Self::apply_and_prove( + s, + input, + function, + limit, + request.chain, + only_use_cached_proofs, + nova_prover, + pp, + lang, + ) + } + + pub fn open( + s: &mut Store, + request: OpeningRequest, + limit: usize, + chain: bool, + lang: &Lang>, + ) -> Result, Error> { + let input = request.input.expr.ptr(s, limit, lang); + let commitment = request.commitment; + + let function_map = committed_expression_store(); + let function = function_map + .get(&commitment) + .ok_or(Error::UnknownCommitment)?; + + Self::apply(s, input, function, limit, chain, lang) + } + + fn _is_chained(&self) -> bool { + self.new_commitment.is_some() + } + + fn public_output_expression(&self, s: &mut Store) -> Ptr { + let result = s.read(&self.output).expect("unreadable result"); + + if let Some(commitment) = self.new_commitment { + let c = commitment.ptr(s); + + s.cons(result, c) + } else { + result + } + } + + pub fn apply( + s: &mut Store, + input: Ptr, + function: CommittedExpression, + limit: usize, + chain: bool, + lang: &Lang>, + ) -> Result, Error> { + let (commitment, expression) = + Commitment::construct_with_fun_application(s, function, input, limit, lang)?; + let (public_output, _iterations) = evaluate(s, expression, None, limit, lang)?; + + let (new_commitment, output_expr) = if chain { + let cons = public_output.expr; + let result_expr = s.car(&cons)?; + let new_comm = s.cdr(&cons)?; + + let new_secret0 = s.secret(new_comm).expect("secret missing"); + let new_secret = *s.hash_expr(&new_secret0).expect("hash missing").value(); + + let (_, new_fun) = s.open(new_comm).expect("opening missing"); + let new_commitment = Commitment::from_comm(s, &new_comm)?; + + s.hydrate_scalar_cache(); + + let expr = LurkPtr::from_ptr(s, &new_fun); + + let new_function = CommittedExpression:: { + expr, + secret: Some(new_secret), + commitment: Some(new_commitment), + }; + + let function_map = committed_expression_store(); + function_map.set(new_commitment, &new_function)?; + assert_eq!(new_function, function_map.get(&new_commitment).unwrap()); + + (Some(new_commitment), result_expr) + } else { + (None, public_output.expr) + }; + + let input_string = input.fmt_to_string(s); + let status = + as Evaluable, Coproc>>::status(&public_output); + let output_string = if status.is_terminal() { + // Only actual output if result is terminal. 
+ output_expr.fmt_to_string(s) + } else { + // We don't want to leak any internal information in the case of incomplete computations. + // Provers might want to expose results in the case of explicit errors. + // For now, don't -- but consider allowing it as an option. + "".to_string() + }; + + let claim = Claim::Opening(Opening { + commitment, + new_commitment, + input: input_string, + output: output_string, + status, + }); + + Ok(claim) + } +} + +impl<'a> Proof<'a, S1> { + #[allow(clippy::too_many_arguments)] + pub fn eval_and_prove( + s: &'a mut Store, + expr: Ptr, + supplied_env: Option>, + limit: usize, + only_use_cached_proofs: bool, + nova_prover: &'a NovaProver>, + pp: &'a PublicParams<'_, Coproc>, + lang: Arc>>, + ) -> Result { + let env = supplied_env.unwrap_or_else(|| empty_sym_env(s)); + let cont = s.intern_cont_outermost(); + let input = IO { expr, env, cont }; + + // TODO: It's a little silly that we evaluate here, but evaluation is also repeated in `NovaProver::evaluate_and_prove()`. + // Refactor to avoid that. + let (public_output, _iterations) = evaluate(s, expr, supplied_env, limit, &lang)?; + + let claim = if supplied_env.is_some() { + // This is a bit of a hack, but the idea is that if the env was supplied it's likely to contain a literal function, + // which we will not be able to read. Therefore, we should not produce a string-based claim. + let ptr_evaluation = PtrEvaluation::new(s, input, public_output, None); + Claim::PtrEvaluation(ptr_evaluation) + } else { + let evaluation = Evaluation::new(s, input, public_output, None); + Claim::Evaluation(evaluation) + }; + + Self::prove_claim( + s, + &claim, + limit, + only_use_cached_proofs, + nova_prover, + pp, + lang, + ) + } + + pub fn prove_claim( + s: &'a mut Store, + claim: &Claim, + limit: usize, + only_use_cached_proofs: bool, + nova_prover: &'a NovaProver>, + pp: &'a PublicParams<'_, Coproc>, + lang: Arc>>, + ) -> Result { + let reduction_count = nova_prover.reduction_count(); + + let proof_map = nova_proof_cache(reduction_count); + let function_map = committed_expression_store(); + + let key = claim.proof_key()?.to_base32(); + + if let Some(proof) = proof_map.get(&key) { + return Ok(proof); + } + + if only_use_cached_proofs { + // FIXME: Error handling. + panic!("no cached proof"); + } + + info!("Starting Proving"); + + let (expr, env) = match &claim { + Claim::Evaluation(e) => ( + s.read(&e.expr).expect("bad expression"), + s.read(&e.env).expect("bad env"), + ), + Claim::PtrEvaluation(e) => (e.expr.ptr(s, limit, &lang), e.env.ptr(s, limit, &lang)), + Claim::Opening(o) => { + let commitment = o.commitment; + + // In order to prove the opening, we need access to the original function. 
+ let function = function_map + .get(&commitment) + .expect("function for commitment missing"); + + let input = s.read(&o.input).expect("bad expression"); + let (c, expression) = + Commitment::construct_with_fun_application(s, function, input, limit, &lang)?; + + assert_eq!(commitment, c); + (expression, empty_sym_env(s)) + } + }; + + let (proof, _public_input, _public_output, num_steps) = nova_prover + .evaluate_and_prove(pp, expr, env, s, limit, lang.clone()) + .expect("Nova proof failed"); + + let proof = Self { + claim: claim.clone(), + proof, + num_steps, + reduction_count: ReductionCount::try_from(reduction_count)?, + }; + + match &claim { + Claim::Opening(o) => { + if o.status != Status::Terminal { + return Err(Error::OpeningFailure("Claim status is not Terminal".into())); + }; + } + Claim::Evaluation(e) => { + if e.status != Status::Terminal { + return Err(Error::EvaluationFailure(ReductionError::Misc( + "nonterminal status".into(), + ))); + }; + } + Claim::PtrEvaluation(e) => { + if e.status != Status::Terminal { + return Err(Error::EvaluationFailure(ReductionError::Misc( + "nonterminal status".into(), + ))); + } + } + }; + + proof.verify(pp, &lang).expect("Nova verification failed"); + + proof_map.set(key, &proof).unwrap(); + + Ok(proof) + } + + pub fn verify( + &self, + pp: &PublicParams<'_, Coproc>, + lang: &Lang>, + ) -> Result { + let (public_inputs, public_outputs) = self.io_vecs(lang)?; + + let claim_iterations_and_num_steps_are_consistent = if let Claim::Evaluation(Evaluation { + iterations: Some(iterations), + .. + }) = self.claim + { + // Currently, claims created by fcomm don't include the iteration count. If they do, then it should be + // possible to verify correctness. This may require making the iteration count explicit in the public + // output. That will allow maintaining iteration count without incrementing during frames added as + // padding; and it will also allow explicitly masking the count when desired for zero-knowledge. + // Meanwhile, since Nova currently requires the number of steps to be provided by the verifier, we have + // to provide it. For now, we should at least be able to calculate this value based on number of real + // iterations and number of frames per circuit. This is untested and mostly a placeholder to remind us + // that all of this will need to be handled in a more principled way eventually. (#282) + + let num_steps = self.num_steps; + + let chunk_frame_count = self.reduction_count.count(); + let expected_steps = + (iterations / chunk_frame_count) + (iterations % chunk_frame_count != 0) as usize; + + expected_steps == num_steps + } else { + true + }; + + let verified = claim_iterations_and_num_steps_are_consistent + && self + .proof + .verify(pp, self.num_steps, public_inputs, &public_outputs) + .expect("error verifying"); + + let result = VerificationResult::new(verified); + + Ok(result) + } + + pub fn evaluation_io(&self, s: &mut Store) -> Result<(IO, IO), Error> { + let evaluation = &self.claim.evaluation().expect("expected evaluation claim"); + + let input_io = { + let expr = s + .read(&evaluation.expr) + .map_err(|_| Error::VerificationError("failed to read expr".into()))?; + + let env = s + .read(&evaluation.env) + .map_err(|_| Error::VerificationError("failed to read env".into()))?; + + // FIXME: We ignore cont and assume Outermost, since we can't read a Cont. 
+ let cont = s.intern_cont_outermost(); + + IO:: { expr, env, cont } + }; + + let output_io = { + let expr = s + .read(&evaluation.expr_out) + .map_err(|_| Error::VerificationError("failed to read expr out".into()))?; + + let env = s + .read(&evaluation.env_out) + .map_err(|_| Error::VerificationError("failed to read env out".into()))?; + let cont = evaluation + .status + .to_cont(s) + .ok_or_else(|| Error::VerificationError("continuation cannot be proved".into()))?; + + IO:: { expr, env, cont } + }; + + Ok((input_io, output_io)) + } + + pub fn ptr_evaluation_io( + &self, + s: &mut Store, + lang: &Lang>, + ) -> Result<(IO, IO), Error> { + let ptr_evaluation = &self + .claim + .ptr_evaluation() + .expect("expected PtrEvaluation claim"); + + let input_io = { + let expr = ptr_evaluation.expr.ptr(s, 0, lang); // limit is unneeded because we will not eval. we already have the ptr. + let env = ptr_evaluation.env.ptr(s, 0, lang); + let cont = ptr_evaluation.cont.cont_ptr(s); + + IO:: { expr, env, cont } + }; + + let output_io = { + let expr = ptr_evaluation.expr_out.ptr(s, 0, lang); + let env = ptr_evaluation.env_out.ptr(s, 0, lang); + let cont = ptr_evaluation.cont_out.cont_ptr(s); + + IO:: { expr, env, cont } + }; + + Ok((input_io, output_io)) + } + + pub fn opening_io(&self, s: &mut Store) -> Result<(IO, IO), Error> { + assert!(self.claim.is_opening()); + + let opening = self.claim.opening().expect("expected opening claim"); + let output = opening.public_output_expression(s); + let input = s.read(&opening.input).expect("could not read input"); + + let expression = opening.commitment.fun_application(s, input); + let outermost = s.intern_cont_outermost(); + + let input_io = IO:: { + expr: expression, + env: empty_sym_env(s), + cont: outermost, + }; + + let output_io = IO:: { + expr: output, + env: empty_sym_env(s), + cont: s.intern_cont_terminal(), + }; + + Ok((input_io, output_io)) + } + + pub fn io( + &self, + s: &mut Store, + lang: &Lang>, + ) -> Result<(IO, IO), Error> { + match self.claim { + Claim::Evaluation(_) => self.evaluation_io(s), + Claim::PtrEvaluation(_) => self.ptr_evaluation_io(s, lang), + Claim::Opening(_) => self.opening_io(s), + } + } + + fn io_vecs(&self, lang: &Lang>) -> Result<(Vec, Vec), Error> { + let s = &mut Store::::default(); + + self.io(s, lang) + .map(|(i, o)| (i.to_inputs(s), o.to_inputs(s))) + } +} + +impl VerificationResult { + fn new(verified: bool) -> Self { + Self { verified } + } +} + +pub fn evaluate( + store: &mut Store, + expr: Ptr, + supplied_env: Option>, + limit: usize, + lang: &Lang>, +) -> Result<(IO, usize), Error> { + let env = supplied_env.unwrap_or_else(|| empty_sym_env(store)); + let mut evaluator = Evaluator::new(expr, env, store, limit, lang); + + let (io, iterations, _) = evaluator.eval().map_err(Error::EvaluationFailure)?; + + assert!( as Evaluable, Coproc>>::is_terminal(&io)); + Ok((io, iterations)) +} + +#[cfg(test)] +mod test { + use super::*; + use lurk::public_parameters::FileStore; + use std::path::Path; + use std::sync::Arc; + use tempfile::Builder; + + use lurk::eval::lang::{Coproc, Lang}; + use lurk::proof::{nova::NovaProver, Prover}; + use lurk::public_parameters::public_params; + use lurk::z_data::{from_z_data, to_z_data}; + + #[test] + fn test_cert_serialization() { + use serde_json::json; + + let c = Commitment { + comm: S1::from(123), + }; + + let cid = ZExprPtr::from_parts(ExprTag::Comm, c.comm); + let cert = Cert { + claim_cid: cid, + proof_cid: cid, + verified: true, + verifier_id: "asdf".to_string(), + signature: 
"fdsa".to_string(), + }; + let json = json!(cert); + + let string = json.to_string(); + + let cert_again: Cert = serde_json::from_str(&string).unwrap(); + assert_eq!(cert, cert_again); + } + + // Minimal chained functional commitment test + #[test] + fn lurk_chained_functional_commitment() { + let fcomm_path_key = "FCOMM_DATA_PATH"; + let tmp_dir = Builder::new().prefix("tmp").tempdir().expect("tmp dir"); + let tmp_dir_path = Path::new(tmp_dir.path()); + let fcomm_path_val = tmp_dir_path.join("fcomm_data"); + std::env::set_var(fcomm_path_key, fcomm_path_val.clone()); + assert_eq!( + std::env::var(fcomm_path_key), + Ok(fcomm_path_val.into_os_string().into_string().unwrap()) + ); + + let function_source = "(letrec ((secret 12345) (a (lambda (acc x) (let ((acc (+ acc x))) (cons acc (hide secret (a acc))))))) (a 0))"; + let expected_io = vec![("5", "5"), ("3", "8")]; + + let mut function = CommittedExpression:: { + expr: LurkPtr::Source(function_source.into()), + secret: None, + commitment: None, + }; + + let limit = 1000; + let lang = Lang::new(); + let lang_rc = Arc::new(lang.clone()); + let rc = ReductionCount::One; + let pp = public_params(rc.count(), lang_rc.clone()).expect("public params"); + let chained = true; + let s = &mut Store::::default(); + + let io = expected_io.iter(); + + let fun_ptr = function.expr_ptr(s, limit, &lang).expect("fun_ptr"); + + let (mut commitment, secret) = Commitment::from_ptr_with_hiding(s, &fun_ptr).unwrap(); + + function.secret = Some(secret); + function.commitment = Some(commitment); + + let function_map = committed_expression_store(); + function_map + .set(commitment, &function) + .expect("function_map set"); + + for (function_input, _expected_output) in io { + let prover = NovaProver::>::new(rc.count(), lang.clone()); + + let input = s.read(function_input).expect("Read error"); + + let proof = Opening::apply_and_prove( + s, + input, + function.clone(), + limit, + chained, + false, + &prover, + &pp, + lang_rc.clone(), + ) + .expect("apply and prove"); + + proof.verify(&pp, &lang_rc).expect("Failed to verify"); + + let opening = proof.claim.opening().expect("expected opening claim"); + + match opening.new_commitment { + Some(c) => commitment = c, + _ => panic!("new commitment missing"), + } + println!("Commitment: {:?}", commitment); + } + } + proptest! { + #[test] + fn prop_z_bytes(x in any::()) { + let ser = to_z_data(&x).expect("write ZBytes"); + let de: ZBytes = from_z_data(&ser).expect("read ZBytes"); + assert_eq!(x, de); + + let ser: Vec = bincode::serialize(&x).expect("write ZBytes"); + let de: ZBytes = bincode::deserialize(&ser).expect("read ZBytes"); + assert_eq!(x, de); + + let tmp_dir = Builder::new().prefix("tmp").tempdir().expect("tmp dir"); + let tmp_dir_path = Path::new(tmp_dir.path()); + let z_bytes_path = tmp_dir_path.join("zbytes.json"); + x.write_to_path(&z_bytes_path); + assert_eq!(x, ZBytes::read_from_path(&z_bytes_path).unwrap()); + } + } + + proptest! 
{ + #[test] + fn prop_z_store_ptr(x in any::>()) { + let ser = to_z_data(&x).expect("write ZStorePtr"); + let de: ZStorePtr = from_z_data(&ser).expect("read ZStorePtr"); + assert_eq!(x, de); + + let ser: Vec = bincode::serialize(&x).expect("write ZStorePtr"); + let de: ZStorePtr = bincode::deserialize(&ser).expect("read ZStorePtr"); + assert_eq!(x, de); + + let tmp_dir = Builder::new().prefix("tmp").tempdir().expect("tmp dir"); + let tmp_dir_path = Path::new(tmp_dir.path()); + let z_store_ptr_path = tmp_dir_path.join("zstoreptr.json"); + x.write_to_path(&z_store_ptr_path); + assert_eq!(x, ZStorePtr::::read_from_path(&z_store_ptr_path).unwrap()); + } + } + + proptest! { + #[test] + fn prop_lurk_ptr(x in any::>()) { + let ser = to_z_data(&x).expect("write LurkPtr"); + let de: LurkPtr = from_z_data(&ser).expect("read LurkPtr"); + assert_eq!(x, de); + + let ser: Vec = bincode::serialize(&x).expect("write LurkPtr"); + let de: LurkPtr = bincode::deserialize(&ser).expect("read LurkPtr"); + assert_eq!(x, de); + + let tmp_dir = Builder::new().prefix("tmp").tempdir().expect("tmp dir"); + let tmp_dir_path = Path::new(tmp_dir.path()); + let lurk_ptr_path = tmp_dir_path.join("lurkptr.json"); + x.write_to_path(&lurk_ptr_path); + assert_eq!(x, LurkPtr::::read_from_path(&lurk_ptr_path).unwrap()); + } + } + + proptest! { + #[test] + fn prop_ptr_evaluation(x in any::>()) { + let ser = to_z_data(&x).expect("write PtrEvaluation"); + let de: PtrEvaluation = from_z_data(&ser).expect("read PtrEvaluation"); + assert_eq!(x, de); + + let ser: Vec = bincode::serialize(&x).expect("write PtrEvalution"); + let de: PtrEvaluation = bincode::deserialize(&ser).expect("read PtrEvaluation"); + assert_eq!(x, de); + + let tmp_dir = Builder::new().prefix("tmp").tempdir().expect("tmp dir"); + let tmp_dir_path = Path::new(tmp_dir.path()); + let ptr_evaluation_path = tmp_dir_path.join("ptrevaluation.json"); + x.write_to_path(&ptr_evaluation_path); + assert_eq!(x, PtrEvaluation::::read_from_path(&ptr_evaluation_path).unwrap()); + } + } + + proptest! { + #[test] + fn prop_committed_expr(x in any::>()) { + let ser = to_z_data(&x).expect("write CommittedExpression"); + let de: CommittedExpression = from_z_data(&ser).expect("read CommittedExpression"); + assert_eq!(x, de); + + let ser: Vec = bincode::serialize(&x).expect("write CommittedExpression"); + let de: CommittedExpression = bincode::deserialize(&ser).expect("read CommittedExpression"); + assert_eq!(x, de); + + let tmp_dir = Builder::new().prefix("tmp").tempdir().expect("tmp dir"); + let tmp_dir_path = Path::new(tmp_dir.path()); + let committed_expr_path = tmp_dir_path.join("committedexpr.json"); + x.write_to_path(&committed_expr_path); + assert_eq!(x, CommittedExpression::::read_from_path(&committed_expr_path).unwrap()); + } + } + + proptest! { + #[test] + fn prop_opening(x in any::>()) { + let ser = to_z_data(&x).expect("write Opening"); + let de: Opening = from_z_data(&ser).expect("read Opening"); + assert_eq!(x, de); + + let ser: Vec = bincode::serialize(&x).expect("write Opening"); + let de: Opening = bincode::deserialize(&ser).expect("read Opening"); + assert_eq!(x, de); + + let tmp_dir = Builder::new().prefix("tmp").tempdir().expect("tmp dir"); + let tmp_dir_path = Path::new(tmp_dir.path()); + let opening_path = tmp_dir_path.join("opening.json"); + x.write_to_path(&opening_path); + assert_eq!(x, Opening::::read_from_path(&opening_path).unwrap()); + } + } + + proptest! 
{ + #[test] + fn prop_claim(x in any::>()) { + let ser = to_z_data(&x).expect("write Claim"); + let de: Claim = from_z_data(&ser).expect("read Claim"); + assert_eq!(x, de); + + let ser: Vec = bincode::serialize(&x).expect("write Claim"); + let de: Claim = bincode::deserialize(&ser).expect("read Claim"); + assert_eq!(x, de); + + let tmp_dir = Builder::new().prefix("tmp").tempdir().expect("tmp dir"); + let tmp_dir_path = Path::new(tmp_dir.path()); + let claim_path = tmp_dir_path.join("claim.json"); + x.write_to_path(&claim_path); + assert_eq!(x, Claim::::read_from_path(&claim_path).unwrap()); + } + } +} diff --git a/fcomm/tests/makefile_tests.rs b/fcomm/tests/makefile_tests.rs index 6035b54803..8fa1fc3835 100644 --- a/fcomm/tests/makefile_tests.rs +++ b/fcomm/tests/makefile_tests.rs @@ -20,7 +20,7 @@ fn test_make_fcomm_examples() { let cpus = num_cpus::get(); let make_output = Command::new("make") - .current_dir(examples_dir) + .current_dir(&examples_dir) .arg(format!("-j{}", cpus)) .output() .expect("Failed to run the make command, is make installed?"); diff --git a/fcomm/tests/proof_tests.rs b/fcomm/tests/proof_tests.rs index 21754dc2d3..66ca80e278 100644 --- a/fcomm/tests/proof_tests.rs +++ b/fcomm/tests/proof_tests.rs @@ -7,7 +7,8 @@ use tempfile::{Builder, TempDir}; use pasta_curves::pallas; -use lurk::public_parameters::{Commitment, CommittedExpression, FileStore, LurkPtr, Proof}; +use fcomm::{Commitment, CommittedExpression, LurkPtr, Proof}; +use lurk::public_parameters::FileStore; use lurk::store::Store; use camino::Utf8Path; diff --git a/lurk-macros/Cargo.toml b/lurk-macros/Cargo.toml index 1a848e9bca..24eb402c30 100644 --- a/lurk-macros/Cargo.toml +++ b/lurk-macros/Cargo.toml @@ -14,7 +14,11 @@ proc-macro = true proc-macro2 = "1.0.24" quote = "1.0.9" syn = { version = "1.0.64", features = ["derive", "extra-traits", "full"] } +proptest = "1.1.0" +proptest-derive = "0.3.0" +serde = { workspace = true, features = ["derive"] } [dev-dependencies] +bincode = { workspace = true } lurk_crate = { path = "../", package = "lurk" } pasta_curves = { workspace = true, features = ["repr-c", "serde"] } diff --git a/lurk-macros/src/lib.rs b/lurk-macros/src/lib.rs index 366d2de8e8..768d7bd29e 100644 --- a/lurk-macros/src/lib.rs +++ b/lurk-macros/src/lib.rs @@ -15,8 +15,12 @@ extern crate proc_macro; use proc_macro::TokenStream; -use quote::quote; -use syn::{parse_macro_input, Data, DataEnum, DeriveInput, Ident}; +use proc_macro2::Span; +use quote::{quote, ToTokens}; +use syn::{ + parse_macro_input, AttributeArgs, Data, DataEnum, DeriveInput, Ident, Item, Lit, Meta, + MetaList, NestedMeta, Type, +}; #[proc_macro_derive(Coproc)] pub fn derive_enum_coproc(input: TokenStream) -> TokenStream { @@ -230,3 +234,144 @@ pub fn let_store(_tokens: TokenStream) -> TokenStream { pub fn lurk(tokens: TokenStream) -> TokenStream { Lurk::parse_raw(tokens.into()).unwrap().emit() } + +/// This macro is used to generate round-trip serialization tests. +/// +/// By appending `serde_test` to a struct or enum definition, you automatically derive +/// serialization tests that employ Serde for round-trip testing. The procedure in the generated tests is: +/// 1. Instantiate the type being tested +/// 2. Serialize the instance, ensuring the operation's success +/// 3. 
Deserialize the serialized data, comparing the resulting instance with the original one +/// +/// The type being tested must meet the following requirements: +/// * Implementations of `Debug` and `PartialEq` traits +/// * Implementation of `Arbitrary` trait +/// * Implementations of `Serialize` and `DeserializeOwned` traits +/// +/// For testing generic types, use the `types(...)` attribute to list type parameters for testing, +/// separated by commas. For complex types (e.g., ones where type parameters have their own parameters), +/// enclose them in quotation marks. To test different combinations of type parameters, `types` +/// can be used multiple times. +/// +/// # Example +/// ``` +/// use proptest_derive::Arbitrary; +/// use serde::{Serialize, Deserialize}; +/// use lurk_macros::serde_test; +/// +/// // The macro derives serialization tests using an arbitrary instance. +/// #[serde_test(types(u64, "Vec"), types(u32, bool))] +/// #[derive(Debug, Default, PartialEq, Arbitrary, Serialize, Deserialize)] +/// struct Generic { +/// t1: T1, +/// t2: T2, +/// } +/// ``` +/// +#[proc_macro_attribute] +pub fn serde_test(args: TokenStream, input: TokenStream) -> TokenStream { + let args = parse_macro_input!(args as AttributeArgs); + let input = parse_macro_input!(input as Item); + let name = match &input { + Item::Struct(item) => &item.ident, + Item::Enum(item) => &item.ident, + _ => panic!("This macro only works on structs and enums"), + }; + + // Parse arguments. + let mut types = Vec::new(); + let mut test_zdata = false; + for arg in args { + match arg { + // List arguments (as in #[serde_test(arg(val))]) + NestedMeta::Meta(Meta::List(MetaList { path, nested, .. })) => match path.get_ident() { + Some(id) if *id == "types" => { + let params = nested.iter().map(parse_type).collect::>(); + types.push(quote!(<#name<#(#params),*>>)); + } + + Some(id) if *id == "zdata" => { + if nested.len() != 1 { + panic!("zdata attribute takes 1 argument"); + } + match &nested[0] { + NestedMeta::Lit(Lit::Bool(b)) => { + test_zdata = b.value; + } + _ => panic!("zdata argument must be a boolean"), + } + } + + _ => panic!("invalid attribute {:?}", path), + }, + + _ => panic!("invalid argument {:?}", arg), + } + } + + if types.is_empty() { + // If no explicit type parameters were given for us to test with, assume the type under test + // takes no type parameters. + types.push(quote!(<#name>)); + } + + let mut output = quote! { + #input + }; + + for (i, ty) in types.into_iter().enumerate() { + let serde_test = { + let test_name = Ident::new( + &format!("test_serde_roundtrip_{}_{}", name, i), + Span::mixed_site(), + ); + quote! { + #[cfg(test)] + proptest::proptest!{ + #[test] + fn #test_name(obj in proptest::prelude::any::#ty()) { + let buf = bincode::serialize(&obj).unwrap(); + assert_eq!(obj, bincode::deserialize(&buf).unwrap()); + } + } + } + }; + + let zdata_test = if test_zdata { + let test_name = Ident::new( + &format!("test_zdata_roundtrip_{}_{}", name, i), + Span::mixed_site(), + ); + quote! { + #[cfg(test)] + proptest::proptest!{ + #[test] + fn #test_name(obj in proptest::prelude::any::#ty()) { + let ser = crate::z_data::to_z_data(&obj).unwrap(); + assert_eq!(obj, crate::z_data::from_z_data(&ser).unwrap()); + } + } + } + } else { + quote! {} + }; + + output = quote! 
{ + #output + #serde_test + #zdata_test + }; + } + + output.into() +} + +fn parse_type(m: &NestedMeta) -> Type { + match m { + NestedMeta::Lit(Lit::Str(s)) => syn::parse_str(&s.value()).unwrap(), + NestedMeta::Meta(Meta::Path(p)) => syn::parse2(p.to_token_stream()).unwrap(), + _ => { + panic!("expected type"); + } + } +} diff --git a/src/cli/mod.rs b/src/cli/mod.rs new file mode 100644 index 0000000000..8890d2c8f3 --- /dev/null +++ b/src/cli/mod.rs @@ -0,0 +1,234 @@ +mod paths; +mod repl; + +use std::fs; +use std::path::PathBuf; + +use anyhow::{bail, Context, Result}; + +use lurk::eval::lang::Coproc; +use lurk::field::{LanguageField, LurkField}; +use lurk::store::Store; +use lurk::z_data::{from_z_data, ZData}; +use lurk::z_store::ZStore; +use pasta_curves::{pallas, vesta}; + +use clap::{Args, Parser, Subcommand}; + +// use self::prove_and_verify::verify_proof; +use self::repl::Repl; + +const DEFAULT_FIELD: LanguageField = LanguageField::Pallas; +const DEFAULT_LIMIT: usize = 100_000_000; +const DEFAULT_RC: usize = 10; + +#[derive(Parser, Debug)] +#[clap(version)] +struct Cli { + #[clap(subcommand)] + command: Command, +} + +#[derive(Subcommand, Debug)] +enum Command { + /// Loads a file, processing forms sequentially ("load" can be elided) + Load(LoadArgs), + /// Enters Lurk's REPL environment ("repl" can be elided) + Repl(ReplArgs), + // /// Verifies a Lurk proof + // Verify(VerifyArgs), +} + +#[derive(Args, Debug)] +struct LoadArgs { + /// The file to be loaded + #[clap(value_parser)] + lurk_file: PathBuf, + + /// ZStore to be preloaded before the loading the file + #[clap(long, value_parser)] + zstore: Option, + + /// Maximum number of iterations allowed (defaults to 100_000_000) + #[clap(long, value_parser)] + limit: Option, + // /// Reduction count used for proofs (defaults to 10) + // #[clap(long, value_parser)] + // rc: Option, + + // /// Flag to prove the last evaluation + // #[arg(long)] + // prove: bool, +} + +#[derive(Parser, Debug)] +struct LoadCli { + #[clap(value_parser)] + lurk_file: PathBuf, + + #[clap(long, value_parser)] + zstore: Option, + + #[clap(long, value_parser)] + limit: Option, + // #[arg(long)] + // prove: bool, + + // #[clap(long, value_parser)] + // rc: Option, +} + +impl LoadArgs { + pub fn into_cli(self) -> LoadCli { + LoadCli { + lurk_file: self.lurk_file, + zstore: self.zstore, + limit: self.limit, + // prove: self.prove, + // rc: self.rc, + } + } +} + +#[derive(Args, Debug)] +struct ReplArgs { + /// ZStore to be preloaded before entering the REPL (and loading a file) + #[clap(long, value_parser)] + zstore: Option, + + /// Optional file to be loaded before entering the REPL + #[clap(long, value_parser)] + load: Option, + + /// Maximum number of iterations allowed (defaults to 100_000_000) + #[clap(long, value_parser)] + limit: Option, + // /// Reduction count used for proofs (defaults to 10) + // #[clap(long, value_parser)] + // rc: Option, +} + +#[derive(Parser, Debug)] +struct ReplCli { + #[clap(long, value_parser)] + load: Option, + + #[clap(long, value_parser)] + zstore: Option, + + #[clap(long, value_parser)] + limit: Option, + // #[clap(long, value_parser)] + // rc: Option, +} + +impl ReplArgs { + pub fn into_cli(self) -> ReplCli { + ReplCli { + load: self.load, + zstore: self.zstore, + limit: self.limit, + // rc: self.rc, + } + } +} + +fn get_field() -> Result { + if let Ok(lurk_field) = std::env::var("LURK_FIELD") { + match lurk_field.to_lowercase().as_str() { + "bls12-381" => Ok(LanguageField::BLS12_381), + "pallas" => Ok(LanguageField::Pallas), + 
"vesta" => Ok(LanguageField::Vesta), + _ => bail!("Field not supported: {lurk_field}"), + } + } else { + Ok(DEFAULT_FIELD) + } +} + +fn get_store serde::de::Deserialize<'a>>( + zstore_path: &Option, +) -> Result> { + match zstore_path { + None => Ok(Store::default()), + Some(zstore_path) => { + let bytes = fs::read(zstore_path)?; + let zdata = ZData::from_bytes(&bytes)?; + let zstore: ZStore = from_z_data(&zdata)?; + Ok(zstore.to_store()) + } + } +} + +macro_rules! new_repl { + ( $cli: expr, $field: path ) => {{ + let limit = $cli.limit.unwrap_or(DEFAULT_LIMIT); + // let rc = $cli.rc.unwrap_or(DEFAULT_RC); + let mut store = get_store(&$cli.zstore).with_context(|| "reading store from file")?; + let env = store.nil(); + // Repl::<$field, Coproc<$field>>::new(store, env, limit, rc)? + Repl::<$field, Coproc<$field>>::new(store, env, limit, DEFAULT_RC)? + }}; +} + +impl ReplCli { + pub fn run(&self) -> Result<()> { + macro_rules! repl { + ( $field: path ) => {{ + let mut repl = new_repl!(self, $field); + if let Some(lurk_file) = &self.load { + repl.load_file(lurk_file)?; + } + repl.start() + }}; + } + match get_field()? { + LanguageField::Pallas => repl!(pallas::Scalar), + LanguageField::Vesta => repl!(vesta::Scalar), + LanguageField::BLS12_381 => repl!(blstrs::Scalar), + } + } +} + +impl LoadCli { + pub fn run(&self) -> Result<()> { + macro_rules! load { + ( $field: path ) => {{ + let mut repl = new_repl!(self, $field); + repl.load_file(&self.lurk_file)?; + // if self.prove { + // repl.prove_last_claim()?; + // } + Ok(()) + }}; + } + match get_field()? { + LanguageField::Pallas => load!(pallas::Scalar), + LanguageField::Vesta => load!(vesta::Scalar), + LanguageField::BLS12_381 => load!(blstrs::Scalar), + } + } +} + +// #[derive(Args, Debug)] +// struct VerifyArgs { +// #[clap(value_parser)] +// proof_file: PathBuf, +// } + +/// Parses CLI arguments and continues the program flow accordingly +pub fn parse_and_run() -> Result<()> { + #[cfg(not(target_arch = "wasm32"))] + paths::create_lurk_dir()?; + if let Ok(repl_cli) = ReplCli::try_parse() { + repl_cli.run() + } else if let Ok(load_cli) = LoadCli::try_parse() { + load_cli.run() + } else { + match Cli::parse().command { + Command::Repl(repl_args) => repl_args.into_cli().run(), + Command::Load(load_args) => load_args.into_cli().run(), + // Command::Verify(verify_args) => verify_proof(&verify_args.proof_file), + } + } +} diff --git a/src/cli/paths.rs b/src/cli/paths.rs new file mode 100644 index 0000000000..531edf03d3 --- /dev/null +++ b/src/cli/paths.rs @@ -0,0 +1,29 @@ +#[cfg(not(target_arch = "wasm32"))] +use anyhow::Result; + +#[cfg(not(target_arch = "wasm32"))] +use std::{ + fs, + path::{Path, PathBuf}, +}; + +#[cfg(not(target_arch = "wasm32"))] +fn home_dir() -> PathBuf { + home::home_dir().expect("missing home directory") +} + +#[cfg(not(target_arch = "wasm32"))] +pub fn lurk_dir() -> PathBuf { + home_dir().join(Path::new(".lurk")) +} + +#[cfg(not(target_arch = "wasm32"))] +pub fn create_lurk_dir() -> Result<()> { + let dir_path = lurk_dir(); + Ok(fs::create_dir_all(dir_path)?) 
+} + +#[cfg(not(target_arch = "wasm32"))] +pub fn repl_history() -> PathBuf { + lurk_dir().join(Path::new("repl-history")) +} diff --git a/src/cli/repl.rs b/src/cli/repl.rs new file mode 100644 index 0000000000..2a81ba4b46 --- /dev/null +++ b/src/cli/repl.rs @@ -0,0 +1,465 @@ +use std::io; +use std::path::Path; +use std::sync::Arc; +use std::{fs::read_to_string, process}; + +use anyhow::{bail, Context, Result}; + +use rustyline::{ + error::ReadlineError, + history::DefaultHistory, + validate::{MatchingBracketValidator, ValidationContext, ValidationResult, Validator}, + Config, Editor, +}; +use rustyline_derive::{Completer, Helper, Highlighter, Hinter}; + +use lurk::{ + eval::{lang::Lang, Evaluator}, + field::LurkField, + parser, + ptr::Ptr, + store::Store, + tag::{ContTag, ExprTag}, + writer::Write, + Num, UInt, + {coprocessor::Coprocessor, eval::IO}, +}; + +#[cfg(not(target_arch = "wasm32"))] +use crate::cli::paths::repl_history; + +#[derive(Completer, Helper, Highlighter, Hinter)] +struct InputValidator { + brackets: MatchingBracketValidator, +} + +impl Validator for InputValidator { + fn validate(&self, ctx: &mut ValidationContext<'_>) -> rustyline::Result { + self.brackets.validate(ctx) + } +} + +pub struct Repl> { + store: Store, + env: Ptr, + limit: usize, + lang: Arc>, + // last_claim: Option>, + rc: usize, +} + +fn check_non_zero(name: &str, x: usize) -> Result<()> { + if x == 0 { + bail!("`{name}` can't be zero") + } + Ok(()) +} + +/// Pads the number of iterations to the first multiple of the reduction count +/// that's equal or greater than the number of iterations +/// +/// Panics if reduction count is zero +#[allow(dead_code)] +fn pad_iterations(iterations: usize, rc: usize) -> usize { + let lower = rc * (iterations / rc); + if lower < iterations { + lower + rc + } else { + lower + } +} + +impl serde::Deserialize<'de>, C: Coprocessor> + Repl +{ + pub fn new(store: Store, env: Ptr, limit: usize, rc: usize) -> Result> { + check_non_zero("limit", limit)?; + check_non_zero("rc", rc)?; + Ok(Repl { + store, + env, + limit, + lang: Arc::new(Lang::::new()), + // last_claim: None, + rc, + }) + } + + pub fn prove_last_claim(&mut self) -> Result<()> { + Ok(()) + // match &self.last_claim { + // Some(claim) => { + // // TODO + // let _proof = prove_claim(claim); + // Ok(()) + // } + // None => { + // bail!("No claim to prove"); + // } + // } + } + + #[inline] + fn eval_expr(&mut self, expr_ptr: Ptr) -> Result<(IO, usize, Vec>)> { + Ok(Evaluator::new(expr_ptr, self.env, &mut self.store, self.limit, &self.lang).eval()?) 
+ } + + fn peek1(&self, cmd: &str, args: &Ptr) -> Result> { + let (first, rest) = self.store.car_cdr(args)?; + if !rest.is_nil() { + bail!("`{cmd}` accepts at most one argument") + } + Ok(first) + } + + fn peek2(&self, cmd: &str, args: &Ptr) -> Result<(Ptr, Ptr)> { + let (first, rest) = self.store.car_cdr(args)?; + let (second, rest) = self.store.car_cdr(&rest)?; + if !rest.is_nil() { + bail!("`{cmd}` accepts at most two arguments") + } + Ok((first, second)) + } + + fn peek_usize(&self, cmd: &str, args: &Ptr) -> Result { + let first = self.peek1(cmd, args)?; + match first.tag { + ExprTag::Num => match self.store.fetch_num(&first).unwrap() { + Num::U64(u) => Ok(*u as usize), + _ => bail!( + "Invalid value for `{cmd}`: {}", + first.fmt_to_string(&self.store) + ), + }, + ExprTag::U64 => match self.store.fetch_uint(&first).unwrap() { + UInt::U64(u) => Ok(u as usize), + }, + _ => bail!( + "Invalid value for `{cmd}`: {}", + first.fmt_to_string(&self.store) + ), + } + } + + fn handle_meta_cases(&mut self, cmd: &str, args: &Ptr, pwd_path: &Path) -> Result<()> { + match cmd { + "def" => { + // Extends env with a non-recursive binding. + // + // This: !(:def foo (lambda () 123)) + // + // Gets macroexpanded to this: (let ((foo (lambda () 123))) + // (current-env)) + // + // And the state's env is set to the result. + let (first, second) = self.peek2(cmd, args)?; + let l = &self.store.lurk_sym("let"); + let current_env = &self.store.lurk_sym("current-env"); + let binding = &self.store.list(&[first, second]); + let bindings = &self.store.list(&[*binding]); + let current_env_call = &self.store.list(&[*current_env]); + let expanded = &self.store.list(&[*l, *bindings, *current_env_call]); + let (expanded_io, ..) = self.eval_expr(*expanded)?; + + self.env = expanded_io.expr; + + let (new_binding, _) = &self.store.car_cdr(&expanded_io.expr)?; + let (new_name, _) = self.store.car_cdr(new_binding)?; + println!("{}", new_name.fmt_to_string(&self.store)); + } + "defrec" => { + // Extends env with a recursive binding. + // + // This: !(:defrec foo (lambda () 123)) + // + // Gets macroexpanded to this: (letrec ((foo (lambda () 123))) + // (current-env)) + // + // And the state's env is set to the result. + let (first, second) = self.peek2(cmd, args)?; + let l = &self.store.lurk_sym("letrec"); + let current_env = &self.store.lurk_sym("current-env"); + let binding = &self.store.list(&[first, second]); + let bindings = &self.store.list(&[*binding]); + let current_env_call = &self.store.list(&[*current_env]); + let expanded = &self.store.list(&[*l, *bindings, *current_env_call]); + let (expanded_io, ..) = self.eval_expr(*expanded)?; + + self.env = expanded_io.expr; + + let (new_binding_outer, _) = &self.store.car_cdr(&expanded_io.expr)?; + let (new_binding_inner, _) = &self.store.car_cdr(new_binding_outer)?; + let (new_name, _) = self.store.car_cdr(new_binding_inner)?; + println!("{}", new_name.fmt_to_string(&self.store)); + } + "load" => { + let first = self.peek1(cmd, args)?; + match self.store.fetch_string(&first) { + Some(path) => { + let joined = pwd_path.join(Path::new(&path)); + self.load_file(&joined)? + } + _ => bail!("Argument of `load` must be a string."), + } + io::Write::flush(&mut io::stdout()).unwrap(); + } + "assert" => { + let first = self.peek1(cmd, args)?; + let (first_io, ..) = self.eval_expr(first)?; + if first_io.expr.is_nil() { + eprintln!( + "`assert` failed. 
{} evaluates to nil", + first.fmt_to_string(&self.store) + ); + process::exit(1); + } + } + "assert-eq" => { + let (first, second) = self.peek2(cmd, args)?; + let (first_io, ..) = self + .eval_expr(first) + .with_context(|| "evaluating first arg")?; + let (second_io, ..) = self + .eval_expr(second) + .with_context(|| "evaluating second arg")?; + if !&self.store.ptr_eq(&first_io.expr, &second_io.expr)? { + eprintln!( + "`assert-eq` failed. Expected:\n {} = {}\nGot:\n {} ≠ {}", + first.fmt_to_string(&self.store), + second.fmt_to_string(&self.store), + first_io.expr.fmt_to_string(&self.store), + second_io.expr.fmt_to_string(&self.store) + ); + process::exit(1); + } + } + "assert-emitted" => { + let (first, second) = self.peek2(cmd, args)?; + let (first_io, ..) = self + .eval_expr(first) + .with_context(|| "evaluating first arg")?; + let (.., emitted) = self + .eval_expr(second) + .with_context(|| "evaluating second arg")?; + let (mut first_emitted, mut rest_emitted) = self.store.car_cdr(&first_io.expr)?; + for (i, elem) in emitted.iter().enumerate() { + if elem != &first_emitted { + eprintln!( + "`assert-emitted` failed at position {i}. Expected {}, but found {}.", + first_emitted.fmt_to_string(&self.store), + elem.fmt_to_string(&self.store), + ); + process::exit(1); + } + (first_emitted, rest_emitted) = self.store.car_cdr(&rest_emitted)?; + } + } + "assert-error" => { + let first = self.peek1(cmd, args)?; + let (first_io, ..) = self.eval_expr(first)?; + if first_io.cont.tag != ContTag::Error { + eprintln!( + "`assert-error` failed. {} doesn't result on evaluation error.", + first.fmt_to_string(&self.store) + ); + process::exit(1); + } + } + "clear" => self.env = self.store.nil(), + "set-env" => { + // The state's env is set to the result of evaluating the first argument. + let first = self.peek1(cmd, args)?; + let (first_io, ..) = self.eval_expr(first)?; + self.env = first_io.expr; + } + "set-limit" => { + let limit = self.peek_usize(cmd, args)?; + check_non_zero("limit", limit)?; + self.limit = limit; + } + "set-rc" => { + let rc = self.peek_usize(cmd, args)?; + check_non_zero("rc", rc)?; + self.rc = rc; + } + "prove" => { + if !args.is_nil() { + self.eval_expr_and_set_last_claim(self.peek1(cmd, args)?)?; + } + self.prove_last_claim()?; + } + "verify" => { + todo!() + } + _ => bail!("Unsupported meta command: {cmd}"), + } + Ok(()) + } + + fn handle_meta(&mut self, expr_ptr: Ptr, pwd_path: &Path) -> Result<()> { + let (car, cdr) = self.store.car_cdr(&expr_ptr)?; + match &self.store.fetch_symbol(&car) { + Some(symbol) => { + self.handle_meta_cases(format!("{}", symbol).as_str(), &cdr, pwd_path)? + } + None => bail!( + "Meta command must be a symbol. 
Found {}", + car.fmt_to_string(&self.store) + ), + } + Ok(()) + } + + fn eval_expr_and_set_last_claim(&mut self, expr_ptr: Ptr) -> Result<(IO, usize)> { + self.eval_expr(expr_ptr).map(|(output, iterations, _)| { + if matches!( + output.cont.tag, + ContTag::Outermost | ContTag::Terminal | ContTag::Error + ) { + // let cont = self.store.get_cont_outermost(); + + // let claim = Claim::PtrEvaluation::(PtrEvaluation { + // expr: LurkPtr::from_ptr(&mut self.store, &expr_ptr), + // env: LurkPtr::from_ptr(&mut self.store, &self.env), + // cont: LurkCont::from_cont_ptr(&mut self.store, &cont), + // expr_out: LurkPtr::from_ptr(&mut self.store, &output.expr), + // env_out: LurkPtr::from_ptr(&mut self.store, &output.env), + // cont_out: LurkCont::from_cont_ptr(&mut self.store, &output.cont), + // status: as Evaluable, Coproc>>::status( + // &output, + // ), + // iterations: Some(pad_iterations(iterations, self.rc)), + // }); + + // self.last_claim = Some(claim); + } + (output, iterations) + }) + } + + fn handle_non_meta(&mut self, expr_ptr: Ptr) -> Result<()> { + self.eval_expr_and_set_last_claim(expr_ptr) + .map(|(output, iterations)| { + let prefix = if iterations != 1 { + format!("[{iterations} iterations] => ") + } else { + "[1 iteration] => ".into() + }; + + let suffix = match output.cont.tag { + ContTag::Outermost | ContTag::Terminal => { + output.expr.fmt_to_string(&self.store) + } + ContTag::Error => "ERROR!".into(), + _ => format!("Computation incomplete after limit: {}", self.limit), + }; + + println!("{}{}", prefix, suffix); + }) + } + + fn handle_form<'a>( + &mut self, + input: parser::Span<'a>, + pwd_path: &Path, + ) -> Result> { + let (input, ptr, is_meta) = self.store.read_maybe_meta(input)?; + + if is_meta { + self.handle_meta(ptr, pwd_path)?; + } else { + self.handle_non_meta(ptr)?; + } + Ok(input) + } + + pub fn load_file(&mut self, file_path: &Path) -> Result<()> { + let input = read_to_string(file_path)?; + println!("Loading {}", file_path.display()); + + let mut input = parser::Span::new(&input); + loop { + match self.handle_form(input, file_path) { + Ok(new_input) => input = new_input, + Err(e) => { + if let Some(parser::Error::NoInput) = e.downcast_ref::() { + // It's ok, it just means we've hit the EOF + return Ok(()); + } else { + return Err(e); + } + } + } + } + } + + pub fn start(&mut self) -> Result<()> { + println!("Lurk REPL welcomes you."); + + let pwd_path = &std::env::current_dir()?; + + let mut editor: Editor = Editor::with_config( + Config::builder() + .color_mode(rustyline::ColorMode::Enabled) + .auto_add_history(true) + .build(), + )?; + + editor.set_helper(Some(InputValidator { + brackets: MatchingBracketValidator::new(), + })); + + #[cfg(not(target_arch = "wasm32"))] + let history_path = &repl_history(); + + #[cfg(not(target_arch = "wasm32"))] + if history_path.exists() { + editor.load_history(history_path)?; + } + + loop { + match editor.readline("> ") { + Ok(line) => { + #[cfg(not(target_arch = "wasm32"))] + editor.save_history(history_path)?; + match self.store.read_maybe_meta(parser::Span::new(&line)) { + Ok((_, expr_ptr, is_meta)) => { + if is_meta { + if let Err(e) = self.handle_meta(expr_ptr, pwd_path) { + println!("!Error: {e}"); + } + } else if let Err(e) = self.handle_non_meta(expr_ptr) { + println!("Error: {e}"); + } + } + Err(parser::Error::NoInput) => (), + Err(e) => { + println!("Read error: {e}") + } + } + } + Err(ReadlineError::Interrupted | ReadlineError::Eof) => { + println!("Exiting..."); + break; + } + Err(err) => { + println!("Read line 
error: {err}"); + break; + } + } + } + Ok(()) + } +} + +mod test { + #[test] + fn test_padding() { + use crate::cli::repl::pad_iterations; + assert_eq!(pad_iterations(61, 10), 70); + assert_eq!(pad_iterations(1, 10), 10); + assert_eq!(pad_iterations(61, 1), 61); + assert_eq!(pad_iterations(610, 10), 610); + assert_eq!(pad_iterations(619, 20), 620); + } +} diff --git a/src/eval/mod.rs b/src/eval/mod.rs index e3845e2524..4813f78e9f 100644 --- a/src/eval/mod.rs +++ b/src/eval/mod.rs @@ -12,6 +12,8 @@ use lang::Lang; use log::info; #[cfg(not(target_arch = "wasm32"))] +use lurk_macros::serde_test; +#[cfg(not(target_arch = "wasm32"))] use proptest_derive::Arbitrary; use serde::{Deserialize, Serialize}; use std::cmp::PartialEq; @@ -60,6 +62,7 @@ pub struct Frame { } #[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] +#[cfg_attr(not(target_arch = "wasm32"), serde_test)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] pub enum Status { Terminal, diff --git a/src/main.rs b/src/main.rs index 9c7a66c6a4..026af26c9f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,40 +1,8 @@ -use anyhow::Result; +mod cli; -use lurk::eval::lang::{Coproc, Lang}; -use lurk::field::LanguageField; -use lurk::repl::{repl_cli, ReplState}; -use pasta_curves::{pallas, vesta}; +use anyhow::Result; fn main() -> Result<()> { pretty_env_logger::init(); - - let default_field = LanguageField::Pallas; - let field = if let Ok(lurk_field) = std::env::var("LURK_FIELD") { - match lurk_field.as_str() { - "BLS12-381" => LanguageField::BLS12_381, - "PALLAS" => LanguageField::Pallas, - "VESTA" => LanguageField::Vesta, - _ => default_field, - } - } else { - default_field - }; - - match field { - LanguageField::BLS12_381 => repl_cli::< - blstrs::Scalar, - ReplState>, - Coproc, - >(Lang::>::new()), - LanguageField::Pallas => repl_cli::< - pallas::Scalar, - ReplState>, - Coproc, - >(Lang::>::new()), - LanguageField::Vesta => repl_cli::< - vesta::Scalar, - ReplState>, - Coproc, - >(Lang::>::new()), - } + cli::parse_and_run() } diff --git a/src/public_parameters/error.rs b/src/public_parameters/error.rs index a06b95f9d3..9624a614b7 100644 --- a/src/public_parameters/error.rs +++ b/src/public_parameters/error.rs @@ -1,35 +1,12 @@ -use crate::error::ReductionError; -use crate::store; -use bellperson::SynthesisError; use std::io; use thiserror::Error; #[derive(Error, Debug)] pub enum Error { - #[error("Verification error: {0}")] - VerificationError(String), - #[error("Unsupported reduction count: {0}")] - UnsupportedReductionCount(usize), #[error("IO error: {0}")] IOError(#[from] io::Error), - #[error("JSON error: {0}")] - JsonError(#[from] serde_json::Error), - #[error("Synthesis error: {0}")] - SynthesisError(#[from] SynthesisError), - #[error("Commitment parser error: {0}")] - CommitmentParseError(#[from] hex::FromHexError), - #[error("Unknown commitment")] - UnknownCommitment, - #[error("Opening Failure: {0}")] - OpeningFailure(String), - #[error("Evaluation Failure")] - EvaluationFailure(ReductionError), - #[error("Store error: {0}")] - StoreError(#[from] store::Error), #[error("Cache error: {0}")] CacheError(String), - #[error("Serde error: {0}")] - SerdeError(#[from] crate::z_data::serde::SerdeError), - #[error("Anyhow error: {0}")] - AnyhowError(#[from] anyhow::Error), + #[error("JSON error: {0}")] + JsonError(#[from] serde_json::Error), } diff --git a/src/public_parameters/mod.rs b/src/public_parameters/mod.rs index cb7027f4b3..94c637a1fd 100644 --- a/src/public_parameters/mod.rs +++ 
b/src/public_parameters/mod.rs @@ -1,83 +1,25 @@ -use log::info; -use std::convert::TryFrom; use std::fs::File; use std::io::{self, BufReader, BufWriter}; use std::path::Path; use std::sync::Arc; -#[cfg(not(target_arch = "wasm32"))] -use proptest::prelude::*; -#[cfg(not(target_arch = "wasm32"))] -use proptest_derive::Arbitrary; - use crate::coprocessor::Coprocessor; -use crate::error::ReductionError; -#[cfg(not(target_arch = "wasm32"))] -use crate::field::FWrap; use crate::{ - circuit::ToInputs, - eval::{ - empty_sym_env, - lang::{Coproc, Lang}, - Evaluable, Evaluator, Status, Witness, IO, - }, - field::LurkField, - hash::PoseidonCache, - proof::nova::{self, NovaProver, PublicParams}, - proof::Prover, - ptr::{ContPtr, Ptr}, - store::Store, - tag::ExprTag, - writer::Write, - z_expr::ZExpr, - z_ptr::ZExprPtr, - z_store::ZStore, + eval::lang::Lang, + proof::nova::{self, PublicParams}, }; -use ff::PrimeField; -use hex::FromHex; -use once_cell::sync::OnceCell; use pasta_curves::pallas; -use rand::rngs::OsRng; use serde::de::DeserializeOwned; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use serde::{Deserialize, Serialize}; pub mod error; -mod file_map; +pub mod file_map; mod registry; -use error::Error; -use file_map::FileMap; - -pub const DEFAULT_REDUCTION_COUNT: ReductionCount = ReductionCount::Ten; -pub static VERBOSE: OnceCell = OnceCell::new(); +use crate::public_parameters::error::Error; pub type S1 = pallas::Scalar; -mod base64 { - use serde::{Deserialize, Serialize}; - use serde::{Deserializer, Serializer}; - - pub(crate) fn serialize(v: &Vec, s: S) -> Result { - let base64 = base64::encode(v); - String::serialize(&base64, s) - } - - pub(crate) fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result, D::Error> { - let base64 = String::deserialize(d)?; - base64::decode(base64.as_bytes()).map_err(serde::de::Error::custom) - } -} - -pub type NovaProofCache = FileMap>; -pub fn nova_proof_cache(reduction_count: usize) -> NovaProofCache { - FileMap::>::new(format!("nova_proofs.{}", reduction_count)).unwrap() -} - -pub type CommittedExpressionMap = FileMap, CommittedExpression>; -pub fn committed_expression_store() -> CommittedExpressionMap { - FileMap::, CommittedExpression>::new("committed_expressions").unwrap() -} - pub fn public_params + Serialize + DeserializeOwned + 'static>( rc: usize, lang: Arc>, @@ -85,309 +27,6 @@ pub fn public_params + Serialize + DeserializeOwned + 'static let f = |lang: Arc>| Arc::new(nova::public_params(rc, lang)); registry::CACHE_REG.get_coprocessor_or_update_with(rc, f, lang) } - -// Number of circuit reductions per step, equivalent to `chunk_frame_count` -#[derive(Clone, Copy, Hash, PartialEq, Eq, Serialize, Deserialize)] -pub enum ReductionCount { - One, - Five, - Ten, - OneHundred, -} - -#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] -#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)] -pub struct Evaluation { - pub expr: String, - pub env: String, - pub cont: String, - pub expr_out: String, - pub env_out: String, - pub cont_out: String, - pub status: Status, - pub iterations: Option, -} - -#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] -#[cfg_attr(not(target_arch = "wasm32"), proptest(no_bound))] -#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)] -pub struct PtrEvaluation { - pub expr: LurkPtr, - pub env: LurkPtr, - pub cont: LurkCont, - pub expr_out: LurkPtr, - pub env_out: LurkPtr, - pub cont_out: LurkCont, - pub status: Status, - pub iterations: Option, -} - 
-#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct Commitment { - #[cfg_attr( - not(target_arch = "wasm32"), - proptest(strategy = "any::>().prop_map(|x| x.0)") - )] - pub comm: F, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub struct OpeningRequest { - pub commitment: Commitment, - pub input: Expression, - pub chain: bool, -} - -impl ToString for Commitment { - fn to_string(&self) -> String { - let s = serde_json::to_string(&self).unwrap(); - // Remove quotation marks. Yes, dumb hacks are happening. - s[1..s.len() - 1].to_string() - } -} - -impl Serialize for Commitment { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - // Use be_bytes for consistency with PrimeField printed representation. - let be_bytes: Vec = self - .comm - .to_repr() - .as_ref() - .iter() - .rev() - .map(|x| x.to_owned()) - .collect(); - - hex::serde::serialize(be_bytes, serializer) - } -} - -impl<'de, F: LurkField> Deserialize<'de> for Commitment { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - hex::serde::deserialize(deserializer) - } -} - -impl FromHex for Commitment { - type Error = hex::FromHexError; - - fn from_hex(s: T) -> Result::Error> - where - T: AsRef<[u8]>, - { - let mut v = Vec::from_hex(s)?; - v.reverse(); - let mut repr = ::Repr::default(); - repr.as_mut()[..32].copy_from_slice(&v[..]); - - Ok(Commitment { - comm: F::from_repr(repr).unwrap(), - }) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] -pub struct Expression { - pub expr: LurkPtr, -} - -#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] -#[cfg_attr(not(target_arch = "wasm32"), proptest(no_bound))] -#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] -pub struct Opening { - pub input: String, - pub output: String, - pub status: Status, - pub commitment: Commitment, - pub new_commitment: Option>, -} - -#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] -#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq)] -pub struct ZBytes { - #[serde(with = "base64")] - z_store: Vec, - #[serde(with = "base64")] - z_ptr: Vec, // can also be a scalar_cont_ptr -} - -#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] -#[cfg_attr(not(target_arch = "wasm32"), proptest(no_bound))] -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub struct ZStorePtr { - z_store: ZStore, - z_ptr: ZExprPtr, -} - -#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] -#[cfg_attr(not(target_arch = "wasm32"), proptest(no_bound))] -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub enum LurkPtr { - Source(String), - ZStorePtr(ZStorePtr), -} - -#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] -#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)] -pub enum LurkCont { - #[default] - Outermost, - Terminal, - Error, -} - -impl Default for LurkPtr { - fn default() -> Self { - Self::Source("nil".to_string()) - } -} - -impl Eq for LurkPtr {} - -#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] -#[cfg_attr(not(target_arch = "wasm32"), proptest(no_bound))] -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub struct CommittedExpression { - pub expr: LurkPtr, - #[cfg_attr( - not(target_arch = "wasm32"), - proptest(strategy = "any::>().prop_map(|x| Some(x.0))") - )] - pub secret: Option, - pub commitment: Option>, -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct VerificationResult { - pub 
verified: bool, -} - -#[derive(Serialize, Deserialize)] -pub struct Proof<'a, F: LurkField> { - pub claim: Claim, - pub proof: nova::Proof<'a, Coproc>, - pub num_steps: usize, - pub reduction_count: ReductionCount, -} - -#[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] -#[cfg_attr(not(target_arch = "wasm32"), proptest(no_bound))] -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] -pub enum Claim { - Evaluation(Evaluation), - // TODO: Add Expression type - PtrEvaluation(PtrEvaluation), - Opening(Opening), -} - -impl Deserialize<'de>> Claim { - /// Returns the ZPtr corresponding to the claim - pub fn proof_key(&self) -> Result, Error> { - match self { - Claim::Evaluation(eval) => { - // Only keying on input and output for now - let expr_in = ZExprPtr::::from_lurk_str(&eval.expr)?; - let expr_out = ZExprPtr::::from_lurk_str(&eval.expr_out)?; - let expr = ZExpr::Cons(expr_in, expr_out); - Ok(expr.z_ptr(&PoseidonCache::default())) - } - Claim::PtrEvaluation(ptr_eval) => { - let expr_in: ZExprPtr = match &ptr_eval.expr { - LurkPtr::Source(source) => ZExprPtr::::from_lurk_str(source)?, - LurkPtr::ZStorePtr(zsp) => zsp.z_ptr, - }; - let expr_out = match &ptr_eval.expr_out { - LurkPtr::Source(source) => ZExprPtr::::from_lurk_str(source)?, - LurkPtr::ZStorePtr(zsp) => zsp.z_ptr, - }; - let expr = ZExpr::Cons(expr_in, expr_out); - Ok(expr.z_ptr(&PoseidonCache::default())) - } - // TODO: Is this an appropriate key for commitments? - Claim::Opening(open) => { - let expr_in = ZExprPtr::::from_lurk_str(&open.input)?; - let expr_out = ZExprPtr::::from_lurk_str(&open.output)?; - let expr = ZExpr::Cons(expr_in, expr_out); - Ok(expr.z_ptr(&PoseidonCache::default())) - } - } - } -} - -// This is just a rough idea, mostly here so we can plumb it elsewhere. The idea is that a verifier can sign an -// attestation that a given claim's proof was verified. It motivates the use of an online verifier for demo purposes. -// Although real proofs should be fast to verify, they will still be large relative to a small (auditable) bundle like -// this. Even if not entirely realistic, something with this general *shape* is likely to play a role in a recursive -// system where the ability to aggregate proof verification more soundly is possible. 
-#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] -pub struct Cert { - pub claim_cid: ZExprPtr, - pub proof_cid: ZExprPtr, - pub verified: bool, - pub verifier_id: String, - pub signature: String, -} - -#[allow(dead_code)] -impl Claim { - pub fn is_evaluation(&self) -> bool { - self.evaluation().is_some() - } - pub fn is_opening(&self) -> bool { - self.opening().is_some() - } - pub fn evaluation(&self) -> Option { - match self { - Self::Evaluation(e) => Some(e.clone()), - _ => None, - } - } - pub fn ptr_evaluation(&self) -> Option> { - match self { - Self::PtrEvaluation(e) => Some(e.clone()), - _ => None, - } - } - pub fn opening(&self) -> Option> { - match self { - Self::Opening(o) => Some(o.clone()), - _ => None, - } - } -} - -type E = Error; -impl TryFrom for ReductionCount { - type Error = E; - - fn try_from(count: usize) -> Result>::Error> { - match count { - 1 => Ok(ReductionCount::One), - 5 => Ok(ReductionCount::Five), - 10 => Ok(ReductionCount::Ten), - 100 => Ok(ReductionCount::OneHundred), - c => Err(Error::UnsupportedReductionCount(c)), - } - } -} -impl ReductionCount { - pub fn count(&self) -> usize { - match self { - Self::One => 1, - Self::Five => 5, - Self::Ten => 10, - Self::OneHundred => 100, - } - } -} - pub trait FileStore where Self: Sized, @@ -435,946 +74,3 @@ where Ok(serde_json::from_reader(reader).expect("failed to read from stdin")) } } - -impl Evaluation { - fn new( - s: &mut Store, - input: IO, - output: IO, - iterations: Option, // This might be padded, so is not quite 'iterations' in the sense of number of actual reduction steps required - // to evaluate. - ) -> Self { - let status: Status = output.cont.into(); - let terminal = status.is_terminal(); - - // For now, conservatively hide all outputs unless output is terminal. TODO: let evaluator configure this in a - // more fine-grained way, including no hiding. - // NOTE: If anything is hidden, a proof won't be possible. - macro_rules! maybe_hide { - ($x:expr) => { - if terminal { - $x - } else { - "".to_string() - } - }; - } - - let expr = input.expr.fmt_to_string(s); - let env = input.env.fmt_to_string(s); - let cont = input.cont.fmt_to_string(s); - - let expr_out = maybe_hide!(output.expr.fmt_to_string(s)); - let env_out = maybe_hide!(output.env.fmt_to_string(s)); - let cont_out = maybe_hide!(output.cont.fmt_to_string(s)); - - Self { - expr, - env, - cont, - expr_out, - env_out, - cont_out, - status, - iterations, - } - } - - pub fn eval( - store: &mut Store, - expr: Ptr, - limit: usize, - ) -> Result { - let env = empty_sym_env(store); - let lang = &Lang::>::new(); - let mut evaluator = Evaluator::new(expr, env, store, limit, lang); - - let input = evaluator.initial(); - - let (output, iterations, _) = evaluator.eval().map_err(Error::EvaluationFailure)?; - - Ok(Self::new(store, input, output, Some(iterations))) - } -} - -impl PtrEvaluation { - fn new( - s: &mut Store, - input: IO, - output: IO, - iterations: Option, // This might be padded, so is not quite 'iterations' in the sense of number of actual reduction steps required - // to evaluate. - ) -> Self { - let status: Status = output.cont.into(); - - // NOTE: We do not implement the `maybe_hide!` logic found in `Evaluation::new()`. That was a speculative design - // unsupported by this patch. In ny case, `Evaluation` and `PtrEvaluation` should be unified in the future, and - // an appropriate hiding mechanism/configuration can be added then. 
- Self { - expr: LurkPtr::from_ptr(s, &input.expr), - env: LurkPtr::from_ptr(s, &input.env), - cont: LurkCont::from_cont_ptr(s, &input.cont), - expr_out: LurkPtr::from_ptr(s, &output.expr), - env_out: LurkPtr::from_ptr(s, &output.env), - cont_out: LurkCont::from_cont_ptr(s, &output.cont), - status, - iterations, - } - } -} - -impl Commitment { - pub fn from_comm(s: &mut Store, ptr: &Ptr) -> Result { - assert_eq!(ExprTag::Comm, ptr.tag); - - let digest = *s - .hash_expr(ptr) - .ok_or_else(|| Error::UnknownCommitment)? - .value(); - - Ok(Commitment { comm: digest }) - } - - pub fn ptr(&self, s: &mut Store) -> Ptr { - s.intern_opaque_comm(self.comm) - } - - pub fn from_ptr_with_hiding(s: &mut Store, ptr: &Ptr) -> Result<(Self, F), Error> { - let secret = F::random(OsRng); - - let commitment = Self::from_ptr_and_secret(s, ptr, secret)?; - - Ok((commitment, secret)) - } - - pub fn from_ptr_and_secret(s: &mut Store, ptr: &Ptr, secret: F) -> Result { - let hidden = s.hide(secret, *ptr); - - Self::from_comm(s, &hidden) - } - - // Importantly, this ensures the function and secret are in the Store, s. - fn construct_with_fun_application( - s: &mut Store, - function: CommittedExpression, - input: Ptr, - limit: usize, - lang: &Lang>, - ) -> Result<(Self, Ptr), Error> { - let fun_ptr = function.expr_ptr(s, limit, lang)?; - let secret = function.secret.expect("CommittedExpression secret missing"); - - let commitment = Self::from_ptr_and_secret(s, &fun_ptr, secret)?; - - let open = s.lurk_sym("open"); - let comm_ptr = s.hide(secret, fun_ptr); - - // (open ) - let fun_expr = s.list(&[open, comm_ptr]); - - // ((open ) input) - let expression = s.list(&[fun_expr, input]); - - Ok((commitment, expression)) - } - - fn fun_application(&self, s: &mut Store, input: Ptr) -> Ptr { - let open = s.lurk_sym("open"); - let comm_ptr = self.ptr(s); - - // (open ) - let fun_expr = s.list(&[open, comm_ptr]); - - // ((open commitment) input) - s.list(&[fun_expr, input]) - } -} - -impl CommittedExpression { - pub fn expr_ptr( - &self, - s: &mut Store, - limit: usize, - lang: &Lang>, - ) -> Result, Error> { - let source_ptr = self.expr.ptr(s, limit, lang); - - Ok(source_ptr) - } -} - -impl LurkPtr { - pub fn ptr(&self, s: &mut Store, limit: usize, lang: &Lang>) -> Ptr { - match self { - LurkPtr::Source(source) => { - let ptr = s.read(source).expect("could not read source"); - assert!(!ptr.raw.is_opaque()); - let (out, _) = evaluate(s, ptr, None, limit, lang).unwrap(); - - out.expr - } - LurkPtr::ZStorePtr(z_store_ptr) => { - let z_store = &z_store_ptr.z_store; - let z_ptr = z_store_ptr.z_ptr; - s.intern_z_expr_ptr(z_ptr, z_store) - .expect("failed to intern z_ptr") - } - } - } - - pub fn from_ptr(s: &mut Store, ptr: &Ptr) -> Self { - let (z_store, z_ptr) = ZStore::new_with_expr(s, ptr); - let z_ptr = z_ptr.unwrap(); - Self::ZStorePtr(ZStorePtr { z_store, z_ptr }) - } -} - -impl LurkCont { - pub fn cont_ptr( - &self, - s: &mut Store, - ) -> ContPtr { - match self { - Self::Outermost => s.get_cont_outermost(), - Self::Terminal => s.get_cont_terminal(), - Self::Error => s.get_cont_error(), - } - } - - pub fn from_cont_ptr( - _s: &mut Store, - cont_ptr: &ContPtr, - ) -> Self { - use crate::tag::ContTag; - - match cont_ptr.tag { - ContTag::Outermost => Self::Outermost, - ContTag::Terminal => Self::Terminal, - ContTag::Error => Self::Error, - _ => panic!("unsupported continuation"), - } - } -} - -impl Expression { - pub fn eval( - &self, - s: &mut Store, - limit: usize, - lang: &Lang>, - ) -> Result, Error> { - let expr = 
self.expr.ptr(s, limit, lang); - let (io, _iterations) = evaluate(s, expr, None, limit, lang)?; - - Ok(io.expr) - } -} - -impl<'a> Opening { - #[allow(clippy::too_many_arguments)] - pub fn apply_and_prove( - s: &'a mut Store, - input: Ptr, - function: CommittedExpression, - limit: usize, - chain: bool, - only_use_cached_proofs: bool, - nova_prover: &'a NovaProver>, - pp: &'a PublicParams<'_, Coproc>, - lang: Arc>>, - ) -> Result, Error> { - let claim = Self::apply(s, input, function, limit, chain, &lang)?; - Proof::prove_claim( - s, - &claim, - limit, - only_use_cached_proofs, - nova_prover, - pp, - lang, - ) - } - - pub fn open_and_prove( - s: &'a mut Store, - request: OpeningRequest, - limit: usize, - only_use_cached_proofs: bool, - nova_prover: &'a NovaProver>, - pp: &'a PublicParams<'_, Coproc>, - lang: Arc>>, - ) -> Result, Error> { - let input = request.input.expr.ptr(s, limit, &lang); - let commitment = request.commitment; - - let function_map = committed_expression_store(); - let function = function_map - .get(&commitment) - .ok_or(Error::UnknownCommitment)?; - - Self::apply_and_prove( - s, - input, - function, - limit, - request.chain, - only_use_cached_proofs, - nova_prover, - pp, - lang, - ) - } - - pub fn open( - s: &mut Store, - request: OpeningRequest, - limit: usize, - chain: bool, - lang: &Lang>, - ) -> Result, Error> { - let input = request.input.expr.ptr(s, limit, lang); - let commitment = request.commitment; - - let function_map = committed_expression_store(); - let function = function_map - .get(&commitment) - .ok_or(Error::UnknownCommitment)?; - - Self::apply(s, input, function, limit, chain, lang) - } - - fn _is_chained(&self) -> bool { - self.new_commitment.is_some() - } - - fn public_output_expression(&self, s: &mut Store) -> Ptr { - let result = s.read(&self.output).expect("unreadable result"); - - if let Some(commitment) = self.new_commitment { - let c = commitment.ptr(s); - - s.cons(result, c) - } else { - result - } - } - - pub fn apply( - s: &mut Store, - input: Ptr, - function: CommittedExpression, - limit: usize, - chain: bool, - lang: &Lang>, - ) -> Result, Error> { - let (commitment, expression) = - Commitment::construct_with_fun_application(s, function, input, limit, lang)?; - let (public_output, _iterations) = evaluate(s, expression, None, limit, lang)?; - - let (new_commitment, output_expr) = if chain { - let cons = public_output.expr; - let result_expr = s.car(&cons)?; - let new_comm = s.cdr(&cons)?; - - let new_secret0 = s.secret(new_comm).expect("secret missing"); - let new_secret = *s.hash_expr(&new_secret0).expect("hash missing").value(); - - let (_, new_fun) = s.open(new_comm).expect("opening missing"); - let new_commitment = Commitment::from_comm(s, &new_comm)?; - - s.hydrate_scalar_cache(); - - let expr = LurkPtr::from_ptr(s, &new_fun); - - let new_function = CommittedExpression:: { - expr, - secret: Some(new_secret), - commitment: Some(new_commitment), - }; - - let function_map = committed_expression_store(); - function_map.set(new_commitment, &new_function)?; - assert_eq!(new_function, function_map.get(&new_commitment).unwrap()); - - (Some(new_commitment), result_expr) - } else { - (None, public_output.expr) - }; - - let input_string = input.fmt_to_string(s); - let status = - as Evaluable, Coproc>>::status(&public_output); - let output_string = if status.is_terminal() { - // Only actual output if result is terminal. 
- output_expr.fmt_to_string(s) - } else { - // We don't want to leak any internal information in the case of incomplete computations. - // Provers might want to expose results in the case of explicit errors. - // For now, don't -- but consider allowing it as an option. - "".to_string() - }; - - let claim = Claim::Opening(Opening { - commitment, - new_commitment, - input: input_string, - output: output_string, - status, - }); - - Ok(claim) - } -} - -impl<'a> Proof<'a, S1> { - #[allow(clippy::too_many_arguments)] - pub fn eval_and_prove( - s: &'a mut Store, - expr: Ptr, - supplied_env: Option>, - limit: usize, - only_use_cached_proofs: bool, - nova_prover: &'a NovaProver>, - pp: &'a PublicParams<'_, Coproc>, - lang: Arc>>, - ) -> Result { - let env = supplied_env.unwrap_or_else(|| empty_sym_env(s)); - let cont = s.intern_cont_outermost(); - let input = IO { expr, env, cont }; - - // TODO: It's a little silly that we evaluate here, but evaluation is also repeated in `NovaProver::evaluate_and_prove()`. - // Refactor to avoid that. - let (public_output, _iterations) = evaluate(s, expr, supplied_env, limit, &lang)?; - - let claim = if supplied_env.is_some() { - // This is a bit of a hack, but the idea is that if the env was supplied it's likely to contain a literal function, - // which we will not be able to read. Therefore, we should not produce a string-based claim. - let ptr_evaluation = PtrEvaluation::new(s, input, public_output, None); - Claim::PtrEvaluation(ptr_evaluation) - } else { - let evaluation = Evaluation::new(s, input, public_output, None); - Claim::Evaluation(evaluation) - }; - - Self::prove_claim( - s, - &claim, - limit, - only_use_cached_proofs, - nova_prover, - pp, - lang, - ) - } - - pub fn prove_claim( - s: &'a mut Store, - claim: &Claim, - limit: usize, - only_use_cached_proofs: bool, - nova_prover: &'a NovaProver>, - pp: &'a PublicParams<'_, Coproc>, - lang: Arc>>, - ) -> Result { - let reduction_count = nova_prover.reduction_count(); - - let proof_map = nova_proof_cache(reduction_count); - let function_map = committed_expression_store(); - - let key = claim.proof_key()?.to_base32(); - - if let Some(proof) = proof_map.get(&key) { - return Ok(proof); - } - - if only_use_cached_proofs { - // FIXME: Error handling. - panic!("no cached proof"); - } - - info!("Starting Proving"); - - let (expr, env) = match &claim { - Claim::Evaluation(e) => ( - s.read(&e.expr).expect("bad expression"), - s.read(&e.env).expect("bad env"), - ), - Claim::PtrEvaluation(e) => (e.expr.ptr(s, limit, &lang), e.env.ptr(s, limit, &lang)), - Claim::Opening(o) => { - let commitment = o.commitment; - - // In order to prove the opening, we need access to the original function. 
- let function = function_map - .get(&commitment) - .expect("function for commitment missing"); - - let input = s.read(&o.input).expect("bad expression"); - let (c, expression) = - Commitment::construct_with_fun_application(s, function, input, limit, &lang)?; - - assert_eq!(commitment, c); - (expression, empty_sym_env(s)) - } - }; - - let (proof, _public_input, _public_output, num_steps) = nova_prover - .evaluate_and_prove(pp, expr, env, s, limit, lang.clone()) - .expect("Nova proof failed"); - - let proof = Self { - claim: claim.clone(), - proof, - num_steps, - reduction_count: ReductionCount::try_from(reduction_count)?, - }; - - match &claim { - Claim::Opening(o) => { - if o.status != Status::Terminal { - return Err(Error::OpeningFailure("Claim status is not Terminal".into())); - }; - } - Claim::Evaluation(e) => { - if e.status != Status::Terminal { - return Err(Error::EvaluationFailure(ReductionError::Misc( - "nonterminal status".into(), - ))); - }; - } - Claim::PtrEvaluation(e) => { - if e.status != Status::Terminal { - return Err(Error::EvaluationFailure(ReductionError::Misc( - "nonterminal status".into(), - ))); - } - } - }; - - proof.verify(pp, &lang).expect("Nova verification failed"); - - proof_map.set(key, &proof).unwrap(); - - Ok(proof) - } - - pub fn verify( - &self, - pp: &PublicParams<'_, Coproc>, - lang: &Lang>, - ) -> Result { - let (public_inputs, public_outputs) = self.io_vecs(lang)?; - - let claim_iterations_and_num_steps_are_consistent = if let Claim::Evaluation(Evaluation { - iterations: Some(iterations), - .. - }) = self.claim - { - // Currently, claims created by fcomm don't include the iteration count. If they do, then it should be - // possible to verify correctness. This may require making the iteration count explicit in the public - // output. That will allow maintaining iteration count without incrementing during frames added as - // padding; and it will also allow explicitly masking the count when desired for zero-knowledge. - // Meanwhile, since Nova currently requires the number of steps to be provided by the verifier, we have - // to provide it. For now, we should at least be able to calculate this value based on number of real - // iterations and number of frames per circuit. This is untested and mostly a placeholder to remind us - // that all of this will need to be handled in a more principled way eventually. (#282) - - let num_steps = self.num_steps; - - let chunk_frame_count = self.reduction_count.count(); - let expected_steps = - (iterations / chunk_frame_count) + (iterations % chunk_frame_count != 0) as usize; - - expected_steps == num_steps - } else { - true - }; - - let verified = claim_iterations_and_num_steps_are_consistent - && self - .proof - .verify(pp, self.num_steps, public_inputs, &public_outputs) - .expect("error verifying"); - - let result = VerificationResult::new(verified); - - Ok(result) - } - - pub fn evaluation_io(&self, s: &mut Store) -> Result<(IO, IO), Error> { - let evaluation = &self.claim.evaluation().expect("expected evaluation claim"); - - let input_io = { - let expr = s - .read(&evaluation.expr) - .map_err(|_| Error::VerificationError("failed to read expr".into()))?; - - let env = s - .read(&evaluation.env) - .map_err(|_| Error::VerificationError("failed to read env".into()))?; - - // FIXME: We ignore cont and assume Outermost, since we can't read a Cont. 
- let cont = s.intern_cont_outermost(); - - IO:: { expr, env, cont } - }; - - let output_io = { - let expr = s - .read(&evaluation.expr_out) - .map_err(|_| Error::VerificationError("failed to read expr out".into()))?; - - let env = s - .read(&evaluation.env_out) - .map_err(|_| Error::VerificationError("failed to read env out".into()))?; - let cont = evaluation - .status - .to_cont(s) - .ok_or_else(|| Error::VerificationError("continuation cannot be proved".into()))?; - - IO:: { expr, env, cont } - }; - - Ok((input_io, output_io)) - } - - pub fn ptr_evaluation_io( - &self, - s: &mut Store, - lang: &Lang>, - ) -> Result<(IO, IO), Error> { - let ptr_evaluation = &self - .claim - .ptr_evaluation() - .expect("expected PtrEvaluation claim"); - - let input_io = { - let expr = ptr_evaluation.expr.ptr(s, 0, lang); // limit is unneeded because we will not eval. we already have the ptr. - let env = ptr_evaluation.env.ptr(s, 0, lang); - let cont = ptr_evaluation.cont.cont_ptr(s); - - IO:: { expr, env, cont } - }; - - let output_io = { - let expr = ptr_evaluation.expr_out.ptr(s, 0, lang); - let env = ptr_evaluation.env_out.ptr(s, 0, lang); - let cont = ptr_evaluation.cont_out.cont_ptr(s); - - IO:: { expr, env, cont } - }; - - Ok((input_io, output_io)) - } - - pub fn opening_io(&self, s: &mut Store) -> Result<(IO, IO), Error> { - assert!(self.claim.is_opening()); - - let opening = self.claim.opening().expect("expected opening claim"); - let output = opening.public_output_expression(s); - let input = s.read(&opening.input).expect("could not read input"); - - let expression = opening.commitment.fun_application(s, input); - let outermost = s.intern_cont_outermost(); - - let input_io = IO:: { - expr: expression, - env: empty_sym_env(s), - cont: outermost, - }; - - let output_io = IO:: { - expr: output, - env: empty_sym_env(s), - cont: s.intern_cont_terminal(), - }; - - Ok((input_io, output_io)) - } - - pub fn io( - &self, - s: &mut Store, - lang: &Lang>, - ) -> Result<(IO, IO), Error> { - match self.claim { - Claim::Evaluation(_) => self.evaluation_io(s), - Claim::PtrEvaluation(_) => self.ptr_evaluation_io(s, lang), - Claim::Opening(_) => self.opening_io(s), - } - } - - fn io_vecs(&self, lang: &Lang>) -> Result<(Vec, Vec), Error> { - let s = &mut Store::::default(); - - self.io(s, lang) - .map(|(i, o)| (i.to_inputs(s), o.to_inputs(s))) - } -} - -impl VerificationResult { - fn new(verified: bool) -> Self { - Self { verified } - } -} - -pub fn evaluate( - store: &mut Store, - expr: Ptr, - supplied_env: Option>, - limit: usize, - lang: &Lang>, -) -> Result<(IO, usize), Error> { - let env = supplied_env.unwrap_or_else(|| empty_sym_env(store)); - let mut evaluator = Evaluator::new(expr, env, store, limit, lang); - - let (io, iterations, _) = evaluator.eval().map_err(Error::EvaluationFailure)?; - - assert!( as Evaluable< - F, - Witness, - Coproc, - >>::is_terminal(&io)); - Ok((io, iterations)) -} - -#[cfg(test)] -mod test { - use super::*; - use std::path::Path; - use std::sync::Arc; - use tempfile::Builder; - - use crate::eval::lang::{Coproc, Lang}; - use crate::proof::{nova::NovaProver, Prover}; - use crate::z_data::{from_z_data, to_z_data}; - - #[test] - fn test_cert_serialization() { - use serde_json::json; - - let c = Commitment { - comm: S1::from(123), - }; - - let cid = ZExprPtr::from_parts(ExprTag::Comm, c.comm); - let cert = Cert { - claim_cid: cid, - proof_cid: cid, - verified: true, - verifier_id: "asdf".to_string(), - signature: "fdsa".to_string(), - }; - let json = json!(cert); - - let string = 
json.to_string(); - - let cert_again: Cert = serde_json::from_str(&string).unwrap(); - assert_eq!(cert, cert_again); - } - - // Minimal chained functional commitment test - #[test] - fn lurk_chained_functional_commitment() { - let fcomm_path_key = "FCOMM_DATA_PATH"; - let tmp_dir = Builder::new().prefix("tmp").tempdir().expect("tmp dir"); - let tmp_dir_path = Path::new(tmp_dir.path()); - let fcomm_path_val = tmp_dir_path.join("fcomm_data"); - std::env::set_var(fcomm_path_key, fcomm_path_val.clone()); - assert_eq!( - std::env::var(fcomm_path_key), - Ok(fcomm_path_val.into_os_string().into_string().unwrap()) - ); - - let function_source = "(letrec ((secret 12345) (a (lambda (acc x) (let ((acc (+ acc x))) (cons acc (hide secret (a acc))))))) (a 0))"; - let expected_io = vec![("5", "5"), ("3", "8")]; - - let mut function = CommittedExpression:: { - expr: LurkPtr::Source(function_source.into()), - secret: None, - commitment: None, - }; - - let limit = 1000; - let lang = Lang::new(); - let lang_rc = Arc::new(lang.clone()); - let rc = ReductionCount::One; - let pp = public_params(rc.count(), lang_rc.clone()).expect("public params"); - let chained = true; - let s = &mut Store::::default(); - - let io = expected_io.iter(); - - let fun_ptr = function.expr_ptr(s, limit, &lang).expect("fun_ptr"); - - let (mut commitment, secret) = Commitment::from_ptr_with_hiding(s, &fun_ptr).unwrap(); - - function.secret = Some(secret); - function.commitment = Some(commitment); - - let function_map = committed_expression_store(); - function_map - .set(commitment, &function) - .expect("function_map set"); - - for (function_input, _expected_output) in io { - let prover = NovaProver::>::new(rc.count(), lang.clone()); - - let input = s.read(function_input).expect("Read error"); - - let proof = Opening::apply_and_prove( - s, - input, - function.clone(), - limit, - chained, - false, - &prover, - &pp, - lang_rc.clone(), - ) - .expect("apply and prove"); - - proof.verify(&pp, &lang_rc).expect("Failed to verify"); - - let opening = proof.claim.opening().expect("expected opening claim"); - - match opening.new_commitment { - Some(c) => commitment = c, - _ => panic!("new commitment missing"), - } - println!("Commitment: {:?}", commitment); - } - } - proptest! { - #[test] - fn prop_z_bytes(x in any::()) { - let ser = to_z_data(&x).expect("write ZBytes"); - let de: ZBytes = from_z_data(&ser).expect("read ZBytes"); - assert_eq!(x, de); - - let ser: Vec = bincode::serialize(&x).expect("write ZBytes"); - let de: ZBytes = bincode::deserialize(&ser).expect("read ZBytes"); - assert_eq!(x, de); - - let tmp_dir = Builder::new().prefix("tmp").tempdir().expect("tmp dir"); - let tmp_dir_path = Path::new(tmp_dir.path()); - let z_bytes_path = tmp_dir_path.join("zbytes.json"); - x.write_to_path(&z_bytes_path); - assert_eq!(x, ZBytes::read_from_path(&z_bytes_path).unwrap()); - } - } - - proptest! 
{ - #[test] - fn prop_z_store_ptr(x in any::>()) { - let ser = to_z_data(&x).expect("write ZStorePtr"); - let de: ZStorePtr = from_z_data(&ser).expect("read ZStorePtr"); - assert_eq!(x, de); - - let ser: Vec = bincode::serialize(&x).expect("write ZStorePtr"); - let de: ZStorePtr = bincode::deserialize(&ser).expect("read ZStorePtr"); - assert_eq!(x, de); - - let tmp_dir = Builder::new().prefix("tmp").tempdir().expect("tmp dir"); - let tmp_dir_path = Path::new(tmp_dir.path()); - let z_store_ptr_path = tmp_dir_path.join("zstoreptr.json"); - x.write_to_path(&z_store_ptr_path); - assert_eq!(x, ZStorePtr::::read_from_path(&z_store_ptr_path).unwrap()); - } - } - - proptest! { - #[test] - fn prop_lurk_ptr(x in any::>()) { - let ser = to_z_data(&x).expect("write LurkPtr"); - let de: LurkPtr = from_z_data(&ser).expect("read LurkPtr"); - assert_eq!(x, de); - - let ser: Vec = bincode::serialize(&x).expect("write LurkPtr"); - let de: LurkPtr = bincode::deserialize(&ser).expect("read LurkPtr"); - assert_eq!(x, de); - - let tmp_dir = Builder::new().prefix("tmp").tempdir().expect("tmp dir"); - let tmp_dir_path = Path::new(tmp_dir.path()); - let lurk_ptr_path = tmp_dir_path.join("lurkptr.json"); - x.write_to_path(&lurk_ptr_path); - assert_eq!(x, LurkPtr::::read_from_path(&lurk_ptr_path).unwrap()); - } - } - - proptest! { - #[test] - fn prop_ptr_evaluation(x in any::>()) { - let ser = to_z_data(&x).expect("write PtrEvaluation"); - let de: PtrEvaluation = from_z_data(&ser).expect("read PtrEvaluation"); - assert_eq!(x, de); - - let ser: Vec = bincode::serialize(&x).expect("write PtrEvalution"); - let de: PtrEvaluation = bincode::deserialize(&ser).expect("read PtrEvaluation"); - assert_eq!(x, de); - - let tmp_dir = Builder::new().prefix("tmp").tempdir().expect("tmp dir"); - let tmp_dir_path = Path::new(tmp_dir.path()); - let ptr_evaluation_path = tmp_dir_path.join("ptrevaluation.json"); - x.write_to_path(&ptr_evaluation_path); - assert_eq!(x, PtrEvaluation::::read_from_path(&ptr_evaluation_path).unwrap()); - } - } - - proptest! { - #[test] - fn prop_committed_expr(x in any::>()) { - let ser = to_z_data(&x).expect("write CommittedExpression"); - let de: CommittedExpression = from_z_data(&ser).expect("read CommittedExpression"); - assert_eq!(x, de); - - let ser: Vec = bincode::serialize(&x).expect("write CommittedExpression"); - let de: CommittedExpression = bincode::deserialize(&ser).expect("read CommittedExpression"); - assert_eq!(x, de); - - let tmp_dir = Builder::new().prefix("tmp").tempdir().expect("tmp dir"); - let tmp_dir_path = Path::new(tmp_dir.path()); - let committed_expr_path = tmp_dir_path.join("committedexpr.json"); - x.write_to_path(&committed_expr_path); - assert_eq!(x, CommittedExpression::::read_from_path(&committed_expr_path).unwrap()); - } - } - - proptest! { - #[test] - fn prop_opening(x in any::>()) { - let ser = to_z_data(&x).expect("write Opening"); - let de: Opening = from_z_data(&ser).expect("read Opening"); - assert_eq!(x, de); - - let ser: Vec = bincode::serialize(&x).expect("write Opening"); - let de: Opening = bincode::deserialize(&ser).expect("read Opening"); - assert_eq!(x, de); - - let tmp_dir = Builder::new().prefix("tmp").tempdir().expect("tmp dir"); - let tmp_dir_path = Path::new(tmp_dir.path()); - let opening_path = tmp_dir_path.join("opening.json"); - x.write_to_path(&opening_path); - assert_eq!(x, Opening::::read_from_path(&opening_path).unwrap()); - } - } - - proptest! 
{ - #[test] - fn prop_claim(x in any::>()) { - let ser = to_z_data(&x).expect("write Claim"); - let de: Claim = from_z_data(&ser).expect("read Claim"); - assert_eq!(x, de); - - let ser: Vec = bincode::serialize(&x).expect("write Claim"); - let de: Claim = bincode::deserialize(&ser).expect("read Claim"); - assert_eq!(x, de); - - let tmp_dir = Builder::new().prefix("tmp").tempdir().expect("tmp dir"); - let tmp_dir_path = Path::new(tmp_dir.path()); - let claim_path = tmp_dir_path.join("claim.json"); - x.write_to_path(&claim_path); - assert_eq!(x, Claim::::read_from_path(&claim_path).unwrap()); - } - } -} diff --git a/src/public_parameters/registry.rs b/src/public_parameters/registry.rs index b914bf6846..92b13d0a4a 100644 --- a/src/public_parameters/registry.rs +++ b/src/public_parameters/registry.rs @@ -8,13 +8,13 @@ use pasta_curves::pallas; use serde::{de::DeserializeOwned, Serialize}; use tap::TapFallible; -use crate::public_parameters::Error; +use crate::public_parameters::error::Error; use crate::{coprocessor::Coprocessor, eval::lang::Lang, proof::nova::PublicParams}; use super::file_map::FileIndex; type S1 = pallas::Scalar; -type AnyMap = anymap::Map; +type AnyMap = anymap::Map; type PublicParamMemCache = HashMap>>; /// This is a global registry for Coproc-specific parameters. diff --git a/src/symbol.rs b/src/symbol.rs index 3362869d57..c184753630 100644 --- a/src/symbol.rs +++ b/src/symbol.rs @@ -2,7 +2,10 @@ use std::fmt; use crate::parser::LURK_WHITESPACE; #[cfg(not(target_arch = "wasm32"))] +use lurk_macros::serde_test; +#[cfg(not(target_arch = "wasm32"))] use proptest_derive::Arbitrary; + /// Module for symbol type, Sym. use serde::{Deserialize, Serialize}; use std::collections::HashMap; @@ -14,6 +17,7 @@ pub const ESCAPE_CHARS: &str = "|(){}[],.:'\\\""; #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Hash)] #[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] +#[cfg_attr(not(target_arch = "wasm32"), serde_test)] /// Type for hierarchical symbol names. /// /// The symbol path is encoded with a vector of strings. Keywords are symbols diff --git a/src/uint.rs b/src/uint.rs index 37637b3276..3fd5154bd6 100644 --- a/src/uint.rs +++ b/src/uint.rs @@ -1,4 +1,6 @@ #[cfg(not(target_arch = "wasm32"))] +use lurk_macros::serde_test; +#[cfg(not(target_arch = "wasm32"))] use proptest_derive::Arbitrary; use serde::{Deserialize, Serialize}; use std::{ @@ -9,6 +11,7 @@ use std::{ /// Unsigned fixed-width integer type for Lurk. #[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Eq, Serialize, Deserialize)] #[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] +#[cfg_attr(not(target_arch = "wasm32"), serde_test)] pub enum UInt { U64(u64), } diff --git a/src/z_data/z_cont.rs b/src/z_data/z_cont.rs index 44e5e4c0d5..6091afa806 100644 --- a/src/z_data/z_cont.rs +++ b/src/z_data/z_cont.rs @@ -1,5 +1,5 @@ #[cfg(not(target_arch = "wasm32"))] -use proptest::prelude::BoxedStrategy; +use lurk_macros::serde_test; #[cfg(not(target_arch = "wasm32"))] use proptest::prelude::*; use serde::{Deserialize, Serialize}; @@ -13,6 +13,10 @@ use crate::tag::Tag; use crate::z_ptr::{ZContPtr, ZExprPtr, ZPtr}; #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[cfg_attr( + not(target_arch = "wasm32"), + serde_test(types(pasta_curves::pallas::Scalar), zdata(true)) +)] /// A `ZCont` is the content-addressed representation of a Lurk continuation, which enables /// efficient serialization and sharing of hashed Lurk data via associated `ZContPtr`s. 
 pub enum ZCont<F: LurkField> {
@@ -366,23 +370,3 @@ impl<F: LurkField> Arbitrary for ZCont<F> {
         .boxed()
     }
 }
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::z_data::{from_z_data, to_z_data};
-    use pasta_curves::pallas::Scalar;
-
-    proptest! {
-        #[test]
-        fn prop_serde_z_cont(x in any::<ZCont<Scalar>>()) {
-            let ser = to_z_data(&x).expect("write ZCont");
-            let de: ZCont<Scalar> = from_z_data(&ser).expect("read ZCont");
-            assert_eq!(x, de);
-
-            let ser: Vec<u8> = bincode::serialize(&x).expect("write ZCont");
-            let de: ZCont<Scalar> = bincode::deserialize(&ser).expect("read ZCont");
-            assert_eq!(x, de);
-        }
-    }
-}
diff --git a/src/z_data/z_expr.rs b/src/z_data/z_expr.rs
index 95a218767e..831838ecb8 100644
--- a/src/z_data/z_expr.rs
+++ b/src/z_data/z_expr.rs
@@ -1,5 +1,5 @@
 #[cfg(not(target_arch = "wasm32"))]
-use proptest::prelude::BoxedStrategy;
+use lurk_macros::serde_test;
 #[cfg(not(target_arch = "wasm32"))]
 use proptest::prelude::*;
 use serde::{Deserialize, Serialize};
@@ -17,6 +17,10 @@ use crate::z_store::ZStore;
 use crate::UInt;
 
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[cfg_attr(
+    not(target_arch = "wasm32"),
+    serde_test(types(pasta_curves::pallas::Scalar), zdata(true))
+)]
 /// A `ZExpr` is the content-addressed representation of a Lurk expression, which enables
 /// efficient serialization and sharing of hashed Lurk data via associated `ZExprPtr`s.
 pub enum ZExpr<F: LurkField> {
@@ -203,21 +207,9 @@ impl<F: LurkField> Arbitrary for ZExpr<F> {
 mod tests {
     use super::*;
     use crate::syntax::Syntax;
-    use crate::z_data::{from_z_data, to_z_data};
     use pasta_curves::pallas::Scalar;
 
     proptest! {
-        #[test]
-        fn prop_serde_z_expr(x in any::<ZExpr<Scalar>>()) {
-            let ser = to_z_data(&x).expect("write ZExpr");
-            let de: ZExpr<Scalar> = from_z_data(&ser).expect("read ZExpr");
-            assert_eq!(x, de);
-
-            let ser: Vec<u8> = bincode::serialize(&x).expect("write ZExpr");
-            let de: ZExpr<Scalar> = bincode::deserialize(&ser).expect("read ZExpr");
-            assert_eq!(x, de);
-        }
-
         #[test]
         // TODO: Overflows stack in non-release mode
         fn prop_expr_z_expr_roundtrip(x in any::<Syntax<Scalar>>()) {
diff --git a/src/z_data/z_ptr.rs b/src/z_data/z_ptr.rs
index cbc82cdc85..6279b639de 100644
--- a/src/z_data/z_ptr.rs
+++ b/src/z_data/z_ptr.rs
@@ -1,6 +1,8 @@
 use anyhow::anyhow;
 use base32ct::{Base32Unpadded, Encoding};
 #[cfg(not(target_arch = "wasm32"))]
+use lurk_macros::serde_test;
+#[cfg(not(target_arch = "wasm32"))]
 use proptest_derive::Arbitrary;
 use serde::{Deserialize, Serialize};
 use std::fmt;
@@ -19,6 +21,14 @@ use crate::tag::{ContTag, ExprTag, Tag};
 
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
 #[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))]
+#[cfg_attr(
+    not(target_arch = "wasm32"),
+    serde_test(
+        types(ExprTag, pasta_curves::pallas::Scalar),
+        types(ContTag, pasta_curves::pallas::Scalar),
+        zdata(true)
+    )
+)]
 // Note: the trait bound E: Tag is not necessary in the struct, but it makes the proptest strategy more efficient.
 /// A struct representing a scalar pointer with a tag and a value.
 ///
@@ -141,35 +151,9 @@ pub type ZContPtr<F> = ZPtr<ContTag, F>;
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::z_data::{from_z_data, to_z_data};
     use pasta_curves::pallas::Scalar;
-    use serde::de::DeserializeOwned;
-
-    fn test_serde_z_ptr<
-        P: Arbitrary + IntoHashComponents<Scalar> + PartialEq + Eq + Serialize + DeserializeOwned,
-    >(
-        x: P,
-    ) {
-        let ser = to_z_data(&x).expect("write ZPtr");
-        let de: P = from_z_data(&ser).expect("read ZPtr");
-        assert_eq!(x, de);
-
-        let ser: Vec<u8> = bincode::serialize(&x).expect("write ZPtr");
-        let de: P = bincode::deserialize(&ser).expect("read ZPtr");
-        assert_eq!(x, de);
-    }
 
     proptest! {
-        #[test]
-        fn prop_serde_z_expr_ptr(x in any::<ZExprPtr<Scalar>>()) {
-            test_serde_z_ptr(x);
-        }
-
-        #[test]
-        fn prop_serde_z_cont_ptr(x in any::<ZContPtr<Scalar>>()) {
-            test_serde_z_ptr(x);
-        }
-
         #[test]
         fn prop_base32_z_expr_ptr(x in any::<ZExprPtr<Scalar>>()) {
             assert_eq!(x, ZPtr::from_base32(&x.to_base32()).unwrap());
diff --git a/src/z_data/z_store.rs b/src/z_data/z_store.rs
index 6ae704a7e1..76c45c4782 100644
--- a/src/z_data/z_store.rs
+++ b/src/z_data/z_store.rs
@@ -1,4 +1,6 @@
 #[cfg(not(target_arch = "wasm32"))]
+use lurk_macros::serde_test;
+#[cfg(not(target_arch = "wasm32"))]
 use proptest::prelude::*;
 #[cfg(not(target_arch = "wasm32"))]
 use proptest_derive::Arbitrary;
@@ -23,6 +25,10 @@ use crate::field::LurkField;
 #[derive(Serialize, Deserialize, Debug, PartialEq, Clone, Default)]
 #[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))]
 #[cfg_attr(not(target_arch = "wasm32"), proptest(no_bound))]
+#[cfg_attr(
+    not(target_arch = "wasm32"),
+    serde_test(types(pasta_curves::pallas::Scalar), zdata(true))
+)]
 /// A `ZStore` is a content-addressed, serializable representation of a Lurk store
 ///
 /// Whereas a `Store` contains caches of each type of Lurk data, a `ZStore`
@@ -168,23 +174,3 @@ impl<F: LurkField> ZStore<F> {
         (ptr, expr)
     }
 }
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::z_data::{from_z_data, to_z_data};
-    use pasta_curves::pallas::Scalar;
-
-    proptest! {
-        #[test]
-        fn prop_serde_z_store(s in any::<ZStore<Scalar>>()) {
-            let ser = to_z_data(&s).expect("write ZStore");
-            let de: ZStore<Scalar> = from_z_data(&ser).expect("read ZStore");
-            assert_eq!(s, de);
-
-            let ser: Vec<u8> = bincode::serialize(&s).expect("write ZStore");
-            let de: ZStore<Scalar> = bincode::deserialize(&ser).expect("read ZStore");
-            assert_eq!(s, de);
-        }
-    }
-}
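Reviewer note, not part of the patch: the hand-written proptest round-trip tests deleted above are what the new `lurk_macros::serde_test` attribute is meant to generate per annotated type, with `zdata(true)` additionally covering the `to_z_data`/`from_z_data` leg. As a minimal hand-written sketch of that property (assuming the `lurk::UInt` re-export and `bincode` 1.x as used in the deleted tests; the test name is illustrative and this is not the macro's literal expansion), the bincode leg for `UInt` looks roughly like:

// Sketch of the round-trip property `#[serde_test]` is expected to check for `UInt`.
// Assumes dev-dependencies on `lurk`, `proptest`, and `bincode`.
use lurk::UInt;
use proptest::prelude::*;

proptest! {
    #[test]
    fn prop_serde_uint_roundtrip(x in any::<UInt>()) {
        // Serialize with bincode, deserialize, and check the value survives the round trip.
        let bytes: Vec<u8> = bincode::serialize(&x).expect("write UInt");
        let de: UInt = bincode::deserialize(&bytes).expect("read UInt");
        assert_eq!(x, de);
    }
}

Types annotated with `serde_test(types(pasta_curves::pallas::Scalar), zdata(true))`, such as `ZExpr`, `ZCont`, `ZPtr`, and `ZStore`, presumably get the same shape of test instantiated at the listed field type, plus the ZData round trip that the removed tests performed explicitly.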