From e0ae65b96434022b7a601b4bcb1aff092ebfd59e Mon Sep 17 00:00:00 2001 From: David Wong Date: Tue, 19 Oct 2021 22:31:59 -0700 Subject: [PATCH 1/9] [kimchi] A gate wiring should only take an array of size PERMUTS, not COLUMNS As we only care about the wiring of the first 7 (PERMUTS) columns, not all 15 (COLUMNS) columns. --- circuits/plonk-15-wires/src/gate.rs | 46 +++++----------------------- circuits/plonk-15-wires/src/wires.rs | 4 +-- ocaml/ocaml-gen/src/conv.rs | 28 +++++++++++++++++ 3 files changed, 37 insertions(+), 41 deletions(-) diff --git a/circuits/plonk-15-wires/src/gate.rs b/circuits/plonk-15-wires/src/gate.rs index acb2bef5e0..72464db8e8 100644 --- a/circuits/plonk-15-wires/src/gate.rs +++ b/circuits/plonk-15-wires/src/gate.rs @@ -416,6 +416,7 @@ impl CircuitGate { pub mod caml { use super::*; use crate::wires::caml::CamlWire; + use itertools::Itertools; use ocaml_gen::OcamlGen; use std::convert::TryInto; @@ -431,14 +432,6 @@ pub mod caml { CamlWire, CamlWire, CamlWire, - CamlWire, - CamlWire, - CamlWire, - CamlWire, - CamlWire, - CamlWire, - CamlWire, - CamlWire, ), pub c: Vec, } @@ -489,36 +482,19 @@ pub mod caml { } /// helper to convert array to tuple (OCaml doesn't have fixed-size arrays) - fn array_to_tuple( - a: [T1; 15], - ) -> (T2, T2, T2, T2, T2, T2, T2, T2, T2, T2, T2, T2, T2, T2, T2) + fn array_to_tuple(a: [T1; PERMUTS]) -> (T2, T2, T2, T2, T2, T2, T2) where T1: Clone, T2: From, { - ( - a[0].clone().into(), - a[1].clone().into(), - a[2].clone().into(), - a[3].clone().into(), - a[4].clone().into(), - a[5].clone().into(), - a[6].clone().into(), - a[7].clone().into(), - a[8].clone().into(), - a[9].clone().into(), - a[10].clone().into(), - a[11].clone().into(), - a[12].clone().into(), - a[13].clone().into(), - a[14].clone().into(), - ) + std::array::IntoIter::new(a) + .map(Into::into) + .next_tuple() + .expect("bug in array_to_tuple") } /// helper to convert tuple to array (OCaml doesn't have fixed-size arrays) - fn tuple_to_array( - a: (T1, T1, T1, T1, T1, T1, T1, T1, T1, T1, T1, T1, T1, T1, T1), - ) -> [T2; 15] + fn tuple_to_array(a: (T1, T1, T1, T1, T1, T1, T1)) -> [T2; PERMUTS] where T2: From, { @@ -530,14 +506,6 @@ pub mod caml { a.4.into(), a.5.into(), a.6.into(), - a.7.into(), - a.8.into(), - a.9.into(), - a.10.into(), - a.11.into(), - a.12.into(), - a.13.into(), - a.14.into(), ] } } diff --git a/circuits/plonk-15-wires/src/wires.rs b/circuits/plonk-15-wires/src/wires.rs index 7c426ae3c1..71d3d48f54 100644 --- a/circuits/plonk-15-wires/src/wires.rs +++ b/circuits/plonk-15-wires/src/wires.rs @@ -27,7 +27,7 @@ pub struct Wire { impl Wire { /// Creates a new set of wires for a given row. - pub fn new(row: usize) -> [Self; COLUMNS] { + pub fn new(row: usize) -> [Self; PERMUTS] { array_init(|col| Self { row, col }) } } @@ -35,7 +35,7 @@ impl Wire { /// GateWires document the wiring of a gate. More specifically, each value either /// represents the same cell (row and column) or a different cell in another row. /// (This is to help the permutation argument.) 
-pub type GateWires = [Wire; COLUMNS]; +pub type GateWires = [Wire; PERMUTS]; impl ToBytes for Wire { #[inline] diff --git a/ocaml/ocaml-gen/src/conv.rs b/ocaml/ocaml-gen/src/conv.rs index 32c420397b..99f0309c7c 100644 --- a/ocaml/ocaml-gen/src/conv.rs +++ b/ocaml/ocaml-gen/src/conv.rs @@ -217,3 +217,31 @@ where const_random!(u128) } } + +impl OCamlDesc for (T1, T2, T3, T4, T5, T6, T7) +where + T1: OCamlDesc, + T2: OCamlDesc, + T3: OCamlDesc, + T4: OCamlDesc, + T5: OCamlDesc, + T6: OCamlDesc, + T7: OCamlDesc, +{ + fn ocaml_desc(env: &Env, generics: &[&str]) -> String { + let v = vec![ + T1::ocaml_desc(env, generics), + T2::ocaml_desc(env, generics), + T3::ocaml_desc(env, generics), + T4::ocaml_desc(env, generics), + T5::ocaml_desc(env, generics), + T6::ocaml_desc(env, generics), + T7::ocaml_desc(env, generics), + ]; + v.join(" * ") + } + + fn unique_id() -> u128 { + const_random!(u128) + } +} From 980f0ea7ac91c4a45c6c10bba9e2f24a07a783ac Mon Sep 17 00:00:00 2001 From: David Wong Date: Tue, 19 Oct 2021 22:32:55 -0700 Subject: [PATCH 2/9] [kimchi] fix serialization --- dlog/plonk-15-wires/src/index.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/dlog/plonk-15-wires/src/index.rs b/dlog/plonk-15-wires/src/index.rs index 51caf79bf1..809f140c44 100644 --- a/dlog/plonk-15-wires/src/index.rs +++ b/dlog/plonk-15-wires/src/index.rs @@ -109,6 +109,7 @@ pub struct VerifierIndex { pub emul_comm: PolyComm, /// Chacha polynomial commitments + #[serde(bound = "PolyComm: Serialize + DeserializeOwned")] pub chacha_comm: Option<[PolyComm; 4]>, /// wire coordinate shifts @@ -296,7 +297,6 @@ where None => (), }; - /* // deserialize let mut verifier_index: Self = bincode::deserialize_from(&mut reader).map_err(|e| e.to_string())?; @@ -310,8 +310,6 @@ where verifier_index.zkpm = zk_polynomial(verifier_index.domain); Ok(verifier_index) - */ - panic!("TODO") } /// Writes a [VerifierIndex] to a file, potentially appending it to the already-existing content (if append is set to true) @@ -325,7 +323,6 @@ where let writer = BufWriter::new(file); - panic!("TODO") - // bincode::serialize_into(writer, self).map_err(|e| e.to_string()) + bincode::serialize_into(writer, self).map_err(|e| e.to_string()) } } From c4682b8020f66835a6bcc80c5cd125fbbeacbd98 Mon Sep 17 00:00:00 2001 From: David Wong Date: Tue, 19 Oct 2021 22:33:08 -0700 Subject: [PATCH 3/9] [CI] enforce cargo fmt in CI --- .github/workflows/rust.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 488e9d0cb1..c9b4fbde92 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -29,3 +29,7 @@ jobs: - name: build ocaml types run: | cargo build --features ocaml_types + + - name: Run cargo fmt + run: | + cargo fmt From a8675449742fd03db1687c045ba82e9b0c727153 Mon Sep 17 00:00:00 2001 From: David Wong Date: Tue, 19 Oct 2021 22:35:23 -0700 Subject: [PATCH 4/9] [rust] run cargo fmt --- circuits/plonk-15-wires/src/gate.rs | 139 ++++--- .../src/nolookup/constraints.rs | 147 +++---- .../plonk-15-wires/src/polynomials/lookup.rs | 359 ++++++++++-------- 3 files changed, 368 insertions(+), 277 deletions(-) diff --git a/circuits/plonk-15-wires/src/gate.rs b/circuits/plonk-15-wires/src/gate.rs index 72464db8e8..07e2fd2fa7 100644 --- a/circuits/plonk-15-wires/src/gate.rs +++ b/circuits/plonk-15-wires/src/gate.rs @@ -4,16 +4,16 @@ This source file implements Plonk constraint gate primitive. 
*****************************************************************************************************************/ +use crate::domains::EvaluationDomains; use crate::{nolookup::constraints::ConstraintSystem, wires::*}; -use ark_ff::bytes::{FromBytes, ToBytes}; -use ark_ff::{Field, FftField}; -use num_traits::cast::{FromPrimitive, ToPrimitive}; +use ark_ff::bytes::ToBytes; +use ark_ff::{FftField, Field}; +use ark_poly::{Evaluations as E, Radix2EvaluationDomain as D}; +use num_traits::cast::ToPrimitive; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use std::io::{Error, ErrorKind, Read, Result as IoResult, Write}; use std::collections::{HashMap, HashSet}; -use ark_poly::{Radix2EvaluationDomain as D, Evaluations as E}; -use crate::domains::EvaluationDomains; +use std::io::{Result as IoResult, Write}; /// A row accessible from a given row, corresponds to the fact that we open all polynomials /// at `zeta` **and** `omega * zeta`. @@ -39,7 +39,7 @@ impl CurrOrNext { #[derive(Clone, Copy, Debug, Serialize, Deserialize)] pub struct LocalPosition { pub row: CurrOrNext, - pub column: usize + pub column: usize, } /// Look up a single value in a lookup table. The value may be computed as a linear @@ -48,7 +48,7 @@ pub struct LocalPosition { pub struct SingleLookup { table_id: usize, // Linear combination of local-positions - pub value: Vec<(F, LocalPosition)> + pub value: Vec<(F, LocalPosition)>, } /// Let's say we want to do a lookup in a "vector-valued" table `T: Vec<[F; n]>` (here I @@ -64,23 +64,26 @@ pub struct SingleLookup { /// analogously using `joint_combiner`. /// /// This function computes that combined value. -pub fn combine_table_entry<'a, F: Field, I: DoubleEndedIterator>(joint_combiner: F, v: I) ->F { +pub fn combine_table_entry<'a, F: Field, I: DoubleEndedIterator>( + joint_combiner: F, + v: I, +) -> F { v.rev().fold(F::zero(), |acc, x| joint_combiner * acc + x) } impl SingleLookup { /// Evaluate the linear combination specifying the lookup value to a field element. pub fn evaluate F>(&self, eval: G) -> F { - self.value.iter().fold(F::zero(), |acc, (c, p)| { - acc + *c * eval(*p) - }) + self.value + .iter() + .fold(F::zero(), |acc, (c, p)| acc + *c * eval(*p)) } } /// A spec for checking that the given vector belongs to a vector-valued lookup table. 
#[derive(Clone, Serialize, Deserialize)] pub struct JointLookup { - pub entry: Vec> + pub entry: Vec>, } impl JointLookup { @@ -98,7 +101,20 @@ impl JointLookup { } #[repr(C)] -#[derive(Clone, Copy, Debug, PartialEq, FromPrimitive, ToPrimitive, Serialize, Deserialize, Eq, Hash, PartialOrd, Ord)] +#[derive( + Clone, + Copy, + Debug, + PartialEq, + FromPrimitive, + ToPrimitive, + Serialize, + Deserialize, + Eq, + Hash, + PartialOrd, + Ord, +)] #[cfg_attr( feature = "ocaml_types", derive(ocaml::IntoValue, ocaml::FromValue, ocaml_gen::OcamlEnum) @@ -106,7 +122,7 @@ impl JointLookup { #[cfg_attr(test, derive(proptest_derive::Arbitrary))] pub enum GateType { /// zero gate - Zero=0, + Zero = 0, /// generic arithmetic gate Generic, /// Poseidon permutation gate @@ -142,7 +158,10 @@ pub struct LookupInfo { } fn lookup_kinds() -> Vec>> { - GateType::lookup_kinds().into_iter().map(|(x, _)| x).collect() + GateType::lookup_kinds() + .into_iter() + .map(|(x, _)| x) + .collect() } fn max_lookups_per_row(kinds: &Vec>>) -> usize { @@ -163,17 +182,15 @@ impl LookupInfo { let kinds = lookup_kinds::(); let max_per_row = max_lookups_per_row(&kinds); LookupInfo { - max_joint_size: - kinds.iter().fold(0, |acc0, v| { - v.iter().fold(acc0, |acc, j| { - std::cmp::max(acc, j.entry.len()) - }) - }), + max_joint_size: kinds.iter().fold(0, |acc0, v| { + v.iter() + .fold(acc0, |acc, j| std::cmp::max(acc, j.entry.len())) + }), kinds_map: GateType::lookup_kinds_map::(), kinds, max_per_row, - empty: vec![] + empty: vec![], } } @@ -183,7 +200,7 @@ impl LookupInfo { for g in gates.iter() { let typ = g.typ; - for r in &[ CurrOrNext::Curr, CurrOrNext::Next ] { + for r in &[CurrOrNext::Curr, CurrOrNext::Next] { if let Some(v) = self.kinds_map.get(&(typ, *r)) { if self.kinds[*v].len() > 0 { return Some(LookupsUsed::Joint); @@ -198,9 +215,13 @@ impl LookupInfo { /// Each entry in `kinds` has a corresponding selector polynomial that controls whether that /// lookup kind should be enforced at a given row. This computes those selector polynomials. - pub fn selector_polynomials<'a>(&'a self, domain: EvaluationDomains, gates: &Vec>) -> Vec>> { + pub fn selector_polynomials<'a>( + &'a self, + domain: EvaluationDomains, + gates: &Vec>, + ) -> Vec>> { let n = domain.d1.size as usize; - let mut res : Vec<_> = self.kinds.iter().map(|_| vec![F::zero(); n]).collect(); + let mut res: Vec<_> = self.kinds.iter().map(|_| vec![F::zero(); n]).collect(); for i in 0..n { let typ = gates[i].typ; @@ -248,51 +269,71 @@ impl GateType { /// See circuits/plonk-15-wires/src/polynomials/chacha.rs for an explanation of /// how these work. 
pub fn lookup_kinds() -> Vec<(Vec>, HashSet<(GateType, CurrOrNext)>)> { - let curr_row = |column| LocalPosition { row: CurrOrNext::Curr, column }; - let chacha_pattern = - (0..4).map(|i| { + let curr_row = |column| LocalPosition { + row: CurrOrNext::Curr, + column, + }; + let chacha_pattern = (0..4) + .map(|i| { let op1 = curr_row(3 + i); let op2 = curr_row(7 + i); let res = curr_row(11 + i); - let l = |loc: LocalPosition| - SingleLookup { table_id:0, value: vec![(F::one(), loc)] }; - JointLookup { entry: vec![l(op1), l(op2), l(res)] } - }).collect(); + let l = |loc: LocalPosition| SingleLookup { + table_id: 0, + value: vec![(F::one(), loc)], + }; + JointLookup { + entry: vec![l(op1), l(op2), l(res)], + } + }) + .collect(); let mut chacha_where = HashSet::new(); - use GateType::*; use CurrOrNext::*; + use GateType::*; - for g in &[ ChaCha0, ChaCha1, ChaCha2 ] { - for r in &[ Curr, Next ] { + for g in &[ChaCha0, ChaCha1, ChaCha2] { + for r in &[Curr, Next] { chacha_where.insert((*g, *r)); } } let one_half = F::from(2u64).inverse().unwrap(); let neg_one_half = -one_half; - let chacha_final_pattern = - (0..4).map(|i| { + let chacha_final_pattern = (0..4) + .map(|i| { let nybble = curr_row(1 + i); let low_bit = curr_row(5 + i); // Check // XOR((nybble - low_bit)/2, (nybble - low_bit)/2) = 0. - let x = - SingleLookup { - table_id:0, - value: vec![(one_half, nybble), (neg_one_half, low_bit)] - }; - JointLookup { entry: vec![x.clone(), x, SingleLookup { table_id:0, value: vec![] } ] } - }).collect(); + let x = SingleLookup { + table_id: 0, + value: vec![(one_half, nybble), (neg_one_half, low_bit)], + }; + JointLookup { + entry: vec![ + x.clone(), + x, + SingleLookup { + table_id: 0, + value: vec![], + }, + ], + } + }) + .collect(); let mut chacha_final_where = HashSet::new(); - for r in &[ Curr, Next ] { + for r in &[Curr, Next] { chacha_final_where.insert((ChaChaFinal, *r)); } - vec![(chacha_pattern, chacha_where), (chacha_final_pattern, chacha_final_where)] + vec![ + (chacha_pattern, chacha_where), + (chacha_final_pattern, chacha_final_where), + ] } - + pub fn lookup_kinds_map() -> HashMap<(GateType, CurrOrNext), usize> { let mut res = HashMap::new(); let lookup_kinds = Self::lookup_kinds::(); @@ -407,7 +448,7 @@ impl CircuitGate { CompleteAdd => self.verify_complete_add(witness), Vbmul => self.verify_vbmul(witness), Endomul => self.verify_endomul(witness, cs), - ChaCha0 | ChaCha1 | ChaCha2 | ChaChaFinal => panic!("todo") + ChaCha0 | ChaCha1 | ChaCha2 | ChaChaFinal => panic!("todo"), } } } diff --git a/circuits/plonk-15-wires/src/nolookup/constraints.rs b/circuits/plonk-15-wires/src/nolookup/constraints.rs index a6a9d3f159..a4dce57e85 100644 --- a/circuits/plonk-15-wires/src/nolookup/constraints.rs +++ b/circuits/plonk-15-wires/src/nolookup/constraints.rs @@ -5,7 +5,7 @@ This source file implements Plonk circuit constraint primitive. *****************************************************************************************************************/ use crate::domains::EvaluationDomains; -use crate::gate::{LookupInfo, CircuitGate, GateType}; +use crate::gate::{CircuitGate, GateType, LookupInfo}; pub use crate::polynomial::{WitnessEvals, WitnessOverDomains, WitnessShifts}; use crate::wires::*; use ark_ff::{FftField, SquareRootField, Zero}; @@ -46,7 +46,6 @@ pub struct ConstraintSystem { // Coefficient polynomials. These define constant that gates can use as they like. 
// --------------------------------------- - /// coefficients polynomials in coefficient form #[serde_as(as = "[o1_utils::serialization::SerdeAs; COLUMNS]")] pub coefficientsm: [DP; COLUMNS], @@ -244,7 +243,7 @@ impl ConstraintSystem { /// creates a constraint system from a vector of gates ([CircuitGate]), some sponge parameters ([ArithmeticSpongeParams]), and the number of public inputs. pub fn create( mut gates: Vec>, - lookup_tables: Vec< Vec> >, + lookup_tables: Vec>>, fr_sponge_params: ArithmeticSpongeParams, public: usize, ) -> Option { @@ -290,20 +289,20 @@ impl ConstraintSystem { } } - let sigmal1 : [_ ; PERMUTS] = { + let sigmal1: [_; PERMUTS] = { let [s0, s1, s2, s3, s4, s5, s6] = sigmal1; - [ E::>::from_vec_and_domain(s0, domain.d1), - E::>::from_vec_and_domain(s1, domain.d1), - E::>::from_vec_and_domain(s2, domain.d1), - E::>::from_vec_and_domain(s3, domain.d1), - E::>::from_vec_and_domain(s4, domain.d1), - E::>::from_vec_and_domain(s5, domain.d1), - E::>::from_vec_and_domain(s6, domain.d1) ] + [ + E::>::from_vec_and_domain(s0, domain.d1), + E::>::from_vec_and_domain(s1, domain.d1), + E::>::from_vec_and_domain(s2, domain.d1), + E::>::from_vec_and_domain(s3, domain.d1), + E::>::from_vec_and_domain(s4, domain.d1), + E::>::from_vec_and_domain(s5, domain.d1), + E::>::from_vec_and_domain(s6, domain.d1), + ] }; - let sigmam: [DP; PERMUTS] = array_init(|i| { - sigmal1[i].clone().interpolate() - }); + let sigmam: [DP; PERMUTS] = array_init(|i| sigmal1[i].clone().interpolate()); let mut s = sid[0..2].to_vec(); // TODO(mimoo): why do we do that? sid.append(&mut s); @@ -322,7 +321,10 @@ impl ConstraintSystem { // compute ECC arithmetic constraint polynomials let complete_addm = E::>::from_vec_and_domain( - gates.iter().map(|gate| F::from((gate.typ == GateType::CompleteAdd) as u64)).collect(), + gates + .iter() + .map(|gate| F::from((gate.typ == GateType::CompleteAdd) as u64)) + .collect(), domain.d1, ) .interpolate(); @@ -350,49 +352,40 @@ impl ConstraintSystem { let chacha8 = { use GateType::*; - let has_chacha_gate = - gates.iter().any(|gate| { - match gate.typ { - ChaCha0 | ChaCha1 | ChaCha2 | ChaChaFinal => true, - _ => false - } - }); + let has_chacha_gate = gates.iter().any(|gate| match gate.typ { + ChaCha0 | ChaCha1 | ChaCha2 | ChaChaFinal => true, + _ => false, + }); if !has_chacha_gate { None } else { - let a : [_; 4] = - array_init(|i| { - let g = - match i { - 0 => ChaCha0, - 1 => ChaCha1, - 2 => ChaCha2, - 3 => ChaChaFinal, - _ => panic!("Invalid index") - }; - E::>::from_vec_and_domain( - gates - .iter() - .map(|gate| { - if gate.typ == g { - F::one() - } else { - F::zero() - } - }) - .collect(), - domain.d1) - .interpolate() - .evaluate_over_domain(domain.d8) - }); + let a: [_; 4] = array_init(|i| { + let g = match i { + 0 => ChaCha0, + 1 => ChaCha1, + 2 => ChaCha2, + 3 => ChaChaFinal, + _ => panic!("Invalid index"), + }; + E::>::from_vec_and_domain( + gates + .iter() + .map(|gate| if gate.typ == g { F::one() } else { F::zero() }) + .collect(), + domain.d1, + ) + .interpolate() + .evaluate_over_domain(domain.d8) + }); Some(a) } }; - let coefficientsm: [_; COLUMNS] = - array_init(|i| { - E::>::from_vec_and_domain( - gates.iter().map(|gate| { + let coefficientsm: [_; COLUMNS] = array_init(|i| { + E::>::from_vec_and_domain( + gates + .iter() + .map(|gate| { if i < gate.c.len() { gate.c[i] } else { @@ -400,9 +393,10 @@ impl ConstraintSystem { } }) .collect(), - domain.d1) - .interpolate() - }); + domain.d1, + ) + .interpolate() + }); // TODO: This doesn't need to be degree 8 but 
that would require some changes in expr let coefficients8 = array_init(|i| coefficientsm[i].evaluate_over_domain_by_ref(domain.d8)); @@ -434,37 +428,44 @@ impl ConstraintSystem { let endo = F::zero(); let lookup_table_lengths: Vec<_> = lookup_tables.iter().map(|v| v[0].len()).collect(); - let dummy_lookup_values : Vec> = - lookup_tables.iter() + let dummy_lookup_values: Vec> = lookup_tables + .iter() .map(|cols| cols.iter().map(|c| c[c.len() - 1]).collect()) .collect(); - let lookup_tables : Vec>> = - lookup_tables + let lookup_tables: Vec>> = lookup_tables .into_iter() .zip(dummy_lookup_values.iter()) .map(|(t, dummy)| { - t.into_iter().enumerate().map(|(i, mut col)| { - let d = dummy[i]; - col.extend((0..(n - col.len())).map(|_| d)); - E::>::from_vec_and_domain(col, domain.d1).interpolate() - }).collect() - }).collect(); - let lookup_tables8 = lookup_tables.iter().map(|t| { - t.iter().map(|col| col.evaluate_over_domain_by_ref(domain.d8)).collect() - }).collect(); + t.into_iter() + .enumerate() + .map(|(i, mut col)| { + let d = dummy[i]; + col.extend((0..(n - col.len())).map(|_| d)); + E::>::from_vec_and_domain(col, domain.d1).interpolate() + }) + .collect() + }) + .collect(); + let lookup_tables8 = lookup_tables + .iter() + .map(|t| { + t.iter() + .map(|col| col.evaluate_over_domain_by_ref(domain.d8)) + .collect() + }) + .collect(); let lookup_info = LookupInfo::::create(); // return result Some(ConstraintSystem { chacha8, - lookup_selectors: - if lookup_info.lookup_used(&gates).is_some() { - LookupInfo::::create().selector_polynomials(domain, &gates) - } else { - vec![] - }, + lookup_selectors: if lookup_info.lookup_used(&gates).is_some() { + LookupInfo::::create().selector_polynomials(domain, &gates) + } else { + vec![] + }, dummy_lookup_values, lookup_table_lengths, lookup_tables8, diff --git a/circuits/plonk-15-wires/src/polynomials/lookup.rs b/circuits/plonk-15-wires/src/polynomials/lookup.rs index 058a4cd56e..cd433a86ba 100644 --- a/circuits/plonk-15-wires/src/polynomials/lookup.rs +++ b/circuits/plonk-15-wires/src/polynomials/lookup.rs @@ -1,4 +1,4 @@ -//! This source file implements the arithmetization of plookup constraints +//! This source file implements the arithmetization of plookup constraints //! //! Because of our ZK-rows, we can't do the trick in the plookup paper of //! wrapping around to enforce consistency between the sorted lookup columns. @@ -18,7 +18,7 @@ //! s2 s2 s6 s6 //! //! So the direction ("increasing" or "decreasing" (relative to LookupTable) -//! is +//! is //! if i % 2 = 0 { Increasing } else { Decreasing } //! //! Then, for each i < max_lookups_per_row, if i % 2 = 0, we enforce that the @@ -28,55 +28,62 @@ use ark_poly::{Evaluations, Radix2EvaluationDomain as D}; -use ark_ff::{Field, FftField, Zero, One}; -use rand::Rng; -use CurrOrNext::*; -use std::collections::{HashMap}; +use crate::expr::{Column, ConstantExpr, Variable, E}; use crate::{ - wires::{COLUMNS}, - gate::{CircuitGate, LookupInfo, LocalPosition, CurrOrNext, SingleLookup, JointLookup}, + gate::{CircuitGate, CurrOrNext, JointLookup, LocalPosition, LookupInfo, SingleLookup}, + wires::COLUMNS, }; +use ark_ff::{FftField, Field, One, Zero}; use oracle::rndoracle::ProofError; -use crate::expr::{E, Variable, Column, ConstantExpr}; +use rand::Rng; +use std::collections::HashMap; +use CurrOrNext::*; // TODO: Update for multiple tables -fn single_lookup(s : &SingleLookup) -> E { +fn single_lookup(s: &SingleLookup) -> E { // Combine the linear combination. 
- s.value.iter().map(|(c, pos)| { - E::literal(*c) * E::Cell(Variable { col: Column::Witness(pos.column), row: pos.row }) - }).fold(E::zero(), |acc, e| acc + e) + s.value + .iter() + .map(|(c, pos)| { + E::literal(*c) + * E::Cell(Variable { + col: Column::Witness(pos.column), + row: pos.row, + }) + }) + .fold(E::zero(), |acc, e| acc + e) } -fn joint_lookup(j : &JointLookup) -> E { - j.entry.iter().enumerate() +fn joint_lookup(j: &JointLookup) -> E { + j.entry + .iter() + .enumerate() .map(|(i, s)| E::constant(ConstantExpr::JointCombiner.pow(i)) * single_lookup(s)) .fold(E::zero(), |acc, x| acc + x) } -struct AdjacentPairs> { +struct AdjacentPairs> { prev_second_component: Option, - i: I + i: I, } -impl> Iterator for AdjacentPairs { +impl> Iterator for AdjacentPairs { type Item = (A, A); fn next(&mut self) -> Option<(A, A)> { match self.prev_second_component { - Some(x) => { - match self.i.next() { - None => None, - Some(y) => { - self.prev_second_component = Some(y); - Some((x, y)) - } + Some(x) => match self.i.next() { + None => None, + Some(y) => { + self.prev_second_component = Some(y); + Some((x, y)) } }, None => { let x = self.i.next(); let y = self.i.next(); match (x, y) { - (None, _) | (_ , None) => None, + (None, _) | (_, None) => None, (Some(x), Some(y)) => { self.prev_second_component = Some(y); Some((x, y)) @@ -87,8 +94,11 @@ impl> Iterator for AdjacentPairs { } } -fn adjacent_pairs>(i : I) -> AdjacentPairs { - AdjacentPairs { i, prev_second_component: None } +fn adjacent_pairs>(i: I) -> AdjacentPairs { + AdjacentPairs { + i, + prev_second_component: None, + } } /// The number of random values to append to columns for zero-knowledge. @@ -96,7 +106,11 @@ pub const ZK_ROWS: usize = 3; /// Pad with zeroes and then add 3 random elements in the last two /// rows for zero knowledge. -pub fn zk_patch(mut e : Vec, d: D, rng: &mut R) -> Evaluations> { +pub fn zk_patch( + mut e: Vec, + d: D, + rng: &mut R, +) -> Evaluations> { let n = d.size as usize; let k = e.len(); assert!(k <= n - ZK_ROWS); @@ -106,7 +120,7 @@ pub fn zk_patch(mut e : Vec, d: D, rng: &mut } /// Checks that all the lookup constraints are satisfied. 
-pub fn verify, G: Fn() -> I>( +pub fn verify, G: Fn() -> I>( dummy_lookup_value: F, lookup_table: G, lookup_table_entries: usize, @@ -115,8 +129,10 @@ pub fn verify, G: Fn() -> I>( witness: &[Vec; COLUMNS], joint_combiner: F, sorted: &Vec>>, - ) -> () { - sorted.iter().for_each(|s| assert_eq!(d1.size, s.domain().size)); +) -> () { + sorted + .iter() + .for_each(|s| assert_eq!(d1.size, s.domain().size)); let n = d1.size as usize; let lookup_rows = n - ZK_ROWS - 1; @@ -132,9 +148,9 @@ pub fn verify, G: Fn() -> I>( } // Check sorting - let mut sorted_joined : Vec = Vec::with_capacity((lookup_rows + 1) * sorted.len()); + let mut sorted_joined: Vec = Vec::with_capacity((lookup_rows + 1) * sorted.len()); for (i, s) in sorted.iter().enumerate() { - let es = s.evals.iter().take(lookup_rows+1); + let es = s.evals.iter().take(lookup_rows + 1); if i % 2 == 0 { sorted_joined.extend(es) } else { @@ -154,7 +170,7 @@ pub fn verify, G: Fn() -> I>( let by_row = lookup_info.by_row(gates); // Compute lookups||table and check multiset equality - let sorted_counts : HashMap = { + let sorted_counts: HashMap = { let mut counts = HashMap::new(); for (i, s) in sorted.iter().enumerate() { if i % 2 == 0 { @@ -170,13 +186,16 @@ pub fn verify, G: Fn() -> I>( counts }; - let mut all_lookups : HashMap = HashMap::new(); - lookup_table().take(lookup_rows).for_each(|t| { - *all_lookups.entry(t).or_insert(0) += 1 - }); + let mut all_lookups: HashMap = HashMap::new(); + lookup_table() + .take(lookup_rows) + .for_each(|t| *all_lookups.entry(t).or_insert(0) += 1); for (i, spec) in by_row.iter().take(lookup_rows).enumerate() { - let eval = |pos : LocalPosition| -> F { - let row = match pos.row { Curr => i, Next => i + 1 }; + let eval = |pos: LocalPosition| -> F { + let row = match pos.row { + Curr => i, + Next => i + 1, + }; witness[pos.column][row] }; for joint_lookup in spec.iter() { @@ -189,7 +208,8 @@ pub fn verify, G: Fn() -> I>( assert_eq!( all_lookups.iter().fold(0, |acc, (_, v)| acc + v), - sorted_counts.iter().fold(0, |acc, (_, v)| acc + v)); + sorted_counts.iter().fold(0, |acc, (_, v)| acc + v) + ); for (k, v) in all_lookups.iter() { let s = sorted_counts.get(k).unwrap_or(&0); @@ -209,7 +229,12 @@ pub trait Entry { type Field: Field; type Params; - fn evaluate(p: & Self::Params, j: &JointLookup, witness: &[Vec; COLUMNS], row: usize) -> Self; + fn evaluate( + p: &Self::Params, + j: &JointLookup, + witness: &[Vec; COLUMNS], + row: usize, + ) -> Self; } #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] @@ -218,9 +243,17 @@ impl Entry for CombinedEntry { type Field = F; type Params = F; - fn evaluate(joint_combiner: &F, j: &JointLookup, witness: &[Vec; COLUMNS], row: usize) -> CombinedEntry { - let eval = |pos : LocalPosition| -> F { - let row = match pos.row { Curr => row, Next => row + 1 }; + fn evaluate( + joint_combiner: &F, + j: &JointLookup, + witness: &[Vec; COLUMNS], + row: usize, + ) -> CombinedEntry { + let eval = |pos: LocalPosition| -> F { + let row = match pos.row { + Curr => row, + Next => row + 1, + }; witness[pos.column][row] }; @@ -235,9 +268,17 @@ impl Entry for UncombinedEntry { type Field = F; type Params = (); - fn evaluate(_: &(), j: &JointLookup, witness: &[Vec; COLUMNS], row: usize) -> UncombinedEntry { - let eval = |pos : LocalPosition| -> F { - let row = match pos.row { Curr => row, Next => row + 1 }; + fn evaluate( + _: &(), + j: &JointLookup, + witness: &[Vec; COLUMNS], + row: usize, + ) -> UncombinedEntry { + let eval = |pos: LocalPosition| -> F { + let row = match pos.row { 
+ Curr => row, + Next => row + 1, + }; witness[pos.column][row] }; @@ -246,13 +287,13 @@ impl Entry for UncombinedEntry { } /// Computes the sorted lookup tables required by the lookup argument. -pub fn sorted - <'a - , F: FftField - , E: Entry + Eq + std::hash::Hash + Clone - , I: Iterator - , G: Fn() -> I > - ( +pub fn sorted< + 'a, + F: FftField, + E: Entry + Eq + std::hash::Hash + Clone, + I: Iterator, + G: Fn() -> I, +>( // TODO: Multiple tables dummy_lookup_value: E, lookup_table: G, @@ -261,14 +302,12 @@ pub fn sorted gates: &Vec>, witness: &[Vec; COLUMNS], params: E::Params, - ) - -> Result>, ProofError> -{ +) -> Result>, ProofError> { // We pad the lookups so that it is as if we lookup exactly // `max_lookups_per_row` in every row. let n = d1.size as usize; - let mut counts : HashMap = HashMap::new(); + let mut counts: HashMap = HashMap::new(); let lookup_rows = n - ZK_ROWS - 1; let lookup_info = LookupInfo::::create(); @@ -292,18 +331,17 @@ pub fn sorted } let sorted = { - let mut sorted : Vec> = vec![]; + let mut sorted: Vec> = vec![]; for _ in 0..max_lookups_per_row + 1 { sorted.push(Vec::with_capacity(lookup_rows + 1)) } let mut i = 0; for t in lookup_table().take(lookup_table_entries) { - let t_count = - match counts.get(&t) { - None => return Err(ProofError::ValueNotInTable), - Some(x) => *x - }; + let t_count = match counts.get(&t) { + None => return Err(ProofError::ValueNotInTable), + Some(x) => *x, + }; for j in 0..t_count { let idx = i + j; let col = idx / lookup_rows; @@ -330,7 +368,7 @@ pub fn sorted } /// Computes the aggregation polynomial for maximum n lookups per row, whose kth entry is the product of terms -/// +/// /// (gamma(1 + beta) + t_i + beta t_{i+1}) \prod_{0 <= j < n} ( (1 + beta) (gamma + f_{i,j}) ) /// ------------------------------------------------------------------------------------------- /// \prod_{0 <= j < n+1} (gamma(1 + beta) + s_{i,j} + beta s_{i+1,j}) @@ -353,7 +391,7 @@ pub fn sorted /// /// after multiplying all of the values, all of the terms will have cancelled if s is a sorting of f and t, and the final term will be 1 /// because of the random choice of beta and gamma, there is negligible probability that the terms will cancel if s is not a sorting of f and t -pub fn aggregation<'a, R: Rng + ?Sized, F: FftField, I: Iterator>( +pub fn aggregation<'a, R: Rng + ?Sized, F: FftField, I: Iterator>( dummy_lookup_value: F, lookup_table: I, d1: D, @@ -364,8 +402,7 @@ pub fn aggregation<'a, R: Rng + ?Sized, F: FftField, I: Iterator>( gamma: F, sorted: &Vec>>, rng: &mut R, - ) -> Result>, ProofError> -{ +) -> Result>, ProofError> { let n = d1.size as usize; let lookup_rows = n - ZK_ROWS - 1; let beta1 = F::one() + beta; @@ -373,15 +410,18 @@ pub fn aggregation<'a, R: Rng + ?Sized, F: FftField, I: Iterator>( let mut lookup_aggreg = vec![F::one()]; lookup_aggreg.extend((0..lookup_rows).map(|row| { - sorted.iter().enumerate().map(|(i, s)| { - let (i1, i2) = - if i % 2 == 0 { + sorted + .iter() + .enumerate() + .map(|(i, s)| { + let (i1, i2) = if i % 2 == 0 { (row, row + 1) } else { (row + 1, row) }; - gammabeta1 + s[i1] + beta * s[i2] - }).fold(F::one(), |acc, x| acc * x) + gammabeta1 + s[i1] + beta * s[i2] + }) + .fold(F::one(), |acc, x| acc * x) })); ark_ff::fields::batch_inversion::(&mut lookup_aggreg[1..]); @@ -391,46 +431,51 @@ pub fn aggregation<'a, R: Rng + ?Sized, F: FftField, I: Iterator>( let complements_with_beta_term = { let mut v = vec![F::one()]; let x = gamma + dummy_lookup_value; - for i in 1..(max_lookups_per_row+1) { + for i in 
1..(max_lookups_per_row + 1) { v.push(v[i - 1] * x) } - let beta1_per_row = beta1.pow(&[ max_lookups_per_row as u64]); + let beta1_per_row = beta1.pow(&[max_lookups_per_row as u64]); v.iter_mut().for_each(|x| *x *= beta1_per_row); v }; - adjacent_pairs(lookup_table).take(lookup_rows) - .zip(lookup_info.by_row(gates)).enumerate().for_each(| (i, ((t0, t1), spec) ) | { - let f_chunk = { - let eval = |pos : LocalPosition| -> F { - let row = match pos.row { Curr => i, Next => i + 1 }; - witness[pos.column][row] - }; + adjacent_pairs(lookup_table) + .take(lookup_rows) + .zip(lookup_info.by_row(gates)) + .enumerate() + .for_each(|(i, ((t0, t1), spec))| { + let f_chunk = { + let eval = |pos: LocalPosition| -> F { + let row = match pos.row { + Curr => i, + Next => i + 1, + }; + witness[pos.column][row] + }; - let padding = complements_with_beta_term[max_lookups_per_row - spec.len()]; + let padding = complements_with_beta_term[max_lookups_per_row - spec.len()]; - // This recomputes `joint_lookup.evaluate` on all the rows, which - // is also computed in `sorted`. It should pretty cheap relative to - // the whole cost of the prover, and saves us - // `max_lookups_per_row (=4) * n` field elements of - // memory. - spec.iter() - .fold(padding, |acc, j| { - acc * (gamma + j.evaluate(joint_combiner, &eval)) - }) - }; + // This recomputes `joint_lookup.evaluate` on all the rows, which + // is also computed in `sorted`. It should pretty cheap relative to + // the whole cost of the prover, and saves us + // `max_lookups_per_row (=4) * n` field elements of + // memory. + spec.iter().fold(padding, |acc, j| { + acc * (gamma + j.evaluate(joint_combiner, &eval)) + }) + }; - // At this point, lookup_aggreg[i + 1] contains 1/s_chunk - // f_chunk / s_chunk - lookup_aggreg[i + 1] *= f_chunk; - // f_chunk * t_chunk / s_chunk - lookup_aggreg[i + 1] *= gammabeta1 + t0 + beta * t1; - let prev = lookup_aggreg[i]; - // prev * f_chunk * t_chunk / s_chunk - lookup_aggreg[i + 1] *= prev; - }); + // At this point, lookup_aggreg[i + 1] contains 1/s_chunk + // f_chunk / s_chunk + lookup_aggreg[i + 1] *= f_chunk; + // f_chunk * t_chunk / s_chunk + lookup_aggreg[i + 1] *= gammabeta1 + t0 + beta * t1; + let prev = lookup_aggreg[i]; + // prev * f_chunk * t_chunk / s_chunk + lookup_aggreg[i + 1] *= prev; + }); Ok(zk_patch(lookup_aggreg, d1, rng)) } @@ -443,9 +488,9 @@ pub fn constraints(dummy_lookup: &Vec, d1: D) -> Vec> { // Another important thing to note is that there are no lookups permitted // in the 3rd to last row. 
// - // This is because computing the lookup-product requires + // This is because computing the lookup-product requires // num_lookup_rows + 1 - // rows, so we need to have + // rows, so we need to have // num_lookup_rows + 1 = n - 2 (the last 2 being reserved for the zero-knowledge random // values) and thus // @@ -454,27 +499,35 @@ pub fn constraints(dummy_lookup: &Vec, d1: D) -> Vec> { let column = |col: Column| E::cell(col, Curr); - let lookup_indicator = - lookup_info.kinds.iter().enumerate().map(|(i, _)| { - column(Column::LookupKindIndex(i)) - }).fold(E::zero(), |acc: E, x| acc + x); + let lookup_indicator = lookup_info + .kinds + .iter() + .enumerate() + .map(|(i, _)| column(Column::LookupKindIndex(i))) + .fold(E::zero(), |acc: E, x| acc + x); - let one : E = E::one(); + let one: E = E::one(); let non_lookup_indcator = one.clone() - lookup_indicator; - let dummy_lookup : ConstantExpr = - dummy_lookup.iter().rev() - .fold(ConstantExpr::zero(), |acc, x| ConstantExpr::JointCombiner * acc + ConstantExpr::Literal(*x)); + let dummy_lookup: ConstantExpr = dummy_lookup + .iter() + .rev() + .fold(ConstantExpr::zero(), |acc, x| { + ConstantExpr::JointCombiner * acc + ConstantExpr::Literal(*x) + }); let complements_with_beta_term: Vec> = { let mut v = vec![ConstantExpr::one()]; let x = ConstantExpr::Gamma + dummy_lookup; - for i in 1..(lookup_info.max_per_row+1) { + for i in 1..(lookup_info.max_per_row + 1) { v.push(v[i - 1].clone() * x.clone()) } - let beta1_per_row: ConstantExpr = (ConstantExpr::one() + ConstantExpr::Beta).pow(lookup_info.max_per_row); - v.iter().map(|x| x.clone() * beta1_per_row.clone()).collect() + let beta1_per_row: ConstantExpr = + (ConstantExpr::one() + ConstantExpr::Beta).pow(lookup_info.max_per_row); + v.iter() + .map(|x| x.clone() * beta1_per_row.clone()) + .collect() }; // This is set up so that on rows that have lookups, chunk will be equal @@ -485,22 +538,22 @@ pub fn constraints(dummy_lookup: &Vec, d1: D) -> Vec> { assert!(spec.len() <= lookup_info.max_per_row); let padding = complements_with_beta_term[lookup_info.max_per_row - spec.len()].clone(); - spec - .iter() - .map(|j| E::Constant(ConstantExpr::Gamma) + joint_lookup(j)) - .fold(E::Constant(padding), |acc: E, x| acc * x) + spec.iter() + .map(|j| E::Constant(ConstantExpr::Gamma) + joint_lookup(j)) + .fold(E::Constant(padding), |acc: E, x| acc * x) }; - let f_chunk = - lookup_info.kinds.iter().enumerate() - .map(|(i, spec)| { - column(Column::LookupKindIndex(i)) * f_term(spec) - }).fold(non_lookup_indcator * f_term(&vec![]), |acc, x| acc + x); - let gammabeta1 = || E::::Constant(ConstantExpr::Gamma * (ConstantExpr::Beta + ConstantExpr::one())); - let ft_chunk = - f_chunk + let f_chunk = lookup_info + .kinds + .iter() + .enumerate() + .map(|(i, spec)| column(Column::LookupKindIndex(i)) * f_term(spec)) + .fold(non_lookup_indcator * f_term(&vec![]), |acc, x| acc + x); + let gammabeta1 = + || E::::Constant(ConstantExpr::Gamma * (ConstantExpr::Beta + ConstantExpr::one())); + let ft_chunk = f_chunk * (gammabeta1() - + E::cell(Column::LookupTable, Curr) - + E::beta() * E::cell(Column::LookupTable, Next)); + + E::cell(Column::LookupTable, Curr) + + E::beta() * E::cell(Column::LookupTable, Next)); let num_rows = d1.size as usize; @@ -524,7 +577,7 @@ pub fn constraints(dummy_lookup: &Vec, d1: D) -> Vec> { // s2 s2 s6 s6 // // So the direction ("increasing" or "decreasing" (relative to LookupTable) - // is + // is // if i % 2 = 0 { Increasing } else { Decreasing } // // Then, for each i < max_lookups_per_row, if i % 2 
= 0, we enforce that the @@ -532,42 +585,39 @@ pub fn constraints(dummy_lookup: &Vec, d1: D) -> Vec> { // and if i % 2 = 1, we enforce that the // first element of LookupSorted(i) = first element of LookupSorted(i + 1) - let s_chunk = - (0..(lookup_info.max_per_row + 1)) + let s_chunk = (0..(lookup_info.max_per_row + 1)) .map(|i| { - let (s1, s2) = - if i % 2 == 0 { - (Curr, Next) - } else { - (Next, Curr) - }; + let (s1, s2) = if i % 2 == 0 { + (Curr, Next) + } else { + (Next, Curr) + }; - gammabeta1() + gammabeta1() + E::cell(Column::LookupSorted(i), s1) + E::beta() * E::cell(Column::LookupSorted(i), s2) }) .fold(E::one(), |acc: E, x| acc * x); - let compatibility_checks : Vec<_> = (0..lookup_info.max_per_row).map(|i| { - let first_or_last = - if i % 2 == 0 { + let compatibility_checks: Vec<_> = (0..lookup_info.max_per_row) + .map(|i| { + let first_or_last = if i % 2 == 0 { // Check compatibility of the last elements num_lookup_rows } else { // Check compatibility of the first elements 0 }; - E::UnnormalizedLagrangeBasis(first_or_last) * - (column(Column::LookupSorted(i)) - - column(Column::LookupSorted(i + 1))) - }).collect(); + E::UnnormalizedLagrangeBasis(first_or_last) + * (column(Column::LookupSorted(i)) - column(Column::LookupSorted(i + 1))) + }) + .collect(); - let aggreg_equation = - E::cell(Column::LookupAggreg, Next) * s_chunk + let aggreg_equation = E::cell(Column::LookupAggreg, Next) * s_chunk - E::cell(Column::LookupAggreg, Curr) * ft_chunk; /* - aggreg.next = + aggreg.next = aggreg.curr * f_chunk * (gammabeta1 + index.lookup_tables[0][i] + beta * index.lookup_tables[0][i+1];) @@ -586,12 +636,11 @@ pub fn constraints(dummy_lookup: &Vec, d1: D) -> Vec> { let mut res = vec![ E::VanishesOnLast4Rows * aggreg_equation, - E::UnnormalizedLagrangeBasis(0) * - (E::cell(Column::LookupAggreg, Curr) - E::one()), + E::UnnormalizedLagrangeBasis(0) * (E::cell(Column::LookupAggreg, Curr) - E::one()), // Check that the 3rd to last row (index = num_rows - 3), which // contains the full product, equals 1 - E::UnnormalizedLagrangeBasis(num_lookup_rows) * - (E::cell(Column::LookupAggreg, Curr) - E::one()), + E::UnnormalizedLagrangeBasis(num_lookup_rows) + * (E::cell(Column::LookupAggreg, Curr) - E::one()), ]; res.extend(compatibility_checks); res From 9a2d0a9cdfcb3be9f3b0b98fb0206e7e5e273283 Mon Sep 17 00:00:00 2001 From: David Wong Date: Tue, 19 Oct 2021 22:36:22 -0700 Subject: [PATCH 5/9] remove dead code --- circuits/plonk-15-wires/src/gate.rs | 38 ----------------------------- 1 file changed, 38 deletions(-) diff --git a/circuits/plonk-15-wires/src/gate.rs b/circuits/plonk-15-wires/src/gate.rs index 07e2fd2fa7..a1bd9dd484 100644 --- a/circuits/plonk-15-wires/src/gate.rs +++ b/circuits/plonk-15-wires/src/gate.rs @@ -384,44 +384,6 @@ impl ToBytes for CircuitGate { } } -impl FromBytes for CircuitGate { - #[inline] - fn read(mut r: R) -> IoResult { - let row = u32::read(&mut r)? 
as usize; - let code = u8::read(&mut r)?; - let typ = match FromPrimitive::from_u8(code) { - Some(x) => Ok(x), - None => Err(Error::new(ErrorKind::Other, "Invalid gate type")), - }?; - - let wires = [ - Wire::read(&mut r)?, - Wire::read(&mut r)?, - Wire::read(&mut r)?, - Wire::read(&mut r)?, - Wire::read(&mut r)?, - Wire::read(&mut r)?, - Wire::read(&mut r)?, - Wire::read(&mut r)?, - Wire::read(&mut r)?, - Wire::read(&mut r)?, - Wire::read(&mut r)?, - Wire::read(&mut r)?, - Wire::read(&mut r)?, - Wire::read(&mut r)?, - Wire::read(&mut r)?, - ]; - - let c_len = u8::read(&mut r)?; - let mut c = vec![]; - for _ in 0..c_len { - c.push(F::read(&mut r)?); - } - - Ok(CircuitGate { row, typ, wires, c }) - } -} - impl CircuitGate { /// this function creates "empty" circuit gate pub fn zero(row: usize, wires: GateWires) -> Self { From c12af2183553708685c7f79c6950ed6c5a87c07e Mon Sep 17 00:00:00 2001 From: David Wong Date: Tue, 19 Oct 2021 22:36:49 -0700 Subject: [PATCH 6/9] [kimchi][permutation] implement an abstraction for shifts --- .../src/nolookup/constraints.rs | 133 +++++++++++------- 1 file changed, 83 insertions(+), 50 deletions(-) diff --git a/circuits/plonk-15-wires/src/nolookup/constraints.rs b/circuits/plonk-15-wires/src/nolookup/constraints.rs index a4dce57e85..fc47ea8ab6 100644 --- a/circuits/plonk-15-wires/src/nolookup/constraints.rs +++ b/circuits/plonk-15-wires/src/nolookup/constraints.rs @@ -173,6 +173,78 @@ pub struct ConstraintSystem { pub lookup_selectors: Vec>>, } +/// Shifts represent the shifts required in the permutation argument of PLONK +pub struct Shifts { + /// The coefficients k that create a coset when multiplied with the generator of our domain. + shifts: [F; PERMUTS], + /// A matrix that maps all cells coordinates {col, row} to their shifted field element. 
+ /// For example the cell {col:2, row:1} will map to omega * k2, + /// which lives in map[2][1] + map: [Vec; PERMUTS], +} + +impl Shifts +where + F: FftField + SquareRootField, +{ + /// Generates the shifts for a given domain + pub fn new(domain: &D) -> Self { + let mut shifts = [F::zero(); PERMUTS]; + + // first shift is the identity + shifts[0] = F::one(); + + // sample the other shifts + let mut i: u32 = 7; + for idx in 1..(PERMUTS) { + let mut o = Self::sample(&domain, &mut i); + while shifts.iter().filter(|&r| o == *r).count() > 0 { + o = Self::sample(&domain, &mut i); + } + shifts[idx] = o; + } + + // create a map of cells to their shifted value + let map: [Vec; PERMUTS] = + array_init(|i| domain.elements().map(|elm| shifts[i] * &elm).collect()); + + // + Self { shifts, map } + } + + /// sample coordinate shifts deterministically + fn sample(domain: &D, i: &mut u32) -> F { + let mut h = Blake2b::new(); + h.update( + &{ + *i += 1; + *i + } + .to_be_bytes(), + ); + let mut r = F::from_random_bytes(&h.finalize()[..31]).unwrap(); + while r.legendre().is_qnr() == false || domain.evaluate_vanishing_polynomial(r).is_zero() { + let mut h = Blake2b::new(); + h.update( + &{ + *i += 1; + *i + } + .to_be_bytes(), + ); + r = F::from_random_bytes(&h.finalize()[..31]).unwrap(); + } + r + } + + /// Returns the field element that represents a position + fn cell_to_field(&self, &Wire { row, col }: &Wire) -> F { + self.map[col][row] + } +} + +/// + /// Returns the end of the circuit, which is used for introducing zero-knowledge in the permutation polynomial pub fn zk_w3(domain: D) -> F { domain.group_gen.pow(&[domain.size - 3]) @@ -252,17 +324,14 @@ impl ConstraintSystem { // +3 on gates.len() here to ensure that we have room for the zero-knowledge entries of the permutation polynomial // see https://minaprotocol.com/blog/a-more-efficient-approach-to-zero-knowledge-for-plonk + // TODO: hardcode this value somewhere let domain = EvaluationDomains::::create(gates.len() + 3)?; assert!(domain.d1.size > 3); // pre-compute all the elements let mut sid = domain.d1.elements().map(|elm| elm).collect::>(); - // sample the coordinate shifts - // TODO(mimoo): should we check that the shifts are all different? - let shift = Self::sample_shifts(&domain.d1, PERMUTS - 1); - let shift: [F; PERMUTS] = array_init(|i| if i == 0 { F::one() } else { shift[i - 1] }); - + // pad the rows: add zero gates to reach the domain size let n = domain.d1.size(); let mut padding = (gates.len()..n) .map(|i| { @@ -277,15 +346,17 @@ impl ConstraintSystem { .collect(); gates.append(&mut padding); - let s: [std::vec::Vec; PERMUTS] = - array_init(|i| domain.d1.elements().map(|elm| shift[i] * &elm).collect()); - let mut sigmal1 = s.clone(); + // sample the coordinate shifts + // TODO(mimoo): should we check that the shifts are all different? 
+ let shifts = Shifts::new(&domain.d1); // compute permutation polynomials + let mut sigmal1: [Vec; PERMUTS] = + array_init(|_| vec![F::zero(); domain.d1.size as usize]); + for (row, gate) in gates.iter().enumerate() { - for col in 0..PERMUTS { - let wire = gate.wires[col]; - sigmal1[col][row] = s[wire.col][wire.row]; + for (cell, sigma) in gate.wires.iter().zip(sigmal1.iter_mut()) { + sigma[row] = shifts.cell_to_field(cell); } } @@ -497,7 +568,7 @@ impl ConstraintSystem { zkpm, vanishes_on_last_4_rows, gates, - shift, + shift: shifts.shifts, endo, fr_sponge_params, }) @@ -540,44 +611,6 @@ impl ConstraintSystem { return Ok(()); } - /// sample coordinate shifts deterministically - pub fn sample_shift(domain: &D, i: &mut u32) -> F { - let mut h = Blake2b::new(); - h.update( - &{ - *i += 1; - *i - } - .to_be_bytes(), - ); - let mut r = F::from_random_bytes(&h.finalize()[..31]).unwrap(); - while r.legendre().is_qnr() == false || domain.evaluate_vanishing_polynomial(r).is_zero() { - let mut h = Blake2b::new(); - h.update( - &{ - *i += 1; - *i - } - .to_be_bytes(), - ); - r = F::from_random_bytes(&h.finalize()[..31]).unwrap(); - } - r - } - - pub fn sample_shifts(domain: &D, len: usize) -> Vec { - let mut i: u32 = 7; - let mut shifts = Vec::with_capacity(len); - while shifts.len() < len { - let mut o = Self::sample_shift(&domain, &mut i); - while shifts.iter().filter(|&r| o == *r).count() > 0 { - o = Self::sample_shift(&domain, &mut i) - } - shifts.push(o) - } - shifts - } - /// evaluate witness polynomials over domains pub fn evaluate(&self, w: &[DP; COLUMNS], z: &DP) -> WitnessOverDomains { // compute shifted witness polynomials From 26c625e6203d43de9024f5458b0c03344cdd48bd Mon Sep 17 00:00:00 2001 From: David Wong Date: Tue, 19 Oct 2021 23:33:20 -0700 Subject: [PATCH 7/9] [kimchi] fix tests for permutation --- circuits/plonk-15-wires/src/gate.rs | 2 +- circuits/plonk-15-wires/src/nolookup/constraints.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/circuits/plonk-15-wires/src/gate.rs b/circuits/plonk-15-wires/src/gate.rs index a1bd9dd484..21f233502a 100644 --- a/circuits/plonk-15-wires/src/gate.rs +++ b/circuits/plonk-15-wires/src/gate.rs @@ -565,7 +565,7 @@ mod tests { println!("decoded gate: {:?}", decoded); prop_assert_eq!(cg.row, decoded.row); prop_assert_eq!(cg.typ, decoded.typ); - for i in 0..COLUMNS { + for i in 0..PERMUTS { prop_assert_eq!(cg.wires[i], decoded.wires[i]); } prop_assert_eq!(cg.c, decoded.c); diff --git a/circuits/plonk-15-wires/src/nolookup/constraints.rs b/circuits/plonk-15-wires/src/nolookup/constraints.rs index fc47ea8ab6..f2f639d2cc 100644 --- a/circuits/plonk-15-wires/src/nolookup/constraints.rs +++ b/circuits/plonk-15-wires/src/nolookup/constraints.rs @@ -583,7 +583,7 @@ impl ConstraintSystem { for (row, gate) in self.gates.iter().enumerate() { // check if wires are connected - for col in 0..COLUMNS { + for col in 0..PERMUTS { let wire = gate.wires[col]; if witness[col][row] != witness[wire.col][wire.row] { return Err(GateError::DisconnectedWires( From 04c7cd3d41be3a5ed1a98ab9f97b880d1ed1e233 Mon Sep 17 00:00:00 2001 From: David Wong Date: Fri, 22 Oct 2021 12:00:39 -0700 Subject: [PATCH 8/9] address feedback --- .../src/nolookup/constraints.rs | 46 +++++++++---------- 1 file changed, 21 insertions(+), 25 deletions(-) diff --git a/circuits/plonk-15-wires/src/nolookup/constraints.rs b/circuits/plonk-15-wires/src/nolookup/constraints.rs index f2f639d2cc..28052048f4 100644 --- a/circuits/plonk-15-wires/src/nolookup/constraints.rs +++ 
b/circuits/plonk-15-wires/src/nolookup/constraints.rs @@ -173,7 +173,8 @@ pub struct ConstraintSystem { pub lookup_selectors: Vec>>, } -/// Shifts represent the shifts required in the permutation argument of PLONK +/// Shifts represent the shifts required in the permutation argument of PLONK. +/// It also caches the shifted powers of omega for optimization purposes. pub struct Shifts { /// The coefficients k that create a coset when multiplied with the generator of our domain. shifts: [F; PERMUTS], @@ -197,11 +198,12 @@ where // sample the other shifts let mut i: u32 = 7; for idx in 1..(PERMUTS) { - let mut o = Self::sample(&domain, &mut i); - while shifts.iter().filter(|&r| o == *r).count() > 0 { - o = Self::sample(&domain, &mut i); + let mut shift = Self::sample(&domain, &mut i); + // they have to be distincts + while shifts.contains(&shift) { + shift = Self::sample(&domain, &mut i); } - shifts[idx] = o; + shifts[idx] = shift; } // create a map of cells to their shifted value @@ -213,28 +215,23 @@ where } /// sample coordinate shifts deterministically - fn sample(domain: &D, i: &mut u32) -> F { + fn sample(domain: &D, input: &mut u32) -> F { let mut h = Blake2b::new(); - h.update( - &{ - *i += 1; - *i - } - .to_be_bytes(), - ); - let mut r = F::from_random_bytes(&h.finalize()[..31]).unwrap(); - while r.legendre().is_qnr() == false || domain.evaluate_vanishing_polynomial(r).is_zero() { + + *input += 1; + h.update(&input.to_be_bytes()); + + let mut shift = F::from_random_bytes(&h.finalize()[..31]) + .expect("our field elements fit in more than 31 bytes"); + + while !shift.legendre().is_qnr() || domain.evaluate_vanishing_polynomial(shift).is_zero() { let mut h = Blake2b::new(); - h.update( - &{ - *i += 1; - *i - } - .to_be_bytes(), - ); - r = F::from_random_bytes(&h.finalize()[..31]).unwrap(); + *input += 1; + h.update(&input.to_be_bytes()); + shift = F::from_random_bytes(&h.finalize()[..31]) + .expect("our field elements fit in more than 31 bytes"); } - r + shift } /// Returns the field element that represents a position @@ -347,7 +344,6 @@ impl ConstraintSystem { gates.append(&mut padding); // sample the coordinate shifts - // TODO(mimoo): should we check that the shifts are all different? let shifts = Shifts::new(&domain.d1); // compute permutation polynomials From 923799b74a2dbb9d94349d5735a96d0d073cc8d7 Mon Sep 17 00:00:00 2001 From: David Wong Date: Mon, 25 Oct 2021 13:55:30 -0700 Subject: [PATCH 9/9] [kimchi] add a getter to shifts --- circuits/plonk-15-wires/src/nolookup/constraints.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/circuits/plonk-15-wires/src/nolookup/constraints.rs b/circuits/plonk-15-wires/src/nolookup/constraints.rs index 28052048f4..89271cc7ad 100644 --- a/circuits/plonk-15-wires/src/nolookup/constraints.rs +++ b/circuits/plonk-15-wires/src/nolookup/constraints.rs @@ -214,6 +214,11 @@ where Self { shifts, map } } + /// retrieve the shifts + pub fn shifts(&self) -> &[F; PERMUTS] { + &self.shifts + } + /// sample coordinate shifts deterministically fn sample(domain: &D, input: &mut u32) -> F { let mut h = Blake2b::new();
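
To illustrate the `Shifts` abstraction that patches 6 through 9 introduce, here is a minimal usage sketch of the public surface it ends up with (`Shifts::new` plus the `shifts()` getter added in the last patch). It is not taken from the patches themselves: the crate path `plonk_15_wires_circuits`, the re-export of `PERMUTS` from the `wires` module, and the Pasta field `mina_curves::pasta::Fp` are assumptions made for illustration and may need adjusting to the actual workspace layout.

```rust
// A minimal sketch (assumed crate/module paths, not part of the patch series)
// showing how the new `Shifts` API is driven from outside the constraint system.
use ark_ff::One;
use ark_poly::{EvaluationDomain, Radix2EvaluationDomain};
use mina_curves::pasta::Fp; // assumed field type for the example
use plonk_15_wires_circuits::nolookup::constraints::Shifts; // assumed crate name
use plonk_15_wires_circuits::wires::PERMUTS;

fn main() {
    // Build the same kind of multiplicative subgroup domain the constraint
    // system uses for d1.
    let domain = Radix2EvaluationDomain::<Fp>::new(1 << 10).expect("valid domain size");

    // Deterministically sample the PERMUTS coset shifts for this domain.
    let shifts = Shifts::new(&domain);

    // The getter from the final patch exposes the raw shifts k_0..k_6.
    let ks: &[Fp; PERMUTS] = shifts.shifts();

    // k_0 is the identity, and `Shifts::new` resamples until all shifts are
    // pairwise distinct, so these checks should hold.
    assert_eq!(ks[0], Fp::one());
    for i in 0..PERMUTS {
        for j in (i + 1)..PERMUTS {
            assert_ne!(ks[i], ks[j]);
        }
    }
}
```

The design choice the series makes is to keep the shift sampling and the per-cell lookup table behind one struct: `ConstraintSystem::create` no longer threads a raw shift array and a separately built shifted-domain matrix around, it calls `shifts.cell_to_field(wire)` while filling the sigma columns and stores `shifts.shifts` for the verifier.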