diff --git a/.gitignore b/.gitignore index bceff729..8c24fd98 100644 --- a/.gitignore +++ b/.gitignore @@ -2,4 +2,4 @@ Cargo.lock **/*.rs.bk .vscode -**/*.html \ No newline at end of file +**/*.html diff --git a/Cargo.toml b/Cargo.toml index 6d30863d..eb97a653 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "halo2curves" -version = "0.3.1" +version = "0.3.2" authors = [ "Sean Bowe ", "Jack Grigg ", @@ -16,6 +16,7 @@ description = "Elliptic curve implementations and wrappers for halo2 library" criterion = { version = "0.3", features = ["html_reports"] } rand_xorshift = "0.3" ark-std = { version = "0.3", features = ["print-trace"] } +pretty_assertions = "1.3.0" [dependencies] subtle = "2.4" diff --git a/rust-toolchain b/rust-toolchain new file mode 100644 index 00000000..07ade694 --- /dev/null +++ b/rust-toolchain @@ -0,0 +1 @@ +nightly \ No newline at end of file diff --git a/src/arithmetic.rs b/src/arithmetic.rs index 388a422b..410f54ac 100644 --- a/src/arithmetic.rs +++ b/src/arithmetic.rs @@ -76,6 +76,13 @@ pub(crate) const fn adc(a: u64, b: u64, carry: bool) -> (u64, bool) { a.carrying_add(b, carry) } +/// Compute a + b + carry, returning the result and the new carry as a u64. +#[inline(always)] +pub(crate) const fn adc_u64(a: u64, b: u64, carry: u64) -> (u64, u64) { + let ret = (a as u128) + (b as u128) + (carry as u128); + (ret as u64, (ret >> 64) as u64) +} + /// Compute a - b - borrow, returning the result and the new borrow. #[inline(always)] pub(crate) const fn sbb(a: u64, b: u64, borrow: bool) -> (u64, bool) { diff --git a/src/bls12_381/curve.rs b/src/bls12_381/curve.rs new file mode 100644 index 00000000..4afedbc7 --- /dev/null +++ b/src/bls12_381/curve.rs @@ -0,0 +1,663 @@ +use crate::bls12_381::Fq; +use crate::bls12_381::Fq2; +use crate::bls12_381::Fr; +use crate::{Coordinates, CurveAffine, CurveAffineExt, CurveExt, Group}; +use core::cmp; +use core::fmt::Debug; +use core::iter::Sum; +use core::ops::{Add, Mul, Neg, Sub}; +use ff::Field; +use group::Curve; +use group::{cofactor::CofactorGroup, prime::PrimeCurveAffine, Group as _, GroupEncoding}; +use rand::RngCore; +use serde::{Deserialize, Serialize}; +use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption}; + +use crate::{ + batch_add, impl_add_binop_specify_output, impl_binops_additive, + impl_binops_additive_specify_output, impl_binops_multiplicative, + impl_binops_multiplicative_mixed, impl_sub_binop_specify_output, new_curve_impl_bls12_381, +}; + +use super::BLS_X; +use super::BLS_X_IS_NEGATIVE; + +new_curve_impl_bls12_381!( + (pub), // (($($privacy:tt)*), + G1, // $name:ident, + G1Affine, // $name_affine:ident, + G1Compressed, // $name_compressed:ident, + Fq::size(), // $compressed_size:expr, + Fq, // $base:ident, + Fr, // $scalar:ident, + (G1_GENERATOR_X,G1_GENERATOR_Y), // $generator:expr, + G1_B, // $constant_b:expr, + "bls12-381_g1", // $curve_id:literal, +); + +new_curve_impl_bls12_381!( + (pub), + G2, // $name:ident, + G2Affine, // $name_affine:ident, + G2Compressed, // $name_compressed:ident, + Fq2::size(), // $compressed_size:expr, + Fq2, // $base:ident, + Fr, // $scalar:ident, + (G2_GENERATOR_X, G2_GENERATOR_Y), // $generator:expr, + G2_B, // $constant_b:expr, + "bls12-381_g2", // $curve_id:literal, +); + +impl CurveAffineExt for G1Affine { + batch_add!(); + + fn into_coordinates(self) -> (Self::Base, Self::Base) { + (self.x, self.y) + } +} + +impl CurveAffineExt for G2Affine { + batch_add!(); + + fn into_coordinates(self) -> (Self::Base, Self::Base) { + (self.x, self.y) 
+ } +} + +const G1_GENERATOR_X: Fq = Fq::from_raw_unchecked([ + 0x5cb38790fd530c16, + 0x7817fc679976fff5, + 0x154f95c7143ba1c1, + 0xf0ae6acdf3d0e747, + 0xedce6ecc21dbf440, + 0x120177419e0bfb75, +]); +const G1_GENERATOR_Y: Fq = Fq::from_raw_unchecked([ + 0xbaac93d50ce72271, + 0x8c22631a7918fd8e, + 0xdd595f13570725ce, + 0x51ac582950405194, + 0x0e1c8c3fad0059c0, + 0x0bbc3efc5008a26a, +]); +const G1_B: Fq = Fq::from_raw_unchecked([ + 0xaa270000000cfff3, + 0x53cc0032fc34000a, + 0x478fe97a6b0a807f, + 0xb1d37ebee6ba24d7, + 0x8ec9733bbf78ab2f, + 0x09d645513d83de7e, +]); + +const ENDO_BETA: Fq = Fq::from_raw_unchecked([ + 0x30f1361b798a64e8, + 0xf3b8ddab7ece5a2a, + 0x16a8ca3ac61577f7, + 0xc26a2ff874fd029b, + 0x3636b76660701c6e, + 0x051ba4ab241b6160, +]); + +const G2_B: Fq2 = Fq2 { + c0: Fq::from_raw_unchecked([ + 0xaa270000000cfff3, + 0x53cc0032fc34000a, + 0x478fe97a6b0a807f, + 0xb1d37ebee6ba24d7, + 0x8ec9733bbf78ab2f, + 0x09d645513d83de7e, + ]), + c1: Fq::from_raw_unchecked([ + 0xaa270000000cfff3, + 0x53cc0032fc34000a, + 0x478fe97a6b0a807f, + 0xb1d37ebee6ba24d7, + 0x8ec9733bbf78ab2f, + 0x09d645513d83de7e, + ]), +}; + +const G2_GENERATOR_X: Fq2 = Fq2 { + c0: Fq::from_raw_unchecked([ + 0xf5f28fa202940a10, + 0xb3f5fb2687b4961a, + 0xa1a893b53e2ae580, + 0x9894999d1a3caee9, + 0x6f67b7631863366b, + 0x058191924350bcd7, + ]), + c1: Fq::from_raw_unchecked([ + 0xa5a9c0759e23f606, + 0xaaa0c59dbccd60c3, + 0x3bb17e18e2867806, + 0x1b1ab6cc8541b367, + 0xc2b6ed0ef2158547, + 0x11922a097360edf3, + ]), +}; + +const G2_GENERATOR_Y: Fq2 = Fq2 { + c0: Fq::from_raw_unchecked([ + 0x4c730af860494c4a, + 0x597cfa1f5e369c5a, + 0xe7e6856caa0a635a, + 0xbbefb5e96e0d495f, + 0x07d3a975f0ef25a2, + 0x0083fd8e7e80dae5, + ]), + + c1: Fq::from_raw_unchecked([ + 0xadc0fc92df64b05d, + 0x18aa270a2b1461dc, + 0x86adac6a3be4eba0, + 0x79495c4ec93da33a, + 0xe7175850a43ccaed, + 0x0b2bc2a163de1bf2, + ]), +}; + +// const B3: Fq2 = Fq2::add(&Fq2::add(&B, &B), &B); + +trait CurveEndo: CurveExt { + fn endomorphism_base(&self) -> Self; + fn endomorphism_scalars(k: &Self::ScalarExt) -> (u128, u128); +} + +impl CurveEndo for G1 { + fn endomorphism_base(&self) -> Self { + unimplemented!(); + } + + fn endomorphism_scalars(_: &Self::ScalarExt) -> (u128, u128) { + unimplemented!(); + } +} + +impl CurveEndo for G2 { + fn endomorphism_base(&self) -> Self { + unimplemented!(); + } + + fn endomorphism_scalars(_: &Self::ScalarExt) -> (u128, u128) { + unimplemented!(); + } +} + +fn endomorphism(p: &G1Affine) -> G1Affine { + // Endomorphism of the points on the curve. + // endomorphism_p(x,y) = (BETA * x, y) + // where BETA is a non-trivial cubic root of unity in Fq. + let mut res = *p; + res.x *= ENDO_BETA; + res +} + +impl G1 { + /// Multiply `self` by `crate::BLS_X`, using double and add. + fn mul_by_x(&self) -> Self { + let mut xself = G1::identity(); + // NOTE: in BLS12-381 we can just skip the first bit. 
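+        // Skipping it is sound because the least-significant bit of
+        // BLS_X = 0xd201_0000_0001_0000 is zero; since `tmp` is doubled before
+        // each conditional add, the loop still accumulates `self * BLS_X` from
+        // the remaining, shifted-down bits.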
+ let mut x = BLS_X >> 1; + let mut tmp = *self; + while x != 0 { + tmp = tmp.double(); + + if x % 2 == 1 { + xself += tmp; + } + x >>= 1; + } + // finally, flip the sign + if BLS_X_IS_NEGATIVE { + xself = -xself; + } + xself + } + + #[inline(always)] + fn mul_by_3b(a: Fq) -> Fq { + let a = a + a; // 2 + let a = a + a; // 4 + a + a + a // 12 + } +} + +impl group::cofactor::CofactorGroup for G1 { + type Subgroup = G1; + + fn clear_cofactor(&self) -> Self { + self - self.mul_by_x() + } + + fn into_subgroup(self) -> CtOption { + CtOption::new(self, 1.into()) + } + + fn is_torsion_free(&self) -> Choice { + unimplemented!() + } +} + +impl G2 { + /// Multiply `self` by `crate::BLS_X`, using double and add. + fn mul_by_x(&self) -> Self { + let mut xself = Self::identity(); + // NOTE: in BLS12-381 we can just skip the first bit. + let mut x = BLS_X >> 1; + let mut acc = *self; + while x != 0 { + acc = acc.double(); + if x % 2 == 1 { + xself += acc; + } + x >>= 1; + } + // finally, flip the sign + if BLS_X_IS_NEGATIVE { + xself = -xself; + } + xself + } + + fn psi(&self) -> Self { + // 1 / ((u+1) ^ ((q-1)/3)) + let psi_coeff_x = Fq2 { + c0: Fq::zero(), + c1: Fq([ + 0x890dc9e4867545c3, + 0x2af322533285a5d5, + 0x50880866309b7e2c, + 0xa20d1b8c7e881024, + 0x14e4f04fe2db9068, + 0x14e56d3f1564853a, + ]), + }; + // 1 / ((u+1) ^ (p-1)/2) + let psi_coeff_y = Fq2 { + c0: Fq([ + 0x3e2f585da55c9ad1, + 0x4294213d86c18183, + 0x382844c88b623732, + 0x92ad2afd19103e18, + 0x1d794e4fac7cf0b9, + 0x0bd592fc7d825ec8, + ]), + c1: Fq([ + 0x7bcfa7a25aa30fda, + 0xdc17dec12a927e7c, + 0x2f088dd86b4ebef1, + 0xd1ca2087da74d4a7, + 0x2da2596696cebc1d, + 0x0e2b7eedbbfd87d2, + ]), + }; + + let mut x = self.x; + let mut y = self.y; + let mut z = self.z; + + x.frobenius_map(1); + y.frobenius_map(1); + z.frobenius_map(1); + + Self { + // x = frobenius(x)/((u+1)^((p-1)/3)) + x: x * psi_coeff_x, + // y = frobenius(y)/(u+1)^((p-1)/2) + y: y * psi_coeff_y, + // z = frobenius(z) + z, + } + } + + fn psi2(&self) -> Self { + // 1 / 2 ^ ((q-1)/3) + let psi2_coeff_x = Fq2 { + c0: Fq([ + 0xcd03c9e48671f071, + 0x5dab22461fcda5d2, + 0x587042afd3851b95, + 0x8eb60ebe01bacb9e, + 0x03f97d6e83d050d2, + 0x18f0206554638741, + ]), + c1: Fq::zero(), + }; + + Self { + // x = frobenius^2(x)/2^((p-1)/3); note that q^2 is the order of the field. + x: self.x * psi2_coeff_x, + // y = -frobenius^2(y); note that q^2 is the order of the field. + y: self.y.neg(), + // z = z + z: self.z, + } + } + + #[inline(always)] + fn mul_by_3b(x: Fq2) -> Fq2 { + let b3: Fq2 = Fq2::add(&Fq2::add(&G2_B, &G2_B), &G2_B); + x * b3 + } +} + +impl CofactorGroup for G2 { + type Subgroup = G2; + + /// Clears the cofactor, using [Budroni-Pintore](https://ia.cr/2017/419). + /// This is equivalent to multiplying by $h\_\textrm{eff} = 3(z^2 - 1) \cdot + /// h_2$, where $h_2$ is the cofactor of $\mathbb{G}\_2$ and $z$ is the + /// parameter of BLS12-381. + fn clear_cofactor(&self) -> Self { + let t1 = self.mul_by_x(); // P + let t2 = self.psi(); // psi(P) + + self.double().psi2() // psi^2(2P) + + (t1 + t2).mul_by_x() // psi^2(2P) + [x^2] P + [x] psi(P) + - t1 // psi^2(2P) + [x^2 - x] P + [x] psi(P) + - t2 // psi^2(2P) + [x^2 - x] P + [x - 1] psi(P) + - self // psi^2(2P) + [x^2 - x - 1] P + [x - 1] psi(P) + } + + fn into_subgroup(self) -> CtOption { + unimplemented!(); + } + + /// Returns true if this point is free of an $h$-torsion component, and so it + /// exists within the $q$-order subgroup $\mathbb{G}_2$. This should always return true + /// unless an "unchecked" API was used. 
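+    /// The test used below is $\psi(P) = [x] P$, where $\psi$ is the
+    /// untwist-Frobenius-twist endomorphism implemented by `psi()` and $x$ is
+    /// the BLS parameter; on this curve that relation holds exactly for the
+    /// points of the $q$-order subgroup, and checking it is much cheaper than
+    /// a full scalar multiplication by the group order.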
+ fn is_torsion_free(&self) -> Choice { + // Algorithm from Section 4 of https://eprint.iacr.org/2021/1130 + // Updated proof of correctness in https://eprint.iacr.org/2022/352 + // + // Check that psi(P) == [x] P + let p = *self; + p.psi().ct_eq(&p.mul_by_x()) + } +} + +#[cfg(test)] +mod tests { + + use crate::bls12_381::G1; + // use crate::bls12_381::{curve::CurveEndo, curve::ENDO_BETA, Fr, G1Affine, G2}; + // use ff::Field; + // use rand_core::OsRng; + // use crate::CurveExt; + + #[test] + fn test_curve_g1() { + crate::tests::curve::curve_tests_bls12_381::(); + } + + #[test] + fn test_curve_g2() { + crate::tests::curve::curve_tests_bls12_381::(); + } + + // TODO - [TEST] [serde] Need to add support for serde + // #[test] + // fn test_serialization() { + // crate::tests::curve::random_serialization_test::(); + // crate::tests::curve::random_serialization_test::(); + // } +} + +impl group::UncompressedEncoding for G1Affine { + type Uncompressed = G1Compressed; + + fn from_uncompressed(_: &Self::Uncompressed) -> CtOption { + unimplemented!(); + } + + fn from_uncompressed_unchecked(_: &Self::Uncompressed) -> CtOption { + unimplemented!(); + } + + fn to_uncompressed(&self) -> Self::Uncompressed { + unimplemented!(); + } +} + +impl group::UncompressedEncoding for G2Affine { + type Uncompressed = G2Compressed; + + fn from_uncompressed(_: &Self::Uncompressed) -> CtOption { + unimplemented!(); + } + + fn from_uncompressed_unchecked(_: &Self::Uncompressed) -> CtOption { + unimplemented!(); + } + + fn to_uncompressed(&self) -> Self::Uncompressed { + unimplemented!(); + } +} + +impl G1Affine { + /// Returns true if this point is free of an $h$-torsion component, and so it + /// exists within the $q$-order subgroup $\mathbb{G}_1$. This should always return true + /// unless an "unchecked" API was used. + fn is_torsion_free(&self) -> Choice { + // Algorithm from Section 6 of https://eprint.iacr.org/2021/1130 + // Updated proof of correctness in https://eprint.iacr.org/2022/352 + // + // Check that endomorphism_p(P) == -[x^2] P + + let minus_x_squared_times_p = G1::from(self).mul_by_x().mul_by_x().neg(); + let endomorphism_p = endomorphism(self); + minus_x_squared_times_p.ct_eq(&G1::from(endomorphism_p)) + } + + fn to_compressed(self) -> [u8; 48] { + // Strictly speaking, self.x is zero already when self.infinity is true, but + // to guard against implementation mistakes we do not assume this. + let mut res = Fq::conditional_select(&self.x, &Fq::zero(), self.infinity).to_bytes(); + + // This point is in compressed form, so we set the most significant bit. + res[0] |= 1u8 << 7; + + // Is this point at infinity? If so, set the second-most significant bit. + res[0] |= u8::conditional_select(&0u8, &(1u8 << 6), self.infinity); + + // Is the y-coordinate the lexicographically largest of the two associated with the + // x-coordinate? If so, set the third-most significant bit so long as this is not + // the point at infinity. + res[0] |= u8::conditional_select( + &0u8, + &(1u8 << 5), + (!self.infinity) & self.y.lexicographically_largest(), + ); + + res + } + + /// Attempts to deserialize a compressed element. See [`notes::serialization`](crate::notes::serialization) + /// for details about how group elements are serialized. + pub fn from_compressed(bytes: &[u8; 48]) -> CtOption { + // We already know the point is on the curve because this is established + // by the y-coordinate recovery procedure in from_compressed_unchecked(). 
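+        // What remains is the subgroup check: `and_then` keeps the recovered
+        // point only when `is_torsion_free()` confirms it lies in the $q$-order
+        // subgroup, and yields a `none` CtOption otherwise.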
+ + Self::from_compressed_unchecked(bytes).and_then(|p| CtOption::new(p, p.is_torsion_free())) + } + + /// Attempts to deserialize an uncompressed element, not checking if the + /// element is in the correct subgroup. + /// **This is dangerous to call unless you trust the bytes you are reading; otherwise, + /// API invariants may be broken.** Please consider using `from_compressed()` instead. + pub fn from_compressed_unchecked(bytes: &[u8; 48]) -> CtOption { + // Obtain the three flags from the start of the byte sequence + let compression_flag_set = Choice::from((bytes[0] >> 7) & 1); + let infinity_flag_set = Choice::from((bytes[0] >> 6) & 1); + let sort_flag_set = Choice::from((bytes[0] >> 5) & 1); + + // Attempt to obtain the x-coordinate + let x = { + let mut tmp = [0; 48]; + tmp.copy_from_slice(&bytes[0..48]); + + // Mask away the flag bits + tmp[0] &= 0b0001_1111; + + Fq::from_bytes(&tmp) + }; + + x.and_then(|x| { + // If the infinity flag is set, return the value assuming + // the x-coordinate is zero and the sort bit is not set. + // + // Otherwise, return a recovered point (assuming the correct + // y-coordinate can be found) so long as the infinity flag + // was not set. + CtOption::new( + G1Affine::identity(), + infinity_flag_set & // Infinity flag should be set + compression_flag_set & // Compression flag should be set + (!sort_flag_set) & // Sort flag should not be set + x.is_zero(), // The x-coordinate should be zero + ) + .or_else(|| { + // Recover a y-coordinate given x by y = sqrt(x^3 + 4) + ((x.square() * x) + G1_B).sqrt().and_then(|y| { + // Switch to the correct y-coordinate if necessary. + let y = Fq::conditional_select( + &y, + &-y, + y.lexicographically_largest() ^ sort_flag_set, + ); + + CtOption::new( + G1Affine { + x, + y, + infinity: infinity_flag_set, + }, + (!infinity_flag_set) & // Infinity flag should not be set + compression_flag_set, // Compression flag should be set + ) + }) + }) + }) + } +} + +impl G2Affine { + /// Returns true if this point is free of an $h$-torsion component, and so it + /// exists within the $q$-order subgroup $\mathbb{G}_2$. This should always return true + /// unless an "unchecked" API was used. + pub fn is_torsion_free(&self) -> Choice { + // Algorithm from Section 4 of https://eprint.iacr.org/2021/1130 + // Updated proof of correctness in https://eprint.iacr.org/2022/352 + // + // Check that psi(P) == [x] P + let p = G2::from(self); + p.psi().ct_eq(&p.mul_by_x()) + } + + fn to_compressed(self) -> [u8; 96] { + // Strictly speaking, self.x is zero already when self.infinity is true, but + // to guard against implementation mistakes we do not assume this. + let x = Fq2::conditional_select(&self.x, &Fq2::zero(), self.infinity); + + let mut res = [0; 96]; + + res[0..48].copy_from_slice(&x.c1.to_bytes()[..]); + res[48..96].copy_from_slice(&x.c0.to_bytes()[..]); + + // This point is in compressed form, so we set the most significant bit. + res[0] |= 1u8 << 7; + + // Is this point at infinity? If so, set the second-most significant bit. + res[0] |= u8::conditional_select(&0u8, &(1u8 << 6), self.infinity); + + // Is the y-coordinate the lexicographically largest of the two associated with the + // x-coordinate? If so, set the third-most significant bit so long as this is not + // the point at infinity. + res[0] |= u8::conditional_select( + &0u8, + &(1u8 << 5), + (!self.infinity) & self.y.lexicographically_largest(), + ); + + res + } + + /// Attempts to deserialize a compressed element. 
See [`notes::serialization`](crate::notes::serialization) + /// for details about how group elements are serialized. + pub fn from_compressed(bytes: &[u8; 96]) -> CtOption { + // We already know the point is on the curve because this is established + // by the y-coordinate recovery procedure in from_compressed_unchecked(). + + Self::from_compressed_unchecked(bytes).and_then(|p| CtOption::new(p, p.is_torsion_free())) + } + + /// Attempts to deserialize an uncompressed element, not checking if the + /// element is in the correct subgroup. + /// **This is dangerous to call unless you trust the bytes you are reading; otherwise, + /// API invariants may be broken.** Please consider using `from_compressed()` instead. + pub fn from_compressed_unchecked(bytes: &[u8; 96]) -> CtOption { + // Obtain the three flags from the start of the byte sequence + let compression_flag_set = Choice::from((bytes[0] >> 7) & 1); + let infinity_flag_set = Choice::from((bytes[0] >> 6) & 1); + let sort_flag_set = Choice::from((bytes[0] >> 5) & 1); + + // Attempt to obtain the x-coordinate + let xc1 = { + let mut tmp = [0; 48]; + tmp.copy_from_slice(&bytes[0..48]); + + // Mask away the flag bits + tmp[0] &= 0b0001_1111; + + Fq::from_bytes(&tmp) + }; + let xc0 = { + let mut tmp = [0; 48]; + tmp.copy_from_slice(&bytes[48..96]); + + Fq::from_bytes(&tmp) + }; + + xc1.and_then(|xc1| { + xc0.and_then(|xc0| { + let x = Fq2 { c0: xc0, c1: xc1 }; + + // If the infinity flag is set, return the value assuming + // the x-coordinate is zero and the sort bit is not set. + // + // Otherwise, return a recovered point (assuming the correct + // y-coordinate can be found) so long as the infinity flag + // was not set. + CtOption::new( + G2Affine::identity(), + infinity_flag_set & // Infinity flag should be set + compression_flag_set & // Compression flag should be set + (!sort_flag_set) & // Sort flag should not be set + x.is_zero(), // The x-coordinate should be zero + ) + .or_else(|| { + // Recover a y-coordinate given x by y = sqrt(x^3 + 4) + ((x.square() * x) + G2_B).sqrt().and_then(|y| { + // Switch to the correct y-coordinate if necessary. 
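+                    // `sqrt()` returns one fixed root of the two candidates; the
+                    // sort flag recorded at serialization time says whether the
+                    // original y was the lexicographically larger root, so flip
+                    // to -y in constant time whenever the recovered root
+                    // disagrees with that flag.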
+ let y = Fq2::conditional_select( + &y, + &-y, + y.lexicographically_largest() ^ sort_flag_set, + ); + + CtOption::new( + G2Affine { + x, + y, + infinity: infinity_flag_set, + }, + (!infinity_flag_set) & // Infinity flag should not be set + compression_flag_set, // Compression flag should be set + ) + }) + }) + }) + }) + } +} diff --git a/src/bls12_381/engine.rs b/src/bls12_381/engine.rs new file mode 100644 index 00000000..e9ae1cad --- /dev/null +++ b/src/bls12_381/engine.rs @@ -0,0 +1,1769 @@ +#![allow(clippy::suspicious_arithmetic_impl)] +use crate::bls12_381::curve::*; +use crate::bls12_381::fq12::*; +use crate::bls12_381::fq2::*; +use crate::bls12_381::fq6::*; +use crate::bls12_381::fr::*; +use crate::pairing::{Engine, MillerLoopResult, MultiMillerLoop, PairingCurveAffine}; +use core::borrow::Borrow; +use core::iter::Sum; +use core::ops::{Add, Mul, MulAssign, Neg, Sub}; +use ff::{Field, PrimeField}; +use group::cofactor::CofactorCurveAffine; +use group::Group; +use rand_core::RngCore; +use subtle::{Choice, ConditionallySelectable, ConstantTimeEq}; + +pub const BLS_X: u64 = 0xd201_0000_0001_0000; +pub const BLS_X_IS_NEGATIVE: bool = true; + +impl PairingCurveAffine for G1Affine { + type Pair = G2Affine; + type PairingResult = Gt; + + fn pairing_with(&self, other: &Self::Pair) -> Self::PairingResult { + pairing(self, other) + } +} + +impl PairingCurveAffine for G2Affine { + type Pair = G1Affine; + type PairingResult = Gt; + + fn pairing_with(&self, other: &Self::Pair) -> Self::PairingResult { + pairing(other, self) + } +} + +#[derive(Copy, Clone, Debug, Default)] +pub struct Gt(pub(crate) Fq12); + +impl std::fmt::Display for Gt { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{self:?}") + } +} + +impl ConstantTimeEq for Gt { + fn ct_eq(&self, other: &Self) -> Choice { + self.0.ct_eq(&other.0) + } +} + +impl ConditionallySelectable for Gt { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Gt(Fq12::conditional_select(&a.0, &b.0, choice)) + } +} + +impl Eq for Gt {} +impl PartialEq for Gt { + #[inline] + fn eq(&self, other: &Self) -> bool { + bool::from(self.ct_eq(other)) + } +} + +impl Gt { + /// Returns the group identity, which is $1$. + pub fn identity() -> Gt { + Gt(Fq12::one()) + } + + /// Doubles this group element. + pub fn double(&self) -> Gt { + Gt(self.0.square()) + } +} + +impl<'a> Neg for &'a Gt { + type Output = Gt; + + #[inline] + fn neg(self) -> Gt { + // The element is unitary, so we just conjugate. 
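+        // Conjugation on Fq12 is the q^6-power Frobenius, and every element of
+        // Gt has norm 1 over Fq6 (it is an output of the final exponentiation),
+        // so its conjugate equals its inverse; in the additive notation used
+        // for Gt that is exactly negation.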
+ let mut u = self.0; + u.conjugate(); + Gt(u) + } +} + +impl Neg for Gt { + type Output = Gt; + + #[inline] + fn neg(self) -> Gt { + -&self + } +} + +impl<'a, 'b> Add<&'b Gt> for &'a Gt { + type Output = Gt; + + #[inline] + fn add(self, rhs: &'b Gt) -> Gt { + Gt(self.0 * rhs.0) + } +} + +impl<'a, 'b> Sub<&'b Gt> for &'a Gt { + type Output = Gt; + + #[inline] + fn sub(self, rhs: &'b Gt) -> Gt { + self + (-rhs) + } +} + +impl<'a, 'b> Mul<&'b Fr> for &'a Gt { + type Output = Gt; + + fn mul(self, other: &'b Fr) -> Self::Output { + let mut acc = Gt::identity(); + + for bit in other + .to_repr() + .iter() + .rev() + .flat_map(|byte| (0..8).rev().map(move |i| Choice::from((byte >> i) & 1u8))) + .skip(1) + { + acc = acc.double(); + acc = Gt::conditional_select(&acc, &(acc + self), bit); + } + + acc + } +} + +use crate::{ + impl_add_binop_specify_output, impl_binops_additive, impl_binops_additive_specify_output, + impl_binops_multiplicative, impl_binops_multiplicative_mixed, impl_sub_binop_specify_output, +}; +impl_binops_additive!(Gt, Gt); +impl_binops_multiplicative!(Gt, Fr); + +impl Sum for Gt +where + T: Borrow, +{ + fn sum(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::identity(), |acc, item| acc + item.borrow()) + } +} + +impl Group for Gt { + type Scalar = Fr; + + fn random(_: impl RngCore) -> Self { + unimplemented!(); + } + + fn identity() -> Self { + Self::identity() + } + + fn generator() -> Self { + unimplemented!(); + } + + fn is_identity(&self) -> Choice { + self.ct_eq(&Self::identity()) + } + + #[must_use] + fn double(&self) -> Self { + self.double() + } +} + +#[derive(Clone, Debug)] +pub struct G2Prepared { + pub(crate) coeffs: Vec<(Fq2, Fq2, Fq2)>, + pub(crate) infinity: bool, +} + +impl G2Prepared { + pub fn is_zero(&self) -> bool { + self.infinity + } +} + +impl From for G2Prepared { + fn from(q: G2Affine) -> G2Prepared { + struct Adder { + cur: G2, + base: G2Affine, + coeffs: Vec<(Fq2, Fq2, Fq2)>, + } + + impl MillerLoopDriver for Adder { + type Output = (); + + fn doubling_step(&mut self, _: Self::Output) -> Self::Output { + let coeffs = doubling_step(&mut self.cur); + self.coeffs.push(coeffs); + } + fn addition_step(&mut self, _: Self::Output) -> Self::Output { + let coeffs = addition_step(&mut self.cur, &self.base); + self.coeffs.push(coeffs); + } + fn square_output(_: Self::Output) -> Self::Output {} + fn conjugate(_: Self::Output) -> Self::Output {} + fn one() -> Self::Output {} + } + + let is_identity = q.is_identity(); + let q = G2Affine::conditional_select(&q, &G2Affine::generator(), is_identity); + + let mut adder = Adder { + cur: G2::from(q), + base: q, + coeffs: Vec::with_capacity(68), + }; + + miller_loop_bls12_381(&mut adder); + + assert_eq!(adder.coeffs.len(), 68); + + G2Prepared { + infinity: is_identity.into(), + coeffs: adder.coeffs, + } + } +} + +impl MillerLoopResult for Gt { + type Gt = Self; + fn final_exponentiation(&self) -> Gt { + fn exp_by_x(f: &mut Fq12) { + let x = BLS_X; + let mut res = Fq12::one(); + for i in (0..64).rev() { + res.cyclotomic_square(); + if ((x >> i) & 1) == 1 { + res.mul_assign(f); + } + } + *f = res; + } + + let r = self.0; + let mut f1 = self.0; + f1.conjugate(); + + Gt(r.invert() + .map(|mut f2| { + let mut r = f1; + r.mul_assign(&f2); + f2 = r; + r.frobenius_map(2); + r.mul_assign(&f2); + + let mut fp = r; + fp.frobenius_map(1); + + let mut fp2 = r; + fp2.frobenius_map(2); + let mut fp3 = fp2; + fp3.frobenius_map(1); + + let mut fu = r; + exp_by_x(&mut fu); + + let mut fu2 = fu; + exp_by_x(&mut fu2); + + let mut 
fu3 = fu2; + exp_by_x(&mut fu3); + + let mut y3 = fu; + y3.frobenius_map(1); + + let mut fu2p = fu2; + fu2p.frobenius_map(1); + + let mut fu3p = fu3; + fu3p.frobenius_map(1); + + let mut y2 = fu2; + y2.frobenius_map(2); + + let mut y0 = fp; + y0.mul_assign(&fp2); + y0.mul_assign(&fp3); + + let mut y1 = r; + y1.conjugate(); + + let mut y5 = fu2; + y5.conjugate(); + + y3.conjugate(); + + let mut y4 = fu; + y4.mul_assign(&fu2p); + y4.conjugate(); + + let mut y6 = fu3; + y6.mul_assign(&fu3p); + y6.conjugate(); + + y6.cyclotomic_square(); + y6.mul_assign(&y4); + y6.mul_assign(&y5); + + let mut t1 = y3; + t1.mul_assign(&y5); + t1.mul_assign(&y6); + + y6.mul_assign(&y2); + + t1.cyclotomic_square(); + t1.mul_assign(&y6); + t1.cyclotomic_square(); + + let mut t0 = t1; + t0.mul_assign(&y1); + + t1.mul_assign(&y0); + + t0.cyclotomic_square(); + t0.mul_assign(&t1); + + t0 + }) + .unwrap()) + } +} + +#[derive(Copy, Clone, Debug)] +pub struct MillerLoopResultBls12381(pub(crate) Fq12); + +impl Default for MillerLoopResultBls12381 { + fn default() -> Self { + MillerLoopResultBls12381(Fq12::one()) + } +} + +impl ConditionallySelectable for MillerLoopResultBls12381 { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + MillerLoopResultBls12381(Fq12::conditional_select(&a.0, &b.0, choice)) + } +} + +impl MillerLoopResultBls12381 { + /// This performs a "final exponentiation" routine to convert the result + /// of a Miller loop into an element of `Gt` with help of efficient squaring + /// operation in the so-called `cyclotomic subgroup` of `Fq6` so that + /// it can be compared with other elements of `Gt`. + pub fn final_exponentiation_bls12_381(&self) -> Gt { + #[must_use] + fn fp4_square(a: Fq2, b: Fq2) -> (Fq2, Fq2) { + let t0 = a.square(); + let t1 = b.square(); + let mut t2 = t1.mul_by_nonresidue_bls12_381(); + let c0 = t2 + t0; + t2 = a + b; + t2 = t2.square(); + t2 -= t0; + let c1 = t2 - t1; + + (c0, c1) + } + + // Adaptation of Algorithm 5.5.4, Guide to Pairing-Based Cryptography + // Faster Squaring in the Cyclotomic Subgroup of Sixth Degree Extensions + // https://eprint.iacr.org/2009/565.pdf + #[must_use] + fn cyclotomic_square(f: Fq12) -> Fq12 { + let mut z0 = f.c0.c0; + let mut z4 = f.c0.c1; + let mut z3 = f.c0.c2; + let mut z2 = f.c1.c0; + let mut z1 = f.c1.c1; + let mut z5 = f.c1.c2; + + let (t0, t1) = fp4_square(z0, z1); + + // For A + z0 = t0 - z0; + z0 = z0 + z0 + t0; + + z1 = t1 + z1; + z1 = z1 + z1 + t1; + + let (mut t0, t1) = fp4_square(z2, z3); + let (t2, t3) = fp4_square(z4, z5); + + // For C + z4 = t0 - z4; + z4 = z4 + z4 + t0; + + z5 = t1 + z5; + z5 = z5 + z5 + t1; + + // For B + t0 = t3.mul_by_nonresidue_bls12_381(); + z2 = t0 + z2; + z2 = z2 + z2 + t0; + + z3 = t2 - z3; + z3 = z3 + z3 + t2; + + Fq12 { + c0: Fq6 { + c0: z0, + c1: z4, + c2: z3, + }, + c1: Fq6 { + c0: z2, + c1: z1, + c2: z5, + }, + } + } + + #[must_use] + fn cycolotomic_exp(f: Fq12) -> Fq12 { + let x = BLS_X; + let mut tmp = Fq12::one(); + let mut found_one = false; + for i in (0..64).rev().map(|b| ((x >> b) & 1) == 1) { + if found_one { + tmp = cyclotomic_square(tmp) + } else { + found_one = i; + } + + if i { + tmp *= f; + } + } + + tmp.conjugate_ret() + } + + let mut f = self.0; + let mut t0 = f; + t0.frobenius_map(6); + + Gt(f.invert() + .map(|mut t1| { + let mut t2 = t0 * t1; + t1 = t2; + + let mut t2_temp = t2; + t2_temp.frobenius_map(2); + t2 = t2_temp; + + t2 *= t1; + t1 = cyclotomic_square(t2).conjugate_ret(); + let mut t3 = cycolotomic_exp(t2); + let mut t4 = cyclotomic_square(t3); + let mut 
t5 = t1 * t3; + t1 = cycolotomic_exp(t5); + t0 = cycolotomic_exp(t1); + let mut t6 = cycolotomic_exp(t0); + t6 *= t4; + t4 = cycolotomic_exp(t6); + t5 = t5.conjugate_ret(); + t4 *= t5 * t2; + t5 = t2.conjugate_ret(); + t1 *= t2; + + let mut t1_temp = t1; + t1_temp.frobenius_map(3); + t1 = t1_temp; + + t6 *= t5; + + let mut t6_temp = t6; + t6_temp.frobenius_map(1); + t6 = t6_temp; + + t3 *= t0; + + let mut t3_temp = t3; + t3_temp.frobenius_map(2); + t3 = t3_temp; + + t3 *= t1; + t3 *= t6; + f = t3 * t4; + + f + }) + // We unwrap() because `MillerLoopResult` can only be constructed + // by a function within this crate, and we uphold the invariant + // that the enclosed value is nonzero. + .unwrap()) + } +} + +pub fn multi_miller_loop(terms: &[(&G1Affine, &G2Prepared)]) -> Gt { + let mut pairs = vec![]; + for &(p, q) in terms { + if !bool::from(p.is_identity()) && !q.is_zero() { + pairs.push((p, q.coeffs.iter())); + } + } + + // Final steps of the line function on prepared coefficients + fn ell(f: &mut Fq12, coeffs: &(Fq2, Fq2, Fq2), p: &G1Affine) { + let mut c0 = coeffs.0; + let mut c1 = coeffs.1; + + c0.c0.mul_assign(&p.y); + c0.c1.mul_assign(&p.y); + + c1.c0.mul_assign(&p.x); + c1.c1.mul_assign(&p.x); + + // Sparse multiplication in Fq12 + f.mul_by_034(&c0, &c1, &coeffs.2); + } + + let mut f = Fq12::one(); + + let mut found_one = false; + for i in BitIterator::new(&[BLS_X >> 1]) { + if !found_one { + found_one = i; + continue; + } + + for &mut (p, ref mut coeffs) in &mut pairs { + ell(&mut f, coeffs.next().unwrap(), p); + } + + if i { + for &mut (p, ref mut coeffs) in &mut pairs { + ell(&mut f, coeffs.next().unwrap(), p); + } + } + + f.square(); + } + + for &mut (p, ref mut coeffs) in &mut pairs { + ell(&mut f, coeffs.next().unwrap(), p); + } + + if BLS_X_IS_NEGATIVE { + f.conjugate(); + } + + Gt(f) +} + +/// Computes $$\sum_{i=1}^n \textbf{ML}(a_i, b_i)$$ given a series of terms +/// $$(a_1, b_1), (a_2, b_2), ..., (a_n, b_n).$$ +/// +/// Requires the `alloc` and `pairing` crate features to be enabled. 
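+/// For a single pair the call can look like the following sketch, where
+/// `g1: G1Affine` and `g2: G2Affine` stand in for the points being paired:
+///
+///     let prep = G2Prepared::from(g2);
+///     let gt = multi_miller_loop_bls12_381(&[(&g1, &prep)])
+///         .final_exponentiation_bls12_381();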
+pub fn multi_miller_loop_bls12_381(terms: &[(&G1Affine, &G2Prepared)]) -> MillerLoopResultBls12381 { + struct Adder<'a, 'b, 'c> { + terms: &'c [(&'a G1Affine, &'b G2Prepared)], + index: usize, + } + + impl<'a, 'b, 'c> MillerLoopDriver for Adder<'a, 'b, 'c> { + type Output = Fq12; + + fn doubling_step(&mut self, mut f: Self::Output) -> Self::Output { + let index = self.index; + for term in self.terms { + let either_identity = term.0.is_identity() | Choice::from(term.1.infinity as u8); + + let new_f = ell(f, &term.1.coeffs[index], term.0); + f = Fq12::conditional_select(&new_f, &f, either_identity); + } + self.index += 1; + + f + } + fn addition_step(&mut self, mut f: Self::Output) -> Self::Output { + let index = self.index; + for term in self.terms { + let either_identity = term.0.is_identity() | Choice::from(term.1.infinity as u8); + + let new_f = ell(f, &term.1.coeffs[index], term.0); + f = Fq12::conditional_select(&new_f, &f, either_identity); + } + self.index += 1; + + f + } + fn square_output(f: Self::Output) -> Self::Output { + f.square() + } + fn conjugate(f: Self::Output) -> Self::Output { + f.conjugate_ret() + } + fn one() -> Self::Output { + Fq12::one() + } + } + + let mut adder = Adder { terms, index: 0 }; + + let tmp = miller_loop_bls12_381(&mut adder); + + MillerLoopResultBls12381(tmp) +} + +pub fn pairing(g1: &G1Affine, g2: &G2Affine) -> Gt { + let g2 = G2Prepared::from(*g2); + let terms: &[(&G1Affine, &G2Prepared)] = &[(g1, &g2)]; + let u = multi_miller_loop(terms); + u.final_exponentiation() +} + +pub fn pairing_bls12_381(p: &G1Affine, q: &G2Affine) -> Gt { + struct Adder { + cur: G2, + base: G2Affine, + p: G1Affine, + } + + impl MillerLoopDriver for Adder { + type Output = Fq12; + + fn doubling_step(&mut self, f: Self::Output) -> Self::Output { + let coeffs = doubling_step(&mut self.cur); + ell(f, &coeffs, &self.p) + } + fn addition_step(&mut self, f: Self::Output) -> Self::Output { + let coeffs = addition_step(&mut self.cur, &self.base); + ell(f, &coeffs, &self.p) + } + fn square_output(f: Self::Output) -> Self::Output { + f.square() + } + fn conjugate(f: Self::Output) -> Self::Output { + f.conjugate_ret() + } + fn one() -> Self::Output { + Fq12::one() + } + } + + let either_identity = p.is_identity() | q.is_identity(); + let p = G1Affine::conditional_select(p, &G1Affine::generator(), either_identity); + let q = G2Affine::conditional_select(q, &G2Affine::generator(), either_identity); + + let mut adder = Adder { + cur: G2::from(q), + base: q, + p, + }; + + let tmp = miller_loop_bls12_381(&mut adder); + let tmp = MillerLoopResultBls12381(Fq12::conditional_select( + &tmp, + &Fq12::one(), + either_identity, + )); + tmp.final_exponentiation_bls12_381() +} + +trait MillerLoopDriver { + type Output; + + fn doubling_step(&mut self, f: Self::Output) -> Self::Output; + fn addition_step(&mut self, f: Self::Output) -> Self::Output; + fn square_output(f: Self::Output) -> Self::Output; + fn conjugate(f: Self::Output) -> Self::Output; + fn one() -> Self::Output; +} + +/// This is a "generic" implementation of the Miller loop to avoid duplicating code +/// structure elsewhere; instead, we'll write concrete instantiations of +/// `MillerLoopDriver` for whatever purposes we need (such as caching modes). 
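+/// The driver only sees the shape of the loop: after the leading one of
+/// `BLS_X >> 1`, each iteration performs a `doubling_step` (plus an
+/// `addition_step` when the bit is set) followed by a squaring of the
+/// accumulator, with one final `doubling_step` and a conjugation because
+/// `BLS_X` is negative. The `G2Prepared` adder records line coefficients at
+/// each step, while `pairing_bls12_381` evaluates them against the G1 point.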
+fn miller_loop_bls12_381(driver: &mut D) -> D::Output { + let mut f = D::one(); + + let mut found_one = false; + for i in (0..64).rev().map(|b| (((BLS_X >> 1) >> b) & 1) == 1) { + if !found_one { + found_one = i; + continue; + } + + f = driver.doubling_step(f); + + if i { + f = driver.addition_step(f); + } + + f = D::square_output(f); + } + + f = driver.doubling_step(f); + + if BLS_X_IS_NEGATIVE { + f = D::conjugate(f); + } + + f +} + +fn ell(f: Fq12, coeffs: &(Fq2, Fq2, Fq2), p: &G1Affine) -> Fq12 { + let mut c0 = coeffs.0; + let mut c1 = coeffs.1; + + c0.c0 *= p.y; + c0.c1 *= p.y; + + c1.c0 *= p.x; + c1.c1 *= p.x; + + f.mul_by_014_bls12_381(&coeffs.2, &c1, &c0) +} + +fn addition_step(r: &mut G2, q: &G2Affine) -> (Fq2, Fq2, Fq2) { + // Adaptation of Algorithm 27, https://eprint.iacr.org/2010/354.pdf + let zsquared = r.z.square(); + let ysquared = q.y.square(); + let t0 = zsquared * q.x; + let t1 = ((q.y + r.z).square() - ysquared - zsquared) * zsquared; + let t2 = t0 - r.x; + let t3 = t2.square(); + let t4 = t3 + t3; + let t4 = t4 + t4; + let t5 = t4 * t2; + let t6 = t1 - r.y - r.y; + let t9 = t6 * q.x; + let t7 = t4 * r.x; + r.x = t6.square() - t5 - t7 - t7; + r.z = (r.z + t2).square() - zsquared - t3; + let t10 = q.y + r.z; + let t8 = (t7 - r.x) * t6; + let t0 = r.y * t5; + let t0 = t0 + t0; + r.y = t8 - t0; + let t10 = t10.square() - ysquared; + let ztsquared = r.z.square(); + let t10 = t10 - ztsquared; + let t9 = t9 + t9 - t10; + let t10 = r.z + r.z; + let t6 = -t6; + let t1 = t6 + t6; + + (t10, t1, t9) +} + +fn doubling_step(r: &mut G2) -> (Fq2, Fq2, Fq2) { + // Adaptation of Algorithm 26, https://eprint.iacr.org/2010/354.pdf + let tmp0 = r.x.square(); + let tmp1 = r.y.square(); + let tmp2 = tmp1.square(); + let tmp3 = (tmp1 + r.x).square() - tmp0 - tmp2; + let tmp3 = tmp3 + tmp3; + let tmp4 = tmp0 + tmp0 + tmp0; + let tmp6 = r.x + tmp4; + let tmp5 = tmp4.square(); + let zsquared = r.z.square(); + r.x = tmp5 - tmp3 - tmp3; + r.z = (r.z + r.y).square() - tmp1 - zsquared; + r.y = (tmp3 - r.x) * tmp4; + let tmp2 = tmp2 + tmp2; + let tmp2 = tmp2 + tmp2; + let tmp2 = tmp2 + tmp2; + r.y -= tmp2; + let tmp3 = tmp4 * zsquared; + let tmp3 = tmp3 + tmp3; + let tmp3 = -tmp3; + let tmp6 = tmp6.square() - tmp0 - tmp5; + let tmp1 = tmp1 + tmp1; + let tmp1 = tmp1 + tmp1; + let tmp6 = tmp6 - tmp1; + let tmp0 = r.z * zsquared; + let tmp0 = tmp0 + tmp0; + + (tmp0, tmp3, tmp6) +} + +#[derive(Clone, Debug)] +pub struct Bls12_381; + +impl Engine for Bls12_381 { + type Scalar = Fr; + type G1 = G1; + type G1Affine = G1Affine; + type G2 = G2; + type G2Affine = G2Affine; + type Gt = Gt; + + fn pairing(p: &Self::G1Affine, q: &Self::G2Affine) -> Self::Gt { + pairing(p, q) + } +} + +impl MultiMillerLoop for Bls12_381 { + type G2Prepared = G2Prepared; + type Result = Gt; + + fn multi_miller_loop(terms: &[(&Self::G1Affine, &Self::G2Prepared)]) -> Self::Result { + multi_miller_loop(terms) + } +} + +#[derive(Debug)] +pub struct BitIterator { + t: E, + n: usize, +} + +impl> BitIterator { + pub fn new(t: E) -> Self { + let n = t.as_ref().len() * 64; + + BitIterator { t, n } + } +} + +impl> Iterator for BitIterator { + type Item = bool; + + fn next(&mut self) -> Option { + if self.n == 0 { + None + } else { + self.n -= 1; + let part = self.n / 64; + let bit = self.n - (64 * part); + + Some(self.t.as_ref()[part] & (1 << bit) > 0) + } + } +} + +#[cfg(test)] +mod test { + use crate::bls12_381::fq::*; + use crate::bls12_381::Fq6; + + use super::*; + use pasta_curves::arithmetic::CurveExt; + use 
pretty_assertions::assert_eq; + use rand::SeedableRng; + use rand_xorshift::XorShiftRng; + + #[test] + fn test_double_g1() { + { + let tmp = G1::identity().double(); + assert!(bool::from(tmp.is_identity())); + assert!(bool::from(tmp.is_on_curve())); + } + { + let tmp = G1::generator().double(); + assert!(!bool::from(tmp.is_identity())); + assert!(bool::from(tmp.is_on_curve())); + + assert_eq!( + G1Affine::from(tmp), + G1Affine { + x: Fq::from_raw_unchecked([ + 0x53e9_78ce_58a9_ba3c, + 0x3ea0_583c_4f3d_65f9, + 0x4d20_bb47_f001_2960, + 0xa54c_664a_e5b2_b5d9, + 0x26b5_52a3_9d7e_b21f, + 0x0008_895d_26e6_8785, + ]), + y: Fq::from_raw_unchecked([ + 0x7011_0b32_9829_3940, + 0xda33_c539_3f1f_6afc, + 0xb86e_dfd1_6a5a_a785, + 0xaec6_d1c9_e7b1_c895, + 0x25cf_c2b5_22d1_1720, + 0x0636_1c83_f8d0_9b15, + ]), + infinity: Choice::from(0u8), + } + ); + } + } + + #[test] + fn test_double_g2() { + { + let tmp = G2::identity().double(); + assert!(bool::from(tmp.is_identity())); + assert!(bool::from(tmp.is_on_curve())); + } + { + let tmp = G2::generator().double(); + assert!(!bool::from(tmp.is_identity())); + assert!(bool::from(tmp.is_on_curve())); + + assert_eq!( + G2Affine::from(tmp), + G2Affine { + x: Fq2 { + c0: Fq::from_raw_unchecked([ + 0xe9d9_e2da_9620_f98b, + 0x54f1_1993_46b9_7f36, + 0x3db3_b820_376b_ed27, + 0xcfdb_31c9_b0b6_4f4c, + 0x41d7_c127_8635_4493, + 0x0571_0794_c255_c064, + ]), + c1: Fq::from_raw_unchecked([ + 0xd6c1_d3ca_6ea0_d06e, + 0xda0c_bd90_5595_489f, + 0x4f53_52d4_3479_221d, + 0x8ade_5d73_6f8c_97e0, + 0x48cc_8433_925e_f70e, + 0x08d7_ea71_ea91_ef81, + ]), + }, + y: Fq2 { + c0: Fq::from_raw_unchecked([ + 0x15ba_26eb_4b0d_186f, + 0x0d08_6d64_b7e9_e01e, + 0xc8b8_48dd_652f_4c78, + 0xeecf_46a6_123b_ae4f, + 0x255e_8dd8_b6dc_812a, + 0x1641_42af_21dc_f93f, + ]), + c1: Fq::from_raw_unchecked([ + 0xf9b4_a1a8_9598_4db4, + 0xd417_b114_cccf_f748, + 0x6856_301f_c89f_086e, + 0x41c7_7787_8931_e3da, + 0x3556_b155_066a_2105, + 0x00ac_f7d3_25cb_89cf, + ]), + }, + infinity: Choice::from(0u8), + } + ); + } + } + + // TODO Generalize pairing (from BN254) with BLS12-381 + // #[test] + // fn test_pairing_1() { + // let g1 = G1::generator(); + // let mut g2 = G2::generator(); + // g2 = g2.double(); + // let g1_affine = G1Affine::from(g1); + // let g2_affine = G2Affine::from(g2); + // let pair12 = Bls12_381::pairing(&g1_affine, &g2_affine); + + // let mut g3 = G1::generator(); + // let g4 = G2::generator(); + // g3 = g3.double(); + // let pair21 = Bls12_381::pairing(&G1Affine::from(g3), &G2Affine::from(g4)); + + // assert_eq!(pair12, pair21); + // } + + #[test] + fn test_pairing_1_bls12_381() { + let g1 = G1::generator(); + let mut g2 = G2::generator(); + g2 = g2.double(); + let pair12 = pairing_bls12_381(&G1Affine::from(g1), &G2Affine::from(g2)); + + let mut g3 = G1::generator(); + let g4 = G2::generator(); + g3 = g3.double(); + let pair21 = pairing_bls12_381(&G1Affine::from(g3), &G2Affine::from(g4)); + + assert_eq!(pair12, pair21); + } + + // TODO Generalize pairing (from BN254) with BLS12-381 + // #[test] + // fn test_pairing_2() { + // let g1 = G1::generator(); + // let mut g2 = G2::generator(); + // g2 = g2.double().double(); + // let pair12 = Bls12_381::pairing(&G1Affine::from(g1), &G2Affine::from(g2)); + + // let mut g1 = G1::generator(); + // let mut g2 = G2::generator(); + // g1 = g1.double(); + // g2 = g2.double(); + // let pair21 = Bls12_381::pairing(&G1Affine::from(g1), &G2Affine::from(g2)); + + // assert_eq!(pair12, pair21); + // } + + #[test] + fn test_pairing_2_bls12_381() { + let g1 = 
G1::generator(); + let mut g2 = G2::generator(); + g2 = g2.double().double(); + let pair12 = pairing_bls12_381(&G1Affine::from(g1), &G2Affine::from(g2)); + + let mut g1 = G1::generator(); + let mut g2 = G2::generator(); + g1 = g1.double(); + g2 = g2.double(); + let pair21 = pairing_bls12_381(&G1Affine::from(g1), &G2Affine::from(g2)); + + assert_eq!(pair12, pair21); + } + + #[test] + fn test_pairing_3() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 0xbc, 0xe5, + ]); + for _ in 0..1000 { + let a = Fr::random(&mut rng); + let b = Fr::random(&mut rng); + + let mut g1 = G1::generator(); + g1.mul_assign(a); + + let mut g2 = G2::generator(); + g1.mul_assign(b); + + let pair_ab = Bls12_381::pairing(&G1Affine::from(g1), &G2Affine::from(g2)); + + g1 = G1::generator(); + g1.mul_assign(b); + + g2 = G2::generator(); + g1.mul_assign(a); + + let pair_ba = Bls12_381::pairing(&G1Affine::from(g1), &G2Affine::from(g2)); + + assert_eq!(pair_ab, pair_ba); + } + } + + #[test] + fn test_pairing_3_bls12_381() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 0xbc, 0xe5, + ]); + for _ in 0..1000 { + let a = Fr::random(&mut rng); + let b = Fr::random(&mut rng); + + let mut g1 = G1::generator(); + g1.mul_assign(a); + + let mut g2 = G2::generator(); + g1.mul_assign(b); + + let pair_ab = pairing_bls12_381(&G1Affine::from(g1), &G2Affine::from(g2)); + + g1 = G1::generator(); + g1.mul_assign(b); + + g2 = G2::generator(); + g1.mul_assign(a); + + let pair_ba = pairing_bls12_381(&G1Affine::from(g1), &G2Affine::from(g2)); + + assert_eq!(pair_ab, pair_ba); + } + } + + // TODO Generalize pairing (from BN254) with BLS12-381 + // #[test] + // fn test_pairing_against_relic() { + // let a = G1Affine::generator(); + // let b = G2Affine::generator(); + + // let res = Bls12_381::pairing(&a, &b); + // println!("res: {:#?}", res); + // let prep = G2Prepared::from(b); + // let res_miller_loop = multi_miller_loop(&[(&a, &prep)]); + // println!("res_miller_loop: {:#?}", res_miller_loop); + // assert_eq!(res, res_miller_loop.final_exponentiation()); + + // assert_eq!( + // res.0, + // Fq12 { + // c0: Fq6 { + // c0: Fq2 { + // c0: Fq::from_raw_unchecked([ + // 0x1972_e433_a01f_85c5, + // 0x97d3_2b76_fd77_2538, + // 0xc8ce_546f_c96b_cdf9, + // 0xcef6_3e73_66d4_0614, + // 0xa611_3427_8184_3780, + // 0x13f3_448a_3fc6_d825, + // ]), + // c1: Fq::from_raw_unchecked([ + // 0xd263_31b0_2e9d_6995, + // 0x9d68_a482_f779_7e7d, + // 0x9c9b_2924_8d39_ea92, + // 0xf480_1ca2_e131_07aa, + // 0xa16c_0732_bdbc_b066, + // 0x083c_a4af_ba36_0478, + // ]) + // }, + // c1: Fq2 { + // c0: Fq::from_raw_unchecked([ + // 0x59e2_61db_0916_b641, + // 0x2716_b6f4_b23e_960d, + // 0xc8e5_5b10_a0bd_9c45, + // 0x0bdb_0bd9_9c4d_eda8, + // 0x8cf8_9ebf_57fd_aac5, + // 0x12d6_b792_9e77_7a5e, + // ]), + // c1: Fq::from_raw_unchecked([ + // 0x5fc8_5188_b0e1_5f35, + // 0x34a0_6e3a_8f09_6365, + // 0xdb31_26a6_e02a_d62c, + // 0xfc6f_5aa9_7d9a_990b, + // 0xa12f_55f5_eb89_c210, + // 0x1723_703a_926f_8889, + // ]) + // }, + // c2: Fq2 { + // c0: Fq::from_raw_unchecked([ + // 0x9358_8f29_7182_8778, + // 0x43f6_5b86_11ab_7585, + // 0x3183_aaf5_ec27_9fdf, + // 0xfa73_d7e1_8ac9_9df6, + // 0x64e1_76a6_a64c_99b0, + // 0x179f_a78c_5838_8f1f, + // ]), + // c1: Fq::from_raw_unchecked([ + // 0x672a_0a11_ca2a_ef12, + // 0x0d11_b9b5_2aa3_f16b, + // 0xa444_12d0_699d_056e, + // 0xc01d_0177_221a_5ba5, + // 0x66e0_cede_6c73_5529, + // 
0x05f5_a71e_9fdd_c339, + // ]) + // } + // }, + // c1: Fq6 { + // c0: Fq2 { + // c0: Fq::from_raw_unchecked([ + // 0xd30a_88a1_b062_c679, + // 0x5ac5_6a5d_35fc_8304, + // 0xd0c8_34a6_a81f_290d, + // 0xcd54_30c2_da37_07c7, + // 0xf0c2_7ff7_8050_0af0, + // 0x0924_5da6_e2d7_2eae, + // ]), + // c1: Fq::from_raw_unchecked([ + // 0x9f2e_0676_791b_5156, + // 0xe2d1_c823_4918_fe13, + // 0x4c9e_459f_3c56_1bf4, + // 0xa3e8_5e53_b9d3_e3c1, + // 0x820a_121e_21a7_0020, + // 0x15af_6183_41c5_9acc, + // ]) + // }, + // c1: Fq2 { + // c0: Fq::from_raw_unchecked([ + // 0x7c95_658c_2499_3ab1, + // 0x73eb_3872_1ca8_86b9, + // 0x5256_d749_4774_34bc, + // 0x8ba4_1902_ea50_4a8b, + // 0x04a3_d3f8_0c86_ce6d, + // 0x18a6_4a87_fb68_6eaa, + // ]), + // c1: Fq::from_raw_unchecked([ + // 0xbb83_e71b_b920_cf26, + // 0x2a52_77ac_92a7_3945, + // 0xfc0e_e59f_94f0_46a0, + // 0x7158_cdf3_7860_58f7, + // 0x7cc1_061b_82f9_45f6, + // 0x03f8_47aa_9fdb_e567, + // ]) + // }, + // c2: Fq2 { + // c0: Fq::from_raw_unchecked([ + // 0x8078_dba5_6134_e657, + // 0x1cd7_ec9a_4399_8a6e, + // 0xb1aa_599a_1a99_3766, + // 0xc9a0_f62f_0842_ee44, + // 0x8e15_9be3_b605_dffa, + // 0x0c86_ba0d_4af1_3fc2, + // ]), + // c1: Fq::from_raw_unchecked([ + // 0xe80f_f2a0_6a52_ffb1, + // 0x7694_ca48_721a_906c, + // 0x7583_183e_03b0_8514, + // 0xf567_afdd_40ce_e4e2, + // 0x9a6d_96d2_e526_a5fc, + // 0x197e_9f49_861f_2242, + // ]) + // } + // } + // } + // ); + // } + + #[test] + fn test_pairing_against_relic_bls12_381() { + let a = G1Affine::generator(); + let b = G2Affine::generator(); + + let res = pairing_bls12_381(&a, &b); + let prep = G2Prepared::from(b); + let res_miller_loop = multi_miller_loop_bls12_381(&[(&a, &prep)]); + assert_eq!(res, res_miller_loop.final_exponentiation_bls12_381()); + assert_eq!( + res.0, + Fq12 { + c0: Fq6 { + c0: Fq2 { + c0: Fq::from_raw_unchecked([ + 0x1972_e433_a01f_85c5, + 0x97d3_2b76_fd77_2538, + 0xc8ce_546f_c96b_cdf9, + 0xcef6_3e73_66d4_0614, + 0xa611_3427_8184_3780, + 0x13f3_448a_3fc6_d825, + ]), + c1: Fq::from_raw_unchecked([ + 0xd263_31b0_2e9d_6995, + 0x9d68_a482_f779_7e7d, + 0x9c9b_2924_8d39_ea92, + 0xf480_1ca2_e131_07aa, + 0xa16c_0732_bdbc_b066, + 0x083c_a4af_ba36_0478, + ]) + }, + c1: Fq2 { + c0: Fq::from_raw_unchecked([ + 0x59e2_61db_0916_b641, + 0x2716_b6f4_b23e_960d, + 0xc8e5_5b10_a0bd_9c45, + 0x0bdb_0bd9_9c4d_eda8, + 0x8cf8_9ebf_57fd_aac5, + 0x12d6_b792_9e77_7a5e, + ]), + c1: Fq::from_raw_unchecked([ + 0x5fc8_5188_b0e1_5f35, + 0x34a0_6e3a_8f09_6365, + 0xdb31_26a6_e02a_d62c, + 0xfc6f_5aa9_7d9a_990b, + 0xa12f_55f5_eb89_c210, + 0x1723_703a_926f_8889, + ]) + }, + c2: Fq2 { + c0: Fq::from_raw_unchecked([ + 0x9358_8f29_7182_8778, + 0x43f6_5b86_11ab_7585, + 0x3183_aaf5_ec27_9fdf, + 0xfa73_d7e1_8ac9_9df6, + 0x64e1_76a6_a64c_99b0, + 0x179f_a78c_5838_8f1f, + ]), + c1: Fq::from_raw_unchecked([ + 0x672a_0a11_ca2a_ef12, + 0x0d11_b9b5_2aa3_f16b, + 0xa444_12d0_699d_056e, + 0xc01d_0177_221a_5ba5, + 0x66e0_cede_6c73_5529, + 0x05f5_a71e_9fdd_c339, + ]) + } + }, + c1: Fq6 { + c0: Fq2 { + c0: Fq::from_raw_unchecked([ + 0xd30a_88a1_b062_c679, + 0x5ac5_6a5d_35fc_8304, + 0xd0c8_34a6_a81f_290d, + 0xcd54_30c2_da37_07c7, + 0xf0c2_7ff7_8050_0af0, + 0x0924_5da6_e2d7_2eae, + ]), + c1: Fq::from_raw_unchecked([ + 0x9f2e_0676_791b_5156, + 0xe2d1_c823_4918_fe13, + 0x4c9e_459f_3c56_1bf4, + 0xa3e8_5e53_b9d3_e3c1, + 0x820a_121e_21a7_0020, + 0x15af_6183_41c5_9acc, + ]) + }, + c1: Fq2 { + c0: Fq::from_raw_unchecked([ + 0x7c95_658c_2499_3ab1, + 0x73eb_3872_1ca8_86b9, + 0x5256_d749_4774_34bc, + 0x8ba4_1902_ea50_4a8b, + 0x04a3_d3f8_0c86_ce6d, + 
0x18a6_4a87_fb68_6eaa, + ]), + c1: Fq::from_raw_unchecked([ + 0xbb83_e71b_b920_cf26, + 0x2a52_77ac_92a7_3945, + 0xfc0e_e59f_94f0_46a0, + 0x7158_cdf3_7860_58f7, + 0x7cc1_061b_82f9_45f6, + 0x03f8_47aa_9fdb_e567, + ]) + }, + c2: Fq2 { + c0: Fq::from_raw_unchecked([ + 0x8078_dba5_6134_e657, + 0x1cd7_ec9a_4399_8a6e, + 0xb1aa_599a_1a99_3766, + 0xc9a0_f62f_0842_ee44, + 0x8e15_9be3_b605_dffa, + 0x0c86_ba0d_4af1_3fc2, + ]), + c1: Fq::from_raw_unchecked([ + 0xe80f_f2a0_6a52_ffb1, + 0x7694_ca48_721a_906c, + 0x7583_183e_03b0_8514, + 0xf567_afdd_40ce_e4e2, + 0x9a6d_96d2_e526_a5fc, + 0x197e_9f49_861f_2242, + ]) + } + } + } + ); + } + + // TODO Generalize pairing (from BN254) with BLS12-381 + // #[test] + // fn random_bilinearity_tests() { + // let mut rng = XorShiftRng::from_seed([ + // 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + // 0xbc, 0xe5, + // ]); + + // for _ in 0..1000 { + // let mut a = G1::generator(); + // let ka = Fr::random(&mut rng); + // a.mul_assign(ka); + + // let mut b = G2::generator(); + // let kb = Fr::random(&mut rng); + // b.mul_assign(kb); + + // let c = Fr::random(&mut rng); + // let d = Fr::random(&mut rng); + + // let mut ac = a; + // ac.mul_assign(c); + + // let mut ad = a; + // ad.mul_assign(d); + + // let mut bc = b; + // bc.mul_assign(c); + + // let mut bd = b; + // bd.mul_assign(d); + + // let acbd = Bls12_381::pairing(&G1Affine::from(ac), &G2Affine::from(bd)); + // let adbc = Bls12_381::pairing(&G1Affine::from(ad), &G2Affine::from(bc)); + + // let mut cd = c; + // cd.mul_assign(&d); + + // cd *= Fr([1, 0, 0, 0]); + + // let abcd = Gt(Bls12_381::pairing(&G1Affine::from(a), &G2Affine::from(b)) + // .0 + // .pow_vartime(cd.0)); + + // assert_eq!(acbd, adbc); + // assert_eq!(acbd, abcd); + // } + // } + + #[test] + fn random_bilinearity_tests_bls12_381() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 0xbc, 0xe5, + ]); + + for _ in 0..1000 { + let mut a = G1::generator(); + let ka = Fr::random(&mut rng); + a.mul_assign(ka); + + let mut b = G2::generator(); + let kb = Fr::random(&mut rng); + b.mul_assign(kb); + + let c = Fr::random(&mut rng); + let d = Fr::random(&mut rng); + + let mut ac = a; + ac.mul_assign(c); + + let mut ad = a; + ad.mul_assign(d); + + let mut bc = b; + bc.mul_assign(c); + + let mut bd = b; + bd.mul_assign(d); + + let acbd = pairing_bls12_381(&G1Affine::from(ac), &G2Affine::from(bd)); + let adbc = pairing_bls12_381(&G1Affine::from(ad), &G2Affine::from(bc)); + + let mut cd = c; + cd.mul_assign(&d); + + cd *= Fr([1, 0, 0, 0]); + + let abcd = Gt(pairing_bls12_381(&G1Affine::from(a), &G2Affine::from(b)) + .0 + .pow_vartime(cd.0)); + + assert_eq!(acbd, adbc); + assert_eq!(acbd, abcd); + } + } + + // TODO Generalize pairing (from BN254) with BLS12-381 + // #[test] + // fn test_bilinearity() { + // let a = Fr::from_raw([1, 2, 3, 4]).invert().unwrap().square(); + // let b = Fr::from_raw([5, 6, 7, 8]).invert().unwrap().square(); + // let c = a * b; + + // let g = G1Affine::from(G1Affine::generator() * a); + // let h = G2Affine::from(G2Affine::generator() * b); + // let p = Bls12_381::pairing(&g, &h); + + // assert!(p != Gt::identity()); + + // let expected = G1Affine::from(G1Affine::generator() * c); + + // assert_eq!(p, Bls12_381::pairing(&expected, &G2Affine::generator())); + // assert_eq!( + // p, + // Bls12_381::pairing(&G1Affine::generator(), &G2Affine::generator()) * c + // ); + // } + + #[test] + fn test_bilinearity_bls12_381() { + 
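+        // Bilinearity: e([a] P, [b] Q) = e(P, Q)^(a*b). The pairing of
+        // g = [a] G1 with h = [b] G2 must therefore equal the pairing of
+        // [a*b] G1 with the G2 generator, and the generator pairing scaled
+        // by c = a*b.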
let a = Fr::from_raw([1, 2, 3, 4]).invert().unwrap().square(); + let b = Fr::from_raw([5, 6, 7, 8]).invert().unwrap().square(); + let c = a * b; + + let g = G1Affine::from(G1Affine::generator() * a); + let h = G2Affine::from(G2Affine::generator() * b); + let p = pairing_bls12_381(&g, &h); + + assert!(p != Gt::identity()); + + let expected = G1Affine::from(G1Affine::generator() * c); + + assert_eq!(p, pairing_bls12_381(&expected, &G2Affine::generator())); + assert_eq!( + p, + pairing_bls12_381(&G1Affine::generator(), &G2Affine::generator()) * c + ); + } + + #[test] + pub fn engine_tests() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 0xbc, 0xe5, + ]); + + for _ in 0..10 { + let a = G1Affine::from(G1::random(&mut rng)); + let b = G2Affine::from(G2::random(&mut rng)); + + assert!(a.pairing_with(&b) == b.pairing_with(&a)); + assert!(a.pairing_with(&b) == Bls12_381::pairing(&a, &b)); + } + + for _ in 0..1000 { + let z1 = G1Affine::identity(); + let z2 = G2Prepared::from(G2Affine::identity()); + + let a = G1Affine::from(G1::random(&mut rng)); + let b = G2Prepared::from(G2Affine::from(G2::random(&mut rng))); + let c = G1Affine::from(G1::random(&mut rng)); + let d = G2Prepared::from(G2Affine::from(G2::random(&mut rng))); + + assert_eq!( + Fq12::one(), + multi_miller_loop(&[(&z1, &b)]).final_exponentiation().0, + ); + + assert_eq!( + Fq12::one(), + multi_miller_loop(&[(&a, &z2)]).final_exponentiation().0, + ); + + assert_eq!( + multi_miller_loop(&[(&z1, &b), (&c, &d)]).final_exponentiation(), + multi_miller_loop(&[(&a, &z2), (&c, &d)]).final_exponentiation(), + ); + + assert_eq!( + multi_miller_loop(&[(&a, &b), (&z1, &d)]).final_exponentiation(), + multi_miller_loop(&[(&a, &b), (&c, &z2)]).final_exponentiation(), + ); + } + } + + #[test] + pub fn engine_tests_bls12_381() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 0xbc, 0xe5, + ]); + + // for _ in 0..10 { + // let a = G1Affine::from(G1::random(&mut rng)); + // let b = G2Affine::from(G2::random(&mut rng)); + + // assert!(a.pairing_with(&b) == b.pairing_with(&a)); + // assert!(a.pairing_with(&b) == pairing_bls12_381(&a, &b)); + // } + + for _ in 0..1000 { + let z1 = G1Affine::identity(); + let z2 = G2Prepared::from(G2Affine::identity()); + + let a = G1Affine::from(G1::random(&mut rng)); + let b = G2Prepared::from(G2Affine::from(G2::random(&mut rng))); + let c = G1Affine::from(G1::random(&mut rng)); + let d = G2Prepared::from(G2Affine::from(G2::random(&mut rng))); + + assert_eq!( + Fq12::one(), + multi_miller_loop_bls12_381(&[(&z1, &b)]) + .final_exponentiation_bls12_381() + .0, + ); + + assert_eq!( + Fq12::one(), + multi_miller_loop_bls12_381(&[(&a, &z2)]) + .final_exponentiation_bls12_381() + .0, + ); + + assert_eq!( + multi_miller_loop_bls12_381(&[(&z1, &b), (&c, &d)]) + .final_exponentiation_bls12_381(), + multi_miller_loop_bls12_381(&[(&a, &z2), (&c, &d)]) + .final_exponentiation_bls12_381(), + ); + + assert_eq!( + multi_miller_loop_bls12_381(&[(&a, &b), (&z1, &d)]) + .final_exponentiation_bls12_381(), + multi_miller_loop_bls12_381(&[(&a, &b), (&c, &z2)]) + .final_exponentiation_bls12_381(), + ); + } + } + + #[test] + fn random_miller_loop_tests() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 0xbc, 0xe5, + ]); + + // Exercise a double miller loop + for _ in 0..1000 { + let a = 
G1Affine::from(G1::random(&mut rng)); + let b = G2Affine::from(G2::random(&mut rng)); + let c = G1Affine::from(G1::random(&mut rng)); + let d = G2Affine::from(G2::random(&mut rng)); + + let ab = Bls12_381::pairing(&a, &b); + let cd = Bls12_381::pairing(&c, &d); + + let mut abcd = ab; + abcd = Gt(abcd.0 * cd.0); + + let b = G2Prepared::from(b); + let d = G2Prepared::from(d); + + let abcd_with_double_loop = + multi_miller_loop(&[(&a, &b), (&c, &d)]).final_exponentiation(); + + assert_eq!(abcd, abcd_with_double_loop); + } + } + + #[test] + fn random_miller_loop_tests_bls12_381() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 0xbc, 0xe5, + ]); + + // Exercise a double miller loop + for _ in 0..1000 { + let a = G1Affine::from(G1::random(&mut rng)); + let b = G2Affine::from(G2::random(&mut rng)); + let c = G1Affine::from(G1::random(&mut rng)); + let d = G2Affine::from(G2::random(&mut rng)); + + let ab = pairing_bls12_381(&a, &b); + let cd = pairing_bls12_381(&c, &d); + + let mut abcd = ab; + abcd = Gt(abcd.0 * cd.0); + + let b = G2Prepared::from(b); + let d = G2Prepared::from(d); + + let abcd_with_double_loop = + multi_miller_loop_bls12_381(&[(&a, &b), (&c, &d)]).final_exponentiation_bls12_381(); + + assert_eq!(abcd, abcd_with_double_loop); + } + } + + #[test] + fn test_multi_miller_loop() { + let a1 = G1Affine::generator(); + let b1 = G2Affine::generator(); + + let a2 = G1Affine::from( + G1Affine::generator() * Fr::from_raw([1, 2, 3, 4]).invert().unwrap().square(), + ); + let b2 = G2Affine::from( + G2Affine::generator() * Fr::from_raw([4, 2, 2, 4]).invert().unwrap().square(), + ); + + let a3 = G1Affine::identity(); + let b3 = G2Affine::from( + G2Affine::generator() * Fr::from_raw([9, 2, 2, 4]).invert().unwrap().square(), + ); + + let a4 = G1Affine::from( + G1Affine::generator() * Fr::from_raw([5, 5, 5, 5]).invert().unwrap().square(), + ); + let b4 = G2Affine::identity(); + + let a5 = G1Affine::from( + G1Affine::generator() * Fr::from_raw([323, 32, 3, 1]).invert().unwrap().square(), + ); + let b5 = G2Affine::from( + G2Affine::generator() * Fr::from_raw([4, 2, 2, 9099]).invert().unwrap().square(), + ); + + let b1_prepared = G2Prepared::from(b1); + let b2_prepared = G2Prepared::from(b2); + let b3_prepared = G2Prepared::from(b3); + let b4_prepared = G2Prepared::from(b4); + let b5_prepared = G2Prepared::from(b5); + + let expected = Bls12_381::pairing(&a1, &b1) + + Bls12_381::pairing(&a2, &b2) + + Bls12_381::pairing(&a3, &b3) + + Bls12_381::pairing(&a4, &b4) + + Bls12_381::pairing(&a5, &b5); + + let test = multi_miller_loop(&[ + (&a1, &b1_prepared), + (&a2, &b2_prepared), + (&a3, &b3_prepared), + (&a4, &b4_prepared), + (&a5, &b5_prepared), + ]) + .final_exponentiation(); + + assert_eq!(expected, test); + } + + #[test] + fn test_multi_miller_loop_bls12_381() { + let a1 = G1Affine::generator(); + let b1 = G2Affine::generator(); + + let a2 = G1Affine::from( + G1Affine::generator() * Fr::from_raw([1, 2, 3, 4]).invert().unwrap().square(), + ); + let b2 = G2Affine::from( + G2Affine::generator() * Fr::from_raw([4, 2, 2, 4]).invert().unwrap().square(), + ); + + let a3 = G1Affine::identity(); + let b3 = G2Affine::from( + G2Affine::generator() * Fr::from_raw([9, 2, 2, 4]).invert().unwrap().square(), + ); + + let a4 = G1Affine::from( + G1Affine::generator() * Fr::from_raw([5, 5, 5, 5]).invert().unwrap().square(), + ); + let b4 = G2Affine::identity(); + + let a5 = G1Affine::from( + G1Affine::generator() * Fr::from_raw([323, 32, 
3, 1]).invert().unwrap().square(), + ); + let b5 = G2Affine::from( + G2Affine::generator() * Fr::from_raw([4, 2, 2, 9099]).invert().unwrap().square(), + ); + + let b1_prepared = G2Prepared::from(b1); + let b2_prepared = G2Prepared::from(b2); + let b3_prepared = G2Prepared::from(b3); + let b4_prepared = G2Prepared::from(b4); + let b5_prepared = G2Prepared::from(b5); + + let expected = pairing_bls12_381(&a1, &b1) + + pairing_bls12_381(&a2, &b2) + + pairing_bls12_381(&a3, &b3) + + pairing_bls12_381(&a4, &b4) + + pairing_bls12_381(&a5, &b5); + + let test = multi_miller_loop_bls12_381(&[ + (&a1, &b1_prepared), + (&a2, &b2_prepared), + (&a3, &b3_prepared), + (&a4, &b4_prepared), + (&a5, &b5_prepared), + ]) + .final_exponentiation_bls12_381(); + + assert_eq!(expected, test); + } + + #[test] + fn test_bit_iterator() { + let mut a = BitIterator::new([0xa953d79b83f6ab59, 0x6dea2059e200bd39]); + let expected = "01101101111010100010000001011001111000100000000010111101001110011010100101010011110101111001101110000011111101101010101101011001"; + + for e in expected.chars() { + assert!(a.next().unwrap() == (e == '1')); + } + + assert!(a.next().is_none()); + + let expected = "1010010101111110101010000101101011101000011101110101001000011001100100100011011010001011011011010001011011101100110100111011010010110001000011110100110001100110011101101000101100011100100100100100001010011101010111110011101011000011101000111011011101011001"; + + let mut a = BitIterator::new([ + 0x429d5f3ac3a3b759, + 0xb10f4c66768b1c92, + 0x92368b6d16ecd3b4, + 0xa57ea85ae8775219, + ]); + + for e in expected.chars() { + assert!(a.next().unwrap() == (e == '1')); + } + + assert!(a.next().is_none()); + } +} diff --git a/src/bls12_381/fq.rs b/src/bls12_381/fq.rs new file mode 100644 index 00000000..9815e1a5 --- /dev/null +++ b/src/bls12_381/fq.rs @@ -0,0 +1,509 @@ +#[cfg(feature = "asm")] +use super::assembly::assembly_field; + +use super::LegendreSymbol; +use crate::arithmetic::{adc, adc_u64, mac, macx, sbb}; +use pasta_curves::arithmetic::{FieldExt, Group, SqrtRatio}; +use serde::{Deserialize, Serialize}; + +use core::convert::TryInto; +use core::fmt; +use core::ops::{Add, Mul, Neg, Sub}; +use ff::PrimeField; +use rand::RngCore; +use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption}; + +/// This represents an element of $\mathbb{F}_q$ where +/// +/// `q = 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787` +/// `q = 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab` +/// +/// is the base field of the BLS12-381 curve. +// The internal representation of this type is six 64-bit unsigned +// integers in little-endian order. `Fq` values are always in +// Montgomery form; i.e., Fq(a) = aR mod q, with R = 2^384. 
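// For intuition (editor's note, summarising the constants defined just below):
//   R   = 2^384 mod q        -- the Montgomery representation of 1
//   R2  = 2^(384*2) mod q    -- multiplying raw limbs by R2 and reducing once
//                               converts into Montgomery form: a * R2 / R = aR
//   INV = -(q^-1) mod 2^64   -- the per-limb factor used by Montgomery reduction
// Converting back out is a single reduction: aR * 1 / R = a (see to_repr()).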
+#[derive(Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] +pub struct Fq(pub(crate) [u64; 6]); + +/// Constant representing the modulus +/// q = 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab +const MODULUS: Fq = Fq([ + 0xb9feffffffffaaab, + 0x1eabfffeb153ffff, + 0x6730d2a0f6b0f624, + 0x64774b84f38512bf, + 0x4b1ba7b6434bacd7, + 0x1a0111ea397fe69a, +]); +const MODULUS_STR: &str = "0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab"; +const MODULUS_BITS: u32 = 381; + +// 2^s * t = MODULUS - 1 with t odd +const S: u32 = 1; + +/// INV = -(q^{-1} mod 2^64) mod 2^64 +const INV: u64 = 0x89f3fffcfffcfffd; + +// R = 2^384 % q +// R = 0x15f65ec3fa80e4935c071a97a256ec6d77ce5853705257455f48985753c758baebf4000bc40c0002760900000002fffd +const R: Fq = Fq([ + 0x760900000002fffd, + 0xebf4000bc40c0002, + 0x5f48985753c758ba, + 0x77ce585370525745, + 0x5c071a97a256ec6d, + 0x15f65ec3fa80e493, +]); + +// R2 = 2^(384*2) mod q +// R2 = 0x11988fe592cae3aa9a793e85b519952d67eb88a9939d83c08de5476c4c95b6d50a76e6a609d104f1f4df1f341c341746 +const R2: Fq = Fq([ + 0xf4df1f341c341746, + 0xa76e6a609d104f1, + 0x8de5476c4c95b6d5, + 0x67eb88a9939d83c0, + 0x9a793e85b519952d, + 0x11988fe592cae3aa, +]); + +// R3 = 2^(384*3) mod q +// R3 = 0xaa6346091755d4d2512d4356572472834c04e5e921e17619a53352a615e29dd315f831e03a7adf8ed48ac6bd94ca1e0 +const R3: Fq = Fq([ + 0xed48ac6bd94ca1e0, + 0x315f831e03a7adf8, + 0x9a53352a615e29dd, + 0x34c04e5e921e1761, + 0x2512d43565724728, + 0x0aa6346091755d4d, +]); + +// NEGATIVE_ONE = -((2**384) mod q) mod q +// NEGATIVE_ONE = 0x40ab3263eff0206ef148d1ea0f4c069eca8f3318332bb7a07e83a49a2e99d6932b7fff2ed47fffd43f5fffffffcaaae +pub const NEGATIVE_ONE: Fq = Fq([ + 0x43f5fffffffcaaae, + 0x32b7fff2ed47fffd, + 0x7e83a49a2e99d69, + 0xeca8f3318332bb7a, + 0xef148d1ea0f4c069, + 0x40ab3263eff0206, +]); + +// Unused constant +const TWO_INV: Fq = Fq::zero(); + +// 2^s root of unity computed by GENERATOR^t +// 0x40ab3263eff0206ef148d1ea0f4c069eca8f3318332bb7a07e83a49a2e99d6932b7fff2ed47fffd43f5fffffffcaaae +const ROOT_OF_UNITY: Fq = Fq([ + 0x43f5fffffffcaaae, + 0x32b7fff2ed47fffd, + 0x7e83a49a2e99d69, + 0xeca8f3318332bb7a, + 0xef148d1ea0f4c069, + 0x40ab3263eff0206, +]); + +// Unused constant for base field +const ROOT_OF_UNITY_INV: Fq = Fq::zero(); + +// Unused constant for base field +const DELTA: Fq = Fq::zero(); + +// Unused constant +const ZETA: Fq = Fq::zero(); + +use crate::{ + field_arithmetic_bls12_381, field_common_fq, field_specific_bls12_381, + impl_add_binop_specify_output, impl_binops_additive, impl_binops_additive_specify_output, + impl_binops_multiplicative, impl_binops_multiplicative_mixed, impl_sub_binop_specify_output, +}; +impl_binops_additive!(Fq, Fq); +impl_binops_multiplicative!(Fq, Fq); +#[cfg(not(feature = "asm"))] +field_common_fq!( + Fq, + MODULUS, + INV, + MODULUS_STR, + TWO_INV, + ROOT_OF_UNITY_INV, + DELTA, + ZETA, + R, + R2, + R3 +); +#[cfg(not(feature = "asm"))] +field_arithmetic_bls12_381!(Fq, MODULUS, INV, sparse); +#[cfg(feature = "asm")] +assembly_field!( + Fq, + MODULUS, + INV, + MODULUS_STR, + TWO_INV, + ROOT_OF_UNITY_INV, + DELTA, + ZETA, + R, + R2, + R3 +); + +impl Fq { + pub const fn size() -> usize { + 48 + } + + pub fn legendre(&self) -> LegendreSymbol { + // s = self^((q - 1) / 2) + // (q - 1) / 2 = 0xd0088f51cbff34d258dd3db21a5d66bb23ba5c279c2895fb39869507b587b12f55ffff58a9ffffdcff7fffffffd555 + let s = &[ + 0xdcff7fffffffd555, + 0xf55ffff58a9ffff, + 0xb39869507b587b12,
+ 0xb23ba5c279c2895f, + 0x258dd3db21a5d66b, + 0xd0088f51cbff34d, + ]; + let s = self.pow_fq(s); + if s == Self::zero() { + LegendreSymbol::Zero + } else if s == Self::one() { + LegendreSymbol::QuadraticResidue + } else { + LegendreSymbol::QuadraticNonResidue + } + } + + fn pow_fq(&self, by: &[u64; 6]) -> Self { + let mut res = Self::one(); + for e in by.iter().rev() { + for i in (0..64).rev() { + res = res.square(); + + if ((*e >> i) & 1) == 1 { + res *= self; + } + } + } + res + } +} + +impl ff::Field for Fq { + fn random(mut rng: impl RngCore) -> Self { + let mut random_bytes = [0; 64]; + rng.fill_bytes(&mut random_bytes[..]); + + Self::from_bytes_wide(&random_bytes) + } + + #[inline(always)] + fn zero() -> Self { + Self::zero() + } + + #[inline(always)] + fn one() -> Self { + Self::one() + } + + fn double(&self) -> Self { + self.double() + } + + #[inline(always)] + fn square(&self) -> Self { + self.square() + } + + /// Computes the square root of this element, if it exists. + fn sqrt(&self) -> CtOption { + // We use Shank's method, as q = 3 (mod 4). This means + // we only need to exponentiate by ( q + 1 ) / 4. This only + // works for elements that are actually quadratic residue, + // so we check that we got the correct result at the end. + // (q + 1) / 4 = 0x680447a8e5ff9a692c6e9ed90d2eb35d91dd2e13ce144afd9cc34a83dac3d8907aaffffac54ffffee7fbfffffffeaab + const Q_PLUS_1_DIV_4: [u64; 6] = [ + 0xee7fbfffffffeaab, + 0x07aaffffac54ffff, + 0xd9cc34a83dac3d89, + 0xd91dd2e13ce144af, + 0x92c6e9ed90d2eb35, + 0x0680447a8e5ff9a6, + ]; + let sqrt = self.pow_fq(&Q_PLUS_1_DIV_4); + + CtOption::new(sqrt, sqrt.square().ct_eq(self)) + } + + /// Computes the multiplicative inverse of this element, + /// failing if the element is zero. + fn invert(&self) -> CtOption { + // Exponentiate by q - 2 + // Q_MIN_2 = 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaa9 + let tmp = self.pow_fq(&[ + 0xb9feffffffffaaa9, + 0x1eabfffeb153ffff, + 0x6730d2a0f6b0f624, + 0x64774b84f38512bf, + 0x4b1ba7b6434bacd7, + 0x1a0111ea397fe69a, + ]); + + CtOption::new(tmp, !self.ct_eq(&Self::zero())) + } +} + +use std::ops::Index; +use std::ops::RangeFull; + +#[derive(Clone, Copy, Debug)] +pub struct FqBytes { + pub slice: [u8; 48], +} + +impl From<[u8; 48]> for FqBytes { + fn from(bytes: [u8; 48]) -> Self { + Self { slice: bytes } + } +} + +impl Index for FqBytes { + type Output = [u8]; + fn index(&self, _: RangeFull) -> &[u8] { + &self.slice[..] 
+ } +} + +impl Index<usize> for FqBytes { + type Output = u8; + fn index(&self, idx: usize) -> &Self::Output { + &self.slice[idx] + } +} + +impl Default for FqBytes { + fn default() -> Self { + Self { slice: [0u8; 48] } + } +} + +impl AsMut<[u8]> for FqBytes { + fn as_mut(&mut self) -> &mut [u8] { + &mut self.slice + } +} + +impl AsRef<[u8]> for FqBytes { + fn as_ref(&self) -> &[u8] { + &self.slice + } +} + +impl ff::PrimeField for Fq { + // type Repr = [u8; 48]; + // The FqBytes wrapper is needed; with `[u8; 48]` as Repr we would get the error: + // "the trait bound `[u8; 48]: std::default::Default` is not satisfied; the + // following other types implement trait `std::default::Default`", + // because the trait bound `Default` on Repr is required by PrimeField. + type Repr = FqBytes; + + const NUM_BITS: u32 = MODULUS_BITS; + const CAPACITY: u32 = Self::NUM_BITS - 1; + const S: u32 = S; + + fn from_repr(repr: Self::Repr) -> CtOption<Self> { + let mut tmp = Fq([0, 0, 0, 0, 0, 0]); + + tmp.0[0] = u64::from_le_bytes(repr.slice[0..8].try_into().unwrap()); + tmp.0[1] = u64::from_le_bytes(repr.slice[8..16].try_into().unwrap()); + tmp.0[2] = u64::from_le_bytes(repr.slice[16..24].try_into().unwrap()); + tmp.0[3] = u64::from_le_bytes(repr.slice[24..32].try_into().unwrap()); + tmp.0[4] = u64::from_le_bytes(repr.slice[32..40].try_into().unwrap()); + tmp.0[5] = u64::from_le_bytes(repr.slice[40..48].try_into().unwrap()); + + // Try to subtract the modulus + let (_, borrow) = tmp.0[0].overflowing_sub(MODULUS.0[0]); + let (_, borrow) = sbb(tmp.0[1], MODULUS.0[1], borrow); + let (_, borrow) = sbb(tmp.0[2], MODULUS.0[2], borrow); + let (_, borrow) = sbb(tmp.0[3], MODULUS.0[3], borrow); + let (_, borrow) = sbb(tmp.0[4], MODULUS.0[4], borrow); + let (_, borrow) = sbb(tmp.0[5], MODULUS.0[5], borrow); + + // If the element is smaller than MODULUS then the + // subtraction will underflow, producing a borrow of `true`. + // Otherwise, the borrow will be `false`.
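        // For intuition, the same borrow trick on a single limb:
        //   5u64.overflowing_sub(7) == (u64::MAX - 1, true)   (5 < 7, borrow set)
        //   9u64.overflowing_sub(7) == (2, false)             (9 >= 7, no borrow)
        // so a final borrow of `true` means the decoded value is canonical (below q).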
+ let is_some = (borrow as u8) & 1; + + // Convert to Montgomery form by computing + // (a.R^0 * R^2) / R = a.R + tmp *= &R2; + + CtOption::new(tmp, Choice::from(is_some)) + } + + fn to_repr(&self) -> Self::Repr { + // Turn into canonical form by computing + // (a.R) / R = a + #[cfg(feature = "asm")] + let tmp = Self::montgomery_reduce(&[ + self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5], 0, 0, 0, 0, 0, 0, + ]); + + #[cfg(not(feature = "asm"))] + let tmp = Self::montgomery_reduce_short( + self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5], + ); + + let mut res = [0; 48]; + res[0..8].copy_from_slice(&tmp.0[0].to_le_bytes()); + res[8..16].copy_from_slice(&tmp.0[1].to_le_bytes()); + res[16..24].copy_from_slice(&tmp.0[2].to_le_bytes()); + res[24..32].copy_from_slice(&tmp.0[3].to_le_bytes()); + res[32..40].copy_from_slice(&tmp.0[4].to_le_bytes()); + res[40..48].copy_from_slice(&tmp.0[5].to_le_bytes()); + + FqBytes { slice: res } + } + + fn is_odd(&self) -> Choice { + Choice::from(self.to_repr().as_ref()[0] & 1) + } + + fn multiplicative_generator() -> Self { + unimplemented!() + } + + fn root_of_unity() -> Self { + ROOT_OF_UNITY + } +} + +impl SqrtRatio for Fq { + const T_MINUS1_OVER2: [u64; 4] = unimplemented!(); + + fn get_lower_32(&self) -> u32 { + unimplemented!() + } +} + +#[cfg(test)] +mod test { + use super::*; + use ff::Field; + use rand_core::OsRng; + + #[test] + fn test_ser() { + let a0 = Fq::random(OsRng); + let a_bytes = a0.to_bytes(); + let a1 = Fq::from_bytes(&a_bytes).unwrap(); + assert_eq!(a0, a1); + } + + #[test] + fn test_sqrt_fq() { + // a = 4 + let a = Fq::from_raw_unchecked([ + 0xaa270000000cfff3, + 0x53cc0032fc34000a, + 0x478fe97a6b0a807f, + 0xb1d37ebee6ba24d7, + 0x8ec9733bbf78ab2f, + 0x09d645513d83de7e, + ]); + + assert_eq!( + // sqrt(4) = -2 + -a.sqrt().unwrap(), + // 2 + Fq::from_raw_unchecked([ + 0x3213_0000_0006_554f, + 0xb93c_0018_d6c4_0005, + 0x5760_5e0d_b0dd_bb51, + 0x8b25_6521_ed1f_9bcb, + 0x6cf2_8d79_0162_2c03, + 0x11eb_ab9d_bb81_e28c, + ]) + ); + + for _ in 0..10000 { + let a = Fq::random(OsRng); + let mut b = a; + b = b.square(); + assert_eq!(b.legendre(), LegendreSymbol::QuadraticResidue); + + let b = b.sqrt().unwrap(); + let mut negb = b; + negb = negb.neg(); + + assert!(a == b || a == negb); + } + + let mut c = Fq::one(); + for _ in 0..10000 { + let mut b = c; + b = b.square(); + assert_eq!(b.legendre(), LegendreSymbol::QuadraticResidue); + + b = b.sqrt().unwrap(); + + if b != c { + b = b.neg(); + } + + assert_eq!(b, c); + + c += &Fq::one(); + } + } + + // TODO [TEST] [from_u512] + // #[test] + // fn test_from_u512() { + // let a = Fq::from_u512([ + // 0xaaaaaaaaaaaaaaaa, + // 0xaaaaaaaaaaaaaaaa, + // 0xaaaaaaaaaaaaaaaa, + // 0xaaaaaaaaaaaaaaaa, + // 0xaaaaaaaaaaaaaaaa, + // 0xaaaaaaaaaaaaaaaa, + // 0xaaaaaaaaaaaaaaaa, + // 0xaaaaaaaaaaaaaaaa, + // ]); + // println!("a = {:?}", a); + // // 0x01dce8d1b03439d8a725335a9edeb9d2b94a9d23e2648fdfac2575af577605842e217ad51b4754df0efe265b19724868 + + // assert_eq!( + // Fq::from_raw_unchecked([ + // 0x0efe265b19724868, + // 0x2e217ad51b4754df, + // 0xac2575af57760584, + // 0xb94a9d23e2648fdf, + // 0xa725335a9edeb9d2, + // 0x01dce8d1b03439d8, + // ]), + // Fq::from_u512([ + // 0xaaaaaaaaaaaaaaaa, + // 0xaaaaaaaaaaaaaaaa, + // 0xaaaaaaaaaaaaaaaa, + // 0xaaaaaaaaaaaaaaaa, + // 0xaaaaaaaaaaaaaaaa, + // 0xaaaaaaaaaaaaaaaa, + // 0xaaaaaaaaaaaaaaaa, + // 0xaaaaaaaaaaaaaaaa + // ]) + // ); + // } + + #[test] + fn test_field() { + crate::tests::field::random_field_tests::("fq".to_string()); + } + 
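    // Editor's sketch, not part of the original patch: a direct round-trip check of
    // the `PrimeField` repr implemented above would complement the generic harnesses
    // called in this module. It only uses `Fq`, the `OsRng` import already used by
    // `test_ser`, and the `ff::PrimeField` trait.
    #[test]
    fn test_repr_round_trip() {
        use ff::PrimeField;
        for _ in 0..100 {
            // to_repr() leaves Montgomery form, from_repr() re-enters it; the
            // composition must be the identity on canonical elements.
            let a = Fq::random(OsRng);
            assert_eq!(a, Fq::from_repr(a.to_repr()).unwrap());
        }
    }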
+ #[test] + fn test_serialization() { + crate::tests::field::random_serialization_test::("fq".to_string()); + } +} diff --git a/src/bls12_381/fq12.rs b/src/bls12_381/fq12.rs new file mode 100644 index 00000000..f7e9386e --- /dev/null +++ b/src/bls12_381/fq12.rs @@ -0,0 +1,732 @@ +use super::fq::Fq; +use super::fq2::Fq2; +use super::fq6::Fq6; +use core::ops::{Add, Mul, Neg, Sub}; +use ff::Field; +use rand::RngCore; +use serde::{Deserialize, Serialize}; +use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption}; + +#[derive(Copy, Clone, Debug, Eq, PartialEq, Default, Serialize, Deserialize)] +pub struct Fq12 { + pub c0: Fq6, + pub c1: Fq6, +} + +impl ConditionallySelectable for Fq12 { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Fq12 { + c0: Fq6::conditional_select(&a.c0, &b.c0, choice), + c1: Fq6::conditional_select(&a.c1, &b.c1, choice), + } + } +} + +impl ConstantTimeEq for Fq12 { + fn ct_eq(&self, other: &Self) -> Choice { + self.c0.ct_eq(&other.c0) & self.c1.ct_eq(&other.c1) + } +} + +impl Neg for Fq12 { + type Output = Fq12; + + #[inline] + fn neg(self) -> Fq12 { + -&self + } +} + +impl<'a> Neg for &'a Fq12 { + type Output = Fq12; + + #[inline] + fn neg(self) -> Fq12 { + self.neg() + } +} + +impl<'a, 'b> Sub<&'b Fq12> for &'a Fq12 { + type Output = Fq12; + + #[inline] + fn sub(self, rhs: &'b Fq12) -> Fq12 { + self.sub(rhs) + } +} + +impl<'a, 'b> Add<&'b Fq12> for &'a Fq12 { + type Output = Fq12; + + #[inline] + fn add(self, rhs: &'b Fq12) -> Fq12 { + self.add(rhs) + } +} + +impl<'a, 'b> Mul<&'b Fq12> for &'a Fq12 { + type Output = Fq12; + + #[inline] + fn mul(self, rhs: &'b Fq12) -> Fq12 { + self.mul(rhs) + } +} + +use crate::{ + impl_add_binop_specify_output, impl_binops_additive, impl_binops_additive_specify_output, + impl_binops_multiplicative, impl_binops_multiplicative_mixed, impl_sub_binop_specify_output, +}; +impl_binops_additive!(Fq12, Fq12); +impl_binops_multiplicative!(Fq12, Fq12); + +impl Fq12 { + pub fn mul_assign(&mut self, other: &Self) { + let t0 = self.c0 * other.c0; + let mut t1 = self.c1 * other.c1; + let t2 = other.c0 + other.c1; + + self.c1 += &self.c0; + self.c1 *= &t2; + self.c1 -= &t0; + self.c1 -= &t1; + + t1.mul_by_nonresidue(); + self.c0 = t0 + t1; + } + + pub fn square_assign(&mut self) { + let mut ab = self.c0 * self.c1; + + let c0c1 = self.c0 + self.c1; + + let mut c0 = self.c1; + c0.mul_by_nonresidue(); + c0 += &self.c0; + c0 *= &c0c1; + c0 -= &ab; + self.c1 = ab; + self.c1 += &ab; + ab.mul_by_nonresidue(); + c0 -= &ab; + self.c0 = c0; + } + + pub fn double(&self) -> Self { + Self { + c0: self.c0.double(), + c1: self.c1.double(), + } + } + + pub fn double_assign(&mut self) { + self.c0 = self.c0.double(); + self.c1 = self.c1.double(); + } + + pub fn add(&self, other: &Self) -> Self { + Self { + c0: self.c0 + other.c0, + c1: self.c1 + other.c1, + } + } + + pub fn sub(&self, other: &Self) -> Self { + Self { + c0: self.c0 - other.c0, + c1: self.c1 - other.c1, + } + } + + pub fn mul(&self, other: &Self) -> Self { + let mut t = *other; + t.mul_assign(self); + t + } + + pub fn square(&self) -> Self { + let mut t = *self; + t.square_assign(); + t + } + + #[inline(always)] + pub fn neg(&self) -> Self { + Self { + c0: -self.c0, + c1: -self.c1, + } + } + + #[inline(always)] + pub fn conjugate(&mut self) { + self.c1 = -self.c1; + } + + #[inline(always)] + pub fn conjugate_ret(&self) -> Self { + Self { + c0: self.c0, + c1: -self.c1, + } + } + + pub fn frobenius_map(&mut self, power: usize) { + self.c0.frobenius_map(power); + 
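        // Frobenius acts component-wise on the two Fq6 coefficients; the c1 component
        // is then additionally scaled by (u + 1)^((p^power - 1) / 6) (the
        // FROBENIUS_COEFF_FQ12_C1 constants below), since the degree-12 extension
        // variable w, with w^6 = u + 1, satisfies w^p = w * (u + 1)^((p - 1) / 6).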
self.c1.frobenius_map(power); + + self.c1.c0.mul_assign(&FROBENIUS_COEFF_FQ12_C1[power % 12]); + self.c1.c1.mul_assign(&FROBENIUS_COEFF_FQ12_C1[power % 12]); + self.c1.c2.mul_assign(&FROBENIUS_COEFF_FQ12_C1[power % 12]); + } + + /// Alternative implementation of frobenius_map(), keeping here for reference + /// Raises this element to p. + // #[inline(always)] + // pub fn frobenius_map_conjugate(&mut self) { + // self.c0.frobenius_map_conjugate(); + // self.c1.frobenius_map_conjugate(); + + // // c1 = c1 * (u + 1)^((p - 1) / 6) + // self.c1 *= Fq6::from(Fq2 { + // c0: Fq::from_raw_unchecked([ + // 0x0708_9552_b319_d465, + // 0xc669_5f92_b50a_8313, + // 0x97e8_3ccc_d117_228f, + // 0xa35b_aeca_b2dc_29ee, + // 0x1ce3_93ea_5daa_ce4d, + // 0x08f2_220f_b0fb_66eb, + // ]), + // c1: Fq::from_raw_unchecked([ + // 0xb2f6_6aad_4ce5_d646, + // 0x5842_a06b_fc49_7cec, + // 0xcf48_95d4_2599_d394, + // 0xc11b_9cba_40a8_e8d0, + // 0x2e38_13cb_e5a0_de89, + // 0x110e_efda_8884_7faf, + // ]), + // }); + // } + + pub fn mul_by_014(&mut self, c0: &Fq2, c1: &Fq2, c4: &Fq2) { + let mut aa = self.c0; + aa.mul_by_01(c0, c1); + let mut bb = self.c1; + bb.mul_by_1(c4); + let o = c1 + c4; + self.c1 += &self.c0; + self.c1.mul_by_01(c0, &o); + self.c1 -= &aa; + self.c1 -= &bb; + self.c0 = bb; + self.c0.mul_by_nonresidue(); + self.c0 += &aa; + } + + pub fn mul_by_014_bls12_381(&self, c0: &Fq2, c1: &Fq2, c4: &Fq2) -> Fq12 { + let aa = self.c0.mul_by_01_bls12_381(c0, c1); + let bb = self.c1.mul_by_1_bls12_381(c4); + let o = c1 + c4; + let c1 = self.c1 + self.c0; + let c1 = c1.mul_by_01_bls12_381(c0, &o); + let c1 = c1 - aa - bb; + let c0 = bb; + let c0 = c0.mul_by_nonresidue_bls12_381(); + let c0 = c0 + aa; + + Fq12 { c0, c1 } + } + + pub fn mul_by_034(&mut self, c0: &Fq2, c3: &Fq2, c4: &Fq2) { + let t0 = Fq6 { + c0: self.c0.c0 * c0, + c1: self.c0.c1 * c0, + c2: self.c0.c2 * c0, + }; + let mut t1 = self.c1; + t1.mul_by_01(c3, c4); + let o = c0 + c3; + let mut t2 = self.c0 + self.c1; + t2.mul_by_01(&o, c4); + t2 -= t0; + self.c1 = t2 - t1; + t1.mul_by_nonresidue(); + self.c0 = t0 + t1; + } + + pub fn invert(&self) -> CtOption { + let mut c0s = self.c0; + c0s.square_assign(); + let mut c1s = self.c1; + c1s.square_assign(); + c1s.mul_by_nonresidue(); + c0s -= &c1s; + + c0s.invert().map(|t| { + let mut tmp = Fq12 { c0: t, c1: t }; + tmp.c0.mul_assign(&self.c0); + tmp.c1.mul_assign(&self.c1); + tmp.c1 = tmp.c1.neg(); + + tmp + }) + } + + pub fn cyclotomic_square(&mut self) { + fn fp4_square(c0: &mut Fq2, c1: &mut Fq2, a0: &Fq2, a1: &Fq2) { + let t0 = a0.square(); + let t1 = a1.square(); + let mut t2 = t1; + t2.mul_by_nonresidue(); + *c0 = t2 + t0; + t2 = a0 + a1; + t2.square_assign(); + t2 -= t0; + *c1 = t2 - t1; + } + + let mut t3 = Fq2::zero(); + let mut t4 = Fq2::zero(); + let mut t5 = Fq2::zero(); + let mut t6 = Fq2::zero(); + + fp4_square(&mut t3, &mut t4, &self.c0.c0, &self.c1.c1); + let mut t2 = t3 - self.c0.c0; + t2.double_assign(); + self.c0.c0 = t2 + t3; + + t2 = t4 + self.c1.c1; + t2.double_assign(); + self.c1.c1 = t2 + t4; + + fp4_square(&mut t3, &mut t4, &self.c1.c0, &self.c0.c2); + fp4_square(&mut t5, &mut t6, &self.c0.c1, &self.c1.c2); + + t2 = t3 - self.c0.c1; + t2.double_assign(); + self.c0.c1 = t2 + t3; + t2 = t4 + self.c1.c2; + t2.double_assign(); + self.c1.c2 = t2 + t4; + t3 = t6; + t3.mul_by_nonresidue(); + t2 = t3 + self.c1.c0; + t2.double_assign(); + self.c1.c0 = t2 + t3; + t2 = t5 - self.c0.c2; + t2.double_assign(); + self.c0.c2 = t2 + t5; + } +} + +impl Field for Fq12 { + fn random(mut rng: impl RngCore) 
-> Self { + Fq12 { + c0: Fq6::random(&mut rng), + c1: Fq6::random(&mut rng), + } + } + + fn zero() -> Self { + Fq12 { + c0: Fq6::zero(), + c1: Fq6::zero(), + } + } + + fn one() -> Self { + Fq12 { + c0: Fq6::one(), + c1: Fq6::zero(), + } + } + + fn is_zero(&self) -> Choice { + self.c0.is_zero() & self.c1.is_zero() + } + + fn square(&self) -> Self { + self.square() + } + + fn double(&self) -> Self { + self.double() + } + + fn sqrt(&self) -> CtOption { + unimplemented!() + } + + fn invert(&self) -> CtOption { + self.invert() + } +} + +// non_residue^((modulus^i-1)/6) for i=0,...,11 +pub const FROBENIUS_COEFF_FQ12_C1: [Fq2; 12] = [ + // Fq2(u + 1)**(((q^0) - 1) / 6) + Fq2 { + c0: Fq([ + 0x760900000002fffd, + 0xebf4000bc40c0002, + 0x5f48985753c758ba, + 0x77ce585370525745, + 0x5c071a97a256ec6d, + 0x15f65ec3fa80e493, + ]), + c1: Fq([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]), + }, + // Fq2(u + 1)**(((q^1) - 1) / 6) + Fq2 { + c0: Fq([ + 0x7089552b319d465, + 0xc6695f92b50a8313, + 0x97e83cccd117228f, + 0xa35baecab2dc29ee, + 0x1ce393ea5daace4d, + 0x8f2220fb0fb66eb, + ]), + c1: Fq([ + 0xb2f66aad4ce5d646, + 0x5842a06bfc497cec, + 0xcf4895d42599d394, + 0xc11b9cba40a8e8d0, + 0x2e3813cbe5a0de89, + 0x110eefda88847faf, + ]), + }, + // Fq2(u + 1)**(((q^2) - 1) / 6) + Fq2 { + c0: Fq([ + 0xecfb361b798dba3a, + 0xc100ddb891865a2c, + 0xec08ff1232bda8e, + 0xd5c13cc6f1ca4721, + 0x47222a47bf7b5c04, + 0x110f184e51c5f59, + ]), + c1: Fq([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]), + }, + // Fq2(u + 1)**(((q^3) - 1) / 6) + Fq2 { + c0: Fq([ + 0x3e2f585da55c9ad1, + 0x4294213d86c18183, + 0x382844c88b623732, + 0x92ad2afd19103e18, + 0x1d794e4fac7cf0b9, + 0xbd592fc7d825ec8, + ]), + c1: Fq([ + 0x7bcfa7a25aa30fda, + 0xdc17dec12a927e7c, + 0x2f088dd86b4ebef1, + 0xd1ca2087da74d4a7, + 0x2da2596696cebc1d, + 0xe2b7eedbbfd87d2, + ]), + }, + // Fq2(u + 1)**(((q^4) - 1) / 6) + Fq2 { + c0: Fq([ + 0x30f1361b798a64e8, + 0xf3b8ddab7ece5a2a, + 0x16a8ca3ac61577f7, + 0xc26a2ff874fd029b, + 0x3636b76660701c6e, + 0x51ba4ab241b6160, + ]), + c1: Fq([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]), + }, + // Fq2(u + 1)**(((q^5) - 1) / 6) + Fq2 { + c0: Fq([ + 0x3726c30af242c66c, + 0x7c2ac1aad1b6fe70, + 0xa04007fbba4b14a2, + 0xef517c3266341429, + 0x95ba654ed2226b, + 0x2e370eccc86f7dd, + ]), + c1: Fq([ + 0x82d83cf50dbce43f, + 0xa2813e53df9d018f, + 0xc6f0caa53c65e181, + 0x7525cf528d50fe95, + 0x4a85ed50f4798a6b, + 0x171da0fd6cf8eebd, + ]), + }, + // Fq2(u + 1)**(((q^6) - 1) / 6) + Fq2 { + c0: Fq([ + 0x43f5fffffffcaaae, + 0x32b7fff2ed47fffd, + 0x7e83a49a2e99d69, + 0xeca8f3318332bb7a, + 0xef148d1ea0f4c069, + 0x40ab3263eff0206, + ]), + c1: Fq([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]), + }, + // Fq2(u + 1)**(((q^7) - 1) / 6) + Fq2 { + c0: Fq([ + 0xb2f66aad4ce5d646, + 0x5842a06bfc497cec, + 0xcf4895d42599d394, + 0xc11b9cba40a8e8d0, + 0x2e3813cbe5a0de89, + 0x110eefda88847faf, + ]), + c1: Fq([ + 0x7089552b319d465, + 0xc6695f92b50a8313, + 0x97e83cccd117228f, + 0xa35baecab2dc29ee, + 0x1ce393ea5daace4d, + 0x8f2220fb0fb66eb, + ]), + }, + // Fq2(u + 1)**(((q^8) - 1) / 6) + Fq2 { + c0: Fq([ + 0xcd03c9e48671f071, + 0x5dab22461fcda5d2, + 0x587042afd3851b95, + 0x8eb60ebe01bacb9e, + 0x3f97d6e83d050d2, + 0x18f0206554638741, + ]), + c1: Fq([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]), + }, + // Fq2(u + 1)**(((q^9) - 1) / 6) + Fq2 { + c0: Fq([ + 0x7bcfa7a25aa30fda, + 0xdc17dec12a927e7c, + 0x2f088dd86b4ebef1, + 0xd1ca2087da74d4a7, + 0x2da2596696cebc1d, + 0xe2b7eedbbfd87d2, + ]), + c1: Fq([ + 0x3e2f585da55c9ad1, + 0x4294213d86c18183, + 0x382844c88b623732, + 0x92ad2afd19103e18, + 0x1d794e4fac7cf0b9, + 0xbd592fc7d825ec8, + ]), + }, + // Fq2(u 
+ 1)**(((q^10) - 1) / 6) + Fq2 { + c0: Fq([ + 0x890dc9e4867545c3, + 0x2af322533285a5d5, + 0x50880866309b7e2c, + 0xa20d1b8c7e881024, + 0x14e4f04fe2db9068, + 0x14e56d3f1564853a, + ]), + c1: Fq([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]), + }, + // Fq2(u + 1)**(((q^11) - 1) / 6) + Fq2 { + c0: Fq([ + 0x82d83cf50dbce43f, + 0xa2813e53df9d018f, + 0xc6f0caa53c65e181, + 0x7525cf528d50fe95, + 0x4a85ed50f4798a6b, + 0x171da0fd6cf8eebd, + ]), + c1: Fq([ + 0x3726c30af242c66c, + 0x7c2ac1aad1b6fe70, + 0xa04007fbba4b14a2, + 0xef517c3266341429, + 0x95ba654ed2226b, + 0x2e370eccc86f7dd, + ]), + }, +]; + +#[cfg(test)] +mod test { + use super::*; + use rand::SeedableRng; + use rand_xorshift::XorShiftRng; + + #[test] + fn test_fq12_mul_by_014() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 0xbc, 0xe5, + ]); + + for _ in 0..1000 { + let c0 = Fq2::random(&mut rng); + let c1 = Fq2::random(&mut rng); + let c5 = Fq2::random(&mut rng); + let mut a = Fq12::random(&mut rng); + let mut b = a; + + a.mul_by_014(&c0, &c1, &c5); + b.mul_assign(&Fq12 { + c0: Fq6 { + c0, + c1, + c2: Fq2::zero(), + }, + c1: Fq6 { + c0: Fq2::zero(), + c1: c5, + c2: Fq2::zero(), + }, + }); + + assert_eq!(a, b); + } + } + + #[test] + fn test_fq12_mul_by_034() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 0xbc, 0xe5, + ]); + + for _ in 0..1000 { + let c0 = Fq2::random(&mut rng); + let c3 = Fq2::random(&mut rng); + let c4 = Fq2::random(&mut rng); + let mut a = Fq12::random(&mut rng); + let mut b = a; + + a.mul_by_034(&c0, &c3, &c4); + b.mul_assign(&Fq12 { + c0: Fq6 { + c0, + c1: Fq2::zero(), + c2: Fq2::zero(), + }, + c1: Fq6 { + c0: c3, + c1: c4, + c2: Fq2::zero(), + }, + }); + + assert_eq!(a, b); + } + } + + #[test] + fn test_squaring() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 0xbc, 0xe5, + ]); + + for _ in 0..1000 { + let mut a = Fq12::random(&mut rng); + let mut b = a; + b.mul_assign(&a); + a.square_assign(); + assert_eq!(a, b); + } + } + + #[test] + fn test_frobenius() { + let fq2_basic = Fq2 { + c0: Fq::from_raw_unchecked([ + 0x2d0078036923ffc7, + 0x11e59ea221a3b6d2, + 0x8b1a52e0a90f59ed, + 0xb966ce3bc2108b13, + 0xccc649c4b9532bf3, + 0xf8d295b2ded9dc, + ]), + c1: Fq::from_raw_unchecked([ + 0x977df6efcdaee0db, + 0x946ae52d684fa7ed, + 0xbe203411c66fb3a5, + 0xb3f8afc0ee248cad, + 0x4e464dea5bcfd41e, + 0x12d1137b8a6a837, + ]), + }; + + let fq6_basic = Fq6 { + c0: fq2_basic, + c1: fq2_basic, + c2: fq2_basic, + }; + + let fq12_basic = Fq12 { + c0: fq6_basic, + c1: fq6_basic, + }; + + let mut fq12_test = fq12_basic; + fq12_test.frobenius_map(0); + assert_eq!(fq12_test, fq12_basic); + + let mut fq12_test_2 = fq12_basic; + fq12_test_2.frobenius_map(12); + assert_eq!(fq12_test_2, fq12_basic); + } + + // #[test] + // fn test_frobenius_map_mix() { + // let fq2_basic = Fq2 { + // c0: Fq::from_raw_unchecked([ + // 0x2d0078036923ffc7, + // 0x11e59ea221a3b6d2, + // 0x8b1a52e0a90f59ed, + // 0xb966ce3bc2108b13, + // 0xccc649c4b9532bf3, + // 0xf8d295b2ded9dc, + // ]), + // c1: Fq::from_raw_unchecked([ + // 0x977df6efcdaee0db, + // 0x946ae52d684fa7ed, + // 0xbe203411c66fb3a5, + // 0xb3f8afc0ee248cad, + // 0x4e464dea5bcfd41e, + // 0x12d1137b8a6a837, + // ]), + // }; + + // let fq6_basic = Fq6 { + // c0: fq2_basic, + // c1: fq2_basic, + // c2: fq2_basic, + // }; + + // let fq12_basic = Fq12 { + // c0: fq6_basic, + // c1: 
fq6_basic, + // }; + + // let mut fq12_test_1 = fq12_basic; + // fq12_test_1.frobenius_map(1); + // let mut fq12_test_conjugate_1 = fq12_basic; + // fq12_test_conjugate_1.frobenius_map_conjugate(); + // assert_eq!(fq12_test_1, fq12_test_conjugate_1); + + // let mut fq12_test_1 = fq12_basic; + // fq12_test_1.frobenius_map(2); + // let mut fq12_test_conjugate_1 = fq12_basic; + // fq12_test_conjugate_1.frobenius_map_conjugate(); + // fq12_test_conjugate_1.frobenius_map_conjugate(); + // assert_eq!(fq12_test_1, fq12_test_conjugate_1); + // } + + #[test] + fn test_field() { + crate::tests::field::random_field_tests::("fq12".to_string()); + } +} diff --git a/src/bls12_381/fq2.rs b/src/bls12_381/fq2.rs new file mode 100644 index 00000000..0a415770 --- /dev/null +++ b/src/bls12_381/fq2.rs @@ -0,0 +1,992 @@ +use super::fq::Fq; +use super::LegendreSymbol; +use core::convert::TryInto; +use core::ops::{Add, Mul, Neg, Sub}; +use ff::Field; +use pasta_curves::arithmetic::{FieldExt, Group, SqrtRatio}; +use rand::RngCore; +use serde::{Deserialize, Serialize}; +use std::cmp::Ordering; +use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption}; + +const MODULUS_BITS: u32 = 381; + +/// An element of Fq2, represented by c0 + c1 * u. +#[derive(Copy, Clone, Debug, Serialize, Deserialize)] +pub struct Fq2 { + pub c0: Fq, + pub c1: Fq, +} + +/// `Fq2` elements are ordered lexicographically. +impl Ord for Fq2 { + #[inline(always)] + fn cmp(&self, other: &Fq2) -> Ordering { + match self.c1.cmp(&other.c1) { + Ordering::Greater => Ordering::Greater, + Ordering::Less => Ordering::Less, + Ordering::Equal => self.c0.cmp(&other.c0), + } + } +} + +impl PartialOrd for Fq2 { + #[inline(always)] + fn partial_cmp(&self, other: &Fq2) -> Option { + Some(self.cmp(other)) + } +} + +impl ConditionallySelectable for Fq2 { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Fq2 { + c0: Fq::conditional_select(&a.c0, &b.c0, choice), + c1: Fq::conditional_select(&a.c1, &b.c1, choice), + } + } +} + +impl Eq for Fq2 {} +impl PartialEq for Fq2 { + #[inline] + fn eq(&self, other: &Self) -> bool { + bool::from(self.ct_eq(other)) + } +} + +impl ConstantTimeEq for Fq2 { + fn ct_eq(&self, other: &Self) -> Choice { + self.c0.ct_eq(&other.c0) & self.c1.ct_eq(&other.c1) + } +} + +impl Default for Fq2 { + #[inline] + fn default() -> Self { + Self::zero() + } +} + +impl From for [u8; 96] { + fn from(value: Fq2) -> [u8; 96] { + value.to_bytes() + } +} + +impl<'a> From<&'a Fq2> for [u8; 96] { + fn from(value: &'a Fq2) -> [u8; 96] { + value.to_bytes() + } +} + +impl Neg for Fq2 { + type Output = Fq2; + + #[inline] + fn neg(self) -> Fq2 { + -&self + } +} + +impl<'a> Neg for &'a Fq2 { + type Output = Fq2; + + #[inline] + fn neg(self) -> Fq2 { + self.neg() + } +} + +impl<'a, 'b> Sub<&'b Fq2> for &'a Fq2 { + type Output = Fq2; + + #[inline] + fn sub(self, rhs: &'b Fq2) -> Fq2 { + self.sub(rhs) + } +} + +impl<'a, 'b> Add<&'b Fq2> for &'a Fq2 { + type Output = Fq2; + + #[inline] + fn add(self, rhs: &'b Fq2) -> Fq2 { + self.add(rhs) + } +} + +impl<'a, 'b> Mul<&'b Fq2> for &'a Fq2 { + type Output = Fq2; + + #[inline] + fn mul(self, rhs: &'b Fq2) -> Fq2 { + self.mul(rhs) + } +} + +use crate::{ + impl_add_binop_specify_output, impl_binops_additive, impl_binops_additive_specify_output, + impl_binops_multiplicative, impl_binops_multiplicative_mixed, impl_sub_binop_specify_output, +}; +impl_binops_additive!(Fq2, Fq2); +impl_binops_multiplicative!(Fq2, Fq2); + +impl Fq2 { + pub const fn new(c0: Fq, c1: Fq) -> Self { + Fq2 { c0, c1 
} + } + + pub const fn size() -> usize { + 96 + } + /// Attempts to convert a little-endian byte representation of + /// a scalar into a `Fq`, failing if the input is not canonical. + pub fn from_bytes(bytes: &[u8; 96]) -> CtOption { + let c0 = Fq::from_bytes(bytes[0..48].try_into().unwrap()); + let c1 = Fq::from_bytes(bytes[48..96].try_into().unwrap()); + CtOption::new( + Fq2 { + c0: c0.unwrap(), + c1: c1.unwrap(), + }, + c0.is_some() & c1.is_some(), + ) + } + + /// Converts an element of `Fq2` into a byte representation in + /// little-endian byte order. + pub fn to_bytes(&self) -> [u8; 96] { + let mut res = [0u8; 96]; + let c0_bytes = self.c0.to_bytes(); + let c1_bytes = self.c1.to_bytes(); + res[0..48].copy_from_slice(&c0_bytes[..]); + res[48..96].copy_from_slice(&c1_bytes[..]); + res + } + + pub fn legendre(&self) -> LegendreSymbol { + self.norm().legendre() + } + + pub fn mul_assign(&mut self, other: &Self) { + let mut t1 = self.c0 * other.c0; + let mut t0 = self.c0 + self.c1; + let t2 = self.c1 * other.c1; + self.c1 = other.c0 + other.c1; + self.c0 = t1 - t2; + t1 += t2; + t0 *= self.c1; + self.c1 = t0 - t1; + } + + pub fn square_assign(&mut self) { + let ab = self.c0 * self.c1; + let c0c1 = self.c0 + self.c1; + let mut c0 = -self.c1; + c0 += self.c0; + c0 *= c0c1; + c0 -= ab; + self.c1 = ab.double(); + self.c0 = c0 + ab; + } + + pub fn double(&self) -> Self { + Self { + c0: self.c0.double(), + c1: self.c1.double(), + } + } + + pub fn double_assign(&mut self) { + self.c0 = self.c0.double(); + self.c1 = self.c1.double(); + } + + pub fn add(&self, other: &Self) -> Self { + Self { + c0: self.c0.add(&other.c0), + c1: self.c1.add(&other.c1), + } + } + + pub fn sub(&self, other: &Self) -> Self { + Self { + c0: self.c0.sub(&other.c0), + c1: self.c1.sub(&other.c1), + } + } + + pub fn mul(&self, other: &Self) -> Self { + let mut t = *other; + t.mul_assign(self); + t + } + + pub fn square(&self) -> Self { + let mut t = *self; + t.square_assign(); + t + } + + pub fn neg(&self) -> Self { + Self { + c0: self.c0.neg(), + c1: self.c1.neg(), + } + } + + /// Alternative implementation of frobenius_map(), keeping here for reference + /// Raises this element to p. + // #[inline(always)] + // pub fn frobenius_map_conjugate(&mut self) { + // This is always just a conjugation. 
If you're curious why, here's + // an article about it: https://alicebob.cryptoland.net/the-frobenius-endomorphism-with-finite-fields/ + // self.conjugate() + // self.c1 = -self.c1; + // } + + pub fn frobenius_map(&mut self, power: usize) { + self.c1 = self.c1.mul(&FROBENIUS_COEFF_FQ2_C1[power % 2]); + } + + #[inline(always)] + pub fn mul_by_nonresidue(&mut self) { + // Multiply a + bu by u + 1, getting + // au + a + bu^2 + bu + // and because u^2 = -1, we get + // (a - b) + (a + b)u + let t0 = self.c0; + self.c0 -= self.c1; + self.c1 += t0; + } + + #[inline(always)] + pub fn mul_by_nonresidue_bls12_381(&self) -> Fq2 { + // Multiply a + bu by u + 1, getting + // au + a + bu^2 + bu + // and because u^2 = -1, we get + // (a - b) + (a + b)u + + Fq2 { + c0: self.c0 - self.c1, + c1: self.c0 + self.c1, + } + } + + // Multiply this element by ξ where ξ=i+9 + pub fn mul_by_xi(&mut self) { + // (xi+y)(i+9) = (9x+y)i+(9y-x) + let t0 = self.c0; + let t1 = self.c1; + + // 8*x*i + 8*y + self.double_assign(); + self.double_assign(); + self.double_assign(); + + // 9*y + self.c0 += &t0; + // (9*y - x) + self.c0 -= &t1; + + // (9*x)i + self.c1 += &t1; + // (9*x + y) + self.c1 += &t0; + } + + /// Norm of Fq2 as extension field in i over Fq + pub fn norm(&self) -> Fq { + let mut t0 = self.c0; + let mut t1 = self.c1; + t0 = t0.square(); + t1 = t1.square(); + t1 + t0 + } + + pub fn invert(&self) -> CtOption { + let mut t1 = self.c1; + t1 = t1.square(); + let mut t0 = self.c0; + t0 = t0.square(); + t0 += &t1; + t0.invert().map(|t| { + let mut tmp = Fq2 { + c0: self.c0, + c1: self.c1, + }; + tmp.c0 *= &t; + tmp.c1 *= &t; + tmp.c1 = -tmp.c1; + + tmp + }) + } + + /// Returns whether or not this element is strictly lexicographically + /// larger than its negation. + #[inline] + pub fn lexicographically_largest(&self) -> Choice { + // If this element's c1 coefficient is lexicographically largest + // then it is lexicographically largest. Otherwise, in the event + // the c1 coefficient is zero and the c0 coefficient is + // lexicographically largest, then this element is lexicographically + // largest. + + self.c1.lexicographically_largest() + | (self.c1.is_zero() & self.c0.lexicographically_largest()) + } +} + +impl Field for Fq2 { + fn random(mut rng: impl RngCore) -> Self { + Fq2 { + c0: Fq::random(&mut rng), + c1: Fq::random(&mut rng), + } + } + + fn zero() -> Self { + Fq2 { + c0: Fq::zero(), + c1: Fq::zero(), + } + } + + fn one() -> Self { + Fq2 { + c0: Fq::one(), + c1: Fq::zero(), + } + } + + fn is_zero(&self) -> Choice { + self.c0.is_zero() & self.c1.is_zero() + } + + fn square(&self) -> Self { + self.square() + } + + fn double(&self) -> Self { + self.double() + } + + fn sqrt(&self) -> CtOption { + // Algorithm 9, https://eprint.iacr.org/2012/685.pdf + // with constant time modifications. + + CtOption::new(Fq2::zero(), self.is_zero()).or_else(|| { + // a1 = self^((p - 3) / 4) + let a1 = self.pow_vartime([ + 0xee7f_bfff_ffff_eaaa, + 0x07aa_ffff_ac54_ffff, + 0xd9cc_34a8_3dac_3d89, + 0xd91d_d2e1_3ce1_44af, + 0x92c6_e9ed_90d2_eb35, + 0x0680_447a_8e5f_f9a6, + ]); + + // alpha = a1^2 * self = self^((p - 3) / 2 + 1) = self^((p - 1) / 2) + let alpha = a1.square() * self; + + // x0 = self^((p + 1) / 4) + let x0 = a1 * self; + + // In the event that alpha = -1, the element is order p - 1 and so + // we're just trying to get the square of an element of the subfield + // Fp. This is given by x0 * u, since u = sqrt(-1). Since the element + // x0 = a + bu has b = 0, the solution is therefore au. 
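            // Concretely: x0^2 = self^((p + 1) / 2) = alpha * self, so if alpha = -1
            // then (x0 * u)^2 = x0^2 * u^2 = (-self) * (-1) = self, and x0 * u is the
            // element (-x0.c1) + x0.c0 * u constructed just below.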
+ CtOption::new( + Fq2 { + c0: -x0.c1, + c1: x0.c0, + }, + alpha.ct_eq(&(Fq2::one()).neg()), + ) + // Otherwise, the correct solution is (1 + alpha)^((q - 1) // 2) * x0 + .or_else(|| { + CtOption::new( + (alpha + Fq2::one()).pow_vartime([ + 0xdcff_7fff_ffff_d555, + 0x0f55_ffff_58a9_ffff, + 0xb398_6950_7b58_7b12, + 0xb23b_a5c2_79c2_895f, + 0x258d_d3db_21a5_d66b, + 0x0d00_88f5_1cbf_f34d, + ]) * x0, + Choice::from(1), + ) + }) + // Only return the result if it's really the square root (and so + // self is actually quadratic nonresidue) + .and_then(|sqrt| CtOption::new(sqrt, sqrt.square().ct_eq(self))) + }) + } + + fn invert(&self) -> CtOption { + self.invert() + } +} + +impl From for Fq2 { + fn from(bit: bool) -> Fq2 { + if bit { + Fq2::one() + } else { + Fq2::zero() + } + } +} + +impl From for Fq2 { + fn from(val: u64) -> Self { + Fq2 { + c0: Fq::from(val), + c1: Fq::zero(), + } + } +} + +impl FieldExt for Fq2 { + const MODULUS: &'static str = + "0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab"; + + const ROOT_OF_UNITY_INV: Self = Fq2 { + c0: Fq::zero(), + c1: Fq::zero(), + }; + const DELTA: Self = Fq2 { + c0: Fq::zero(), + c1: Fq::zero(), + }; + const TWO_INV: Fq2 = Fq2 { + c0: Fq::zero(), + c1: Fq::zero(), + }; + + // Unused constant + const ZETA: Self = Fq2 { + c0: Fq::zero(), + c1: Fq::zero(), + }; + + /// Converts a 512-bit little endian integer into + /// a `Fq` by reducing by the modulus. + fn from_bytes_wide(bytes: &[u8; 64]) -> Self { + Self::new(Fq::from_bytes_wide(bytes), Fq::zero()) + } + + fn from_u128(v: u128) -> Self { + Fq2 { + c0: Fq::from_raw_unchecked([v as u64, (v >> 64) as u64, 0, 0, 0, 0]), + c1: Fq::zero(), + } + } + + fn get_lower_128(&self) -> u128 { + self.c0.get_lower_128() + } + + // /// Writes this element in its normalized, little endian form into a buffer. + // fn write(&self, writer: &mut W) -> io::Result<()> { + // let compressed = self.to_bytes(); + // writer.write_all(&compressed[..]) + // } + + // /// Reads a normalized, little endian represented field element from a + // /// buffer. 
+ // fn read(reader: &mut R) -> io::Result { + // let mut compressed = [0u8; 96]; + // reader.read_exact(&mut compressed[..])?; + // Option::from(Self::from_bytes(&compressed)) + // .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "invalid point encoding in proof")) + // } +} + +impl SqrtRatio for Fq2 { + const T_MINUS1_OVER2: [u64; 4] = unimplemented!(); + + fn pow_by_t_minus1_over2(&self) -> Self { + unimplemented!(); + } + + fn get_lower_32(&self) -> u32 { + unimplemented!(); + } + + #[cfg(feature = "sqrt-table")] + fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) { + unimplemented!(); + } + + #[cfg(feature = "sqrt-table")] + fn sqrt_alt(&self) -> (Choice, Self) { + unimplemented!(); + } +} + +impl Group for Fq2 { + type Scalar = Fq2; + + fn group_zero() -> Self { + Self::zero() + } + fn group_add(&mut self, rhs: &Self) { + *self += *rhs; + } + fn group_sub(&mut self, rhs: &Self) { + *self -= *rhs; + } + fn group_scale(&mut self, by: &Self::Scalar) { + *self *= *by; + } +} + +#[derive(Clone, Copy, Debug)] +pub struct Fq2Bytes { + pub slice: [u8; 96], +} + +impl Default for Fq2Bytes { + fn default() -> Self { + Self { slice: [0u8; 96] } + } +} + +impl AsMut<[u8]> for Fq2Bytes { + fn as_mut(&mut self) -> &mut [u8] { + &mut self.slice + } +} + +impl AsRef<[u8]> for Fq2Bytes { + fn as_ref(&self) -> &[u8] { + &self.slice + } +} + +impl ff::PrimeField for Fq2 { + type Repr = Fq2Bytes; + + const NUM_BITS: u32 = MODULUS_BITS; + const CAPACITY: u32 = MODULUS_BITS - 1; + + const S: u32 = 0; + + fn from_repr(repr: Self::Repr) -> CtOption { + let c0 = Fq::from_bytes(&repr.slice[..48].try_into().unwrap()); + let c1 = Fq::from_bytes(&repr.slice[48..].try_into().unwrap()); + // Disallow overflow representation + CtOption::new(Fq2::new(c0.unwrap(), c1.unwrap()), Choice::from(1)) + } + + fn to_repr(&self) -> Self::Repr { + Fq2Bytes { + slice: self.to_bytes(), + } + } + + fn is_odd(&self) -> Choice { + Choice::from(self.to_repr().as_ref()[0] & 1) + } + + fn multiplicative_generator() -> Self { + unimplemented!() + } + + fn root_of_unity() -> Self { + unimplemented!() + } +} + +impl crate::serde::SerdeObject for Fq2 { + fn from_raw_bytes_unchecked(bytes: &[u8]) -> Self { + debug_assert_eq!(bytes.len(), 96); + let [c0, c1] = [0, 48].map(|i| Fq::from_raw_bytes_unchecked(&bytes[i..i + 48])); + Self { c0, c1 } + } + fn from_raw_bytes(bytes: &[u8]) -> Option { + if bytes.len() != 96 { + return None; + } + let [c0, c1] = [0, 48].map(|i| Fq::from_raw_bytes(&bytes[i..i + 48])); + c0.zip(c1).map(|(c0, c1)| Self { c0, c1 }) + } + fn to_raw_bytes(&self) -> Vec { + let mut res = Vec::with_capacity(96); + for limb in self.c0.0.iter().chain(self.c1.0.iter()) { + res.extend_from_slice(&limb.to_le_bytes()); + } + res + } + fn read_raw_unchecked(reader: &mut R) -> Self { + let [c0, c1] = [(); 2].map(|_| Fq::read_raw_unchecked(reader)); + Self { c0, c1 } + } + fn read_raw(reader: &mut R) -> std::io::Result { + let c0 = Fq::read_raw(reader)?; + let c1 = Fq::read_raw(reader)?; + Ok(Self { c0, c1 }) + } + fn write_raw(&self, writer: &mut W) -> std::io::Result<()> { + self.c0.write_raw(writer)?; + self.c1.write_raw(writer) + } +} + +pub const FROBENIUS_COEFF_FQ2_C1: [Fq; 2] = [ + // Fq(-1)**(((q^0) - 1) / 2) + // it's 1 in Montgommery form + Fq([ + 0x760900000002fffd, + 0xebf4000bc40c0002, + 0x5f48985753c758ba, + 0x77ce585370525745, + 0x5c071a97a256ec6d, + 0x15f65ec3fa80e493, + ]), + // Fq(-1)**(((q^1) - 1) / 2) + Fq([ + 0x43f5fffffffcaaae, + 0x32b7fff2ed47fffd, + 0x7e83a49a2e99d69, + 0xeca8f3318332bb7a, + 
0xef148d1ea0f4c069, + 0x40ab3263eff0206, + ]), +]; + +#[cfg(test)] +mod test { + use super::*; + use rand::SeedableRng; + use rand_core::OsRng; + use rand_xorshift::XorShiftRng; + + #[test] + #[ignore] + fn test_ser() { + let a0 = Fq2::random(OsRng); + let a_bytes = a0.to_bytes(); + let a1 = Fq2::from_bytes(&a_bytes).unwrap(); + assert_eq!(a0, a1); + } + + #[test] + fn test_fq2_ordering() { + let mut a = Fq2 { + c0: Fq::zero(), + c1: Fq::zero(), + }; + + let mut b = a; + + assert!(a.cmp(&b) == Ordering::Equal); + b.c0 += &Fq::one(); + assert!(a.cmp(&b) == Ordering::Less); + a.c0 += &Fq::one(); + assert!(a.cmp(&b) == Ordering::Equal); + b.c1 += &Fq::one(); + assert!(a.cmp(&b) == Ordering::Less); + a.c0 += &Fq::one(); + assert!(a.cmp(&b) == Ordering::Less); + a.c1 += &Fq::one(); + assert!(a.cmp(&b) == Ordering::Greater); + b.c0 += &Fq::one(); + assert!(a.cmp(&b) == Ordering::Equal); + } + + #[test] + fn test_fq2_basics() { + assert_eq!( + Fq2 { + c0: Fq::zero(), + c1: Fq::zero(), + }, + Fq2::zero() + ); + assert_eq!( + Fq2 { + c0: Fq::one(), + c1: Fq::zero(), + }, + Fq2::one() + ); + assert_eq!(Fq2::zero().is_zero().unwrap_u8(), 1); + assert_eq!(Fq2::one().is_zero().unwrap_u8(), 0); + assert_eq!( + Fq2 { + c0: Fq::zero(), + c1: Fq::one(), + } + .is_zero() + .unwrap_u8(), + 0 + ); + } + + #[test] + fn test_fq2_squaring() { + let mut a = Fq2 { + c0: Fq::one(), + c1: Fq::one(), + }; // u + 1 + a.square_assign(); + assert_eq!( + a, + Fq2 { + c0: Fq::zero(), + c1: Fq::one() + Fq::one(), + } + ); // 2u + + let mut a = Fq2 { + c0: Fq::zero(), + c1: Fq::one(), + }; // u + a.square_assign(); + assert_eq!(a, { + let neg1 = -Fq::one(); + Fq2 { + c0: neg1, + c1: Fq::zero(), + } + }); // -1 + } + + #[test] + fn test_fq2_mul_nonresidue() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 0xbc, 0xe5, + ]); + + let nqr = Fq2 { + c0: Fq::one(), + c1: Fq::one(), + }; + + for _ in 0..1000 { + let mut a = Fq2::random(&mut rng); + let mut b = a; + a.mul_by_nonresidue(); + b.mul_assign(&nqr); + + assert_eq!(a, b); + } + } + + #[test] + fn test_fq2_legendre() { + assert_eq!(LegendreSymbol::Zero, Fq2::zero().legendre()); + // i^2 = -1 + let mut m1 = Fq2::one(); + m1 = m1.neg(); + assert_eq!(LegendreSymbol::QuadraticResidue, m1.legendre()); + m1.mul_by_nonresidue(); + assert_eq!(LegendreSymbol::QuadraticNonResidue, m1.legendre()); + } + + #[test] + pub fn test_sqrt() { + // a = 1488924004771393321054797166853618474668089414631333405711627789629391903630694737978065425271543178763948256226639*u + 784063022264861764559335808165825052288770346101304131934508881646553551234697082295473567906267937225174620141295 + let a = Fq2 { + c0: Fq::from_raw_unchecked([ + 0x2bee_d146_27d7_f9e9, + 0xb661_4e06_660e_5dce, + 0x06c4_cc7c_2f91_d42c, + 0x996d_7847_4b7a_63cc, + 0xebae_bc4c_820d_574e, + 0x1886_5e12_d93f_d845, + ]), + c1: Fq::from_raw_unchecked([ + 0x7d82_8664_baf4_f566, + 0xd17e_6639_96ec_7339, + 0x679e_ad55_cb40_78d0, + 0xfe3b_2260_e001_ec28, + 0x3059_93d0_43d9_1b68, + 0x0626_f03c_0489_b72d, + ]), + }; + + assert_eq!(a.sqrt().unwrap().square(), a); + + // b = 5, which is a generator of the p - 1 order + // multiplicative subgroup + let b = Fq2 { + c0: Fq::from_raw_unchecked([ + 0x6631_0000_0010_5545, + 0x2114_0040_0eec_000d, + 0x3fa7_af30_c820_e316, + 0xc52a_8b8d_6387_695d, + 0x9fb4_e61d_1e83_eac5, + 0x005c_b922_afe8_4dc7, + ]), + c1: Fq::zero(), + }; + + assert_eq!(b.sqrt().unwrap().square(), b); + + // c = 25, which is a generator of the (p - 
1) / 2 order + // multiplicative subgroup + let c = Fq2 { + c0: Fq::from_raw_unchecked([ + 0x44f6_0000_0051_ffae, + 0x86b8_0141_9948_0043, + 0xd715_9952_f1f3_794a, + 0x755d_6e3d_fe1f_fc12, + 0xd36c_d6db_5547_e905, + 0x02f8_c8ec_bf18_67bb, + ]), + c1: Fq::zero(), + }; + + assert_eq!(c.sqrt().unwrap().square(), c); + + // 2155129644831861015726826462986972654175647013268275306775721078997042729172900466542651176384766902407257452753362*u + 2796889544896299244102912275102369318775038861758288697415827248356648685135290329705805931514906495247464901062529 + // is nonsquare. + assert!(bool::from( + Fq2 { + c0: Fq::from_raw_unchecked([ + 0xc5fa_1bc8_fd00_d7f6, + 0x3830_ca45_4606_003b, + 0x2b28_7f11_04b1_02da, + 0xa7fb_30f2_8230_f23e, + 0x339c_db9e_e953_dbf0, + 0x0d78_ec51_d989_fc57, + ]), + c1: Fq::from_raw_unchecked([ + 0x27ec_4898_cf87_f613, + 0x9de1_394e_1abb_05a5, + 0x0947_f85d_c170_fc14, + 0x586f_bc69_6b61_14b7, + 0x2b34_75a4_077d_7169, + 0x13e1_c895_cc4b_6c22, + ]) + } + .sqrt() + .is_none() + )); + } + + #[test] + fn test_frobenius_map() { + let fq2_basic = Fq2 { + c0: Fq::from_raw_unchecked([ + 0x2d0078036923ffc7, + 0x11e59ea221a3b6d2, + 0x8b1a52e0a90f59ed, + 0xb966ce3bc2108b13, + 0xccc649c4b9532bf3, + 0xf8d295b2ded9dc, + ]), + c1: Fq::from_raw_unchecked([ + 0x977df6efcdaee0db, + 0x946ae52d684fa7ed, + 0xbe203411c66fb3a5, + 0xb3f8afc0ee248cad, + 0x4e464dea5bcfd41e, + 0x12d1137b8a6a837, + ]), + }; + + let mut fq2_test = fq2_basic; + fq2_test.frobenius_map(0); + assert_eq!(fq2_test, fq2_basic); + + let mut fq2_test_2 = fq2_basic; + fq2_test_2.frobenius_map(1); + assert_eq!( + fq2_test_2, + Fq2 { + c0: Fq::from_raw_unchecked([ + 0x2d0078036923ffc7, + 0x11e59ea221a3b6d2, + 0x8b1a52e0a90f59ed, + 0xb966ce3bc2108b13, + 0xccc649c4b9532bf3, + 0xf8d295b2ded9dc + ]), + c1: Fq::from_raw_unchecked([ + 0x228109103250c9d0, + 0x8a411ad149045812, + 0xa9109e8f3041427e, + 0xb07e9bc405608611, + 0xfcd559cbe77bd8b8, + 0x18d400b280d93e62 + ]), + } + ); + + let mut fq2_test_3 = fq2_basic; + fq2_test_3.frobenius_map(1); + fq2_test_3.frobenius_map(1); + assert_eq!(fq2_test_3, fq2_basic); + + let mut fq2_test_4 = fq2_basic; + fq2_test_4.frobenius_map(2); + assert_eq!(fq2_test_4, fq2_basic); + } + + // #[test] + // fn test_frobenius_map_conjugate() { + // let fq2_basic = Fq2 { + // c0: Fq::from_raw_unchecked([ + // 0x2d0078036923ffc7, + // 0x11e59ea221a3b6d2, + // 0x8b1a52e0a90f59ed, + // 0xb966ce3bc2108b13, + // 0xccc649c4b9532bf3, + // 0xf8d295b2ded9dc, + // ]), + // c1: Fq::from_raw_unchecked([ + // 0x977df6efcdaee0db, + // 0x946ae52d684fa7ed, + // 0xbe203411c66fb3a5, + // 0xb3f8afc0ee248cad, + // 0x4e464dea5bcfd41e, + // 0x12d1137b8a6a837, + // ]), + // }; + + // let mut fq2_test_2 = fq2_basic; + // fq2_test_2.frobenius_map_conjugate(); + // assert_eq!( + // fq2_test_2, + // Fq2 { + // c0: Fq::from_raw_unchecked([ + // 0x2d0078036923ffc7, + // 0x11e59ea221a3b6d2, + // 0x8b1a52e0a90f59ed, + // 0xb966ce3bc2108b13, + // 0xccc649c4b9532bf3, + // 0xf8d295b2ded9dc + // ]), + // c1: Fq::from_raw_unchecked([ + // 0x228109103250c9d0, + // 0x8a411ad149045812, + // 0xa9109e8f3041427e, + // 0xb07e9bc405608611, + // 0xfcd559cbe77bd8b8, + // 0x18d400b280d93e62 + // ]), + // } + // ); + + // let mut fq2_test_3 = fq2_basic; + // fq2_test_3.frobenius_map_conjugate(); + // fq2_test_3.frobenius_map_conjugate(); + // assert_eq!(fq2_test_3, fq2_basic); + // } + + #[test] + fn test_field() { + crate::tests::field::random_field_tests::("fq2".to_string()); + } + + #[test] + fn test_serialization() { + 
crate::tests::field::random_serialization_test::("fq2".to_string()); + } +} diff --git a/src/bls12_381/fq6.rs b/src/bls12_381/fq6.rs new file mode 100644 index 00000000..682b8a09 --- /dev/null +++ b/src/bls12_381/fq6.rs @@ -0,0 +1,877 @@ +use super::fq::Fq; +use super::fq2::Fq2; +use core::ops::{Add, Mul, Neg, Sub}; +use ff::Field; +use rand::RngCore; +use serde::{Deserialize, Serialize}; +use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption}; + +#[derive(Copy, Clone, Debug, Eq, PartialEq, Default, Serialize, Deserialize)] +pub struct Fq6 { + pub c0: Fq2, + pub c1: Fq2, + pub c2: Fq2, +} + +impl ConditionallySelectable for Fq6 { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Fq6 { + c0: Fq2::conditional_select(&a.c0, &b.c0, choice), + c1: Fq2::conditional_select(&a.c1, &b.c1, choice), + c2: Fq2::conditional_select(&a.c2, &b.c2, choice), + } + } +} + +impl ConstantTimeEq for Fq6 { + fn ct_eq(&self, other: &Self) -> Choice { + self.c0.ct_eq(&other.c0) & self.c1.ct_eq(&other.c1) & self.c2.ct_eq(&other.c2) + } +} + +impl Neg for Fq6 { + type Output = Fq6; + + #[inline] + fn neg(self) -> Fq6 { + -&self + } +} + +impl<'a> Neg for &'a Fq6 { + type Output = Fq6; + + #[inline] + fn neg(self) -> Fq6 { + self.neg() + } +} + +impl<'a, 'b> Sub<&'b Fq6> for &'a Fq6 { + type Output = Fq6; + + #[inline] + fn sub(self, rhs: &'b Fq6) -> Fq6 { + self.sub(rhs) + } +} + +impl<'a, 'b> Add<&'b Fq6> for &'a Fq6 { + type Output = Fq6; + + #[inline] + fn add(self, rhs: &'b Fq6) -> Fq6 { + self.add(rhs) + } +} + +impl<'a, 'b> Mul<&'b Fq6> for &'a Fq6 { + type Output = Fq6; + + #[inline] + fn mul(self, rhs: &'b Fq6) -> Fq6 { + self.mul(rhs) + } +} + +impl From for Fq6 { + fn from(f: Fq2) -> Fq6 { + Fq6 { + c0: f, + c1: Fq2::zero(), + c2: Fq2::zero(), + } + } +} + +use crate::{ + impl_add_binop_specify_output, impl_binops_additive, impl_binops_additive_specify_output, + impl_binops_multiplicative, impl_binops_multiplicative_mixed, impl_sub_binop_specify_output, +}; +impl_binops_additive!(Fq6, Fq6); +impl_binops_multiplicative!(Fq6, Fq6); + +impl Fq6 { + pub fn mul_assign(&mut self, other: &Self) { + let mut a_a = self.c0; + let mut b_b = self.c1; + let mut c_c = self.c2; + a_a *= &other.c0; + b_b *= &other.c1; + c_c *= &other.c2; + + let mut t1 = other.c1; + t1 += &other.c2; + { + let mut tmp = self.c1; + tmp += &self.c2; + + t1 *= &tmp; + t1 -= &b_b; + t1 -= &c_c; + t1.mul_by_nonresidue(); + t1 += &a_a; + } + + let mut t3 = other.c0; + t3 += &other.c2; + { + let mut tmp = self.c0; + tmp += &self.c2; + + t3 *= &tmp; + t3 -= &a_a; + t3 += &b_b; + t3 -= &c_c; + } + + let mut t2 = other.c0; + t2 += &other.c1; + { + let mut tmp = self.c0; + tmp += &self.c1; + + t2 *= &tmp; + t2 -= &a_a; + t2 -= &b_b; + c_c.mul_by_nonresidue(); + t2 += &c_c; + } + + self.c0 = t1; + self.c1 = t2; + self.c2 = t3; + } + + pub fn square_assign(&mut self) { + // s0 = a^2 + let mut s0 = self.c0; + s0.square_assign(); + // s1 = 2ab + let mut ab = self.c0; + ab *= &self.c1; + let mut s1 = ab; + s1.double_assign(); + // s2 = (a - b + c)^2 + let mut s2 = self.c0; + s2 -= &self.c1; + s2 += &self.c2; + s2.square_assign(); + // bc + let mut bc = self.c1; + bc *= &self.c2; + // s3 = 2bc + let mut s3 = bc; + s3.double_assign(); + // s4 = c^2 + let mut s4 = self.c2; + s4.square_assign(); + + // new c0 = 2bc.mul_by_xi + a^2 + self.c0 = s3; + self.c0.mul_by_nonresidue(); + // self.c0.mul_by_xi(); + self.c0 += &s0; + + // new c1 = (c^2).mul_by_xi + 2ab + self.c1 = s4; + self.c1.mul_by_nonresidue(); + // 
self.c1.mul_by_xi(); + self.c1 += &s1; + + // new c2 = 2ab + (a - b + c)^2 + 2bc - a^2 - c^2 = b^2 + 2ac + self.c2 = s1; + self.c2 += &s2; + self.c2 += &s3; + self.c2 -= &s0; + self.c2 -= &s4; + } + + pub fn double(&self) -> Self { + Self { + c0: self.c0.double(), + c1: self.c1.double(), + c2: self.c2.double(), + } + } + + pub fn double_assign(&mut self) { + self.c0 = self.c0.double(); + self.c1 = self.c1.double(); + self.c2 = self.c2.double(); + } + + pub fn add(&self, other: &Self) -> Self { + Self { + c0: self.c0 + other.c0, + c1: self.c1 + other.c1, + c2: self.c2 + other.c2, + } + } + + pub fn sub(&self, other: &Self) -> Self { + Self { + c0: self.c0 - other.c0, + c1: self.c1 - other.c1, + c2: self.c2 - other.c2, + } + } + + pub fn mul(&self, other: &Self) -> Self { + let mut t = *other; + t.mul_assign(self); + t + } + + pub fn square(&self) -> Self { + let mut t = *self; + t.square_assign(); + t + } + + pub fn neg(&self) -> Self { + Self { + c0: -self.c0, + c1: -self.c1, + c2: -self.c2, + } + } + + pub fn frobenius_map(&mut self, power: usize) { + self.c0.frobenius_map(power); + self.c1.frobenius_map(power); + self.c2.frobenius_map(power); + + self.c1.mul_assign(&FROBENIUS_COEFF_FQ6_C1[power % 6]); + self.c2.mul_assign(&FROBENIUS_COEFF_FQ6_C2[power % 6]); + } + + /// Alternative implementation of frobenius_map(), keeping here for reference + /// Raises this element to p. + // #[inline(always)] + // pub fn frobenius_map_conjugate(&mut self) { + // self.c0.frobenius_map_conjugate(); + // self.c1.frobenius_map_conjugate(); + // self.c2.frobenius_map_conjugate(); + + // // c1 = c1 * (u + 1)^((p - 1) / 3) + // self.c1 *= Fq2 { + // c0: Fq::zero(), + // c1: Fq::from_raw_unchecked([ + // 0xcd03_c9e4_8671_f071, + // 0x5dab_2246_1fcd_a5d2, + // 0x5870_42af_d385_1b95, + // 0x8eb6_0ebe_01ba_cb9e, + // 0x03f9_7d6e_83d0_50d2, + // 0x18f0_2065_5463_8741, + // ]), + // }; + + // // c2 = c2 * (u + 1)^((2p - 2) / 3) + // self.c2 *= Fq2 { + // c0: Fq::from_raw_unchecked([ + // 0x890d_c9e4_8675_45c3, + // 0x2af3_2253_3285_a5d5, + // 0x5088_0866_309b_7e2c, + // 0xa20d_1b8c_7e88_1024, + // 0x14e4_f04f_e2db_9068, + // 0x14e5_6d3f_1564_853a, + // ]), + // c1: Fq::zero(), + // }; + // } + + /// Multiply by cubic nonresidue v. + pub fn mul_by_nonresidue(&mut self) { + use std::mem::swap; + swap(&mut self.c0, &mut self.c1); + swap(&mut self.c0, &mut self.c2); + // c0, c1, c2 -> c2, c0, c1 + self.c0.mul_by_nonresidue(); + } + + /// Multiply by quadratic nonresidue v. + pub fn mul_by_nonresidue_bls12_381(&self) -> Self { + // Given a + bv + cv^2, this produces + // av + bv^2 + cv^3 + // but because v^3 = u + 1, we have + // c(u + 1) + av + v^2 + + Fq6 { + c0: self.c2.mul_by_nonresidue_bls12_381(), + c1: self.c0, + c2: self.c1, + } + } + + /// Multiply by cubic nonresidue v. 
+ pub fn mul_by_v(&mut self) { + use std::mem::swap; + swap(&mut self.c0, &mut self.c1); + swap(&mut self.c0, &mut self.c2); + + self.c0.mul_by_xi(); + } + + pub fn mul_by_1(&mut self, c1: &Fq2) { + let mut b_b = self.c1; + b_b *= c1; + + let mut t1 = *c1; + { + let mut tmp = self.c1; + tmp += &self.c2; + + t1 *= &tmp; + t1 -= &b_b; + t1.mul_by_nonresidue(); + } + + let mut t2 = *c1; + { + let mut tmp = self.c0; + tmp += &self.c1; + + t2 *= &tmp; + t2 -= &b_b; + } + + self.c0 = t1; + self.c1 = t2; + self.c2 = b_b; + } + + pub fn mul_by_1_bls12_381(&self, c1: &Fq2) -> Fq6 { + Fq6 { + c0: (self.c2 * c1).mul_by_nonresidue_bls12_381(), + c1: self.c0 * c1, + c2: self.c1 * c1, + } + } + + pub fn mul_by_01(&mut self, c0: &Fq2, c1: &Fq2) { + let mut a_a = self.c0; + let mut b_b = self.c1; + a_a *= c0; + b_b *= c1; + + let mut t1 = *c1; + { + let mut tmp = self.c1; + tmp += &self.c2; + + t1 *= &tmp; + t1 -= &b_b; + t1.mul_by_nonresidue(); + t1 += &a_a; + } + + let mut t3 = *c0; + { + let mut tmp = self.c0; + tmp += &self.c2; + + t3 *= &tmp; + t3 -= &a_a; + t3 += &b_b; + } + + let mut t2 = *c0; + t2 += c1; + { + let mut tmp = self.c0; + tmp += &self.c1; + + t2 *= &tmp; + t2 -= &a_a; + t2 -= &b_b; + } + + self.c0 = t1; + self.c1 = t2; + self.c2 = t3; + } + + pub fn mul_by_01_bls12_381(&self, c0: &Fq2, c1: &Fq2) -> Fq6 { + let a_a = self.c0 * c0; + let b_b = self.c1 * c1; + + let t1 = (self.c2 * c1).mul_by_nonresidue_bls12_381() + a_a; + + let t2 = (c0 + c1) * (self.c0 + self.c1) - a_a - b_b; + + let t3 = self.c2 * c0 + b_b; + + Fq6 { + c0: t1, + c1: t2, + c2: t3, + } + } + + fn invert(&self) -> CtOption { + let mut c0 = self.c2; + c0.mul_by_nonresidue(); + c0 *= &self.c1; + c0 = -c0; + { + let mut c0s = self.c0; + c0s.square_assign(); + c0 += &c0s; + } + let mut c1 = self.c2; + c1.square_assign(); + c1.mul_by_nonresidue(); + { + let mut c01 = self.c0; + c01 *= &self.c1; + c1 -= &c01; + } + let mut c2 = self.c1; + c2.square_assign(); + { + let mut c02 = self.c0; + c02 *= &self.c2; + c2 -= &c02; + } + + let mut tmp1 = self.c2; + tmp1 *= &c1; + let mut tmp2 = self.c1; + tmp2 *= &c2; + tmp1 += &tmp2; + tmp1.mul_by_nonresidue(); + tmp2 = self.c0; + tmp2 *= &c0; + tmp1 += &tmp2; + + tmp1.invert().map(|t| { + let mut tmp = Fq6 { + c0: t, + c1: t, + c2: t, + }; + tmp.c0 *= &c0; + tmp.c1 *= &c1; + tmp.c2 *= &c2; + + tmp + }) + } +} + +impl Field for Fq6 { + fn random(mut rng: impl RngCore) -> Self { + Fq6 { + c0: Fq2::random(&mut rng), + c1: Fq2::random(&mut rng), + c2: Fq2::random(&mut rng), + } + } + + fn zero() -> Self { + Fq6 { + c0: Fq2::zero(), + c1: Fq2::zero(), + c2: Fq2::zero(), + } + } + + fn one() -> Self { + Fq6 { + c0: Fq2::one(), + c1: Fq2::zero(), + c2: Fq2::zero(), + } + } + + fn is_zero(&self) -> Choice { + self.c0.is_zero() & self.c1.is_zero() + } + + fn square(&self) -> Self { + self.square() + } + + fn double(&self) -> Self { + self.double() + } + + fn sqrt(&self) -> CtOption { + unimplemented!() + } + + fn invert(&self) -> CtOption { + self.invert() + } +} + +pub const FROBENIUS_COEFF_FQ6_C1: [Fq2; 6] = [ + // Fq2(u + 1)**(((q^0) - 1) / 3) + Fq2 { + c0: Fq([ + 0x760900000002fffd, + 0xebf4000bc40c0002, + 0x5f48985753c758ba, + 0x77ce585370525745, + 0x5c071a97a256ec6d, + 0x15f65ec3fa80e493, + ]), + c1: Fq([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]), + }, + // Fq2(u + 1)**(((q^1) - 1) / 3) + Fq2 { + c0: Fq([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]), + c1: Fq([ + 0xcd03c9e48671f071, + 0x5dab22461fcda5d2, + 0x587042afd3851b95, + 0x8eb60ebe01bacb9e, + 0x3f97d6e83d050d2, + 0x18f0206554638741, + ]), + }, + // Fq2(u + 
1)**(((q^2) - 1) / 3) + Fq2 { + c0: Fq([ + 0x30f1361b798a64e8, + 0xf3b8ddab7ece5a2a, + 0x16a8ca3ac61577f7, + 0xc26a2ff874fd029b, + 0x3636b76660701c6e, + 0x51ba4ab241b6160, + ]), + c1: Fq([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]), + }, + // Fq2(u + 1)**(((q^3) - 1) / 3) + Fq2 { + c0: Fq([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]), + c1: Fq([ + 0x760900000002fffd, + 0xebf4000bc40c0002, + 0x5f48985753c758ba, + 0x77ce585370525745, + 0x5c071a97a256ec6d, + 0x15f65ec3fa80e493, + ]), + }, + // Fq2(u + 1)**(((q^4) - 1) / 3) + Fq2 { + c0: Fq([ + 0xcd03c9e48671f071, + 0x5dab22461fcda5d2, + 0x587042afd3851b95, + 0x8eb60ebe01bacb9e, + 0x3f97d6e83d050d2, + 0x18f0206554638741, + ]), + c1: Fq([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]), + }, + // Fq2(u + 1)**(((q^5) - 1) / 3) + Fq2 { + c0: Fq([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]), + c1: Fq([ + 0x30f1361b798a64e8, + 0xf3b8ddab7ece5a2a, + 0x16a8ca3ac61577f7, + 0xc26a2ff874fd029b, + 0x3636b76660701c6e, + 0x51ba4ab241b6160, + ]), + }, +]; + +pub const FROBENIUS_COEFF_FQ6_C2: [Fq2; 6] = [ + // Fq2(u + 1)**(((2q^0) - 2) / 3) + Fq2 { + c0: Fq([ + 0x760900000002fffd, + 0xebf4000bc40c0002, + 0x5f48985753c758ba, + 0x77ce585370525745, + 0x5c071a97a256ec6d, + 0x15f65ec3fa80e493, + ]), + c1: Fq([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]), + }, + // Fq2(u + 1)**(((2q^1) - 2) / 3) + Fq2 { + c0: Fq([ + 0x890dc9e4867545c3, + 0x2af322533285a5d5, + 0x50880866309b7e2c, + 0xa20d1b8c7e881024, + 0x14e4f04fe2db9068, + 0x14e56d3f1564853a, + ]), + c1: Fq([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]), + }, + // Fq2(u + 1)**(((2q^2) - 2) / 3) + Fq2 { + c0: Fq([ + 0xcd03c9e48671f071, + 0x5dab22461fcda5d2, + 0x587042afd3851b95, + 0x8eb60ebe01bacb9e, + 0x3f97d6e83d050d2, + 0x18f0206554638741, + ]), + c1: Fq([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]), + }, + // Fq2(u + 1)**(((2q^3) - 2) / 3) + Fq2 { + c0: Fq([ + 0x43f5fffffffcaaae, + 0x32b7fff2ed47fffd, + 0x7e83a49a2e99d69, + 0xeca8f3318332bb7a, + 0xef148d1ea0f4c069, + 0x40ab3263eff0206, + ]), + c1: Fq([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]), + }, + // Fq2(u + 1)**(((2q^4) - 2) / 3) + Fq2 { + c0: Fq([ + 0x30f1361b798a64e8, + 0xf3b8ddab7ece5a2a, + 0x16a8ca3ac61577f7, + 0xc26a2ff874fd029b, + 0x3636b76660701c6e, + 0x51ba4ab241b6160, + ]), + c1: Fq([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]), + }, + // Fq2(u + 1)**(((2q^5) - 2) / 3) + Fq2 { + c0: Fq([ + 0xecfb361b798dba3a, + 0xc100ddb891865a2c, + 0xec08ff1232bda8e, + 0xd5c13cc6f1ca4721, + 0x47222a47bf7b5c04, + 0x110f184e51c5f59, + ]), + c1: Fq([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]), + }, +]; + +#[cfg(test)] +mod test { + use super::*; + use rand::SeedableRng; + use rand_xorshift::XorShiftRng; + + #[test] + fn test_fq6_mul_nonresidue() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 0xbc, 0xe5, + ]); + + let nqr = Fq6 { + c0: Fq2::zero(), + c1: Fq2::one(), + c2: Fq2::zero(), + }; + + for _ in 0..1000 { + let mut a = Fq6::random(&mut rng); + let mut b = a; + a.mul_by_nonresidue(); + b.mul_assign(&nqr); + + assert_eq!(a, b); + } + } + + #[test] + fn test_fq6_mul_by_1() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 0xbc, 0xe5, + ]); + + for _ in 0..1000 { + let c1 = Fq2::random(&mut rng); + let mut a = Fq6::random(&mut rng); + let mut b = a; + + a.mul_by_1(&c1); + b.mul_assign(&Fq6 { + c0: Fq2::zero(), + c1, + c2: Fq2::zero(), + }); + + assert_eq!(a, b); + } + } + + #[test] + fn test_fq6_mul_by_01() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 
0xbc, 0xe5, + ]); + + for _ in 0..1000 { + let c0 = Fq2::random(&mut rng); + let c1 = Fq2::random(&mut rng); + let mut a = Fq6::random(&mut rng); + let mut b = a; + + a.mul_by_01(&c0, &c1); + b.mul_assign(&Fq6 { + c0, + c1, + c2: Fq2::zero(), + }); + + assert_eq!(a, b); + } + } + + #[test] + fn test_squaring() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 0xbc, 0xe5, + ]); + + for _ in 0..1000 { + let mut a = Fq6::random(&mut rng); + let mut b = a; + b.mul_assign(&a); + a.square_assign(); + assert_eq!(a, b); + } + } + + #[test] + fn test_frobenius() { + let fq2_basic = Fq2 { + c0: Fq::from_raw_unchecked([ + 0x2d0078036923ffc7, + 0x11e59ea221a3b6d2, + 0x8b1a52e0a90f59ed, + 0xb966ce3bc2108b13, + 0xccc649c4b9532bf3, + 0xf8d295b2ded9dc, + ]), + c1: Fq::from_raw_unchecked([ + 0x977df6efcdaee0db, + 0x946ae52d684fa7ed, + 0xbe203411c66fb3a5, + 0xb3f8afc0ee248cad, + 0x4e464dea5bcfd41e, + 0x12d1137b8a6a837, + ]), + }; + + let fq6_basic = Fq6 { + c0: fq2_basic, + c1: fq2_basic, + c2: fq2_basic, + }; + + let mut fq6_test = fq6_basic; + fq6_test.frobenius_map(0); + assert_eq!(fq6_test, fq6_basic); + + let mut fq6_test = fq6_basic; + fq6_test.frobenius_map(1); + assert_ne!(fq6_test, fq6_basic); + + let mut fq6_test = fq6_basic; + fq6_test.frobenius_map(6); + assert_eq!(fq6_test, fq6_basic); + } + + // #[test] + // fn test_frobenius_map_conjugate() { + // let fq2_basic = Fq2 { + // c0: Fq::from_raw_unchecked([ + // 0x2d0078036923ffc7, + // 0x11e59ea221a3b6d2, + // 0x8b1a52e0a90f59ed, + // 0xb966ce3bc2108b13, + // 0xccc649c4b9532bf3, + // 0xf8d295b2ded9dc, + // ]), + // c1: Fq::from_raw_unchecked([ + // 0x228109103250c9d0, + // 0x8a411ad149045812, + // 0xa9109e8f3041427e, + // 0xb07e9bc405608611, + // 0xfcd559cbe77bd8b8, + // 0x18d400b280d93e62, + // ]), + // }; + + // let fq6_basic = Fq6 { + // c0: fq2_basic, + // c1: fq2_basic, + // c2: fq2_basic, + // }; + + // let mut fq6_test = fq6_basic; + // fq6_test.frobenius_map_conjugate(); + // fq6_test.frobenius_map_conjugate(); + // fq6_test.frobenius_map_conjugate(); + // fq6_test.frobenius_map_conjugate(); + // fq6_test.frobenius_map_conjugate(); + // fq6_test.frobenius_map_conjugate(); + // assert_eq!(fq6_test, fq6_basic); + + // let mut fq6_test = fq6_basic; + // fq6_test.frobenius_map_conjugate(); + // assert_ne!(fq6_test, fq6_basic); + // } + + // #[test] + // fn test_frobenius_map_mix() { + // let fq2_basic = Fq2 { + // c0: Fq::from_raw_unchecked([ + // 0x2d0078036923ffc7, + // 0x11e59ea221a3b6d2, + // 0x8b1a52e0a90f59ed, + // 0xb966ce3bc2108b13, + // 0xccc649c4b9532bf3, + // 0xf8d295b2ded9dc, + // ]), + // c1: Fq::from_raw_unchecked([ + // 0x977df6efcdaee0db, + // 0x946ae52d684fa7ed, + // 0xbe203411c66fb3a5, + // 0xb3f8afc0ee248cad, + // 0x4e464dea5bcfd41e, + // 0x12d1137b8a6a837, + // ]), + // }; + + // let fq6_basic = Fq6 { + // c0: fq2_basic, + // c1: fq2_basic, + // c2: fq2_basic, + // }; + + // let mut fq6_test_1 = fq6_basic; + // fq6_test_1.frobenius_map(1); + // let mut fq6_test_conjugate_1 = fq6_basic; + // fq6_test_conjugate_1.frobenius_map_conjugate(); + // assert_eq!(fq6_test_1, fq6_test_conjugate_1); + + // let mut fq6_test_1 = fq6_basic; + // fq6_test_1.frobenius_map(2); + // let mut fq6_test_conjugate_1 = fq6_basic; + // fq6_test_conjugate_1.frobenius_map_conjugate(); + // fq6_test_conjugate_1.frobenius_map_conjugate(); + // assert_eq!(fq6_test_1, fq6_test_conjugate_1); + // } + + #[test] + fn test_field() { + 
crate::tests::field::random_field_tests::("fq6".to_string()); + } +} diff --git a/src/bls12_381/fr.rs b/src/bls12_381/fr.rs new file mode 100644 index 00000000..24367246 --- /dev/null +++ b/src/bls12_381/fr.rs @@ -0,0 +1,559 @@ +#[cfg(feature = "asm")] +use super::assembly::assemblyfield; + +use super::LegendreSymbol; +use crate::arithmetic::{adc, mac, macx, sbb}; +use core::convert::TryInto; +use core::fmt; +use core::ops::{Add, Mul, Neg, Sub}; +use ff::PrimeField; +use pasta_curves::arithmetic::{FieldExt, Group, SqrtRatio}; +use rand::RngCore; +use serde::{Deserialize, Serialize}; +use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption}; + +/// This represents an element of $\mathbb{F}r$ where +/// +/// `r = 52435875175126190479447740508185965837690552500527637822603658699938581184513` +/// `r = 0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001` +/// +/// is the scalar field of the BLS12-381 curve. +// The internal representation of this type is four 64-bit unsigned +// integers in little-endian order. `Fr` values are always in +// Montgomery form; i.e., Fr(a) = aR mod r, with R = 2^256. +#[derive(Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] +pub struct Fr(pub(crate) [u64; 4]); + +/// Constant representing the modulus +/// r = 0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001 +const MODULUS: Fr = Fr([ + 0xffffffff00000001, + 0x53bda402fffe5bfe, + 0x3339d80809a1d805, + 0x73eda753299d7d48, +]); + +// The number of bits needed to represent the modulus. +const MODULUS_BITS: u32 = 255; + +const MODULUS_STR: &str = "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001"; + +/// INV = -(r^{-1} mod 2^64) mod 2^64 +const INV: u64 = 0xfffffffeffffffff; + +/// `R = 2^256 mod r` +/// `0x1824b159acc5056f998c4fefecbc4ff55884b7fa0003480200000001fffffffe` +const R: Fr = Fr([ + 0x1fffffffe, + 0x5884b7fa00034802, + 0x998c4fefecbc4ff5, + 0x1824b159acc5056f, +]); + +/// `R^2 = 2^512 mod r` +/// `0x748d9d99f59ff1105d314967254398f2b6cedcb87925c23c999e990f3f29c6d` +const R2: Fr = Fr([ + 0xc999e990f3f29c6d, + 0x2b6cedcb87925c23, + 0x5d314967254398f, + 0x748d9d99f59ff11, +]); + +/// `R^3 = 2^768 mod r` +/// `0x6e2a5bb9c8db33e973d13c71c7b5f4181b3e0d188cf06990c62c1807439b73af` +const R3: Fr = Fr([ + 0xc62c1807439b73af, + 0x1b3e0d188cf06990, + 0x73d13c71c7b5f418, + 0x6e2a5bb9c8db33e9, +]); + +/// `GENERATOR = 7 mod r` is a generator of the `r - 1` order multiplicative +/// subgroup, or in other words a primitive root of the field. +const GENERATOR: Fr = Fr::from_raw([ + 0xefffffff1, + 0x17e363d300189c0f, + 0xff9c57876f8457b0, + 0x351332208fc5a8c4, +]); + +// 2^s * t = MODULUS - 1 with t odd +const S: u32 = 32; + +/// GENERATOR^t where t * 2^s + 1 = r +/// with t odd. In other words, this +/// is a 2^s root of unity. +/// `0x5bf3adda19e9b27baf53ae352a31e645b1b4c801819d7ecb9b58d8c5f0e466a` +const ROOT_OF_UNITY: Fr = Fr([ + 0xb9b58d8c5f0e466a, + 0x5b1b4c801819d7ec, + 0xaf53ae352a31e64, + 0x5bf3adda19e9b27b, +]); + +/// 1 / 2 mod r +/// 0x0c1258acd66282b7ccc627f7f65e27faac425bfd0001a40100000000ffffffff +const TWO_INV: Fr = Fr([ + 0x00000000ffffffff, + 0xac425bfd0001a401, + 0xccc627f7f65e27fa, + 0x0c1258acd66282b7, +]); + +/// 1 / ROOT_OF_UNITY mod r +/// 0x2d2fc049658afd43f9c3f1d75f7a3b2745f37b7f96b6cad34256481adcf3219a +const ROOT_OF_UNITY_INV: Fr = Fr([ + 0x4256481adcf3219a, + 0x45f37b7f96b6cad3, + 0xf9c3f1d75f7a3b27, + 0x2d2fc049658afd43, +]); + +/// GENERATOR^{2^s} where t * 2^s + 1 = r +/// with t odd. 
In other words, this +/// is a t root of unity. +// 0x6185d06627c067cb51e114186a8b970d4b64c08919e299e670e310d3d146f96a +const DELTA: Fr = Fr([ + 0x70e310d3d146f96a, + 0x4b64c08919e299e6, + 0x51e114186a8b970d, + 0x6185d06627c067cb, +]); + +// Unused constant +const ZETA: Fr = Fr::zero(); + +use crate::{ + field_arithmetic, field_common, field_specific, impl_add_binop_specify_output, + impl_binops_additive, impl_binops_additive_specify_output, impl_binops_multiplicative, + impl_binops_multiplicative_mixed, impl_sub_binop_specify_output, +}; +impl_binops_additive!(Fr, Fr); +impl_binops_multiplicative!(Fr, Fr); +#[cfg(not(feature = "asm"))] +field_common!( + Fr, + MODULUS, + INV, + MODULUS_STR, + TWO_INV, + ROOT_OF_UNITY_INV, + DELTA, + ZETA, + R, + R2, + R3 +); +#[cfg(not(feature = "asm"))] +field_arithmetic!(Fr, MODULUS, INV, sparse); +#[cfg(feature = "asm")] +assembly_field!( + Fr, + MODULUS, + INV, + MODULUS_STR, + TWO_INV, + ROOT_OF_UNITY_INV, + DELTA, + ZETA, + R, + R2, + R3 +); + +impl Fr { + fn legendre(&self) -> LegendreSymbol { + // s = self^((r - 1) // 2) + let s = self.pow(&[ + 0x7fffffff80000000, + 0xa9ded2017fff2dff, + 0x199cec0404d0ec02, + 0x39f6d3a994cebea4, + ]); + if s == Self::zero() { + LegendreSymbol::Zero + } else if s == Self::one() { + LegendreSymbol::QuadraticResidue + } else { + LegendreSymbol::QuadraticNonResidue + } + } +} + +impl ff::Field for Fr { + fn random(mut rng: impl RngCore) -> Self { + Self::from_u512([ + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + ]) + } + + fn zero() -> Self { + Self::zero() + } + + fn one() -> Self { + Self::one() + } + + fn double(&self) -> Self { + self.double() + } + + fn is_zero_vartime(&self) -> bool { + self == &Self::zero() + } + + #[inline(always)] + fn square(&self) -> Self { + self.square() + } + + /// Computes the square root of this element, if it exists. + fn sqrt(&self) -> CtOption { + // Tonelli-Shank's algorithm for q mod 16 = 1 + // https://eprint.iacr.org/2012/685.pdf (page 12, algorithm 5) + match self.legendre() { + LegendreSymbol::Zero => CtOption::new(*self, Choice::from(1u8)), + LegendreSymbol::QuadraticNonResidue => CtOption::new(Fr::from(0), Choice::from(0u8)), + LegendreSymbol::QuadraticResidue => { + let mut c = ROOT_OF_UNITY; + // r = self^((t + 1) // 2) + let mut r = self.pow(&[ + 0x7fff2dff80000000, + 0x4d0ec02a9ded201, + 0x94cebea4199cec04, + 0x39f6d3a9, + ]); + // t = self^t + let mut t = self.pow(&[ + 0xfffe5bfeffffffff, + 0x9a1d80553bda402, + 0x299d7d483339d808, + 0x73eda753, + ]); + let mut m = S; + + while t != Self::one() { + let mut i = 1; + { + let mut t2i = t; + t2i = t2i.square(); + loop { + if t2i == Self::one() { + break; + } + t2i = t2i.square(); + i += 1; + } + } + + for _ in 0..(m - i - 1) { + c = c.square(); + } + r = r.mul(&c); + c = c.square(); + t = t.mul(&c); + m = i; + } + + CtOption::new(r, Choice::from(1u8)) + } + } + } + + /// Computes the multiplicative inverse of this element, + /// failing if the element is zero. 
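+    // Since r is prime, the inverse of a non-zero element a equals a^(r - 2) by
+    // Fermat's little theorem. The addition chain below evaluates this fixed
+    // exponent with far fewer field multiplications than a generic
+    // square-and-multiply pass over the 255-bit exponent r - 2 would require.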
+ fn invert(&self) -> CtOption { + #[inline(always)] + fn square_assign_multi(n: &mut Fr, num_times: usize) { + for _ in 0..num_times { + *n = n.square(); + } + } + // found using https://github.com/kwantam/addchain + let mut t0 = self.square(); + let mut t1 = t0 * self; + let mut t16 = t0.square(); + let mut t6 = t16.square(); + let mut t5 = t6 * t0; + t0 = t6 * t16; + let mut t12 = t5 * t16; + let mut t2 = t6.square(); + let mut t7 = t5 * t6; + let mut t15 = t0 * t5; + let mut t17 = t12.square(); + t1 *= t17; + let mut t3 = t7 * t2; + let t8 = t1 * t17; + let t4 = t8 * t2; + let t9 = t8 * t7; + t7 = t4 * t5; + let t11 = t4 * t17; + t5 = t9 * t17; + let t14 = t7 * t15; + let t13 = t11 * t12; + t12 = t11 * t17; + t15 *= &t12; + t16 *= &t15; + t3 *= &t16; + t17 *= &t3; + t0 *= &t17; + t6 *= &t0; + t2 *= &t6; + square_assign_multi(&mut t0, 8); + t0 *= &t17; + square_assign_multi(&mut t0, 9); + t0 *= &t16; + square_assign_multi(&mut t0, 9); + t0 *= &t15; + square_assign_multi(&mut t0, 9); + t0 *= &t15; + square_assign_multi(&mut t0, 7); + t0 *= &t14; + square_assign_multi(&mut t0, 7); + t0 *= &t13; + square_assign_multi(&mut t0, 10); + t0 *= &t12; + square_assign_multi(&mut t0, 9); + t0 *= &t11; + square_assign_multi(&mut t0, 8); + t0 *= &t8; + square_assign_multi(&mut t0, 8); + t0 *= self; + square_assign_multi(&mut t0, 14); + t0 *= &t9; + square_assign_multi(&mut t0, 10); + t0 *= &t8; + square_assign_multi(&mut t0, 15); + t0 *= &t7; + square_assign_multi(&mut t0, 10); + t0 *= &t6; + square_assign_multi(&mut t0, 8); + t0 *= &t5; + square_assign_multi(&mut t0, 16); + t0 *= &t3; + square_assign_multi(&mut t0, 8); + t0 *= &t2; + square_assign_multi(&mut t0, 7); + t0 *= &t4; + square_assign_multi(&mut t0, 9); + t0 *= &t2; + square_assign_multi(&mut t0, 8); + t0 *= &t3; + square_assign_multi(&mut t0, 8); + t0 *= &t2; + square_assign_multi(&mut t0, 8); + t0 *= &t2; + square_assign_multi(&mut t0, 8); + t0 *= &t2; + square_assign_multi(&mut t0, 8); + t0 *= &t3; + square_assign_multi(&mut t0, 8); + t0 *= &t2; + square_assign_multi(&mut t0, 8); + t0 *= &t2; + square_assign_multi(&mut t0, 5); + t0 *= &t1; + square_assign_multi(&mut t0, 5); + t0 *= &t1; + + CtOption::new(t0, !self.ct_eq(&Self::zero())) + } +} + +impl ff::PrimeField for Fr { + type Repr = [u8; 32]; + + const NUM_BITS: u32 = MODULUS_BITS; + const CAPACITY: u32 = Self::NUM_BITS - 1; + const S: u32 = S; + + /// Attempts to convert a little-endian byte representation of + /// a scalar into a `Scalar`, failing if the input is not canonical. + fn from_repr(repr: Self::Repr) -> CtOption { + let mut tmp = Fr([0, 0, 0, 0]); + + tmp.0[0] = u64::from_le_bytes(repr[0..8].try_into().unwrap()); + tmp.0[1] = u64::from_le_bytes(repr[8..16].try_into().unwrap()); + tmp.0[2] = u64::from_le_bytes(repr[16..24].try_into().unwrap()); + tmp.0[3] = u64::from_le_bytes(repr[24..32].try_into().unwrap()); + + // Try to subtract the modulus + let (_, borrow) = tmp.0[0].overflowing_sub(MODULUS.0[0]); + let (_, borrow) = sbb(tmp.0[1], MODULUS.0[1], borrow); + let (_, borrow) = sbb(tmp.0[2], MODULUS.0[2], borrow); + let (_, borrow) = sbb(tmp.0[3], MODULUS.0[3], borrow); + + // If the element is smaller than MODULUS then the + // subtraction will underflow, producing a borrow value + // of 0xffff...ffff. Otherwise, it'll be zero. 
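+        // With the bool-returning `sbb` used in this crate, the final borrow is
+        // `true` exactly when the input is canonical (strictly smaller than the
+        // modulus), so `is_some` below is 1 in that case and 0 otherwise.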
+ let is_some = (borrow as u8) & 1; + + // Convert to Montgomery form by computing + // (a.R^0 * R^2) / R = a.R + tmp *= &R2; + + CtOption::new(tmp, Choice::from(is_some)) + } + + fn to_repr(&self) -> Self::Repr { + // Turn into canonical form by computing + // (a.R) / R = a + #[cfg(feature = "asm")] + let tmp = Fr::montgomery_reduce(&[self.0[0], self.0[1], self.0[2], self.0[3], 0, 0, 0, 0]); + + #[cfg(not(feature = "asm"))] + let tmp = Fr::montgomery_reduce_short(self.0[0], self.0[1], self.0[2], self.0[3]); + + let mut res = [0; 32]; + res[0..8].copy_from_slice(&tmp.0[0].to_le_bytes()); + res[8..16].copy_from_slice(&tmp.0[1].to_le_bytes()); + res[16..24].copy_from_slice(&tmp.0[2].to_le_bytes()); + res[24..32].copy_from_slice(&tmp.0[3].to_le_bytes()); + + res + } + + fn is_odd(&self) -> Choice { + Choice::from(self.to_repr()[0] & 1) + } + + fn multiplicative_generator() -> Self { + GENERATOR + } + + fn root_of_unity() -> Self { + ROOT_OF_UNITY + } +} + +impl SqrtRatio for Fr { + /// `(t - 1) // 2` where t * 2^s + 1 = p with t odd. + /// (t - 1) // 2 = 6104339283789297388802252303364915521546564123189034618274734669823 + /// = 0x39F6D3A994CEBEA4199CEC0404D0EC02A9DED2017FFF2DFF7FFFFFFF + const T_MINUS1_OVER2: [u64; 4] = unimplemented!(); + + fn get_lower_32(&self) -> u32 { + #[cfg(not(feature = "asm"))] + let tmp = Fr::montgomery_reduce_short(self.0[0], self.0[1], self.0[2], self.0[3]); + + #[cfg(feature = "asm")] + let tmp = Fr::montgomery_reduce(&[self.0[0], self.0[1], self.0[2], self.0[3], 0, 0, 0, 0]); + + tmp.0[0] as u32 + } +} + +#[cfg(test)] +mod test { + use super::*; + use ff::Field; + use rand_core::OsRng; + + #[test] + fn test_ser() { + let a0 = Fr::random(OsRng); + let a_bytes = a0.to_bytes(); + let a1 = Fr::from_bytes(&a_bytes).unwrap(); + assert_eq!(a0, a1); + } + + #[test] + fn test_sqrt() { + { + assert_eq!(Fr::zero().sqrt().unwrap(), Fr::zero()); + } + { + assert_eq!(Fr::one().sqrt().unwrap(), Fr::one()); + } + + for _ in 0..100 { + let a = Fr::random(OsRng); + let mut b = a; + b = b.square(); + let b = b.sqrt().unwrap(); + let mut negb = b; + negb = negb.neg(); + assert!(a == b || a == negb); + } + } + + #[test] + fn test_root_of_unity() { + assert_eq!( + Fr::root_of_unity().pow_vartime([1u64 << Fr::S, 0, 0, 0]), + Fr::one() + ); + } + + #[test] + fn test_inv_root_of_unity() { + assert_eq!(Fr::root_of_unity() * Fr::ROOT_OF_UNITY_INV, Fr::one(),); + } + + #[test] + fn test_field() { + crate::tests::field::random_field_tests::("bls12-381 scalar".to_string()); + } + + #[test] + fn test_delta() { + assert_eq!( + Fr::DELTA.pow(&[ + 0xfffe_5bfe_ffff_ffff, + 0x09a1_d805_53bd_a402, + 0x299d_7d48_3339_d808, + 0x0000_0000_73ed_a753, + ]), + Fr::one(), + ); + } + + #[test] + fn test_from_u512_zero() { + assert_eq!( + Fr::zero(), + Fr::from_u512([ + MODULUS.0[0], + MODULUS.0[1], + MODULUS.0[2], + MODULUS.0[3], + 0, + 0, + 0, + 0 + ]) + ); + } + + #[test] + fn test_from_u512_r() { + assert_eq!(R, Fr::from_u512([1, 0, 0, 0, 0, 0, 0, 0])); + } + + #[test] + fn test_from_u512_r2() { + assert_eq!(R2, Fr::from_u512([0, 0, 0, 0, 1, 0, 0, 0])); + } + + #[test] + fn test_from_u512_max() { + let max_u64 = 0xffff_ffff_ffff_ffff; + assert_eq!( + R3 - R, + Fr::from_u512([max_u64, max_u64, max_u64, max_u64, max_u64, max_u64, max_u64, max_u64]) + ); + } + + #[test] + fn test_serialization() { + crate::tests::field::random_serialization_test::("fr".to_string()); + } +} diff --git a/src/bls12_381/mod.rs b/src/bls12_381/mod.rs new file mode 100644 index 00000000..9cd08946 --- /dev/null +++ 
b/src/bls12_381/mod.rs @@ -0,0 +1,25 @@ +mod curve; +mod engine; +mod fq; +mod fq12; +mod fq2; +mod fq6; +mod fr; + +#[cfg(feature = "asm")] +mod assembly; + +pub use curve::*; +pub use engine::*; +pub use fq::*; +pub use fq12::*; +pub use fq2::*; +pub use fq6::*; +pub use fr::*; + +#[derive(Debug, PartialEq, Eq)] +pub enum LegendreSymbol { + Zero = 0, + QuadraticResidue = 1, + QuadraticNonResidue = -1, +} diff --git a/src/bn256/fq.rs b/src/bn256/fq.rs index b108e2ed..965175cb 100644 --- a/src/bn256/fq.rs +++ b/src/bn256/fq.rs @@ -26,7 +26,7 @@ pub struct Fq(pub(crate) [u64; 4]); /// Constant representing the modulus /// q = 0x30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47 -pub const MODULUS: Fq = Fq([ +const MODULUS: Fq = Fq([ 0x3c208c16d87cfd47, 0x97816a916871ca8d, 0xb85045b68181585d, @@ -290,7 +290,21 @@ impl SqrtRatio for Fq { mod test { use super::*; use ff::Field; - use rand_core::OsRng; + use rand_core::{OsRng, SeedableRng}; + use rand_xorshift::XorShiftRng; + + #[test] + fn test_ser() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 0xbc, 0xe5, + ]); + + let a0 = Fq::random(&mut rng); + let a_bytes = a0.to_bytes(); + let a1 = Fq::from_bytes(&a_bytes).unwrap(); + assert_eq!(a0, a1); + } #[test] fn test_sqrt_fq() { diff --git a/src/bn256/fq2.rs b/src/bn256/fq2.rs index c362669d..f1733e27 100644 --- a/src/bn256/fq2.rs +++ b/src/bn256/fq2.rs @@ -143,7 +143,7 @@ impl Fq2 { ) } - /// Converts an element of `Fq` into a byte representation in + /// Converts an element of `Fq2` into a byte representation in /// little-endian byte order. pub fn to_bytes(&self) -> [u8; 64] { let mut res = [0u8; 64]; diff --git a/src/bn256/fr.rs b/src/bn256/fr.rs index 9bddcda3..1f71171c 100644 --- a/src/bn256/fr.rs +++ b/src/bn256/fr.rs @@ -26,7 +26,7 @@ pub struct Fr(pub(crate) [u64; 4]); /// Constant representing the modulus /// r = 0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001 -pub const MODULUS: Fr = Fr([ +const MODULUS: Fr = Fr([ 0x43e1f593f0000001, 0x2833e84879b97091, 0xb85045b68181585d, @@ -297,7 +297,22 @@ impl SqrtRatio for Fr { mod test { use super::*; use ff::Field; + use rand::SeedableRng; use rand_core::OsRng; + use rand_xorshift::XorShiftRng; + + #[test] + fn test_ser() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 0xbc, 0xe5, + ]); + + let a0 = Fr::random(&mut rng); + let a_bytes = a0.to_bytes(); + let a1 = Fr::from_bytes(&a_bytes).unwrap(); + assert_eq!(a0, a1); + } #[test] fn test_sqrt() { diff --git a/src/derive/curve.rs b/src/derive/curve.rs index 1d51aeaa..0cfd6b2d 100644 --- a/src/derive/curve.rs +++ b/src/derive/curve.rs @@ -549,6 +549,13 @@ macro_rules! new_curve_impl { // Affine implementations + // Iterator for multiple affine points in halo2-lib during BLS signature verification + impl<'a> std::iter::Sum<&'a Self> for $name_affine { + fn sum(iter: I) -> $name_affine where I: Iterator { + iter.fold($name_affine::identity(), |acc, x| (acc + x).into()) + } + } + impl std::fmt::Debug for $name_affine { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { if self.is_identity().into() { diff --git a/src/derive/curve_bls12_381.rs b/src/derive/curve_bls12_381.rs new file mode 100644 index 00000000..0e23cdd6 --- /dev/null +++ b/src/derive/curve_bls12_381.rs @@ -0,0 +1,867 @@ +#[macro_export] +macro_rules! 
new_curve_impl_bls12_381 { + (($($privacy:tt)*), + $name:ident, + $name_affine:ident, + $name_compressed:ident, + $compressed_size:expr, + $base:ident, + $scalar:ident, + $generator:expr, + $constant_b:expr, + $curve_id:literal, + ) => { + + #[derive(Copy, Clone, Debug, Serialize, Deserialize)] + $($privacy)* struct $name { + pub x: $base, + pub y: $base, + pub z: $base, + } + + #[derive(Copy, Clone)] + $($privacy)* struct $name_affine { + pub x: $base, + pub y: $base, + pub infinity: Choice, + } + + #[derive(Copy, Clone, Hash)] + $($privacy)* struct $name_compressed([u8; $compressed_size]); + + impl $name { + pub fn generator() -> Self { + let generator = $name_affine::generator(); + Self { + x: generator.x, + y: generator.y, + z: $base::one(), + } + } + + const fn curve_constant_b() -> $base { + $name_affine::curve_constant_b() + } + } + + impl $name_affine { + pub fn generator() -> Self { + Self { + x: $generator.0, + y: $generator.1, + infinity: Choice::from(0u8), + } + } + + const fn curve_constant_b() -> $base { + $constant_b + } + + pub fn random(mut rng: impl RngCore) -> Self { + loop { + let x = $base::random(&mut rng); + let ysign = (rng.next_u32() % 2) as u8; + + let x3 = x.square() * x; + let y = (x3 + $name::curve_constant_b()).sqrt(); + if let Some(y) = Option::<$base>::from(y) { + let sign = y.to_bytes()[0] & 1; + let y = if ysign ^ sign == 0 { y } else { -y }; + + let p = $name_affine { + x, + y, + infinity: 0.into(), + }; + + + use $crate::group::cofactor::CofactorGroup; + let p = p.to_curve(); + return p.clear_cofactor().to_affine() + } + } + } + } + + // Compressed + + impl std::fmt::Debug for $name_compressed { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + self.0[..].fmt(f) + } + } + + impl Default for $name_compressed { + fn default() -> Self { + $name_compressed([0; $compressed_size]) + } + } + + impl AsRef<[u8]> for $name_compressed { + fn as_ref(&self) -> &[u8] { + &self.0 + } + } + + impl AsMut<[u8]> for $name_compressed { + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0 + } + } + + + // Jacobian implementations + + impl<'a> From<&'a $name_affine> for $name { + fn from(p: &'a $name_affine) -> $name { + p.to_curve() + } + } + + impl From<$name_affine> for $name { + fn from(p: $name_affine) -> $name { + p.to_curve() + } + } + + impl Default for $name { + fn default() -> $name { + $name::identity() + } + } + + impl subtle::ConstantTimeEq for $name { + fn ct_eq(&self, other: &Self) -> Choice { + let x1 = self.x * other.z; + let x2 = other.x * self.z; + + let y1 = self.y * other.z; + let y2 = other.y * self.z; + + let self_is_zero = self.is_identity(); + let other_is_zero = other.is_identity(); + + (self_is_zero & other_is_zero) // Both point at infinity + | ((!self_is_zero) & (!other_is_zero) & x1.ct_eq(&x2) & y1.ct_eq(&y2)) + // Neither point at infinity, coordinates are the same + } + + } + + impl subtle::ConditionallySelectable for $name { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + $name { + x: $base::conditional_select(&a.x, &b.x, choice), + y: $base::conditional_select(&a.y, &b.y, choice), + z: $base::conditional_select(&a.z, &b.z, choice), + } + } + } + + impl PartialEq for $name { + fn eq(&self, other: &Self) -> bool { + self.ct_eq(other).into() + } + } + + impl cmp::Eq for $name {} + + impl CurveExt for $name { + + type ScalarExt = $scalar; + type Base = $base; + type AffineExt = $name_affine; + + const CURVE_ID: &'static str = $curve_id; + + fn endo(&self) -> Self { + self.endomorphism_base() + } + + fn 
jacobian_coordinates(&self) -> ($base, $base, $base) { + (self.x, self.y, self.z) + } + + + fn hash_to_curve<'a>(_: &'a str) -> Box Self + 'a> { + unimplemented!(); + } + + fn is_on_curve(&self) -> Choice { + (self.y.square() * self.z).ct_eq(&(self.x.square() * self.x + self.z.square() * self.z * $name::curve_constant_b())) + | self.z.is_zero() + } + + fn b() -> Self::Base { + $name::curve_constant_b() + } + + fn a() -> Self::Base { + Self::Base::zero() + } + + fn new_jacobian(x: Self::Base, y: Self::Base, z: Self::Base) -> CtOption { + let p = $name { x, y, z }; + CtOption::new(p, p.is_on_curve()) + } + } + + impl group::Curve for $name { + + type AffineRepr = $name_affine; + + /// Converts a batch of `G1Projective` elements into `G1Affine` elements. This + /// function will panic if `p.len() != q.len()`. + fn batch_normalize(p: &[Self], q: &mut [Self::AffineRepr]) { + assert_eq!(p.len(), q.len()); + + let mut acc = $base::one(); + for (p, q) in p.iter().zip(q.iter_mut()) { + // We use the `x` field of $name_affine to store the product + // of previous z-coordinates seen. + q.x = acc; + + // We will end up skipping all identities in p + acc = $base::conditional_select(&(acc * p.z), &acc, p.is_identity()); + } + + // This is the inverse, as all z-coordinates are nonzero and the ones + // that are not are skipped. + acc = acc.invert().unwrap(); + + for (p, q) in p.iter().rev().zip(q.iter_mut().rev()) { + let skip = p.is_identity(); + + // Compute tmp = 1/z + let tmp = q.x * acc; + + // Cancel out z-coordinate in denominator of `acc` + acc = $base::conditional_select(&(acc * p.z), &acc, skip); + + // Set the coordinates to the correct value + q.x = p.x * tmp; + q.y = p.y * tmp; + q.infinity = Choice::from(0u8); + + *q = $name_affine::conditional_select(&q, &$name_affine::identity(), skip); + } + } + + fn to_affine(&self) -> Self::AffineRepr { + let zinv = self.z.invert().unwrap_or($base::zero()); + let x = self.x * zinv; + let y = self.y * zinv; + + let tmp = $name_affine { + x, + y, + infinity: Choice::from(0u8), + }; + + $name_affine::conditional_select(&tmp, &$name_affine::identity(), zinv.is_zero()) + } + } + + impl group::Group for $name { // G1, G2 + type Scalar = $scalar; + + fn random(mut rng: impl RngCore) -> Self { + $name_affine::random(&mut rng).to_curve() + } + + fn double(&self) -> Self { + let t0 = self.y.square(); + let z3 = t0 + t0; + let z3 = z3 + z3; + let z3 = z3 + z3; + let t1 = self.y * self.z; + let t2 = self.z.square(); + let t2 = $name::mul_by_3b(t2); + let x3 = t2 * z3; + let y3 = t0 + t2; + let z3 = t1 * z3; + let t1 = t2 + t2; + let t2 = t1 + t2; + let t0 = t0 - t2; + let y3 = t0 * y3; + let y3 = x3 + y3; + let t1 = self.x * self.y; + let x3 = t0 * t1; + let x3 = x3 + x3; + + let tmp = $name { + x: x3, + y: y3, + z: z3, + }; + + $name::conditional_select(&tmp, &$name::identity(), self.is_identity()) + } + + fn generator() -> Self { + $name::generator() + } + + fn identity() -> Self { + Self { + x: $base::zero(), + y: $base::one(), + z: $base::zero(), + } + } + + fn is_identity(&self) -> Choice { + self.z.is_zero() + } + } + + impl GroupEncoding for $name { // G1 + type Repr = $name_compressed; + + fn from_bytes(bytes: &Self::Repr) -> CtOption { + $name_affine::from_bytes(bytes).map(Self::from) + } + + fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption { + $name_affine::from_bytes(bytes).map(Self::from) + } + + fn to_bytes(&self) -> Self::Repr { + $name_affine::from(self).to_bytes() // G1Affine + } + } + + impl $crate::serde::SerdeObject for $name { + fn 
from_raw_bytes_unchecked(bytes: &[u8]) -> Self { + debug_assert_eq!(bytes.len(), 3 * $base::size()); + let [x, y, z] = [0, 1, 2] + .map(|i| $base::from_raw_bytes_unchecked(&bytes[i * $base::size()..(i + 1) * $base::size()])); + Self { x, y, z } + } + fn from_raw_bytes(bytes: &[u8]) -> Option { + if bytes.len() != 3 * $base::size() { + return None; + } + let [x, y, z] = + [0, 1, 2].map(|i| $base::from_raw_bytes(&bytes[i * $base::size()..(i + 1) * $base::size()])); + x.zip(y).zip(z).and_then(|((x, y), z)| { + let res = Self { x, y, z }; + // Check that the point is on the curve. + bool::from(res.is_on_curve()).then(|| res) + }) + } + fn to_raw_bytes(&self) -> Vec { + let mut res = Vec::with_capacity(3 * $base::size()); + Self::write_raw(self, &mut res).unwrap(); + res + } + fn read_raw_unchecked(reader: &mut R) -> Self { + let [x, y, z] = [(); 3].map(|_| $base::read_raw_unchecked(reader)); + Self { x, y, z } + } + fn read_raw(reader: &mut R) -> std::io::Result { + let x = $base::read_raw(reader)?; + let y = $base::read_raw(reader)?; + let z = $base::read_raw(reader)?; + Ok(Self { x, y, z }) + } + fn write_raw(&self, writer: &mut W) -> std::io::Result<()> { + self.x.write_raw(writer)?; + self.y.write_raw(writer)?; + self.z.write_raw(writer) + } + } + + impl group::prime::PrimeGroup for $name {} + + impl group::prime::PrimeCurve for $name { + type Affine = $name_affine; + } + + impl group::cofactor::CofactorCurve for $name { + type Affine = $name_affine; + } + + impl Group for $name { + type Scalar = $scalar; + + fn group_zero() -> Self { + Self::identity() + } + fn group_add(&mut self, rhs: &Self) { + *self += *rhs; + } + fn group_sub(&mut self, rhs: &Self) { + *self -= *rhs; + } + fn group_scale(&mut self, by: &Self::Scalar) { + *self *= *by; + } + } + + // Affine implementations + + impl std::fmt::Debug for $name_affine { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + if self.is_identity().into() { + write!(f, "Infinity") + } else { + write!(f, "({:?}, {:?})", self.x, self.y) + } + } + } + + impl<'a> From<&'a $name> for $name_affine { + fn from(p: &'a $name) -> $name_affine { + p.to_affine() + } + } + + impl From<$name> for $name_affine { + fn from(p: $name) -> $name_affine { + p.to_affine() + } + } + + impl Default for $name_affine { + fn default() -> $name_affine { + $name_affine::identity() + } + } + + impl subtle::ConstantTimeEq for $name_affine { + fn ct_eq(&self, other: &Self) -> Choice { + // The only cases in which two points are equal are + // 1. infinity is set on both + // 2. 
infinity is not set on both, and their coordinates are equal + + (self.infinity & other.infinity) + | ((!self.infinity) + & (!other.infinity) + & self.x.ct_eq(&other.x) + & self.y.ct_eq(&other.y)) + } + } + + impl subtle::ConditionallySelectable for $name_affine { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + $name_affine { + x: $base::conditional_select(&a.x, &b.x, choice), + y: $base::conditional_select(&a.y, &b.y, choice), + infinity: Choice::conditional_select(&a.infinity, &b.infinity, choice), + } + } + } + + impl cmp::Eq for $name_affine {} + + impl cmp::PartialEq for $name_affine { + #[inline] + fn eq(&self, other: &Self) -> bool { + bool::from(self.ct_eq(other)) + } + } + + impl group::GroupEncoding for $name_affine { + type Repr = $name_compressed; + + fn from_bytes(bytes: &Self::Repr) -> CtOption { + + Self::from_compressed(&bytes.0) + } + + fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption { + Self::from_bytes(bytes) + } + + fn to_bytes(&self) -> Self::Repr { + $name_compressed(self.to_compressed()) + } + } + + // TODO [serde] Add support for BLS12-381 + // impl $crate::serde::SerdeObject for $name_affine { + // fn from_raw_bytes_unchecked(bytes: &[u8]) -> Self { + // debug_assert_eq!(bytes.len(), 2 * $base::size()); + // let [x, y] = + // [0, $base::size()].map(|i| $base::from_raw_bytes_unchecked(&bytes[i..i + $base::size()])); + // Self { x, y } + // } + // fn from_raw_bytes(bytes: &[u8]) -> Option { + // if bytes.len() != 2 * $base::size() { + // return None; + // } + // let [x, y] = [0, $base::size()].map(|i| $base::from_raw_bytes(&bytes[i..i + $base::size()])); + // x.zip(y).and_then(|(x, y)| { + // let res = Self { x, y }; + // // Check that the point is on the curve. + // bool::from(res.is_on_curve()).then(|| res) + // }) + // } + // fn to_raw_bytes(&self) -> Vec { + // let mut res = Vec::with_capacity(2 * $base::size()); + // Self::write_raw(self, &mut res).unwrap(); + // res + // } + // fn read_raw_unchecked(reader: &mut R) -> Self { + // let [x, y] = [(); 2].map(|_| $base::read_raw_unchecked(reader)); + // Self { x, y } + // } + // fn read_raw(reader: &mut R) -> std::io::Result { + // let x = $base::read_raw(reader)?; + // let y = $base::read_raw(reader)?; + // Ok(Self { x, y }) + // } + // fn write_raw(&self, writer: &mut W) -> std::io::Result<()> { + // self.x.write_raw(writer)?; + // self.y.write_raw(writer) + // } + // } + + impl group::prime::PrimeCurveAffine for $name_affine { + type Curve = $name; + type Scalar = $scalar; + + + fn generator() -> Self { + $name_affine::generator() + } + + fn identity() -> Self { + Self { + x: $base::zero(), + y: $base::one(), + infinity: Choice::from(1u8), + } + } + + fn is_identity(&self) -> Choice { + self.infinity + } + + fn to_curve(&self) -> Self::Curve { + $name { + x: self.x, + y: self.y, + z: $base::conditional_select(&$base::one(), &$base::zero(), self.infinity), + } + } + } + + impl group::cofactor::CofactorCurveAffine for $name_affine { + type Curve = $name; + type Scalar = $scalar; + + fn identity() -> Self { + ::identity() + } + + fn generator() -> Self { + ::generator() + } + + fn is_identity(&self) -> Choice { + ::is_identity(self) + } + + fn to_curve(&self) -> Self::Curve { + ::to_curve(self) + } + } + + + impl CurveAffine for $name_affine { + type ScalarExt = $scalar; + type Base = $base; + type CurveExt = $name; + + fn is_on_curve(&self) -> Choice { + // y^2 - x^3 - ax ?= b + (self.y.square() - self.x.square() * self.x).ct_eq(&$name::curve_constant_b()) + | self.infinity + } + + 
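+        // With a = 0 on BLS12-381 the check above reduces to y^2 - x^3 == b,
+        // where b = 4 for G1 and b = 4(u + 1) for G2; `| self.infinity`
+        // additionally lets the point at infinity pass.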
fn coordinates(&self) -> CtOption> { + Coordinates::from_xy( self.x, self.y ) + } + + fn from_xy(x: Self::Base, y: Self::Base) -> CtOption { + let p = $name_affine { + x, y, infinity: Choice::from(0u8), + }; + CtOption::new(p, p.is_on_curve()) + } + + fn a() -> Self::Base { + Self::Base::zero() + } + + fn b() -> Self::Base { + $name::curve_constant_b() + } + } + + + impl_binops_additive!($name, $name); + impl_binops_additive!($name, $name_affine); + impl_binops_additive_specify_output!($name_affine, $name_affine, $name); + impl_binops_additive_specify_output!($name_affine, $name, $name); + impl_binops_multiplicative!($name, $scalar); + impl_binops_multiplicative_mixed!($name_affine, $scalar, $name); + + impl<'a> Neg for &'a $name { + type Output = $name; + + fn neg(self) -> $name { + $name { + x: self.x, + y: -self.y, + z: self.z, + } + } + } + + impl Neg for $name { + type Output = $name; + + fn neg(self) -> $name { + -&self + } + } + + impl Sum for $name + where + T: core::borrow::Borrow<$name>, + { + fn sum(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::identity(), |acc, item| acc + item.borrow()) + } + } + + impl<'a, 'b> Add<&'a $name> for &'b $name { + type Output = $name; + + fn add(self, rhs: &'a $name) -> $name { + let t0 = self.x * rhs.x; + let t1 = self.y * rhs.y; + let t2 = self.z * rhs.z; + let t3 = self.x + self.y; + let t4 = rhs.x + rhs.y; + let t3 = t3 * t4; + let t4 = t0 + t1; + let t3 = t3 - t4; + let t4 = self.y + self.z; + let x3 = rhs.y + rhs.z; + let t4 = t4 * x3; + let x3 = t1 + t2; + let t4 = t4 - x3; + let x3 = self.x + self.z; + let y3 = rhs.x + rhs.z; + let x3 = x3 * y3; + let y3 = t0 + t2; + let y3 = x3 - y3; + let x3 = t0 + t0; + let t0 = x3 + t0; + let t2 = $name::mul_by_3b(t2); + let z3 = t1 + t2; + let t1 = t1 - t2; + let y3 = $name::mul_by_3b(y3); + let x3 = t4 * y3; + let t2 = t3 * t1; + let x3 = t2 - x3; + let y3 = y3 * t0; + let t1 = t1 * z3; + let y3 = t1 + y3; + let t0 = t0 * t3; + let z3 = z3 * t4; + let z3 = z3 + t0; + + $name { + x: x3, + y: y3, + z: z3, + } + } + } + + impl<'a, 'b> Add<&'a $name_affine> for &'b $name { + type Output = $name; + + fn add(self, rhs: &'a $name_affine) -> $name { + // Algorithm 8, https://eprint.iacr.org/2015/1060.pdf + let t0 = self.x * rhs.x; + let t1 = self.y * rhs.y; + let t3 = rhs.x + rhs.y; + let t4 = self.x + self.y; + let t3 = t3 * t4; + let t4 = t0 + t1; + let t3 = t3 - t4; + let t4 = rhs.y * self.z; + let t4 = t4 + self.y; + let y3 = rhs.x * self.z; + let y3 = y3 + self.x; + let x3 = t0 + t0; + let t0 = x3 + t0; + let t2 = $name::mul_by_3b(self.z); + let z3 = t1 + t2; + let t1 = t1 - t2; + let y3 = $name::mul_by_3b(y3); + let x3 = t4 * y3; + let t2 = t3 * t1; + let x3 = t2 - x3; + let y3 = y3 * t0; + let t1 = t1 * z3; + let y3 = t1 + y3; + let t0 = t0 * t3; + let z3 = z3 * t4; + let z3 = z3 + t0; + + let tmp = $name { + x: x3, + y: y3, + z: z3, + }; + + $name::conditional_select(&tmp, self, rhs.is_identity()) + } + } + + impl<'a, 'b> Sub<&'a $name> for &'b $name { + type Output = $name; + + fn sub(self, other: &'a $name) -> $name { + self + (-other) + } + } + + impl<'a, 'b> Sub<&'a $name_affine> for &'b $name { + type Output = $name; + + fn sub(self, other: &'a $name_affine) -> $name { + self + (-other) + } + } + + + + #[allow(clippy::suspicious_arithmetic_impl)] + impl<'a, 'b> Mul<&'b $scalar> for &'a $name { + type Output = $name; + + // This is a simple double-and-add implementation of point + // multiplication, moving from most significant to least + // significant bit of the scalar. 
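+        // For example, with scalar 13 = 0b1101: once the leading 1-bit is
+        // reached, acc = P; the next bit (1) gives acc = 2P + P = 3P; the next
+        // bit (0) gives acc = 6P; the final bit (1) gives acc = 12P + P = 13P.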
+ + fn mul(self, other: &'b $scalar) -> Self::Output { + let mut acc = $name::identity(); + let other = other.to_bytes(); + for bit in other + // .to_repr() + .iter() + .rev() + .flat_map(|byte| (0..8).rev().map(move |i| Choice::from((byte >> i) & 1u8))) + .skip(1) + { + acc = acc.double(); + acc = $name::conditional_select(&acc, &(acc + self), bit); + } + acc + } + } + + impl<'a> Neg for &'a $name_affine { + type Output = $name_affine; + + fn neg(self) -> $name_affine { + $name_affine { + x: self.x, + y: $base::conditional_select(&-self.y, &$base::one(), self.infinity), + infinity: self.infinity, + } + } + } + + impl Neg for $name_affine { + type Output = $name_affine; + + fn neg(self) -> $name_affine { + -&self + } + } + + impl<'a, 'b> Add<&'a $name> for &'b $name_affine { + type Output = $name; + + fn add(self, rhs: &'a $name) -> $name { + rhs + self + } + } + + impl<'a, 'b> Add<&'a $name_affine> for &'b $name_affine { + type Output = $name; + + fn add(self, rhs: &'a $name_affine) -> $name { + if bool::from(self.is_identity()) { + rhs.to_curve() + } else if bool::from(rhs.is_identity()) { + self.to_curve() + } else { + if self.x == rhs.x { + if self.y == rhs.y { + self.to_curve().double() + } else { + $name::identity() + } + } else { + let h = rhs.x - self.x; + let hh = h.square(); + let i = hh + hh; + let i = i + i; + let j = h * i; + let r = rhs.y - self.y; + let r = r + r; + let v = self.x * i; + let x3 = r.square() - j - v - v; + let j = self.y * j; + let j = j + j; + let y3 = r * (v - x3) - j; + let z3 = h + h; + + $name { + x: x3, y: y3, z: z3 + } + } + } + } + } + + impl<'a, 'b> Sub<&'a $name_affine> for &'b $name_affine { + type Output = $name; + + fn sub(self, other: &'a $name_affine) -> $name { + self + (-other) + } + } + + impl<'a, 'b> Sub<&'a $name> for &'b $name_affine { + type Output = $name; + + fn sub(self, other: &'a $name) -> $name { + self + (-other) + } + } + + #[allow(clippy::suspicious_arithmetic_impl)] + impl<'a, 'b> Mul<&'b $scalar> for &'a $name_affine { + type Output = $name; + + fn mul(self, other: &'b $scalar) -> Self::Output { + // need to convert from $name_affine to $name + $name::from(self) * other + } + } + }; +} diff --git a/src/derive/field_bls12_381.rs b/src/derive/field_bls12_381.rs new file mode 100644 index 00000000..1753ed11 --- /dev/null +++ b/src/derive/field_bls12_381.rs @@ -0,0 +1,1013 @@ +#[macro_export] +macro_rules! field_common_fq { + ( + $field:ident, + $modulus:ident, + $inv:ident, + $modulus_str:ident, + $two_inv:ident, + $root_of_unity_inv:ident, + $delta:ident, + $zeta:ident, + $r:ident, + $r2:ident, + $r3:ident + ) => { + impl $field { + /// Returns zero, the additive identity. + #[inline] + pub const fn zero() -> $field { + $field([0, 0, 0, 0, 0, 0]) + } + + /// Returns one, the multiplicative identity. + #[inline] + pub const fn one() -> $field { + $r + } + + // TODO [from_u512] Add support for BLS12-381 + fn from_u512(limbs: [u64; 8]) -> $field { + let d0 = $field([limbs[0], limbs[1], limbs[2], limbs[3], limbs[4], limbs[5]]); + let d1 = $field([limbs[6], limbs[7], 0, 0, 0, 0]); + // Convert to Montgomery form + d0 * $r2 + d1 * $r3 + } + + /// Constructs an element of `Fq` without checking that it is + /// canonical. 
+ pub const fn from_raw_unchecked(limbs: [u64; 6]) -> $field { + $field(limbs) + } + + /// Convert a little-endian byte representation of a scalar into a `Fq` + pub fn from_bytes(bytes: &[u8; 48]) -> CtOption<$field> { + ::from_repr(FqBytes::from(*bytes)) + } + + // Converts an element of `Fq` into a byte representation in + // little-endian byte order. + pub fn to_bytes(&self) -> [u8; 48] { + ::to_repr(self).slice + } + + /// Returns whether or not this element is strictly lexicographically + /// larger than its negation. + pub fn lexicographically_largest(&self) -> Choice { + // This can be determined by checking to see if the element is + // larger than (p - 1) // 2. If we subtract by ((p - 1) // 2) + 1 + // and there is no underflow, then the element must be larger than + // (p - 1) // 2. + + // First, because self is in Montgomery form we need to reduce it + let tmp = $field::montgomery_reduce( + self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5], 0, 0, 0, 0, + 0, 0, + ); + + let (_, borrow) = sbb(tmp.0[0], 0xdcff_7fff_ffff_d556, false); + let (_, borrow) = sbb(tmp.0[1], 0x0f55_ffff_58a9_ffff, borrow); + let (_, borrow) = sbb(tmp.0[2], 0xb398_6950_7b58_7b12, borrow); + let (_, borrow) = sbb(tmp.0[3], 0xb23b_a5c2_79c2_895f, borrow); + let (_, borrow) = sbb(tmp.0[4], 0x258d_d3db_21a5_d66b, borrow); + let (_, borrow) = sbb(tmp.0[5], 0x0d00_88f5_1cbf_f34d, borrow); + + // If the element was smaller, the subtraction will underflow + // producing a borrow value of 0xffff...ffff, otherwise it will + // be zero. We create a Choice representing true if there was + // overflow (and so this element is not lexicographically larger + // than its negation) and then negate it. + + !Choice::from((borrow as u8) & 1) + } + } + + impl Group for $field { + type Scalar = Self; + + fn group_zero() -> Self { + Self::zero() + } + fn group_add(&mut self, rhs: &Self) { + *self += *rhs; + } + fn group_sub(&mut self, rhs: &Self) { + *self -= *rhs; + } + fn group_scale(&mut self, by: &Self::Scalar) { + *self *= *by; + } + } + + impl fmt::Debug for $field { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let tmp = self.to_repr(); + write!(f, "0x")?; + for &b in tmp.slice.iter().rev() { + write!(f, "{:02x}", b)?; + } + Ok(()) + } + } + + impl Default for $field { + #[inline] + fn default() -> Self { + Self::zero() + } + } + + impl From for $field { + fn from(bit: bool) -> $field { + if bit { + $field::one() + } else { + $field::zero() + } + } + } + + impl From for $field { + fn from(val: u64) -> $field { + $field([val, 0, 0, 0, 0, 0]) * $r2 + } + } + + impl ConstantTimeEq for $field { + fn ct_eq(&self, other: &Self) -> Choice { + self.0[0].ct_eq(&other.0[0]) + & self.0[1].ct_eq(&other.0[1]) + & self.0[2].ct_eq(&other.0[2]) + & self.0[3].ct_eq(&other.0[3]) + & self.0[4].ct_eq(&other.0[4]) + & self.0[5].ct_eq(&other.0[5]) + } + } + + impl core::cmp::Ord for $field { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + let left = self.to_repr(); + let right = other.to_repr(); + left.slice + .iter() + .zip(right.slice.iter()) + .rev() + .find_map(|(left_byte, right_byte)| match left_byte.cmp(right_byte) { + core::cmp::Ordering::Equal => None, + res => Some(res), + }) + .unwrap_or(core::cmp::Ordering::Equal) + } + } + + impl core::cmp::PartialOrd for $field { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + + impl ConditionallySelectable for $field { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + $field([ + 
u64::conditional_select(&a.0[0], &b.0[0], choice), + u64::conditional_select(&a.0[1], &b.0[1], choice), + u64::conditional_select(&a.0[2], &b.0[2], choice), + u64::conditional_select(&a.0[3], &b.0[3], choice), + u64::conditional_select(&a.0[4], &b.0[4], choice), + u64::conditional_select(&a.0[5], &b.0[5], choice), + ]) + } + } + + impl<'a> Neg for &'a $field { + type Output = $field; + + #[inline] + fn neg(self) -> $field { + self.neg() + } + } + + impl Neg for $field { + type Output = $field; + + #[inline] + fn neg(self) -> $field { + -&self + } + } + + impl<'a, 'b> Sub<&'b $field> for &'a $field { + type Output = $field; + + #[inline] + fn sub(self, rhs: &'b $field) -> $field { + self.sub(rhs) + } + } + + impl<'a, 'b> Add<&'b $field> for &'a $field { + type Output = $field; + + #[inline] + fn add(self, rhs: &'b $field) -> $field { + self.add(rhs) + } + } + + impl<'a, 'b> Mul<&'b $field> for &'a $field { + type Output = $field; + + #[inline] + fn mul(self, rhs: &'b $field) -> $field { + self.mul(rhs) + } + } + + impl From<[u64; 6]> for $field { + fn from(digits: [u64; 6]) -> Self { + Self::from_raw_unchecked(digits) + } + } + + impl From<$field> for [u64; 6] { + fn from(elt: $field) -> [u64; 6] { + // Turn into canonical form by computing + // (a.R) / R = a + #[cfg(feature = "asm")] + let tmp = $field::montgomery_reduce(&[ + elt.0[0], elt.0[1], elt.0[2], elt.0[3], elt.0[4], elt.0[5], 0, 0, 0, 0, 0, 0, + ]); + + #[cfg(not(feature = "asm"))] + let tmp = $field::montgomery_reduce_short( + elt.0[0], elt.0[1], elt.0[2], elt.0[3], elt.0[4], elt.0[5], + ); + + tmp.0 + } + } + + impl From<$field> for FqBytes { + fn from(value: $field) -> FqBytes { + value.to_repr() + } + } + + impl<'a> From<&'a $field> for FqBytes { + fn from(value: &'a $field) -> FqBytes { + value.to_repr() + } + } + + impl From<$field> for i128 { + fn from(value: $field) -> i128 { + let tmp: [u64; 6] = value.into(); + if tmp[2] == 0 && tmp[3] == 0 { + i128::from(tmp[0]) | (i128::from(tmp[1]) << 64) + } else { + // modulus - tmp + let (a0, borrow) = $modulus.0[0].overflowing_sub(tmp[0]); + let (a1, _) = sbb($modulus.0[1], tmp[1], borrow); + + -(i128::from(a0) | (i128::from(a1) << 64)) + } + } + } + + impl FieldExt for $field { + const MODULUS: &'static str = $modulus_str; + const TWO_INV: Self = $two_inv; + const ROOT_OF_UNITY_INV: Self = $root_of_unity_inv; + const DELTA: Self = $delta; + const ZETA: Self = $zeta; + + fn from_u128(v: u128) -> Self { + $field::from_raw_unchecked([v as u64, (v >> 64) as u64, 0, 0, 0, 0]) + } + + /// Converts a 512-bit little endian integer into + /// a `$field` by reducing by the modulus. 
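+            // from_bytes_wide defers to from_u512 above: the 512-bit input is split
+            // as d0 + d1·2^384 (six low limbs, two high limbs) and mapped to
+            // d0·R² + d1·R³ under Montgomery multiplication. Since R = 2^384 mod p
+            // for this 6-limb field, that equals (d0 + d1·2^384)·R mod p, i.e. the
+            // Montgomery form of the full value reduced modulo p.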
+ fn from_bytes_wide(bytes: &[u8; 64]) -> $field { + $field::from_u512([ + u64::from_le_bytes(bytes[0..8].try_into().unwrap()), + u64::from_le_bytes(bytes[8..16].try_into().unwrap()), + u64::from_le_bytes(bytes[16..24].try_into().unwrap()), + u64::from_le_bytes(bytes[24..32].try_into().unwrap()), + u64::from_le_bytes(bytes[32..40].try_into().unwrap()), + u64::from_le_bytes(bytes[40..48].try_into().unwrap()), + u64::from_le_bytes(bytes[48..56].try_into().unwrap()), + u64::from_le_bytes(bytes[56..64].try_into().unwrap()), + ]) + } + + fn get_lower_128(&self) -> u128 { + let tmp = $field::montgomery_reduce_short( + self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5], + ); + + u128::from(tmp.0[0]) | (u128::from(tmp.0[1]) << 64) + } + } + + // Assume Fq is stored in a 48-byte (384-bit) object + impl $crate::serde::SerdeObject for $field { + fn from_raw_bytes_unchecked(bytes: &[u8]) -> Self { + debug_assert_eq!(bytes.len(), 48); + let inner = [0, 8, 16, 24, 32, 40] + .map(|i| u64::from_le_bytes(bytes[i..i + 8].try_into().unwrap())); + Self(inner) + } + fn from_raw_bytes(bytes: &[u8]) -> Option { + if bytes.len() != 48 { + return None; + } + let elt = Self::from_raw_bytes_unchecked(bytes); + Self::is_less_than(&elt.0, &$modulus.0).then(|| elt) + } + fn to_raw_bytes(&self) -> Vec { + let mut res = Vec::with_capacity(48); + for limb in self.0.iter() { + res.extend_from_slice(&limb.to_le_bytes()); + } + res + } + fn read_raw_unchecked(reader: &mut R) -> Self { + let inner = [(); 6].map(|_| { + let mut buf = [0; 8]; + reader.read_exact(&mut buf).unwrap(); + u64::from_le_bytes(buf) + }); + Self(inner) + } + fn read_raw(reader: &mut R) -> std::io::Result { + let mut inner = [0u64; 6]; + for limb in inner.iter_mut() { + let mut buf = [0; 8]; + reader.read_exact(&mut buf)?; + *limb = u64::from_le_bytes(buf); + } + let elt = Self(inner); + Self::is_less_than(&elt.0, &$modulus.0) + .then(|| elt) + .ok_or_else(|| { + std::io::Error::new( + std::io::ErrorKind::InvalidData, + "input number is not less than field modulus", + ) + }) + } + fn write_raw(&self, writer: &mut W) -> std::io::Result<()> { + for limb in self.0.iter() { + writer.write_all(&limb.to_le_bytes())?; + } + Ok(()) + } + } + }; +} + +// Called in fq.rs: field_arithmetic_bls12_381!(Fq, MODULUS, INV, sparse); +#[macro_export] +macro_rules! field_arithmetic_bls12_381 { + ($field:ident, $modulus:ident, $inv:ident, $field_type:ident) => { + field_specific_bls12_381!($field, $modulus, $inv, $field_type); + impl $field { + /// Doubles this field element. + #[inline] + pub const fn double(&self) -> $field { + self.add(self) + } + + /// Squares this element. 
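+            // The schoolbook squaring below first accumulates the off-diagonal
+            // products a_i·a_j (i < j) into t1..t10, doubles them with the shift
+            // chain (each cross term occurs twice in the square), then adds the
+            // diagonal terms a_i² with carry propagation, and finally Montgomery-
+            // reduces the 12-limb intermediate back to 6 limbs.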
+ #[inline] + pub const fn square(&self) -> $field { + // Not sure why this 6 limbs adataion from 4 limbs isn't working + // let r0; + // let mut r1; + // let mut r2; + // let mut r3; + // let mut r4; + // let mut r5; + // let mut r6; + // let mut r7; + // let mut r8; + // let mut r9; + // let mut r10; + // let mut r11; + // let mut carry; + // let mut carry2; + + // (r1, carry) = self.0[0].widening_mul(self.0[1]); + // (r2, carry) = self.0[0].carrying_mul(self.0[2], carry); + // (r3, carry) = self.0[0].carrying_mul(self.0[3], carry); + // (r4, carry) = self.0[0].carrying_mul(self.0[4], carry); + // (r5, r6) = self.0[0].carrying_mul(self.0[5], carry); + + // (r3, carry) = macx(r3, self.0[1], self.0[2]); + // (r4, carry) = mac(r4, self.0[1], self.0[3], carry); + // (r5, carry) = mac(r5, self.0[1], self.0[4], carry); + // (r6, r7) = mac(r6, self.0[1], self.0[5], carry); + + // (r5, carry) = macx(r5, self.0[2], self.0[3]); + // (r6, carry) = mac(r6, self.0[2], self.0[4], carry); + // (r7, r8) = mac(r7, self.0[2], self.0[5], carry); + + // (r7, carry) = macx(r7, self.0[3], self.0[4]); + // (r8, r9) = mac(r8, self.0[3], self.0[5], carry); + + // (r9, r10) = macx(r9, self.0[4], self.0[5]); + + // r11 = r10 >> 63; + // r10 = (r10 << 1) | (r9 >> 63); + // r9 = (r9 << 1) | (r8 >> 63); + // r8 = (r8 << 1) | (r7 >> 63); + // r7 = (r7 << 1) | (r6 >> 63); + // r6 = (r6 << 1) | (r5 >> 63); + // r5 = (r5 << 1) | (r4 >> 63); + // r4 = (r4 << 1) | (r3 >> 63); + // r3 = (r3 << 1) | (r2 >> 63); + // r2 = (r2 << 1) | (r1 >> 63); + // r1 <<= 1; + + // (r0, carry) = self.0[0].widening_mul(self.0[0]); + // (r1, carry2) = r1.overflowing_add(carry); + // (r2, carry) = mac(r2, self.0[1], self.0[1], carry2 as u64); + // (r3, carry2) = r3.overflowing_add(carry); + // (r4, carry) = mac(r4, self.0[2], self.0[2], carry2 as u64); + // (r5, carry2) = r5.overflowing_add(carry); + // (r6, carry) = mac(r6, self.0[3], self.0[3], carry2 as u64); + // (r7, carry2) = r7.overflowing_add(carry); + // (r8, carry) = mac(r8, self.0[4], self.0[4], carry2 as u64); + // (r9, carry2) = r9.overflowing_add(carry); + // (r10, carry) = mac(r10, self.0[5], self.0[5], carry2 as u64); + // r11 = r11.wrapping_add(carry); + + // $field::montgomery_reduce(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11) + + let (t1, carry) = mac(0, self.0[0], self.0[1], 0); + let (t2, carry) = mac(0, self.0[0], self.0[2], carry); + let (t3, carry) = mac(0, self.0[0], self.0[3], carry); + let (t4, carry) = mac(0, self.0[0], self.0[4], carry); + let (t5, t6) = mac(0, self.0[0], self.0[5], carry); + + let (t3, carry) = mac(t3, self.0[1], self.0[2], 0); + let (t4, carry) = mac(t4, self.0[1], self.0[3], carry); + let (t5, carry) = mac(t5, self.0[1], self.0[4], carry); + let (t6, t7) = mac(t6, self.0[1], self.0[5], carry); + + let (t5, carry) = mac(t5, self.0[2], self.0[3], 0); + let (t6, carry) = mac(t6, self.0[2], self.0[4], carry); + let (t7, t8) = mac(t7, self.0[2], self.0[5], carry); + + let (t7, carry) = mac(t7, self.0[3], self.0[4], 0); + let (t8, t9) = mac(t8, self.0[3], self.0[5], carry); + + let (t9, t10) = mac(t9, self.0[4], self.0[5], 0); + + let t11 = t10 >> 63; + let t10 = (t10 << 1) | (t9 >> 63); + let t9 = (t9 << 1) | (t8 >> 63); + let t8 = (t8 << 1) | (t7 >> 63); + let t7 = (t7 << 1) | (t6 >> 63); + let t6 = (t6 << 1) | (t5 >> 63); + let t5 = (t5 << 1) | (t4 >> 63); + let t4 = (t4 << 1) | (t3 >> 63); + let t3 = (t3 << 1) | (t2 >> 63); + let t2 = (t2 << 1) | (t1 >> 63); + let t1 = t1 << 1; + + let (t0, carry) = mac(0, self.0[0], self.0[0], 0); + let 
+                let (t1, carry) = adc_u64(t1, 0, carry);
+                let (t2, carry) = mac(t2, self.0[1], self.0[1], carry);
+                let (t3, carry) = adc_u64(t3, 0, carry);
+                let (t4, carry) = mac(t4, self.0[2], self.0[2], carry);
+                let (t5, carry) = adc_u64(t5, 0, carry);
+                let (t6, carry) = mac(t6, self.0[3], self.0[3], carry);
+                let (t7, carry) = adc_u64(t7, 0, carry);
+                let (t8, carry) = mac(t8, self.0[4], self.0[4], carry);
+                let (t9, carry) = adc_u64(t9, 0, carry);
+                let (t10, carry) = mac(t10, self.0[5], self.0[5], carry);
+                let (t11, _) = adc_u64(t11, 0, carry);
+
+                Self::montgomery_reduce(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11)
+            }
+
+            /// Subtracts `rhs` from `self`, returning the result.
+            #[inline]
+            pub const fn sub(&self, rhs: &Self) -> Self {
+                let (d0, borrow) = self.0[0].overflowing_sub(rhs.0[0]);
+                let (d1, borrow) = sbb(self.0[1], rhs.0[1], borrow);
+                let (d2, borrow) = sbb(self.0[2], rhs.0[2], borrow);
+                let (d3, borrow) = sbb(self.0[3], rhs.0[3], borrow);
+                let (d4, borrow) = sbb(self.0[4], rhs.0[4], borrow);
+                let (d5, borrow) = sbb(self.0[5], rhs.0[5], borrow);
+
+                let borrow = 0u64.wrapping_sub(borrow as u64);
+                // If underflow occurred on the final limb, borrow = 0xfff...fff, otherwise
+                // borrow = 0x000...000. Thus, we use it as a mask to conditionally add the modulus.
+                let (d0, carry) = d0.overflowing_add($modulus.0[0] & borrow);
+                let (d1, carry) = adc(d1, $modulus.0[1] & borrow, carry);
+                let (d2, carry) = adc(d2, $modulus.0[2] & borrow, carry);
+                let (d3, carry) = adc(d3, $modulus.0[3] & borrow, carry);
+                let (d4, carry) = adc(d4, $modulus.0[4] & borrow, carry);
+                let (d5, _) = adc(d5, $modulus.0[5] & borrow, carry);
+                $field([d0, d1, d2, d3, d4, d5])
+            }
+
+            /// Negates `self`.
+            #[inline]
+            pub const fn neg(&self) -> Self {
+                if self.0[0] == 0
+                    && self.0[1] == 0
+                    && self.0[2] == 0
+                    && self.0[3] == 0
+                    && self.0[4] == 0
+                    && self.0[5] == 0
+                {
+                    return $field([0, 0, 0, 0, 0, 0]);
+                }
+                // Subtract `self` from `MODULUS` to negate. Ignore the final
+                // borrow because it cannot underflow; self is guaranteed to
+                // be in the field.
+                let (d0, borrow) = $modulus.0[0].overflowing_sub(self.0[0]);
+                let (d1, borrow) = sbb($modulus.0[1], self.0[1], borrow);
+                let (d2, borrow) = sbb($modulus.0[2], self.0[2], borrow);
+                let (d3, borrow) = sbb($modulus.0[3], self.0[3], borrow);
+                let (d4, borrow) = sbb($modulus.0[4], self.0[4], borrow);
+                let (d5, _) = sbb($modulus.0[5], self.0[5], borrow);
+
+                $field([d0, d1, d2, d3, d4, d5])
+            }
+
+            /// Montgomery reduce where the upper 6 registers are 0
+            #[inline(always)]
+            pub(crate) const fn montgomery_reduce_short(
+                mut r0: u64,
+                mut r1: u64,
+                mut r2: u64,
+                mut r3: u64,
+                mut r4: u64,
+                mut r5: u64,
+            ) -> $field {
+                // The Montgomery reduction here is based on Algorithm 14.32 in
+                // Handbook of Applied Cryptography
+                // <http://cacr.uwaterloo.ca/hac/about/chap14.pdf>.
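+                // Since the upper half of the 12-limb input is zero, six reduction
+                // rounds suffice; each round folds one limb into the modulus using
+                // `$inv` (the negated inverse of the modulus mod 2^64), and the final
+                // `sub` below performs the conditional subtraction that brings the
+                // result back under the modulus.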
+ let mut k; + + k = r0.wrapping_mul($inv); + (_, r0) = macx(r0, k, $modulus.0[0]); + (r1, r0) = mac(r1, k, $modulus.0[1], r0); + (r2, r0) = mac(r2, k, $modulus.0[2], r0); + (r3, r0) = mac(r3, k, $modulus.0[3], r0); + (r4, r0) = mac(r4, k, $modulus.0[4], r0); + (r5, r0) = mac(r5, k, $modulus.0[5], r0); + + k = r1.wrapping_mul($inv); + (_, r1) = macx(r1, k, $modulus.0[0]); + (r2, r1) = mac(r2, k, $modulus.0[1], r1); + (r3, r1) = mac(r3, k, $modulus.0[2], r1); + (r4, r1) = mac(r4, k, $modulus.0[3], r1); + (r5, r1) = mac(r5, k, $modulus.0[4], r1); + (r0, r1) = mac(r0, k, $modulus.0[5], r1); + + k = r2.wrapping_mul($inv); + (_, r2) = macx(r2, k, $modulus.0[0]); + (r3, r2) = mac(r3, k, $modulus.0[1], r2); + (r4, r2) = mac(r4, k, $modulus.0[2], r2); + (r5, r2) = mac(r5, k, $modulus.0[3], r2); + (r0, r2) = mac(r0, k, $modulus.0[4], r2); + (r1, r2) = mac(r1, k, $modulus.0[5], r2); + + k = r3.wrapping_mul($inv); + (_, r3) = macx(r3, k, $modulus.0[0]); + (r4, r3) = mac(r4, k, $modulus.0[1], r3); + (r5, r3) = mac(r5, k, $modulus.0[2], r3); + (r0, r3) = mac(r0, k, $modulus.0[3], r3); + (r1, r3) = mac(r1, k, $modulus.0[4], r3); + (r2, r3) = mac(r2, k, $modulus.0[5], r3); + + k = r4.wrapping_mul($inv); + (_, r4) = macx(r4, k, $modulus.0[0]); + (r5, r4) = mac(r5, k, $modulus.0[1], r4); + (r0, r4) = mac(r0, k, $modulus.0[2], r4); + (r1, r4) = mac(r1, k, $modulus.0[3], r4); + (r2, r4) = mac(r2, k, $modulus.0[4], r4); + (r3, r4) = mac(r3, k, $modulus.0[5], r4); + + k = r5.wrapping_mul($inv); + (_, r5) = macx(r5, k, $modulus.0[0]); + (r0, r5) = mac(r0, k, $modulus.0[1], r5); + (r1, r5) = mac(r1, k, $modulus.0[2], r5); + (r2, r5) = mac(r2, k, $modulus.0[3], r5); + (r3, r5) = mac(r3, k, $modulus.0[4], r5); + (r4, r5) = mac(r4, k, $modulus.0[5], r5); + + // Result may be within MODULUS of the correct value + (&$field([r0, r1, r2, r3, r4, r5])).sub(&$modulus) + } + + #[inline(always)] + fn is_less_than(x: &[u64; 6], y: &[u64; 6]) -> bool { + let (_, borrow) = x[0].overflowing_sub(y[0]); + let (_, borrow) = x[1].borrowing_sub(y[1], borrow); + let (_, borrow) = x[2].borrowing_sub(y[2], borrow); + let (_, borrow) = x[3].borrowing_sub(y[3], borrow); + let (_, borrow) = x[4].borrowing_sub(y[4], borrow); + let (_, borrow) = x[5].borrowing_sub(y[5], borrow); + borrow + } + } + }; +} + +#[macro_export] +macro_rules! field_specific_bls12_381 { + ($field:ident, $modulus:ident, $inv:ident, sparse) => { + impl $field { + /// Adds `rhs` to `self`, returning the result. + #[inline] + pub const fn add(&self, rhs: &Self) -> Self { + let (d0, carry) = self.0[0].overflowing_add(rhs.0[0]); + let (d1, carry) = self.0[1].carrying_add(rhs.0[1], carry); + let (d2, carry) = self.0[2].carrying_add(rhs.0[2], carry); + let (d3, carry) = self.0[3].carrying_add(rhs.0[3], carry); + let (d4, carry) = self.0[4].carrying_add(rhs.0[4], carry); + // sparse means that the sum won't overflow the top register + let d5 = self.0[5] + rhs.0[5] + carry as u64; + + // Attempt to subtract the modulus, to ensure the value + // is smaller than the modulus. + (&$field([d0, d1, d2, d3, d4, d5])).sub(&$modulus) + } + + /// Multiplies `rhs` by `self`, returning the result. 
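+            ///
+            /// Multiplication and Montgomery reduction are interleaved limb by limb,
+            /// relying on the spare top bit of the modulus (see the gnark write-up
+            /// linked in the body) so no extra carry word is needed between rounds.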
+ #[inline] + pub const fn mul(&self, rhs: &Self) -> $field { + // When the highest bit in the top register of the modulus is 0 and the rest of the bits are not all 1, we can use an optimization from the gnark team: https://hackmd.io/@gnark/modular_multiplication + + // I think this is exactly the same as the previous `mul` implementation with `montgomery_reduce` at the end (where `montgomery_reduce` is slightly cheaper in "sparse" setting) + // Maybe the use of mutable variables is slightly more efficient? + let mut r0; + let mut r1; + let mut t0; + let mut t1; + let mut t2; + let mut t3; + let mut t4; + let mut t5; + let mut k; + + (t0, r0) = self.0[0].widening_mul(rhs.0[0]); + k = t0.wrapping_mul($inv); + (_, r1) = macx(t0, k, $modulus.0[0]); + (t1, r0) = self.0[0].carrying_mul(rhs.0[1], r0); + (t0, r1) = mac(t1, k, $modulus.0[1], r1); + (t2, r0) = self.0[0].carrying_mul(rhs.0[2], r0); + (t1, r1) = mac(t2, k, $modulus.0[2], r1); + (t3, r0) = self.0[0].carrying_mul(rhs.0[3], r0); + (t2, r1) = mac(t3, k, $modulus.0[3], r1); + (t4, r0) = self.0[0].carrying_mul(rhs.0[4], r0); + (t3, r1) = mac(t4, k, $modulus.0[4], r1); + (t5, r0) = self.0[0].carrying_mul(rhs.0[5], r0); + (t4, r1) = mac(t5, k, $modulus.0[5], r1); + t5 = r0 + r1; + + (t0, r0) = macx(t0, self.0[1], rhs.0[0]); + k = t0.wrapping_mul($inv); + (_, r1) = macx(t0, k, $modulus.0[0]); + (t1, r0) = mac(t1, self.0[1], rhs.0[1], r0); + (t0, r1) = mac(t1, k, $modulus.0[1], r1); + (t2, r0) = mac(t2, self.0[1], rhs.0[2], r0); + (t1, r1) = mac(t2, k, $modulus.0[2], r1); + (t3, r0) = mac(t3, self.0[1], rhs.0[3], r0); + (t2, r1) = mac(t3, k, $modulus.0[3], r1); + (t4, r0) = mac(t4, self.0[1], rhs.0[4], r0); + (t3, r1) = mac(t4, k, $modulus.0[4], r1); + (t5, r0) = mac(t5, self.0[1], rhs.0[5], r0); + (t4, r1) = mac(t5, k, $modulus.0[5], r1); + t5 = r0 + r1; + + (t0, r0) = macx(t0, self.0[2], rhs.0[0]); + k = t0.wrapping_mul($inv); + (_, r1) = macx(t0, k, $modulus.0[0]); + (t1, r0) = mac(t1, self.0[2], rhs.0[1], r0); + (t0, r1) = mac(t1, k, $modulus.0[1], r1); + (t2, r0) = mac(t2, self.0[2], rhs.0[2], r0); + (t1, r1) = mac(t2, k, $modulus.0[2], r1); + (t3, r0) = mac(t3, self.0[2], rhs.0[3], r0); + (t2, r1) = mac(t3, k, $modulus.0[3], r1); + (t4, r0) = mac(t4, self.0[2], rhs.0[4], r0); + (t3, r1) = mac(t4, k, $modulus.0[4], r1); + (t5, r0) = mac(t5, self.0[2], rhs.0[5], r0); + (t4, r1) = mac(t5, k, $modulus.0[5], r1); + t5 = r0 + r1; + + (t0, r0) = macx(t0, self.0[3], rhs.0[0]); + k = t0.wrapping_mul($inv); + (_, r1) = macx(t0, k, $modulus.0[0]); + (t1, r0) = mac(t1, self.0[3], rhs.0[1], r0); + (t0, r1) = mac(t1, k, $modulus.0[1], r1); + (t2, r0) = mac(t2, self.0[3], rhs.0[2], r0); + (t1, r1) = mac(t2, k, $modulus.0[2], r1); + (t3, r0) = mac(t3, self.0[3], rhs.0[3], r0); + (t2, r1) = mac(t3, k, $modulus.0[3], r1); + (t4, r0) = mac(t4, self.0[3], rhs.0[4], r0); + (t3, r1) = mac(t4, k, $modulus.0[4], r1); + (t5, r0) = mac(t5, self.0[3], rhs.0[5], r0); + (t4, r1) = mac(t5, k, $modulus.0[5], r1); + t5 = r0 + r1; + + (t0, r0) = macx(t0, self.0[4], rhs.0[0]); + k = t0.wrapping_mul($inv); + (_, r1) = macx(t0, k, $modulus.0[0]); + (t1, r0) = mac(t1, self.0[4], rhs.0[1], r0); + (t0, r1) = mac(t1, k, $modulus.0[1], r1); + (t2, r0) = mac(t2, self.0[4], rhs.0[2], r0); + (t1, r1) = mac(t2, k, $modulus.0[2], r1); + (t3, r0) = mac(t3, self.0[4], rhs.0[3], r0); + (t2, r1) = mac(t3, k, $modulus.0[3], r1); + (t4, r0) = mac(t4, self.0[4], rhs.0[4], r0); + (t3, r1) = mac(t4, k, $modulus.0[4], r1); + (t5, r0) = mac(t5, self.0[4], rhs.0[5], r0); + (t4, r1) = mac(t5, k, 
$modulus.0[5], r1); + t5 = r0 + r1; + + (t0, r0) = macx(t0, self.0[5], rhs.0[0]); + k = t0.wrapping_mul($inv); + (_, r1) = macx(t0, k, $modulus.0[0]); + (t1, r0) = mac(t1, self.0[5], rhs.0[1], r0); + (t0, r1) = mac(t1, k, $modulus.0[1], r1); + (t2, r0) = mac(t2, self.0[5], rhs.0[2], r0); + (t1, r1) = mac(t2, k, $modulus.0[2], r1); + (t3, r0) = mac(t3, self.0[5], rhs.0[3], r0); + (t2, r1) = mac(t3, k, $modulus.0[3], r1); + (t4, r0) = mac(t4, self.0[5], rhs.0[4], r0); + (t3, r1) = mac(t4, k, $modulus.0[4], r1); + (t5, r0) = mac(t5, self.0[5], rhs.0[5], r0); + (t4, r1) = mac(t5, k, $modulus.0[5], r1); + t5 = r0 + r1; + + // Result may be within MODULUS of the correct value + (&$field([t0, t1, t2, t3, t4, t5])).sub(&$modulus) + } + + #[allow(clippy::too_many_arguments)] + #[inline(always)] + pub(crate) const fn montgomery_reduce( + r0: u64, + mut r1: u64, + mut r2: u64, + mut r3: u64, + mut r4: u64, + mut r5: u64, + mut r6: u64, + mut r7: u64, + mut r8: u64, + mut r9: u64, + mut r10: u64, + mut r11: u64, + ) -> $field { + // The Montgomery reduction here is based on Algorithm 14.32 in + // Handbook of Applied Cryptography + // . + let mut k; + let mut carry; + let mut carry2; + + k = r0.wrapping_mul($inv); + (_, carry) = macx(r0, k, $modulus.0[0]); + (r1, carry) = mac(r1, k, $modulus.0[1], carry); + (r2, carry) = mac(r2, k, $modulus.0[2], carry); + (r3, carry) = mac(r3, k, $modulus.0[3], carry); + (r4, carry) = mac(r4, k, $modulus.0[4], carry); + (r5, carry) = mac(r5, k, $modulus.0[5], carry); + (r6, carry2) = r6.overflowing_add(carry); + + k = r1.wrapping_mul($inv); + (_, carry) = macx(r1, k, $modulus.0[0]); + (r2, carry) = mac(r2, k, $modulus.0[1], carry); + (r3, carry) = mac(r3, k, $modulus.0[2], carry); + (r4, carry) = mac(r4, k, $modulus.0[3], carry); + (r5, carry) = mac(r5, k, $modulus.0[4], carry); + (r6, carry) = mac(r6, k, $modulus.0[5], carry); + (r7, carry2) = adc(r7, carry, carry2); + + k = r2.wrapping_mul($inv); + (_, carry) = macx(r2, k, $modulus.0[0]); + (r3, carry) = mac(r3, k, $modulus.0[1], carry); + (r4, carry) = mac(r4, k, $modulus.0[2], carry); + (r5, carry) = mac(r5, k, $modulus.0[3], carry); + (r6, carry) = mac(r6, k, $modulus.0[4], carry); + (r7, carry) = mac(r7, k, $modulus.0[5], carry); + (r8, carry2) = adc(r8, carry, carry2); + + k = r3.wrapping_mul($inv); + (_, carry) = macx(r3, k, $modulus.0[0]); + (r4, carry) = mac(r4, k, $modulus.0[1], carry); + (r5, carry) = mac(r5, k, $modulus.0[2], carry); + (r6, carry) = mac(r6, k, $modulus.0[3], carry); + (r7, carry) = mac(r7, k, $modulus.0[4], carry); + (r8, carry) = mac(r8, k, $modulus.0[5], carry); + (r9, carry2) = adc(r9, carry, carry2); + + k = r4.wrapping_mul($inv); + (_, carry) = macx(r4, k, $modulus.0[0]); + (r5, carry) = mac(r5, k, $modulus.0[1], carry); + (r6, carry) = mac(r6, k, $modulus.0[2], carry); + (r7, carry) = mac(r7, k, $modulus.0[3], carry); + (r8, carry) = mac(r8, k, $modulus.0[4], carry); + (r9, carry) = mac(r9, k, $modulus.0[5], carry); + (r10, carry2) = adc(r10, carry, carry2); + + k = r5.wrapping_mul($inv); + (_, carry) = macx(r5, k, $modulus.0[0]); + (r6, carry) = mac(r6, k, $modulus.0[1], carry); + (r7, carry) = mac(r7, k, $modulus.0[2], carry); + (r8, carry) = mac(r8, k, $modulus.0[3], carry); + (r9, carry) = mac(r9, k, $modulus.0[4], carry); + (r10, carry) = mac(r10, k, $modulus.0[5], carry); + (r11, _) = adc(r11, carry, carry2); + + // Result may be within MODULUS of the correct value + (&$field([r6, r7, r8, r9, r10, r11])).sub(&$modulus) + } + } + }; + // below for `dense` doesn't get called 
currently.
+    ($field:ident, $modulus:ident, $inv:ident, dense) => {
+        impl $field {
+            /// Adds `rhs` to `self`, returning the result.
+            #[inline]
+            pub const fn add(&self, rhs: &Self) -> Self {
+                let (d0, carry) = self.0[0].overflowing_add(rhs.0[0]);
+                let (d1, carry) = adc(self.0[1], rhs.0[1], carry);
+                let (d2, carry) = adc(self.0[2], rhs.0[2], carry);
+                let (d3, carry) = adc(self.0[3], rhs.0[3], carry);
+                let (d4, carry) = adc(self.0[4], rhs.0[4], carry);
+                let (d5, carry) = adc(self.0[5], rhs.0[5], carry);
+
+                // Attempt to subtract the modulus, to ensure the value
+                // is smaller than the modulus.
+                let (d0, borrow) = d0.overflowing_sub($modulus.0[0]);
+                let (d1, borrow) = sbb(d1, $modulus.0[1], borrow);
+                let (d2, borrow) = sbb(d2, $modulus.0[2], borrow);
+                let (d3, borrow) = sbb(d3, $modulus.0[3], borrow);
+                let (d4, borrow) = sbb(d4, $modulus.0[4], borrow);
+                let (d5, borrow) = sbb(d5, $modulus.0[5], borrow);
+                let borrow = (carry as u64).wrapping_sub(borrow as u64);
+
+                let (d0, carry) = d0.overflowing_add($modulus.0[0] & borrow);
+                let (d1, carry) = adc(d1, $modulus.0[1] & borrow, carry);
+                let (d2, carry) = adc(d2, $modulus.0[2] & borrow, carry);
+                let (d3, carry) = adc(d3, $modulus.0[3] & borrow, carry);
+                let (d4, carry) = adc(d4, $modulus.0[4] & borrow, carry);
+                let (d5, _) = adc(d5, $modulus.0[5] & borrow, carry);
+
+                $field([d0, d1, d2, d3, d4, d5])
+            }
+
+            /// Multiplies `rhs` by `self`, returning the result.
+            #[inline]
+            pub const fn mul(&self, rhs: &Self) -> $field {
+                // Schoolbook multiplication
+
+                let (r0, carry) = mac(0, self.0[0], rhs.0[0], 0);
+                let (r1, carry) = mac(0, self.0[0], rhs.0[1], carry);
+                let (r2, carry) = mac(0, self.0[0], rhs.0[2], carry);
+                let (r3, carry) = mac(0, self.0[0], rhs.0[3], carry);
+                let (r4, carry) = mac(0, self.0[0], rhs.0[4], carry);
+                let (r5, r6) = mac(0, self.0[0], rhs.0[5], carry);
+
+                let (r1, carry) = mac(r1, self.0[1], rhs.0[0], 0);
+                let (r2, carry) = mac(r2, self.0[1], rhs.0[1], carry);
+                let (r3, carry) = mac(r3, self.0[1], rhs.0[2], carry);
+                let (r4, carry) = mac(r4, self.0[1], rhs.0[3], carry);
+                let (r5, carry) = mac(r5, self.0[1], rhs.0[4], carry);
+                let (r6, r7) = mac(r6, self.0[1], rhs.0[5], carry);
+
+                let (r2, carry) = mac(r2, self.0[2], rhs.0[0], 0);
+                let (r3, carry) = mac(r3, self.0[2], rhs.0[1], carry);
+                let (r4, carry) = mac(r4, self.0[2], rhs.0[2], carry);
+                let (r5, carry) = mac(r5, self.0[2], rhs.0[3], carry);
+                let (r6, carry) = mac(r6, self.0[2], rhs.0[4], carry);
+                let (r7, r8) = mac(r7, self.0[2], rhs.0[5], carry);
+
+                let (r3, carry) = mac(r3, self.0[3], rhs.0[0], 0);
+                let (r4, carry) = mac(r4, self.0[3], rhs.0[1], carry);
+                let (r5, carry) = mac(r5, self.0[3], rhs.0[2], carry);
+                let (r6, carry) = mac(r6, self.0[3], rhs.0[3], carry);
+                let (r7, carry) = mac(r7, self.0[3], rhs.0[4], carry);
+                let (r8, r9) = mac(r8, self.0[3], rhs.0[5], carry);
+
+                let (r4, carry) = mac(r4, self.0[4], rhs.0[0], 0);
+                let (r5, carry) = mac(r5, self.0[4], rhs.0[1], carry);
+                let (r6, carry) = mac(r6, self.0[4], rhs.0[2], carry);
+                let (r7, carry) = mac(r7, self.0[4], rhs.0[3], carry);
+                let (r8, carry) = mac(r8, self.0[4], rhs.0[4], carry);
+                let (r9, r10) = mac(r9, self.0[4], rhs.0[5], carry);
+
+                let (r5, carry) = mac(r5, self.0[5], rhs.0[0], 0);
+                let (r6, carry) = mac(r6, self.0[5], rhs.0[1], carry);
+                let (r7, carry) = mac(r7, self.0[5], rhs.0[2], carry);
+                let (r8, carry) = mac(r8, self.0[5], rhs.0[3], carry);
+                let (r9, carry) = mac(r9, self.0[5], rhs.0[4], carry);
+                let (r10, r11) = mac(r10, self.0[5], rhs.0[5], carry);
+
+                $field::montgomery_reduce(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11)
+            }
+
+            #[allow(clippy::too_many_arguments)]
+            #[inline(always)]
+            pub(crate) const fn montgomery_reduce(
+                r0: u64,
+                mut r1: u64,
+                mut r2: u64,
+                mut r3: u64,
+                mut r4: u64,
+                mut r5: u64,
+                mut r6: u64,
+                mut r7: u64,
+                mut r8: u64,
+                mut r9: u64,
+                mut r10: u64,
+                mut r11: u64,
+            ) -> Self {
+                // The Montgomery reduction here is based on Algorithm 14.32 in
+                // Handbook of Applied Cryptography
+                // <http://cacr.uwaterloo.ca/hac/about/chap14.pdf>.
+                let mut k;
+                let mut carry;
+                let mut carry2;
+
+                k = r0.wrapping_mul($inv);
+                (_, carry) = macx(r0, k, $modulus.0[0]);
+                (r1, carry) = mac(r1, k, $modulus.0[1], carry);
+                (r2, carry) = mac(r2, k, $modulus.0[2], carry);
+                (r3, carry) = mac(r3, k, $modulus.0[3], carry);
+                (r4, carry) = mac(r4, k, $modulus.0[4], carry);
+                (r5, carry) = mac(r5, k, $modulus.0[5], carry);
+                (r6, carry2) = r6.overflowing_add(carry);
+
+                k = r1.wrapping_mul($inv);
+                (_, carry) = k.carrying_mul($modulus.0[0], r1);
+                (r2, carry) = mac(r2, k, $modulus.0[1], carry);
+                (r3, carry) = mac(r3, k, $modulus.0[2], carry);
+                (r4, carry) = mac(r4, k, $modulus.0[3], carry);
+                (r5, carry) = mac(r5, k, $modulus.0[4], carry);
+                (r6, carry) = mac(r6, k, $modulus.0[5], carry);
+                (r7, carry2) = adc(r7, carry, carry2);
+
+                k = r2.wrapping_mul($inv);
+                (_, carry) = macx(r2, k, $modulus.0[0]);
+                (r3, carry) = mac(r3, k, $modulus.0[1], carry);
+                (r4, carry) = mac(r4, k, $modulus.0[2], carry);
+                (r5, carry) = mac(r5, k, $modulus.0[3], carry);
+                (r6, carry) = mac(r6, k, $modulus.0[4], carry);
+                (r7, carry) = mac(r7, k, $modulus.0[5], carry);
+                (r8, carry2) = adc(r8, carry, carry2);
+
+                k = r3.wrapping_mul($inv);
+                (_, carry) = macx(r3, k, $modulus.0[0]);
+                (r4, carry) = mac(r4, k, $modulus.0[1], carry);
+                (r5, carry) = mac(r5, k, $modulus.0[2], carry);
+                (r6, carry) = mac(r6, k, $modulus.0[3], carry);
+                (r7, carry) = mac(r7, k, $modulus.0[4], carry);
+                (r8, carry) = mac(r8, k, $modulus.0[5], carry);
+                (r9, carry2) = adc(r9, carry, carry2);
+
+                k = r4.wrapping_mul($inv);
+                (_, carry) = macx(r4, k, $modulus.0[0]);
+                (r5, carry) = mac(r5, k, $modulus.0[1], carry);
+                (r6, carry) = mac(r6, k, $modulus.0[2], carry);
+                (r7, carry) = mac(r7, k, $modulus.0[3], carry);
+                (r8, carry) = mac(r8, k, $modulus.0[4], carry);
+                (r9, carry) = mac(r9, k, $modulus.0[5], carry);
+                (r10, carry2) = adc(r10, carry, carry2);
+
+                k = r5.wrapping_mul($inv);
+                (_, carry) = macx(r5, k, $modulus.0[0]);
+                (r6, carry) = mac(r6, k, $modulus.0[1], carry);
+                (r7, carry) = mac(r7, k, $modulus.0[2], carry);
+                (r8, carry) = mac(r8, k, $modulus.0[3], carry);
+                (r9, carry) = mac(r9, k, $modulus.0[4], carry);
+                (r10, carry) = mac(r10, k, $modulus.0[5], carry);
+                (r11, carry2) = adc(r11, carry, carry2);
+
+                // Result may be within MODULUS of the correct value
+                let mut borrow;
+                (r6, borrow) = r6.overflowing_sub($modulus.0[0]);
+                (r7, borrow) = sbb(r7, $modulus.0[1], borrow);
+                (r8, borrow) = sbb(r8, $modulus.0[2], borrow);
+                (r9, borrow) = sbb(r9, $modulus.0[3], borrow);
+                (r10, borrow) = sbb(r10, $modulus.0[4], borrow);
+                (r11, borrow) = sbb(r11, $modulus.0[5], borrow);
+                let borrow = (carry2 as u64).wrapping_sub(borrow as u64);
+
+                (r6, carry2) = r6.overflowing_add($modulus.0[0] & borrow);
+                (r7, carry2) = adc(r7, $modulus.0[1] & borrow, carry2);
+                (r8, carry2) = adc(r8, $modulus.0[2] & borrow, carry2);
+                (r9, carry2) = adc(r9, $modulus.0[3] & borrow, carry2);
+                (r10, carry2) = adc(r10, $modulus.0[4] & borrow, carry2);
+                (r11, _) = adc(r11, $modulus.0[5] & borrow, carry2);
+                $field([r6, r7, r8, r9, r10, r11])
+            }
+        }
+    };
+}
diff --git a/src/derive/mod.rs b/src/derive/mod.rs
index de8bc4cc..66527e79 100644
--- a/src/derive/mod.rs
+++ b/src/derive/mod.rs
@@ -1,7 +1,9 @@
 #[macro_use]
 pub mod curve;
+pub mod curve_bls12_381;
 #[macro_use]
 pub mod field;
+pub mod field_bls12_381;
 
 #[macro_export]
 macro_rules! impl_add_binop_specify_output {
diff --git a/src/lib.rs b/src/lib.rs
index abdf1e2c..48cf0bec 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -4,6 +4,7 @@
 mod arithmetic;
+pub mod bls12_381;
 pub mod bn256;
 pub mod pairing;
 pub mod pasta;
diff --git a/src/tests/curve.rs b/src/tests/curve.rs
index e6e6797f..45d5c210 100644
--- a/src/tests/curve.rs
+++ b/src/tests/curve.rs
@@ -16,6 +16,17 @@ pub fn curve_tests<G: CurveExt>() {
     serdes::<G>();
 }
 
+pub fn curve_tests_bls12_381<G: CurveExt>() {
+    is_on_curve::<G>();
+    equality::<G>();
+    projective_to_affine_affine_to_projective::<G>();
+    projective_addition::<G>();
+    mixed_addition::<G>();
+    multiplication::<G>();
+    batch_normalize::<G>();
+    // serdes::<G>(); // TODO [TEST] [serde] Add support for BLS12_381 G1 & G2
+}
+
 fn serdes<G: CurveExt>() {
     for _ in 0..100 {
         let projective_point = G::random(OsRng);