optimize cross-term computation (#328)
* optimize computation of cross-term
* clippy fixes
* cargo fmt
srinathsetty authored Aug 19, 2024 · 1 parent d2c52bd · commit cc55f0a
Showing 5 changed files with 21 additions and 35 deletions.
2 changes: 1 addition & 1 deletion examples/minroot.rs
@@ -49,7 +49,7 @@ impl<G: Group> MinRootIteration<G> {
     let mut x_i = *x_0;
     let mut y_i = *y_0;
     for _i in 0..num_iters {
-      let x_i_plus_1 = (x_i + y_i).pow_vartime(&exp.to_u64_digits()); // computes the fifth root of x_i + y_i
+      let x_i_plus_1 = (x_i + y_i).pow_vartime(exp.to_u64_digits()); // computes the fifth root of x_i + y_i

       // sanity check
       if cfg!(debug_assertions) {
2 changes: 1 addition & 1 deletion src/provider/keccak.rs
@@ -87,7 +87,7 @@ impl<E: Engine> TranscriptEngineTrait<E> for Keccak256Transcript<E> {

   fn absorb<T: TranscriptReprTrait<E::GE>>(&mut self, label: &'static [u8], o: &T) {
     self.transcript.update(label);
-    self.transcript.update(&o.to_transcript_bytes());
+    self.transcript.update(o.to_transcript_bytes());
   }

   fn dom_sep(&mut self, bytes: &'static [u8]) {
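Note: both one-line fixes above silence the same clippy lint, needless_borrows_for_generic_args. `pow_vartime` and the Keccak transcript's `update` take their arguments generically (`S: AsRef<[u64]>` and `impl AsRef<[u8]>`, respectively), so the borrowed and owned forms both satisfy the bound and the extra `&` only adds a level of indirection. A minimal sketch of the pattern the lint targets, with hypothetical names not taken from the Nova codebase:

    // A generic parameter accepts either `Vec<u64>` or `&Vec<u64>`, since
    // `AsRef` is also implemented for references to implementors.
    fn sum_limbs<S: AsRef<[u64]>>(limbs: S) -> u64 {
      limbs.as_ref().iter().sum()
    }

    fn main() {
      let digits = vec![1u64, 2, 3];
      // clippy::needless_borrows_for_generic_args flags this borrow because
      // `digits` could be passed by value with no change in behavior:
      assert_eq!(sum_limbs(&digits), 6);
    }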
50 changes: 17 additions & 33 deletions src/r1cs/mod.rs
@@ -250,42 +250,26 @@ impl<E: Engine> R1CSShape<E> {
     U2: &R1CSInstance<E>,
     W2: &R1CSWitness<E>,
   ) -> Result<(Vec<E::Scalar>, Commitment<E>), NovaError> {
-    let (AZ_1, BZ_1, CZ_1) = {
-      let Z1 = [W1.W.clone(), vec![U1.u], U1.X.clone()].concat();
-      self.multiply_vec(&Z1)?
-    };
-
-    let (AZ_2, BZ_2, CZ_2) = {
-      let Z2 = [W2.W.clone(), vec![E::Scalar::ONE], U2.X.clone()].concat();
-      self.multiply_vec(&Z2)?
-    };
-
-    let (AZ_1_circ_BZ_2, AZ_2_circ_BZ_1, u_1_cdot_CZ_2, u_2_cdot_CZ_1) = {
-      let AZ_1_circ_BZ_2 = (0..AZ_1.len())
-        .into_par_iter()
-        .map(|i| AZ_1[i] * BZ_2[i])
-        .collect::<Vec<E::Scalar>>();
-      let AZ_2_circ_BZ_1 = (0..AZ_2.len())
-        .into_par_iter()
-        .map(|i| AZ_2[i] * BZ_1[i])
-        .collect::<Vec<E::Scalar>>();
-      let u_1_cdot_CZ_2 = (0..CZ_2.len())
-        .into_par_iter()
-        .map(|i| U1.u * CZ_2[i])
-        .collect::<Vec<E::Scalar>>();
-      let u_2_cdot_CZ_1 = (0..CZ_1.len())
-        .into_par_iter()
-        .map(|i| CZ_1[i])
-        .collect::<Vec<E::Scalar>>();
-      (AZ_1_circ_BZ_2, AZ_2_circ_BZ_1, u_1_cdot_CZ_2, u_2_cdot_CZ_1)
-    };
+    let Z1 = [W1.W.clone(), vec![U1.u], U1.X.clone()].concat();
+    let Z2 = [W2.W.clone(), vec![E::Scalar::ONE], U2.X.clone()].concat();
+
+    // The following code uses the optimization suggested in
+    // Section 5.2 of [Mova](https://eprint.iacr.org/2024/1220.pdf)
+    let Z = Z1
+      .into_par_iter()
+      .zip(Z2.into_par_iter())
+      .map(|(z1, z2)| z1 + z2)
+      .collect::<Vec<E::Scalar>>();
+    let u = U1.u + E::Scalar::ONE; // U2.u = 1
+
+    let (AZ, BZ, CZ) = self.multiply_vec(&Z)?;

-    let T = AZ_1_circ_BZ_2
+    let T = AZ
       .par_iter()
-      .zip(&AZ_2_circ_BZ_1)
-      .zip(&u_1_cdot_CZ_2)
-      .zip(&u_2_cdot_CZ_1)
-      .map(|(((a, b), c), d)| *a + *b - *c - *d)
+      .zip(BZ.par_iter())
+      .zip(CZ.par_iter())
+      .zip(W1.E.par_iter())
+      .map(|(((az, bz), cz), e)| *az * *bz - u * *cz - *e)
       .collect::<Vec<E::Scalar>>();

     let comm_T = CE::<E>::commit(ck, &T);
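Note: the refactor computes the same cross-term T for the usual relaxed-R1CS reasons, sketched here rather than stated in the commit. The first instance is relaxed, so AZ_1 ∘ BZ_1 = u_1 · CZ_1 + E_1; the second is a strict R1CS instance, so u_2 = 1 and E_2 = 0. With Z = Z_1 + Z_2 and u = u_1 + 1, bilinearity of the matrix-vector products gives

  \begin{aligned}
  AZ \circ BZ &= AZ_1 \circ BZ_1 + AZ_1 \circ BZ_2 + AZ_2 \circ BZ_1 + AZ_2 \circ BZ_2 \\
              &= (u_1 \cdot CZ_1 + E_1) + AZ_1 \circ BZ_2 + AZ_2 \circ BZ_1 + CZ_2, \\
  u \cdot CZ  &= (u_1 + 1)(CZ_1 + CZ_2) = u_1 \cdot CZ_1 + u_1 \cdot CZ_2 + CZ_1 + CZ_2, \\
  AZ \circ BZ - u \cdot CZ - E_1 &= AZ_1 \circ BZ_2 + AZ_2 \circ BZ_1 - u_1 \cdot CZ_2 - CZ_1 = T.
  \end{aligned}

The last line is exactly the cross-term the deleted code assembled from four separately materialized vectors, so commit_T now performs three sparse matrix-vector products (A, B, C against Z) instead of six (against Z_1 and Z_2), at the cost of one cheap vector addition.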
1 change: 1 addition & 0 deletions src/spartan/polys/masked_eq.rs
@@ -85,6 +85,7 @@ mod tests {
   use rand_chacha::ChaCha20Rng;
   use rand_core::{CryptoRng, RngCore, SeedableRng};

+  #[allow(clippy::needless_borrows_for_generic_args)]
   fn test_masked_eq_polynomial_with<F: PrimeField, R: RngCore + CryptoRng>(
     num_vars: usize,
     num_masked_vars: usize,
1 change: 1 addition & 0 deletions src/spartan/polys/multilinear.rs
@@ -278,6 +278,7 @@
   }

   /// Returns a random ML polynomial
+  #[allow(clippy::needless_borrows_for_generic_args)]
   fn random<R: RngCore + CryptoRng, Scalar: PrimeField>(
     num_vars: usize,
     mut rng: &mut R,
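Note: in these two test helpers the lint is suppressed rather than fixed, plausibly because they thread an RNG through generic calls, a known rough edge for this lint. Samplers such as ff's Field::random take their RNG by value, so a helper holding `rng: &mut R` must re-borrow with `&mut rng` at each call site, and the borrow clippy wants removed is load-bearing. A hedged sketch of the shape, with hypothetical names rather than the actual helper bodies:

    use rand_core::RngCore;

    // Hypothetical stand-in for a sampler that takes its RNG by value,
    // the way ff's `Field::random(impl RngCore)` does.
    fn sample(mut rng: impl RngCore) -> u64 {
      rng.next_u64()
    }

    #[allow(clippy::needless_borrows_for_generic_args)]
    fn sample_many<R: RngCore>(n: usize, mut rng: &mut R) -> Vec<u64> {
      // `&mut rng` re-borrows the RNG for each call; passing `rng` directly,
      // as the lint suggests, would move it into the first call.
      (0..n).map(|_| sample(&mut rng)).collect()
    }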
