diff --git a/README.md b/README.md
index 571e3aa1..9bb2395a 100644
--- a/README.md
+++ b/README.md
@@ -43,7 +43,7 @@ The library has not been released as a crate yet (as of Nov 2023) but the API ha
 - generate inclusion proofs from a list of entity IDs (tree required)
 - verify an inclusion proof using a root hash (no tree required)
 
-See the [examples](https://github.com/silversixpence-crypto/dapol/examples) directory for details on how to use the API.
+See the [examples](https://github.com/silversixpence-crypto/dapol/examples) directory or [docs](https://docs.rs/dapol/latest/dapol/#rust-api) for details on how to use the API.
 
 ### CLI
 
@@ -81,12 +81,12 @@ Building a tree can be done:
 
 Build a tree using config file (full log verbosity):
 ```bash
-./target/release/dapol -vvv build-tree config-file ./tree_config_example.toml
+./target/release/dapol -vvv build-tree config-file ./examples/tree_config_example.toml
 ```
 
 Add serialization:
 ```bash
-./target/release/dapol -vvv build-tree config-file ./tree_config_example.toml --serialize .
+./target/release/dapol -vvv build-tree config-file ./examples/tree_config_example.toml --serialize .
 ```
 
 Deserialize a tree from a file:
@@ -96,7 +96,13 @@ Deserialize a tree from a file:
 
 Generate proofs (proofs will live in the `./inclusion_proofs/` directory):
 ```bash
-./target/release/dapol -vvv build-tree config-file ./tree_config_example.toml --gen-proofs ./examples/entities_example.csv
+./target/release/dapol -vvv build-tree config-file ./examples/tree_config_example.toml --gen-proofs ./examples/entities_example.csv
+```
+
+Build a tree using CLI args as opposed to a config file:
+```bash
+# this will generate random secrets & 1000 random entities
+./target/release/dapol -vvv build-tree new --accumulator ndm-smt --height 16 --random-entities 1000
 ```
 
 #### Proof generation
diff --git a/benches/criterion_benches.rs b/benches/criterion_benches.rs
index 75893820..b372a647 100644
--- a/benches/criterion_benches.rs
+++ b/benches/criterion_benches.rs
@@ -25,7 +25,7 @@ mod memory_usage_estimation;
 use memory_usage_estimation::estimated_total_memory_usage_mb;
 
 mod utils;
-use utils::{abs_diff, bytes_as_string, system_total_memory_mb};
+use utils::{abs_diff, bytes_to_string, system_total_memory_mb};
 
 /// Determines how many runs are done for number of entities.
 /// The higher this value the more runs that are done.
@@ -116,7 +116,7 @@ pub fn bench_build_tree(c: &mut Criterion) { format!( "height_{}/max_thread_count_{}/num_entities_{}", h.as_u32(), - t.get_value(), + t.as_u8(), n ), ), @@ -168,19 +168,18 @@ pub fn bench_build_tree(c: &mut Criterion) { let path = Accumulator::parse_accumulator_serialization_path(dir).unwrap(); let acc = Accumulator::NdmSmt(ndm_smt.expect("Tree should have been built")); - group.bench_with_input( + group.bench_function( BenchmarkId::new( "serialize_tree", format!( "height_{}/max_thread_count_{}/num_entities_{}", h.as_u32(), - t.get_value(), + t.as_u8(), n ), ), - &(h, t, n), - |bench, tup| { - bench.iter(|| acc.serialize(path.clone())); + |bench| { + bench.iter(|| acc.serialize(path.clone()).unwrap()); }, ); @@ -190,7 +189,7 @@ pub fn bench_build_tree(c: &mut Criterion) { println!( "\nSerialized tree file size: {}\n", - bytes_as_string(file_size as usize) + bytes_to_string(file_size as usize) ); } } diff --git a/benches/inputs.rs b/benches/inputs.rs index 31d44c31..79b8086e 100644 --- a/benches/inputs.rs +++ b/benches/inputs.rs @@ -64,7 +64,7 @@ pub fn num_entities_greater_than(n: u64) -> Vec { pub fn max_thread_counts() -> Vec { let mut tc: Vec = Vec::new(); - let max_thread_count: u8 = MaxThreadCount::default().get_value(); + let max_thread_count: u8 = MaxThreadCount::default().as_u8(); let step = if max_thread_count < 8 { 1 diff --git a/benches/large_input_benches.rs b/benches/large_input_benches.rs index e21f2e6d..035ff3f6 100644 --- a/benches/large_input_benches.rs +++ b/benches/large_input_benches.rs @@ -12,7 +12,7 @@ mod memory_usage_estimation; use memory_usage_estimation::estimated_total_memory_usage_mb; mod utils; -use utils::{bytes_as_string, system_total_memory_mb, abs_diff}; +use utils::{bytes_to_string, system_total_memory_mb, abs_diff}; /// Determines how many runs are done for number of entities. /// The higher this value the fewer runs that are done. 
@@ -87,7 +87,7 @@ fn main() { "\nRunning benchmark for input values \ (height {}, max_thread_count {}, num_entities {})", h.as_u32(), - t.get_value(), + t.as_u8(), n ); @@ -137,9 +137,9 @@ fn main() { Serialized tree file size: {}\n \ ========================================================================", tree_build_time, - bytes_as_string(mem_used_tree_build), + bytes_to_string(mem_used_tree_build), serialization_time, - bytes_as_string(file_size as usize) + bytes_to_string(file_size as usize) ); } } diff --git a/benches/memory_measurement.rs b/benches/memory_measurement.rs index cb14d301..c0d4f3e9 100644 --- a/benches/memory_measurement.rs +++ b/benches/memory_measurement.rs @@ -49,7 +49,7 @@ impl Measurement for Memory { struct MemoryFormatter; impl ValueFormatter for MemoryFormatter { fn format_value(&self, value: f64) -> String { - bytes_as_string(value as usize) + bytes_to_string(value as usize) } fn format_throughput(&self, throughput: &Throughput, value: f64) -> String { @@ -60,7 +60,7 @@ impl ValueFormatter for MemoryFormatter { } } - fn scale_values(&self, typical_value: f64, values: &mut [f64]) -> &'static str { + fn scale_values(&self, _typical_value: f64, values: &mut [f64]) -> &'static str { for val in values { *val = ((*val / 1024u64.pow(2) as f64) * 1000.0).round() / 1000.0; } @@ -69,7 +69,7 @@ impl ValueFormatter for MemoryFormatter { fn scale_throughputs( &self, - typical_value: f64, + _typical_value: f64, throughput: &Throughput, values: &mut [f64], ) -> &'static str { @@ -105,7 +105,7 @@ impl ValueFormatter for MemoryFormatter { } } -fn bytes_as_string(num_bytes: usize) -> String { +fn bytes_to_string(num_bytes: usize) -> String { if num_bytes < 1024 { format!("{} bytes", num_bytes) } else if num_bytes >= 1024 && num_bytes < 1024usize.pow(2) { diff --git a/benches/memory_usage_estimation.rs b/benches/memory_usage_estimation.rs index 21105329..615f1328 100644 --- a/benches/memory_usage_estimation.rs +++ b/benches/memory_usage_estimation.rs @@ -1,4 +1,4 @@ -use dapol::{Accumulator, EntityId, Height, InclusionProof, MaxThreadCount}; +use dapol::{Height}; /// Estimated memory usage in MB. /// The equation was calculated using the plane_of_best_fit.py script diff --git a/benches/utils.rs b/benches/utils.rs index 7456ee93..586baf04 100644 --- a/benches/utils.rs +++ b/benches/utils.rs @@ -19,7 +19,7 @@ pub fn abs_diff(x: usize, y: usize) -> usize { } } -pub fn bytes_as_string(num_bytes: usize) -> String { +pub fn bytes_to_string(num_bytes: usize) -> String { if num_bytes < 1024 { format!("{} bytes", num_bytes) } else if num_bytes >= 1024 && num_bytes < 1024usize.pow(2) { @@ -46,6 +46,7 @@ pub fn bytes_as_string(num_bytes: usize) -> String { // ------------------------------------------------------------------------------------------------- // Testing jemalloc_ctl to make sure it gives expected memory readings. 
+#[allow(dead_code)]
 pub fn bench_test_jemalloc_readings() {
     use jemalloc_ctl::{epoch, stats};
 
@@ -65,8 +66,8 @@ pub fn bench_test_jemalloc_readings() {
 
     println!(
        "buf capacity: {:<6}",
-        bytes_as_string(buf.capacity())
+        bytes_to_string(buf.capacity())
     );
 
-    println!("Memory usage: {} allocated", bytes_as_string(diff),);
+    println!("Memory usage: {} allocated", bytes_to_string(diff),);
 }
diff --git a/src/accumulators/config.rs b/src/accumulators/config.rs
index fb390668..8e98daf0 100644
--- a/src/accumulators/config.rs
+++ b/src/accumulators/config.rs
@@ -14,7 +14,7 @@ use super::{ndm_smt, Accumulator};
 /// accumulator_type = "ndm-smt"
 /// ```
 ///
-/// The rest of the config details can be found in the submodules:
+/// The rest of the config details can be found in the sub-modules:
 /// - [crate][accumulators][NdmSmtConfig]
 ///
 /// Config deserialization example:
diff --git a/src/accumulators/ndm_smt.rs b/src/accumulators/ndm_smt.rs
index ccd20610..a3c6953a 100644
--- a/src/accumulators/ndm_smt.rs
+++ b/src/accumulators/ndm_smt.rs
@@ -3,7 +3,7 @@ use std::collections::HashMap;
 use primitive_types::H256;
 use serde::{Deserialize, Serialize};
 
-use log::error;
+use log::{error, info};
 use logging_timer::{timer, Level};
 use rayon::prelude::*;
 
@@ -89,6 +89,20 @@ impl NdmSmt {
         let salt_b_bytes = secrets.salt_b.as_bytes();
         let salt_s_bytes = secrets.salt_s.as_bytes();
 
+        info!(
+            "\nCreating NDM-SMT with the following configuration:\n \
+            - height: {}\n \
+            - number of entities: {}\n \
+            - master secret: 0x{}\n \
+            - salt b: 0x{}\n \
+            - salt s: 0x{}",
+            height.as_u32(),
+            entities.len(),
+            master_secret_bytes.iter().map(|b| format!("{:02x}", b)).collect::<String>(),
+            salt_b_bytes.iter().map(|b| format!("{:02x}", b)).collect::<String>(),
+            salt_s_bytes.iter().map(|b| format!("{:02x}", b)).collect::<String>(),
+        );
+
         let (leaf_nodes, entity_coord_tuples) = {
             // Map the entities to bottom-layer leaf nodes.
@@ -263,7 +277,7 @@ fn new_padding_node_content_closure(
     move |coord: &Coordinate| {
         // TODO unfortunately we copy data here, maybe there is a way to do without
        // copying
-        let coord_bytes = coord.as_bytes();
+        let coord_bytes = coord.to_bytes();
         // pad_secret is given as 'w' in the DAPOL+ paper
         let pad_secret = generate_key(None, &master_secret_bytes, Some(&coord_bytes));
         let pad_secret_bytes: [u8; 32] = pad_secret.into();
diff --git a/src/accumulators/ndm_smt/ndm_smt_config.rs b/src/accumulators/ndm_smt/ndm_smt_config.rs
index e3277846..1f10fa38 100644
--- a/src/accumulators/ndm_smt/ndm_smt_config.rs
+++ b/src/accumulators/ndm_smt/ndm_smt_config.rs
@@ -151,7 +151,11 @@ impl NdmSmtConfigBuilder {
     pub fn build(&self) -> NdmSmtConfig {
         let entities = EntityConfig {
             file_path: self.entities.clone().and_then(|e| e.file_path).or(None),
-            num_random_entities: self.entities.clone().and_then(|e| e.num_random_entities).or(None),
+            num_random_entities: self
+                .entities
+                .clone()
+                .and_then(|e| e.num_random_entities)
+                .or(None),
         };
 
         NdmSmtConfig {
@@ -256,7 +260,12 @@ mod tests {
     fn builder_without_any_values_fails() {
         use crate::entity::EntitiesParserError;
         let res = NdmSmtConfigBuilder::default().build().parse();
-        assert_err!(res, Err(NdmSmtConfigParserError::EntitiesError(EntitiesParserError::NumEntitiesNotSet)));
+        assert_err!(
+            res,
+            Err(NdmSmtConfigParserError::EntitiesError(
+                EntitiesParserError::NumEntitiesNotSet
+            ))
+        );
     }
 
     #[test]
diff --git a/src/accumulators/ndm_smt/x_coord_generator.rs b/src/accumulators/ndm_smt/x_coord_generator.rs
index 4b668087..ff3347a9 100644
--- a/src/accumulators/ndm_smt/x_coord_generator.rs
+++ b/src/accumulators/ndm_smt/x_coord_generator.rs
@@ -35,24 +35,37 @@ use std::collections::HashMap;
 /// optimized by a HashMap. This algorithm wraps the `rng`, efficiently avoiding
 /// collisions. Here is some pseudo code explaining how it works:
 ///
-/// ```bash,ignore
-/// if N > max_x_coord throw error
-/// for i in range [0, N]:
-/// - pick random k in range [i, max_x_coord]
-/// - if k in map then set v = map[k]
-/// - while map[v] exists: v = map[v]
-/// - result = v
-/// - else result = k
-/// - set map[k] = i
+/// Key:
+/// - `n` is the number of users that need to be mapped to leaf nodes
+/// - `x_coord` is the index of the leaf node (left-most x-coord is 0,
+///   right-most x-coord is `max_x_coord`)
+/// - `user_mapping` is the result of the algorithm, where each user is given a
+///   leaf node index i.e. `user_mapping: users -> indices`
+/// - `tracking_map` is used to determine which indices have been used
+///
+/// ```python,ignore
+/// if n > max_x_coord throw error
+///
+/// user_mapping = new_empty_hash_map()
+/// tracking_map = new_empty_hash_map()
+///
+/// for i in [0, n):
+///     pick random k in range [i, max_x_coord]
+///     if k in tracking_map then set v = tracking_map[k]
+///         while tracking_map[v] exists: v = tracking_map[v]
+///         set user_mapping[i] = v
+///     else user_mapping[i] = k
+///     set tracking_map[k] = i
 /// ```
 ///
-/// Assuming `rng` is constant-time the above algorithm has time complexity
-/// `O(N)`. Note that the second loop (the while loop) will only execute a
-/// total of `N` times throughout the entire loop cycle of the first loop.
-/// This is because the second loop will only execute if a chain in the map
-/// exists, and the worst case happens when there is 1 long chain containing
-/// all the elements of the map; in this case the second loop will only execute
-/// on 1 of the iterations of the first loop.
+/// Assuming `rng` is constant-time and the HashMap is optimized by some +/// balanced search tree then the above algorithm has time and memory complexity +/// `O(n log(n))` in the worst case. Note that the second loop (the while loop) +/// will only execute a total of `n` times throughout the entire loop cycle of +/// the first loop. This is because the second loop will only execute if a chain +/// in the map exists, and the worst case happens when there is 1 long chain +/// containing all the elements of the map; in this case the second loop will +/// only execute on 1 of the iterations of the first loop. pub struct RandomXCoordGenerator { rng: ThreadRng, used_x_coords: HashMap, diff --git a/src/binary_tree.rs b/src/binary_tree.rs index a26f5a14..dff6f53c 100644 --- a/src/binary_tree.rs +++ b/src/binary_tree.rs @@ -187,7 +187,7 @@ impl Coordinate { /// the next 8 elements of the array, directly after the first element. /// Both x- & y-coords are given in Little Endian byte order. /// https://stackoverflow.com/questions/71788974/concatenating-two-u16s-to-a-single-array-u84 - pub fn as_bytes(&self) -> [u8; 32] { + pub fn to_bytes(&self) -> [u8; 32] { let mut c = [0u8; 32]; let (left, mid) = c.split_at_mut(1); left.copy_from_slice(&self.y.to_le_bytes()); @@ -427,7 +427,7 @@ mod tests { let x = 258; let y = 12; let coord = Coordinate { x, y }; - let bytes = coord.as_bytes(); + let bytes = coord.to_bytes(); assert_eq!(bytes.len(), 32, "Byte array should be 256 bits"); diff --git a/src/binary_tree/path_siblings.rs b/src/binary_tree/path_siblings.rs index dad286a5..5a2b5f43 100644 --- a/src/binary_tree/path_siblings.rs +++ b/src/binary_tree/path_siblings.rs @@ -392,7 +392,7 @@ mod tests { use super::super::*; use super::*; use crate::binary_tree::utils::test_utils::{ - full_bottom_layer, get_padding_function, single_leaf, sparse_leaves, TestContent, + full_bottom_layer, generate_padding_closure, single_leaf, sparse_leaves, TestContent, }; #[test] @@ -405,7 +405,7 @@ mod tests { .with_height(height) .with_store_depth(MIN_STORE_DEPTH) .with_leaf_nodes(leaf_nodes.clone()) - .build_using_single_threaded_algorithm(get_padding_function()) + .build_using_single_threaded_algorithm(generate_padding_closure()) .unwrap(); let leaf_node = tree_single_threaded.get_leaf_node(10).unwrap(); @@ -413,7 +413,7 @@ mod tests { let siblings = PathSiblings::build_using_single_threaded_algorithm( &tree_single_threaded, &leaf_node, - get_padding_function(), + generate_padding_closure(), ) .expect("PathSiblings generation should have been successful"); @@ -437,7 +437,7 @@ mod tests { .with_height(height) .with_store_depth(MIN_STORE_DEPTH) .with_leaf_nodes(leaf_nodes.clone()) - .build_using_multi_threaded_algorithm(get_padding_function()) + .build_using_multi_threaded_algorithm(generate_padding_closure()) .unwrap(); let leaf_node = tree_multi_threaded.get_leaf_node(10).unwrap(); @@ -445,7 +445,7 @@ mod tests { let siblings = PathSiblings::build_using_multi_threaded_algorithm( &tree_multi_threaded, &leaf_node, - get_padding_function(), + generate_padding_closure(), ) .expect("PathSiblings generation should have been successful"); @@ -469,7 +469,7 @@ mod tests { .with_height(height) .with_leaf_nodes(leaf_nodes.clone()) .with_store_depth(MIN_STORE_DEPTH) - .build_using_single_threaded_algorithm(get_padding_function()) + .build_using_single_threaded_algorithm(generate_padding_closure()) .unwrap(); let leaf_node = tree_single_threaded.get_leaf_node(6).unwrap(); @@ -477,7 +477,7 @@ mod tests { let siblings = 
PathSiblings::build_using_single_threaded_algorithm( &tree_single_threaded, &leaf_node, - get_padding_function(), + generate_padding_closure(), ) .expect("PathSiblings generation should have been successful"); @@ -501,7 +501,7 @@ mod tests { .with_height(height) .with_leaf_nodes(leaf_nodes.clone()) .with_store_depth(MIN_STORE_DEPTH) - .build_using_multi_threaded_algorithm(get_padding_function()) + .build_using_multi_threaded_algorithm(generate_padding_closure()) .unwrap(); let leaf_node = tree_multi_threaded.get_leaf_node(6).unwrap(); @@ -509,7 +509,7 @@ mod tests { let siblings = PathSiblings::build_using_multi_threaded_algorithm( &tree_multi_threaded, &leaf_node, - get_padding_function(), + generate_padding_closure(), ) .expect("PathSiblings generation should have been successful"); @@ -534,7 +534,7 @@ mod tests { .with_height(height.clone()) .with_leaf_nodes(leaf_node.clone()) .with_store_depth(MIN_STORE_DEPTH) - .build_using_single_threaded_algorithm(get_padding_function()) + .build_using_single_threaded_algorithm(generate_padding_closure()) .unwrap(); let leaf_node = tree_single_threaded.get_leaf_node(i).unwrap(); @@ -542,7 +542,7 @@ mod tests { let siblings = PathSiblings::build_using_single_threaded_algorithm( &tree_single_threaded, &leaf_node, - get_padding_function(), + generate_padding_closure(), ) .expect("PathSiblings generation should have been successful"); @@ -568,7 +568,7 @@ mod tests { .with_height(height.clone()) .with_leaf_nodes(leaf_node.clone()) .with_store_depth(MIN_STORE_DEPTH) - .build_using_multi_threaded_algorithm(get_padding_function()) + .build_using_multi_threaded_algorithm(generate_padding_closure()) .unwrap(); let leaf_node = tree_multi_threaded.get_leaf_node(x_coord).unwrap(); @@ -576,7 +576,7 @@ mod tests { let siblings = PathSiblings::build_using_multi_threaded_algorithm( &tree_multi_threaded, &leaf_node, - get_padding_function(), + generate_padding_closure(), ) .expect("PathSiblings build should have been successful"); diff --git a/src/binary_tree/tree_builder.rs b/src/binary_tree/tree_builder.rs index 0936779f..728ec338 100644 --- a/src/binary_tree/tree_builder.rs +++ b/src/binary_tree/tree_builder.rs @@ -296,7 +296,7 @@ mod tests { use super::super::*; use super::*; use crate::binary_tree::utils::test_utils::{ - full_bottom_layer, get_padding_function, single_leaf, sparse_leaves, TestContent, + full_bottom_layer, generate_padding_closure, single_leaf, sparse_leaves, TestContent, }; use crate::utils::test_utils::{assert_err, assert_err_simple}; @@ -323,13 +323,13 @@ mod tests { let single_threaded = TreeBuilder::new() .with_height(height.clone()) .with_leaf_nodes(leaf_nodes.clone()) - .build_using_single_threaded_algorithm(get_padding_function()) + .build_using_single_threaded_algorithm(generate_padding_closure()) .unwrap(); let multi_threaded = TreeBuilder::new() .with_height(height.clone()) .with_leaf_nodes(leaf_nodes) - .build_using_multi_threaded_algorithm(get_padding_function()) + .build_using_multi_threaded_algorithm(generate_padding_closure()) .unwrap(); assert_eq!(single_threaded.root, multi_threaded.root); @@ -346,13 +346,13 @@ mod tests { let single_threaded = TreeBuilder::new() .with_height(height.clone()) .with_leaf_nodes(leaf_nodes.clone()) - .build_using_single_threaded_algorithm(get_padding_function()) + .build_using_single_threaded_algorithm(generate_padding_closure()) .unwrap(); let multi_threaded = TreeBuilder::new() .with_height(height.clone()) .with_leaf_nodes(leaf_nodes) - .build_using_multi_threaded_algorithm(get_padding_function()) 
+ .build_using_multi_threaded_algorithm(generate_padding_closure()) .unwrap(); assert_eq!(single_threaded.root, multi_threaded.root); @@ -370,13 +370,13 @@ mod tests { let single_threaded = TreeBuilder::new() .with_height(height.clone()) .with_leaf_nodes(leaf_node.clone()) - .build_using_single_threaded_algorithm(get_padding_function()) + .build_using_single_threaded_algorithm(generate_padding_closure()) .unwrap(); let multi_threaded = TreeBuilder::new() .with_height(height.clone()) .with_leaf_nodes(leaf_node) - .build_using_multi_threaded_algorithm(get_padding_function()) + .build_using_multi_threaded_algorithm(generate_padding_closure()) .unwrap(); assert_eq!(single_threaded.root, multi_threaded.root); diff --git a/src/binary_tree/tree_builder/multi_threaded.rs b/src/binary_tree/tree_builder/multi_threaded.rs index c76cd714..e5459089 100644 --- a/src/binary_tree/tree_builder/multi_threaded.rs +++ b/src/binary_tree/tree_builder/multi_threaded.rs @@ -96,7 +96,7 @@ where let store = Arc::new(DashMap::>::new()); let params = RecursionParams::from_tree_height(height.clone()) .with_store_depth(store_depth) - .with_max_thread_count(max_thread_count.get_value()); + .with_max_thread_count(max_thread_count.as_u8()); if height.max_bottom_layer_nodes() / leaf_nodes.len() as u64 <= MIN_RECOMMENDED_SPARSITY as u64 { @@ -150,7 +150,7 @@ impl DashMapStore { /// If all nodes satisfy `node.coord.x <= mid` then `Full` is returned. /// If no nodes satisfy `node.coord.x <= mid` then `Empty` is returned. // TODO can be optimized using a binary search -fn get_num_nodes_left_of(x_coord_mid: u64, nodes: &Vec>) -> NumNodes { +fn num_nodes_left_of(x_coord_mid: u64, nodes: &Vec>) -> NumNodes { nodes .iter() .rposition(|leaf| leaf.coord.x <= x_coord_mid) @@ -437,7 +437,7 @@ where let within_store_depth_for_children = params.y_coord > params.height.as_raw_int() - params.store_depth; - let pair = match get_num_nodes_left_of(params.x_coord_mid, &leaves) { + let pair = match num_nodes_left_of(params.x_coord_mid, &leaves) { NumNodes::Partial(index) => { let right_leaves = leaves.split_off(index + 1); let left_leaves = leaves; @@ -542,7 +542,7 @@ mod tests { use super::super::*; use super::*; use crate::binary_tree::utils::test_utils::{ - full_bottom_layer, get_padding_function, single_leaf, sparse_leaves, TestContent, + full_bottom_layer, generate_padding_closure, single_leaf, sparse_leaves, TestContent, }; use crate::utils::test_utils::{assert_err, assert_err_simple}; @@ -555,7 +555,7 @@ mod tests { let leaf_nodes = full_bottom_layer(&height); let res = TreeBuilder::new() .with_leaf_nodes(leaf_nodes) - .build_using_multi_threaded_algorithm(get_padding_function()); + .build_using_multi_threaded_algorithm(generate_padding_closure()); // cannot use assert_err because it requires Func to have the Debug trait assert_err_simple!(res, Err(TreeBuildError::NoHeightProvided)); @@ -566,7 +566,7 @@ mod tests { let height = Height::from(4); let res = TreeBuilder::new() .with_height(height) - .build_using_multi_threaded_algorithm(get_padding_function()); + .build_using_multi_threaded_algorithm(generate_padding_closure()); // cannot use assert_err because it requires Func to have the Debug trait assert_err_simple!(res, Err(TreeBuildError::NoLeafNodesProvided)); @@ -578,7 +578,7 @@ mod tests { let res = TreeBuilder::::new() .with_height(height) .with_leaf_nodes(Vec::>::new()) - .build_using_multi_threaded_algorithm(get_padding_function()); + .build_using_multi_threaded_algorithm(generate_padding_closure()); assert_err!(res, 
Err(TreeBuildError::EmptyLeaves)); } @@ -599,7 +599,7 @@ mod tests { let res = TreeBuilder::new() .with_height(height) .with_leaf_nodes(leaf_nodes) - .build_using_multi_threaded_algorithm(get_padding_function()); + .build_using_multi_threaded_algorithm(generate_padding_closure()); assert_err!(res, Err(TreeBuildError::TooManyLeaves)); } @@ -613,7 +613,7 @@ mod tests { let res = TreeBuilder::new() .with_height(height) .with_leaf_nodes(leaf_nodes) - .build_using_multi_threaded_algorithm(get_padding_function()); + .build_using_multi_threaded_algorithm(generate_padding_closure()); // cannot use assert_err because it requires Func to have the Debug trait assert_err_simple!(res, Err(TreeBuildError::DuplicateLeaves)); @@ -627,7 +627,7 @@ mod tests { let res = TreeBuilder::new() .with_height(height) .with_leaf_nodes(vec![leaf_node]) - .build_using_multi_threaded_algorithm(get_padding_function()); + .build_using_multi_threaded_algorithm(generate_padding_closure()); // cannot use assert_err because it requires Func to have the Debug trait assert_err_simple!(res, Err(TreeBuildError::InvalidXCoord)); @@ -645,7 +645,7 @@ mod tests { let tree = TreeBuilder::new() .with_height(height.clone()) .with_leaf_nodes(leaf_nodes.clone()) - .build_using_multi_threaded_algorithm(get_padding_function()) + .build_using_multi_threaded_algorithm(generate_padding_closure()) .unwrap(); let root = tree.root(); @@ -654,7 +654,7 @@ mod tests { let tree = TreeBuilder::new() .with_height(height) .with_leaf_nodes(leaf_nodes) - .build_using_multi_threaded_algorithm(get_padding_function()) + .build_using_multi_threaded_algorithm(generate_padding_closure()) .unwrap(); assert_eq!(root, tree.root()); @@ -668,7 +668,7 @@ mod tests { let tree = TreeBuilder::new() .with_height(height) .with_leaf_nodes(leaf_nodes.clone()) - .build_using_multi_threaded_algorithm(get_padding_function()) + .build_using_multi_threaded_algorithm(generate_padding_closure()) .unwrap(); for leaf in leaf_nodes { @@ -689,7 +689,7 @@ mod tests { let tree = TreeBuilder::new() .with_height(height.clone()) .with_leaf_nodes(leaf_nodes.clone()) - .build_using_multi_threaded_algorithm(get_padding_function()) + .build_using_multi_threaded_algorithm(generate_padding_closure()) .unwrap(); let middle_layer = height.as_raw_int() / 2; @@ -728,7 +728,7 @@ mod tests { .with_height(height.clone()) .with_leaf_nodes(leaf_nodes.clone()) .with_store_depth(store_depth) - .build_using_multi_threaded_algorithm(get_padding_function()) + .build_using_multi_threaded_algorithm(generate_padding_closure()) .unwrap(); let layer_below_root = height.as_raw_int() - 1; diff --git a/src/binary_tree/tree_builder/single_threaded.rs b/src/binary_tree/tree_builder/single_threaded.rs index bce8291d..728022dc 100644 --- a/src/binary_tree/tree_builder/single_threaded.rs +++ b/src/binary_tree/tree_builder/single_threaded.rs @@ -313,7 +313,7 @@ where mod tests { use super::super::*; use crate::binary_tree::utils::test_utils::{ - full_bottom_layer, get_padding_function, single_leaf, sparse_leaves, TestContent, + full_bottom_layer, generate_padding_closure, single_leaf, sparse_leaves, TestContent, }; use crate::utils::test_utils::{assert_err, assert_err_simple}; @@ -326,7 +326,7 @@ mod tests { let leaf_nodes = full_bottom_layer(&height); let res = TreeBuilder::new() .with_leaf_nodes(leaf_nodes) - .build_using_single_threaded_algorithm(get_padding_function()); + .build_using_single_threaded_algorithm(generate_padding_closure()); // cannot use assert_err because it requires Func to have the Debug trait 
assert_err_simple!(res, Err(TreeBuildError::NoHeightProvided)); @@ -337,7 +337,7 @@ mod tests { let height = Height::from(4); let res = TreeBuilder::new() .with_height(height) - .build_using_single_threaded_algorithm(get_padding_function()); + .build_using_single_threaded_algorithm(generate_padding_closure()); // cannot use assert_err because it requires Func to have the Debug trait assert_err_simple!(res, Err(TreeBuildError::NoLeafNodesProvided)); @@ -349,7 +349,7 @@ mod tests { let res = TreeBuilder::::new() .with_height(height) .with_leaf_nodes(Vec::>::new()) - .build_using_single_threaded_algorithm(get_padding_function()); + .build_using_single_threaded_algorithm(generate_padding_closure()); assert_err!(res, Err(TreeBuildError::EmptyLeaves)); } @@ -370,7 +370,7 @@ mod tests { let res = TreeBuilder::new() .with_height(height) .with_leaf_nodes(leaf_nodes) - .build_using_single_threaded_algorithm(get_padding_function()); + .build_using_single_threaded_algorithm(generate_padding_closure()); assert_err!(res, Err(TreeBuildError::TooManyLeaves)); } @@ -384,7 +384,7 @@ mod tests { let res = TreeBuilder::new() .with_height(height) .with_leaf_nodes(leaf_nodes) - .build_using_single_threaded_algorithm(get_padding_function()); + .build_using_single_threaded_algorithm(generate_padding_closure()); // cannot use assert_err because it requires Func to have the Debug trait assert_err_simple!(res, Err(TreeBuildError::DuplicateLeaves)); @@ -398,7 +398,7 @@ mod tests { let res = TreeBuilder::new() .with_height(height) .with_leaf_nodes(vec![leaf_node]) - .build_using_single_threaded_algorithm(get_padding_function()); + .build_using_single_threaded_algorithm(generate_padding_closure()); // cannot use assert_err because it requires Func to have the Debug trait assert_err_simple!(res, Err(TreeBuildError::InvalidXCoord)); @@ -416,7 +416,7 @@ mod tests { let tree = TreeBuilder::new() .with_height(height.clone()) .with_leaf_nodes(leaf_nodes.clone()) - .build_using_single_threaded_algorithm(&get_padding_function()) + .build_using_single_threaded_algorithm(&generate_padding_closure()) .unwrap(); let root = tree.root(); @@ -425,7 +425,7 @@ mod tests { let tree = TreeBuilder::new() .with_height(height) .with_leaf_nodes(leaf_nodes) - .build_using_single_threaded_algorithm(&get_padding_function()) + .build_using_single_threaded_algorithm(&generate_padding_closure()) .unwrap(); assert_eq!(root, tree.root()); @@ -439,7 +439,7 @@ mod tests { let tree = TreeBuilder::new() .with_height(height) .with_leaf_nodes(leaf_nodes.clone()) - .build_using_single_threaded_algorithm(&get_padding_function()) + .build_using_single_threaded_algorithm(&generate_padding_closure()) .unwrap(); for leaf in leaf_nodes { @@ -460,7 +460,7 @@ mod tests { let tree = TreeBuilder::new() .with_height(height.clone()) .with_leaf_nodes(leaf_nodes.clone()) - .build_using_single_threaded_algorithm(&get_padding_function()) + .build_using_single_threaded_algorithm(&generate_padding_closure()) .unwrap(); let middle_layer = height.as_raw_int() / 2; @@ -499,7 +499,7 @@ mod tests { .with_height(height.clone()) .with_leaf_nodes(leaf_nodes.clone()) .with_store_depth(store_depth) - .build_using_single_threaded_algorithm(&get_padding_function()) + .build_using_single_threaded_algorithm(&generate_padding_closure()) .unwrap(); let layer_below_root = height.as_raw_int() - 1; diff --git a/src/binary_tree/utils.rs b/src/binary_tree/utils.rs index 21503982..9969d7cf 100644 --- a/src/binary_tree/utils.rs +++ b/src/binary_tree/utils.rs @@ -37,7 +37,7 @@ pub mod test_utils { 
} } - pub fn get_padding_function() -> impl Fn(&Coordinate) -> TestContent { + pub fn generate_padding_closure() -> impl Fn(&Coordinate) -> TestContent { |_coord: &Coordinate| -> TestContent { TestContent { value: 0, diff --git a/src/hasher.rs b/src/hasher.rs index c41f1882..f28bce7f 100644 --- a/src/hasher.rs +++ b/src/hasher.rs @@ -1,5 +1,7 @@ use primitive_types::H256; +const DELIMITER: &[u8] = ";".as_bytes(); + /// Abstraction of a hash function, allows easy switching of hash function. /// /// The main purpose of the hash function is usage in the binary tree merge @@ -25,6 +27,7 @@ impl Hasher { pub fn update(&mut self, input: &[u8]) -> &mut Self { self.0.update(input); + self.0.update(DELIMITER); self } @@ -58,7 +61,7 @@ mod tests { let hash = hasher.finalize(); assert_eq!( hash, - H256::from_str("e4bf4e238e74eb8d253191a56b594565514201a71373c86e304628ed623c4850") + H256::from_str("09eb9ee70fc9df4d767b07cc5befc6f7a303fa0025fca014e22e8c3dc9927767") .unwrap() ); } diff --git a/src/inclusion_proof/aggregated_range_proof.rs b/src/inclusion_proof/aggregated_range_proof.rs index 10c14792..4594200c 100644 --- a/src/inclusion_proof/aggregated_range_proof.rs +++ b/src/inclusion_proof/aggregated_range_proof.rs @@ -207,7 +207,7 @@ impl AggregatedRangeProof { commitments: &Vec, upper_bound_bit_length: u8, ) -> Result<(), RangeProofError> { - if commitments.len() != self.get_input_size() as usize { + if commitments.len() != self.input_size() as usize { return Err(RangeProofError::InputVectorLengthMismatch); } @@ -261,7 +261,7 @@ impl AggregatedRangeProof { .map_err(RangeProofError::BulletproofVerificationError) } - fn get_input_size(&self) -> u8 { + fn input_size(&self) -> u8 { match self { AggregatedRangeProof::Padding { proof: _, diff --git a/src/max_thread_count.rs b/src/max_thread_count.rs index 5908c272..6952c614 100644 --- a/src/max_thread_count.rs +++ b/src/max_thread_count.rs @@ -29,7 +29,7 @@ impl MaxThreadCount { MaxThreadCount(max_thread_count) } - pub fn get_value(&self) -> u8 { + pub fn as_u8(&self) -> u8 { self.0 } } @@ -74,7 +74,7 @@ use clap::builder::{OsStr, Str}; impl From for OsStr { fn from(max_thread_count: MaxThreadCount) -> OsStr { - OsStr::from(Str::from(max_thread_count.get_value().to_string())) + OsStr::from(Str::from(max_thread_count.as_u8().to_string())) } } @@ -122,6 +122,6 @@ mod tests { #[test] fn default_without_initializing_machine_parallelism() { - assert_eq!(MaxThreadCount::default().get_value(), DEFAULT_MAX_THREAD_COUNT); + assert_eq!(MaxThreadCount::default().as_u8(), DEFAULT_MAX_THREAD_COUNT); } } diff --git a/src/node_content/full_node.rs b/src/node_content/full_node.rs index ed88023c..708acda5 100644 --- a/src/node_content/full_node.rs +++ b/src/node_content/full_node.rs @@ -116,7 +116,7 @@ impl FullNodeContent { let commitment = PedersenGens::default().commit(Scalar::from(liability), blinding_factor_scalar); - let coord_bytes = coord.as_bytes(); + let coord_bytes = coord.to_bytes(); let salt_bytes: [u8; 32] = salt.into(); // Compute the hash: `H("pad" | coordinate | salt)` diff --git a/src/node_content/hidden_node.rs b/src/node_content/hidden_node.rs index a2411522..f24e402f 100644 --- a/src/node_content/hidden_node.rs +++ b/src/node_content/hidden_node.rs @@ -87,7 +87,7 @@ impl HiddenNodeContent { // Compute the hash: `H("pad" | coordinate | salt)` let mut hasher = Hasher::new(); hasher.update("pad".as_bytes()); - hasher.update(&coord.as_bytes()); + hasher.update(&coord.to_bytes()); hasher.update(&salt_bytes); let hash = hasher.finalize();