feat(crypto): CRP-2597 make NiDkgTag and NiDkgId non-Copy (#2347)
Adapts the `NiDkgTag`, and consequently also the `NiDkgId`, to be
non-`Copy`.

This is needed so that, as a next step for the vetKeys feature, we can
extend the `NiDkgTag` with a variant that holds a `MasterPublicKeyId`,
which is not `Copy` because it indirectly contains a `name: String`
field.
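
As a rough illustration, the extended enum could look like the sketch
below. This is not part of this commit; the variant name and the
simplified `MasterPublicKeyId` are assumptions for illustration only.

```rust
// Hypothetical sketch, not part of this commit: the variant name and this
// simplified `MasterPublicKeyId` are assumptions for illustration.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct MasterPublicKeyId {
    pub name: String, // the String field is what makes the type non-Copy
}

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum NiDkgTag {
    LowThreshold,
    HighThreshold,
    // Planned vetKeys variant: holding a key id forces the enum to give up `Copy`.
    HighThresholdForKey(MasterPublicKeyId),
}
```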

A consequence of making `NiDkgTag` and `NiDkgId` non-`Copy` is that we
have to `clone` these structs in various places in the code. Compared to
the cost of the cryptographic protocols in which the structs are used,
however, these `clone`s are negligible.

In the `NiDkgCspClient`, various `dkg_id: NiDkgId` parameters were
unused, so they were directly removed to avoid unnecessary cloning.

An alternative to making the `NiDkgTag` non-`Copy` would be to replace
the `String` in the `MasterPublicKeyId` with a new struct holding a
length-limited byte array that represents the name, so that the tag
could remain `Copy` (similar to how this is done in
`ic_principal::Principal`). A downside of that approach is that
proposals would fail silently if the permitted name length were
exceeded. We could still explore this option in the future. For now,
the priority is to unblock the above-mentioned work of extending the
`NiDkgTag` with a variant that holds a `MasterPublicKeyId`. If we later
make the structs `Copy` again, the compiler will guide us in removing
all the unnecessary clones (with `error: using clone on type NiDkgTag
which implements the Copy trait`).
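
For reference, a minimal sketch of that rejected `Copy`-friendly
alternative, assuming a 32-byte limit; the type name `BoundedKeyName`
and the limit are hypothetical, not existing code:

```rust
/// Hypothetical sketch of the rejected Copy-friendly alternative: a
/// length-limited name stored inline so the containing types stay `Copy`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct BoundedKeyName {
    len: u8,
    bytes: [u8; 32],
}

impl BoundedKeyName {
    /// Returns `None` if `name` exceeds the permitted length; callers would
    /// have to reject such names explicitly to avoid the silent-failure
    /// downside mentioned above.
    pub fn new(name: &str) -> Option<Self> {
        let src = name.as_bytes();
        if src.len() > 32 {
            return None;
        }
        let mut bytes = [0u8; 32];
        bytes[..src.len()].copy_from_slice(src);
        Some(Self {
            len: src.len() as u8,
            bytes,
        })
    }

    pub fn as_str(&self) -> &str {
        // Always valid UTF-8, because the bytes were copied verbatim from a &str.
        std::str::from_utf8(&self.bytes[..self.len as usize]).unwrap_or("")
    }
}
```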
fspreiss authored Nov 6, 2024
1 parent 4991a57 commit 210f1ef
Showing 43 changed files with 380 additions and 429 deletions.
13 changes: 7 additions & 6 deletions rs/consensus/src/consensus/dkg_key_manager.rs
@@ -312,14 +312,14 @@ impl DkgKeyManager {
.current_transcripts()
.iter()
.filter(|(_, t)| !self.pending_transcript_loads.contains_key(&t.dkg_id))
- .map(|(_, t)| (current_interval_start, t.dkg_id));
+ .map(|(_, t)| (current_interval_start, t.dkg_id.clone()));

// For next transcripts, we take the start of the next interval as a deadline.
let next_transcripts_with_load_deadlines = summary
.next_transcripts()
.iter()
.filter(|(_, t)| !self.pending_transcript_loads.contains_key(&t.dkg_id))
- .map(|(_, t)| (next_interval_start, t.dkg_id));
+ .map(|(_, t)| (next_interval_start, t.dkg_id.clone()));

current_transcripts_with_load_deadlines
.chain(next_transcripts_with_load_deadlines)
@@ -333,7 +333,8 @@ impl DkgKeyManager {
let logger = self.logger.clone();
let summary = summary.clone();
let (tx, rx) = sync_channel(0);
- self.pending_transcript_loads.insert(dkg_id, (deadline, rx));
+ self.pending_transcript_loads
+ .insert(dkg_id.clone(), (deadline, rx));

std::thread::spawn(move || {
let transcript = summary
@@ -596,7 +597,7 @@ mod tests {
.current_transcripts()
.values()
.chain(dkg_summary.next_transcripts().values())
- .map(|t| t.dkg_id)
+ .map(|t| t.dkg_id.clone())
.collect::<HashSet<_>>();
// We expect the genesis summary to contain exactly 2 current transcripts.
assert_eq!(summary_0_transcripts.len(), 2);
@@ -629,7 +630,7 @@ mod tests {
.current_transcripts()
.values()
.chain(dkg_summary.next_transcripts().values())
- .map(|t| t.dkg_id)
+ .map(|t| t.dkg_id.clone())
.collect::<HashSet<_>>();
// For the 3rd summary we expect 2 current and 2 next transcripts.
assert_eq!(summary_2_transcripts.len(), 4);
@@ -659,7 +660,7 @@ mod tests {
.current_transcripts()
.values()
.chain(dkg_summary.next_transcripts().values())
- .map(|t| t.dkg_id)
+ .map(|t| t.dkg_id.clone())
.collect::<HashSet<_>>();
// For the 3rd summary we expect 2 current and 2 next transcripts.
assert_eq!(summary_3_transcripts.len(), 4);
4 changes: 2 additions & 2 deletions rs/consensus/src/consensus/share_aggregator.rs
@@ -66,7 +66,7 @@ impl ShareAggregator {
&self.log,
self.membership.as_ref(),
self.crypto.as_aggregate(),
- Box::new(|_| dkg_id),
+ Box::new(|_| dkg_id.clone()),
shares,
))
}
@@ -153,7 +153,7 @@ impl ShareAggregator {
&self.log,
self.membership.as_ref(),
self.crypto.as_aggregate(),
- Box::new(|_| dkg_id),
+ Box::new(|_| dkg_id.clone()),
shares,
);
if !result.is_empty() {
8 changes: 4 additions & 4 deletions rs/consensus/src/consensus/validator.rs
@@ -203,10 +203,10 @@ impl SignatureVerify for RandomTape {
let dkg_id = active_low_threshold_nidkg_id(pool.as_cache(), self.height())
.ok_or_else(|| ValidationFailure::DkgSummaryNotFound(self.height()))?;
if self.signature.signer == dkg_id {
- crypto.verify_aggregate(self, self.signature.signer)?;
+ crypto.verify_aggregate(self, self.signature.signer.clone())?;
Ok(())
} else {
- Err(InvalidArtifactReason::InappropriateDkgId(self.signature.signer).into())
+ Err(InvalidArtifactReason::InappropriateDkgId(self.signature.signer.clone()).into())
}
}
}
@@ -244,10 +244,10 @@ impl SignatureVerify for RandomBeacon {
let dkg_id = active_low_threshold_nidkg_id(pool.as_cache(), self.height())
.ok_or_else(|| ValidationFailure::DkgSummaryNotFound(self.height()))?;
if self.signature.signer == dkg_id {
- crypto.verify_aggregate(self, self.signature.signer)?;
+ crypto.verify_aggregate(self, self.signature.signer.clone())?;
Ok(())
} else {
- Err(InvalidArtifactReason::InappropriateDkgId(self.signature.signer).into())
+ Err(InvalidArtifactReason::InappropriateDkgId(self.signature.signer.clone()).into())
}
}
}
20 changes: 11 additions & 9 deletions rs/consensus/src/dkg.rs
@@ -148,7 +148,7 @@ impl DkgImpl {

let content =
match ic_interfaces::crypto::NiDkgAlgorithm::create_dealing(&*self.crypto, config) {
- Ok(dealing) => DealingContent::new(dealing, config.dkg_id()),
+ Ok(dealing) => DealingContent::new(dealing, config.dkg_id().clone()),
Err(err) => {
match config.dkg_id().target_subnet {
NiDkgTargetSubnet::Local => error!(
@@ -216,7 +216,7 @@ impl DkgImpl {
return Mutations::from(ChangeAction::RemoveFromUnvalidated((*message).clone()));
}

- let message_dkg_id = message.content.dkg_id;
+ let message_dkg_id = &message.content.dkg_id;

// If the dealing refers to a DKG interval starting at a different height,
// we skip it.
@@ -226,7 +226,7 @@ impl DkgImpl {

// If the dealing refers a config which is not among the ongoing DKGs,
// we reject it.
- let config = match configs.get(&message_dkg_id) {
+ let config = match configs.get(message_dkg_id) {
Some(config) => config,
None => {
return get_handle_invalid_change_action(
@@ -311,7 +311,7 @@ impl DkgImpl {

fn contains_dkg_messages(dkg_pool: &dyn DkgPool, config: &NiDkgConfig, replica_id: NodeId) -> bool {
dkg_pool.get_validated().any(|message| {
- message.content.dkg_id == config.dkg_id() && message.signature.signer == replica_id
+ &message.content.dkg_id == config.dkg_id() && message.signature.signer == replica_id
})
}

@@ -348,7 +348,7 @@ impl<T: DkgPool> PoolMutationsProducer<T> for DkgImpl {
.get_unvalidated()
// Group all unvalidated dealings by dealer.
.fold(BTreeMap::new(), |mut map, dealing| {
- let key = (dealing.signature.signer, dealing.content.dkg_id);
+ let key = (dealing.signature.signer, dealing.content.dkg_id.clone());
let dealings: &mut Vec<_> = map.entry(key).or_default();
dealings.push(dealing);
processed += 1;
@@ -499,10 +499,12 @@ pub fn make_registry_cup_from_cup_contents(

let low_dkg_id = dkg_summary
.current_transcript(&NiDkgTag::LowThreshold)
- .dkg_id;
+ .dkg_id
+ .clone();
let high_dkg_id = dkg_summary
.current_transcript(&NiDkgTag::HighThreshold)
- .dkg_id;
+ .dkg_id
+ .clone();

// In a NNS subnet recovery case the block validation context needs to reference a registry
// version of the NNS to be recovered. Otherwise the validation context points to a registry
@@ -1247,7 +1249,7 @@ mod tests {

// Now we create a message with an unknown Dkg id and verify
// that it gets rejected.
- let mut invalid_dkg_id = valid_dealing_message.content.dkg_id;
+ let mut invalid_dkg_id = valid_dealing_message.content.dkg_id.clone();
invalid_dkg_id.dealer_subnet = subnet_test_id(444);
let mut invalid_dealing_message = valid_dealing_message.clone();
invalid_dealing_message.content.dkg_id = invalid_dkg_id;
@@ -1325,7 +1327,7 @@ mod tests {
let dkg_id_from_future = NiDkgId {
start_block_height: ic_types::Height::from(1000),
dealer_subnet: valid_dealing_message.content.dkg_id.dealer_subnet,
- dkg_tag: valid_dealing_message.content.dkg_id.dkg_tag,
+ dkg_tag: valid_dealing_message.content.dkg_id.dkg_tag.clone(),
target_subnet: NiDkgTargetSubnet::Local,
};
let mut dealing_message_from_future = valid_dealing_message;
52 changes: 28 additions & 24 deletions rs/consensus/src/dkg/payload_builder.rs
@@ -102,7 +102,7 @@ pub fn create_payload(
// Make sure the message relates to one of the ongoing DKGs and it's from a unique
// dealer.
last_dkg_summary.configs.contains_key(&msg.content.dkg_id)
- && !dealers_from_chain.contains(&(msg.content.dkg_id, msg.signature.signer))
+ && !dealers_from_chain.contains(&(msg.content.dkg_id.clone(), msg.signature.signer))
})
.take(max_dealings_per_block)
.cloned()
@@ -160,11 +160,11 @@ pub(super) fn create_summary_payload(
Ok(transcript) => {
let previous_value_found = if dkg_id.target_subnet == NiDkgTargetSubnet::Local {
next_transcripts
- .insert(dkg_id.dkg_tag, transcript)
+ .insert(dkg_id.dkg_tag.clone(), transcript)
.is_some()
} else {
transcripts_for_remote_subnets
- .insert(*dkg_id, Ok(transcript))
+ .insert(dkg_id.clone(), Ok(transcript))
.is_some()
};
if previous_value_found {
@@ -199,7 +199,7 @@ pub(super) fn create_summary_payload(
&last_summary
.transcripts_for_remote_subnets
.iter()
- .map(|(id, _, result)| (*id, result.clone()))
+ .map(|(id, _, result)| (id.clone(), result.clone()))
.collect(),
&last_summary.initial_dkg_attempts,
&logger,
@@ -261,7 +261,7 @@ fn create_transcript(
_logger: &ReplicaLogger,
) -> Result<NiDkgTranscript, DkgCreateTranscriptError> {
let no_dealings = BTreeMap::new();
- let dealings = all_dealings.get(&config.dkg_id()).unwrap_or(&no_dealings);
+ let dealings = all_dealings.get(config.dkg_id()).unwrap_or(&no_dealings);

ic_interfaces::crypto::NiDkgAlgorithm::create_transcript(crypto, config, dealings)
}
@@ -312,16 +312,16 @@ fn compute_remote_dkg_data(
// if we do, move it to the new summary.
if let Some((id, transcript)) = previous_transcripts
.iter()
- .find(|(id, _)| eq_sans_height(id, &dkg_id))
+ .find(|(id, _)| eq_sans_height(id, dkg_id))
{
- new_transcripts.insert(*id, transcript.clone());
+ new_transcripts.insert(id.clone(), transcript.clone());
}
// If not, we check if we computed a transcript for this config in the last round. And
// if not, we move the config into the new summary so that we try again in
// the next round.
else if !new_transcripts
.iter()
- .any(|(id, _)| eq_sans_height(id, &dkg_id))
+ .any(|(id, _)| eq_sans_height(id, dkg_id))
{
expected_configs.push(config)
}
@@ -364,7 +364,7 @@ fn compute_remote_dkg_data(
if failed_target_ids.contains(&id) {
for config in config_group.iter() {
new_transcripts.insert(
- config.dkg_id(),
+ config.dkg_id().clone(),
Err(REMOTE_DKG_REPEATED_FAILURE_ERROR.to_string()),
);
}
@@ -493,7 +493,7 @@ pub(crate) fn get_configs_for_local_transcripts(
let dkg_id = NiDkgId {
start_block_height,
dealer_subnet: subnet_id,
- dkg_tag: *tag,
+ dkg_tag: tag.clone(),
target_subnet: NiDkgTargetSubnet::Local,
};
let (dealers, resharing_transcript) = match tag {
@@ -701,9 +701,13 @@ fn create_remote_dkg_configs(
};

let low_thr_config =
- do_create_remote_dkg_config(&low_thr_dkg_id, dealers, receivers, registry_version);
- let high_thr_config =
- do_create_remote_dkg_config(&high_thr_dkg_id, dealers, receivers, registry_version);
+ do_create_remote_dkg_config(low_thr_dkg_id.clone(), dealers, receivers, registry_version);
+ let high_thr_config = do_create_remote_dkg_config(
+ high_thr_dkg_id.clone(),
+ dealers,
+ receivers,
+ registry_version,
+ );
let sibl_err = String::from("Failed to create the sibling config");
match (low_thr_config, high_thr_config) {
(Ok(config0), Ok(config1)) => Ok((config0, config1)),
@@ -733,20 +737,20 @@ fn create_remote_dkg_configs(
}

fn do_create_remote_dkg_config(
- dkg_id: &NiDkgId,
+ dkg_id: NiDkgId,
dealers: &BTreeSet<NodeId>,
receivers: &BTreeSet<NodeId>,
registry_version: &RegistryVersion,
) -> Result<NiDkgConfig, NiDkgConfigValidationError> {
NiDkgConfig::new(NiDkgConfigData {
- dkg_id: *dkg_id,
+ threshold: NumberOfNodes::from(
+ dkg_id.dkg_tag.threshold_for_subnet_of_size(receivers.len()) as u32,
+ ),
+ dkg_id,
max_corrupt_dealers: NumberOfNodes::from(get_faults_tolerated(dealers.len()) as u32),
max_corrupt_receivers: NumberOfNodes::from(get_faults_tolerated(receivers.len()) as u32),
dealers: dealers.clone(),
receivers: receivers.clone(),
- threshold: NumberOfNodes::from(
- dkg_id.dkg_tag.threshold_for_subnet_of_size(receivers.len()) as u32,
- ),
registry_version: *registry_version,
resharing_transcript: None,
})
@@ -842,10 +846,10 @@ mod tests {
let config = configs[index].clone();
assert_eq!(
config.dkg_id(),
- NiDkgId {
+ &NiDkgId {
start_block_height,
dealer_subnet: subnet_id,
- dkg_tag: *tag,
+ dkg_tag: tag.clone(),
target_subnet: NiDkgTargetSubnet::Local,
}
);
@@ -1180,12 +1184,12 @@ mod tests {
&NiDkgId {
start_block_height: Height::from(0),
dealer_subnet: subnet_id,
- dkg_tag: *tag,
+ dkg_tag: tag.clone(),
target_subnet: NiDkgTargetSubnet::Local,
}
);

- assert_eq!(&conf.dkg_id(), id);
+ assert_eq!(conf.dkg_id(), id);
assert_eq!(
conf.registry_version(),
RegistryVersion::from(initial_registry_version)
@@ -1285,12 +1289,12 @@ mod tests {
&NiDkgId {
start_block_height: Height::from(expected_height),
dealer_subnet: subnet_id,
- dkg_tag: *tag,
+ dkg_tag: tag.clone(),
target_subnet: NiDkgTargetSubnet::Local,
}
);

- assert_eq!(&conf.dkg_id(), id);
+ assert_eq!(conf.dkg_id(), id);
assert_eq!(
conf.registry_version(),
RegistryVersion::from(initial_registry_version)
2 changes: 1 addition & 1 deletion rs/consensus/src/dkg/payload_validator.rs
@@ -226,7 +226,7 @@ fn validate_dealings_payload(
let dealers_from_payload: HashSet<_> = dealings
.messages
.iter()
- .map(|message| (message.content.dkg_id, message.signature.signer))
+ .map(|message| (message.content.dkg_id.clone(), message.signature.signer))
.collect();

if dealers_from_payload.len() != dealings.messages.len() {
8 changes: 6 additions & 2 deletions rs/consensus/src/dkg/utils.rs
@@ -12,7 +12,11 @@ pub(super) fn get_dealers_from_chain(
) -> HashSet<(NiDkgId, NodeId)> {
get_dkg_dealings(pool_reader, block)
.into_iter()
- .flat_map(|(dkg_id, dealings)| dealings.into_keys().map(move |node_id| (dkg_id, node_id)))
+ .flat_map(|(dkg_id, dealings)| {
+ dealings
+ .into_keys()
+ .map(move |node_id| (dkg_id.clone(), node_id))
+ })
.collect()
}

@@ -35,7 +39,7 @@ pub(super) fn get_dkg_dealings(
.messages
.iter()
.for_each(|msg| {
- let collected_dealings = acc.entry(msg.content.dkg_id).or_default();
+ let collected_dealings = acc.entry(msg.content.dkg_id.clone()).or_default();
assert!(
collected_dealings
.insert(msg.signature.signer, msg.content.dealing.clone())