diff --git a/Cargo.lock b/Cargo.lock
index 8a5d6423c7c1..823f98cdbc0e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6588,6 +6588,7 @@ dependencies = [
  "ic-crypto-test-utils-ni-dkg",
  "ic-crypto-tree-hash",
  "ic-interfaces",
+ "ic-interfaces-state-manager",
  "ic-limits",
  "ic-logger",
  "ic-metrics",
@@ -6598,6 +6599,7 @@ dependencies = [
  "ic-test-utilities-consensus",
  "ic-test-utilities-logger",
  "ic-test-utilities-registry",
+ "ic-test-utilities-state",
  "ic-test-utilities-time",
  "ic-test-utilities-types",
  "ic-types",
@@ -7988,12 +7990,14 @@ name = "ic-consensus-dkg"
 version = "0.9.0"
 dependencies = [
  "ic-artifact-pool",
+ "ic-config",
  "ic-consensus-mocks",
  "ic-consensus-utils",
  "ic-crypto-temp-crypto",
  "ic-crypto-test-utils-crypto-returning-ok",
  "ic-crypto-test-utils-ni-dkg",
  "ic-interfaces",
+ "ic-interfaces-mocks",
  "ic-interfaces-registry",
  "ic-interfaces-registry-mocks",
  "ic-interfaces-state-manager",
diff --git a/rs/artifact_pool/BUILD.bazel b/rs/artifact_pool/BUILD.bazel
index 2da1b69323eb..cb95aed1e72e 100644
--- a/rs/artifact_pool/BUILD.bazel
+++ b/rs/artifact_pool/BUILD.bazel
@@ -9,12 +9,14 @@ DEV_DEPENDENCIES = [
     "//rs/crypto/test_utils/crypto_returning_ok",
     "//rs/crypto/test_utils/ni-dkg",
     "//rs/crypto/tree_hash",
+    "//rs/interfaces/state_manager",
     "//rs/limits",
     "//rs/test_utilities",
     "//rs/test_utilities/artifact_pool",
     "//rs/test_utilities/consensus",
     "//rs/test_utilities/logger",
     "//rs/test_utilities/registry",
+    "//rs/test_utilities/state",
     "//rs/test_utilities/time",
     "//rs/test_utilities/types",
     "@crate_index//:rand",
diff --git a/rs/artifact_pool/Cargo.toml b/rs/artifact_pool/Cargo.toml
index b09328ef1dc9..f9989af3d799 100644
--- a/rs/artifact_pool/Cargo.toml
+++ b/rs/artifact_pool/Cargo.toml
@@ -40,11 +40,13 @@ ic-crypto-tree-hash = { path = "../crypto/tree_hash" }
 ic-crypto-test-utils-canister-threshold-sigs = { path = "../crypto/test_utils/canister_threshold_sigs" }
 ic-crypto-test-utils-crypto-returning-ok = { path = "../crypto/test_utils/crypto_returning_ok" }
 ic-crypto-test-utils-ni-dkg = { path = "../crypto/test_utils/ni-dkg" }
+ic-interfaces-state-manager = { path = "../interfaces/state_manager" }
 ic-test-artifact-pool = { path = "../test_utilities/artifact_pool" }
 ic-test-utilities = { path = "../test_utilities" }
 ic-test-utilities-consensus = { path = "../test_utilities/consensus" }
 ic-test-utilities-logger = { path = "../test_utilities/logger" }
 ic-test-utilities-registry = { path = "../test_utilities/registry" }
+ic-test-utilities-state = { path = "../test_utilities/state" }
 ic-test-utilities-time = { path = "../test_utilities/time" }
 ic-test-utilities-types = { path = "../test_utilities/types" }
 rand = { workspace = true }
diff --git a/rs/artifact_pool/src/consensus_pool_cache.rs b/rs/artifact_pool/src/consensus_pool_cache.rs
index 8da4b9750f94..fbcc91782af6 100644
--- a/rs/artifact_pool/src/consensus_pool_cache.rs
+++ b/rs/artifact_pool/src/consensus_pool_cache.rs
@@ -508,7 +508,7 @@ mod test {
     use ic_crypto_test_utils_crypto_returning_ok::CryptoReturningOk;
     use ic_interfaces::consensus_pool::{HEIGHT_CONSIDERED_BEHIND, ValidatedConsensusArtifact};
     use ic_test_artifact_pool::consensus_pool::{Round, TestConsensusPool};
-    use ic_test_utilities::state_manager::FakeStateManager;
+    use ic_test_utilities::state_manager::{FakeStateManager, RefMockStateManager};
     use ic_test_utilities_consensus::fake::*;
     use ic_test_utilities_registry::{SubnetRecordBuilder, setup_registry};
     use ic_test_utilities_time::FastForwardTimeSource;
@@ -637,6 +637,15 @@ mod test {
             .build(),
         )];
 
+        let state_manager = RefMockStateManager::default();
+        state_manager
+            .get_mut()
+            .expect_get_state_at()
+            .return_const(Ok(ic_interfaces_state_manager::Labeled::new(
+                Height::new(0),
+                Arc::new(ic_test_utilities_state::get_initial_state(0, 0)),
+            )));
+
         let mut pool = TestConsensusPool::new(
             node_test_id(0),
             subnet_test_id(1),
@@ -644,7 +653,7 @@ mod test {
             FastForwardTimeSource::new(),
             setup_registry(subnet_test_id(1), subnet_records),
             Arc::new(CryptoReturningOk::default()),
-            Arc::new(FakeStateManager::new()),
+            Arc::new(state_manager),
             None,
         );
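Note: the test above swaps the `FakeStateManager` for a `RefMockStateManager` whose `get_state_at` always returns the labeled initial state. A minimal, self-contained sketch of this mocking pattern, assuming only the `mockall` crate; the trait, the `Labeled` wrapper, and every name below are simplified stand-ins for illustration, not the codebase's real interfaces.

    use mockall::mock;

    /// Simplified stand-in for `ic_interfaces_state_manager::Labeled`.
    #[derive(Clone, Debug, PartialEq)]
    struct Labeled<T> {
        height: u64,
        value: T,
    }

    /// Simplified stand-in for the `StateReader` interface.
    trait StateReader {
        fn get_state_at(&self, height: u64) -> Result<Labeled<String>, String>;
    }

    mock! {
        StateManager {}
        impl StateReader for StateManager {
            fn get_state_at(&self, height: u64) -> Result<Labeled<String>, String>;
        }
    }

    fn main() {
        let mut state_manager = MockStateManager::new();
        // Mirror the setup above: every call returns the same labeled initial state.
        state_manager.expect_get_state_at().return_const(Ok(Labeled {
            height: 0,
            value: "initial state".to_string(),
        }));
        assert!(state_manager.get_state_at(0).is_ok());
        assert!(state_manager.get_state_at(42).is_ok());
    }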
diff --git a/rs/consensus/dkg/BUILD.bazel b/rs/consensus/dkg/BUILD.bazel
index 4b9dcbe2f510..f270aed44004 100644
--- a/rs/consensus/dkg/BUILD.bazel
+++ b/rs/consensus/dkg/BUILD.bazel
@@ -23,10 +23,12 @@ DEPENDENCIES = [
 DEV_DEPENDENCIES = [
     # Keep sorted.
     "//rs/artifact_pool",
+    "//rs/config",
     "//rs/consensus/mocks",
     "//rs/crypto/temp_crypto",
     "//rs/crypto/test_utils/crypto_returning_ok",
     "//rs/crypto/test_utils/ni-dkg",
+    "//rs/interfaces/mocks",
     "//rs/interfaces/registry/mocks",
     "//rs/registry/keys",
     "//rs/registry/subnet_features",
diff --git a/rs/consensus/dkg/Cargo.toml b/rs/consensus/dkg/Cargo.toml
index 675332fb83be..3e2bb75533c2 100644
--- a/rs/consensus/dkg/Cargo.toml
+++ b/rs/consensus/dkg/Cargo.toml
@@ -25,6 +25,8 @@ rayon = { workspace = true }
 
 [dev-dependencies]
+ic-config = { path = "../../config" }
+ic-interfaces-mocks = { path = "../../interfaces/mocks" }
 ic-interfaces-registry-mocks = { path = "../../interfaces/registry/mocks" }
 ic-test-artifact-pool = { path = "../../test_utilities/artifact_pool" }
 prost = { workspace = true }
diff --git a/rs/consensus/dkg/src/lib.rs b/rs/consensus/dkg/src/lib.rs
index 0f599d47b436..69db12c562b2 100644
--- a/rs/consensus/dkg/src/lib.rs
+++ b/rs/consensus/dkg/src/lib.rs
@@ -44,14 +44,18 @@ mod utils;
 pub use dkg_key_manager::DkgKeyManager;
 pub use payload_builder::{create_payload, get_dkg_summary_from_cup_contents};
 
-// The maximal number of DKGs for other subnets we want to run in one interval.
+/// The maximal number of DKGs for other subnets we want to run in one interval.
 const MAX_REMOTE_DKGS_PER_INTERVAL: usize = 1;
 
-// The maximum number of intervals during which an initial DKG request is
-// attempted.
+/// The maximum number of early remote DKG transcripts we want to include in a data payload.
+/// Note that responses for `SetupInitialDKG` requests contain two transcripts.
+const MAX_EARLY_REMOTE_TRANSCRIPTS: usize = 2;
+
+/// The maximum number of intervals during which an initial DKG request is
+/// attempted.
 const MAX_REMOTE_DKG_ATTEMPTS: u32 = 5;
 
-// Generic error string for failed remote DKG requests.
+/// Generic error string for failed remote DKG requests.
 const REMOTE_DKG_REPEATED_FAILURE_ERROR: &str = "Attempts to run this DKG repeatedly failed";
 
 struct Metrics {
@@ -395,8 +399,11 @@ impl<Pool: DkgPool> BouncerFactory<DkgMessageId, Pool> for DkgBouncer {
 mod tests {
     use super::*;
     use crate::test_utils::{
+        complement_state_manager_with_dkg_contexts,
         complement_state_manager_with_reshare_chain_key_request,
-        complement_state_manager_with_setup_initial_dkg_request,
+        complement_state_manager_with_setup_initial_dkg_request, create_dealing,
+        extract_dkg_configs_from_highest_block, extract_remote_dkg_ids_from_highest_block,
+        make_reshare_chain_key_context, make_setup_initial_dkg_context,
     };
     use core::panic;
     use ic_artifact_pool::dkg_pool::DkgPoolImpl;
@@ -406,29 +413,43 @@ mod tests {
     };
     use ic_consensus_utils::pool_reader::PoolReader;
     use ic_crypto_test_utils_crypto_returning_ok::CryptoReturningOk;
-    use ic_crypto_test_utils_ni_dkg::dummy_dealing;
+    use ic_crypto_test_utils_ni_dkg::{dummy_dealing, dummy_transcript_for_tests_with_params};
     use ic_interfaces::{
         consensus_pool::ConsensusPool,
         p2p::consensus::{BouncerFactory, BouncerValue, MutablePool, UnvalidatedArtifact},
     };
+    use ic_interfaces_mocks::crypto::MockCrypto;
     use ic_interfaces_registry::RegistryClient;
+    use ic_logger::no_op_logger;
     use ic_management_canister_types_private::{MasterPublicKeyId, VetKdCurve, VetKdKeyId};
     use ic_metrics::MetricsRegistry;
     use ic_registry_subnet_features::{ChainKeyConfig, KeyConfig};
     use ic_test_artifact_pool::consensus_pool::TestConsensusPool;
+    use ic_test_utilities_consensus::fake::{FakeContentSigner, FromParent};
     use ic_test_utilities_logger::with_test_replica_logger;
     use ic_test_utilities_registry::{SubnetRecordBuilder, add_subnet_record};
     use ic_test_utilities_types::ids::{node_test_id, subnet_test_id};
     use ic_types::{
-        RegistryVersion, ReplicaVersion,
-        consensus::{Block, BlockPayload},
-        crypto::CryptoHash,
-        crypto::threshold_sig::ni_dkg::{
-            NiDkgId, NiDkgMasterPublicKeyId, NiDkgTargetId, NiDkgTargetSubnet,
+        NumberOfNodes, RegistryVersion, ReplicaVersion,
+        batch::ValidationContext,
+        consensus::{
+            Block, BlockPayload, BlockProposal, DataPayload, HasHeight, Payload, SummaryPayload,
+            dkg::{DkgDataPayload, DkgSummary},
+            get_faults_tolerated,
+        },
+        crypto::{
+            AlgorithmId, CryptoHash,
+            error::MalformedPublicKeyError,
+            threshold_sig::ni_dkg::{
+                NiDkgId, NiDkgMasterPublicKeyId, NiDkgTag, NiDkgTargetId, NiDkgTargetSubnet,
+                config::NiDkgConfigData, errors::create_transcript_error::DkgCreateTranscriptError,
+            },
         },
         time::UNIX_EPOCH,
     };
+    use payload_validator::validate_payload;
     use std::{collections::BTreeSet, convert::TryFrom};
+    use test_utils::{extract_dealings_from_highest_block, extract_remote_dkgs_from_highest_block};
     use utils::{tags_iter, vetkd_key_ids_for_subnet};
 
     #[test]
@@ -1357,7 +1378,7 @@ mod tests {
             dependencies.state_manager.clone(),
             dependencies.registry.get_latest_version(),
             vec![],
-            Some(1),
+            Some(dkg_interval_length as usize + 1),
             None,
         );
@@ -1612,7 +1633,810 @@ mod tests {
                 block.height.get()
             );
         }
-        })
+        });
+    }
+
+    const EARLY_DKG_INTERVAL: u64 = 99;
+
+    /// Common setup for early transcript tests using `setup_initial_dkg`.
+    /// Advances to the first summary block and returns the deps, target id,
+    /// and the two remote DKG ids (low + high threshold).
+    fn setup_initial_dkg_test(
+        pool_config: ic_config::artifact_pool::ArtifactPoolConfig,
+    ) -> (Dependencies, NiDkgTargetId, Vec<NiDkgId>) {
+        let node_ids = (1..8).map(node_test_id).collect::<Vec<_>>();
+
+        let mut deps = dependencies_with_subnet_records_with_raw_state_manager(
+            pool_config,
+            subnet_test_id(0),
+            vec![(
+                10,
+                SubnetRecordBuilder::from(&node_ids)
+                    .with_dkg_interval_length(EARLY_DKG_INTERVAL)
+                    .build(),
+            )],
+        );
+
+        let target_id = NiDkgTargetId::new([0_u8; 32]);
+        complement_state_manager_with_setup_initial_dkg_request(
+            deps.state_manager.clone(),
+            deps.registry.get_latest_version(),
+            vec![10, 11, 12, 13],
+            None,
+            Some(target_id),
+        );
+
+        deps.pool
+            .advance_round_normal_operation_n(EARLY_DKG_INTERVAL + 1);
+
+        // Verify that the first summary block contains the two remote configs
+        // (in addition to the two local ones).
+        assert_eq!(extract_dkg_configs_from_highest_block(&deps.pool).len(), 4);
+        assert_eq!(extract_remote_dkgs_from_highest_block(&deps.pool).len(), 0);
+        let remote_dkg_ids = extract_remote_dkg_ids_from_highest_block(&deps.pool, target_id);
+        assert_eq!(remote_dkg_ids.len(), 2);
+
+        (deps, target_id, remote_dkg_ids)
+    }
+
+    /// Add 3 dealings per config to the DKG pool, advancing the consensus
+    /// pool one round after each config.
+    fn add_dealings_for_configs(deps: &mut Dependencies, dkg_ids: &[NiDkgId]) {
+        for dkg_id in dkg_ids {
+            let dealings = (0..3)
+                .map(|i| ChangeAction::AddToValidated(create_dealing(i, dkg_id.clone())))
+                .collect::<Vec<_>>();
+            deps.dkg_pool.write().unwrap().apply(dealings);
+            deps.pool.advance_round_normal_operation();
+        }
+    }
+
+    /// Assert that the highest block's payload passes DKG validation.
+    fn assert_highest_block_validates(deps: &Dependencies) {
+        let block: Block = deps
+            .pool
+            .validated()
+            .block_proposal()
+            .get_highest()
+            .unwrap()
+            .content
+            .into_inner();
+        let pool_reader = PoolReader::new(&deps.pool);
+        let height = block.height().decrement();
+        let parent = pool_reader
+            .get_notarized_block(&block.parent, height)
+            .map(|block| block.into_inner())
+            .unwrap();
+
+        assert!(
+            validate_payload(
+                subnet_test_id(0),
+                deps.registry.as_ref(),
+                deps.crypto.as_ref(),
+                &pool_reader,
+                &*deps.dkg_pool.read().unwrap(),
+                parent,
+                block.payload.as_ref(),
+                deps.state_manager.as_ref(),
+                &block.context,
+                &MetricsRegistry::new().int_counter_vec(
+                    "consensus_dkg_validator",
+                    "DKG validator counter",
+                    &["type"],
+                ),
+                &no_op_logger(),
+            )
+            .is_ok()
+        );
+    }
+
+    /// Advance through a full DKG interval and verify that no early remote
+    /// transcripts or dealings appear in any block.
+    fn assert_no_early_transcript_duplicates(pool: &mut TestConsensusPool, interval_length: u64) {
+        for _ in 0..interval_length + 1 {
+            pool.advance_round_normal_operation();
+            assert_eq!(extract_dealings_from_highest_block(pool).len(), 0);
+            assert_eq!(extract_remote_dkgs_from_highest_block(pool).len(), 0);
+        }
+    }
+
+    #[test]
+    fn test_early_setup_initial_dkg_transcripts() {
+        ic_test_utilities::artifact_pool_config::with_test_pool_config(|pool_config| {
+            let (mut deps, target_id, remote_dkg_ids) = setup_initial_dkg_test(pool_config);
+
+            // Add dealings for first config only; not enough for both transcripts
+            let dealings = (0..3)
+                .map(|i| ChangeAction::AddToValidated(create_dealing(i, remote_dkg_ids[0].clone())))
+                .collect::<Vec<_>>();
+            deps.dkg_pool.write().unwrap().apply(dealings);
+            deps.pool.advance_round_normal_operation();
+            assert_eq!(extract_dealings_from_highest_block(&deps.pool).len(), 3);
+            assert_eq!(extract_remote_dkgs_from_highest_block(&deps.pool).len(), 0);
+
+            // No new dealings; building remote transcripts fails (need both high and low)
+            deps.pool.advance_round_normal_operation();
+            assert_eq!(extract_dealings_from_highest_block(&deps.pool).len(), 0);
+            assert_eq!(extract_remote_dkgs_from_highest_block(&deps.pool).len(), 0);
+
+            // Add dealings for second config
+            let dealings = (0..3)
+                .map(|i| ChangeAction::AddToValidated(create_dealing(i, remote_dkg_ids[1].clone())))
+                .collect::<Vec<_>>();
+            deps.dkg_pool.write().unwrap().apply(dealings);
+            deps.pool.advance_round_normal_operation();
+            assert_eq!(extract_dealings_from_highest_block(&deps.pool).len(), 3);
+            assert_eq!(extract_remote_dkgs_from_highest_block(&deps.pool).len(), 0);
+
+            // Now sufficient dealings are on chain; early remote transcripts should appear
+            deps.pool.advance_round_normal_operation();
+            assert_eq!(extract_dealings_from_highest_block(&deps.pool).len(), 0);
+            let remote_dkgs = extract_remote_dkgs_from_highest_block(&deps.pool);
+            assert_eq!(remote_dkgs.len(), 2);
+            for (dkg_id, _, result) in &remote_dkgs {
+                assert_eq!(dkg_id.target_subnet, NiDkgTargetSubnet::Remote(target_id));
+                assert!(result.is_ok());
+            }
+            assert!(
+                remote_dkgs
+                    .iter()
+                    .any(|(id, _, _)| id.dkg_tag == NiDkgTag::HighThreshold)
+            );
+            assert!(
+                remote_dkgs
+                    .iter()
+                    .any(|(id, _, _)| id.dkg_tag == NiDkgTag::LowThreshold)
+            );
+
+            assert_highest_block_validates(&deps);
+
+            // Also validate with empty transcripts_for_remote_subnets (early
+            // transcripts are only an optimization, so validation must still pass).
+            let block: Block = deps
+                .pool
+                .validated()
+                .block_proposal()
+                .get_highest()
+                .unwrap()
+                .content
+                .into_inner();
+            let pool_reader = PoolReader::new(&deps.pool);
+            let height = block.height().decrement();
+            let parent = pool_reader
+                .get_notarized_block(&block.parent, height)
+                .map(|block| block.into_inner())
+                .unwrap();
+            let payload_without_early_remote = match block.payload.as_ref() {
+                BlockPayload::Data(data) => {
+                    let dkg_without_remote =
+                        DkgDataPayload::new(data.dkg.start_height, data.dkg.messages.clone());
+                    BlockPayload::Data(DataPayload {
+                        batch: data.batch.clone(),
+                        dkg: dkg_without_remote,
+                        idkg: data.idkg.clone(),
+                    })
+                }
+                _ => panic!("expected data block"),
+            };
+            assert!(
+                validate_payload(
+                    subnet_test_id(0),
+                    deps.registry.as_ref(),
+                    deps.crypto.as_ref(),
+                    &pool_reader,
+                    &*deps.dkg_pool.read().unwrap(),
+                    parent,
+                    &payload_without_early_remote,
+                    deps.state_manager.as_ref(),
+                    &block.context,
+                    &MetricsRegistry::new().int_counter_vec(
+                        "consensus_dkg_validator",
+                        "DKG validator counter",
+                        &["type"],
+                    ),
+                    &no_op_logger(),
+                )
+                .is_ok()
+            );
+
+            // Verify that no more transcripts are created in later blocks.
+            assert_no_early_transcript_duplicates(&mut deps.pool, EARLY_DKG_INTERVAL);
+        });
+    }
+
+    /// Tests that no early remote transcripts are created when a
+    /// setup_initial_dkg target has only one of its expected two configs
+    /// in the summary (because one transcript was already created in a
+    /// previous summary block). Instead, the next summary block should
+    /// contain both transcripts.
+    #[test]
+    fn test_no_early_transcripts_for_single_setup_initial_dkg_config() {
+        ic_test_utilities::artifact_pool_config::with_test_pool_config(|pool_config| {
+            let (mut deps, target_id, remote_dkg_ids) = setup_initial_dkg_test(pool_config);
+
+            let original_summary = {
+                let block: Block = deps
+                    .pool
+                    .validated()
+                    .block_proposal()
+                    .get_highest()
+                    .unwrap()
+                    .content
+                    .into_inner();
+                block.payload.as_ref().as_summary().dkg.clone()
+            };
+
+            add_dealings_for_configs(&mut deps, &remote_dkg_ids);
+
+            // Construct a modified summary with only 1 remote config.
+            // This simulates the scenario where one transcript was already
+            // created in a previous summary block, and only the remaining
+            // config needs to be computed.
+            let removed_dkg_id = remote_dkg_ids[0].clone();
+            let kept_tag = remote_dkg_ids[1].dkg_tag.clone();
+            let mut dummy_transcript = ic_crypto_test_utils_ni_dkg::dummy_transcript_for_tests();
+            dummy_transcript.dkg_id = removed_dkg_id.clone();
+            let modified_summary = DkgSummary::new(
+                original_summary
+                    .configs
+                    .values()
+                    .filter(|c| {
+                        c.dkg_id().target_subnet == NiDkgTargetSubnet::Local
+                            || c.dkg_id().dkg_tag == kept_tag
+                    })
+                    .cloned()
+                    .collect(),
+                original_summary.current_transcripts().clone(),
+                original_summary.next_transcripts().clone(),
+                vec![(
+                    removed_dkg_id,
+                    ic_types::messages::CallbackId::from(0_u64),
+                    Ok(dummy_transcript),
+                )],
+                original_summary.registry_version,
+                original_summary.interval_length,
+                original_summary.next_interval_length,
+                original_summary.height,
+                BTreeMap::new(),
+            );
+
+            assert_eq!(
+                modified_summary
+                    .configs
+                    .values()
+                    .filter(|c| matches!(c.dkg_id().target_subnet, NiDkgTargetSubnet::Remote(_)))
+                    .count(),
+                1,
+            );
+
+            let parent = deps.pool.get_cache().finalized_block();
+            let pool_reader = PoolReader::new(&deps.pool);
+            let validation_context = ValidationContext {
+                registry_version: deps.registry.get_latest_version(),
+                certified_height: Height::from(0),
+                time: UNIX_EPOCH,
+            };
+
+            // Even though sufficient dealings exist on chain for both configs,
+            // no early transcript should be created because the summary only
+            // has 1 of the expected 2 configs for a setup_initial_dkg target.
+            let early_transcripts = payload_builder::create_early_remote_transcripts(
+                &pool_reader,
+                deps.crypto.as_ref(),
+                &parent,
+                &modified_summary,
+                deps.state_manager.as_ref(),
+                &validation_context,
+                no_op_logger(),
+            )
+            .unwrap();
+            assert!(
+                early_transcripts.is_empty(),
+                "No early transcripts should be created for a single \
+                 setup_initial_dkg config, but got {early_transcripts:?}",
+            );
+
+            // The next summary should contain both transcripts: the one already
+            // in the modified summary's transcripts_for_remote_subnets and the
+            // one created from the remaining config's dealings.
+            let next_summary = payload_builder::create_summary_payload(
+                subnet_test_id(0),
+                deps.registry.as_ref(),
+                deps.crypto.as_ref(),
+                &pool_reader,
+                &modified_summary,
+                &parent,
+                deps.registry.get_latest_version(),
+                deps.state_manager.as_ref(),
+                &validation_context,
+                no_op_logger(),
+            )
+            .unwrap();
+            assert_eq!(
+                next_summary.transcripts_for_remote_subnets.len(),
+                2,
+                "The next summary should contain both remote transcripts",
+            );
+            for (dkg_id, _callback_id, result) in &next_summary.transcripts_for_remote_subnets {
+                assert_eq!(dkg_id.target_subnet, NiDkgTargetSubnet::Remote(target_id));
+                assert!(result.is_ok());
+            }
+
+            // Control: using the original summary with both configs DOES
+            // produce early transcripts.
+            let early_transcripts = payload_builder::create_early_remote_transcripts(
+                &pool_reader,
+                deps.crypto.as_ref(),
+                &parent,
+                &original_summary,
+                deps.state_manager.as_ref(),
+                &validation_context,
+                no_op_logger(),
+            )
+            .unwrap();
+            assert_eq!(early_transcripts.len(), 2);
+            for (dkg_id, _callback_id, result) in &early_transcripts {
+                assert_eq!(dkg_id.target_subnet, NiDkgTargetSubnet::Remote(target_id));
+                assert!(result.is_ok());
+            }
+
+            // If a config exists in the summary but there is no corresponding
+            // context in the state, no early transcript should be created.
+            let unrelated_target_id = NiDkgTargetId::new([1_u8; 32]);
+            let no_match_state_manager =
+                Arc::new(ic_test_utilities::state_manager::RefMockStateManager::default());
+            complement_state_manager_with_setup_initial_dkg_request(
+                no_match_state_manager.clone(),
+                deps.registry.get_latest_version(),
+                vec![10, 11, 12, 13],
+                None,
+                Some(unrelated_target_id),
+            );
+            let early_transcripts = payload_builder::create_early_remote_transcripts(
+                &pool_reader,
+                deps.crypto.as_ref(),
+                &parent,
+                &original_summary,
+                no_match_state_manager.as_ref(),
+                &validation_context,
+                no_op_logger(),
+            )
+            .unwrap();
+            assert!(
+                early_transcripts.is_empty(),
+                "No early transcripts should be created when config's target_id \
+                 has no corresponding context, but got {early_transcripts:?}",
+            );
+        });
+    }
+
+    #[test]
+    fn test_early_remote_transcripts_with_reproducible_crypto_error() {
+        ic_test_utilities::artifact_pool_config::with_test_pool_config(|pool_config| {
+            let (mut deps, target_id, remote_dkg_ids) = setup_initial_dkg_test(pool_config);
+
+            add_dealings_for_configs(&mut deps, &remote_dkg_ids);
+
+            let mut mock_crypto = MockCrypto::new();
+            mock_crypto
+                .expect_ni_dkg_create_transcript()
+                .returning(|_config, _dealings| {
+                    Err(
+                        DkgCreateTranscriptError::MalformedResharingTranscriptInConfig(
+                            MalformedPublicKeyError {
+                                algorithm: AlgorithmId::Groth20_Bls12_381,
+                                key_bytes: None,
+                                internal_error: "test error".to_string(),
+                            },
+                        ),
+                    )
+                });
+
+            let parent = deps.pool.get_cache().finalized_block();
+
+            // Scope the pool borrow so we can mutate the pool afterwards
+            let payload_with_errors = {
+                let pool_reader = PoolReader::new(&deps.pool);
+                let last_summary_block = pool_reader.dkg_summary_block(&parent).unwrap();
+                let last_summary = &last_summary_block.payload.as_ref().as_summary().dkg;
+                let validation_context = ValidationContext {
+                    registry_version: deps.registry.get_latest_version(),
+                    certified_height: Height::from(0),
+                    time: UNIX_EPOCH,
+                };
+
+                let early_transcripts = payload_builder::create_early_remote_transcripts(
+                    &pool_reader,
+                    &mock_crypto,
+                    &parent,
+                    last_summary,
+                    deps.state_manager.as_ref(),
+                    &validation_context,
+                    no_op_logger(),
+                )
+                .unwrap();
+
+                assert_eq!(early_transcripts.len(), 2);
+                for (dkg_id, _callback_id, result) in &early_transcripts {
+                    assert_eq!(dkg_id.target_subnet, NiDkgTargetSubnet::Remote(target_id));
+                    let error_msg = result.as_ref().unwrap_err();
+                    assert!(
+                        error_msg.contains("test error"),
+                        "Error message should contain the original error, got: {error_msg}"
+                    );
+                }
+
+                let payload = BlockPayload::Data(DataPayload {
+                    batch: ic_types::batch::BatchPayload::default(),
+                    dkg: DkgDataPayload::new_with_remote_dkg_transcripts(
+                        last_summary_block.height,
+                        vec![],
+                        early_transcripts,
+                    ),
+                    idkg: Default::default(),
+                });
+
+                assert!(
+                    validate_payload(
+                        subnet_test_id(0),
+                        deps.registry.as_ref(),
+                        &mock_crypto,
+                        &pool_reader,
+                        &*deps.dkg_pool.read().unwrap(),
+                        parent.clone(),
+                        &payload,
+                        deps.state_manager.as_ref(),
+                        &validation_context,
+                        &MetricsRegistry::new().int_counter_vec(
+                            "consensus_dkg_validator",
+                            "DKG validator counter",
+                            &["type"],
+                        ),
+                        &no_op_logger(),
+                    )
+                    .is_ok(),
+                    "Payload with reproducible crypto errors should validate successfully"
+                );
+
+                payload
+            };
+
+            // Insert the payload with errors into the pool as part of a new block.
+            let mut block = Block::from_parent(&parent);
+            block.payload = Payload::new(ic_types::crypto::crypto_hash, payload_with_errors);
+            let proposal = BlockProposal::fake(block, node_test_id(0));
+            deps.pool.advance_round_with_block(&proposal);
+
+            // Verify that no more transcripts are created in later blocks.
+            assert_no_early_transcript_duplicates(&mut deps.pool, EARLY_DKG_INTERVAL);
+        });
+    }
+
+    #[test]
+    fn test_early_reshare_chain_key_transcripts() {
+        ic_test_utilities::artifact_pool_config::with_test_pool_config(|pool_config| {
+            let node_ids = (1..5).map(node_test_id).collect::<Vec<_>>();
+            let key_id = VetKdKeyId {
+                curve: VetKdCurve::Bls12_381_G2,
+                name: String::from("some_vetkey"),
+            };
+            let target_id = NiDkgTargetId::new([0_u8; 32]);
+
+            let mut deps = dependencies_with_subnet_records_with_raw_state_manager(
+                pool_config,
+                subnet_test_id(0),
+                vec![(
+                    10,
+                    SubnetRecordBuilder::from(&node_ids)
+                        .with_dkg_interval_length(EARLY_DKG_INTERVAL)
+                        .with_chain_key_config(ChainKeyConfig {
+                            key_configs: vec![KeyConfig {
+                                key_id: MasterPublicKeyId::VetKd(key_id.clone()),
+                                pre_signatures_to_create_in_advance: None,
+                                max_queue_size: 20,
+                            }],
+                            signature_request_timeout_ns: None,
+                            idkg_key_rotation_period_ms: None,
+                            max_parallel_pre_signature_transcripts_in_creation: None,
+                        })
+                        .build(),
+                )],
+            );
+
+            complement_state_manager_with_reshare_chain_key_request(
+                deps.state_manager.clone(),
+                deps.registry.get_latest_version(),
+                key_id.clone(),
+                vec![10, 11, 12, 13],
+                None,
+                Some(target_id),
+            );
+
+            deps.pool
+                .advance_round_normal_operation_n(EARLY_DKG_INTERVAL + 1);
+            let remote_dkg_ids = extract_remote_dkg_ids_from_highest_block(&deps.pool, target_id);
+            assert_eq!(remote_dkg_ids.len(), 1);
+            assert_eq!(extract_dkg_configs_from_highest_block(&deps.pool).len(), 4);
+            assert_eq!(extract_remote_dkgs_from_highest_block(&deps.pool).len(), 0);
+
+            add_dealings_for_configs(&mut deps, &remote_dkg_ids);
+            // 2f + 1 dealings for high threshold VetKD resharing
+            assert_eq!(extract_dealings_from_highest_block(&deps.pool).len(), 3);
+            assert_eq!(extract_remote_dkgs_from_highest_block(&deps.pool).len(), 0);
+
+            // Now sufficient dealings are in the pool; early remote transcript should appear
+            deps.pool.advance_round_normal_operation();
+            assert_eq!(extract_dealings_from_highest_block(&deps.pool).len(), 0);
+            let remote_dkgs = extract_remote_dkgs_from_highest_block(&deps.pool);
+            assert_eq!(remote_dkgs.len(), 1);
+            let (dkg_id, _, result) = &remote_dkgs[0];
+            assert_eq!(dkg_id.target_subnet, NiDkgTargetSubnet::Remote(target_id));
+            assert!(result.is_ok());
+            assert_eq!(
+                dkg_id.dkg_tag,
+                NiDkgTag::HighThresholdForKey(NiDkgMasterPublicKeyId::VetKd(key_id))
+            );
+
+            // Verify that the highest block validates.
+            assert_highest_block_validates(&deps);
+
+            // Verify that no more transcripts are created in later blocks.
+            assert_no_early_transcript_duplicates(&mut deps.pool, EARLY_DKG_INTERVAL);
+        });
+    }
+
+    /// Tests that when the state has a SetupInitialDKG context (needing 2
+    /// transcripts), a ReshareChainKey context (needing 1 transcript), and an
+    /// additional SetupInitialDKG context (lowest target_id) with insufficient
+    /// dealings for one of its configs:
+    /// - The first setup target is skipped (insufficient dealings).
+    /// - The remaining two contexts compete for MAX_EARLY_REMOTE_TRANSCRIPTS
+    ///   (= 2), and only the first context's transcripts are included.
+    #[test]
+    fn test_early_remote_transcripts_respects_max() {
+        for (skipped_target_bytes, setup_target_bytes, reshare_target_bytes, desc) in [
+            ([0_u8; 32], [1_u8; 32], [2_u8; 32], "SetupInitialDKG first"),
+            ([0_u8; 32], [2_u8; 32], [1_u8; 32], "ReshareChainKey first"),
+        ] {
+            ic_test_utilities::artifact_pool_config::with_test_pool_config(|pool_config| {
+                let node_ids = (0..4).map(node_test_id).collect::<Vec<_>>();
+                let key_id = VetKdKeyId {
+                    curve: VetKdCurve::Bls12_381_G2,
+                    name: String::from("some_vetkey"),
+                };
+                // This context always comes first but will be
+                // skipped because one of its two configs lacks dealings.
+                let skipped_target_id = NiDkgTargetId::new(skipped_target_bytes);
+                let setup_target_id = NiDkgTargetId::new(setup_target_bytes);
+                let reshare_target_id = NiDkgTargetId::new(reshare_target_bytes);
+
+                let mut deps = dependencies_with_subnet_records_with_raw_state_manager(
+                    pool_config,
+                    subnet_test_id(0),
+                    vec![(
+                        10,
+                        SubnetRecordBuilder::from(&node_ids)
+                            .with_dkg_interval_length(EARLY_DKG_INTERVAL)
+                            .with_chain_key_config(ChainKeyConfig {
+                                key_configs: vec![KeyConfig {
+                                    key_id: MasterPublicKeyId::VetKd(key_id.clone()),
+                                    pre_signatures_to_create_in_advance: None,
+                                    max_queue_size: 20,
+                                }],
+                                signature_request_timeout_ns: None,
+                                idkg_key_rotation_period_ms: None,
+                                max_parallel_pre_signature_transcripts_in_creation: None,
+                            })
+                            .build(),
+                    )],
+                );
+
+                let registry_version = deps.registry.get_latest_version();
+                complement_state_manager_with_dkg_contexts(
+                    deps.state_manager.clone(),
+                    vec![
+                        make_setup_initial_dkg_context(
+                            registry_version,
+                            vec![10, 11, 12, 13],
+                            skipped_target_id,
+                        ),
+                        make_setup_initial_dkg_context(
+                            registry_version,
+                            vec![10, 11, 12, 13],
+                            setup_target_id,
+                        ),
+                        make_reshare_chain_key_context(
+                            registry_version,
+                            key_id.clone(),
+                            vec![10, 11, 12, 13],
+                            reshare_target_id,
+                        ),
+                    ],
+                    None,
+                );
+
+                // Advance to one block before the summary.
+                deps.pool
+                    .advance_round_normal_operation_n(EARLY_DKG_INTERVAL);
+
+                // The original summary only includes configs for the first target
+                // (skipped_target_id) due to MAX_REMOTE_DKGS_PER_INTERVAL=1.
+                let summary_proposal = deps.pool.make_next_block();
+                let mut summary_block: Block = summary_proposal.content.into_inner();
+                let original_summary = summary_block.payload.as_ref().as_summary().dkg.clone();
+
+                let skipped_remote_dkg_ids: Vec<NiDkgId> = original_summary
+                    .configs
+                    .keys()
+                    .filter(|id| id.target_subnet == NiDkgTargetSubnet::Remote(skipped_target_id))
+                    .cloned()
+                    .collect();
+                assert_eq!(
+                    skipped_remote_dkg_ids.len(),
+                    2,
+                    "[{desc}] Expected 2 skipped SetupInitialDKG remote configs"
+                );
+
+                // Create setup configs for setup_target_id by cloning from the
+                // skipped target's configs (same structure, different target).
+                let setup_configs: Vec<NiDkgConfig> = original_summary
+                    .configs
+                    .values()
+                    .filter(|c| {
+                        c.dkg_id().target_subnet == NiDkgTargetSubnet::Remote(skipped_target_id)
+                    })
+                    .map(|c| {
+                        NiDkgConfig::new(NiDkgConfigData {
+                            dkg_id: NiDkgId {
+                                target_subnet: NiDkgTargetSubnet::Remote(setup_target_id),
+                                ..c.dkg_id().clone()
+                            },
+                            max_corrupt_dealers: c.max_corrupt_dealers(),
+                            dealers: c.dealers().get().clone(),
+                            max_corrupt_receivers: c.max_corrupt_receivers(),
+                            receivers: c.receivers().get().clone(),
+                            threshold: c.threshold().get(),
+                            registry_version: c.registry_version(),
+                            resharing_transcript: c.resharing_transcript().clone(),
+                        })
+                        .unwrap()
+                    })
+                    .collect();
+                let setup_remote_dkg_ids: Vec<NiDkgId> =
+                    setup_configs.iter().map(|c| c.dkg_id().clone()).collect();
+
+                // Manually create a ReshareChainKey config.
+                let reshare_dkg_id = NiDkgId {
+                    start_block_height: original_summary.height,
+                    dealer_subnet: subnet_test_id(0),
+                    dkg_tag: NiDkgTag::HighThresholdForKey(NiDkgMasterPublicKeyId::VetKd(
+                        key_id.clone(),
+                    )),
+                    target_subnet: NiDkgTargetSubnet::Remote(reshare_target_id),
+                };
+
+                let dealers: BTreeSet<_> = node_ids.iter().cloned().collect();
+                let receivers: BTreeSet<_> = (10..14).map(node_test_id).collect();
+
+                let resharing_transcript = dummy_transcript_for_tests_with_params(
+                    node_ids.clone(),
+                    NiDkgTag::HighThresholdForKey(NiDkgMasterPublicKeyId::VetKd(key_id.clone())),
+                    3, // 2f + 1
+                    10,
+                );
+
+                let reshare_config = NiDkgConfig::new(NiDkgConfigData {
+                    threshold: NumberOfNodes::from(
+                        reshare_dkg_id
+                            .dkg_tag
+                            .threshold_for_subnet_of_size(receivers.len())
+                            as u32,
+                    ),
+                    dkg_id: reshare_dkg_id.clone(),
+                    max_corrupt_dealers: NumberOfNodes::from(
+                        get_faults_tolerated(dealers.len()) as u32
+                    ),
+                    max_corrupt_receivers: NumberOfNodes::from(
+                        get_faults_tolerated(receivers.len()) as u32,
+                    ),
+                    dealers,
+                    receivers,
+                    registry_version,
+                    resharing_transcript: Some(resharing_transcript),
+                })
+                .expect("Failed to create reshare config");
+
+                // Build a modified summary with configs for all three targets.
+                let mut all_configs: Vec<NiDkgConfig> =
+                    original_summary.configs.values().cloned().collect();
+                all_configs.extend(setup_configs);
+                all_configs.push(reshare_config);
+                let modified_summary = DkgSummary::new(
+                    all_configs,
+                    original_summary.current_transcripts().clone(),
+                    original_summary.next_transcripts().clone(),
+                    vec![],
+                    original_summary.registry_version,
+                    original_summary.interval_length,
+                    original_summary.next_interval_length,
+                    original_summary.height,
+                    BTreeMap::new(),
+                );
+                summary_block.payload = Payload::new(
+                    ic_types::crypto::crypto_hash,
+                    BlockPayload::Summary(SummaryPayload {
+                        dkg: modified_summary,
+                        idkg: None,
+                    }),
+                );
+                let proposal = BlockProposal::fake(summary_block, node_test_id(0));
+                deps.pool.advance_round_with_block(&proposal);
+
+                // Add dealings for only ONE of the skipped target's two configs
+                // (insufficient), all setup target configs, and the reshare config.
+                let dkg_ids_with_dealings: Vec<&NiDkgId> =
+                    std::iter::once(&skipped_remote_dkg_ids[0])
+                        .chain(setup_remote_dkg_ids.iter())
+                        .chain(std::iter::once(&reshare_dkg_id))
+                        .collect();
+                for dkg_id in &dkg_ids_with_dealings {
+                    let dealings: Vec<_> = (0..3)
+                        .map(|i| ChangeAction::AddToValidated(create_dealing(i, (*dkg_id).clone())))
+                        .collect();
+                    deps.dkg_pool.write().unwrap().apply(dealings);
+                }
+                deps.pool.advance_round_normal_operation();
+
+                assert_eq!(
+                    extract_dealings_from_highest_block(&deps.pool).len(),
+                    9, // 3 * (f + 1) setup, plus 2f + 1 reshare, where f = 1
+                    "[{desc}] 9 dealings should be in the block"
+                );
+                assert_eq!(
+                    extract_remote_dkgs_from_highest_block(&deps.pool).len(),
+                    0,
+                    "[{desc}] no early transcripts yet"
+                );
+
+                // The skipped target is skipped (insufficient dealings for its
+                // second config). The remaining setup and reshare targets compete
+                // for MAX_EARLY_REMOTE_TRANSCRIPTS.
+                deps.pool.advance_round_normal_operation();
+                assert_eq!(extract_dealings_from_highest_block(&deps.pool).len(), 0);
+                let remote_dkgs = extract_remote_dkgs_from_highest_block(&deps.pool);
+
+                if setup_target_id < reshare_target_id {
+                    assert_eq!(
+                        remote_dkgs.len(),
+                        2,
+                        "[{desc}] Expected 2 SetupInitialDKG transcripts, got {}",
+                        remote_dkgs.len()
+                    );
+                    for (dkg_id, _, result) in &remote_dkgs {
+                        assert_eq!(
+                            dkg_id.target_subnet,
+                            NiDkgTargetSubnet::Remote(setup_target_id),
+                            "[{desc}] transcript should be for SetupInitialDKG target id"
+                        );
+                        assert!(result.is_ok(), "[{desc}]");
+                    }
+                } else {
+                    // Reshare comes first: 1 reshare transcript; setup's 2 exceed the limit.
+                    assert_eq!(
+                        remote_dkgs.len(),
+                        1,
+                        "[{desc}] Expected 1 ReshareChainKey transcript, got {}",
+                        remote_dkgs.len()
+                    );
+                    let (dkg_id, _, result) = &remote_dkgs[0];
+                    assert_eq!(
+                        dkg_id.target_subnet,
+                        NiDkgTargetSubnet::Remote(reshare_target_id),
+                        "[{desc}] transcript should be for ReshareChainKey target id"
+                    );
+                    assert!(result.is_ok(), "[{desc}]");
+                }
+            });
+        }
     }
 
     #[test]
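Aside: the cap-respecting selection these tests exercise can be summarized in isolation. A minimal sketch using only std, under the assumptions that targets are visited in ascending order of target id and that a target contributes either all of its transcripts or none; `TargetId`, `CAP`, and `select_targets` are illustrative names, not the real implementation in payload_builder.rs below.

    use std::collections::BTreeMap;

    type TargetId = [u8; 32];

    /// Stands in for MAX_EARLY_REMOTE_TRANSCRIPTS.
    const CAP: usize = 2;

    /// Returns the target ids whose transcripts would be included, given the
    /// number of configs per target and whether each target has enough dealings.
    fn select_targets(targets: &BTreeMap<TargetId, (usize, bool)>) -> Vec<TargetId> {
        let mut selected = Vec::new();
        let mut used = 0;
        for (&target, &(num_configs, dealings_ok)) in targets {
            // Skip targets lacking dealings or exceeding the cap, but keep
            // trying later targets that might still fit.
            if !dealings_ok || used + num_configs > CAP {
                continue;
            }
            used += num_configs;
            selected.push(target);
        }
        selected
    }

    fn main() {
        let mut targets = BTreeMap::new();
        targets.insert([0u8; 32], (2, false)); // setup target missing dealings: skipped
        targets.insert([1u8; 32], (2, true)); // setup target: fills the cap
        targets.insert([2u8; 32], (1, true)); // reshare target: no room left
        assert_eq!(select_targets(&targets), vec![[1u8; 32]]);
    }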
diff --git a/rs/consensus/dkg/src/payload_builder.rs b/rs/consensus/dkg/src/payload_builder.rs
index 4e1aad0b6ae3..312c94c7dc52 100644
--- a/rs/consensus/dkg/src/payload_builder.rs
+++ b/rs/consensus/dkg/src/payload_builder.rs
@@ -1,12 +1,13 @@
 use crate::{
-    MAX_REMOTE_DKG_ATTEMPTS, MAX_REMOTE_DKGS_PER_INTERVAL, REMOTE_DKG_REPEATED_FAILURE_ERROR,
+    MAX_EARLY_REMOTE_TRANSCRIPTS, MAX_REMOTE_DKG_ATTEMPTS, MAX_REMOTE_DKGS_PER_INTERVAL,
+    REMOTE_DKG_REPEATED_FAILURE_ERROR,
     utils::{self, tags_iter, vetkd_key_ids_for_subnet},
 };
 use ic_consensus_utils::{crypto::ConsensusCrypto, pool_reader::PoolReader};
 use ic_interfaces::{crypto::ErrorReproducibility, dkg::DkgPool};
 use ic_interfaces_registry::RegistryClient;
 use ic_interfaces_state_manager::StateReader;
-use ic_logger::{ReplicaLogger, error, warn};
+use ic_logger::{ReplicaLogger, error, info, warn};
 use ic_protobuf::registry::subnet::v1::{
     CatchUpPackageContents, chain_key_initialization::Initialization,
 };
@@ -76,7 +77,8 @@ pub fn create_payload(
         )
         .map(DkgPayload::Summary)
     } else {
-        // If the height is not a start height, create a payload with new dealings.
+        // If the height is not a start height, create a payload with new dealings,
+        // and possibly early remote transcripts.
         create_data_payload(
             pool_reader,
             dkg_pool,
             parent,
@@ -84,6 +86,10 @@ pub fn create_payload(
             max_dealings_per_block,
             &last_summary_block,
             last_dkg_summary,
+            crypto,
+            state_reader,
+            validation_context,
+            logger,
         )
         .map(DkgPayload::Data)
     }
@@ -96,6 +102,10 @@ fn create_data_payload(
     max_dealings_per_block: usize,
     last_summary_block: &Block,
     last_dkg_summary: &DkgSummary,
+    crypto: &dyn ConsensusCrypto,
+    state_reader: &dyn StateReader<State = ReplicatedState>,
+    validation_context: &ValidationContext,
+    logger: ReplicaLogger,
 ) -> Result<DkgDataPayload, DkgPayloadCreationError> {
     // Get all dealer ids from the chain.
     let dealers_from_chain = utils::get_dealers_from_chain(pool_reader, parent);
@@ -109,12 +119,141 @@ fn create_data_payload(
         max_dealings_per_block,
     );
 
-    Ok(DkgDataPayload::new(
+    let remote_dkg_transcripts = create_early_remote_transcripts(
+        pool_reader,
+        crypto,
+        parent,
+        last_dkg_summary,
+        state_reader,
+        validation_context,
+        logger.clone(),
+    )?;
+
+    if !remote_dkg_transcripts.is_empty() {
+        info!(
+            logger,
+            "Including {} early remote DKG transcripts in data block payload at height {}",
+            remote_dkg_transcripts.len(),
+            parent.height.increment(),
+        );
+    }
+
+    // Include any early remote transcripts
+    Ok(DkgDataPayload::new_with_remote_dkg_transcripts(
         last_summary_block.height,
         new_validated_dealings,
+        remote_dkg_transcripts,
     ))
 }
 
+#[allow(clippy::type_complexity)]
+pub(crate) fn create_early_remote_transcripts(
+    pool_reader: &PoolReader<'_>,
+    crypto: &dyn ConsensusCrypto,
+    parent: &Block,
+    last_dkg_summary: &DkgSummary,
+    state_reader: &dyn StateReader<State = ReplicatedState>,
+    validation_context: &ValidationContext,
+    logger: ReplicaLogger,
+) -> Result<Vec<(NiDkgId, CallbackId, Result<NiDkgTranscript, String>)>, DkgPayloadCreationError> {
+    // Return an error on transient state manager errors
+    let state = state_reader
+        .get_state_at(validation_context.certified_height)
+        .map_err(DkgPayloadCreationError::StateManagerError)?;
+
+    // Since this function is relatively expensive, we simply return if there are no outstanding DKG contexts
+    let callback_id_map = build_target_id_callback_map(state.get_ref());
+    if callback_id_map.is_empty() {
+        return Ok(vec![]);
+    }
+
+    // Get all dealings for DKGs that have not been completed yet
+    let (all_dealings, completed) = utils::get_dkg_dealings(pool_reader, parent);
+
+    // Collect map of remote target_ids to DKG configs
+    let mut remote_configs: BTreeMap<NiDkgTargetId, Vec<&NiDkgConfig>> = BTreeMap::new();
+    for config in last_dkg_summary.configs.values() {
+        let dkg_id = config.dkg_id();
+        if completed.contains(dkg_id) {
+            // Skip DKGs that have already been completed
+            continue;
+        }
+        if let NiDkgTargetSubnet::Remote(target_id) = dkg_id.target_subnet {
+            remote_configs.entry(target_id).or_default().push(config);
+        }
+    }
+
+    // Try to create transcripts for all configs of each target_id. Note that we either include
+    // all transcript results for a target_id or none of them.
+    let mut selected_transcripts = vec![];
+    for (target_id, configs) in remote_configs {
+        // Lookup the callback id and the expected number of configs for this target_id
+        let Some((expected_config_num, callback_id)) = callback_id_map.get(&target_id) else {
+            warn!(
+                logger,
+                "Unable to find callback id associated with remote target id {target_id:?} at block height {}",
+                parent.height.increment()
+            );
+            continue;
+        };
+
+        // Check that we have the expected number of configs for this target_id
+        if configs.len() != *expected_config_num {
+            // This may happen if we did not manage to create all required transcripts as part of
+            // the last summary block. We will handle this in the next summary block instead.
+            continue;
+        }
+
+        // Ensure that creating these transcripts would not exceed the maximum number of early
+        // remote transcripts. We continue with the next target_id in case it requires fewer
+        // transcripts.
+        if selected_transcripts.len() + configs.len() > MAX_EARLY_REMOTE_TRANSCRIPTS {
+            continue;
+        }
+
+        // If any of the configs has fewer dealings than the threshold, we skip this target_id
+        if configs.iter().any(|config| {
+            let dealings_count = all_dealings
+                .get(config.dkg_id())
+                .map_or(0, |dealings| dealings.len());
+            dealings_count < config.collection_threshold().get() as usize
+        }) {
+            continue;
+        }
+
+        // For each config, try to build the necessary (dkg_id, callback_id, transcript_result) triple
+        for config in configs.iter() {
+            // Generate the transcript. We need to retry transient errors, as a payload containing
+            // transient errors may not be verifiable by peers.
+            let transcript_result = match create_transcript(crypto, config, &all_dealings, &logger)
+            {
+                Ok(transcript) => Ok(transcript),
+                // Note that we already handled the reproducible case of having too few dealings
+                // beforehand.
+                Err(err) if err.is_reproducible() => {
+                    // Including the error in the payload will cause the context to receive
+                    // a reject response.
+                    let error_message = format!(
                        "Failed to create early remote transcript for dkg id {:?} at height {}: {}",
+                        config.dkg_id(),
+                        parent.height.increment(),
+                        err
+                    );
+                    error!(logger, "{error_message}");
+                    Err(error_message)
+                }
+                Err(err) => {
+                    // Return on transient crypto errors
+                    return Err(DkgPayloadCreationError::DkgCreateTranscriptError(err));
+                }
+            };
+            selected_transcripts.push((config.dkg_id().clone(), *callback_id, transcript_result));
+        }
+    }
+
+    Ok(selected_transcripts)
+}
+
 /// Selects dealings from the validated pool to include in a block payload.
 ///
 /// When selecting dealings, the following constraints apply:
@@ -253,11 +392,15 @@ pub(super) fn create_summary_payload(
     validation_context: &ValidationContext,
     logger: ReplicaLogger,
 ) -> Result<DkgSummary, DkgPayloadCreationError> {
-    let (all_dealings, _) = utils::get_dkg_dealings(pool_reader, parent);
+    let (all_dealings, completed_dkgs) = utils::get_dkg_dealings(pool_reader, parent);
     let mut transcripts_for_remote_subnets = BTreeMap::new();
     let mut next_transcripts = BTreeMap::new();
     // Try to create transcripts from the last round.
     for (dkg_id, config) in last_summary.configs.iter() {
+        if completed_dkgs.contains(dkg_id) {
+            // Skip DKGs that have already been completed as part of data blocks
+            continue;
+        }
         match create_transcript(crypto, config, &all_dealings, &logger) {
             Ok(transcript) => {
                 let previous_value_found = if dkg_id.target_subnet == NiDkgTargetSubnet::Local {
@@ -329,6 +472,9 @@ pub(super) fn create_summary_payload(
         .map(|(id, _, result)| (id.clone(), result.clone()))
         .collect();
 
+    let completed_target_ids =
+        get_completed_target_ids(last_summary.configs.keys(), &completed_dkgs);
+
     let (mut configs, transcripts_for_remote_subnets, initial_dkg_attempts) =
         compute_remote_dkg_data(
             subnet_id,
@@ -339,6 +485,7 @@ pub(super) fn create_summary_payload(
             transcripts_for_remote_subnets,
             &previous_transcripts,
             &reshared_transcripts,
+            &completed_target_ids,
             &last_summary.initial_dkg_attempts,
             &logger,
         )?;
@@ -419,6 +566,7 @@ fn compute_remote_dkg_data(
     mut new_transcripts: BTreeMap<NiDkgId, Result<NiDkgTranscript, String>>,
     previous_transcripts: &BTreeMap<NiDkgId, Result<NiDkgTranscript, String>>,
     reshared_transcripts: &BTreeMap<NiDkgTag, NiDkgTranscript>,
+    completed_target_ids: &BTreeSet<NiDkgTargetId>,
     previous_attempts: &BTreeMap<NiDkgTargetId, u32>,
     logger: &ReplicaLogger,
 ) -> Result<
@@ -439,6 +587,7 @@ fn compute_remote_dkg_data(
             state.get_ref(),
             validation_context,
             reshared_transcripts,
+            completed_target_ids,
             logger,
         )?;
 
@@ -735,6 +884,7 @@ fn process_subnet_call_context(
     state: &ReplicatedState,
     validation_context: &ValidationContext,
     reshared_transcripts: &BTreeMap<NiDkgTag, NiDkgTranscript>,
+    completed_target_ids: &BTreeSet<NiDkgTargetId>,
     logger: &ReplicaLogger,
 ) -> Result<
     (
@@ -751,6 +901,7 @@ fn process_subnet_call_context(
         registry_client,
         state,
         validation_context,
+        completed_target_ids,
         logger,
     )?;
 
@@ -761,6 +912,7 @@ fn process_subnet_call_context(
         state,
         validation_context,
         reshared_transcripts,
+        completed_target_ids,
     );
 
     let dkg_configs = init_dkg_configs
@@ -786,6 +938,7 @@ fn process_reshare_chain_key_contexts(
     state: &ReplicatedState,
     validation_context: &ValidationContext,
     reshared_transcripts: &BTreeMap<NiDkgTag, NiDkgTranscript>,
+    completed_target_ids: &BTreeSet<NiDkgTargetId>,
 ) -> (
     Vec<Vec<NiDkgConfig>>,
     Vec<(NiDkgId, String)>,
@@ -805,6 +958,11 @@ fn process_reshare_chain_key_contexts(
             continue;
         }
 
+        // If the DKG has already been completed, skip this context
+        if completed_target_ids.contains(&context.target_id) {
+            continue;
+        }
+
         // Only process NiDkgMasterPublicKeyId
         let Ok(key_id) = NiDkgMasterPublicKeyId::try_from(context.key_id.clone()) else {
             continue;
         };
@@ -849,6 +1007,7 @@ fn process_setup_initial_dkg_contexts(
     registry_client: &dyn RegistryClient,
     state: &ReplicatedState,
     validation_context: &ValidationContext,
+    completed_target_ids: &BTreeSet<NiDkgTargetId>,
     logger: &ReplicaLogger,
 ) -> Result<
     (
@@ -871,6 +1030,11 @@ fn process_setup_initial_dkg_contexts(
             continue;
         }
 
+        // If the DKG has already been completed, skip this context
+        if completed_target_ids.contains(&context.target_id) {
+            continue;
+        }
+
         // Dealers must be in the same registry_version.
         let dealers = get_node_list(this_subnet_id, registry_client, context.registry_version)?;
 
@@ -910,40 +1074,71 @@ pub(crate) fn get_node_list(
         .collect())
 }
 
-// Compares two DKG ids without considering the start block heights. This
-// function is only used for DKGs for other subnets, as the start block height
-// is not used to differentiate two DKGs for the same subnet.
+/// Returns the set of remote target IDs for which all configured DKGs have
+/// been completed.
+fn get_completed_target_ids<'a>(
+    config_ids: impl Iterator<Item = &'a NiDkgId>,
+    completed: &BTreeSet<NiDkgId>,
+) -> BTreeSet<NiDkgTargetId> {
+    let mut remote_dkgs_by_target: BTreeMap<NiDkgTargetId, Vec<&NiDkgId>> = BTreeMap::new();
+    for dkg_id in config_ids {
+        if let NiDkgTargetSubnet::Remote(target_id) = dkg_id.target_subnet {
+            remote_dkgs_by_target
+                .entry(target_id)
+                .or_default()
+                .push(dkg_id);
+        }
+    }
+
+    remote_dkgs_by_target
+        .into_iter()
+        .filter(|(_, dkg_ids)| dkg_ids.iter().all(|id| completed.contains(id)))
+        .map(|(target_id, _)| target_id)
+        .collect()
+}
+
+/// Compares two DKG ids without considering the start block heights. This
+/// function is only used for DKGs for other subnets, as the start block height
+/// is not used to differentiate two DKGs for the same subnet.
 fn eq_sans_height(dkg_id1: &NiDkgId, dkg_id2: &NiDkgId) -> bool {
     dkg_id1.dealer_subnet == dkg_id2.dealer_subnet
         && dkg_id1.dkg_tag == dkg_id2.dkg_tag
         && dkg_id1.target_subnet == dkg_id2.target_subnet
 }
 
-fn add_callback_ids_to_transcript_results(
-    new_transcripts: BTreeMap<NiDkgId, Result<NiDkgTranscript, String>>,
+/// Build a map from target id to callback id according to contexts in the replicated state.
+/// Additionally, for each target ID, return the expected number of DKG instances necessary
+/// to answer the request. Specifically, setup initial DKG requests require two DKGs, whereas
+/// resharing a chain key requires one DKG instance.
+fn build_target_id_callback_map(
     state: &ReplicatedState,
-    log: &ReplicaLogger,
-) -> Vec<(NiDkgId, CallbackId, Result<NiDkgTranscript, String>)> {
-    // Build a map from target id to callback id
+) -> BTreeMap<NiDkgTargetId, (usize, CallbackId)> {
     let call_contexts = &state.metadata.subnet_call_context_manager;
-    let callback_id_map = call_contexts
+    call_contexts
         .setup_initial_dkg_contexts
         .iter()
-        .map(|(&callback_id, context)| (context.target_id, callback_id))
+        .map(|(&callback_id, context)| (context.target_id, (2, callback_id)))
         .chain(
             call_contexts
                 .reshare_chain_key_contexts
                 .iter()
-                .map(|(&callback_id, context)| (context.target_id, callback_id)),
+                .map(|(&callback_id, context)| (context.target_id, (1, callback_id))),
        )
-        .collect::<BTreeMap<_, _>>();
+        .collect()
+}
 
+fn add_callback_ids_to_transcript_results(
+    new_transcripts: BTreeMap<NiDkgId, Result<NiDkgTranscript, String>>,
+    state: &ReplicatedState,
+    log: &ReplicaLogger,
+) -> Vec<(NiDkgId, CallbackId, Result<NiDkgTranscript, String>)> {
+    // Build a map from target id to callback id
+    let callback_id_map = build_target_id_callback_map(state);
     new_transcripts
         .into_iter()
         .filter_map(|(id, result)| match id.target_subnet {
             NiDkgTargetSubnet::Local => None,
             NiDkgTargetSubnet::Remote(target_id) => match callback_id_map.get(&target_id) {
-                Some(&callback_id) => Some((id, callback_id, result)),
+                Some(&(_, callback_id)) => Some((id, callback_id, result)),
                 None => {
                     error!(
                         log,
@@ -958,7 +1153,7 @@ fn add_callback_ids_to_transcript_results(
         .collect()
 }
 
-/// This function is called on each entry on the SetupInitialDkgContext. It returns
+/// This function is called for each `SubnetCallContext` entry. It returns
 /// either the created high and low configs for the entry or returns two errors
 /// identified by the NiDkgId.
 pub(crate) fn create_low_high_remote_dkg_configs(
@@ -1065,11 +1260,17 @@ mod tests {
     };
     use ic_crypto_test_utils_ni_dkg::dummy_transcript_for_tests_with_params;
     use ic_logger::replica_logger::no_op_logger;
-    use ic_management_canister_types_private::{VetKdCurve, VetKdKeyId};
+    use ic_management_canister_types_private::{MasterPublicKeyId, VetKdCurve, VetKdKeyId};
     use ic_registry_client_helpers::subnet::SubnetRegistry;
+    use ic_replicated_state::metadata_state::subnet_call_context_manager::{
+        ReshareChainKeyContext, SetupInitialDkgContext, SubnetCallContext,
+    };
     use ic_test_utilities_logger::with_test_replica_logger;
     use ic_test_utilities_registry::{SubnetRecordBuilder, add_subnet_record};
-    use ic_test_utilities_types::ids::{node_test_id, subnet_test_id};
+    use ic_test_utilities_types::{
+        ids::{node_test_id, subnet_test_id},
+        messages::RequestBuilder,
+    };
     use ic_types::{
         RegistryVersion,
         crypto::threshold_sig::ni_dkg::{NiDkgId, NiDkgTag, NiDkgTargetId, NiDkgTargetSubnet},
@@ -1304,6 +1505,7 @@ mod tests {
             BTreeMap::new(),
             &BTreeMap::new(),
             &BTreeMap::new(),
+            &BTreeSet::new(),
             &BTreeMap::new(),
             &logger,
         )
@@ -1338,6 +1540,7 @@ mod tests {
             BTreeMap::new(),
             &BTreeMap::new(),
             &BTreeMap::new(),
+            &BTreeSet::new(),
             &initial_dkg_attempts,
             &logger,
         )
@@ -1382,6 +1585,7 @@ mod tests {
             BTreeMap::new(),
             &BTreeMap::new(),
             &BTreeMap::new(),
+            &BTreeSet::new(),
             &initial_dkg_attempts,
             &logger,
         )
@@ -1965,6 +2169,142 @@ mod tests {
         });
     }
 
+    #[test]
+    fn test_get_completed_target_ids() {
+        let targets: Vec<_> = (1..=3).map(|i| NiDkgTargetId::new([i; 32])).collect();
+        let tags = [NiDkgTag::LowThreshold, NiDkgTag::HighThreshold];
+
+        let config_ids: Vec<_> = targets
+            .iter()
+            .flat_map(|t| {
+                tags.iter().map(|tag| NiDkgId {
+                    start_block_height: Height::from(1),
+                    dealer_subnet: subnet_test_id(1),
+                    dkg_tag: tag.clone(),
+                    target_subnet: NiDkgTargetSubnet::Remote(*t),
+                })
+            })
+            .collect();
+
+        // target 0 is fully completed, target 1 only has low completed, target 2 is not completed
+        let completed: BTreeSet<_> = config_ids[..3].iter().cloned().collect();
+
+        let result = get_completed_target_ids(config_ids.iter(), &completed);
+        assert_eq!(result, BTreeSet::from([targets[0]]));
+    }
+
+    #[test]
+    fn test_process_subnet_call_context_ignores_completed_targets() {
+        ic_test_utilities::artifact_pool_config::with_test_pool_config(|pool_config| {
+            let node_ids = vec![node_test_id(0), node_test_id(1)];
+            let subnet_id = subnet_test_id(0);
+            let Dependencies { registry, .. } =
+                dependencies_with_subnet_records_with_raw_state_manager(
+                    pool_config,
+                    subnet_id,
+                    vec![(
+                        10,
+                        SubnetRecordBuilder::from(&node_ids)
+                            .with_dkg_interval_length(99)
+                            .build(),
+                    )],
+                );
+
+            let key_id = VetKdKeyId {
+                curve: VetKdCurve::Bls12_381_G2,
+                name: String::from("some_vetkey"),
+            };
+            let ni_dkg_key_id = NiDkgMasterPublicKeyId::VetKd(key_id.clone());
+            let tag = NiDkgTag::HighThresholdForKey(ni_dkg_key_id);
+
+            let registry_version = registry.get_latest_version();
+            let completed_init_dkg_target = NiDkgTargetId::new([1_u8; 32]);
+            let pending_init_dkg_target = NiDkgTargetId::new([2_u8; 32]);
+            let completed_reshare_target = NiDkgTargetId::new([3_u8; 32]);
+            let pending_reshare_target = NiDkgTargetId::new([4_u8; 32]);
+
+            let mut state = ic_test_utilities_state::get_initial_state(0, 0);
+            let target_nodes: BTreeSet<_> =
+                vec![10, 11, 12].into_iter().map(node_test_id).collect();
+
+            for target_id in [completed_init_dkg_target, pending_init_dkg_target] {
+                state.metadata.subnet_call_context_manager.push_context(
+                    SubnetCallContext::SetupInitialDKG(SetupInitialDkgContext {
+                        request: RequestBuilder::new().build(),
+                        nodes_in_target_subnet: target_nodes.clone(),
+                        target_id,
+                        registry_version,
+                        time: state.time(),
+                    }),
+                );
+            }
+
+            for target_id in [completed_reshare_target, pending_reshare_target] {
+                state.metadata.subnet_call_context_manager.push_context(
+                    SubnetCallContext::ReshareChainKey(ReshareChainKeyContext {
+                        request: RequestBuilder::new().build(),
+                        key_id: MasterPublicKeyId::VetKd(key_id.clone()),
+                        nodes: target_nodes.clone(),
+                        registry_version,
+                        time: state.time(),
+                        target_id,
+                    }),
+                );
+            }
+
+            let reshared_transcripts = BTreeMap::from([(
+                tag.clone(),
+                dummy_transcript_for_tests_with_params(
+                    node_ids.clone(),
+                    tag.clone(),
+                    tag.threshold_for_subnet_of_size(node_ids.len()) as u32,
+                    10,
+                ),
+            )]);
+
+            let validation_context = ValidationContext {
+                registry_version,
+                certified_height: Height::from(0),
+                time: UNIX_EPOCH,
+            };
+
+            let completed_target_ids =
+                BTreeSet::from([completed_init_dkg_target, completed_reshare_target]);
+
+            let (configs, errors, valid_target_ids) = process_subnet_call_context(
+                subnet_id,
+                Height::from(0),
+                registry.as_ref(),
+                &state,
+                &validation_context,
+                &reshared_transcripts,
+                &completed_target_ids,
+                &no_op_logger(),
+            )
+            .unwrap();
+
+            // One setup_initial_dkg group (low + high) and one reshare_chain_key group
+            assert_eq!(configs.len(), 2);
+            assert_eq!(configs[0].len(), 2);
+            for config in &configs[0] {
+                assert_eq!(
+                    config.dkg_id().target_subnet,
+                    NiDkgTargetSubnet::Remote(pending_init_dkg_target)
+                );
+            }
+            assert_eq!(configs[1].len(), 1);
+            assert_eq!(
+                configs[1][0].dkg_id().target_subnet,
+                NiDkgTargetSubnet::Remote(pending_reshare_target)
+            );
+            assert!(errors.is_empty());
+            assert_eq!(
+                valid_target_ids,
+                vec![pending_init_dkg_target, pending_reshare_target]
+            );
+        });
+    }
+
     struct TestDkgPool {
         messages: Vec<Message>,
     }
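Aside: the (expected-config-count, callback) pairing that `build_target_id_callback_map` introduces can be illustrated standalone. A sketch under the assumptions stated in its doc comment — a `SetupInitialDKG` request needs two transcripts (low + high threshold), a `ReshareChainKey` request needs one; the types and names below are simplified stand-ins, not the real replicated-state API.

    use std::collections::BTreeMap;

    type TargetId = [u8; 32];
    type CallbackId = u64;

    /// Map each remote target to (expected number of DKG configs, callback id).
    fn build_map(
        setup_initial_dkg: &[(CallbackId, TargetId)],
        reshare_chain_key: &[(CallbackId, TargetId)],
    ) -> BTreeMap<TargetId, (usize, CallbackId)> {
        setup_initial_dkg
            .iter()
            .map(|&(cb, t)| (t, (2, cb))) // SetupInitialDKG: low + high threshold
            .chain(reshare_chain_key.iter().map(|&(cb, t)| (t, (1, cb))))
            .collect()
    }

    fn main() {
        let map = build_map(&[(7, [1u8; 32])], &[(9, [2u8; 32])]);
        assert_eq!(map[&[1u8; 32]], (2, 7));
        assert_eq!(map[&[2u8; 32]], (1, 9));
    }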
diff --git a/rs/consensus/dkg/src/payload_validator.rs b/rs/consensus/dkg/src/payload_validator.rs
index 2b4a6c6ad487..c20a9abed3d3 100644
--- a/rs/consensus/dkg/src/payload_validator.rs
+++ b/rs/consensus/dkg/src/payload_validator.rs
@@ -1,4 +1,5 @@
-use crate::{crypto_validate_dealing, payload_builder, utils};
+use self::payload_builder::create_early_remote_transcripts;
+use super::{crypto_validate_dealing, payload_builder, utils};
 use ic_consensus_utils::{crypto::ConsensusCrypto, pool_reader::PoolReader};
 use ic_interfaces::{
     dkg::{DkgPayloadValidationError, DkgPool},
@@ -6,7 +7,7 @@ use ic_interfaces::{
 };
 use ic_interfaces_registry::RegistryClient;
 use ic_interfaces_state_manager::StateReader;
-use ic_logger::{ReplicaLogger, warn};
+use ic_logger::{ReplicaLogger, info, warn};
 use ic_registry_client_helpers::subnet::SubnetRegistry;
 use ic_replicated_state::ReplicatedState;
 use ic_types::{
@@ -67,7 +68,7 @@ pub fn validate_payload(
             registry_version,
             state_reader,
             validation_context,
-            ic_logger::replica_logger::no_op_logger(),
+            log.clone(),
         )?;
         if summary_payload.dkg != expected_summary {
             return Err(InvalidDkgPayloadReason::MismatchedDkgSummary(
@@ -123,6 +124,9 @@ pub fn validate_payload(
                 &data_payload.dkg,
                 max_dealings_per_block,
                 &parent,
+                state_reader,
+                validation_context,
+                log,
                 metrics,
             )
         }
@@ -139,6 +143,9 @@ fn validate_dealings_payload(
     dealings: &DkgDataPayload,
     max_dealings_per_payload: usize,
     parent: &Block,
+    state_reader: &dyn StateReader<State = ReplicatedState>,
+    validation_context: &ValidationContext,
+    log: &ReplicaLogger,
     metrics: &IntCounterVec,
 ) -> ValidationResult<DkgPayloadValidationError> {
     if dealings.start_height != parent.payload.as_ref().dkg_interval_start_height() {
@@ -195,8 +202,32 @@ fn validate_dealings_payload(
 
     // If we have early transcripts, we compare them
     if !dealings.transcripts_for_remote_subnets.is_empty() {
-        // For now payloads with early transcripts are rejected
-        return Err(InvalidDkgPayloadReason::InvalidEarlyNiDkgTranscripts.into());
+        let expected_transcripts = create_early_remote_transcripts(
+            pool_reader,
+            crypto,
+            parent,
+            last_summary,
+            state_reader,
+            validation_context,
+            log.clone(),
+        )?;
+
+        if dealings.transcripts_for_remote_subnets != expected_transcripts {
+            warn!(
+                log,
+                "Failed to validate {} early remote DKG transcripts in data block payload at height {}",
+                dealings.transcripts_for_remote_subnets.len(),
+                parent.height.increment(),
+            );
+            return Err(InvalidDkgPayloadReason::InvalidEarlyNiDkgTranscripts.into());
+        }
+
+        info!(
+            log,
+            "Validated {} early remote DKG transcripts in data block payload at height {}",
+            dealings.transcripts_for_remote_subnets.len(),
+            parent.height.increment(),
+        );
     }
 
     Ok(())
@@ -451,8 +482,8 @@ mod tests {
     }
 
     #[test]
-    fn validate_dealings_payload_when_remote_transcripts_present_fails_test() {
-        // Data payloads with early/remote transcripts are rejected for now.
+    fn validate_dealings_payload_when_invalid_early_remote_transcripts_present_fails_test() {
+        // Data payloads with invalid early/remote transcripts are rejected.
         ic_test_utilities::artifact_pool_config::with_test_pool_config(|pool_config| {
             let registry_version = 1;
             let committee = [NODE_1, NODE_2, NODE_3];
@@ -451,8 +482,8 @@ mod tests {
     }
 
     #[test]
-    fn validate_dealings_payload_when_remote_transcripts_present_fails_test() {
-        // Data payloads with early/remote transcripts are rejected for now.
+    fn validate_dealings_payload_when_invalid_early_remote_transcripts_present_fails_test() {
+        // Data payloads with invalid early/remote transcripts are rejected.
         ic_test_utilities::artifact_pool_config::with_test_pool_config(|pool_config| {
             let registry_version = 1;
             let committee = [NODE_1, NODE_2, NODE_3];
diff --git a/rs/consensus/dkg/src/test_utils.rs b/rs/consensus/dkg/src/test_utils.rs
index c4e1c7117fc0..f242a1c07f8d 100644
--- a/rs/consensus/dkg/src/test_utils.rs
+++ b/rs/consensus/dkg/src/test_utils.rs
@@ -1,9 +1,11 @@
 use ic_crypto_test_utils_ni_dkg::dummy_dealing;
+use ic_interfaces::consensus_pool::ConsensusPool;
 use ic_interfaces_state_manager::Labeled;
 use ic_management_canister_types_private::{MasterPublicKeyId, VetKdKeyId};
 use ic_replicated_state::metadata_state::subnet_call_context_manager::{
     ReshareChainKeyContext, SetupInitialDkgContext, SubnetCallContext,
 };
+use ic_test_artifact_pool::consensus_pool::TestConsensusPool;
 use ic_test_utilities::state_manager::RefMockStateManager;
 use ic_test_utilities_consensus::fake::FakeContentSigner;
 use ic_test_utilities_types::{
@@ -12,38 +14,65 @@ use ic_test_utilities_types::{
 };
 use ic_types::{
     Height, NumberOfNodes, RegistryVersion,
-    consensus::dkg::{DealingContent, Message},
+    consensus::{
+        BlockPayload,
+        dkg::{DealingContent, DealingMessages, Message},
+    },
     crypto::threshold_sig::ni_dkg::{
-        NiDkgId, NiDkgTag, NiDkgTargetId, NiDkgTargetSubnet,
+        NiDkgId, NiDkgTag, NiDkgTargetId, NiDkgTargetSubnet, NiDkgTranscript,
         config::{NiDkgConfig, NiDkgConfigData},
     },
+    messages::CallbackId,
+};
+use std::{
+    collections::{BTreeMap, BTreeSet},
+    sync::Arc,
 };
-use std::{collections::BTreeSet, sync::Arc};
 
-pub(super) fn complement_state_manager_with_setup_initial_dkg_request(
-    state_manager: Arc<RefMockStateManager>,
+pub(super) fn make_setup_initial_dkg_context(
     registry_version: RegistryVersion,
     node_ids: Vec<u64>,
+    target_id: NiDkgTargetId,
+) -> SubnetCallContext {
+    SubnetCallContext::SetupInitialDKG(SetupInitialDkgContext {
+        request: RequestBuilder::new().build(),
+        nodes_in_target_subnet: node_ids.into_iter().map(node_test_id).collect(),
+        target_id,
+        registry_version,
+        time: ic_types::time::UNIX_EPOCH,
+    })
+}
+
+pub(super) fn make_reshare_chain_key_context(
+    registry_version: RegistryVersion,
+    key_id: VetKdKeyId,
+    node_ids: Vec<u64>,
+    target_id: NiDkgTargetId,
+) -> SubnetCallContext {
+    SubnetCallContext::ReshareChainKey(ReshareChainKeyContext {
+        request: RequestBuilder::new().build(),
+        key_id: MasterPublicKeyId::VetKd(key_id),
+        nodes: node_ids.into_iter().map(node_test_id).collect(),
+        registry_version,
+        time: ic_types::time::UNIX_EPOCH,
+        target_id,
+    })
+}
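A hedged usage sketch of the two constructors above (all concrete values invented for illustration); the resulting contexts would then be fed to `complement_state_manager_with_dkg_contexts`, defined next:

```rust
// Hypothetical test snippet; the registry version, node IDs, key and
// target IDs are made up. Assumes `VetKdCurve` is in scope.
let contexts = vec![
    make_setup_initial_dkg_context(
        RegistryVersion::from(1),
        vec![10, 11, 12],
        NiDkgTargetId::new([1_u8; 32]),
    ),
    make_reshare_chain_key_context(
        RegistryVersion::from(1),
        VetKdKeyId {
            curve: VetKdCurve::Bls12_381_G2,
            name: String::from("some_vetkey"),
        },
        vec![10, 11, 12],
        NiDkgTargetId::new([2_u8; 32]),
    ),
];
```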
+
+/// Set up the state manager mock to return an initial state containing the
+/// given subnet call contexts.
+pub(super) fn complement_state_manager_with_dkg_contexts(
+    state_manager: Arc<RefMockStateManager>,
+    contexts: Vec<SubnetCallContext>,
     times: Option<usize>,
-    target: Option<NiDkgTargetId>,
 ) {
     let mut state = ic_test_utilities_state::get_initial_state(0, 0);
-
-    // Add the context into state_manager.
-    let nodes_in_target_subnet = node_ids.into_iter().map(node_test_id).collect();
-
-    if let Some(target_id) = target {
-        state.metadata.subnet_call_context_manager.push_context(
-            SubnetCallContext::SetupInitialDKG(SetupInitialDkgContext {
-                request: RequestBuilder::new().build(),
-                nodes_in_target_subnet,
-                target_id,
-                registry_version,
-                time: state.time(),
-            }),
-        );
+    for context in contexts {
+        state
+            .metadata
+            .subnet_call_context_manager
+            .push_context(context);
     }
-
     let mut mock = state_manager.get_mut();
     let expectation = mock
         .expect_get_state_at()
@@ -53,6 +82,20 @@ pub(super) fn complement_state_manager_with_setup_initial_dkg_request(
     }
 }
 
+pub(super) fn complement_state_manager_with_setup_initial_dkg_request(
+    state_manager: Arc<RefMockStateManager>,
+    registry_version: RegistryVersion,
+    node_ids: Vec<u64>,
+    times: Option<usize>,
+    target: Option<NiDkgTargetId>,
+) {
+    let contexts = target
+        .into_iter()
+        .map(|t| make_setup_initial_dkg_context(registry_version, node_ids.clone(), t))
+        .collect();
+    complement_state_manager_with_dkg_contexts(state_manager, contexts, times);
+}
+
 pub(super) fn complement_state_manager_with_reshare_chain_key_request(
     state_manager: Arc<RefMockStateManager>,
     registry_version: RegistryVersion,
@@ -61,33 +104,81 @@ pub(super) fn complement_state_manager_with_reshare_chain_key_request(
     times: Option<usize>,
     target: Option<NiDkgTargetId>,
 ) {
-    let mut state = ic_test_utilities_state::get_initial_state(0, 0);
+    let contexts = target
+        .into_iter()
+        .map(|t| {
+            make_reshare_chain_key_context(registry_version, key_id.clone(), node_ids.clone(), t)
+        })
+        .collect();
+    complement_state_manager_with_dkg_contexts(state_manager, contexts, times);
+}
 
-    // Add the context into state_manager.
-    let nodes_in_target_subnet = node_ids.into_iter().map(node_test_id).collect();
-
-    if let Some(target_id) = target {
-        state.metadata.subnet_call_context_manager.push_context(
-            SubnetCallContext::ReshareChainKey(ReshareChainKeyContext {
-                request: RequestBuilder::new().build(),
-                key_id: MasterPublicKeyId::VetKd(key_id),
-                nodes: nodes_in_target_subnet,
-                registry_version,
-                time: state.time(),
-                target_id,
-            }),
-        );
+/// Extract the remote DKG transcripts from the highest validated block.
+pub(super) fn extract_remote_dkgs_from_highest_block(
+    pool: &TestConsensusPool,
+) -> Vec<(NiDkgId, CallbackId, Result<NiDkgTranscript, String>)> {
+    let block: ic_types::consensus::Block = pool
+        .validated()
+        .block_proposal()
+        .get_highest()
+        .unwrap()
+        .content
+        .into_inner();
+
+    match block.payload.as_ref() {
+        BlockPayload::Summary(summary) => summary.dkg.transcripts_for_remote_subnets.clone(),
+        BlockPayload::Data(data) => data.dkg.transcripts_for_remote_subnets.clone(),
     }
+}
 
-    let mut mock = state_manager.get_mut();
-    let expectation = mock
-        .expect_get_state_at()
-        .return_const(Ok(Labeled::new(Height::new(0), Arc::new(state))));
-    if let Some(times) = times {
-        expectation.times(times);
+/// Extract the dealings from the highest validated block.
+pub(super) fn extract_dealings_from_highest_block(pool: &TestConsensusPool) -> DealingMessages {
+    let block: ic_types::consensus::Block = pool
+        .validated()
+        .block_proposal()
+        .get_highest()
+        .unwrap()
+        .content
+        .into_inner();
+
+    match block.payload.as_ref() {
+        BlockPayload::Summary(_) => vec![],
+        BlockPayload::Data(data) => data.dkg.messages.clone(),
+    }
+}
+
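The extractors in this file all repeat the same highest-block access; if that boilerplate ever needs factoring, one possible shape is a projection helper (sketch only, not part of this change):

```rust
// Hypothetical shared helper: fetch the highest validated proposal
// and project a value out of its block payload.
fn with_highest_block_payload<T>(
    pool: &TestConsensusPool,
    project: impl FnOnce(&BlockPayload) -> T,
) -> T {
    let block: ic_types::consensus::Block = pool
        .validated()
        .block_proposal()
        .get_highest()
        .unwrap()
        .content
        .into_inner();
    project(block.payload.as_ref())
}
```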
+/// Extract the remote DKG IDs from the highest validated block.
+pub(super) fn extract_remote_dkg_ids_from_highest_block(
+    pool: &TestConsensusPool,
+    target_id: NiDkgTargetId,
+) -> Vec<NiDkgId> {
+    extract_dkg_configs_from_highest_block(pool)
+        .iter()
+        .filter(|(id, _)| id.target_subnet == NiDkgTargetSubnet::Remote(target_id))
+        .map(|(id, _)| id)
+        .cloned()
+        .collect()
+}
+
+/// Extract the DKG configs from the highest validated block.
+pub(super) fn extract_dkg_configs_from_highest_block(
+    pool: &TestConsensusPool,
+) -> BTreeMap<NiDkgId, NiDkgConfig> {
+    let block: ic_types::consensus::Block = pool
+        .validated()
+        .block_proposal()
+        .get_highest()
+        .unwrap()
+        .content
+        .into_inner();
+
+    match block.payload.as_ref() {
+        BlockPayload::Summary(summary) => summary.dkg.configs.clone(),
+        BlockPayload::Data(_) => BTreeMap::new(),
     }
 }
 
+/// Create a dealing from node `node_idx`.
 pub(super) fn create_dealing(node_idx: u64, dkg_id: NiDkgId) -> Message {
     let content = DealingContent::new(dummy_dealing(node_idx as u8), dkg_id);
     Message::fake(content, node_test_id(node_idx))
diff --git a/rs/consensus/src/consensus/notary.rs b/rs/consensus/src/consensus/notary.rs
index 39836b93479a..e9660391d4c3 100644
--- a/rs/consensus/src/consensus/notary.rs
+++ b/rs/consensus/src/consensus/notary.rs
@@ -716,6 +716,13 @@ mod tests {
             .get_mut()
             .expect_latest_certified_height()
            .return_const(gap_trigger_height);
+        state_manager
+            .get_mut()
+            .expect_get_state_at()
+            .return_const(Ok(ic_interfaces_state_manager::Labeled::new(
+                Height::new(0),
+                Arc::new(ic_test_utilities_state::get_initial_state(0, 0)),
+            )));
 
         assert_matches!(
             get_adjusted_notary_delay_from_settings(
@@ -734,6 +741,13 @@ mod tests {
             .get_mut()
             .expect_latest_certified_height()
             .return_const(PoolReader::new(&pool).get_finalized_height());
+        state_manager
+            .get_mut()
+            .expect_get_state_at()
+            .return_const(Ok(ic_interfaces_state_manager::Labeled::new(
+                Height::new(0),
+                Arc::new(ic_test_utilities_state::get_initial_state(0, 0)),
+            )));
 
         assert_eq!(
             get_adjusted_notary_delay_from_settings(
@@ -752,6 +766,13 @@ mod tests {
             .get_mut()
             .expect_latest_certified_height()
             .return_const(PoolReader::new(&pool).get_finalized_height());
+        state_manager
+            .get_mut()
+            .expect_get_state_at()
+            .return_const(Ok(ic_interfaces_state_manager::Labeled::new(
+                Height::new(0),
+                Arc::new(ic_test_utilities_state::get_initial_state(0, 0)),
+            )));
 
         pool.advance_round_normal_operation_no_cup();
diff --git a/rs/test_utilities/artifact_pool/src/consensus_pool.rs b/rs/test_utilities/artifact_pool/src/consensus_pool.rs
index d1a46bfde8ce..da6ed4f77a27 100644
--- a/rs/test_utilities/artifact_pool/src/consensus_pool.rs
+++ b/rs/test_utilities/artifact_pool/src/consensus_pool.rs
@@ -33,6 +33,8 @@ use std::sync::Arc;
 use std::sync::RwLock;
 use std::time::Instant;
 
+const MAX_DEALINGS_PER_BLOCK: usize = 20;
+
 #[allow(clippy::type_complexity)]
 pub struct TestConsensusPool {
     subnet_id: SubnetId,
@@ -160,7 +162,7 @@ fn dkg_payload_builder_fn(
             &*state_manager,
             validation_context,
             no_op_logger(),
-            10, // at most dealings per block
+            MAX_DEALINGS_PER_BLOCK,
         )
         .unwrap_or_else(|err| panic!("Couldn't create the payload: {err:?}"))
     })
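The same `expect_get_state_at` stanza now appears at three call sites in the notary tests; if it keeps spreading, a shared helper along these lines could absorb it (sketch, assuming the types used above):

```rust
// Hypothetical helper: make the mocked state manager serve an empty
// initial replicated state labeled with height 0.
fn expect_initial_state_at_height_zero(state_manager: &RefMockStateManager) {
    state_manager
        .get_mut()
        .expect_get_state_at()
        .return_const(Ok(ic_interfaces_state_manager::Labeled::new(
            Height::new(0),
            Arc::new(ic_test_utilities_state::get_initial_state(0, 0)),
        )));
}
```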
diff --git a/rs/types/types/src/consensus/dkg.rs b/rs/types/types/src/consensus/dkg.rs
index 47243a8d8550..1cf407f04496 100644
--- a/rs/types/types/src/consensus/dkg.rs
+++ b/rs/types/types/src/consensus/dkg.rs
@@ -505,12 +505,12 @@ impl TryFrom<pb::DkgDataPayload> for DkgDataPayload {
 }
 
 impl DkgDataPayload {
-    /// Return an empty DealingsPayload using the given start_height.
+    /// Return an empty [`DkgDataPayload`] using the given start_height.
     pub fn new_empty(start_height: Height) -> Self {
         Self::new(start_height, vec![])
     }
 
-    /// Return an new DealingsPayload.
+    /// Return a new [`DkgDataPayload`].
     pub fn new(start_height: Height, messages: DealingMessages) -> Self {
         Self {
             start_height,
@@ -519,6 +519,19 @@ impl DkgDataPayload {
         }
     }
 
+    /// Return a new [`DkgDataPayload`] with the given remote DKG transcripts.
+    pub fn new_with_remote_dkg_transcripts(
+        start_height: Height,
+        messages: DealingMessages,
+        remote_dkg_transcripts: Vec<(NiDkgId, CallbackId, Result<NiDkgTranscript, String>)>,
+    ) -> Self {
+        Self {
+            start_height,
+            messages,
+            transcripts_for_remote_subnets: remote_dkg_transcripts,
+        }
+    }
+
     /// Returns true if the payload is empty
     pub fn is_empty(&self) -> bool {
         let DkgDataPayload {