From 1889c478130630be1d44c2e0c07c7888684e4774 Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Mon, 3 Jul 2023 20:02:44 +0300 Subject: [PATCH 01/46] Changed core identity type to be constructed from static and dynamic parts --- model/flow/identity.go | 49 +++++++++++++++++++++++++++++------------- 1 file changed, 34 insertions(+), 15 deletions(-) diff --git a/model/flow/identity.go b/model/flow/identity.go index f05188988e6..b38c32c9b0b 100644 --- a/model/flow/identity.go +++ b/model/flow/identity.go @@ -27,8 +27,8 @@ const DefaultInitialWeight = 100 // rxid is the regex for parsing node identity entries. var rxid = regexp.MustCompile(`^(collection|consensus|execution|verification|access)-([0-9a-fA-F]{64})@([\w\d]+|[\w\d][\w\d\-]*[\w\d](?:\.*[\w\d][\w\d\-]*[\w\d])*|[\w\d][\w\d\-]*[\w\d])(:[\d]+)?=(\d{1,20})$`) -// Identity represents the public identity of one network participant (node). -type Identity struct { +// IdentitySkeleton represents the static part of the public identity of one network participant (node). +type IdentitySkeleton struct { // NodeID uniquely identifies a particular node. A node's ID is fixed for // the duration of that node's participation in the network. NodeID Identifier @@ -37,9 +37,18 @@ type Identity struct { // Role is the node's role in the network and defines its abilities and // responsibilities. Role Role + // InitialWeight is a 'trust score' initially assigned by the EpochSetup event after + // the staking phase. The initial weights define the supermajority thresholds for + // the cluster and security node consensus throughout the Epoch. + InitialWeight uint64 + StakingPubKey crypto.PublicKey + NetworkPubKey crypto.PublicKey +} + +// DynamicIdentity represents the dynamic part of the public identity of one network participant (node). +type DynamicIdentity struct { // Weight represents the node's authority to perform certain tasks relative - // to other nodes. For example, in the consensus committee, the node's weight - // represents the weight assigned to its votes. + // to other nodes. // // A node's weight is distinct from its stake. Stake represents the quantity // of FLOW tokens held by the network in escrow during the course of the node's // participation in the network. The stake is strictly managed by the protocol and // can only be affected by staking operations through the staking smart contract. // // Nodes which are registered to join at the next epoch will appear in the // identity table but are considered to have zero weight up until their first - // epoch begins. Likewise nodes which were registered in the previous epoch + // epoch begins. Likewise, nodes which were registered in the previous epoch // but have left at the most recent epoch boundary will appear in the identity // table with zero weight. Weight uint64 // Ejected represents whether a node has been permanently removed from the // network. A node may be ejected for either: - // * committing one protocol felony - // * committing a series of protocol misdemeanours - Ejected bool - StakingPubKey crypto.PublicKey - NetworkPubKey crypto.PublicKey + // * requesting self-ejection to protect its stake in case the node operator suspects + // the node's keys to be compromised + // * committing a serious protocol violation or multiple smaller misdemeanours + Ejected bool +} + +// Identity combines the static and dynamic parts and represents the full public identity of one network participant (node). +type Identity struct { + IdentitySkeleton + DynamicIdentity } // ParseIdentity parses a string representation of an identity.
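For reference, the hunk above is the core of the change: Identity now embeds IdentitySkeleton (static fields fixed for an epoch) and DynamicIdentity (fields the protocol may update), so existing field accesses keep compiling through Go field promotion. Below is a minimal, illustrative sketch of constructing and reading the composite type under the new layout; it is not part of the patch, and the node ID, address, and weights are placeholder values (the import path is the flow-go module path):

    package main

    import (
    	"fmt"

    	"github.com/onflow/flow-go/model/flow"
    )

    func main() {
    	id := flow.Identity{
    		IdentitySkeleton: flow.IdentitySkeleton{
    			NodeID:        flow.Identifier{0x01}, // static: fixed for the node's lifetime (placeholder value)
    			Address:       "1.flow.com:3569",     // placeholder address
    			Role:          flow.RoleConsensus,
    			InitialWeight: 100, // static: weight assigned at epoch setup
    		},
    		DynamicIdentity: flow.DynamicIdentity{
    			Weight:  100,   // dynamic: current weight, may diverge from InitialWeight
    			Ejected: false, // dynamic: set if the node is removed from the network
    		},
    	}

    	// Field promotion keeps old call sites working:
    	// id.NodeID resolves to id.IdentitySkeleton.NodeID,
    	// id.Weight resolves to id.DynamicIdentity.Weight.
    	fmt.Println(id.NodeID, id.Role, id.Weight, id.Ejected)
    }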
@@ -82,10 +96,15 @@ func ParseIdentity(identity string) (*Identity, error) { // create the identity iy := Identity{ - NodeID: nodeID, - Address: address, - Role: role, - Weight: weight, + IdentitySkeleton: IdentitySkeleton{ + NodeID: nodeID, + Address: address, + Role: role, + InitialWeight: weight, + }, + DynamicIdentity: DynamicIdentity{ + Weight: weight, + }, } return &iy, nil @@ -563,7 +582,7 @@ func (il IdentityList) Exists(target *Identity) bool { // target: value to search for // CAUTION: The identity list MUST be sorted prior to calling this method func (il IdentityList) IdentifierExists(target Identifier) bool { - _, ok := slices.BinarySearchFunc(il, &Identity{NodeID: target}, func(a, b *Identity) int { + _, ok := slices.BinarySearchFunc(il, &Identity{IdentitySkeleton: IdentitySkeleton{NodeID: target}}, func(a, b *Identity) int { return bytes.Compare(a.NodeID[:], b.NodeID[:]) }) return ok From 81ca0c8dc3828a2c3a3ea0d31e31fae46738825c Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Mon, 3 Jul 2023 20:11:46 +0300 Subject: [PATCH 02/46] Fixed usages of Identity --- model/bootstrap/node_info.go | 18 ++- model/convert/service_event.go | 11 +- utils/unittest/fixtures.go | 16 ++- utils/unittest/service_events_fixtures.go | 150 ++++++++++++++-------- 4 files changed, 127 insertions(+), 68 deletions(-) diff --git a/model/bootstrap/node_info.go b/model/bootstrap/node_info.go index cdc6f855c4a..6dce969478c 100644 --- a/model/bootstrap/node_info.go +++ b/model/bootstrap/node_info.go @@ -347,12 +347,18 @@ func (node NodeInfo) PartnerPublic() PartnerNodeInfoPub { // Identity returns the node info as a public Flow identity. func (node NodeInfo) Identity() *flow.Identity { identity := &flow.Identity{ - NodeID: node.NodeID, - Address: node.Address, - Role: node.Role, - Weight: node.Weight, - StakingPubKey: node.StakingPubKey(), - NetworkPubKey: node.NetworkPubKey(), + IdentitySkeleton: flow.IdentitySkeleton{ + NodeID: node.NodeID, + Address: node.Address, + Role: node.Role, + InitialWeight: node.Weight, + StakingPubKey: node.stakingPubKey, + NetworkPubKey: node.networkPubKey, + }, + DynamicIdentity: flow.DynamicIdentity{ + Weight: node.Weight, + Ejected: false, + }, } return identity } diff --git a/model/convert/service_event.go b/model/convert/service_event.go index b3e9902e1e4..4db354eaf24 100644 --- a/model/convert/service_event.go +++ b/model/convert/service_event.go @@ -589,9 +589,14 @@ func convertParticipants(cdcParticipants []cadence.Value) (flow.IdentityList, er } identity := &flow.Identity{ - Address: string(address), - Weight: uint64(initialWeight), - Role: flow.Role(role), + IdentitySkeleton: flow.IdentitySkeleton{ + InitialWeight: uint64(initialWeight), + Address: string(address), + Role: flow.Role(role), + }, + DynamicIdentity: flow.DynamicIdentity{ + Weight: uint64(initialWeight), + }, } // convert nodeID string into identifier diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index 884bdd5abf7..eeeedf0ecc5 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -1055,11 +1055,17 @@ func IdentityFixture(opts ...func(*flow.Identity)) *flow.Identity { nodeID := IdentifierFixture() stakingKey := StakingPrivKeyByIdentifier(nodeID) identity := flow.Identity{ - NodeID: nodeID, - Address: fmt.Sprintf("address-%x", nodeID[0:7]), - Role: flow.RoleConsensus, - Weight: 1000, - StakingPubKey: stakingKey.PublicKey(), + IdentitySkeleton: flow.IdentitySkeleton{ + NodeID: nodeID, + Address: fmt.Sprintf("address-%x", nodeID[0:7]), + Role: 
flow.RoleConsensus, + InitialWeight: 1000, + StakingPubKey: stakingKey.PublicKey(), + }, + DynamicIdentity: flow.DynamicIdentity{ + Weight: 1000, + Ejected: false, + }, } for _, apply := range opts { apply(&identity) diff --git a/utils/unittest/service_events_fixtures.go b/utils/unittest/service_events_fixtures.go index 8ae5a5b6a62..56337e8588d 100644 --- a/utils/unittest/service_events_fixtures.go +++ b/utils/unittest/service_events_fixtures.go @@ -49,60 +49,102 @@ func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetu }, Participants: flow.IdentityList{ { - Role: flow.RoleCollection, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000001"), - Address: "1.flow.com", - NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), - StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), - Weight: 100, - }, - { - Role: flow.RoleCollection, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000002"), - Address: "2.flow.com", - NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), - StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), - Weight: 100, - }, - { - Role: flow.RoleCollection, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000003"), - Address: "3.flow.com", - NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), - StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), - Weight: 100, - }, - { - Role: flow.RoleCollection, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000004"), - Address: "4.flow.com", - NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), - StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), - Weight: 100, - }, - { - Role: flow.RoleConsensus, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000011"), - Address: "11.flow.com", - NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "cfdfe8e4362c8f79d11772cb7277ab16e5033a63e8dd5d34caf1b041b77e5b2d63c2072260949ccf8907486e4cfc733c8c42ca0e4e208f30470b0d950856cd47"), - StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, 
"8207559cd7136af378bba53a8f0196dee3849a3ab02897c1995c3e3f6ca0c4a776c3ae869d1ddbb473090054be2400ad06d7910aa2c5d1780220fdf3765a3c1764bce10c6fe66a5a2be51a422e878518bd750424bb56b8a0ecf0f8ad2057e83f"), - Weight: 100, - }, - { - Role: flow.RoleExecution, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000021"), - Address: "21.flow.com", - NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "d64318ba0dbf68f3788fc81c41d507c5822bf53154530673127c66f50fe4469ccf1a054a868a9f88506a8999f2386d86fcd2b901779718cba4fb53c2da258f9e"), - StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "880b162b7ec138b36af401d07868cb08d25746d905395edbb4625bdf105d4bb2b2f4b0f4ae273a296a6efefa7ce9ccb914e39947ce0e83745125cab05d62516076ff0173ed472d3791ccef937597c9ea12381d76f547a092a4981d77ff3fba83"), - Weight: 100, - }, - { - Role: flow.RoleVerification, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000031"), - Address: "31.flow.com", - NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "697241208dcc9142b6f53064adc8ff1c95760c68beb2ba083c1d005d40181fd7a1b113274e0163c053a3addd47cd528ec6a1f190cf465aac87c415feaae011ae"), - StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "b1f97d0a06020eca97352e1adde72270ee713c7daf58da7e74bf72235321048b4841bdfc28227964bf18e371e266e32107d238358848bcc5d0977a0db4bda0b4c33d3874ff991e595e0f537c7b87b4ddce92038ebc7b295c9ea20a1492302aa7"), - Weight: 100, + flow.IdentitySkeleton{ + Role: flow.RoleCollection, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000001"), + Address: "1.flow.com", + NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), + StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), + InitialWeight: 100, + }, + flow.DynamicIdentity{ + Weight: 100, + Ejected: false, + }, + }, + { + flow.IdentitySkeleton{ + Role: flow.RoleCollection, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000002"), + Address: "2.flow.com", + NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), + StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), + InitialWeight: 100, + }, + flow.DynamicIdentity{ + Weight: 100, + Ejected: false, + }, + }, + { + flow.IdentitySkeleton{ + Role: flow.RoleCollection, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000003"), + Address: "3.flow.com", + NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), + StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, 
"af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), + InitialWeight: 100, + }, + flow.DynamicIdentity{ + Weight: 100, + Ejected: false, + }, + }, + { + flow.IdentitySkeleton{ + Role: flow.RoleCollection, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000004"), + Address: "4.flow.com", + NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), + StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), + InitialWeight: 100, + }, + flow.DynamicIdentity{ + Weight: 100, + Ejected: false, + }, + }, + { + flow.IdentitySkeleton{ + Role: flow.RoleConsensus, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000011"), + Address: "11.flow.com", + NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "cfdfe8e4362c8f79d11772cb7277ab16e5033a63e8dd5d34caf1b041b77e5b2d63c2072260949ccf8907486e4cfc733c8c42ca0e4e208f30470b0d950856cd47"), + StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "8207559cd7136af378bba53a8f0196dee3849a3ab02897c1995c3e3f6ca0c4a776c3ae869d1ddbb473090054be2400ad06d7910aa2c5d1780220fdf3765a3c1764bce10c6fe66a5a2be51a422e878518bd750424bb56b8a0ecf0f8ad2057e83f"), + InitialWeight: 100, + }, + flow.DynamicIdentity{ + Weight: 100, + Ejected: false, + }, + }, + { + flow.IdentitySkeleton{ + Role: flow.RoleExecution, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000021"), + Address: "21.flow.com", + NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "d64318ba0dbf68f3788fc81c41d507c5822bf53154530673127c66f50fe4469ccf1a054a868a9f88506a8999f2386d86fcd2b901779718cba4fb53c2da258f9e"), + StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "880b162b7ec138b36af401d07868cb08d25746d905395edbb4625bdf105d4bb2b2f4b0f4ae273a296a6efefa7ce9ccb914e39947ce0e83745125cab05d62516076ff0173ed472d3791ccef937597c9ea12381d76f547a092a4981d77ff3fba83"), + InitialWeight: 100, + }, + flow.DynamicIdentity{ + Weight: 100, + Ejected: false, + }, + }, + { + flow.IdentitySkeleton{ + Role: flow.RoleVerification, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000031"), + Address: "31.flow.com", + NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "697241208dcc9142b6f53064adc8ff1c95760c68beb2ba083c1d005d40181fd7a1b113274e0163c053a3addd47cd528ec6a1f190cf465aac87c415feaae011ae"), + StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "b1f97d0a06020eca97352e1adde72270ee713c7daf58da7e74bf72235321048b4841bdfc28227964bf18e371e266e32107d238358848bcc5d0977a0db4bda0b4c33d3874ff991e595e0f537c7b87b4ddce92038ebc7b295c9ea20a1492302aa7"), + InitialWeight: 100, + }, + flow.DynamicIdentity{ + Weight: 100, + Ejected: false, + }, }, }, } From c19603d1bfeb55a03af3c3b9d6d5ca2e94a6fa8f Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Tue, 4 Jul 2023 13:55:18 +0300 Subject: [PATCH 03/46] Updated usages of flow.Identity to use flow.IdentitySkeleton. Fixed tests. 
Part 1 --- cmd/bootstrap/run/cluster_qc.go | 2 +- cmd/bootstrap/run/qc.go | 2 +- .../transit/cmd/generate_root_block_vote.go | 4 +- cmd/observer/node_builder/observer_builder.go | 16 +-- cmd/scaffold.go | 2 +- .../export_report.json | 4 +- .../timeout_processor_test.go | 2 +- .../verification/combined_signer_v2_test.go | 6 +- .../verification/combined_signer_v3_test.go | 6 +- .../verification/staking_signer_test.go | 4 +- .../combined_vote_processor_v2_test.go | 4 +- .../combined_vote_processor_v3_test.go | 4 +- .../staking_vote_processor_test.go | 2 +- consensus/integration/nodes_test.go | 2 +- engine/access/apiproxy/access_api_proxy.go | 10 +- .../access/apiproxy/access_api_proxy_test.go | 10 +- engine/testutil/nodes.go | 4 +- follower/consensus_follower.go | 10 +- follower/follower_builder.go | 12 +- model/convert/fixtures_test.go | 126 ++++++++++++------ model/flow/identity.go | 6 + model/flow/identity_test.go | 74 ++++++---- module/local/me.go | 4 +- module/local/me_nokey.go | 4 +- module/local/me_test.go | 4 +- module/upstream/upstream_connector.go | 6 +- network/internal/p2putils/utils.go | 2 +- network/p2p/utils/p2putils.go | 4 +- 28 files changed, 206 insertions(+), 130 deletions(-) diff --git a/cmd/bootstrap/run/cluster_qc.go b/cmd/bootstrap/run/cluster_qc.go index fa91e5cc4f8..a55b7920708 100644 --- a/cmd/bootstrap/run/cluster_qc.go +++ b/cmd/bootstrap/run/cluster_qc.go @@ -80,7 +80,7 @@ func createRootBlockVotes(participants []bootstrap.NodeInfo, rootBlock *model.Bl if err != nil { return nil, fmt.Errorf("could not retrieve private keys for participant: %w", err) } - me, err := local.New(participant.Identity(), keys.StakingKey) + me, err := local.New(participant.Identity().IdentitySkeleton, keys.StakingKey) if err != nil { return nil, err } diff --git a/cmd/bootstrap/run/qc.go b/cmd/bootstrap/run/qc.go index c07879eb446..3facb38f442 100644 --- a/cmd/bootstrap/run/qc.go +++ b/cmd/bootstrap/run/qc.go @@ -120,7 +120,7 @@ func GenerateRootBlockVotes(block *flow.Block, participantData *ParticipantData) if err != nil { return nil, fmt.Errorf("could not get private keys for participant: %w", err) } - me, err := local.New(p.Identity(), keys.StakingKey) + me, err := local.New(p.Identity().IdentitySkeleton, keys.StakingKey) if err != nil { return nil, err } diff --git a/cmd/bootstrap/transit/cmd/generate_root_block_vote.go b/cmd/bootstrap/transit/cmd/generate_root_block_vote.go index 89702a388fa..562edc67372 100644 --- a/cmd/bootstrap/transit/cmd/generate_root_block_vote.go +++ b/cmd/bootstrap/transit/cmd/generate_root_block_vote.go @@ -60,11 +60,11 @@ func generateVote(c *cobra.Command, args []string) { } stakingPrivKey := nodeInfo.StakingPrivKey.PrivateKey - identity := &flow.Identity{ + identity := flow.IdentitySkeleton{ NodeID: nodeID, Address: nodeInfo.Address, Role: nodeInfo.Role, - Weight: flow.DefaultInitialWeight, + InitialWeight: flow.DefaultInitialWeight, StakingPubKey: stakingPrivKey.PublicKey(), NetworkPubKey: nodeInfo.NetworkPrivKey.PrivateKey.PublicKey(), } diff --git a/cmd/observer/node_builder/observer_builder.go b/cmd/observer/node_builder/observer_builder.go index c0a6d62b2b4..bd307cf38c8 100644 --- a/cmd/observer/node_builder/observer_builder.go +++ b/cmd/observer/node_builder/observer_builder.go @@ -95,7 +95,7 @@ type ObserverServiceConfig struct { bootstrapNodeAddresses []string bootstrapNodePublicKeys []string observerNetworkingKeyPath string - bootstrapIdentities flow.IdentityList // the identity list of bootstrap peers the node uses to discover other nodes + 
bootstrapIdentities flow.IdentitySkeletonList // the identity list of bootstrap peers the node uses to discover other nodes apiRatelimits map[string]int apiBurstlimits map[string]int rpcConf rpc.Config @@ -103,7 +103,7 @@ type ObserverServiceConfig struct { apiTimeout time.Duration upstreamNodeAddresses []string upstreamNodePublicKeys []string - upstreamIdentities flow.IdentityList // the identity list of upstream peers the node uses to forward API requests to + upstreamIdentities flow.IdentitySkeletonList // the identity list of upstream peers the node uses to forward API requests to } // DefaultObserverServiceConfig defines all the default values for the ObserverServiceConfig @@ -202,7 +202,7 @@ func (builder *ObserverServiceBuilder) deriveUpstreamIdentities() error { return fmt.Errorf("number of addresses and keys provided for the boostrap nodes don't match") } - ids := make([]*flow.Identity, len(addresses)) + ids := make(flow.IdentitySkeletonList, len(addresses)) for i, address := range addresses { key := keys[i] @@ -214,7 +214,7 @@ func (builder *ObserverServiceBuilder) deriveUpstreamIdentities() error { } // create the identity of the peer by setting only the relevant fields - ids[i] = &flow.Identity{ + ids[i] = &flow.IdentitySkeleton{ NodeID: flow.ZeroID, // the NodeID is the hash of the staking key and for the public network it does not apply Address: address, Role: flow.RoleAccess, // the upstream node has to be an access node @@ -516,12 +516,12 @@ func publicNetworkMsgValidators(log zerolog.Logger, idProvider module.IdentityPr // BootstrapIdentities converts the bootstrap node addresses and keys to a Flow Identity list where // each Flow Identity is initialized with the passed address, the networking key // and the Node ID set to ZeroID, role set to Access, 0 stake and no staking key. 
-func BootstrapIdentities(addresses []string, keys []string) (flow.IdentityList, error) { +func BootstrapIdentities(addresses []string, keys []string) (flow.IdentitySkeletonList, error) { if len(addresses) != len(keys) { return nil, fmt.Errorf("number of addresses and keys provided for the boostrap nodes don't match") } - ids := make([]*flow.Identity, len(addresses)) + ids := make(flow.IdentitySkeletonList, len(addresses)) for i, address := range addresses { bytes, err := hex.DecodeString(keys[i]) if err != nil { @@ -534,7 +534,7 @@ func BootstrapIdentities(addresses []string, keys []string) (flow.IdentityList, } // create the identity of the peer by setting only the relevant fields - ids[i] = &flow.Identity{ + ids[i] = &flow.IdentitySkeleton{ NodeID: flow.ZeroID, // the NodeID is the hash of the staking key and for the public network it does not apply Address: address, Role: flow.RoleAccess, // the upstream node has to be an access node @@ -759,7 +759,7 @@ func (builder *ObserverServiceBuilder) initPublicLibp2pNode(networkKey crypto.Pr func (builder *ObserverServiceBuilder) initObserverLocal() func(node *cmd.NodeConfig) error { return func(node *cmd.NodeConfig) error { // for an observer, set the identity here explicitly since it will not be found in the protocol state - self := &flow.Identity{ + self := flow.IdentitySkeleton{ NodeID: node.NodeID, NetworkPubKey: node.NetworkKey.PublicKey(), StakingPubKey: nil, // no staking key needed for the observer diff --git a/cmd/scaffold.go b/cmd/scaffold.go index bacce0e22ab..adeb9a56d87 100644 --- a/cmd/scaffold.go +++ b/cmd/scaffold.go @@ -1258,7 +1258,7 @@ func (fnb *FlowNodeBuilder) initLocal() error { return fmt.Errorf("configured staking key does not match protocol state") } - fnb.Me, err = local.New(self, fnb.StakingKey) + fnb.Me, err = local.New(self.IdentitySkeleton, fnb.StakingKey) if err != nil { return fmt.Errorf("could not initialize local: %w", err) } diff --git a/cmd/util/cmd/execution-state-extract/export_report.json b/cmd/util/cmd/execution-state-extract/export_report.json index 2cbadb698d0..f470ddf155f 100644 --- a/cmd/util/cmd/execution-state-extract/export_report.json +++ b/cmd/util/cmd/execution-state-extract/export_report.json @@ -1,6 +1,6 @@ { "EpochCounter": 0, - "PreviousStateCommitment": "18eb0e8beef7ce851e552ecd29c813fde0a9e6f0c5614d7615642076602a48cf", - "CurrentStateCommitment": "18eb0e8beef7ce851e552ecd29c813fde0a9e6f0c5614d7615642076602a48cf", + "PreviousStateCommitment": "aca1a4c335b1525a5f8f768a5408ca3fbc86a87817ed1f3cfa44f303743846b7", + "CurrentStateCommitment": "aca1a4c335b1525a5f8f768a5408ca3fbc86a87817ed1f3cfa44f303743846b7", "ReportSucceeded": true } \ No newline at end of file diff --git a/consensus/hotstuff/timeoutcollector/timeout_processor_test.go b/consensus/hotstuff/timeoutcollector/timeout_processor_test.go index b37188c5857..3ca024cd84c 100644 --- a/consensus/hotstuff/timeoutcollector/timeout_processor_test.go +++ b/consensus/hotstuff/timeoutcollector/timeout_processor_test.go @@ -466,7 +466,7 @@ func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) { stakingPriv := unittest.StakingPrivKeyFixture() identity.StakingPubKey = stakingPriv.PublicKey() - me, err := local.New(identity, stakingPriv) + me, err := local.New(identity.IdentitySkeleton, stakingPriv) require.NoError(t, err) signers[identity.NodeID] = verification.NewStakingSigner(me) diff --git a/consensus/hotstuff/verification/combined_signer_v2_test.go b/consensus/hotstuff/verification/combined_signer_v2_test.go index 6947a12acf1..f5fbac9bb0d 
100644 --- a/consensus/hotstuff/verification/combined_signer_v2_test.go +++ b/consensus/hotstuff/verification/combined_signer_v2_test.go @@ -44,7 +44,7 @@ func TestCombinedSignWithBeaconKey(t *testing.T) { nodeID.NodeID = signerID nodeID.StakingPubKey = stakingPriv.PublicKey() - me, err := local.New(nodeID, stakingPriv) + me, err := local.New(nodeID.IdentitySkeleton, stakingPriv) require.NoError(t, err) signer := NewCombinedSigner(me, beaconKeyStore) @@ -137,7 +137,7 @@ func TestCombinedSignWithNoBeaconKey(t *testing.T) { nodeID.NodeID = signerID nodeID.StakingPubKey = stakingPriv.PublicKey() - me, err := local.New(nodeID, stakingPriv) + me, err := local.New(nodeID.IdentitySkeleton, stakingPriv) require.NoError(t, err) signer := NewCombinedSigner(me, beaconKeyStore) @@ -218,7 +218,7 @@ func TestCombinedSign_BeaconKeyStore_ViewForUnknownEpoch(t *testing.T) { nodeID := unittest.IdentityFixture() nodeID.StakingPubKey = stakingPriv.PublicKey() - me, err := local.New(nodeID, stakingPriv) + me, err := local.New(nodeID.IdentitySkeleton, stakingPriv) require.NoError(t, err) signer := NewCombinedSigner(me, beaconKeyStore) diff --git a/consensus/hotstuff/verification/combined_signer_v3_test.go b/consensus/hotstuff/verification/combined_signer_v3_test.go index 1a59d6d047a..4afd14dcb85 100644 --- a/consensus/hotstuff/verification/combined_signer_v3_test.go +++ b/consensus/hotstuff/verification/combined_signer_v3_test.go @@ -44,7 +44,7 @@ func TestCombinedSignWithBeaconKeyV3(t *testing.T) { nodeID.NodeID = signerID nodeID.StakingPubKey = stakingPriv.PublicKey() - me, err := local.New(nodeID, stakingPriv) + me, err := local.New(nodeID.IdentitySkeleton, stakingPriv) require.NoError(t, err) signer := NewCombinedSignerV3(me, beaconKeyStore) @@ -104,7 +104,7 @@ func TestCombinedSignWithNoBeaconKeyV3(t *testing.T) { nodeID.NodeID = signerID nodeID.StakingPubKey = stakingPriv.PublicKey() - me, err := local.New(nodeID, stakingPriv) + me, err := local.New(nodeID.IdentitySkeleton, stakingPriv) require.NoError(t, err) signer := NewCombinedSignerV3(me, beaconKeyStore) @@ -290,7 +290,7 @@ func TestCombinedSign_BeaconKeyStore_ViewForUnknownEpochv3(t *testing.T) { nodeID := unittest.IdentityFixture() nodeID.StakingPubKey = stakingPriv.PublicKey() - me, err := local.New(nodeID, stakingPriv) + me, err := local.New(nodeID.IdentitySkeleton, stakingPriv) require.NoError(t, err) signer := NewCombinedSigner(me, beaconKeyStore) diff --git a/consensus/hotstuff/verification/staking_signer_test.go b/consensus/hotstuff/verification/staking_signer_test.go index fc563266f92..33ead119498 100644 --- a/consensus/hotstuff/verification/staking_signer_test.go +++ b/consensus/hotstuff/verification/staking_signer_test.go @@ -47,7 +47,7 @@ func TestStakingSigner_CreateProposal(t *testing.T) { require.Nil(t, proposal) }) t.Run("created-proposal", func(t *testing.T) { - me, err := local.New(signer, stakingPriv) + me, err := local.New(signer.IdentitySkeleton, stakingPriv) require.NoError(t, err) signerIdentity := unittest.IdentityFixture(unittest.WithNodeID(signerID), @@ -88,7 +88,7 @@ func TestStakingSigner_CreateVote(t *testing.T) { require.Nil(t, proposal) }) t.Run("created-vote", func(t *testing.T) { - me, err := local.New(signer, stakingPriv) + me, err := local.New(signer.IdentitySkeleton, stakingPriv) require.NoError(t, err) signerIdentity := unittest.IdentityFixture(unittest.WithNodeID(signerID), diff --git a/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go 
b/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go index 1c005388d40..926f5fe8946 100644 --- a/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go +++ b/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go @@ -816,7 +816,7 @@ func TestCombinedVoteProcessorV2_BuildVerifyQC(t *testing.T) { beaconSignerStore := hsig.NewEpochAwareRandomBeaconKeyStore(epochLookup, keys) - me, err := local.New(identity, stakingPriv) + me, err := local.New(identity.IdentitySkeleton, stakingPriv) require.NoError(t, err) signers[identity.NodeID] = verification.NewCombinedSigner(me, beaconSignerStore) @@ -838,7 +838,7 @@ func TestCombinedVoteProcessorV2_BuildVerifyQC(t *testing.T) { beaconSignerStore := hsig.NewEpochAwareRandomBeaconKeyStore(epochLookup, keys) - me, err := local.New(identity, stakingPriv) + me, err := local.New(identity.IdentitySkeleton, stakingPriv) require.NoError(t, err) signers[identity.NodeID] = verification.NewCombinedSigner(me, beaconSignerStore) diff --git a/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go b/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go index 831a68e1650..cca64d1b667 100644 --- a/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go +++ b/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go @@ -952,7 +952,7 @@ func TestCombinedVoteProcessorV3_BuildVerifyQC(t *testing.T) { beaconSignerStore := hsig.NewEpochAwareRandomBeaconKeyStore(epochLookup, keys) - me, err := local.New(identity, stakingPriv) + me, err := local.New(identity.IdentitySkeleton, stakingPriv) require.NoError(t, err) signers[identity.NodeID] = verification.NewCombinedSignerV3(me, beaconSignerStore) @@ -974,7 +974,7 @@ func TestCombinedVoteProcessorV3_BuildVerifyQC(t *testing.T) { beaconSignerStore := hsig.NewEpochAwareRandomBeaconKeyStore(epochLookup, keys) - me, err := local.New(identity, stakingPriv) + me, err := local.New(identity.IdentitySkeleton, stakingPriv) require.NoError(t, err) signers[identity.NodeID] = verification.NewCombinedSignerV3(me, beaconSignerStore) diff --git a/consensus/hotstuff/votecollector/staking_vote_processor_test.go b/consensus/hotstuff/votecollector/staking_vote_processor_test.go index b6efe8f93c4..c463f221ffb 100644 --- a/consensus/hotstuff/votecollector/staking_vote_processor_test.go +++ b/consensus/hotstuff/votecollector/staking_vote_processor_test.go @@ -260,7 +260,7 @@ func TestStakingVoteProcessorV2_BuildVerifyQC(t *testing.T) { stakingPriv := unittest.StakingPrivKeyFixture() identity.StakingPubKey = stakingPriv.PublicKey() - me, err := local.New(identity, stakingPriv) + me, err := local.New(identity.IdentitySkeleton, stakingPriv) require.NoError(t, err) signers[identity.NodeID] = verification.NewStakingSigner(me) diff --git a/consensus/integration/nodes_test.go b/consensus/integration/nodes_test.go index 79fc57a56f1..ced8ee7d111 100644 --- a/consensus/integration/nodes_test.go +++ b/consensus/integration/nodes_test.go @@ -444,7 +444,7 @@ func createNode( require.NoError(t, err) // make local - me, err := local.New(identity, privateKeys.StakingKey) + me, err := local.New(identity.IdentitySkeleton, privateKeys.StakingKey) require.NoError(t, err) // add a network for this node to the hub diff --git a/engine/access/apiproxy/access_api_proxy.go b/engine/access/apiproxy/access_api_proxy.go index d72ec5bb5e2..126246b9947 100644 --- a/engine/access/apiproxy/access_api_proxy.go +++ b/engine/access/apiproxy/access_api_proxy.go @@ -294,14 +294,14 @@ func (h 
*FlowAccessAPIRouter) GetExecutionResultForBlockID(context context.Conte type FlowAccessAPIForwarder struct { lock sync.Mutex roundRobin int - ids flow.IdentityList + ids flow.IdentitySkeletonList upstream []access.AccessAPIClient connections []*grpc.ClientConn timeout time.Duration maxMsgSize uint } -func NewFlowAccessAPIForwarder(identities flow.IdentityList, timeout time.Duration, maxMsgSize uint) (*FlowAccessAPIForwarder, error) { +func NewFlowAccessAPIForwarder(identities flow.IdentitySkeletonList, timeout time.Duration, maxMsgSize uint) (*FlowAccessAPIForwarder, error) { forwarder := &FlowAccessAPIForwarder{maxMsgSize: maxMsgSize} err := forwarder.setFlowAccessAPI(identities, timeout) return forwarder, err @@ -311,11 +311,11 @@ func NewFlowAccessAPIForwarder(identities flow.IdentityList, timeout time.Durati // It is used by Observer services, Blockchain Data Service, etc. // Make sure that this is just for observation and not a staked participant in the flow network. // This means that observers see a copy of the data but there is no interaction to ensure integrity from the root block. -func (ret *FlowAccessAPIForwarder) setFlowAccessAPI(accessNodeAddressAndPort flow.IdentityList, timeout time.Duration) error { +func (ret *FlowAccessAPIForwarder) setFlowAccessAPI(accessNodeAddressAndPort flow.IdentitySkeletonList, timeout time.Duration) error { ret.timeout = timeout ret.ids = accessNodeAddressAndPort - ret.upstream = make([]access.AccessAPIClient, accessNodeAddressAndPort.Count()) - ret.connections = make([]*grpc.ClientConn, accessNodeAddressAndPort.Count()) + ret.upstream = make([]access.AccessAPIClient, len(accessNodeAddressAndPort)) + ret.connections = make([]*grpc.ClientConn, len(accessNodeAddressAndPort)) for i, identity := range accessNodeAddressAndPort { // Store the faultTolerantClient setup parameters such as address, public, key and timeout, so that // we can refresh the API on connection loss diff --git a/engine/access/apiproxy/access_api_proxy_test.go b/engine/access/apiproxy/access_api_proxy_test.go index 9f5a5aa74b8..34a15d633f1 100644 --- a/engine/access/apiproxy/access_api_proxy_test.go +++ b/engine/access/apiproxy/access_api_proxy_test.go @@ -135,7 +135,10 @@ func TestNewFlowCachedAccessAPIProxy(t *testing.T) { } // Prepare a proxy that fails due to the second connection being idle - l := flow.IdentityList{{Address: unittest.IPPort("11634")}, {Address: unittest.IPPort("11635")}} + l := flow.IdentitySkeletonList{ + {Address: unittest.IPPort("11634")}, + {Address: unittest.IPPort("11635")}, + } c := FlowAccessAPIForwarder{} err = c.setFlowAccessAPI(l, time.Second) if err == nil { @@ -151,7 +154,10 @@ func TestNewFlowCachedAccessAPIProxy(t *testing.T) { background := context.Background() // Prepare a proxy - l = flow.IdentityList{{Address: unittest.IPPort("11634")}, {Address: unittest.IPPort("11635")}} + l = flow.IdentitySkeletonList{ + {Address: unittest.IPPort("11634")}, + {Address: unittest.IPPort("11635")}, + } c = FlowAccessAPIForwarder{} err = c.setFlowAccessAPI(l, time.Second) if err != nil { diff --git a/engine/testutil/nodes.go b/engine/testutil/nodes.go index fc5df47f25d..5121bf6fc42 100644 --- a/engine/testutil/nodes.go +++ b/engine/testutil/nodes.go @@ -216,7 +216,7 @@ func LocalFixture(t testing.TB, identity *flow.Identity) module.Local { // sets staking public key of the node identity.StakingPubKey = sk.PublicKey() - me, err := local.New(identity, sk) + me, err := local.New(identity.IdentitySkeleton, sk) require.NoError(t, err) return me @@ -284,7 +284,7 
@@ func CollectionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ro node := GenericNode(t, hub, identity.Identity(), rootSnapshot) privKeys, err := identity.PrivateKeys() require.NoError(t, err) - node.Me, err = local.New(identity.Identity(), privKeys.StakingKey) + node.Me, err = local.New(identity.Identity().IdentitySkeleton, privKeys.StakingKey) require.NoError(t, err) pools := epochs.NewTransactionPools( diff --git a/follower/consensus_follower.go b/follower/consensus_follower.go index 56863bcf530..c4df88bc082 100644 --- a/follower/consensus_follower.go +++ b/follower/consensus_follower.go @@ -107,10 +107,12 @@ func bootstrapIdentities(bootstrapNodes []BootstrapNodeInfo) flow.IdentityList { ids := make(flow.IdentityList, len(bootstrapNodes)) for i, b := range bootstrapNodes { ids[i] = &flow.Identity{ - Role: flow.RoleAccess, - NetworkPubKey: b.NetworkPublicKey, - Address: fmt.Sprintf("%s:%d", b.Host, b.Port), - StakingPubKey: nil, + IdentitySkeleton: flow.IdentitySkeleton{ + Role: flow.RoleAccess, + NetworkPubKey: b.NetworkPublicKey, + Address: fmt.Sprintf("%s:%d", b.Host, b.Port), + StakingPubKey: nil, + }, } } return ids diff --git a/follower/follower_builder.go b/follower/follower_builder.go index 1650fa15799..a91a2a4453d 100644 --- a/follower/follower_builder.go +++ b/follower/follower_builder.go @@ -442,10 +442,12 @@ func BootstrapIdentities(addresses []string, keys []string) (flow.IdentityList, // create the identity of the peer by setting only the relevant fields ids[i] = &flow.Identity{ - NodeID: flow.ZeroID, // the NodeID is the hash of the staking key and for the public network it does not apply - Address: address, - Role: flow.RoleAccess, // the upstream node has to be an access node - NetworkPubKey: networkKey, + IdentitySkeleton: flow.IdentitySkeleton{ + NodeID: flow.ZeroID, // the NodeID is the hash of the staking key and for the public network it does not apply + Address: address, + Role: flow.RoleAccess, // the upstream node has to be an access node + NetworkPubKey: networkKey, + }, } } return ids, nil @@ -660,7 +662,7 @@ func (builder *FollowerServiceBuilder) initPublicLibp2pNode(networkKey crypto.Pr func (builder *FollowerServiceBuilder) initObserverLocal() func(node *cmd.NodeConfig) error { return func(node *cmd.NodeConfig) error { // for an observer, set the identity here explicitly since it will not be found in the protocol state - self := &flow.Identity{ + self := flow.IdentitySkeleton{ NodeID: node.NodeID, NetworkPubKey: node.NetworkKey.PublicKey(), StakingPubKey: nil, // no staking key needed for the observer diff --git a/model/convert/fixtures_test.go b/model/convert/fixtures_test.go index 5c99d8709ee..cc3dc6996e3 100644 --- a/model/convert/fixtures_test.go +++ b/model/convert/fixtures_test.go @@ -47,60 +47,102 @@ func EpochSetupFixture(chain flow.ChainID) (flow.Event, *flow.EpochSetup) { }, Participants: flow.IdentityList{ { - Role: flow.RoleCollection, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000001"), - Address: "1.flow.com", - NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), - StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), - 
Weight: 100, + flow.IdentitySkeleton{ + Role: flow.RoleCollection, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000001"), + Address: "1.flow.com", + NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), + StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), + InitialWeight: 100, + }, + flow.DynamicIdentity{ + Weight: 100, + Ejected: false, + }, }, { - Role: flow.RoleCollection, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000002"), - Address: "2.flow.com", - NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), - StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), - Weight: 100, + flow.IdentitySkeleton{ + Role: flow.RoleCollection, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000002"), + Address: "2.flow.com", + NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), + StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), + InitialWeight: 100, + }, + flow.DynamicIdentity{ + Weight: 100, + Ejected: false, + }, }, { - Role: flow.RoleCollection, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000003"), - Address: "3.flow.com", - NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), - StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), - Weight: 100, + flow.IdentitySkeleton{ + Role: flow.RoleCollection, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000003"), + Address: "3.flow.com", + NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), + StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), + InitialWeight: 100, + }, + flow.DynamicIdentity{ + Weight: 100, + Ejected: false, + }, }, { - 
Role: flow.RoleCollection, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000004"), - Address: "4.flow.com", - NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), - StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), - Weight: 100, + flow.IdentitySkeleton{ + Role: flow.RoleCollection, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000004"), + Address: "4.flow.com", + NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), + StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), + InitialWeight: 100, + }, + flow.DynamicIdentity{ + Weight: 100, + Ejected: false, + }, }, { - Role: flow.RoleConsensus, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000011"), - Address: "11.flow.com", - NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "cfdfe8e4362c8f79d11772cb7277ab16e5033a63e8dd5d34caf1b041b77e5b2d63c2072260949ccf8907486e4cfc733c8c42ca0e4e208f30470b0d950856cd47"), - StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "8207559cd7136af378bba53a8f0196dee3849a3ab02897c1995c3e3f6ca0c4a776c3ae869d1ddbb473090054be2400ad06d7910aa2c5d1780220fdf3765a3c1764bce10c6fe66a5a2be51a422e878518bd750424bb56b8a0ecf0f8ad2057e83f"), - Weight: 100, + flow.IdentitySkeleton{ + Role: flow.RoleConsensus, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000011"), + Address: "11.flow.com", + NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "cfdfe8e4362c8f79d11772cb7277ab16e5033a63e8dd5d34caf1b041b77e5b2d63c2072260949ccf8907486e4cfc733c8c42ca0e4e208f30470b0d950856cd47"), + StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "8207559cd7136af378bba53a8f0196dee3849a3ab02897c1995c3e3f6ca0c4a776c3ae869d1ddbb473090054be2400ad06d7910aa2c5d1780220fdf3765a3c1764bce10c6fe66a5a2be51a422e878518bd750424bb56b8a0ecf0f8ad2057e83f"), + InitialWeight: 100, + }, + flow.DynamicIdentity{ + Weight: 100, + Ejected: false, + }, }, { - Role: flow.RoleExecution, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000021"), - Address: "21.flow.com", - NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "d64318ba0dbf68f3788fc81c41d507c5822bf53154530673127c66f50fe4469ccf1a054a868a9f88506a8999f2386d86fcd2b901779718cba4fb53c2da258f9e"), - StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "880b162b7ec138b36af401d07868cb08d25746d905395edbb4625bdf105d4bb2b2f4b0f4ae273a296a6efefa7ce9ccb914e39947ce0e83745125cab05d62516076ff0173ed472d3791ccef937597c9ea12381d76f547a092a4981d77ff3fba83"), - Weight: 100, + flow.IdentitySkeleton{ + Role: flow.RoleExecution, + NodeID: 
flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000021"), + Address: "21.flow.com", + NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "d64318ba0dbf68f3788fc81c41d507c5822bf53154530673127c66f50fe4469ccf1a054a868a9f88506a8999f2386d86fcd2b901779718cba4fb53c2da258f9e"), + StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "880b162b7ec138b36af401d07868cb08d25746d905395edbb4625bdf105d4bb2b2f4b0f4ae273a296a6efefa7ce9ccb914e39947ce0e83745125cab05d62516076ff0173ed472d3791ccef937597c9ea12381d76f547a092a4981d77ff3fba83"), + InitialWeight: 100, + }, + flow.DynamicIdentity{ + Weight: 100, + Ejected: false, + }, }, { - Role: flow.RoleVerification, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000031"), - Address: "31.flow.com", - NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "697241208dcc9142b6f53064adc8ff1c95760c68beb2ba083c1d005d40181fd7a1b113274e0163c053a3addd47cd528ec6a1f190cf465aac87c415feaae011ae"), - StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "b1f97d0a06020eca97352e1adde72270ee713c7daf58da7e74bf72235321048b4841bdfc28227964bf18e371e266e32107d238358848bcc5d0977a0db4bda0b4c33d3874ff991e595e0f537c7b87b4ddce92038ebc7b295c9ea20a1492302aa7"), - Weight: 100, + flow.IdentitySkeleton{ + Role: flow.RoleVerification, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000031"), + Address: "31.flow.com", + NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "697241208dcc9142b6f53064adc8ff1c95760c68beb2ba083c1d005d40181fd7a1b113274e0163c053a3addd47cd528ec6a1f190cf465aac87c415feaae011ae"), + StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "b1f97d0a06020eca97352e1adde72270ee713c7daf58da7e74bf72235321048b4841bdfc28227964bf18e371e266e32107d238358848bcc5d0977a0db4bda0b4c33d3874ff991e595e0f537c7b87b4ddce92038ebc7b295c9ea20a1492302aa7"), + InitialWeight: 100, + }, + flow.DynamicIdentity{ + Weight: 100, + Ejected: false, + }, }, }, } diff --git a/model/flow/identity.go b/model/flow/identity.go index b38c32c9b0b..2be78ef479a 100644 --- a/model/flow/identity.go +++ b/model/flow/identity.go @@ -279,6 +279,9 @@ func (iy *Identity) EqualTo(other *Identity) bool { if iy.Role != other.Role { return false } + if iy.InitialWeight != other.InitialWeight { + return false + } if iy.Weight != other.Weight { return false } @@ -314,6 +317,9 @@ type IdentityOrder func(*Identity, *Identity) bool // Identities are COPIED from the source slice. type IdentityMapFunc func(Identity) Identity +// IdentitySkeletonList is a list of nodes skeletons. +type IdentitySkeletonList []*IdentitySkeleton + // IdentityList is a list of nodes. 
type IdentityList []*Identity diff --git a/model/flow/identity_test.go b/model/flow/identity_test.go index 9c1a137d8ab..7f7a420b00a 100644 --- a/model/flow/identity_test.go +++ b/model/flow/identity_test.go @@ -258,56 +258,64 @@ func TestIdentity_EqualTo(t *testing.T) { }) t.Run("NodeID diff", func(t *testing.T) { - a := &flow.Identity{NodeID: [32]byte{1, 2, 3}} - b := &flow.Identity{NodeID: [32]byte{2, 2, 2}} + a := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{NodeID: [32]byte{1, 2, 3}}} + b := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{NodeID: [32]byte{2, 2, 2}}} require.False(t, a.EqualTo(b)) require.False(t, b.EqualTo(a)) }) t.Run("Address diff", func(t *testing.T) { - a := &flow.Identity{Address: "b"} - b := &flow.Identity{Address: "c"} + a := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{Address: "b"}} + b := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{Address: "c"}} require.False(t, a.EqualTo(b)) require.False(t, b.EqualTo(a)) }) t.Run("Role diff", func(t *testing.T) { - a := &flow.Identity{Role: flow.RoleCollection} - b := &flow.Identity{Role: flow.RoleExecution} + a := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{Role: flow.RoleCollection}} + b := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{Role: flow.RoleExecution}} + + require.False(t, a.EqualTo(b)) + require.False(t, b.EqualTo(a)) + }) + + t.Run("Initial weight diff", func(t *testing.T) { + a := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{InitialWeight: 1}} + b := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{InitialWeight: 2}} require.False(t, a.EqualTo(b)) require.False(t, b.EqualTo(a)) }) t.Run("Weight diff", func(t *testing.T) { - a := &flow.Identity{Weight: 1} - b := &flow.Identity{Weight: 2} + a := &flow.Identity{DynamicIdentity: flow.DynamicIdentity{Weight: 1}} + b := &flow.Identity{DynamicIdentity: flow.DynamicIdentity{Weight: 2}} require.False(t, a.EqualTo(b)) require.False(t, b.EqualTo(a)) }) t.Run("Ejected diff", func(t *testing.T) { - a := &flow.Identity{Ejected: true} - b := &flow.Identity{Ejected: false} + a := &flow.Identity{DynamicIdentity: flow.DynamicIdentity{Ejected: true}} + b := &flow.Identity{DynamicIdentity: flow.DynamicIdentity{Ejected: false}} require.False(t, a.EqualTo(b)) require.False(t, b.EqualTo(a)) }) t.Run("StakingPubKey diff", func(t *testing.T) { - a := &flow.Identity{StakingPubKey: pks[0]} - b := &flow.Identity{StakingPubKey: pks[1]} + a := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{StakingPubKey: pks[0]}} + b := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{StakingPubKey: pks[1]}} require.False(t, a.EqualTo(b)) require.False(t, b.EqualTo(a)) }) t.Run("NetworkPubKey diff", func(t *testing.T) { - a := &flow.Identity{NetworkPubKey: pks[0]} - b := &flow.Identity{NetworkPubKey: pks[1]} + a := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{NetworkPubKey: pks[0]}} + b := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{NetworkPubKey: pks[1]}} require.False(t, a.EqualTo(b)) require.False(t, b.EqualTo(a)) @@ -315,22 +323,32 @@ func TestIdentity_EqualTo(t *testing.T) { t.Run("Same data equals", func(t *testing.T) { a := &flow.Identity{ - NodeID: flow.Identifier{1, 2, 3}, - Address: "address", - Role: flow.RoleCollection, - Weight: 23, - Ejected: false, - StakingPubKey: pks[0], - NetworkPubKey: pks[1], + IdentitySkeleton: flow.IdentitySkeleton{ + NodeID: flow.Identifier{1, 2, 3}, + Address: "address", + Role: flow.RoleCollection, + InitialWeight: 23, + StakingPubKey: pks[0], + 
NetworkPubKey: pks[1], + }, + DynamicIdentity: flow.DynamicIdentity{ + Weight: 23, + Ejected: false, + }, } b := &flow.Identity{ - NodeID: flow.Identifier{1, 2, 3}, - Address: "address", - Role: flow.RoleCollection, - Weight: 23, - Ejected: false, - StakingPubKey: pks[0], - NetworkPubKey: pks[1], + IdentitySkeleton: flow.IdentitySkeleton{ + NodeID: flow.Identifier{1, 2, 3}, + Address: "address", + Role: flow.RoleCollection, + InitialWeight: 23, + StakingPubKey: pks[0], + NetworkPubKey: pks[1], + }, + DynamicIdentity: flow.DynamicIdentity{ + Weight: 23, + Ejected: false, + }, } require.True(t, a.EqualTo(b)) diff --git a/module/local/me.go b/module/local/me.go index 6a2f1ce117a..6353cdab846 100644 --- a/module/local/me.go +++ b/module/local/me.go @@ -12,11 +12,11 @@ import ( ) type Local struct { - me *flow.Identity + me flow.IdentitySkeleton sk crypto.PrivateKey // instance of the node's private staking key } -func New(id *flow.Identity, sk crypto.PrivateKey) (*Local, error) { +func New(id flow.IdentitySkeleton, sk crypto.PrivateKey) (*Local, error) { if !sk.PublicKey().Equals(id.StakingPubKey) { return nil, fmt.Errorf("cannot initialize with mismatching keys, expect %v, but got %v", id.StakingPubKey, sk.PublicKey()) diff --git a/module/local/me_nokey.go b/module/local/me_nokey.go index d9de4348dc1..dc9ab729186 100644 --- a/module/local/me_nokey.go +++ b/module/local/me_nokey.go @@ -10,10 +10,10 @@ import ( ) type LocalNoKey struct { - me *flow.Identity + me flow.IdentitySkeleton } -func NewNoKey(id *flow.Identity) (*LocalNoKey, error) { +func NewNoKey(id flow.IdentitySkeleton) (*LocalNoKey, error) { l := &LocalNoKey{ me: id, } diff --git a/module/local/me_test.go b/module/local/me_test.go index 42e46ae8c2f..825f1e9aa03 100644 --- a/module/local/me_test.go +++ b/module/local/me_test.go @@ -15,7 +15,7 @@ func TestInitializeWithMatchingKey(t *testing.T) { nodeID := unittest.IdentityFixture() nodeID.StakingPubKey = stakingPriv.PublicKey() - me, err := New(nodeID, stakingPriv) + me, err := New(nodeID.IdentitySkeleton, stakingPriv) require.NoError(t, err) require.Equal(t, nodeID.NodeID, me.NodeID()) } @@ -29,6 +29,6 @@ func TestInitializeWithMisMatchingKey(t *testing.T) { nodeID := unittest.IdentityFixture() nodeID.StakingPubKey = badPriv.PublicKey() - _, err := New(nodeID, stakingPriv) + _, err := New(nodeID.IdentitySkeleton, stakingPriv) require.Error(t, err) } diff --git a/module/upstream/upstream_connector.go b/module/upstream/upstream_connector.go index 36eb362e4f2..2ad57713be8 100644 --- a/module/upstream/upstream_connector.go +++ b/module/upstream/upstream_connector.go @@ -20,7 +20,7 @@ import ( // upstreamConnector tries to connect the unstaked AN with atleast one of the configured bootstrap access nodes type upstreamConnector struct { lm *lifecycle.LifecycleManager - bootstrapIdentities flow.IdentityList + bootstrapIdentities flow.IdentitySkeletonList logger zerolog.Logger unstakedNode p2p.LibP2PNode cancel context.CancelFunc @@ -28,7 +28,7 @@ type upstreamConnector struct { maxRetries uint64 } -func NewUpstreamConnector(bootstrapIdentities flow.IdentityList, unstakedNode p2p.LibP2PNode, logger zerolog.Logger) *upstreamConnector { +func NewUpstreamConnector(bootstrapIdentities flow.IdentitySkeletonList, unstakedNode p2p.LibP2PNode, logger zerolog.Logger) *upstreamConnector { return &upstreamConnector{ lm: lifecycle.NewLifecycleManager(), bootstrapIdentities: bootstrapIdentities, @@ -86,7 +86,7 @@ func (connector *upstreamConnector) Ready() <-chan struct{} { } // connect is run to 
connect to an boostrap peer -func (connector *upstreamConnector) connect(ctx context.Context, bootstrapPeer flow.Identity) error { +func (connector *upstreamConnector) connect(ctx context.Context, bootstrapPeer flow.IdentitySkeleton) error { select { // check for a cancelled/expired context diff --git a/network/internal/p2putils/utils.go b/network/internal/p2putils/utils.go index 2415ca5b4c8..00dd13c2e4e 100644 --- a/network/internal/p2putils/utils.go +++ b/network/internal/p2putils/utils.go @@ -117,7 +117,7 @@ func FilterStream(host host.Host, targetID peer.ID, protocol core.ProtocolID, di } // NetworkingInfo returns ip, port, libp2p public key of the identity. -func NetworkingInfo(identity flow.Identity) (string, string, crypto.PubKey, error) { +func NetworkingInfo(identity flow.IdentitySkeleton) (string, string, crypto.PubKey, error) { // split the node address into ip and port ip, port, err := net.SplitHostPort(identity.Address) if err != nil { diff --git a/network/p2p/utils/p2putils.go b/network/p2p/utils/p2putils.go index 552aa5c99a6..05b52ce52fc 100644 --- a/network/p2p/utils/p2putils.go +++ b/network/p2p/utils/p2putils.go @@ -19,7 +19,7 @@ import ( // flow.Identity ---> peer.AddrInfo // |-- Address ---> |-- []multiaddr.Multiaddr // |-- NetworkPublicKey ---> |-- ID -func PeerAddressInfo(identity flow.Identity) (peer.AddrInfo, error) { +func PeerAddressInfo(identity flow.IdentitySkeleton) (peer.AddrInfo, error) { ip, port, key, err := p2putils.NetworkingInfo(identity) if err != nil { return peer.AddrInfo{}, fmt.Errorf("could not translate identity to networking info %s: %w", identity.NodeID.String(), err) @@ -46,7 +46,7 @@ func PeerInfosFromIDs(ids flow.IdentityList) ([]peer.AddrInfo, map[flow.Identifi validIDs := make([]peer.AddrInfo, 0, len(ids)) invalidIDs := make(map[flow.Identifier]error) for _, id := range ids { - peerInfo, err := PeerAddressInfo(*id) + peerInfo, err := PeerAddressInfo(id.IdentitySkeleton) if err != nil { invalidIDs[id.NodeID] = err continue From 0f333f3850cae709075cd3ad1f55db22819fba7a Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Tue, 4 Jul 2023 15:30:51 +0300 Subject: [PATCH 04/46] Fixed compilation issues in tests --- .../export_report.json | 4 ++-- follower/consensus_follower.go | 16 ++++++-------- follower/follower_builder.go | 22 +++++++++---------- module/dkg/broker_test.go | 2 +- network/internal/p2pfixtures/fixtures.go | 2 +- network/p2p/connection/connManager_test.go | 2 +- network/p2p/p2pnode/libp2pNode_test.go | 14 ++++++------ network/p2p/p2pnode/libp2pStream_test.go | 16 +++++++------- network/p2p/p2pnode/libp2pUtils_test.go | 4 ++-- network/p2p/test/fixtures.go | 2 +- network/p2p/test/sporking_test.go | 6 ++--- network/p2p/test/topic_validator_test.go | 22 +++++++++---------- 12 files changed, 54 insertions(+), 58 deletions(-) diff --git a/cmd/util/cmd/execution-state-extract/export_report.json b/cmd/util/cmd/execution-state-extract/export_report.json index f470ddf155f..9b8d0b03753 100644 --- a/cmd/util/cmd/execution-state-extract/export_report.json +++ b/cmd/util/cmd/execution-state-extract/export_report.json @@ -1,6 +1,6 @@ { "EpochCounter": 0, - "PreviousStateCommitment": "aca1a4c335b1525a5f8f768a5408ca3fbc86a87817ed1f3cfa44f303743846b7", - "CurrentStateCommitment": "aca1a4c335b1525a5f8f768a5408ca3fbc86a87817ed1f3cfa44f303743846b7", + "PreviousStateCommitment": "829fd1ec06c9a40e5ceba9cff537e30806332aba6065da21a11556136d171c98", + "CurrentStateCommitment": "829fd1ec06c9a40e5ceba9cff537e30806332aba6065da21a11556136d171c98", 
"ReportSucceeded": true } \ No newline at end of file diff --git a/follower/consensus_follower.go b/follower/consensus_follower.go index c4df88bc082..c6f02cd5503 100644 --- a/follower/consensus_follower.go +++ b/follower/consensus_follower.go @@ -103,16 +103,14 @@ type BootstrapNodeInfo struct { NetworkPublicKey crypto.PublicKey // the network public key of the bootstrap peer } -func bootstrapIdentities(bootstrapNodes []BootstrapNodeInfo) flow.IdentityList { - ids := make(flow.IdentityList, len(bootstrapNodes)) +func bootstrapIdentities(bootstrapNodes []BootstrapNodeInfo) flow.IdentitySkeletonList { + ids := make(flow.IdentitySkeletonList, len(bootstrapNodes)) for i, b := range bootstrapNodes { - ids[i] = &flow.Identity{ - IdentitySkeleton: flow.IdentitySkeleton{ - Role: flow.RoleAccess, - NetworkPubKey: b.NetworkPublicKey, - Address: fmt.Sprintf("%s:%d", b.Host, b.Port), - StakingPubKey: nil, - }, + ids[i] = &flow.IdentitySkeleton{ + Role: flow.RoleAccess, + NetworkPubKey: b.NetworkPublicKey, + Address: fmt.Sprintf("%s:%d", b.Host, b.Port), + StakingPubKey: nil, } } return ids diff --git a/follower/follower_builder.go b/follower/follower_builder.go index a91a2a4453d..e7d874c4795 100644 --- a/follower/follower_builder.go +++ b/follower/follower_builder.go @@ -86,8 +86,8 @@ import ( type FollowerServiceConfig struct { bootstrapNodeAddresses []string bootstrapNodePublicKeys []string - bootstrapIdentities flow.IdentityList // the identity list of bootstrap peers the node uses to discover other nodes - NetworkKey crypto.PrivateKey // the networking key passed in by the caller when being used as a library + bootstrapIdentities flow.IdentitySkeletonList // the identity list of bootstrap peers the node uses to discover other nodes + NetworkKey crypto.PrivateKey // the networking key passed in by the caller when being used as a library baseOptions []cmd.Option } @@ -325,7 +325,7 @@ func (builder *FollowerServiceBuilder) BuildConsensusFollower() cmd.NodeBuilder type FollowerOption func(*FollowerServiceConfig) -func WithBootStrapPeers(bootstrapNodes ...*flow.Identity) FollowerOption { +func WithBootStrapPeers(bootstrapNodes ...*flow.IdentitySkeleton) FollowerOption { return func(config *FollowerServiceConfig) { config.bootstrapIdentities = bootstrapNodes } @@ -417,13 +417,13 @@ func publicNetworkMsgValidators(log zerolog.Logger, idProvider module.IdentityPr // BootstrapIdentities converts the bootstrap node addresses and keys to a Flow Identity list where // each Flow Identity is initialized with the passed address, the networking key // and the Node ID set to ZeroID, role set to Access, 0 stake and no staking key. 
-func BootstrapIdentities(addresses []string, keys []string) (flow.IdentityList, error) { +func BootstrapIdentities(addresses []string, keys []string) (flow.IdentitySkeletonList, error) { if len(addresses) != len(keys) { return nil, fmt.Errorf("number of addresses and keys provided for the boostrap nodes don't match") } - ids := make([]*flow.Identity, len(addresses)) + ids := make(flow.IdentitySkeletonList, len(addresses)) for i, address := range addresses { key := keys[i] @@ -441,13 +441,11 @@ func BootstrapIdentities(addresses []string, keys []string) (flow.IdentityList, } // create the identity of the peer by setting only the relevant fields - ids[i] = &flow.Identity{ - IdentitySkeleton: flow.IdentitySkeleton{ - NodeID: flow.ZeroID, // the NodeID is the hash of the staking key and for the public network it does not apply - Address: address, - Role: flow.RoleAccess, // the upstream node has to be an access node - NetworkPubKey: networkKey, - }, + ids[i] = &flow.IdentitySkeleton{ + NodeID: flow.ZeroID, // the NodeID is the hash of the staking key and for the public network it does not apply + Address: address, + Role: flow.RoleAccess, // the upstream node has to be an access node + NetworkPubKey: networkKey, } } return ids, nil diff --git a/module/dkg/broker_test.go b/module/dkg/broker_test.go index 85b744a913d..38c2e048a93 100644 --- a/module/dkg/broker_test.go +++ b/module/dkg/broker_test.go @@ -32,7 +32,7 @@ func initCommittee(n int) (identities flow.IdentityList, locals []module.Local) for i, key := range privateStakingKeys { id := unittest.IdentityFixture(unittest.WithStakingPubKey(key.PublicKey())) identities = append(identities, id) - local, _ := local.New(id, privateStakingKeys[i]) + local, _ := local.New(id.IdentitySkeleton, privateStakingKeys[i]) locals = append(locals, local) } return identities, locals diff --git a/network/internal/p2pfixtures/fixtures.go b/network/internal/p2pfixtures/fixtures.go index de942fe83dd..2a95eb7f6d2 100644 --- a/network/internal/p2pfixtures/fixtures.go +++ b/network/internal/p2pfixtures/fixtures.go @@ -229,7 +229,7 @@ func AddNodesToEachOthersPeerStore(t *testing.T, nodes []p2p.LibP2PNode, ids flo if node == other { continue } - otherPInfo, err := utils.PeerAddressInfo(*ids[i]) + otherPInfo, err := utils.PeerAddressInfo(ids[i].IdentitySkeleton) require.NoError(t, err) node.Host().Peerstore().AddAddrs(otherPInfo.ID, otherPInfo.Addrs, peerstore.AddressTTL) } diff --git a/network/p2p/connection/connManager_test.go b/network/p2p/connection/connManager_test.go index a9c66179969..e8d79a3f6d0 100644 --- a/network/p2p/connection/connManager_test.go +++ b/network/p2p/connection/connManager_test.go @@ -93,7 +93,7 @@ func testSequence(t *testing.T, sequence []fun, connMgr *connection.ConnManager) func generatePeerInfo(t *testing.T) peer.ID { key := p2pfixtures.NetworkingKeyFixtures(t) identity := unittest.IdentityFixture(unittest.WithNetworkingKey(key.PublicKey()), unittest.WithAddress("1.1.1.1:0")) - pInfo, err := utils.PeerAddressInfo(*identity) + pInfo, err := utils.PeerAddressInfo(identity.IdentitySkeleton) require.NoError(t, err) return pInfo.ID } diff --git a/network/p2p/p2pnode/libp2pNode_test.go b/network/p2p/p2pnode/libp2pNode_test.go index 3644bd3dbf2..68384c12140 100644 --- a/network/p2p/p2pnode/libp2pNode_test.go +++ b/network/p2p/p2pnode/libp2pNode_test.go @@ -58,7 +58,7 @@ func TestMultiAddress(t *testing.T) { } for _, tc := range tt { - ip, port, _, err := p2putils.NetworkingInfo(*tc.identity) + ip, port, _, err := 
p2putils.NetworkingInfo(tc.identity.IdentitySkeleton) require.NoError(t, err) actualAddress := utils.MultiAddressStr(ip, port) @@ -97,12 +97,12 @@ func TestGetPeerInfo(t *testing.T) { identity := unittest.IdentityFixture(unittest.WithNetworkingKey(key.PublicKey()), unittest.WithAddress("1.1.1.1:0")) // translates node-i address into info - info, err := utils.PeerAddressInfo(*identity) + info, err := utils.PeerAddressInfo(identity.IdentitySkeleton) require.NoError(t, err) // repeats the translation for node-i for j := 0; j < 10; j++ { - rinfo, err := utils.PeerAddressInfo(*identity) + rinfo, err := utils.PeerAddressInfo(identity.IdentitySkeleton) require.NoError(t, err) assert.Equal(t, rinfo.String(), info.String(), "inconsistent id generated") } @@ -122,7 +122,7 @@ func TestAddPeers(t *testing.T) { // add the remaining nodes to the first node as its set of peers for _, identity := range identities[1:] { - peerInfo, err := utils.PeerAddressInfo(*identity) + peerInfo, err := utils.PeerAddressInfo(identity.IdentitySkeleton) require.NoError(t, err) require.NoError(t, nodes[0].AddPeer(ctx, peerInfo)) } @@ -184,7 +184,7 @@ func TestConnGater(t *testing.T) { p2ptest.StartNode(t, signalerCtx, node1, 100*time.Millisecond) defer p2ptest.StopNode(t, node1, cancel, 100*time.Millisecond) - node1Info, err := utils.PeerAddressInfo(identity1) + node1Info, err := utils.PeerAddressInfo(identity1.IdentitySkeleton) assert.NoError(t, err) node2Peers := unittest.NewProtectedMap[peer.ID, struct{}]() @@ -205,7 +205,7 @@ func TestConnGater(t *testing.T) { p2ptest.StartNode(t, signalerCtx, node2, 100*time.Millisecond) defer p2ptest.StopNode(t, node2, cancel, 100*time.Millisecond) - node2Info, err := utils.PeerAddressInfo(identity2) + node2Info, err := utils.PeerAddressInfo(identity2.IdentitySkeleton) assert.NoError(t, err) node1.Host().Peerstore().AddAddrs(node2Info.ID, node2Info.Addrs, peerstore.PermanentAddrTTL) @@ -460,7 +460,7 @@ func createConcurrentStreams(t *testing.T, ctx context.Context, nodes []p2p.LibP continue } - pInfo, err := utils.PeerAddressInfo(*ids[i]) + pInfo, err := utils.PeerAddressInfo(ids[i].IdentitySkeleton) require.NoError(t, err) this.Host().Peerstore().AddAddrs(pInfo.ID, pInfo.Addrs, peerstore.AddressTTL) diff --git a/network/p2p/p2pnode/libp2pStream_test.go b/network/p2p/p2pnode/libp2pStream_test.go index e3b7bf281b3..05645cce61f 100644 --- a/network/p2p/p2pnode/libp2pStream_test.go +++ b/network/p2p/p2pnode/libp2pStream_test.go @@ -57,7 +57,7 @@ func TestStreamClosing(t *testing.T) { p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) - nodeInfo1, err := utils.PeerAddressInfo(*identities[1]) + nodeInfo1, err := utils.PeerAddressInfo(identities[1].IdentitySkeleton) require.NoError(t, err) senderWG := sync.WaitGroup{} @@ -176,7 +176,7 @@ func testCreateStream(t *testing.T, sporkId flow.Identifier, unicasts []protocol streamCount := 100 var streams []network.Stream for i := 0; i < streamCount; i++ { - pInfo, err := utils.PeerAddressInfo(*id2) + pInfo, err := utils.PeerAddressInfo(id2.IdentitySkeleton) require.NoError(t, err) nodes[0].Host().Peerstore().AddAddrs(pInfo.ID, pInfo.Addrs, peerstore.AddressTTL) anotherStream, err := nodes[0].CreateStream(ctx, pInfo.ID) @@ -243,7 +243,7 @@ func TestCreateStream_FallBack(t *testing.T) { streamCount := 100 var streams []network.Stream for i := 0; i < streamCount; i++ { - pInfo, err := utils.PeerAddressInfo(otherId) + pInfo, err := 
utils.PeerAddressInfo(otherId.IdentitySkeleton) require.NoError(t, err) thisNode.Host().Peerstore().AddAddrs(pInfo.ID, pInfo.Addrs, peerstore.AddressTTL) @@ -296,7 +296,7 @@ func TestCreateStreamIsConcurrencySafe(t *testing.T) { p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) - nodeInfo1, err := utils.PeerAddressInfo(*identities[1]) + nodeInfo1, err := utils.PeerAddressInfo(identities[1].IdentitySkeleton) require.NoError(t, err) wg := sync.WaitGroup{} @@ -359,7 +359,7 @@ func TestNoBackoffWhenCreatingStream(t *testing.T) { defer p2ptest.StopNode(t, node1, cancel1, 100*time.Millisecond) id2 := identities[1] - pInfo, err := utils.PeerAddressInfo(*id2) + pInfo, err := utils.PeerAddressInfo(id2.IdentitySkeleton) require.NoError(t, err) nodes[0].Host().Peerstore().AddAddrs(pInfo.ID, pInfo.Addrs, peerstore.AddressTTL) maxTimeToWait := p2pnode.MaxConnectAttempt * unicast.MaxRetryJitter * time.Millisecond @@ -519,7 +519,7 @@ func TestCreateStreamTimeoutWithUnresponsiveNode(t *testing.T) { require.NoError(t, listener.Close()) }() - silentNodeInfo, err := utils.PeerAddressInfo(silentNodeId) + silentNodeInfo, err := utils.PeerAddressInfo(silentNodeId.IdentitySkeleton) require.NoError(t, err) timeout := 1 * time.Second @@ -558,7 +558,7 @@ func TestCreateStreamIsConcurrent(t *testing.T) { p2ptest.StartNodes(t, signalerCtx, goodNodes, 100*time.Millisecond) defer p2ptest.StopNodes(t, goodNodes, cancel, 100*time.Millisecond) - goodNodeInfo1, err := utils.PeerAddressInfo(*goodNodeIds[1]) + goodNodeInfo1, err := utils.PeerAddressInfo(goodNodeIds[1].IdentitySkeleton) require.NoError(t, err) // create a silent node which never replies @@ -566,7 +566,7 @@ func TestCreateStreamIsConcurrent(t *testing.T) { defer func() { require.NoError(t, listener.Close()) }() - silentNodeInfo, err := utils.PeerAddressInfo(silentNodeId) + silentNodeInfo, err := utils.PeerAddressInfo(silentNodeId.IdentitySkeleton) require.NoError(t, err) // creates a stream to unresponsive node and makes sure that the stream creation is blocked diff --git a/network/p2p/p2pnode/libp2pUtils_test.go b/network/p2p/p2pnode/libp2pUtils_test.go index 7d4d676c66d..a7de1d181ac 100644 --- a/network/p2p/p2pnode/libp2pUtils_test.go +++ b/network/p2p/p2pnode/libp2pUtils_test.go @@ -31,7 +31,7 @@ func TestLibP2PUtilsTestSuite(t *testing.T) { func (ts *LibP2PUtilsTestSuite) TestPeerInfoFromID() { ids, exceptedPeerInfos := idsAndPeerInfos(ts.T()) for i, id := range ids { - actualAddrInfo, err := utils.PeerAddressInfo(*id) + actualAddrInfo, err := utils.PeerAddressInfo(id.IdentitySkeleton) assert.NoError(ts.T(), err) assert.Equal(ts.T(), exceptedPeerInfos[i].String(), actualAddrInfo.String()) } @@ -85,6 +85,6 @@ func BenchmarkPeerInfoFromID(b *testing.B) { id.Address = "1.1.1.1:3569" b.StartTimer() for n := 0; n < b.N; n++ { - _, _ = utils.PeerAddressInfo(*id) + _, _ = utils.PeerAddressInfo(id.IdentitySkeleton) } } diff --git a/network/p2p/test/fixtures.go b/network/p2p/test/fixtures.go index d2d8ebfac9c..366800cb1ec 100644 --- a/network/p2p/test/fixtures.go +++ b/network/p2p/test/fixtures.go @@ -401,7 +401,7 @@ func LetNodesDiscoverEachOther(t *testing.T, ctx context.Context, nodes []p2p.Li if node == other { continue } - otherPInfo, err := utils.PeerAddressInfo(*ids[i]) + otherPInfo, err := utils.PeerAddressInfo(ids[i].IdentitySkeleton) require.NoError(t, err) require.NoError(t, node.AddPeer(ctx, otherPInfo)) } diff --git a/network/p2p/test/sporking_test.go 
b/network/p2p/test/sporking_test.go index bee29c54aed..6488247bf07 100644 --- a/network/p2p/test/sporking_test.go +++ b/network/p2p/test/sporking_test.go @@ -84,7 +84,7 @@ func TestCrosstalkPreventionOnNetworkKeyChange(t *testing.T) { p2ptest.StartNode(t, signalerCtx2, node2, 100*time.Millisecond) - peerInfo2, err := utils.PeerAddressInfo(id2) + peerInfo2, err := utils.PeerAddressInfo(id2.IdentitySkeleton) require.NoError(t, err) // create stream from node 1 to node 2 @@ -144,7 +144,7 @@ func TestOneToOneCrosstalkPrevention(t *testing.T) { p2ptest.StartNode(t, signalerCtx1, node1, 100*time.Millisecond) defer p2ptest.StopNode(t, node1, cancel1, 100*time.Millisecond) - peerInfo1, err := utils.PeerAddressInfo(id1) + peerInfo1, err := utils.PeerAddressInfo(id1.IdentitySkeleton) require.NoError(t, err) // create and start node 2 on localhost and random port @@ -216,7 +216,7 @@ func TestOneToKCrosstalkPrevention(t *testing.T) { p2ptest.StartNode(t, signalerCtx2, node2, 100*time.Millisecond) defer p2ptest.StopNode(t, node2, cancel2, 100*time.Millisecond) - pInfo2, err := utils.PeerAddressInfo(id2) + pInfo2, err := utils.PeerAddressInfo(id2.IdentitySkeleton) require.NoError(t, err) // spork topic is derived by suffixing the channel with the root block ID diff --git a/network/p2p/test/topic_validator_test.go b/network/p2p/test/topic_validator_test.go index b6f0dfe7ba5..0a4f854b6ab 100644 --- a/network/p2p/test/topic_validator_test.go +++ b/network/p2p/test/topic_validator_test.go @@ -68,7 +68,7 @@ func TestTopicValidator_Unstaked(t *testing.T) { return nil } - pInfo2, err := utils.PeerAddressInfo(identity2) + pInfo2, err := utils.PeerAddressInfo(identity2.IdentitySkeleton) require.NoError(t, err) // node1 is connected to node2 @@ -123,7 +123,7 @@ func TestTopicValidator_PublicChannel(t *testing.T) { channel := channels.PublicSyncCommittee topic := channels.TopicFromChannel(channel, sporkId) - pInfo2, err := utils.PeerAddressInfo(identity2) + pInfo2, err := utils.PeerAddressInfo(identity2.IdentitySkeleton) require.NoError(t, err) // node1 is connected to node2 @@ -183,7 +183,7 @@ func TestTopicValidator_TopicMismatch(t *testing.T) { channel := channels.ConsensusCommittee topic := channels.TopicFromChannel(channel, sporkId) - pInfo2, err := utils.PeerAddressInfo(identity2) + pInfo2, err := utils.PeerAddressInfo(identity2.IdentitySkeleton) require.NoError(t, err) // node1 is connected to node2 @@ -235,7 +235,7 @@ func TestTopicValidator_InvalidTopic(t *testing.T) { topic := channels.Topic("invalid-topic") - pInfo2, err := utils.PeerAddressInfo(identity2) + pInfo2, err := utils.PeerAddressInfo(identity2.IdentitySkeleton) require.NoError(t, err) // node1 is connected to node2 @@ -307,10 +307,10 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { authorizedSenderValidator := validator.NewAuthorizedSenderValidator(logger, violationsConsumer, getIdentity) pubsubMessageValidator := authorizedSenderValidator.PubSubMessageValidator(channel) - pInfo1, err := utils.PeerAddressInfo(identity1) + pInfo1, err := utils.PeerAddressInfo(identity1.IdentitySkeleton) require.NoError(t, err) - pInfo2, err := utils.PeerAddressInfo(identity2) + pInfo2, err := utils.PeerAddressInfo(identity2.IdentitySkeleton) require.NoError(t, err) // node1 is connected to node2, and the an1 is connected to node1 @@ -416,7 +416,7 @@ func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { authorizedSenderValidator := validator.NewAuthorizedSenderValidator(logger, violationsConsumer, getIdentity) pubsubMessageValidator := 
authorizedSenderValidator.PubSubMessageValidator(channel) - pInfo2, err := utils.PeerAddressInfo(identity2) + pInfo2, err := utils.PeerAddressInfo(identity2.IdentitySkeleton) require.NoError(t, err) // node1 is connected to node2 @@ -489,10 +489,10 @@ func TestAuthorizedSenderValidator_Ejected(t *testing.T) { authorizedSenderValidator := validator.NewAuthorizedSenderValidator(logger, violationsConsumer, getIdentity) pubsubMessageValidator := authorizedSenderValidator.PubSubMessageValidator(channel) - pInfo1, err := utils.PeerAddressInfo(identity1) + pInfo1, err := utils.PeerAddressInfo(identity1.IdentitySkeleton) require.NoError(t, err) - pInfo2, err := utils.PeerAddressInfo(identity2) + pInfo2, err := utils.PeerAddressInfo(identity2.IdentitySkeleton) require.NoError(t, err) // node1 is connected to node2, and the an1 is connected to node1 @@ -584,10 +584,10 @@ func TestAuthorizedSenderValidator_ClusterChannel(t *testing.T) { authorizedSenderValidator := validator.NewAuthorizedSenderValidator(logger, violationsConsumer, getIdentity) pubsubMessageValidator := authorizedSenderValidator.PubSubMessageValidator(channel) - pInfo1, err := utils.PeerAddressInfo(identity1) + pInfo1, err := utils.PeerAddressInfo(identity1.IdentitySkeleton) require.NoError(t, err) - pInfo2, err := utils.PeerAddressInfo(identity2) + pInfo2, err := utils.PeerAddressInfo(identity2.IdentitySkeleton) require.NoError(t, err) // ln3 <-> sn1 <-> sn2 From cff04e26021d92a9e6f152eb62cd3fb4fcd04b85 Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Tue, 4 Jul 2023 15:35:35 +0300 Subject: [PATCH 05/46] Linted --- model/convert/fixtures_test.go | 28 +++++++++++------------ utils/unittest/service_events_fixtures.go | 28 +++++++++++------------ 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/model/convert/fixtures_test.go b/model/convert/fixtures_test.go index cc3dc6996e3..56c32ec93e3 100644 --- a/model/convert/fixtures_test.go +++ b/model/convert/fixtures_test.go @@ -47,7 +47,7 @@ func EpochSetupFixture(chain flow.ChainID) (flow.Event, *flow.EpochSetup) { }, Participants: flow.IdentityList{ { - flow.IdentitySkeleton{ + IdentitySkeleton: flow.IdentitySkeleton{ Role: flow.RoleCollection, NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000001"), Address: "1.flow.com", @@ -55,13 +55,13 @@ func EpochSetupFixture(chain flow.ChainID) (flow.Event, *flow.EpochSetup) { StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), InitialWeight: 100, }, - flow.DynamicIdentity{ + DynamicIdentity: flow.DynamicIdentity{ Weight: 100, Ejected: false, }, }, { - flow.IdentitySkeleton{ + IdentitySkeleton: flow.IdentitySkeleton{ Role: flow.RoleCollection, NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000002"), Address: "2.flow.com", @@ -69,13 +69,13 @@ func EpochSetupFixture(chain flow.ChainID) (flow.Event, *flow.EpochSetup) { StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), InitialWeight: 100, }, - flow.DynamicIdentity{ + DynamicIdentity: flow.DynamicIdentity{ Weight: 100, Ejected: false, }, }, { - flow.IdentitySkeleton{ + 
IdentitySkeleton: flow.IdentitySkeleton{ Role: flow.RoleCollection, NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000003"), Address: "3.flow.com", @@ -83,13 +83,13 @@ func EpochSetupFixture(chain flow.ChainID) (flow.Event, *flow.EpochSetup) { StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), InitialWeight: 100, }, - flow.DynamicIdentity{ + DynamicIdentity: flow.DynamicIdentity{ Weight: 100, Ejected: false, }, }, { - flow.IdentitySkeleton{ + IdentitySkeleton: flow.IdentitySkeleton{ Role: flow.RoleCollection, NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000004"), Address: "4.flow.com", @@ -97,13 +97,13 @@ func EpochSetupFixture(chain flow.ChainID) (flow.Event, *flow.EpochSetup) { StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), InitialWeight: 100, }, - flow.DynamicIdentity{ + DynamicIdentity: flow.DynamicIdentity{ Weight: 100, Ejected: false, }, }, { - flow.IdentitySkeleton{ + IdentitySkeleton: flow.IdentitySkeleton{ Role: flow.RoleConsensus, NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000011"), Address: "11.flow.com", @@ -111,13 +111,13 @@ func EpochSetupFixture(chain flow.ChainID) (flow.Event, *flow.EpochSetup) { StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "8207559cd7136af378bba53a8f0196dee3849a3ab02897c1995c3e3f6ca0c4a776c3ae869d1ddbb473090054be2400ad06d7910aa2c5d1780220fdf3765a3c1764bce10c6fe66a5a2be51a422e878518bd750424bb56b8a0ecf0f8ad2057e83f"), InitialWeight: 100, }, - flow.DynamicIdentity{ + DynamicIdentity: flow.DynamicIdentity{ Weight: 100, Ejected: false, }, }, { - flow.IdentitySkeleton{ + IdentitySkeleton: flow.IdentitySkeleton{ Role: flow.RoleExecution, NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000021"), Address: "21.flow.com", @@ -125,13 +125,13 @@ func EpochSetupFixture(chain flow.ChainID) (flow.Event, *flow.EpochSetup) { StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "880b162b7ec138b36af401d07868cb08d25746d905395edbb4625bdf105d4bb2b2f4b0f4ae273a296a6efefa7ce9ccb914e39947ce0e83745125cab05d62516076ff0173ed472d3791ccef937597c9ea12381d76f547a092a4981d77ff3fba83"), InitialWeight: 100, }, - flow.DynamicIdentity{ + DynamicIdentity: flow.DynamicIdentity{ Weight: 100, Ejected: false, }, }, { - flow.IdentitySkeleton{ + IdentitySkeleton: flow.IdentitySkeleton{ Role: flow.RoleVerification, NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000031"), Address: "31.flow.com", @@ -139,7 +139,7 @@ func EpochSetupFixture(chain flow.ChainID) (flow.Event, *flow.EpochSetup) { StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "b1f97d0a06020eca97352e1adde72270ee713c7daf58da7e74bf72235321048b4841bdfc28227964bf18e371e266e32107d238358848bcc5d0977a0db4bda0b4c33d3874ff991e595e0f537c7b87b4ddce92038ebc7b295c9ea20a1492302aa7"), InitialWeight: 100, }, - flow.DynamicIdentity{ + DynamicIdentity: flow.DynamicIdentity{ Weight: 100, Ejected: false, }, diff --git 
a/utils/unittest/service_events_fixtures.go b/utils/unittest/service_events_fixtures.go index 56337e8588d..c5581bfec72 100644 --- a/utils/unittest/service_events_fixtures.go +++ b/utils/unittest/service_events_fixtures.go @@ -49,7 +49,7 @@ func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetu }, Participants: flow.IdentityList{ { - flow.IdentitySkeleton{ + IdentitySkeleton: flow.IdentitySkeleton{ Role: flow.RoleCollection, NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000001"), Address: "1.flow.com", @@ -57,13 +57,13 @@ func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetu StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), InitialWeight: 100, }, - flow.DynamicIdentity{ + DynamicIdentity: flow.DynamicIdentity{ Weight: 100, Ejected: false, }, }, { - flow.IdentitySkeleton{ + IdentitySkeleton: flow.IdentitySkeleton{ Role: flow.RoleCollection, NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000002"), Address: "2.flow.com", @@ -71,13 +71,13 @@ func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetu StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), InitialWeight: 100, }, - flow.DynamicIdentity{ + DynamicIdentity: flow.DynamicIdentity{ Weight: 100, Ejected: false, }, }, { - flow.IdentitySkeleton{ + IdentitySkeleton: flow.IdentitySkeleton{ Role: flow.RoleCollection, NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000003"), Address: "3.flow.com", @@ -85,13 +85,13 @@ func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetu StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), InitialWeight: 100, }, - flow.DynamicIdentity{ + DynamicIdentity: flow.DynamicIdentity{ Weight: 100, Ejected: false, }, }, { - flow.IdentitySkeleton{ + IdentitySkeleton: flow.IdentitySkeleton{ Role: flow.RoleCollection, NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000004"), Address: "4.flow.com", @@ -99,13 +99,13 @@ func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetu StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), InitialWeight: 100, }, - flow.DynamicIdentity{ + DynamicIdentity: flow.DynamicIdentity{ Weight: 100, Ejected: false, }, }, { - flow.IdentitySkeleton{ + IdentitySkeleton: flow.IdentitySkeleton{ Role: flow.RoleConsensus, NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000011"), Address: "11.flow.com", @@ -113,13 +113,13 @@ func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetu StakingPubKey: 
MustDecodePublicKeyHex(crypto.BLSBLS12381, "8207559cd7136af378bba53a8f0196dee3849a3ab02897c1995c3e3f6ca0c4a776c3ae869d1ddbb473090054be2400ad06d7910aa2c5d1780220fdf3765a3c1764bce10c6fe66a5a2be51a422e878518bd750424bb56b8a0ecf0f8ad2057e83f"), InitialWeight: 100, }, - flow.DynamicIdentity{ + DynamicIdentity: flow.DynamicIdentity{ Weight: 100, Ejected: false, }, }, { - flow.IdentitySkeleton{ + IdentitySkeleton: flow.IdentitySkeleton{ Role: flow.RoleExecution, NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000021"), Address: "21.flow.com", @@ -127,13 +127,13 @@ func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetu StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "880b162b7ec138b36af401d07868cb08d25746d905395edbb4625bdf105d4bb2b2f4b0f4ae273a296a6efefa7ce9ccb914e39947ce0e83745125cab05d62516076ff0173ed472d3791ccef937597c9ea12381d76f547a092a4981d77ff3fba83"), InitialWeight: 100, }, - flow.DynamicIdentity{ + DynamicIdentity: flow.DynamicIdentity{ Weight: 100, Ejected: false, }, }, { - flow.IdentitySkeleton{ + IdentitySkeleton: flow.IdentitySkeleton{ Role: flow.RoleVerification, NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000031"), Address: "31.flow.com", @@ -141,7 +141,7 @@ func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetu StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "b1f97d0a06020eca97352e1adde72270ee713c7daf58da7e74bf72235321048b4841bdfc28227964bf18e371e266e32107d238358848bcc5d0977a0db4bda0b4c33d3874ff991e595e0f537c7b87b4ddce92038ebc7b295c9ea20a1492302aa7"), InitialWeight: 100, }, - flow.DynamicIdentity{ + DynamicIdentity: flow.DynamicIdentity{ Weight: 100, Ejected: false, }, From b3018778ee498cf5701157691788a7a35a0ede93 Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Tue, 4 Jul 2023 16:00:56 +0300 Subject: [PATCH 06/46] Fixed more tests --- .../cmd/execution-state-extract/export_report.json | 4 ++-- model/flow/identity.go | 13 ++++++++++++- model/flow/identity_test.go | 3 ++- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/cmd/util/cmd/execution-state-extract/export_report.json b/cmd/util/cmd/execution-state-extract/export_report.json index 9b8d0b03753..5f0dd5f132a 100644 --- a/cmd/util/cmd/execution-state-extract/export_report.json +++ b/cmd/util/cmd/execution-state-extract/export_report.json @@ -1,6 +1,6 @@ { "EpochCounter": 0, - "PreviousStateCommitment": "829fd1ec06c9a40e5ceba9cff537e30806332aba6065da21a11556136d171c98", - "CurrentStateCommitment": "829fd1ec06c9a40e5ceba9cff537e30806332aba6065da21a11556136d171c98", + "PreviousStateCommitment": "170cbb4dd9c2b90362d09e5c05641de30e1ac6aa938ed4ea866fa592fba16566", + "CurrentStateCommitment": "170cbb4dd9c2b90362d09e5c05641de30e1ac6aa938ed4ea866fa592fba16566", "ReportSucceeded": true } \ No newline at end of file diff --git a/model/flow/identity.go b/model/flow/identity.go index 2be78ef479a..58194420f44 100644 --- a/model/flow/identity.go +++ b/model/flow/identity.go @@ -130,7 +130,9 @@ type encodableIdentity struct { NodeID Identifier Address string `json:",omitempty"` Role Role + InitialWeight uint64 Weight uint64 + Ejected bool StakingPubKey []byte NetworkPubKey []byte } @@ -145,7 +147,14 @@ type decodableIdentity struct { } func encodableFromIdentity(iy Identity) (encodableIdentity, error) { - ie := encodableIdentity{iy.NodeID, iy.Address, iy.Role, iy.Weight, nil, nil} + ie := encodableIdentity{ + NodeID: iy.NodeID, + Address: iy.Address, + 
Role: iy.Role, + InitialWeight: iy.InitialWeight, + Weight: iy.Weight, + Ejected: iy.Ejected, + } if iy.StakingPubKey != nil { ie.StakingPubKey = iy.StakingPubKey.Encode() } @@ -208,7 +217,9 @@ func identityFromEncodable(ie encodableIdentity, identity *Identity) error { identity.NodeID = ie.NodeID identity.Address = ie.Address identity.Role = ie.Role + identity.InitialWeight = ie.InitialWeight identity.Weight = ie.Weight + identity.Ejected = ie.Ejected var err error if ie.StakingPubKey != nil { if identity.StakingPubKey, err = crypto.DecodePublicKey(crypto.BLSBLS12381, ie.StakingPubKey); err != nil { diff --git a/model/flow/identity_test.go b/model/flow/identity_test.go index 7f7a420b00a..7d98974dfe3 100644 --- a/model/flow/identity_test.go +++ b/model/flow/identity_test.go @@ -81,7 +81,8 @@ func TestIdentityEncodingJSON(t *testing.T) { enc, err := json.Marshal(identity) require.NoError(t, err) // emulate the old encoding by replacing the new field with old field name - enc = []byte(strings.Replace(string(enc), "Weight", "Stake", 1)) + // NOTE: use replace in such way to avoid replacing InitialWeight field. + enc = []byte(strings.Replace(string(enc), "\"Weight", "\"Stake", 1)) var dec flow.Identity err = json.Unmarshal(enc, &dec) require.NoError(t, err) From a0599fbe50480deeb9a0a600484062d5fe3482be Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Tue, 4 Jul 2023 16:23:33 +0300 Subject: [PATCH 07/46] Updated Replicas to return identity skeleton. Updated implementors and fixed usages. Updated signer indices --- consensus/hotstuff/committee.go | 6 ++-- .../hotstuff/committees/cluster_committee.go | 4 +-- .../committees/consensus_committee.go | 21 +++++++----- .../hotstuff/committees/leader/cluster.go | 2 +- .../hotstuff/committees/leader/consensus.go | 2 +- .../committees/leader/leader_selection.go | 4 +-- .../hotstuff/committees/metrics_wrapper.go | 4 +-- consensus/hotstuff/committees/static.go | 4 +-- consensus/hotstuff/mocks/dynamic_committee.go | 4 +-- consensus/hotstuff/mocks/packer.go | 2 +- consensus/hotstuff/mocks/replicas.go | 4 +-- consensus/hotstuff/mocks/validator.go | 2 +- consensus/hotstuff/signature.go | 2 +- .../signature/block_signer_decoder.go | 3 +- consensus/hotstuff/signature/packer.go | 2 +- .../hotstuff/timeoutcollector/aggregation.go | 4 +-- consensus/hotstuff/validator.go | 2 +- .../hotstuff/validator/metrics_wrapper.go | 2 +- consensus/hotstuff/validator/validator.go | 2 +- .../verification/combined_verifier_v2.go | 6 ++-- .../verification/combined_verifier_v3.go | 6 ++-- .../hotstuff/verification/staking_verifier.go | 6 ++-- consensus/hotstuff/verifier.go | 6 ++-- engine/testutil/nodes.go | 4 +-- model/flow/identity.go | 32 +++++++++++++++++-- module/signature/signer_indices.go | 14 ++++---- 26 files changed, 90 insertions(+), 60 deletions(-) diff --git a/consensus/hotstuff/committee.go b/consensus/hotstuff/committee.go index cac2e3a877e..caf2e0f0e34 100644 --- a/consensus/hotstuff/committee.go +++ b/consensus/hotstuff/committee.go @@ -78,8 +78,7 @@ type Replicas interface { // Returns the following expected errors for invalid inputs: // - model.ErrViewForUnknownEpoch if no epoch containing the given view is known // - // TODO: should return identity skeleton https://github.com/dapperlabs/flow-go/issues/6232 - IdentitiesByEpoch(view uint64) (flow.IdentityList, error) + IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) // IdentityByEpoch returns the full Identity for specified HotStuff participant. 
// The node must be a legitimate HotStuff participant with NON-ZERO WEIGHT at the specified block. @@ -92,8 +91,7 @@ type Replicas interface { // Returns the following expected errors for invalid inputs: // - model.ErrViewForUnknownEpoch if no epoch containing the given view is known // - // TODO: should return identity skeleton https://github.com/dapperlabs/flow-go/issues/6232 - IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.Identity, error) + IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) } // DynamicCommittee extends Replicas to provide the consensus committee for the purposes diff --git a/consensus/hotstuff/committees/cluster_committee.go b/consensus/hotstuff/committees/cluster_committee.go index 565261dd7ee..a7c61034c34 100644 --- a/consensus/hotstuff/committees/cluster_committee.go +++ b/consensus/hotstuff/committees/cluster_committee.go @@ -130,7 +130,7 @@ func (c *Cluster) IdentityByBlock(blockID flow.Identifier, nodeID flow.Identifie // IdentitiesByEpoch returns the initial cluster members for this epoch. The view // parameter is the view in the cluster consensus. Since clusters only exist for // one epoch, we don't need to check the view. -func (c *Cluster) IdentitiesByEpoch(_ uint64) (flow.IdentityList, error) { +func (c *Cluster) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) { return c.initialClusterMembers, nil } @@ -141,7 +141,7 @@ func (c *Cluster) IdentitiesByEpoch(_ uint64) (flow.IdentityList, error) { // Returns: // - model.InvalidSignerError if nodeID was not listed by the Epoch Setup event as an // authorized participant in this cluster -func (c *Cluster) IdentityByEpoch(_ uint64, nodeID flow.Identifier) (*flow.Identity, error) { +func (c *Cluster) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { identity, ok := c.initialClusterMembers.ByNodeID(nodeID) if !ok { return nil, model.NewInvalidSignerErrorf("node %v is not an authorized hotstuff participant", nodeID) diff --git a/consensus/hotstuff/committees/consensus_committee.go b/consensus/hotstuff/committees/consensus_committee.go index 2c81adc78f3..4b110a9b895 100644 --- a/consensus/hotstuff/committees/consensus_committee.go +++ b/consensus/hotstuff/committees/consensus_committee.go @@ -26,8 +26,8 @@ type staticEpochInfo struct { randomSource []byte // random source of epoch leaders *leader.LeaderSelection // pre-computed leader selection for the epoch // TODO: should use identity skeleton https://github.com/dapperlabs/flow-go/issues/6232 - initialCommittee flow.IdentityList - initialCommitteeMap map[flow.Identifier]*flow.Identity + initialCommittee flow.IdentitySkeletonList + initialCommitteeMap map[flow.Identifier]*flow.IdentitySkeleton weightThresholdForQC uint64 // computed based on initial committee weights weightThresholdForTO uint64 // computed based on initial committee weights dkg hotstuff.DKG @@ -56,7 +56,7 @@ func newStaticEpochInfo(epoch protocol.Epoch) (*staticEpochInfo, error) { if err != nil { return nil, fmt.Errorf("could not initial identities: %w", err) } - initialCommittee := initialIdentities.Filter(filter.IsVotingConsensusCommitteeMember) + initialCommittee := initialIdentities.Filter(filter.IsVotingConsensusCommitteeMember).ToSkeleton() dkg, err := epoch.DKG() if err != nil { return nil, fmt.Errorf("could not get dkg: %w", err) @@ -89,7 +89,12 @@ func newEmergencyFallbackEpoch(lastCommittedEpoch *staticEpochInfo) (*staticEpoc if err != nil { return nil, fmt.Errorf("could 
not create rng from seed: %w", err) } - leaders, err := leader.ComputeLeaderSelection(lastCommittedEpoch.finalView+1, rng, leader.EstimatedSixMonthOfViews, lastCommittedEpoch.initialCommittee) + leaders, err := leader.ComputeLeaderSelection( + lastCommittedEpoch.finalView+1, + rng, + leader.EstimatedSixMonthOfViews, + lastCommittedEpoch.initialCommittee, + ) if err != nil { return nil, fmt.Errorf("could not compute leader selection for fallback epoch: %w", err) } @@ -226,7 +231,7 @@ func (c *Consensus) IdentityByBlock(blockID flow.Identifier, nodeID flow.Identif // - model.ErrViewForUnknownEpoch if no committed epoch containing the given view is known. // This is an expected error and must be handled. // - unspecific error in case of unexpected problems and bugs -func (c *Consensus) IdentitiesByEpoch(view uint64) (flow.IdentityList, error) { +func (c *Consensus) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) { epochInfo, err := c.staticEpochInfoByView(view) if err != nil { return nil, err @@ -245,14 +250,14 @@ func (c *Consensus) IdentitiesByEpoch(view uint64) (flow.IdentityList, error) { // - model.InvalidSignerError if nodeID was not listed by the Epoch Setup event as an // authorized consensus participants. // - unspecific error in case of unexpected problems and bugs -func (c *Consensus) IdentityByEpoch(view uint64, nodeID flow.Identifier) (*flow.Identity, error) { +func (c *Consensus) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { epochInfo, err := c.staticEpochInfoByView(view) if err != nil { return nil, err } - identity, ok := epochInfo.initialCommitteeMap[nodeID] + identity, ok := epochInfo.initialCommitteeMap[participantID] if !ok { - return nil, model.NewInvalidSignerErrorf("id %v is not a valid node id", nodeID) + return nil, model.NewInvalidSignerErrorf("id %v is not a valid node id", participantID) } return identity, nil } diff --git a/consensus/hotstuff/committees/leader/cluster.go b/consensus/hotstuff/committees/leader/cluster.go index b1a2af13be2..f530e176f56 100644 --- a/consensus/hotstuff/committees/leader/cluster.go +++ b/consensus/hotstuff/committees/leader/cluster.go @@ -40,7 +40,7 @@ func SelectionForCluster(cluster protocol.Cluster, epoch protocol.Epoch) (*Leade firstView, rng, int(finalView-firstView+1), - identities, + identities.ToSkeleton(), ) return leaders, err } diff --git a/consensus/hotstuff/committees/leader/consensus.go b/consensus/hotstuff/committees/leader/consensus.go index 17f8c108069..a51ae3b758f 100644 --- a/consensus/hotstuff/committees/leader/consensus.go +++ b/consensus/hotstuff/committees/leader/consensus.go @@ -43,7 +43,7 @@ func SelectionForConsensus(epoch protocol.Epoch) (*LeaderSelection, error) { firstView, rng, int(finalView-firstView+1), // add 1 because both first/final view are inclusive - identities.Filter(filter.IsVotingConsensusCommitteeMember), + identities.Filter(filter.IsVotingConsensusCommitteeMember).ToSkeleton(), ) return leaders, err } diff --git a/consensus/hotstuff/committees/leader/leader_selection.go b/consensus/hotstuff/committees/leader/leader_selection.go index bc1936cc197..ec84a4c7e46 100644 --- a/consensus/hotstuff/committees/leader/leader_selection.go +++ b/consensus/hotstuff/committees/leader/leader_selection.go @@ -93,7 +93,7 @@ func ComputeLeaderSelection( firstView uint64, rng random.Rand, count int, - identities flow.IdentityList, + identities flow.IdentitySkeletonList, ) (*LeaderSelection, error) { if count < 1 { @@ -102,7 +102,7 @@ func 
ComputeLeaderSelection( weights := make([]uint64, 0, len(identities)) for _, id := range identities { - weights = append(weights, id.Weight) + weights = append(weights, id.InitialWeight) } leaders, err := weightedRandomSelection(rng, count, weights) diff --git a/consensus/hotstuff/committees/metrics_wrapper.go b/consensus/hotstuff/committees/metrics_wrapper.go index e1bdcbc059a..c790657dcab 100644 --- a/consensus/hotstuff/committees/metrics_wrapper.go +++ b/consensus/hotstuff/committees/metrics_wrapper.go @@ -43,14 +43,14 @@ func (w CommitteeMetricsWrapper) IdentityByBlock(blockID flow.Identifier, partic return identity, err } -func (w CommitteeMetricsWrapper) IdentitiesByEpoch(view uint64) (flow.IdentityList, error) { +func (w CommitteeMetricsWrapper) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) { processStart := time.Now() identities, err := w.committee.IdentitiesByEpoch(view) w.metrics.CommitteeProcessingDuration(time.Since(processStart)) return identities, err } -func (w CommitteeMetricsWrapper) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.Identity, error) { +func (w CommitteeMetricsWrapper) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { processStart := time.Now() identity, err := w.committee.IdentityByEpoch(view, participantID) w.metrics.CommitteeProcessingDuration(time.Since(processStart)) diff --git a/consensus/hotstuff/committees/static.go b/consensus/hotstuff/committees/static.go index b95c6448dff..2a6615a9fc6 100644 --- a/consensus/hotstuff/committees/static.go +++ b/consensus/hotstuff/committees/static.go @@ -55,11 +55,11 @@ func (s Static) IdentityByBlock(_ flow.Identifier, participantID flow.Identifier return identity, nil } -func (s Static) IdentitiesByEpoch(_ uint64) (flow.IdentityList, error) { +func (s Static) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) { return s.participants, nil } -func (s Static) IdentityByEpoch(_ uint64, participantID flow.Identifier) (*flow.Identity, error) { +func (s Static) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { identity, ok := s.participants.ByNodeID(participantID) if !ok { return nil, model.NewInvalidSignerErrorf("unknown participant %x", participantID) diff --git a/consensus/hotstuff/mocks/dynamic_committee.go b/consensus/hotstuff/mocks/dynamic_committee.go index 67acf8f8bcb..e576c1b847d 100644 --- a/consensus/hotstuff/mocks/dynamic_committee.go +++ b/consensus/hotstuff/mocks/dynamic_committee.go @@ -67,7 +67,7 @@ func (_m *DynamicCommittee) IdentitiesByBlock(blockID flow.Identifier) (flow.Ide } // IdentitiesByEpoch provides a mock function with given fields: view -func (_m *DynamicCommittee) IdentitiesByEpoch(view uint64) (flow.IdentityList, error) { +func (_m *DynamicCommittee) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) { ret := _m.Called(view) var r0 flow.IdentityList @@ -119,7 +119,7 @@ func (_m *DynamicCommittee) IdentityByBlock(blockID flow.Identifier, participant } // IdentityByEpoch provides a mock function with given fields: view, participantID -func (_m *DynamicCommittee) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.Identity, error) { +func (_m *DynamicCommittee) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { ret := _m.Called(view, participantID) var r0 *flow.Identity diff --git a/consensus/hotstuff/mocks/packer.go b/consensus/hotstuff/mocks/packer.go index b9d7bb573cf..26c26cf501c 
100644 --- a/consensus/hotstuff/mocks/packer.go +++ b/consensus/hotstuff/mocks/packer.go @@ -50,7 +50,7 @@ func (_m *Packer) Pack(view uint64, sig *hotstuff.BlockSignatureData) ([]byte, [ } // Unpack provides a mock function with given fields: signerIdentities, sigData -func (_m *Packer) Unpack(signerIdentities flow.IdentityList, sigData []byte) (*hotstuff.BlockSignatureData, error) { +func (_m *Packer) Unpack(signerIdentities flow.IdentitySkeletonList, sigData []byte) (*hotstuff.BlockSignatureData, error) { ret := _m.Called(signerIdentities, sigData) var r0 *hotstuff.BlockSignatureData diff --git a/consensus/hotstuff/mocks/replicas.go b/consensus/hotstuff/mocks/replicas.go index 965031dafd2..8e85b9f5a5c 100644 --- a/consensus/hotstuff/mocks/replicas.go +++ b/consensus/hotstuff/mocks/replicas.go @@ -41,7 +41,7 @@ func (_m *Replicas) DKG(view uint64) (hotstuff.DKG, error) { } // IdentitiesByEpoch provides a mock function with given fields: view -func (_m *Replicas) IdentitiesByEpoch(view uint64) (flow.IdentityList, error) { +func (_m *Replicas) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) { ret := _m.Called(view) var r0 flow.IdentityList @@ -67,7 +67,7 @@ func (_m *Replicas) IdentitiesByEpoch(view uint64) (flow.IdentityList, error) { } // IdentityByEpoch provides a mock function with given fields: view, participantID -func (_m *Replicas) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.Identity, error) { +func (_m *Replicas) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { ret := _m.Called(view, participantID) var r0 *flow.Identity diff --git a/consensus/hotstuff/mocks/validator.go b/consensus/hotstuff/mocks/validator.go index d31e02dd1c9..351cd03511b 100644 --- a/consensus/hotstuff/mocks/validator.go +++ b/consensus/hotstuff/mocks/validator.go @@ -58,7 +58,7 @@ func (_m *Validator) ValidateTC(tc *flow.TimeoutCertificate) error { } // ValidateVote provides a mock function with given fields: vote -func (_m *Validator) ValidateVote(vote *model.Vote) (*flow.Identity, error) { +func (_m *Validator) ValidateVote(vote *model.Vote) (*flow.IdentitySkeleton, error) { ret := _m.Called(vote) var r0 *flow.Identity diff --git a/consensus/hotstuff/signature.go b/consensus/hotstuff/signature.go index 0fc56748ab2..84b07fb778d 100644 --- a/consensus/hotstuff/signature.go +++ b/consensus/hotstuff/signature.go @@ -174,5 +174,5 @@ type Packer interface { // It returns: // - (sigData, nil) if successfully unpacked the signature data // - (nil, model.InvalidFormatError) if failed to unpack the signature data - Unpack(signerIdentities flow.IdentityList, sigData []byte) (*BlockSignatureData, error) + Unpack(signerIdentities flow.IdentitySkeletonList, sigData []byte) (*BlockSignatureData, error) } diff --git a/consensus/hotstuff/signature/block_signer_decoder.go b/consensus/hotstuff/signature/block_signer_decoder.go index 46a2036c50a..82c6f865350 100644 --- a/consensus/hotstuff/signature/block_signer_decoder.go +++ b/consensus/hotstuff/signature/block_signer_decoder.go @@ -44,11 +44,12 @@ func (b *BlockSignerDecoder) DecodeSignerIDs(header *flow.Header) (flow.Identifi // try asking by parent ID // TODO: this assumes no identity table changes within epochs, must be changed for Dynamic Protocol State // See https://github.com/onflow/flow-go/issues/4085 - members, err = b.IdentitiesByBlock(header.ParentID) + byBlockMembers, err := b.IdentitiesByBlock(header.ParentID) if err != nil { return nil, fmt.Errorf("could not retrieve identities 
for block %x with QC view %d for parent %x: %w", header.ID(), header.ParentView, header.ParentID, err) // state.ErrUnknownSnapshotReference or exception } + members = byBlockMembers.ToSkeleton() } else { return nil, fmt.Errorf("unexpected error retrieving identities for block %v: %w", header.ID(), err) } diff --git a/consensus/hotstuff/signature/packer.go b/consensus/hotstuff/signature/packer.go index 4b6652ce66f..20f819569b9 100644 --- a/consensus/hotstuff/signature/packer.go +++ b/consensus/hotstuff/signature/packer.go @@ -69,7 +69,7 @@ func (p *ConsensusSigDataPacker) Pack(view uint64, sig *hotstuff.BlockSignatureD // It returns: // - (sigData, nil) if successfully unpacked the signature data // - (nil, model.InvalidFormatError) if failed to unpack the signature data -func (p *ConsensusSigDataPacker) Unpack(signerIdentities flow.IdentityList, sigData []byte) (*hotstuff.BlockSignatureData, error) { +func (p *ConsensusSigDataPacker) Unpack(signerIdentities flow.IdentitySkeletonList, sigData []byte) (*hotstuff.BlockSignatureData, error) { // decode into typed data data, err := p.Decode(sigData) // all potential error are of type `model.InvalidFormatError` if err != nil { diff --git a/consensus/hotstuff/timeoutcollector/aggregation.go b/consensus/hotstuff/timeoutcollector/aggregation.go index 4a2c3ce5b2b..672eb5d3a52 100644 --- a/consensus/hotstuff/timeoutcollector/aggregation.go +++ b/consensus/hotstuff/timeoutcollector/aggregation.go @@ -63,7 +63,7 @@ var _ hotstuff.TimeoutSignatureAggregator = (*TimeoutSignatureAggregator)(nil) // signature aggregation task in the protocol. func NewTimeoutSignatureAggregator( view uint64, // view for which we are aggregating signatures - ids flow.IdentityList, // list of all authorized signers + ids flow.IdentitySkeletonList, // list of all authorized signers dsTag string, // domain separation tag used by the signature ) (*TimeoutSignatureAggregator, error) { if len(ids) == 0 { @@ -81,7 +81,7 @@ func NewTimeoutSignatureAggregator( for _, id := range ids { idToInfo[id.NodeID] = signerInfo{ pk: id.StakingPubKey, - weight: id.Weight, + weight: id.InitialWeight, } } diff --git a/consensus/hotstuff/validator.go b/consensus/hotstuff/validator.go index 5bcc77f1810..be3313e9f26 100644 --- a/consensus/hotstuff/validator.go +++ b/consensus/hotstuff/validator.go @@ -31,5 +31,5 @@ type Validator interface { // the following errors are expected: // * model.InvalidVoteError for invalid votes // * model.ErrViewForUnknownEpoch if the vote refers unknown epoch - ValidateVote(vote *model.Vote) (*flow.Identity, error) + ValidateVote(vote *model.Vote) (*flow.IdentitySkeleton, error) } diff --git a/consensus/hotstuff/validator/metrics_wrapper.go b/consensus/hotstuff/validator/metrics_wrapper.go index 127ca317094..8876acef248 100644 --- a/consensus/hotstuff/validator/metrics_wrapper.go +++ b/consensus/hotstuff/validator/metrics_wrapper.go @@ -47,7 +47,7 @@ func (w ValidatorMetricsWrapper) ValidateProposal(proposal *model.Proposal) erro return err } -func (w ValidatorMetricsWrapper) ValidateVote(vote *model.Vote) (*flow.Identity, error) { +func (w ValidatorMetricsWrapper) ValidateVote(vote *model.Vote) (*flow.IdentitySkeleton, error) { processStart := time.Now() identity, err := w.validator.ValidateVote(vote) w.metrics.ValidatorProcessingDuration(time.Since(processStart)) diff --git a/consensus/hotstuff/validator/validator.go b/consensus/hotstuff/validator/validator.go index b9cafdc5d89..933c3751619 100644 --- a/consensus/hotstuff/validator/validator.go +++ 
b/consensus/hotstuff/validator/validator.go @@ -294,7 +294,7 @@ func (v *Validator) ValidateProposal(proposal *model.Proposal) error { // - model.ErrViewForUnknownEpoch if the vote refers unknown epoch // // Any other error should be treated as exception -func (v *Validator) ValidateVote(vote *model.Vote) (*flow.Identity, error) { +func (v *Validator) ValidateVote(vote *model.Vote) (*flow.IdentitySkeleton, error) { voter, err := v.committee.IdentityByEpoch(vote.View, vote.SignerID) if model.IsInvalidSignerError(err) { return nil, newInvalidVoteError(vote, err) diff --git a/consensus/hotstuff/verification/combined_verifier_v2.go b/consensus/hotstuff/verification/combined_verifier_v2.go index ee67a4ea36a..e0894771d00 100644 --- a/consensus/hotstuff/verification/combined_verifier_v2.go +++ b/consensus/hotstuff/verification/combined_verifier_v2.go @@ -53,7 +53,7 @@ func NewCombinedVerifier(committee hotstuff.Replicas, packer hotstuff.Packer) *C // - model.ErrViewForUnknownEpoch if no epoch containing the given view is known // - unexpected errors should be treated as symptoms of bugs or uncovered // edge cases in the logic (i.e. as fatal) -func (c *CombinedVerifier) VerifyVote(signer *flow.Identity, sigData []byte, view uint64, blockID flow.Identifier) error { +func (c *CombinedVerifier) VerifyVote(signer *flow.IdentitySkeleton, sigData []byte, view uint64, blockID flow.Identifier) error { // create the to-be-signed message msg := MakeVoteMessage(view, blockID) @@ -120,7 +120,7 @@ func (c *CombinedVerifier) VerifyVote(signer *flow.Identity, sigData []byte, vie // - model.ErrInvalidSignature if a signature is invalid // - model.ErrViewForUnknownEpoch if no epoch containing the given view is known // - error if running into any unexpected exception (i.e. fatal error) -func (c *CombinedVerifier) VerifyQC(signers flow.IdentityList, sigData []byte, view uint64, blockID flow.Identifier) error { +func (c *CombinedVerifier) VerifyQC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, blockID flow.Identifier) error { dkg, err := c.committee.DKG(view) if err != nil { return fmt.Errorf("could not get dkg data: %w", err) @@ -160,7 +160,7 @@ func (c *CombinedVerifier) VerifyQC(signers flow.IdentityList, sigData []byte, v // - model.ErrInvalidSignature if a signature is invalid // - unexpected errors should be treated as symptoms of bugs or uncovered // edge cases in the logic (i.e. as fatal) -func (c *CombinedVerifier) VerifyTC(signers flow.IdentityList, sigData []byte, view uint64, highQCViews []uint64) error { +func (c *CombinedVerifier) VerifyTC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, highQCViews []uint64) error { stakingPks := signers.PublicStakingKeys() return verifyTCSignatureManyMessages(stakingPks, sigData, view, highQCViews, c.timeoutObjectHasher) } diff --git a/consensus/hotstuff/verification/combined_verifier_v3.go b/consensus/hotstuff/verification/combined_verifier_v3.go index 8f5f9acd8f0..4863690994b 100644 --- a/consensus/hotstuff/verification/combined_verifier_v3.go +++ b/consensus/hotstuff/verification/combined_verifier_v3.go @@ -57,7 +57,7 @@ func NewCombinedVerifierV3(committee hotstuff.Replicas, packer hotstuff.Packer) // // This implementation already support the cases, where the DKG committee is a // _strict subset_ of the full consensus committee. 
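Because the verifier entry points above now accept only the static part of an identity, everything a QC or TC check consumes is already present on the skeleton: the staking public key and the epoch-fixed InitialWeight. The helper below is a simplified, standalone illustration of that dependency (the real weight-threshold handling lives in the committee code); qcSignerInfo is an assumed name, not a function added by these commits.

package example

import (
	"github.com/onflow/flow-go/crypto"
	"github.com/onflow/flow-go/model/flow"
)

// qcSignerInfo gathers what a quorum-certificate check needs from the static
// identities: the signers' staking keys and their combined initial weight.
// Dynamic fields (Weight, Ejected) are deliberately not required here.
func qcSignerInfo(signers flow.IdentitySkeletonList) ([]crypto.PublicKey, uint64) {
	keys := make([]crypto.PublicKey, 0, len(signers))
	totalWeight := uint64(0)
	for _, signer := range signers {
		keys = append(keys, signer.StakingPubKey)
		totalWeight += signer.InitialWeight
	}
	return keys, totalWeight
}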
-func (c *CombinedVerifierV3) VerifyVote(signer *flow.Identity, sigData []byte, view uint64, blockID flow.Identifier) error { +func (c *CombinedVerifierV3) VerifyVote(signer *flow.IdentitySkeleton, sigData []byte, view uint64, blockID flow.Identifier) error { // create the to-be-signed message msg := MakeVoteMessage(view, blockID) @@ -125,7 +125,7 @@ func (c *CombinedVerifierV3) VerifyVote(signer *flow.Identity, sigData []byte, v // // This implementation already support the cases, where the DKG committee is a // _strict subset_ of the full consensus committee. -func (c *CombinedVerifierV3) VerifyQC(signers flow.IdentityList, sigData []byte, view uint64, blockID flow.Identifier) error { +func (c *CombinedVerifierV3) VerifyQC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, blockID flow.Identifier) error { signerIdentities := signers.Lookup() dkg, err := c.committee.DKG(view) if err != nil { @@ -227,7 +227,7 @@ func (c *CombinedVerifierV3) VerifyQC(signers flow.IdentityList, sigData []byte, // - model.ErrInvalidSignature if a signature is invalid // - unexpected errors should be treated as symptoms of bugs or uncovered // edge cases in the logic (i.e. as fatal) -func (c *CombinedVerifierV3) VerifyTC(signers flow.IdentityList, sigData []byte, view uint64, highQCViews []uint64) error { +func (c *CombinedVerifierV3) VerifyTC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, highQCViews []uint64) error { stakingPks := signers.PublicStakingKeys() return verifyTCSignatureManyMessages(stakingPks, sigData, view, highQCViews, c.timeoutObjectHasher) } diff --git a/consensus/hotstuff/verification/staking_verifier.go b/consensus/hotstuff/verification/staking_verifier.go index 60b2f45f4d5..207e11c8ad9 100644 --- a/consensus/hotstuff/verification/staking_verifier.go +++ b/consensus/hotstuff/verification/staking_verifier.go @@ -37,7 +37,7 @@ func NewStakingVerifier() *StakingVerifier { // - model.ErrInvalidSignature is the signature is invalid // - unexpected errors should be treated as symptoms of bugs or uncovered // edge cases in the logic (i.e. as fatal) -func (v *StakingVerifier) VerifyVote(signer *flow.Identity, sigData []byte, view uint64, blockID flow.Identifier) error { +func (v *StakingVerifier) VerifyVote(signer *flow.IdentitySkeleton, sigData []byte, view uint64, blockID flow.Identifier) error { // create the to-be-signed message msg := MakeVoteMessage(view, blockID) @@ -65,7 +65,7 @@ func (v *StakingVerifier) VerifyVote(signer *flow.Identity, sigData []byte, view // edge cases in the logic (i.e. as fatal) // // In the single verification case, `sigData` represents a single signature (`crypto.Signature`). -func (v *StakingVerifier) VerifyQC(signers flow.IdentityList, sigData []byte, view uint64, blockID flow.Identifier) error { +func (v *StakingVerifier) VerifyQC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, blockID flow.Identifier) error { msg := MakeVoteMessage(view, blockID) err := verifyAggregatedSignatureOneMessage(signers.PublicStakingKeys(), sigData, v.stakingHasher, msg) @@ -85,6 +85,6 @@ func (v *StakingVerifier) VerifyQC(signers flow.IdentityList, sigData []byte, vi // - model.ErrInvalidSignature if a signature is invalid // - unexpected errors should be treated as symptoms of bugs or uncovered // edge cases in the logic (i.e. 
as fatal) -func (v *StakingVerifier) VerifyTC(signers flow.IdentityList, sigData []byte, view uint64, highQCViews []uint64) error { +func (v *StakingVerifier) VerifyTC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, highQCViews []uint64) error { return verifyTCSignatureManyMessages(signers.PublicStakingKeys(), sigData, view, highQCViews, v.timeoutObjectHasher) } diff --git a/consensus/hotstuff/verifier.go b/consensus/hotstuff/verifier.go index 126ac7f78db..354b406cdab 100644 --- a/consensus/hotstuff/verifier.go +++ b/consensus/hotstuff/verifier.go @@ -38,7 +38,7 @@ type Verifier interface { // where querying of DKG might fail if no epoch containing the given view is known. // * unexpected errors should be treated as symptoms of bugs or uncovered // edge cases in the logic (i.e. as fatal) - VerifyVote(voter *flow.Identity, sigData []byte, view uint64, blockID flow.Identifier) error + VerifyVote(voter *flow.IdentitySkeleton, sigData []byte, view uint64, blockID flow.Identifier) error // VerifyQC checks the cryptographic validity of a QC's `SigData` w.r.t. the // given view and blockID. It is the responsibility of the calling code to ensure that @@ -58,7 +58,7 @@ type Verifier interface { // where querying of DKG might fail if no epoch containing the given view is known. // * unexpected errors should be treated as symptoms of bugs or uncovered // edge cases in the logic (i.e. as fatal) - VerifyQC(signers flow.IdentityList, sigData []byte, view uint64, blockID flow.Identifier) error + VerifyQC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, blockID flow.Identifier) error // VerifyTC checks cryptographic validity of the TC's `sigData` w.r.t. the // given view. It is the responsibility of the calling code to ensure @@ -69,5 +69,5 @@ type Verifier interface { // * model.ErrInvalidSignature if a signature is invalid // * unexpected errors should be treated as symptoms of bugs or uncovered // edge cases in the logic (i.e. 
as fatal) - VerifyTC(signers flow.IdentityList, sigData []byte, view uint64, highQCViews []uint64) error + VerifyTC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, highQCViews []uint64) error } diff --git a/engine/testutil/nodes.go b/engine/testutil/nodes.go index 5121bf6fc42..3ec40aa306b 100644 --- a/engine/testutil/nodes.go +++ b/engine/testutil/nodes.go @@ -857,11 +857,11 @@ func (s *RoundRobinLeaderSelection) IdentityByBlock(_ flow.Identifier, participa return id, nil } -func (s *RoundRobinLeaderSelection) IdentitiesByEpoch(_ uint64) (flow.IdentityList, error) { +func (s *RoundRobinLeaderSelection) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) { return s.identities, nil } -func (s *RoundRobinLeaderSelection) IdentityByEpoch(_ uint64, participantID flow.Identifier) (*flow.Identity, error) { +func (s *RoundRobinLeaderSelection) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { id, found := s.identities.ByNodeID(participantID) if !found { return nil, model.NewInvalidSignerErrorf("unknown participant %x", participantID) diff --git a/model/flow/identity.go b/model/flow/identity.go index 58194420f44..6cdc65f97e8 100644 --- a/model/flow/identity.go +++ b/model/flow/identity.go @@ -394,6 +394,14 @@ func (il IdentityList) Selector() IdentityFilter { } } +func (il IdentitySkeletonList) Lookup() map[Identifier]*IdentitySkeleton { + lookup := make(map[Identifier]*IdentitySkeleton, len(il)) + for _, identity := range il { + lookup[identity.NodeID] = identity + } + return lookup +} + func (il IdentityList) Lookup() map[Identifier]*Identity { lookup := make(map[Identifier]*Identity, len(il)) for _, identity := range il { @@ -425,8 +433,17 @@ func (il IdentityList) NodeIDs() IdentifierList { return nodeIDs } +// NodeIDs returns the NodeIDs of the nodes in the list. +func (il IdentitySkeletonList) NodeIDs() IdentifierList { + nodeIDs := make([]Identifier, 0, len(il)) + for _, id := range il { + nodeIDs = append(nodeIDs, id.NodeID) + } + return nodeIDs +} + // PublicStakingKeys returns a list with the public staking keys (order preserving). -func (il IdentityList) PublicStakingKeys() []crypto.PublicKey { +func (il IdentitySkeletonList) PublicStakingKeys() []crypto.PublicKey { pks := make([]crypto.PublicKey, 0, len(il)) for _, id := range il { pks = append(pks, id.StakingPubKey) @@ -456,10 +473,10 @@ func (il IdentityList) Checksum() Identifier { } // TotalWeight returns the total weight of all given identities. -func (il IdentityList) TotalWeight() uint64 { +func (il IdentitySkeletonList) TotalWeight() uint64 { var total uint64 for _, identity := range il { - total += identity.Weight + total += identity.InitialWeight } return total } @@ -616,3 +633,12 @@ func (il IdentityList) GetIndex(target Identifier) (uint, bool) { } return uint(i), true } + +// ToSkeleton converts the identity list to a list of identity skeletons. 
+func (il IdentityList) ToSkeleton() IdentitySkeletonList { + skeletons := make(IdentitySkeletonList, len(il)) + for i, id := range il { + skeletons[i] = &id.IdentitySkeleton + } + return skeletons +} diff --git a/module/signature/signer_indices.go b/module/signature/signer_indices.go index 68e3c78f1d5..abd7a9ccabd 100644 --- a/module/signature/signer_indices.go +++ b/module/signature/signer_indices.go @@ -127,9 +127,9 @@ func EncodeSignerToIndicesAndSigType( // Expected Error returns during normal operations: // - signature.IsInvalidSigTypesError if the given `sigType` does not encode a valid sequence of signature types func DecodeSigTypeToStakingAndBeaconSigners( - signers flow.IdentityList, + signers flow.IdentitySkeletonList, sigType []byte, -) (flow.IdentityList, flow.IdentityList, error) { +) (flow.IdentitySkeletonList, flow.IdentitySkeletonList, error) { numberSigners := len(signers) if err := validPadding(sigType, numberSigners); err != nil { if errors.Is(err, ErrIncompatibleBitVectorLength) || errors.Is(err, ErrIllegallyPaddedBitVector) { @@ -139,8 +139,8 @@ func DecodeSigTypeToStakingAndBeaconSigners( } // decode bits to Identities - stakingSigners := make(flow.IdentityList, 0, numberSigners) - beaconSigners := make(flow.IdentityList, 0, numberSigners) + stakingSigners := make(flow.IdentitySkeletonList, 0, numberSigners) + beaconSigners := make(flow.IdentitySkeletonList, 0, numberSigners) for i, signer := range signers { if bitutils.ReadBit(sigType, i) == 0 { stakingSigners = append(stakingSigners, signer) @@ -281,15 +281,15 @@ func decodeSignerIndices( // Expected Error returns during normal operations: // * signature.InvalidSignerIndicesError if the given index vector `prefixed` does not encode a valid set of signers func DecodeSignerIndicesToIdentities( - canonicalIdentities flow.IdentityList, + canonicalIdentities flow.IdentitySkeletonList, prefixed []byte, -) (flow.IdentityList, error) { +) (flow.IdentitySkeletonList, error) { indices, err := decodeSignerIndices(canonicalIdentities.NodeIDs(), prefixed) if err != nil { return nil, err } - signers := make(flow.IdentityList, 0, len(indices)) + signers := make(flow.IdentitySkeletonList, 0, len(indices)) for _, index := range indices { signers = append(signers, canonicalIdentities[index]) } From 8c6bfc2e831b3bc3db271899ecfe5dc1f87e9de8 Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Tue, 4 Jul 2023 16:38:51 +0300 Subject: [PATCH 08/46] Fixed compilation issues for cluster committee --- consensus/hotstuff/committees/cluster_committee.go | 10 +++++----- consensus/hotstuff/committees/static.go | 8 ++++---- engine/consensus/ingestion/core.go | 4 ++-- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/consensus/hotstuff/committees/cluster_committee.go b/consensus/hotstuff/committees/cluster_committee.go index a7c61034c34..bd12f12b75c 100644 --- a/consensus/hotstuff/committees/cluster_committee.go +++ b/consensus/hotstuff/committees/cluster_committee.go @@ -50,7 +50,7 @@ func NewClusterCommittee( return nil, fmt.Errorf("could not compute leader selection for cluster: %w", err) } - totalWeight := cluster.Members().TotalWeight() + totalWeight := cluster.Members().ToSkeleton().TotalWeight() com := &Cluster{ state: state, payloads: payloads, @@ -131,7 +131,7 @@ func (c *Cluster) IdentityByBlock(blockID flow.Identifier, nodeID flow.Identifie // parameter is the view in the cluster consensus. Since clusters only exist for // one epoch, we don't need to check the view. 
func (c *Cluster) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) { - return c.initialClusterMembers, nil + return c.initialClusterMembers.ToSkeleton(), nil } // IdentityByEpoch returns the node from the initial cluster members for this epoch. @@ -142,11 +142,11 @@ func (c *Cluster) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, err // - model.InvalidSignerError if nodeID was not listed by the Epoch Setup event as an // authorized participant in this cluster func (c *Cluster) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { - identity, ok := c.initialClusterMembers.ByNodeID(nodeID) + identity, ok := c.initialClusterMembers.ByNodeID(participantID) if !ok { - return nil, model.NewInvalidSignerErrorf("node %v is not an authorized hotstuff participant", nodeID) + return nil, model.NewInvalidSignerErrorf("node %v is not an authorized hotstuff participant", participantID) } - return identity, nil + return &identity.IdentitySkeleton, nil } func (c *Cluster) LeaderForView(view uint64) (flow.Identifier, error) { diff --git a/consensus/hotstuff/committees/static.go b/consensus/hotstuff/committees/static.go index 2a6615a9fc6..20978105ca5 100644 --- a/consensus/hotstuff/committees/static.go +++ b/consensus/hotstuff/committees/static.go @@ -56,7 +56,7 @@ func (s Static) IdentityByBlock(_ flow.Identifier, participantID flow.Identifier } func (s Static) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) { - return s.participants, nil + return s.participants.ToSkeleton(), nil } func (s Static) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { @@ -64,7 +64,7 @@ func (s Static) IdentityByEpoch(view uint64, participantID flow.Identifier) (*fl if !ok { return nil, model.NewInvalidSignerErrorf("unknown participant %x", participantID) } - return identity, nil + return &identity.IdentitySkeleton, nil } func (s Static) LeaderForView(_ uint64) (flow.Identifier, error) { @@ -72,11 +72,11 @@ func (s Static) LeaderForView(_ uint64) (flow.Identifier, error) { } func (s Static) QuorumThresholdForView(_ uint64) (uint64, error) { - return WeightThresholdToBuildQC(s.participants.TotalWeight()), nil + return WeightThresholdToBuildQC(s.participants.ToSkeleton().TotalWeight()), nil } func (s Static) TimeoutThresholdForView(_ uint64) (uint64, error) { - return WeightThresholdToTimeout(s.participants.TotalWeight()), nil + return WeightThresholdToTimeout(s.participants.ToSkeleton().TotalWeight()), nil } func (s Static) Self() flow.Identifier { diff --git a/engine/consensus/ingestion/core.go b/engine/consensus/ingestion/core.go index abe7e1ca420..5c7dda18b82 100644 --- a/engine/consensus/ingestion/core.go +++ b/engine/consensus/ingestion/core.go @@ -173,7 +173,7 @@ func (e *Core) validateGuarantors(guarantee *flow.CollectionGuarantee) error { } // ensure the guarantors are from the same cluster - clusterMembers := cluster.Members() + clusterMembers := cluster.Members().ToSkeleton() // find guarantors by signer indices guarantors, err := signature.DecodeSignerIndicesToIdentities(clusterMembers, guarantee.SignerIndices) @@ -187,7 +187,7 @@ func (e *Core) validateGuarantors(guarantee *flow.CollectionGuarantee) error { // determine whether signers reach minimally required stake threshold threshold := committees.WeightThresholdToBuildQC(clusterMembers.TotalWeight()) // compute required stake threshold - totalStake := flow.IdentityList(guarantors).TotalWeight() + totalStake := guarantors.TotalWeight() 
if totalStake < threshold { return engine.NewInvalidInputErrorf("collection guarantee qc signers have insufficient stake of %d (required=%d)", totalStake, threshold) } From 1cbbe9bcffcb58d54a7084ae01b8c63512627a1b Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Tue, 4 Jul 2023 16:42:13 +0300 Subject: [PATCH 09/46] Updated mocks --- consensus/hotstuff/mocks/dynamic_committee.go | 16 ++++++++-------- consensus/hotstuff/mocks/packer.go | 6 +++--- consensus/hotstuff/mocks/replicas.go | 16 ++++++++-------- consensus/hotstuff/mocks/validator.go | 8 ++++---- consensus/hotstuff/mocks/verifier.go | 12 ++++++------ 5 files changed, 29 insertions(+), 29 deletions(-) diff --git a/consensus/hotstuff/mocks/dynamic_committee.go b/consensus/hotstuff/mocks/dynamic_committee.go index e576c1b847d..580fb350ed2 100644 --- a/consensus/hotstuff/mocks/dynamic_committee.go +++ b/consensus/hotstuff/mocks/dynamic_committee.go @@ -70,16 +70,16 @@ func (_m *DynamicCommittee) IdentitiesByBlock(blockID flow.Identifier) (flow.Ide func (_m *DynamicCommittee) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) { ret := _m.Called(view) - var r0 flow.IdentityList + var r0 flow.IdentitySkeletonList var r1 error - if rf, ok := ret.Get(0).(func(uint64) (flow.IdentityList, error)); ok { + if rf, ok := ret.Get(0).(func(uint64) (flow.IdentitySkeletonList, error)); ok { return rf(view) } - if rf, ok := ret.Get(0).(func(uint64) flow.IdentityList); ok { + if rf, ok := ret.Get(0).(func(uint64) flow.IdentitySkeletonList); ok { r0 = rf(view) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.IdentityList) + r0 = ret.Get(0).(flow.IdentitySkeletonList) } } @@ -122,16 +122,16 @@ func (_m *DynamicCommittee) IdentityByBlock(blockID flow.Identifier, participant func (_m *DynamicCommittee) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { ret := _m.Called(view, participantID) - var r0 *flow.Identity + var r0 *flow.IdentitySkeleton var r1 error - if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) (*flow.Identity, error)); ok { + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) (*flow.IdentitySkeleton, error)); ok { return rf(view, participantID) } - if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) *flow.Identity); ok { + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) *flow.IdentitySkeleton); ok { r0 = rf(view, participantID) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*flow.Identity) + r0 = ret.Get(0).(*flow.IdentitySkeleton) } } diff --git a/consensus/hotstuff/mocks/packer.go b/consensus/hotstuff/mocks/packer.go index 26c26cf501c..8daf1ef76f1 100644 --- a/consensus/hotstuff/mocks/packer.go +++ b/consensus/hotstuff/mocks/packer.go @@ -55,10 +55,10 @@ func (_m *Packer) Unpack(signerIdentities flow.IdentitySkeletonList, sigData []b var r0 *hotstuff.BlockSignatureData var r1 error - if rf, ok := ret.Get(0).(func(flow.IdentityList, []byte) (*hotstuff.BlockSignatureData, error)); ok { + if rf, ok := ret.Get(0).(func(flow.IdentitySkeletonList, []byte) (*hotstuff.BlockSignatureData, error)); ok { return rf(signerIdentities, sigData) } - if rf, ok := ret.Get(0).(func(flow.IdentityList, []byte) *hotstuff.BlockSignatureData); ok { + if rf, ok := ret.Get(0).(func(flow.IdentitySkeletonList, []byte) *hotstuff.BlockSignatureData); ok { r0 = rf(signerIdentities, sigData) } else { if ret.Get(0) != nil { @@ -66,7 +66,7 @@ func (_m *Packer) Unpack(signerIdentities flow.IdentitySkeletonList, sigData []b } } - if rf, ok := ret.Get(1).(func(flow.IdentityList, []byte) 
error); ok { + if rf, ok := ret.Get(1).(func(flow.IdentitySkeletonList, []byte) error); ok { r1 = rf(signerIdentities, sigData) } else { r1 = ret.Error(1) diff --git a/consensus/hotstuff/mocks/replicas.go b/consensus/hotstuff/mocks/replicas.go index 8e85b9f5a5c..702b8b5c90c 100644 --- a/consensus/hotstuff/mocks/replicas.go +++ b/consensus/hotstuff/mocks/replicas.go @@ -44,16 +44,16 @@ func (_m *Replicas) DKG(view uint64) (hotstuff.DKG, error) { func (_m *Replicas) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) { ret := _m.Called(view) - var r0 flow.IdentityList + var r0 flow.IdentitySkeletonList var r1 error - if rf, ok := ret.Get(0).(func(uint64) (flow.IdentityList, error)); ok { + if rf, ok := ret.Get(0).(func(uint64) (flow.IdentitySkeletonList, error)); ok { return rf(view) } - if rf, ok := ret.Get(0).(func(uint64) flow.IdentityList); ok { + if rf, ok := ret.Get(0).(func(uint64) flow.IdentitySkeletonList); ok { r0 = rf(view) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.IdentityList) + r0 = ret.Get(0).(flow.IdentitySkeletonList) } } @@ -70,16 +70,16 @@ func (_m *Replicas) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, e func (_m *Replicas) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { ret := _m.Called(view, participantID) - var r0 *flow.Identity + var r0 *flow.IdentitySkeleton var r1 error - if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) (*flow.Identity, error)); ok { + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) (*flow.IdentitySkeleton, error)); ok { return rf(view, participantID) } - if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) *flow.Identity); ok { + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) *flow.IdentitySkeleton); ok { r0 = rf(view, participantID) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*flow.Identity) + r0 = ret.Get(0).(*flow.IdentitySkeleton) } } diff --git a/consensus/hotstuff/mocks/validator.go b/consensus/hotstuff/mocks/validator.go index 351cd03511b..d8cbf2fc265 100644 --- a/consensus/hotstuff/mocks/validator.go +++ b/consensus/hotstuff/mocks/validator.go @@ -61,16 +61,16 @@ func (_m *Validator) ValidateTC(tc *flow.TimeoutCertificate) error { func (_m *Validator) ValidateVote(vote *model.Vote) (*flow.IdentitySkeleton, error) { ret := _m.Called(vote) - var r0 *flow.Identity + var r0 *flow.IdentitySkeleton var r1 error - if rf, ok := ret.Get(0).(func(*model.Vote) (*flow.Identity, error)); ok { + if rf, ok := ret.Get(0).(func(*model.Vote) (*flow.IdentitySkeleton, error)); ok { return rf(vote) } - if rf, ok := ret.Get(0).(func(*model.Vote) *flow.Identity); ok { + if rf, ok := ret.Get(0).(func(*model.Vote) *flow.IdentitySkeleton); ok { r0 = rf(vote) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*flow.Identity) + r0 = ret.Get(0).(*flow.IdentitySkeleton) } } diff --git a/consensus/hotstuff/mocks/verifier.go b/consensus/hotstuff/mocks/verifier.go index 3ba02ff54e1..73f40205ec6 100644 --- a/consensus/hotstuff/mocks/verifier.go +++ b/consensus/hotstuff/mocks/verifier.go @@ -14,11 +14,11 @@ type Verifier struct { } // VerifyQC provides a mock function with given fields: signers, sigData, view, blockID -func (_m *Verifier) VerifyQC(signers flow.IdentityList, sigData []byte, view uint64, blockID flow.Identifier) error { +func (_m *Verifier) VerifyQC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, blockID flow.Identifier) error { ret := _m.Called(signers, sigData, view, blockID) var r0 error - if rf, ok := 
ret.Get(0).(func(flow.IdentityList, []byte, uint64, flow.Identifier) error); ok { + if rf, ok := ret.Get(0).(func(flow.IdentitySkeletonList, []byte, uint64, flow.Identifier) error); ok { r0 = rf(signers, sigData, view, blockID) } else { r0 = ret.Error(0) @@ -28,11 +28,11 @@ func (_m *Verifier) VerifyQC(signers flow.IdentityList, sigData []byte, view uin } // VerifyTC provides a mock function with given fields: signers, sigData, view, highQCViews -func (_m *Verifier) VerifyTC(signers flow.IdentityList, sigData []byte, view uint64, highQCViews []uint64) error { +func (_m *Verifier) VerifyTC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, highQCViews []uint64) error { ret := _m.Called(signers, sigData, view, highQCViews) var r0 error - if rf, ok := ret.Get(0).(func(flow.IdentityList, []byte, uint64, []uint64) error); ok { + if rf, ok := ret.Get(0).(func(flow.IdentitySkeletonList, []byte, uint64, []uint64) error); ok { r0 = rf(signers, sigData, view, highQCViews) } else { r0 = ret.Error(0) @@ -42,11 +42,11 @@ func (_m *Verifier) VerifyTC(signers flow.IdentityList, sigData []byte, view uin } // VerifyVote provides a mock function with given fields: voter, sigData, view, blockID -func (_m *Verifier) VerifyVote(voter *flow.Identity, sigData []byte, view uint64, blockID flow.Identifier) error { +func (_m *Verifier) VerifyVote(voter *flow.IdentitySkeleton, sigData []byte, view uint64, blockID flow.Identifier) error { ret := _m.Called(voter, sigData, view, blockID) var r0 error - if rf, ok := ret.Get(0).(func(*flow.Identity, []byte, uint64, flow.Identifier) error); ok { + if rf, ok := ret.Get(0).(func(*flow.IdentitySkeleton, []byte, uint64, flow.Identifier) error); ok { r0 = rf(voter, sigData, view, blockID) } else { r0 = ret.Error(0) From 19d5adaf99de65b784527e88c81f81c53fc68c26 Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Wed, 5 Jul 2023 21:28:04 +0300 Subject: [PATCH 10/46] Updated hotstuff committee to return skeleton and full identities depending on context. Updated usages.
Fixed tests --- .../export_report.json | 4 +- .../committees/cluster_committee_test.go | 10 ++-- .../committees/consensus_committee_test.go | 12 ++--- .../leader/leader_selection_test.go | 52 ++++++++++--------- .../hotstuff/integration/instance_test.go | 16 +++--- .../hotstuff/safetyrules/safety_rules_test.go | 2 +- .../signature/block_signer_decoder_test.go | 6 +-- consensus/hotstuff/signature/packer_test.go | 24 ++++----- .../timeoutcollector/aggregation_test.go | 18 +++---- .../timeout_processor_test.go | 37 ++++++------- .../hotstuff/validator/validator_test.go | 40 +++++++------- .../verification/combined_signer_v2_test.go | 12 ++--- .../verification/combined_signer_v3_test.go | 12 ++--- .../verification/staking_signer_test.go | 10 ++-- .../combined_vote_processor_v2_test.go | 6 +-- .../combined_vote_processor_v3_test.go | 4 +- .../staking_vote_processor_test.go | 7 +-- consensus/integration/epoch_test.go | 4 +- engine/access/rpc/engine.go | 11 ++++ model/flow/identity.go | 10 ++++ model/flow/mapfunc/identity.go | 3 +- module/signature/signer_indices_test.go | 40 ++++++++------ state/protocol/badger/snapshot.go | 2 +- utils/unittest/fixtures.go | 7 +++ 24 files changed, 195 insertions(+), 154 deletions(-) diff --git a/cmd/util/cmd/execution-state-extract/export_report.json b/cmd/util/cmd/execution-state-extract/export_report.json index 5f0dd5f132a..ac9526fe448 100644 --- a/cmd/util/cmd/execution-state-extract/export_report.json +++ b/cmd/util/cmd/execution-state-extract/export_report.json @@ -1,6 +1,6 @@ { "EpochCounter": 0, - "PreviousStateCommitment": "170cbb4dd9c2b90362d09e5c05641de30e1ac6aa938ed4ea866fa592fba16566", - "CurrentStateCommitment": "170cbb4dd9c2b90362d09e5c05641de30e1ac6aa938ed4ea866fa592fba16566", + "PreviousStateCommitment": "c14260392ed0f6d2a46782a6ea6c9d9259263ad750cf5df70c2bce3b84196373", + "CurrentStateCommitment": "c14260392ed0f6d2a46782a6ea6c9d9259263ad750cf5df70c2bce3b84196373", "ReportSucceeded": true } \ No newline at end of file diff --git a/consensus/hotstuff/committees/cluster_committee_test.go b/consensus/hotstuff/committees/cluster_committee_test.go index e6c36aea044..51ccb43b010 100644 --- a/consensus/hotstuff/committees/cluster_committee_test.go +++ b/consensus/hotstuff/committees/cluster_committee_test.go @@ -73,11 +73,11 @@ func (suite *ClusterSuite) SetupTest() { func (suite *ClusterSuite) TestThresholds() { threshold, err := suite.com.QuorumThresholdForView(rand.Uint64()) suite.Require().NoError(err) - suite.Assert().Equal(WeightThresholdToBuildQC(suite.members.TotalWeight()), threshold) + suite.Assert().Equal(WeightThresholdToBuildQC(suite.members.ToSkeleton().TotalWeight()), threshold) threshold, err = suite.com.TimeoutThresholdForView(rand.Uint64()) suite.Require().NoError(err) - suite.Assert().Equal(WeightThresholdToTimeout(suite.members.TotalWeight()), threshold) + suite.Assert().Equal(WeightThresholdToTimeout(suite.members.ToSkeleton().TotalWeight()), threshold) } // TestInvalidSigner tests that the InvalidSignerError sentinel is @@ -159,7 +159,7 @@ func (suite *ClusterSuite) TestInvalidSigner() { suite.Run("by epoch", func() { actual, err := suite.com.IdentityByEpoch(rand.Uint64(), realEjectedClusterMember.NodeID) suite.Assert().NoError(err) - suite.Assert().Equal(realEjectedClusterMember, actual) + suite.Assert().Equal(realEjectedClusterMember.IdentitySkeleton, *actual) }) }) @@ -177,7 +177,7 @@ func (suite *ClusterSuite) TestInvalidSigner() { suite.Run("by epoch", func() { actual, err := suite.com.IdentityByEpoch(rand.Uint64(), 
realNoWeightClusterMember.NodeID) suite.Require().NoError(err) - suite.Assert().Equal(realNoWeightClusterMember, actual) + suite.Assert().Equal(realNoWeightClusterMember.IdentitySkeleton, *actual) }) }) @@ -195,7 +195,7 @@ func (suite *ClusterSuite) TestInvalidSigner() { suite.Run("by epoch", func() { actual, err := suite.com.IdentityByEpoch(rand.Uint64(), realClusterMember.NodeID) suite.Require().NoError(err) - suite.Assert().Equal(realClusterMember, actual) + suite.Assert().Equal(realClusterMember.IdentitySkeleton, *actual) }) }) } diff --git a/consensus/hotstuff/committees/consensus_committee_test.go b/consensus/hotstuff/committees/consensus_committee_test.go index 61012ee51a9..afcf738f888 100644 --- a/consensus/hotstuff/committees/consensus_committee_test.go +++ b/consensus/hotstuff/committees/consensus_committee_test.go @@ -370,7 +370,7 @@ func (suite *ConsensusSuite) TestIdentitiesByEpoch() { t.Run("should be able to retrieve real identity", func(t *testing.T) { actual, err := suite.committee.IdentityByEpoch(unittest.Uint64InRange(1, 100), realIdentity.NodeID) require.NoError(t, err) - require.Equal(t, realIdentity, actual) + require.Equal(t, realIdentity.IdentitySkeleton, *actual) }) t.Run("should return ErrViewForUnknownEpoch for view outside existing epoch", func(t *testing.T) { @@ -387,7 +387,7 @@ func (suite *ConsensusSuite) TestIdentitiesByEpoch() { t.Run("should be able to retrieve epoch 1 identity in epoch 1", func(t *testing.T) { actual, err := suite.committee.IdentityByEpoch(unittest.Uint64InRange(1, 100), realIdentity.NodeID) require.NoError(t, err) - require.Equal(t, realIdentity, actual) + require.Equal(t, realIdentity.IdentitySkeleton, *actual) }) t.Run("should be unable to retrieve epoch 1 identity in epoch 2", func(t *testing.T) { @@ -405,7 +405,7 @@ func (suite *ConsensusSuite) TestIdentitiesByEpoch() { t.Run("should be able to retrieve epoch 2 identity in epoch 2", func(t *testing.T) { actual, err := suite.committee.IdentityByEpoch(unittest.Uint64InRange(101, 200), epoch2Identity.NodeID) require.NoError(t, err) - require.Equal(t, epoch2Identity, actual) + require.Equal(t, epoch2Identity.IdentitySkeleton, *actual) }) t.Run("should return ErrViewForUnknownEpoch for view outside existing epochs", func(t *testing.T) { @@ -428,8 +428,8 @@ func (suite *ConsensusSuite) TestThresholds() { identities := unittest.IdentityListFixture(10) - prevEpoch := newMockEpoch(suite.currentEpochCounter-1, identities.Map(mapfunc.WithWeight(100)), 1, 100, unittest.SeedFixture(prg.RandomSourceLength), true) - currEpoch := newMockEpoch(suite.currentEpochCounter, identities.Map(mapfunc.WithWeight(200)), 101, 200, unittest.SeedFixture(32), true) + prevEpoch := newMockEpoch(suite.currentEpochCounter-1, identities.Map(mapfunc.WithInitialWeight(100)), 1, 100, unittest.SeedFixture(prg.RandomSourceLength), true) + currEpoch := newMockEpoch(suite.currentEpochCounter, identities.Map(mapfunc.WithInitialWeight(200)), 101, 200, unittest.SeedFixture(32), true) suite.epochs.Add(prevEpoch) suite.epochs.Add(currEpoch) @@ -466,7 +466,7 @@ func (suite *ConsensusSuite) TestThresholds() { }) // now, add a valid next epoch - nextEpoch := newMockEpoch(suite.currentEpochCounter+1, identities.Map(mapfunc.WithWeight(300)), 201, 300, unittest.SeedFixture(prg.RandomSourceLength), true) + nextEpoch := newMockEpoch(suite.currentEpochCounter+1, identities.Map(mapfunc.WithInitialWeight(300)), 201, 300, unittest.SeedFixture(prg.RandomSourceLength), true) suite.CommitEpoch(nextEpoch) t.Run("next epoch ready", func(t 
*testing.T) { diff --git a/consensus/hotstuff/committees/leader/leader_selection_test.go b/consensus/hotstuff/committees/leader/leader_selection_test.go index ecf13e4aa83..7630002353c 100644 --- a/consensus/hotstuff/committees/leader/leader_selection_test.go +++ b/consensus/hotstuff/committees/leader/leader_selection_test.go @@ -25,7 +25,7 @@ func TestSingleConsensusNode(t *testing.T) { identity := unittest.IdentityFixture(unittest.WithWeight(8)) rng := getPRG(t, someSeed) - selection, err := ComputeLeaderSelection(0, rng, 10, []*flow.Identity{identity}) + selection, err := ComputeLeaderSelection(0, rng, 10, flow.IdentitySkeletonList{&identity.IdentitySkeleton}) require.NoError(t, err) for i := uint64(0); i < 10; i++ { leaderID, err := selection.LeaderForView(i) @@ -126,9 +126,9 @@ func TestDeterministic(t *testing.T) { const N_VIEWS = 100 const N_NODES = 4 - identities := unittest.IdentityListFixture(N_NODES) + identities := unittest.IdentityListFixture(N_NODES).ToSkeleton() for i, identity := range identities { - identity.Weight = uint64(i + 1) + identity.InitialWeight = uint64(i + 1) } rng := getPRG(t, someSeed) @@ -158,16 +158,16 @@ func TestInputValidation(t *testing.T) { // should return an error if we request to compute leader selection for <1 views t.Run("epoch containing no views", func(t *testing.T) { count := 0 - _, err := ComputeLeaderSelection(0, rng, count, unittest.IdentityListFixture(4)) + _, err := ComputeLeaderSelection(0, rng, count, unittest.IdentityListFixture(4).ToSkeleton()) assert.Error(t, err) count = -1 - _, err = ComputeLeaderSelection(0, rng, count, unittest.IdentityListFixture(4)) + _, err = ComputeLeaderSelection(0, rng, count, unittest.IdentityListFixture(4).ToSkeleton()) assert.Error(t, err) }) // epoch with no possible leaders should return an error t.Run("epoch without participants", func(t *testing.T) { - identities := unittest.IdentityListFixture(0) + identities := unittest.IdentityListFixture(0).ToSkeleton() _, err := ComputeLeaderSelection(0, rng, 100, identities) assert.Error(t, err) }) @@ -181,7 +181,7 @@ func TestViewOutOfRange(t *testing.T) { firstView := uint64(100) finalView := uint64(200) - identities := unittest.IdentityListFixture(4) + identities := unittest.IdentityListFixture(4).ToSkeleton() leaders, err := ComputeLeaderSelection(firstView, rng, int(finalView-firstView+1), identities) require.Nil(t, err) @@ -236,10 +236,10 @@ func TestDifferentSeedWillProduceDifferentSelection(t *testing.T) { seed2[0] = 8 rng2 := getPRG(t, seed2) - leaders1, err := ComputeLeaderSelection(0, rng1, N_VIEWS, identities) + leaders1, err := ComputeLeaderSelection(0, rng1, N_VIEWS, identities.ToSkeleton()) require.NoError(t, err) - leaders2, err := ComputeLeaderSelection(0, rng2, N_VIEWS, identities) + leaders2, err := ComputeLeaderSelection(0, rng2, N_VIEWS, identities.ToSkeleton()) require.NoError(t, err) diff := 0 @@ -267,9 +267,9 @@ func TestLeaderSelectionAreWeighted(t *testing.T) { const N_VIEWS = 100000 const N_NODES = 4 - identities := unittest.IdentityListFixture(N_NODES) + identities := unittest.IdentityListFixture(N_NODES).ToSkeleton() for i, identity := range identities { - identity.Weight = uint64(i + 1) + identity.InitialWeight = uint64(i + 1) } leaders, err := ComputeLeaderSelection(0, rng, N_VIEWS, identities) @@ -287,7 +287,7 @@ func TestLeaderSelectionAreWeighted(t *testing.T) { for nodeID, selectedCount := range selected { identity, ok := identities.ByNodeID(nodeID) require.True(t, ok) - target := uint64(N_VIEWS) * identity.Weight / 10 + target 
:= uint64(N_VIEWS) * identity.InitialWeight / 10 var diff uint64 if selectedCount > target { @@ -307,14 +307,15 @@ func BenchmarkLeaderSelection(b *testing.B) { const N_VIEWS = 15000000 const N_NODES = 20 - identities := make([]*flow.Identity, 0, N_NODES) + identities := make(flow.IdentityList, 0, N_NODES) for i := 0; i < N_NODES; i++ { identities = append(identities, unittest.IdentityFixture(unittest.WithWeight(uint64(i)))) } + skeletonIdentities := identities.ToSkeleton() rng := getPRG(b, someSeed) for n := 0; n < b.N; n++ { - _, err := ComputeLeaderSelection(0, rng, N_VIEWS, identities) + _, err := ComputeLeaderSelection(0, rng, N_VIEWS, skeletonIdentities) require.NoError(b, err) } @@ -322,8 +323,8 @@ func BenchmarkLeaderSelection(b *testing.B) { func TestInvalidTotalWeight(t *testing.T) { rng := getPRG(t, someSeed) - identities := unittest.IdentityListFixture(4, unittest.WithWeight(0)) - _, err := ComputeLeaderSelection(0, rng, 10, identities) + identities := unittest.IdentityListFixture(4, unittest.WithInitialWeight(0)) + _, err := ComputeLeaderSelection(0, rng, 10, identities.ToSkeleton()) require.Error(t, err) } @@ -338,10 +339,10 @@ func TestZeroWeightNodeWillNotBeSelected(t *testing.T) { t.Run("small dataset", func(t *testing.T) { const N_VIEWS = 100 - weightless := unittest.IdentityListFixture(5, unittest.WithWeight(0)) - weightful := unittest.IdentityListFixture(5) + weightless := unittest.IdentityListFixture(5, unittest.WithInitialWeight(0)).ToSkeleton() + weightful := unittest.IdentityListFixture(5).ToSkeleton() for i, identity := range weightful { - identity.Weight = uint64(i + 1) + identity.InitialWeight = uint64(i + 1) } identities := append(weightless, weightful...) @@ -368,17 +369,18 @@ func TestZeroWeightNodeWillNotBeSelected(t *testing.T) { toolRng := getPRG(t, someSeed) // create 1002 nodes with all 0 weight - identities := unittest.IdentityListFixture(1002, unittest.WithWeight(0)) + fullIdentities := unittest.IdentityListFixture(1002, unittest.WithInitialWeight(0)) // create 2 nodes with 1 weight, and place them in between // index 233-777 n := toolRng.UintN(777-233) + 233 m := toolRng.UintN(777-233) + 233 - identities[n].Weight = 1 - identities[m].Weight = 1 + fullIdentities[n].InitialWeight = 1 + fullIdentities[m].InitialWeight = 1 // the following code check the zero weight node should not be selected - weightful := identities.Filter(filter.HasWeight(true)) + weightful := fullIdentities.Filter(filter.HasWeight(true)).ToSkeleton() + identities := fullIdentities.ToSkeleton() count := 1000 selectionFromAll, err := ComputeLeaderSelection(0, rng, count, identities) @@ -401,11 +403,11 @@ func TestZeroWeightNodeWillNotBeSelected(t *testing.T) { t.Run("if there is only 1 node has weight, then it will be always be the leader and the only leader", func(t *testing.T) { toolRng := getPRG(t, someSeed) - identities := unittest.IdentityListFixture(1000, unittest.WithWeight(0)) + identities := unittest.IdentityListFixture(1000, unittest.WithInitialWeight(0)).ToSkeleton() n := rng.UintN(1000) weight := n + 1 - identities[n].Weight = weight + identities[n].InitialWeight = weight onlyNodeWithWeight := identities[n] selections, err := ComputeLeaderSelection(0, toolRng, 1000, identities) diff --git a/consensus/hotstuff/integration/instance_test.go b/consensus/hotstuff/integration/instance_test.go index bf12244c099..8728b1b994e 100644 --- a/consensus/hotstuff/integration/instance_test.go +++ b/consensus/hotstuff/integration/instance_test.go @@ -170,14 +170,14 @@ func NewInstance(t 
*testing.T, options ...Option) *Instance { // program the hotstuff committee state in.committee.On("IdentitiesByEpoch", mock.Anything).Return( - func(_ uint64) flow.IdentityList { - return in.participants + func(_ uint64) flow.IdentitySkeletonList { + return in.participants.ToSkeleton() }, nil, ) for _, participant := range in.participants { in.committee.On("IdentityByBlock", mock.Anything, participant.NodeID).Return(participant, nil) - in.committee.On("IdentityByEpoch", mock.Anything, participant.NodeID).Return(participant, nil) + in.committee.On("IdentityByEpoch", mock.Anything, participant.NodeID).Return(&participant.IdentitySkeleton, nil) } in.committee.On("Self").Return(in.localID) in.committee.On("LeaderForView", mock.Anything).Return( @@ -185,8 +185,8 @@ func NewInstance(t *testing.T, options ...Option) *Instance { return in.participants[int(view)%len(in.participants)].NodeID }, nil, ) - in.committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(in.participants.TotalWeight()), nil) - in.committee.On("TimeoutThresholdForView", mock.Anything).Return(committees.WeightThresholdToTimeout(in.participants.TotalWeight()), nil) + in.committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(in.participants.ToSkeleton().TotalWeight()), nil) + in.committee.On("TimeoutThresholdForView", mock.Anything).Return(committees.WeightThresholdToTimeout(in.participants.ToSkeleton().TotalWeight()), nil) // program the builder module behaviour in.builder.On("BuildOn", mock.Anything, mock.Anything).Return( @@ -413,14 +413,14 @@ func NewInstance(t *testing.T, options ...Option) *Instance { in.queue <- qc } - minRequiredWeight := committees.WeightThresholdToBuildQC(uint64(in.participants.Count()) * weight) + minRequiredWeight := committees.WeightThresholdToBuildQC(uint64(len(in.participants)) * weight) voteProcessorFactory := mocks.NewVoteProcessorFactory(t) voteProcessorFactory.On("Create", mock.Anything, mock.Anything).Return( func(log zerolog.Logger, proposal *model.Proposal) hotstuff.VerifyingVoteProcessor { stakingSigAggtor := helper.MakeWeightedSignatureAggregator(weight) stakingSigAggtor.On("Verify", mock.Anything, mock.Anything).Return(nil).Maybe() - rbRector := helper.MakeRandomBeaconReconstructor(msig.RandomBeaconThreshold(int(in.participants.Count()))) + rbRector := helper.MakeRandomBeaconReconstructor(msig.RandomBeaconThreshold(len(in.participants))) rbRector.On("Verify", mock.Anything, mock.Anything).Return(nil).Maybe() return votecollector.NewCombinedVoteProcessor( @@ -468,7 +468,7 @@ func NewInstance(t *testing.T, options ...Option) *Instance { newestView.Set(newestQCView) identity, ok := in.participants.ByNodeID(signerID) require.True(t, ok) - return totalWeight.Add(identity.Weight) + return totalWeight.Add(identity.InitialWeight) }, nil, ).Maybe() aggregator.On("Aggregate", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return( diff --git a/consensus/hotstuff/safetyrules/safety_rules_test.go b/consensus/hotstuff/safetyrules/safety_rules_test.go index 2c2d9cc201a..6309394e880 100644 --- a/consensus/hotstuff/safetyrules/safety_rules_test.go +++ b/consensus/hotstuff/safetyrules/safety_rules_test.go @@ -61,7 +61,7 @@ func (s *SafetyRulesTestSuite) SetupTest() { s.committee.On("Self").Return(s.ourIdentity.NodeID).Maybe() s.committee.On("IdentityByBlock", mock.Anything, s.ourIdentity.NodeID).Return(s.ourIdentity, nil).Maybe() s.committee.On("IdentityByBlock", s.proposal.Block.BlockID, 
s.proposal.Block.ProposerID).Return(s.proposerIdentity, nil).Maybe() - s.committee.On("IdentityByEpoch", mock.Anything, s.ourIdentity.NodeID).Return(s.ourIdentity, nil).Maybe() + s.committee.On("IdentityByEpoch", mock.Anything, s.ourIdentity.NodeID).Return(&s.ourIdentity.IdentitySkeleton, nil).Maybe() s.safetyData = &hotstuff.SafetyData{ LockedOneChainView: s.bootstrapBlock.View, diff --git a/consensus/hotstuff/signature/block_signer_decoder_test.go b/consensus/hotstuff/signature/block_signer_decoder_test.go index 78efb3005eb..30d0ecf9bdd 100644 --- a/consensus/hotstuff/signature/block_signer_decoder_test.go +++ b/consensus/hotstuff/signature/block_signer_decoder_test.go @@ -36,7 +36,7 @@ func (s *blockSignerDecoderSuite) SetupTest() { // mock consensus committee s.committee = hotstuff.NewDynamicCommittee(s.T()) - s.committee.On("IdentitiesByEpoch", mock.Anything).Return(s.allConsensus, nil).Maybe() + s.committee.On("IdentitiesByEpoch", mock.Anything).Return(s.allConsensus.ToSkeleton(), nil).Maybe() // prepare valid test block: voterIndices, err := signature.EncodeSignersToIndices(s.allConsensus.NodeIDs(), s.allConsensus.NodeIDs()) @@ -139,8 +139,8 @@ func (s *blockSignerDecoderSuite) Test_EpochTransition() { // PARENT <- | -- B blockView := s.block.Header.View parentView := s.block.Header.ParentView - epoch1Committee := s.allConsensus - epoch2Committee := s.allConsensus.SamplePct(.8) + epoch1Committee := s.allConsensus.ToSkeleton() + epoch2Committee := s.allConsensus.SamplePct(.8).ToSkeleton() *s.committee = *hotstuff.NewDynamicCommittee(s.T()) s.committee.On("IdentitiesByEpoch", parentView).Return(epoch1Committee, nil).Maybe() diff --git a/consensus/hotstuff/signature/packer_test.go b/consensus/hotstuff/signature/packer_test.go index 862534d6eda..042495eb6d0 100644 --- a/consensus/hotstuff/signature/packer_test.go +++ b/consensus/hotstuff/signature/packer_test.go @@ -16,11 +16,11 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) -func newPacker(identities flow.IdentityList) *ConsensusSigDataPacker { +func newPacker(identities flow.IdentitySkeletonList) *ConsensusSigDataPacker { // mock consensus committee committee := &mocks.DynamicCommittee{} committee.On("IdentitiesByEpoch", mock.Anything).Return( - func(_ uint64) flow.IdentityList { + func(_ uint64) flow.IdentitySkeletonList { return identities }, nil, @@ -29,7 +29,7 @@ func newPacker(identities flow.IdentityList) *ConsensusSigDataPacker { return NewConsensusSigDataPacker(committee) } -func makeBlockSigData(committee flow.IdentityList) *hotstuff.BlockSignatureData { +func makeBlockSigData(committee flow.IdentitySkeletonList) *hotstuff.BlockSignatureData { blockSigData := &hotstuff.BlockSignatureData{ StakingSigners: []flow.Identifier{ committee[0].NodeID, // A @@ -54,7 +54,7 @@ func makeBlockSigData(committee flow.IdentityList) *hotstuff.BlockSignatureData // aggregated random beacon sigs are from [D,F] func TestPackUnpack(t *testing.T) { // prepare data for testing - committee := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)) + committee := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)).ToSkeleton() view := rand.Uint64() blockSigData := makeBlockSigData(committee) @@ -100,9 +100,9 @@ func TestPackUnpack_EmptySigners(t *testing.T) { require.NoError(t, err) // create packer with a non-empty committee (honest node trying to decode the sig data) - committee := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)) + committee := unittest.IdentityListFixture(6, 
unittest.WithRole(flow.RoleConsensus)).ToSkeleton() packer := newPacker(committee) - unpacked, err := packer.Unpack(make([]*flow.Identity, 0), sig) + unpacked, err := packer.Unpack(make(flow.IdentitySkeletonList, 0), sig) require.NoError(t, err) // check that the unpack data match with the original data @@ -117,7 +117,7 @@ func TestPackUnpack_EmptySigners(t *testing.T) { // it's able to pack and unpack func TestPackUnpackManyNodes(t *testing.T) { // prepare data for testing - committee := unittest.IdentityListFixture(200, unittest.WithRole(flow.RoleConsensus)) + committee := unittest.IdentityListFixture(200, unittest.WithRole(flow.RoleConsensus)).ToSkeleton() view := rand.Uint64() blockSigData := makeBlockSigData(committee) stakingSigners := make([]flow.Identifier, 0) @@ -161,7 +161,7 @@ func TestPackUnpackManyNodes(t *testing.T) { // if the sig data can not be decoded, return model.InvalidFormatError func TestFailToDecode(t *testing.T) { // prepare data for testing - committee := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)) + committee := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)).ToSkeleton() view := rand.Uint64() blockSigData := makeBlockSigData(committee) @@ -184,7 +184,7 @@ func TestFailToDecode(t *testing.T) { // if the signer IDs doesn't match, return InvalidFormatError func TestMismatchSignerIDs(t *testing.T) { // prepare data for testing - committee := unittest.IdentityListFixture(9, unittest.WithRole(flow.RoleConsensus)) + committee := unittest.IdentityListFixture(9, unittest.WithRole(flow.RoleConsensus)).ToSkeleton() view := rand.Uint64() blockSigData := makeBlockSigData(committee[:6]) @@ -216,7 +216,7 @@ func TestMismatchSignerIDs(t *testing.T) { // if sig type doesn't match, return InvalidFormatError func TestInvalidSigType(t *testing.T) { // prepare data for testing - committee := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)) + committee := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)).ToSkeleton() view := rand.Uint64() blockSigData := makeBlockSigData(committee) @@ -250,7 +250,7 @@ func TestInvalidSigType(t *testing.T) { // no random beacon signers func TestPackUnpackWithoutRBAggregatedSig(t *testing.T) { // prepare data for testing - committee := unittest.IdentityListFixture(3, unittest.WithRole(flow.RoleConsensus)) + committee := unittest.IdentityListFixture(3, unittest.WithRole(flow.RoleConsensus)).ToSkeleton() view := rand.Uint64() blockSigData := &hotstuff.BlockSignatureData{ @@ -292,7 +292,7 @@ func TestPackUnpackWithoutRBAggregatedSig(t *testing.T) { // with different structure format, more specifically there is no difference between // nil and empty slices for RandomBeaconSigners and AggregatedRandomBeaconSig. 
func TestPackWithoutRBAggregatedSig(t *testing.T) { - identities := unittest.IdentityListFixture(3, unittest.WithRole(flow.RoleConsensus)) + identities := unittest.IdentityListFixture(3, unittest.WithRole(flow.RoleConsensus)).ToSkeleton() committee := identities.NodeIDs() // prepare data for testing diff --git a/consensus/hotstuff/timeoutcollector/aggregation_test.go b/consensus/hotstuff/timeoutcollector/aggregation_test.go index 8adc1cacccc..b6f35026294 100644 --- a/consensus/hotstuff/timeoutcollector/aggregation_test.go +++ b/consensus/hotstuff/timeoutcollector/aggregation_test.go @@ -21,7 +21,7 @@ import ( // createAggregationData is a helper which creates fixture data for testing func createAggregationData(t *testing.T, signersNumber int) ( *TimeoutSignatureAggregator, - flow.IdentityList, + flow.IdentitySkeletonList, []crypto.PublicKey, []crypto.Signature, []hotstuff.TimeoutSignerInfo, @@ -37,14 +37,14 @@ func createAggregationData(t *testing.T, signersNumber int) ( hashers := make([]hash.Hasher, 0, signersNumber) // create keys, identities and signatures - ids := make([]*flow.Identity, 0, signersNumber) + ids := make(flow.IdentitySkeletonList, 0, signersNumber) pks := make([]crypto.PublicKey, 0, signersNumber) view := 10 + uint64(rand.Uint32()) for i := 0; i < signersNumber; i++ { sk := unittest.PrivateKeyFixture(crypto.BLSBLS12381, crypto.KeyGenSeedMinLen) identity := unittest.IdentityFixture(unittest.WithStakingPubKey(sk.PublicKey())) // id - ids = append(ids, identity) + ids = append(ids, &identity.IdentitySkeleton) // keys newestQCView := uint64(rand.Intn(int(view))) msg := verification.MakeTimeoutMessage(view, newestQCView) @@ -74,10 +74,10 @@ func TestNewTimeoutSignatureAggregator(t *testing.T) { sk := unittest.PrivateKeyFixture(crypto.ECDSAP256, crypto.KeyGenSeedMinLen) signer := unittest.IdentityFixture(unittest.WithStakingPubKey(sk.PublicKey())) // wrong key type - _, err := NewTimeoutSignatureAggregator(0, flow.IdentityList{signer}, tag) + _, err := NewTimeoutSignatureAggregator(0, flow.IdentitySkeletonList{&signer.IdentitySkeleton}, tag) require.Error(t, err) // empty signers - _, err = NewTimeoutSignatureAggregator(0, flow.IdentityList{}, tag) + _, err = NewTimeoutSignatureAggregator(0, flow.IdentitySkeletonList{}, tag) require.Error(t, err) } @@ -102,7 +102,7 @@ func TestTimeoutSignatureAggregator_HappyPath(t *testing.T) { // ignore weight as comparing against expected weight is not thread safe require.NoError(t, err) }(i, sig) - expectedWeight += ids[i+subSet].Weight + expectedWeight += ids[i+subSet].InitialWeight } wg.Wait() @@ -118,7 +118,7 @@ func TestTimeoutSignatureAggregator_HappyPath(t *testing.T) { for i, sig := range sigs[:subSet] { weight, err := aggregator.VerifyAndAdd(ids[i].NodeID, sig, signersData[i].NewestQCView) require.NoError(t, err) - expectedWeight += ids[i].Weight + expectedWeight += ids[i].InitialWeight require.Equal(t, expectedWeight, weight) // test TotalWeight require.Equal(t, expectedWeight, aggregator.TotalWeight()) @@ -154,7 +154,7 @@ func TestTimeoutSignatureAggregator_VerifyAndAdd(t *testing.T) { // add signatures for i, sig := range sigs { weight, err := aggregator.VerifyAndAdd(ids[i].NodeID, sig, signersInfo[i].NewestQCView) - expectedWeight += ids[i].Weight + expectedWeight += ids[i].InitialWeight require.Equal(t, expectedWeight, weight) require.NoError(t, err) } @@ -205,7 +205,7 @@ func TestTimeoutSignatureAggregator_Aggregate(t *testing.T) { for i, sig := range sigs { weight, err := aggregator.VerifyAndAdd(ids[i].NodeID, sig, 
signersInfo[i].NewestQCView) if err == nil { - expectedWeight += ids[i].Weight + expectedWeight += ids[i].InitialWeight } require.Equal(t, expectedWeight, weight) } diff --git a/consensus/hotstuff/timeoutcollector/timeout_processor_test.go b/consensus/hotstuff/timeoutcollector/timeout_processor_test.go index 3ca024cd84c..7d79a1b868a 100644 --- a/consensus/hotstuff/timeoutcollector/timeout_processor_test.go +++ b/consensus/hotstuff/timeoutcollector/timeout_processor_test.go @@ -36,8 +36,8 @@ func TestTimeoutProcessor(t *testing.T) { type TimeoutProcessorTestSuite struct { suite.Suite - participants flow.IdentityList - signer *flow.Identity + participants flow.IdentitySkeletonList + signer *flow.IdentitySkeleton view uint64 sigWeight uint64 totalWeight atomic.Uint64 @@ -55,7 +55,7 @@ func (s *TimeoutProcessorTestSuite) SetupTest() { s.validator = mocks.NewValidator(s.T()) s.sigAggregator = mocks.NewTimeoutSignatureAggregator(s.T()) s.notifier = mocks.NewTimeoutCollectorConsumer(s.T()) - s.participants = unittest.IdentityListFixture(11, unittest.WithWeight(s.sigWeight)).Sort(order.Canonical) + s.participants = unittest.IdentityListFixture(11, unittest.WithWeight(s.sigWeight)).Sort(order.Canonical).ToSkeleton() s.signer = s.participants[0] s.view = (uint64)(rand.Uint32() + 100) s.totalWeight = *atomic.NewUint64(0) @@ -462,6 +462,7 @@ func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) { // signers hold objects that are created with private key and can sign votes and proposals signers := make(map[flow.Identifier]*verification.StakingSigner) // prepare staking signers, each signer has its own private/public key pair + // identities must be in canonical order stakingSigners := unittest.IdentityListFixture(11, func(identity *flow.Identity) { stakingPriv := unittest.StakingPrivKeyFixture() identity.StakingPubKey = stakingPriv.PublicKey() @@ -470,12 +471,10 @@ func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) { require.NoError(t, err) signers[identity.NodeID] = verification.NewStakingSigner(me) - }) - // identities must be in canonical order - stakingSigners = stakingSigners.Sort(order.Canonical) + }).Sort(order.Canonical) // utility function which generates a valid timeout for every signer - createTimeouts := func(participants flow.IdentityList, view uint64, newestQC *flow.QuorumCertificate, lastViewTC *flow.TimeoutCertificate) []*model.TimeoutObject { + createTimeouts := func(participants flow.IdentitySkeletonList, view uint64, newestQC *flow.QuorumCertificate, lastViewTC *flow.TimeoutCertificate) []*model.TimeoutObject { timeouts := make([]*model.TimeoutObject, 0, len(participants)) for _, signer := range participants { timeout, err := signers[signer.NodeID].CreateTimeout(view, newestQC, lastViewTC) @@ -491,20 +490,22 @@ func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) { block := helper.MakeBlock(helper.WithBlockView(view-1), helper.WithBlockProposer(leader.NodeID)) + stakingSignersSkeleton := stakingSigners.ToSkeleton() + committee := mocks.NewDynamicCommittee(t) - committee.On("IdentitiesByEpoch", mock.Anything).Return(stakingSigners, nil) + committee.On("IdentitiesByEpoch", mock.Anything).Return(stakingSignersSkeleton, nil) committee.On("IdentitiesByBlock", mock.Anything).Return(stakingSigners, nil) - committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(stakingSigners.TotalWeight()), nil) - committee.On("TimeoutThresholdForView", mock.Anything).Return(committees.WeightThresholdToTimeout(stakingSigners.TotalWeight()), nil) + 
committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(stakingSignersSkeleton.TotalWeight()), nil) + committee.On("TimeoutThresholdForView", mock.Anything).Return(committees.WeightThresholdToTimeout(stakingSignersSkeleton.TotalWeight()), nil) // create first QC for view N-1, this will be our olderQC - olderQC := createRealQC(t, committee, stakingSigners, signers, block) + olderQC := createRealQC(t, committee, stakingSignersSkeleton, signers, block) // now create a second QC for view N, this will be our newest QC nextBlock := helper.MakeBlock( helper.WithBlockView(view), helper.WithBlockProposer(leader.NodeID), helper.WithBlockQC(olderQC)) - newestQC := createRealQC(t, committee, stakingSigners, signers, nextBlock) + newestQC := createRealQC(t, committee, stakingSignersSkeleton, signers, nextBlock) // At this point we have created two QCs for round N-1 and N. // Next step is create a TC for view N. @@ -523,7 +524,7 @@ func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) { lastViewTC = tc } - aggregator, err := NewTimeoutSignatureAggregator(view, stakingSigners, msig.CollectorTimeoutTag) + aggregator, err := NewTimeoutSignatureAggregator(view, stakingSignersSkeleton, msig.CollectorTimeoutTag) require.NoError(t, err) notifier := mocks.NewTimeoutCollectorConsumer(t) @@ -533,7 +534,7 @@ func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) { require.NoError(t, err) // last view was successful, no lastViewTC in this case - timeouts := createTimeouts(stakingSigners, view, olderQC, nil) + timeouts := createTimeouts(stakingSignersSkeleton, view, olderQC, nil) for _, timeout := range timeouts { err := processor.Process(timeout) require.NoError(t, err) @@ -544,7 +545,7 @@ func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) { // at this point we have created QCs for view N-1 and N additionally a TC for view N, we can create TC for view N+1 // with timeout objects containing both QC and TC for view N - aggregator, err = NewTimeoutSignatureAggregator(view+1, stakingSigners, msig.CollectorTimeoutTag) + aggregator, err = NewTimeoutSignatureAggregator(view+1, stakingSignersSkeleton, msig.CollectorTimeoutTag) require.NoError(t, err) notifier = mocks.NewTimeoutCollectorConsumer(t) @@ -555,8 +556,8 @@ func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) { // part of committee will use QC, another part TC, this will result in aggregated signature consisting // of two types of messages with views N-1 and N representing the newest QC known to replicas. - timeoutsWithQC := createTimeouts(stakingSigners[:len(stakingSigners)/2], view+1, newestQC, nil) - timeoutsWithTC := createTimeouts(stakingSigners[len(stakingSigners)/2:], view+1, olderQC, lastViewTC) + timeoutsWithQC := createTimeouts(stakingSignersSkeleton[:len(stakingSignersSkeleton)/2], view+1, newestQC, nil) + timeoutsWithTC := createTimeouts(stakingSignersSkeleton[len(stakingSignersSkeleton)/2:], view+1, olderQC, lastViewTC) timeouts = append(timeoutsWithQC, timeoutsWithTC...) 
for _, timeout := range timeouts { err := processor.Process(timeout) @@ -570,7 +571,7 @@ func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) { func createRealQC( t *testing.T, committee hotstuff.DynamicCommittee, - signers flow.IdentityList, + signers flow.IdentitySkeletonList, signerObjects map[flow.Identifier]*verification.StakingSigner, block *model.Block, ) *flow.QuorumCertificate { diff --git a/consensus/hotstuff/validator/validator_test.go b/consensus/hotstuff/validator/validator_test.go index 6eb3da069ce..43ec36d5f19 100644 --- a/consensus/hotstuff/validator/validator_test.go +++ b/consensus/hotstuff/validator/validator_test.go @@ -30,14 +30,14 @@ type ProposalSuite struct { suite.Suite participants flow.IdentityList indices []byte - leader *flow.Identity + leader *flow.IdentitySkeleton finalized uint64 parent *model.Block block *model.Block - voters flow.IdentityList + voters flow.IdentitySkeletonList proposal *model.Proposal vote *model.Vote - voter *flow.Identity + voter *flow.IdentitySkeleton committee *mocks.Replicas verifier *mocks.Verifier validator *Validator @@ -47,7 +47,7 @@ func (ps *ProposalSuite) SetupTest() { // the leader is a random node for now ps.finalized = uint64(rand.Uint32() + 1) ps.participants = unittest.IdentityListFixture(8, unittest.WithRole(flow.RoleConsensus)) - ps.leader = ps.participants[0] + ps.leader = &ps.participants[0].IdentitySkeleton // the parent is the last finalized block, followed directly by a block from the leader ps.parent = helper.MakeBlock( @@ -69,7 +69,7 @@ func (ps *ProposalSuite) SetupTest() { voterIDs, err := signature.DecodeSignerIndicesToIdentifiers(ps.participants.NodeIDs(), ps.block.QC.SignerIndices) require.NoError(ps.T(), err) - ps.voters = ps.participants.Filter(filter.HasNodeID(voterIDs...)) + ps.voters = ps.participants.Filter(filter.HasNodeID(voterIDs...)).ToSkeleton() ps.proposal = &model.Proposal{Block: ps.block} ps.vote = ps.proposal.ProposerVote() ps.voter = ps.leader @@ -77,15 +77,15 @@ func (ps *ProposalSuite) SetupTest() { // set up the mocked hotstuff Replicas state ps.committee = &mocks.Replicas{} ps.committee.On("LeaderForView", ps.block.View).Return(ps.leader.NodeID, nil) - ps.committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(ps.participants.TotalWeight()), nil) + ps.committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(ps.participants.ToSkeleton().TotalWeight()), nil) ps.committee.On("IdentitiesByEpoch", mock.Anything).Return( - func(_ uint64) flow.IdentityList { - return ps.participants + func(_ uint64) flow.IdentitySkeletonList { + return ps.participants.ToSkeleton() }, nil, ) for _, participant := range ps.participants { - ps.committee.On("IdentityByEpoch", mock.Anything, participant.NodeID).Return(participant, nil) + ps.committee.On("IdentityByEpoch", mock.Anything, participant.NodeID).Return(&participant.IdentitySkeleton, nil) } // set up the mocked verifier @@ -152,7 +152,7 @@ func (ps *ProposalSuite) TestProposalWrongLeader() { // change the hotstuff.Replicas to return a different leader *ps.committee = mocks.Replicas{} ps.committee.On("LeaderForView", ps.block.View).Return(ps.participants[1].NodeID, nil) - for _, participant := range ps.participants { + for _, participant := range ps.participants.ToSkeleton() { ps.committee.On("IdentityByEpoch", mock.Anything, participant.NodeID).Return(participant, nil) } @@ -465,7 +465,7 @@ func TestValidateVote(t *testing.T) { type VoteSuite struct { suite.Suite - signer 
*flow.Identity + signer *flow.IdentitySkeleton block *model.Block vote *model.Vote verifier *mocks.Verifier @@ -476,7 +476,7 @@ type VoteSuite struct { func (vs *VoteSuite) SetupTest() { // create a random signing identity - vs.signer = unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus)) + vs.signer = &unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus)).IdentitySkeleton // create a block that should be signed vs.block = helper.MakeBlock() @@ -570,8 +570,8 @@ func TestValidateQC(t *testing.T) { type QCSuite struct { suite.Suite - participants flow.IdentityList - signers flow.IdentityList + participants flow.IdentitySkeletonList + signers flow.IdentitySkeletonList block *model.Block qc *flow.QuorumCertificate committee *mocks.Replicas @@ -584,7 +584,7 @@ func (qs *QCSuite) SetupTest() { qs.participants = unittest.IdentityListFixture(10, unittest.WithRole(flow.RoleConsensus), unittest.WithWeight(1), - ) + ).ToSkeleton() // signers are a qualified majority at 7 qs.signers = qs.participants[:7] @@ -599,7 +599,7 @@ func (qs *QCSuite) SetupTest() { // return the correct participants and identities from view state qs.committee = &mocks.Replicas{} qs.committee.On("IdentitiesByEpoch", mock.Anything).Return( - func(_ uint64) flow.IdentityList { + func(_ uint64) flow.IdentitySkeletonList { return qs.participants }, nil, @@ -726,8 +726,8 @@ func TestValidateTC(t *testing.T) { type TCSuite struct { suite.Suite - participants flow.IdentityList - signers flow.IdentityList + participants flow.IdentitySkeletonList + signers flow.IdentitySkeletonList indices []byte block *model.Block tc *flow.TimeoutCertificate @@ -742,7 +742,7 @@ func (s *TCSuite) SetupTest() { s.participants = unittest.IdentityListFixture(10, unittest.WithRole(flow.RoleConsensus), unittest.WithWeight(1), - ) + ).ToSkeleton() // signers are a qualified majority at 7 s.signers = s.participants[:7] @@ -775,7 +775,7 @@ func (s *TCSuite) SetupTest() { // return the correct participants and identities from view state s.committee = &mocks.DynamicCommittee{} s.committee.On("IdentitiesByEpoch", mock.Anything, mock.Anything).Return( - func(view uint64) flow.IdentityList { + func(view uint64) flow.IdentitySkeletonList { return s.participants }, nil, diff --git a/consensus/hotstuff/verification/combined_signer_v2_test.go b/consensus/hotstuff/verification/combined_signer_v2_test.go index f5fbac9bb0d..aba2084b7cd 100644 --- a/consensus/hotstuff/verification/combined_signer_v2_test.go +++ b/consensus/hotstuff/verification/combined_signer_v2_test.go @@ -40,11 +40,11 @@ func TestCombinedSignWithBeaconKey(t *testing.T) { beaconKeyStore.On("ByView", view).Return(beaconKey, nil) stakingPriv := unittest.StakingPrivKeyFixture() - nodeID := unittest.IdentityFixture() + nodeID := &unittest.IdentityFixture().IdentitySkeleton nodeID.NodeID = signerID nodeID.StakingPubKey = stakingPriv.PublicKey() - me, err := local.New(nodeID.IdentitySkeleton, stakingPriv) + me, err := local.New(*nodeID, stakingPriv) require.NoError(t, err) signer := NewCombinedSigner(me, beaconKeyStore) @@ -96,7 +96,7 @@ func TestCombinedSignWithBeaconKey(t *testing.T) { require.ErrorIs(t, err, model.ErrInvalidSignature) // vote by different signer should be invalid - wrongVoter := identities[1] + wrongVoter := &identities[1].IdentitySkeleton wrongVoter.StakingPubKey = unittest.StakingPrivKeyFixture().PublicKey() err = verifier.VerifyVote(wrongVoter, vote.SigData, block.View, block.BlockID) require.ErrorIs(t, err, model.ErrInvalidSignature) @@ -133,11 +133,11 @@ func 
TestCombinedSignWithNoBeaconKey(t *testing.T) { beaconKeyStore.On("ByView", view).Return(nil, module.ErrNoBeaconKeyForEpoch) stakingPriv := unittest.StakingPrivKeyFixture() - nodeID := unittest.IdentityFixture() + nodeID := &unittest.IdentityFixture().IdentitySkeleton nodeID.NodeID = signerID nodeID.StakingPubKey = stakingPriv.PublicKey() - me, err := local.New(nodeID.IdentitySkeleton, stakingPriv) + me, err := local.New(*nodeID, stakingPriv) require.NoError(t, err) signer := NewCombinedSigner(me, beaconKeyStore) @@ -200,7 +200,7 @@ func Test_VerifyQC_EmptySigners(t *testing.T) { sigData, err := encoder.Encode(&emptySignersInput) require.NoError(t, err) - err = verifier.VerifyQC([]*flow.Identity{}, sigData, block.View, block.BlockID) + err = verifier.VerifyQC(flow.IdentitySkeletonList{}, sigData, block.View, block.BlockID) require.True(t, model.IsInsufficientSignaturesError(err)) err = verifier.VerifyQC(nil, sigData, block.View, block.BlockID) diff --git a/consensus/hotstuff/verification/combined_signer_v3_test.go b/consensus/hotstuff/verification/combined_signer_v3_test.go index 4afd14dcb85..2eeb63687af 100644 --- a/consensus/hotstuff/verification/combined_signer_v3_test.go +++ b/consensus/hotstuff/verification/combined_signer_v3_test.go @@ -40,11 +40,11 @@ func TestCombinedSignWithBeaconKeyV3(t *testing.T) { beaconKeyStore.On("ByView", view).Return(beaconKey, nil) stakingPriv := unittest.StakingPrivKeyFixture() - nodeID := unittest.IdentityFixture() + nodeID := &unittest.IdentityFixture().IdentitySkeleton nodeID.NodeID = signerID nodeID.StakingPubKey = stakingPriv.PublicKey() - me, err := local.New(nodeID.IdentitySkeleton, stakingPriv) + me, err := local.New(*nodeID, stakingPriv) require.NoError(t, err) signer := NewCombinedSignerV3(me, beaconKeyStore) @@ -100,11 +100,11 @@ func TestCombinedSignWithNoBeaconKeyV3(t *testing.T) { beaconKeyStore.On("ByView", view).Return(nil, module.ErrNoBeaconKeyForEpoch) stakingPriv := unittest.StakingPrivKeyFixture() - nodeID := unittest.IdentityFixture() + nodeID := &unittest.IdentityFixture().IdentitySkeleton nodeID.NodeID = signerID nodeID.StakingPubKey = stakingPriv.PublicKey() - me, err := local.New(nodeID.IdentitySkeleton, stakingPriv) + me, err := local.New(*nodeID, stakingPriv) require.NoError(t, err) signer := NewCombinedSignerV3(me, beaconKeyStore) @@ -161,7 +161,7 @@ func Test_VerifyQCV3(t *testing.T) { stakingSigners := generateIdentitiesForPrivateKeys(t, privStakingKeys) rbSigners := generateIdentitiesForPrivateKeys(t, privRbKeyShares) registerPublicRbKeys(t, dkg, rbSigners.NodeIDs(), privRbKeyShares) - allSigners := append(append(flow.IdentityList{}, stakingSigners...), rbSigners...) 
+ allSigners := append(append(flow.IdentityList{}, stakingSigners...), rbSigners...).ToSkeleton() packedSigData := unittest.RandomBytes(1021) unpackedSigData := hotstuff.BlockSignatureData{ @@ -272,7 +272,7 @@ func Test_VerifyQC_EmptySignersV3(t *testing.T) { sigData, err := encoder.Encode(&emptySignersInput) require.NoError(t, err) - err = verifier.VerifyQC([]*flow.Identity{}, sigData, block.View, block.BlockID) + err = verifier.VerifyQC(flow.IdentitySkeletonList{}, sigData, block.View, block.BlockID) require.True(t, model.IsInsufficientSignaturesError(err)) err = verifier.VerifyQC(nil, sigData, block.View, block.BlockID) diff --git a/consensus/hotstuff/verification/staking_signer_test.go b/consensus/hotstuff/verification/staking_signer_test.go index 33ead119498..69f31bdfed3 100644 --- a/consensus/hotstuff/verification/staking_signer_test.go +++ b/consensus/hotstuff/verification/staking_signer_test.go @@ -50,8 +50,8 @@ func TestStakingSigner_CreateProposal(t *testing.T) { me, err := local.New(signer.IdentitySkeleton, stakingPriv) require.NoError(t, err) - signerIdentity := unittest.IdentityFixture(unittest.WithNodeID(signerID), - unittest.WithStakingPubKey(stakingPriv.PublicKey())) + signerIdentity := &unittest.IdentityFixture(unittest.WithNodeID(signerID), + unittest.WithStakingPubKey(stakingPriv.PublicKey())).IdentitySkeleton signer := NewStakingSigner(me) @@ -91,8 +91,8 @@ func TestStakingSigner_CreateVote(t *testing.T) { me, err := local.New(signer.IdentitySkeleton, stakingPriv) require.NoError(t, err) - signerIdentity := unittest.IdentityFixture(unittest.WithNodeID(signerID), - unittest.WithStakingPubKey(stakingPriv.PublicKey())) + signerIdentity := &unittest.IdentityFixture(unittest.WithNodeID(signerID), + unittest.WithStakingPubKey(stakingPriv.PublicKey())).IdentitySkeleton signer := NewStakingSigner(me) @@ -114,7 +114,7 @@ func TestStakingSigner_VerifyQC(t *testing.T) { sigData := unittest.RandomBytes(127) verifier := NewStakingVerifier() - err := verifier.VerifyQC([]*flow.Identity{}, sigData, block.View, block.BlockID) + err := verifier.VerifyQC(flow.IdentitySkeletonList{}, sigData, block.View, block.BlockID) require.True(t, model.IsInsufficientSignaturesError(err)) err = verifier.VerifyQC(nil, sigData, block.View, block.BlockID) diff --git a/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go b/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go index 926f5fe8946..235125ee122 100644 --- a/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go +++ b/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go @@ -858,8 +858,8 @@ func TestCombinedVoteProcessorV2_BuildVerifyQC(t *testing.T) { require.NoError(t, err) committee := &mockhotstuff.DynamicCommittee{} - committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(allIdentities.TotalWeight()), nil) - committee.On("IdentitiesByEpoch", block.View).Return(allIdentities, nil) + committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(allIdentities.ToSkeleton().TotalWeight()), nil) + committee.On("IdentitiesByEpoch", block.View).Return(allIdentities.ToSkeleton(), nil) committee.On("IdentitiesByBlock", block.BlockID).Return(allIdentities, nil) committee.On("DKG", block.View).Return(inmemDKG, nil) @@ -938,7 +938,7 @@ func TestReadRandomSourceFromPackedQCV2(t *testing.T) { // create a packer committee := &mockhotstuff.DynamicCommittee{} committee.On("IdentitiesByBlock", block.BlockID).Return(allSigners, 
nil) - committee.On("IdentitiesByEpoch", block.View).Return(allSigners, nil) + committee.On("IdentitiesByEpoch", block.View).Return(allSigners.ToSkeleton(), nil) packer := signature.NewConsensusSigDataPacker(committee) qc, err := buildQCWithPackerAndSigData(packer, block, blockSigData) diff --git a/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go b/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go index cca64d1b667..950dc2762df 100644 --- a/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go +++ b/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go @@ -995,8 +995,8 @@ func TestCombinedVoteProcessorV3_BuildVerifyQC(t *testing.T) { committee := &mockhotstuff.DynamicCommittee{} committee.On("IdentitiesByBlock", block.BlockID).Return(allIdentities, nil) - committee.On("IdentitiesByEpoch", block.View).Return(allIdentities, nil) - committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(allIdentities.TotalWeight()), nil) + committee.On("IdentitiesByEpoch", block.View).Return(allIdentities.ToSkeleton(), nil) + committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(allIdentities.ToSkeleton().TotalWeight()), nil) committee.On("DKG", block.View).Return(inmemDKG, nil) votes := make([]*model.Vote, 0, len(allIdentities)) diff --git a/consensus/hotstuff/votecollector/staking_vote_processor_test.go b/consensus/hotstuff/votecollector/staking_vote_processor_test.go index c463f221ffb..279adea73ca 100644 --- a/consensus/hotstuff/votecollector/staking_vote_processor_test.go +++ b/consensus/hotstuff/votecollector/staking_vote_processor_test.go @@ -2,6 +2,7 @@ package votecollector import ( "errors" + "github.com/onflow/flow-go/model/flow/order" "sync" "testing" @@ -264,7 +265,7 @@ func TestStakingVoteProcessorV2_BuildVerifyQC(t *testing.T) { require.NoError(t, err) signers[identity.NodeID] = verification.NewStakingSigner(me) - }) + }).Sort(order.Canonical) leader := stakingSigners[0] @@ -272,9 +273,9 @@ func TestStakingVoteProcessorV2_BuildVerifyQC(t *testing.T) { helper.WithBlockProposer(leader.NodeID)) committee := &mockhotstuff.DynamicCommittee{} - committee.On("IdentitiesByEpoch", block.View).Return(stakingSigners, nil) + committee.On("IdentitiesByEpoch", block.View).Return(stakingSigners.ToSkeleton(), nil) committee.On("IdentitiesByBlock", block.BlockID).Return(stakingSigners, nil) - committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(stakingSigners.TotalWeight()), nil) + committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(stakingSigners.ToSkeleton().TotalWeight()), nil) votes := make([]*model.Vote, 0, len(stakingSigners)) diff --git a/consensus/integration/epoch_test.go b/consensus/integration/epoch_test.go index aa41de368fe..914966597ff 100644 --- a/consensus/integration/epoch_test.go +++ b/consensus/integration/epoch_test.go @@ -19,8 +19,6 @@ import ( // should be able to reach consensus when identity table contains nodes with 0 weight. 
func TestUnweightedNode(t *testing.T) { - // stop after building 2 blocks to ensure we can tolerate 0-weight (joining next - // epoch) identities, but don't cross an epoch boundary // stop after building 2 blocks to ensure we can tolerate 0-weight (joining next // epoch) identities, but don't cross an epoch boundary stopper := NewStopper(2, 0) @@ -254,7 +252,7 @@ func withNextEpoch( // and all the NEW identities in next epoch, with 0 weight nextEpochIdentities. Filter(filter.Not(filter.In(encodableSnapshot.Identities))). - Map(mapfunc.WithWeight(0))..., + Map(mapfunc.WithInitialWeight(0))..., ).Sort(order.Canonical) return inmem.SnapshotFromEncodable(encodableSnapshot) diff --git a/engine/access/rpc/engine.go b/engine/access/rpc/engine.go index d4c812df997..559b84d8858 100644 --- a/engine/access/rpc/engine.go +++ b/engine/access/rpc/engine.go @@ -232,6 +232,17 @@ func NewBuilder(log zerolog.Logger, return builder, nil } +func WaitForServerStart(server component.Component) component.ComponentWorker { + return func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + select { + case <-ctx.Done(): + case <-server.Ready(): + ready() + } + <-server.Done() + } +} + // shutdownWorker is a worker routine which shuts down all servers when the context is cancelled. func (e *Engine) shutdownWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { ready() diff --git a/model/flow/identity.go b/model/flow/identity.go index 6cdc65f97e8..bba780106fc 100644 --- a/model/flow/identity.go +++ b/model/flow/identity.go @@ -494,6 +494,16 @@ func (il IdentityList) ByIndex(index uint) (*Identity, bool) { return il[int(index)], true } +// ByNodeID gets a node from the list by node ID. +func (il IdentitySkeletonList) ByNodeID(nodeID Identifier) (*IdentitySkeleton, bool) { + for _, identity := range il { + if identity.NodeID == nodeID { + return identity, true + } + } + return nil, false +} + // ByNodeID gets a node from the list by node ID. func (il IdentityList) ByNodeID(nodeID Identifier) (*Identity, bool) { for _, identity := range il { diff --git a/model/flow/mapfunc/identity.go b/model/flow/mapfunc/identity.go index 89fc568b039..736f2044cd2 100644 --- a/model/flow/mapfunc/identity.go +++ b/model/flow/mapfunc/identity.go @@ -4,8 +4,9 @@ import ( "github.com/onflow/flow-go/model/flow" ) -func WithWeight(weight uint64) flow.IdentityMapFunc { +func WithInitialWeight(weight uint64) flow.IdentityMapFunc { return func(identity flow.Identity) flow.Identity { + identity.InitialWeight = weight identity.Weight = weight return identity } diff --git a/module/signature/signer_indices_test.go b/module/signature/signer_indices_test.go index c34daea4f37..efd320c2f25 100644 --- a/module/signature/signer_indices_test.go +++ b/module/signature/signer_indices_test.go @@ -2,6 +2,7 @@ package signature_test import ( "fmt" + "golang.org/x/exp/slices" "sort" "testing" @@ -22,7 +23,7 @@ import ( // 2. 
for the decoding step, we offer an optimized convenience function to directly // decode to full identities: Indices --decode--> Identities func TestEncodeDecodeIdentities(t *testing.T) { - canonicalIdentities := unittest.IdentityListFixture(20) + canonicalIdentities := unittest.IdentityListFixture(20).ToSkeleton() canonicalIdentifiers := canonicalIdentities.NodeIDs() for s := 0; s < 20; s++ { for e := s; e < 20; e++ { @@ -148,7 +149,8 @@ func Test_DecodeSigTypeToStakingAndBeaconSigners(t *testing.T) { numRandomBeaconSigners := rapid.IntRange(0, committeeSize-numStakingSigners).Draw(t, "numRandomBeaconSigners").(int) // create committee - committeeIdentities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) + committeeIdentities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)). + Sort(order.Canonical) committee := committeeIdentities.NodeIDs() stakingSigners, beaconSigners := sampleSigners(committee, numStakingSigners, numRandomBeaconSigners) @@ -157,7 +159,7 @@ func Test_DecodeSigTypeToStakingAndBeaconSigners(t *testing.T) { require.NoError(t, err) // decode - decSignerIdentites, err := signature.DecodeSignerIndicesToIdentities(committeeIdentities, signerIndices) + decSignerIdentites, err := signature.DecodeSignerIndicesToIdentities(committeeIdentities.ToSkeleton(), signerIndices) require.NoError(t, err) decStakingSigners, decBeaconSigners, err := signature.DecodeSigTypeToStakingAndBeaconSigners(decSignerIdentites, sigTypes) require.NoError(t, err) @@ -182,10 +184,9 @@ func Test_DecodeSigTypeToStakingAndBeaconSigners(t *testing.T) { } func Test_ValidPaddingErrIncompatibleBitVectorLength(t *testing.T) { - var signers flow.IdentityList var err error // if bits is multiply of 8, then there is no padding needed, any sig type can be decoded. 
- signers = unittest.IdentityListFixture(16) + signers := unittest.IdentityListFixture(16).ToSkeleton() // 16 bits needs 2 bytes, provided 2 bytes _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, unittest.RandomBytes(2)) @@ -202,7 +203,7 @@ func Test_ValidPaddingErrIncompatibleBitVectorLength(t *testing.T) { require.ErrorIs(t, err, signature.ErrIncompatibleBitVectorLength, "low-level error representing the failure should be ErrIncompatibleBitVectorLength") // if bits is not multiply of 8, then padding is needed - signers = unittest.IdentityListFixture(15) + signers = unittest.IdentityListFixture(15).ToSkeleton() _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(255), byte(254)}) require.NoError(t, err) @@ -218,30 +219,30 @@ func Test_ValidPaddingErrIncompatibleBitVectorLength(t *testing.T) { // if bits is not multiply of 8, // 1 byte more - signers = unittest.IdentityListFixture(0) + signers = unittest.IdentityListFixture(0).ToSkeleton() _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(255)}) require.True(t, signature.IsInvalidSigTypesError(err), "API-level error should be InvalidSigTypesError") require.ErrorIs(t, err, signature.ErrIncompatibleBitVectorLength, "low-level error representing the failure should be ErrIncompatibleBitVectorLength") // 1 byte more - signers = unittest.IdentityListFixture(1) + signers = unittest.IdentityListFixture(1).ToSkeleton() _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(0), byte(0)}) require.True(t, signature.IsInvalidSigTypesError(err), "API-level error should be InvalidSigTypesError") require.ErrorIs(t, err, signature.ErrIncompatibleBitVectorLength, "low-level error representing the failure should be ErrIncompatibleBitVectorLength") // 1 byte less - signers = unittest.IdentityListFixture(7) + signers = unittest.IdentityListFixture(7).ToSkeleton() _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{}) require.True(t, signature.IsInvalidSigTypesError(err), "API-level error should be InvalidSigTypesError") require.ErrorIs(t, err, signature.ErrIncompatibleBitVectorLength, "low-level error representing the failure should be ErrIncompatibleBitVectorLength") } func TestValidPaddingErrIllegallyPaddedBitVector(t *testing.T) { - var signers flow.IdentityList + var signers flow.IdentitySkeletonList var err error // if bits is multiply of 8, then there is no padding needed, any sig type can be decoded. 
for count := 1; count < 8; count++ { - signers = unittest.IdentityListFixture(count) + signers = unittest.IdentityListFixture(count).ToSkeleton() _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(255)}) // last bit should be 0, but 1 require.True(t, signature.IsInvalidSigTypesError(err), "API-level error should be InvalidSigTypesError") require.ErrorIs(t, err, signature.ErrIllegallyPaddedBitVector, "low-level error representing the failure should be ErrIllegallyPaddedBitVector") @@ -252,7 +253,7 @@ func TestValidPaddingErrIllegallyPaddedBitVector(t *testing.T) { } for count := 9; count < 16; count++ { - signers = unittest.IdentityListFixture(count) + signers = unittest.IdentityListFixture(count).ToSkeleton() _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(255), byte(255)}) // last bit should be 0, but 1 require.True(t, signature.IsInvalidSigTypesError(err), "API-level error should be InvalidSigTypesError") require.ErrorIs(t, err, signature.ErrIllegallyPaddedBitVector, "low-level error representing the failure should be ErrIllegallyPaddedBitVector") @@ -340,16 +341,25 @@ func Test_DecodeSignerIndicesToIdentities(t *testing.T) { // create committee identities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) - signers := identities.Sample(uint(numSigners)) + signers := identities.Sample(uint(numSigners)).ToSkeleton() // encode signerIndices, err := signature.EncodeSignersToIndices(identities.NodeIDs(), signers.NodeIDs()) require.NoError(t, err) // decode and verify - decodedSigners, err := signature.DecodeSignerIndicesToIdentities(identities, signerIndices) + decodedSigners, err := signature.DecodeSignerIndicesToIdentities(identities.ToSkeleton(), signerIndices) require.NoError(t, err) - require.Equal(t, signers.Sort(order.Canonical), decodedSigners.Sort(order.Canonical)) + + slices.SortFunc(signers, func(lhs, rhs *flow.IdentitySkeleton) bool { + return order.IdentifierCanonical(lhs.NodeID, rhs.NodeID) + }) + + slices.SortFunc(decodedSigners, func(lhs, rhs *flow.IdentitySkeleton) bool { + return order.IdentifierCanonical(lhs.NodeID, rhs.NodeID) + }) + + require.Equal(t, signers, decodedSigners) }) } diff --git a/state/protocol/badger/snapshot.go b/state/protocol/badger/snapshot.go index 33522480301..3fac1ad76bc 100644 --- a/state/protocol/badger/snapshot.go +++ b/state/protocol/badger/snapshot.go @@ -166,7 +166,7 @@ func (s *Snapshot) Identities(selector flow.IdentityFilter) (flow.IdentityList, // add the identities from next/last epoch, with weight set to 0 identities = append( identities, - otherEpochIdentities.Map(mapfunc.WithWeight(0))..., + otherEpochIdentities.Map(mapfunc.WithInitialWeight(0))..., ) // apply the filter to the participants diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index eeeedf0ecc5..8a582a3b211 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -977,6 +977,13 @@ func WithRole(role flow.Role) func(*flow.Identity) { } } +// WithInitialWeight sets the initial weight on an identity fixture. +func WithInitialWeight(weight uint64) func(*flow.Identity) { + return func(identity *flow.Identity) { + identity.InitialWeight = weight + } +} + // WithWeight sets the weight on an identity fixture. 
func WithWeight(weight uint64) func(*flow.Identity) { return func(identity *flow.Identity) { From 6682f278dafbb8c07e0937f4af48344a892ad6df Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Wed, 5 Jul 2023 22:00:32 +0300 Subject: [PATCH 11/46] Fixed more tests --- cmd/util/cmd/execution-state-extract/export_report.json | 4 ++-- consensus/integration/epoch_test.go | 2 +- engine/testutil/nodes.go | 8 ++++---- model/flow/mapfunc/identity.go | 6 ++++++ module/signature/signer_indices_test.go | 7 ++++--- state/protocol/badger/snapshot.go | 2 +- 6 files changed, 18 insertions(+), 11 deletions(-) diff --git a/cmd/util/cmd/execution-state-extract/export_report.json b/cmd/util/cmd/execution-state-extract/export_report.json index ac9526fe448..b3bd9916676 100644 --- a/cmd/util/cmd/execution-state-extract/export_report.json +++ b/cmd/util/cmd/execution-state-extract/export_report.json @@ -1,6 +1,6 @@ { "EpochCounter": 0, - "PreviousStateCommitment": "c14260392ed0f6d2a46782a6ea6c9d9259263ad750cf5df70c2bce3b84196373", - "CurrentStateCommitment": "c14260392ed0f6d2a46782a6ea6c9d9259263ad750cf5df70c2bce3b84196373", + "PreviousStateCommitment": "d1890f89344a5a1b5b47b0e1ee81a7620214849e59aac83509f36bcf84424c27", + "CurrentStateCommitment": "d1890f89344a5a1b5b47b0e1ee81a7620214849e59aac83509f36bcf84424c27", "ReportSucceeded": true } \ No newline at end of file diff --git a/consensus/integration/epoch_test.go b/consensus/integration/epoch_test.go index 914966597ff..535ef9b8982 100644 --- a/consensus/integration/epoch_test.go +++ b/consensus/integration/epoch_test.go @@ -252,7 +252,7 @@ func withNextEpoch( // and all the NEW identities in next epoch, with 0 weight nextEpochIdentities. Filter(filter.Not(filter.In(encodableSnapshot.Identities))). - Map(mapfunc.WithInitialWeight(0))..., + Map(mapfunc.WithWeight(0))..., ).Sort(order.Canonical) return inmem.SnapshotFromEncodable(encodableSnapshot) diff --git a/engine/testutil/nodes.go b/engine/testutil/nodes.go index 3ec40aa306b..c4881724eff 100644 --- a/engine/testutil/nodes.go +++ b/engine/testutil/nodes.go @@ -858,7 +858,7 @@ func (s *RoundRobinLeaderSelection) IdentityByBlock(_ flow.Identifier, participa } func (s *RoundRobinLeaderSelection) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) { - return s.identities, nil + return s.identities.ToSkeleton(), nil } func (s *RoundRobinLeaderSelection) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { @@ -866,7 +866,7 @@ func (s *RoundRobinLeaderSelection) IdentityByEpoch(view uint64, participantID f if !found { return nil, model.NewInvalidSignerErrorf("unknown participant %x", participantID) } - return id, nil + return &id.IdentitySkeleton, nil } func (s *RoundRobinLeaderSelection) LeaderForView(view uint64) (flow.Identifier, error) { @@ -874,11 +874,11 @@ func (s *RoundRobinLeaderSelection) LeaderForView(view uint64) (flow.Identifier, } func (s *RoundRobinLeaderSelection) QuorumThresholdForView(_ uint64) (uint64, error) { - return committees.WeightThresholdToBuildQC(s.identities.TotalWeight()), nil + return committees.WeightThresholdToBuildQC(s.identities.ToSkeleton().TotalWeight()), nil } func (s *RoundRobinLeaderSelection) TimeoutThresholdForView(_ uint64) (uint64, error) { - return committees.WeightThresholdToTimeout(s.identities.TotalWeight()), nil + return committees.WeightThresholdToTimeout(s.identities.ToSkeleton().TotalWeight()), nil } func (s *RoundRobinLeaderSelection) Self() flow.Identifier { diff --git a/model/flow/mapfunc/identity.go 
b/model/flow/mapfunc/identity.go index 736f2044cd2..aaab9de0ae4 100644 --- a/model/flow/mapfunc/identity.go +++ b/model/flow/mapfunc/identity.go @@ -7,6 +7,12 @@ import ( func WithInitialWeight(weight uint64) flow.IdentityMapFunc { return func(identity flow.Identity) flow.Identity { identity.InitialWeight = weight + return identity + } +} + +func WithWeight(weight uint64) flow.IdentityMapFunc { + return func(identity flow.Identity) flow.Identity { identity.Weight = weight return identity } diff --git a/module/signature/signer_indices_test.go b/module/signature/signer_indices_test.go index efd320c2f25..ac6dcf4adda 100644 --- a/module/signature/signer_indices_test.go +++ b/module/signature/signer_indices_test.go @@ -166,19 +166,20 @@ func Test_DecodeSigTypeToStakingAndBeaconSigners(t *testing.T) { // verify; note that there is a slightly different convention between Filter and the decoding logic: // Filter returns nil for an empty list, while the decoding logic returns an instance of an empty slice - sigIdentities := committeeIdentities.Filter(filter.Or(filter.HasNodeID(stakingSigners...), filter.HasNodeID(beaconSigners...))) // signer identities in canonical order + sigIdentities := committeeIdentities.Filter( + filter.Or(filter.HasNodeID(stakingSigners...), filter.HasNodeID(beaconSigners...))).ToSkeleton() // signer identities in canonical order if len(stakingSigners)+len(decBeaconSigners) > 0 { require.Equal(t, sigIdentities, decSignerIdentites) } if len(stakingSigners) == 0 { require.Empty(t, decStakingSigners) } else { - require.Equal(t, committeeIdentities.Filter(filter.HasNodeID(stakingSigners...)), decStakingSigners) + require.Equal(t, committeeIdentities.Filter(filter.HasNodeID(stakingSigners...)).ToSkeleton(), decStakingSigners) } if len(decBeaconSigners) == 0 { require.Empty(t, decBeaconSigners) } else { - require.Equal(t, committeeIdentities.Filter(filter.HasNodeID(beaconSigners...)), decBeaconSigners) + require.Equal(t, committeeIdentities.Filter(filter.HasNodeID(beaconSigners...)).ToSkeleton(), decBeaconSigners) } }) } diff --git a/state/protocol/badger/snapshot.go b/state/protocol/badger/snapshot.go index 3fac1ad76bc..33522480301 100644 --- a/state/protocol/badger/snapshot.go +++ b/state/protocol/badger/snapshot.go @@ -166,7 +166,7 @@ func (s *Snapshot) Identities(selector flow.IdentityFilter) (flow.IdentityList, // add the identities from next/last epoch, with weight set to 0 identities = append( identities, - otherEpochIdentities.Map(mapfunc.WithInitialWeight(0))..., + otherEpochIdentities.Map(mapfunc.WithWeight(0))..., ) // apply the filter to the participants From 47e9de50c15ece5796d59db4915b43e3472d47e4 Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Fri, 7 Jul 2023 12:54:51 +0300 Subject: [PATCH 12/46] LInted --- consensus/hotstuff/votecollector/staking_vote_processor_test.go | 2 +- module/signature/signer_indices_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/hotstuff/votecollector/staking_vote_processor_test.go b/consensus/hotstuff/votecollector/staking_vote_processor_test.go index 279adea73ca..d82059b3405 100644 --- a/consensus/hotstuff/votecollector/staking_vote_processor_test.go +++ b/consensus/hotstuff/votecollector/staking_vote_processor_test.go @@ -2,7 +2,6 @@ package votecollector import ( "errors" - "github.com/onflow/flow-go/model/flow/order" "sync" "testing" @@ -20,6 +19,7 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/verification" "github.com/onflow/flow-go/crypto" 
"github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module/local" modulemock "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/signature" diff --git a/module/signature/signer_indices_test.go b/module/signature/signer_indices_test.go index ac6dcf4adda..1d011723b85 100644 --- a/module/signature/signer_indices_test.go +++ b/module/signature/signer_indices_test.go @@ -2,11 +2,11 @@ package signature_test import ( "fmt" - "golang.org/x/exp/slices" "sort" "testing" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" "pgregory.net/rapid" "github.com/onflow/flow-go/ledger/common/bitutils" From d436320fd1b41d086848af318f5c1b641efe15d4 Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Fri, 7 Jul 2023 13:37:41 +0300 Subject: [PATCH 13/46] Added protocol state entry data structure and added much needed ID functions --- model/flow/epoch.go | 5 ++++ model/flow/protocol_state.go | 47 ++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+) create mode 100644 model/flow/protocol_state.go diff --git a/model/flow/epoch.go b/model/flow/epoch.go index 3f27586f2a2..f7ce38bb1ee 100644 --- a/model/flow/epoch.go +++ b/model/flow/epoch.go @@ -434,6 +434,11 @@ type EventIDs struct { CommitID Identifier } +// ID returns hash of the event IDs. +func (e *EventIDs) ID() Identifier { + return MakeID(e) +} + func NewEpochStatus(previousSetup, previousCommit, currentSetup, currentCommit, nextSetup, nextCommit Identifier) (*EpochStatus, error) { status := &EpochStatus{ PreviousEpoch: EventIDs{ diff --git a/model/flow/protocol_state.go b/model/flow/protocol_state.go new file mode 100644 index 00000000000..8bb8d536e41 --- /dev/null +++ b/model/flow/protocol_state.go @@ -0,0 +1,47 @@ +package flow + +// DynamicIdentityEntry encapsulates nodeID and dynamic portion of identity. +type DynamicIdentityEntry struct { + NodeID Identifier + Dynamic DynamicIdentity +} + +type DynamicIdentityEntryList []*DynamicIdentityEntry + +// ProtocolStateEntry is the main structure that will be saved in the database. +// It contains the minimal protocol state that has to be persisted. +type ProtocolStateEntry struct { + // setup and commit event IDs for current epoch. + CurrentEpochEventIDs EventIDs + // setup and commit event IDs for previous epoch. + PreviousEpochEventIDs EventIDs + // Part of identity table that can be changed during the epoch. + Identities DynamicIdentityEntryList + // InvalidStateTransitionAttempted encodes whether an invalid state transition + // has been detected in this fork. When this happens, epoch fallback is triggered + // AFTER the fork is finalized. + InvalidStateTransitionAttempted bool + // NextEpochProtocolState describes protocol state of the next epoch + NextEpochProtocolState *ProtocolStateEntry +} + +// ID returns hash of entry by hashing all fields. 
+func (e *ProtocolStateEntry) ID() Identifier { + if e == nil { + return ZeroID + } + body := struct { + CurrentEpochEventIDs Identifier + PreviousEpochEventIDs Identifier + Identities DynamicIdentityEntryList + InvalidStateTransitionAttempted bool + NextEpochProtocolStateID Identifier + }{ + CurrentEpochEventIDs: e.CurrentEpochEventIDs.ID(), + PreviousEpochEventIDs: e.PreviousEpochEventIDs.ID(), + Identities: e.Identities, + InvalidStateTransitionAttempted: e.InvalidStateTransitionAttempted, + NextEpochProtocolStateID: e.NextEpochProtocolState.ID(), + } + return MakeID(body) +} From 81f77ce27d4a1c82d95cb8645b3dc3f6148798f3 Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Fri, 7 Jul 2023 13:39:48 +0300 Subject: [PATCH 14/46] Added interface for ProtocolState storage --- storage/protocol_state.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 storage/protocol_state.go diff --git a/storage/protocol_state.go b/storage/protocol_state.go new file mode 100644 index 00000000000..a63399f306d --- /dev/null +++ b/storage/protocol_state.go @@ -0,0 +1,18 @@ +package storage + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage/badger/transaction" +) + +// ProtocolState represents persistent storage for protocol state entries. +type ProtocolState interface { + // StoreTx allows us to store protocol state as part of a DB tx, while still going through the caching layer. + StoreTx(id flow.Identifier, protocolState *flow.ProtocolStateEntry) func(*transaction.Tx) error + // Index indexes the protocol state by block ID. + Index(blockID flow.Identifier, protocolStateID flow.Identifier) func(*transaction.Tx) error + // ByID returns the protocol state by its ID. + ByID(id flow.Identifier) (*flow.ProtocolStateEntry, error) + // ByBlockID returns the protocol state by block ID. 
+ ByBlockID(blockID flow.Identifier) (*flow.ProtocolStateEntry, error) +} From 5299054334db095a59b8787049b67ea60b07c7e5 Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Fri, 7 Jul 2023 14:18:18 +0300 Subject: [PATCH 15/46] Added protocol state database operations --- storage/badger/operation/prefix.go | 2 ++ storage/badger/operation/protocol_state.go | 26 ++++++++++++++++++++++ storage/badger/protocol_state.go | 1 + 3 files changed, 29 insertions(+) create mode 100644 storage/badger/operation/protocol_state.go create mode 100644 storage/badger/protocol_state.go diff --git a/storage/badger/operation/prefix.go b/storage/badger/operation/prefix.go index e75497257ca..d63fed18c41 100644 --- a/storage/badger/operation/prefix.go +++ b/storage/badger/operation/prefix.go @@ -46,6 +46,7 @@ const ( codeExecutionReceiptMeta = 36 codeResultApproval = 37 codeChunk = 38 + codeProtocolState = 39 // codes for indexing single identifier by identifier/integeter codeHeightToBlock = 40 // index mapping height to block ID @@ -54,6 +55,7 @@ const ( codeRefHeightToClusterBlock = 43 // index reference block height to cluster block IDs codeBlockIDToFinalizedSeal = 44 // index _finalized_ seal by sealed block ID codeBlockIDToQuorumCertificate = 45 // index of quorum certificates by block ID + codeProtocolStateByBlockID = 46 // index of protocol state entry by block ID // codes for indexing multiple identifiers by identifier // NOTE: 51 was used for identity indexes before epochs diff --git a/storage/badger/operation/protocol_state.go b/storage/badger/operation/protocol_state.go new file mode 100644 index 00000000000..424e5fead32 --- /dev/null +++ b/storage/badger/operation/protocol_state.go @@ -0,0 +1,26 @@ +package operation + +import ( + "github.com/dgraph-io/badger/v2" + "github.com/onflow/flow-go/model/flow" +) + +// InsertProtocolState inserts a protocol state by ID. +func InsertProtocolState(protocolStateID flow.Identifier, protocolState *flow.ProtocolStateEntry) func(*badger.Txn) error { + return insert(makePrefix(codeProtocolState, protocolStateID), protocolState) +} + +// RetrieveProtocolState retrieves a protocol state by ID. +func RetrieveProtocolState(protocolStateID flow.Identifier, protocolState *flow.ProtocolStateEntry) func(*badger.Txn) error { + return retrieve(makePrefix(codeProtocolState, protocolStateID), protocolState) +} + +// IndexProtocolState indexes a protocol state by block ID. +func IndexProtocolState(blockID flow.Identifier, protocolStateID flow.Identifier) func(*badger.Txn) error { + return insert(makePrefix(codeProtocolStateByBlockID, blockID), protocolStateID) +} + +// LookupProtocolState finds protocol state ID by block ID. 
+func LookupProtocolState(blockID flow.Identifier, protocolStateID *flow.Identifier) func(*badger.Txn) error { + return retrieve(makePrefix(codeProtocolStateByBlockID, blockID), protocolStateID) +} diff --git a/storage/badger/protocol_state.go b/storage/badger/protocol_state.go new file mode 100644 index 00000000000..2b90f775cf1 --- /dev/null +++ b/storage/badger/protocol_state.go @@ -0,0 +1 @@ +package badger From d235fe942bc537158ad60c0c37e15e0e4dcc1a6b Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Mon, 10 Jul 2023 12:36:50 +0300 Subject: [PATCH 16/46] Added protocol state storage implementation --- module/metrics/labels.go | 1 + storage/badger/cluster_payloads.go | 1 + storage/badger/protocol_state.go | 88 ++++++++++++++++++++++++++++++ 3 files changed, 90 insertions(+) diff --git a/module/metrics/labels.go b/module/metrics/labels.go index 353e1b3ca25..88501f5a652 100644 --- a/module/metrics/labels.go +++ b/module/metrics/labels.go @@ -56,6 +56,7 @@ const ( ResourceQC = "qc" ResourceMyReceipt = "my_receipt" ResourceCollection = "collection" + ResourceProtocolState = "protocol_state" ResourceApproval = "approval" ResourceSeal = "seal" ResourcePendingIncorporatedSeal = "pending_incorporated_seal" diff --git a/storage/badger/cluster_payloads.go b/storage/badger/cluster_payloads.go index 84e260b9a75..34f25f2c673 100644 --- a/storage/badger/cluster_payloads.go +++ b/storage/badger/cluster_payloads.go @@ -50,6 +50,7 @@ func NewClusterPayloads(cacheMetrics module.CacheMetrics, db *badger.DB) *Cluste func (cp *ClusterPayloads) storeTx(blockID flow.Identifier, payload *cluster.Payload) func(*transaction.Tx) error { return cp.cache.PutTx(blockID, payload) } + func (cp *ClusterPayloads) retrieveTx(blockID flow.Identifier) func(*badger.Txn) (*cluster.Payload, error) { return func(tx *badger.Txn) (*cluster.Payload, error) { val, err := cp.cache.Get(blockID)(tx) diff --git a/storage/badger/protocol_state.go b/storage/badger/protocol_state.go index 2b90f775cf1..a01ff770f9d 100644 --- a/storage/badger/protocol_state.go +++ b/storage/badger/protocol_state.go @@ -1 +1,89 @@ package badger + +import ( + "fmt" + + "github.com/dgraph-io/badger/v2" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/badger/operation" + "github.com/onflow/flow-go/storage/badger/transaction" +) + +// ProtocolState implements persistent storage for storing entities of protocol state. +type ProtocolState struct { + db *badger.DB + cache *Cache +} + +var _ storage.ProtocolState = (*ProtocolState)(nil) + +// NewProtocolState Creates ProtocolState instance which is a database of protocol state entries +// which supports storing, caching and retrieving by ID and additionally indexed block ID. 
+func NewProtocolState(collector module.CacheMetrics, db *badger.DB, cacheSize uint) *ProtocolState { + store := func(key interface{}, val interface{}) func(*transaction.Tx) error { + id := key.(flow.Identifier) + protocolStateEntry := val.(*flow.ProtocolStateEntry) + return transaction.WithTx(operation.InsertProtocolState(id, protocolStateEntry)) + } + + retrieve := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { + protocolStateID := key.(flow.Identifier) + var protocolStateEntry flow.ProtocolStateEntry + return func(tx *badger.Txn) (interface{}, error) { + err := operation.RetrieveProtocolState(protocolStateID, &protocolStateEntry)(tx) + return &protocolStateEntry, err + } + } + + return &ProtocolState{ + db: db, + cache: newCache(collector, metrics.ResourceProtocolState, + withLimit(cacheSize), + withStore(store), + withRetrieve(retrieve)), + } +} + +// StoreTx allows us to store protocol state as part of a DB tx, while still going through the caching layer. +func (s *ProtocolState) StoreTx(id flow.Identifier, protocolState *flow.ProtocolStateEntry) func(*transaction.Tx) error { + return s.cache.PutTx(id, protocolState) +} + +// Index indexes the protocol state by block ID. +func (s *ProtocolState) Index(blockID flow.Identifier, protocolStateID flow.Identifier) func(*transaction.Tx) error { + return func(tx *transaction.Tx) error { + err := transaction.WithTx(operation.IndexProtocolState(blockID, protocolStateID))(tx) + if err != nil { + return fmt.Errorf("could not index protocol state for block (%x): %w", blockID[:], err) + } + return nil + } +} + +// ByID returns the protocol state by its ID. +func (s *ProtocolState) ByID(id flow.Identifier) (*flow.ProtocolStateEntry, error) { + tx := s.db.NewTransaction(false) + defer tx.Discard() + return s.retrieveTx(id)(tx) +} + +// ByBlockID returns the protocol state by block ID. +func (s *ProtocolState) ByBlockID(blockID flow.Identifier) (*flow.ProtocolStateEntry, error) { + tx := s.db.NewTransaction(false) + defer tx.Discard() + return s.retrieveTx(blockID)(tx) +} + +func (s *ProtocolState) retrieveTx(protocolStateID flow.Identifier) func(*badger.Txn) (*flow.ProtocolStateEntry, error) { + return func(tx *badger.Txn) (*flow.ProtocolStateEntry, error) { + val, err := s.cache.Get(protocolStateID)(tx) + if err != nil { + return nil, err + } + return val.(*flow.ProtocolStateEntry), nil + } +} From 606a72972a13ab29090de424cfb6f16da361134d Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Mon, 10 Jul 2023 12:44:41 +0300 Subject: [PATCH 17/46] Updated implementation of protocol state to correctly return indexed value --- storage/badger/protocol_state.go | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/storage/badger/protocol_state.go b/storage/badger/protocol_state.go index a01ff770f9d..51db39a9e68 100644 --- a/storage/badger/protocol_state.go +++ b/storage/badger/protocol_state.go @@ -68,17 +68,17 @@ func (s *ProtocolState) Index(blockID flow.Identifier, protocolStateID flow.Iden func (s *ProtocolState) ByID(id flow.Identifier) (*flow.ProtocolStateEntry, error) { tx := s.db.NewTransaction(false) defer tx.Discard() - return s.retrieveTx(id)(tx) + return s.byID(id)(tx) } // ByBlockID returns the protocol state by block ID. 
func (s *ProtocolState) ByBlockID(blockID flow.Identifier) (*flow.ProtocolStateEntry, error) { tx := s.db.NewTransaction(false) defer tx.Discard() - return s.retrieveTx(blockID)(tx) + return s.byBlockID(blockID)(tx) } -func (s *ProtocolState) retrieveTx(protocolStateID flow.Identifier) func(*badger.Txn) (*flow.ProtocolStateEntry, error) { +func (s *ProtocolState) byID(protocolStateID flow.Identifier) func(*badger.Txn) (*flow.ProtocolStateEntry, error) { return func(tx *badger.Txn) (*flow.ProtocolStateEntry, error) { val, err := s.cache.Get(protocolStateID)(tx) if err != nil { @@ -87,3 +87,14 @@ func (s *ProtocolState) retrieveTx(protocolStateID flow.Identifier) func(*badger return val.(*flow.ProtocolStateEntry), nil } } + +func (s *ProtocolState) byBlockID(blockID flow.Identifier) func(*badger.Txn) (*flow.ProtocolStateEntry, error) { + return func(tx *badger.Txn) (*flow.ProtocolStateEntry, error) { + var protocolStateID flow.Identifier + err := operation.LookupProtocolState(blockID, &protocolStateID)(tx) + if err != nil { + return nil, fmt.Errorf("could not lookup protocol state ID for block (%x): %w", blockID[:], err) + } + return s.byID(protocolStateID)(tx) + } +} From 0114b3c196272e6aa7e31efbf92b8ef15c08aa42 Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Tue, 11 Jul 2023 21:45:04 +0300 Subject: [PATCH 18/46] Added RichProtocolStateEntry for storing all needed data without relying on DB lookups. --- model/flow/protocol_state.go | 21 +++++++++++++++++++++ storage/protocol_state.go | 4 ++-- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/model/flow/protocol_state.go b/model/flow/protocol_state.go index 8bb8d536e41..01e269beccf 100644 --- a/model/flow/protocol_state.go +++ b/model/flow/protocol_state.go @@ -25,6 +25,27 @@ type ProtocolStateEntry struct { NextEpochProtocolState *ProtocolStateEntry } +// RichProtocolStateEntry is a ProtocolStateEntry which has additional fields that are cached +// from storage layer for convenience. +// Using this structure instead of ProtocolStateEntry allows us to avoid querying +// the database for epoch setups and commits and full identity table. +// It holds several invariants, such as: +// - CurrentEpochSetup and CurrentEpochCommit are for the same epoch. Never nil. +// - PreviousEpochSetup and PreviousEpochCommit are for the same epoch. Never nil. +// - Identities is a full identity table for the current epoch. Identities are sorted in canonical order. Never nil. +// - NextEpochProtocolState is a protocol state for the next epoch. Can be nil. +type RichProtocolStateEntry struct { + ProtocolStateEntry + + CurrentEpochSetup *EpochSetup + CurrentEpochCommit *EpochCommit + PreviousEpochSetup *EpochSetup + PreviousEpochCommit *EpochCommit + Identities IdentityList + + NextEpochProtocolState *RichProtocolStateEntry +} + // ID returns hash of entry by hashing all fields. func (e *ProtocolStateEntry) ID() Identifier { if e == nil { diff --git a/storage/protocol_state.go b/storage/protocol_state.go index a63399f306d..e2e7aa90096 100644 --- a/storage/protocol_state.go +++ b/storage/protocol_state.go @@ -12,7 +12,7 @@ type ProtocolState interface { // Index indexes the protocol state by block ID. Index(blockID flow.Identifier, protocolStateID flow.Identifier) func(*transaction.Tx) error // ByID returns the protocol state by its ID. - ByID(id flow.Identifier) (*flow.ProtocolStateEntry, error) + ByID(id flow.Identifier) (*flow.RichProtocolStateEntry, error) // ByBlockID returns the protocol state by block ID. 
- ByBlockID(blockID flow.Identifier) (*flow.ProtocolStateEntry, error) + ByBlockID(blockID flow.Identifier) (*flow.RichProtocolStateEntry, error) } From cb20acc989d4a0bddabd89de42c18caa96d8cd56 Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Tue, 11 Jul 2023 21:45:53 +0300 Subject: [PATCH 19/46] Updated protocol state DB to enrich protocol state entry with extra data --- storage/badger/protocol_state.go | 130 ++++++++++++++++++++++++++----- 1 file changed, 112 insertions(+), 18 deletions(-) diff --git a/storage/badger/protocol_state.go b/storage/badger/protocol_state.go index 51db39a9e68..78c30f19f73 100644 --- a/storage/badger/protocol_state.go +++ b/storage/badger/protocol_state.go @@ -2,8 +2,8 @@ package badger import ( "fmt" - "github.com/dgraph-io/badger/v2" + "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" @@ -23,19 +23,24 @@ var _ storage.ProtocolState = (*ProtocolState)(nil) // NewProtocolState Creates ProtocolState instance which is a database of protocol state entries // which supports storing, caching and retrieving by ID and additionally indexed block ID. -func NewProtocolState(collector module.CacheMetrics, db *badger.DB, cacheSize uint) *ProtocolState { - store := func(key interface{}, val interface{}) func(*transaction.Tx) error { - id := key.(flow.Identifier) - protocolStateEntry := val.(*flow.ProtocolStateEntry) - return transaction.WithTx(operation.InsertProtocolState(id, protocolStateEntry)) - } - +func NewProtocolState(collector module.CacheMetrics, + epochSetups storage.EpochSetups, + epochCommits storage.EpochCommits, + db *badger.DB, + cacheSize uint) *ProtocolState { retrieve := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { protocolStateID := key.(flow.Identifier) var protocolStateEntry flow.ProtocolStateEntry return func(tx *badger.Txn) (interface{}, error) { err := operation.RetrieveProtocolState(protocolStateID, &protocolStateEntry)(tx) - return &protocolStateEntry, err + if err != nil { + return nil, err + } + result, err := newRichProtocolStateEntry(protocolStateEntry, epochSetups, epochCommits) + if err != nil { + return nil, fmt.Errorf("could not create rich protocol state entry: %w", err) + } + return result, nil } } @@ -43,14 +48,14 @@ func NewProtocolState(collector module.CacheMetrics, db *badger.DB, cacheSize ui db: db, cache: newCache(collector, metrics.ResourceProtocolState, withLimit(cacheSize), - withStore(store), + withStore(noopStore), withRetrieve(retrieve)), } } // StoreTx allows us to store protocol state as part of a DB tx, while still going through the caching layer. func (s *ProtocolState) StoreTx(id flow.Identifier, protocolState *flow.ProtocolStateEntry) func(*transaction.Tx) error { - return s.cache.PutTx(id, protocolState) + return transaction.WithTx(operation.InsertProtocolState(id, protocolState)) } // Index indexes the protocol state by block ID. @@ -65,31 +70,31 @@ func (s *ProtocolState) Index(blockID flow.Identifier, protocolStateID flow.Iden } // ByID returns the protocol state by its ID. -func (s *ProtocolState) ByID(id flow.Identifier) (*flow.ProtocolStateEntry, error) { +func (s *ProtocolState) ByID(id flow.Identifier) (*flow.RichProtocolStateEntry, error) { tx := s.db.NewTransaction(false) defer tx.Discard() return s.byID(id)(tx) } // ByBlockID returns the protocol state by block ID. 
-func (s *ProtocolState) ByBlockID(blockID flow.Identifier) (*flow.ProtocolStateEntry, error) { +func (s *ProtocolState) ByBlockID(blockID flow.Identifier) (*flow.RichProtocolStateEntry, error) { tx := s.db.NewTransaction(false) defer tx.Discard() return s.byBlockID(blockID)(tx) } -func (s *ProtocolState) byID(protocolStateID flow.Identifier) func(*badger.Txn) (*flow.ProtocolStateEntry, error) { - return func(tx *badger.Txn) (*flow.ProtocolStateEntry, error) { +func (s *ProtocolState) byID(protocolStateID flow.Identifier) func(*badger.Txn) (*flow.RichProtocolStateEntry, error) { + return func(tx *badger.Txn) (*flow.RichProtocolStateEntry, error) { val, err := s.cache.Get(protocolStateID)(tx) if err != nil { return nil, err } - return val.(*flow.ProtocolStateEntry), nil + return val.(*flow.RichProtocolStateEntry), nil } } -func (s *ProtocolState) byBlockID(blockID flow.Identifier) func(*badger.Txn) (*flow.ProtocolStateEntry, error) { - return func(tx *badger.Txn) (*flow.ProtocolStateEntry, error) { +func (s *ProtocolState) byBlockID(blockID flow.Identifier) func(*badger.Txn) (*flow.RichProtocolStateEntry, error) { + return func(tx *badger.Txn) (*flow.RichProtocolStateEntry, error) { var protocolStateID flow.Identifier err := operation.LookupProtocolState(blockID, &protocolStateID)(tx) if err != nil { @@ -98,3 +103,92 @@ func (s *ProtocolState) byBlockID(blockID flow.Identifier) func(*badger.Txn) (*f return s.byID(protocolStateID)(tx) } } + +func newRichProtocolStateEntry(protocolState flow.ProtocolStateEntry, + setups storage.EpochSetups, + commits storage.EpochCommits, +) (*flow.RichProtocolStateEntry, error) { + result := &flow.RichProtocolStateEntry{ + ProtocolStateEntry: protocolState, + } + + // query and fill in epoch setups and commits for previous and current epochs + var err error + result.PreviousEpochSetup, err = setups.ByID(protocolState.PreviousEpochEventIDs.SetupID) + if err != nil { + return nil, fmt.Errorf("could not retrieve previous epoch setup: %w", err) + } + result.PreviousEpochCommit, err = commits.ByID(protocolState.PreviousEpochEventIDs.CommitID) + if err != nil { + return nil, fmt.Errorf("could not retrieve previous epoch commit: %w", err) + } + result.CurrentEpochSetup, err = setups.ByID(protocolState.CurrentEpochEventIDs.SetupID) + if err != nil { + return nil, fmt.Errorf("could not retrieve current epoch setup: %w", err) + } + result.CurrentEpochCommit, err = commits.ByID(protocolState.CurrentEpochEventIDs.CommitID) + if err != nil { + return nil, fmt.Errorf("could not retrieve current epoch commit: %w", err) + } + result.Identities, err = buildIdentityTable(protocolState.Identities, result.PreviousEpochSetup, result.CurrentEpochSetup) + if err != nil { + return nil, fmt.Errorf("could not build identity table: %w", err) + } + + // if next epoch has been already committed, fill in data for it as well. 
+ if protocolState.NextEpochProtocolState != nil { + nextEpochProtocolState := *protocolState.NextEpochProtocolState + + nextEpochSetup, err := setups.ByID(nextEpochProtocolState.CurrentEpochEventIDs.SetupID) + if err != nil { + return nil, fmt.Errorf("could not retrieve next epoch setup: %w", err) + } + nextEpochCommit, err := commits.ByID(nextEpochProtocolState.CurrentEpochEventIDs.CommitID) + if err != nil { + return nil, fmt.Errorf("could not retrieve next epoch commit: %w", err) + } + nextEpochIdentityTable, err := buildIdentityTable(protocolState.Identities, result.CurrentEpochSetup, nextEpochSetup) + if err != nil { + return nil, fmt.Errorf("could not build next epoch identity table: %w", err) + } + + // fill identities for next epoch + result.NextEpochProtocolState = &flow.RichProtocolStateEntry{ + ProtocolStateEntry: nextEpochProtocolState, + CurrentEpochSetup: nextEpochSetup, + CurrentEpochCommit: nextEpochCommit, + PreviousEpochSetup: result.CurrentEpochSetup, // previous epoch setup is current epoch setup + PreviousEpochCommit: result.CurrentEpochCommit, // previous epoch setup is current epoch setup + Identities: nextEpochIdentityTable, + NextEpochProtocolState: nil, // always nil + } + } + + return result, nil +} + +func buildIdentityTable( + dynamicIdentities flow.DynamicIdentityEntryList, + previousEpochSetup, currentEpochSetup *flow.EpochSetup, +) (flow.IdentityList, error) { + allEpochParticipants := append(previousEpochSetup.Participants, currentEpochSetup.Participants...) + allEpochParticipants.Sort(order.Canonical) + // sanity check: size of identities should be equal to previous and current epoch participants combined + if len(allEpochParticipants) != len(dynamicIdentities) { + return nil, fmt.Errorf("invalid number of identities in protocol state: expected %d, got %d", len(allEpochParticipants), len(dynamicIdentities)) + } + + // build full identity table for current epoch + var result flow.IdentityList + for i, identity := range dynamicIdentities { + // sanity check: identities should be sorted in canonical order + if identity.NodeID != allEpochParticipants[i].NodeID { + return nil, fmt.Errorf("identites in protocol state are not in canonical order: expected %s, got %s", allEpochParticipants[i].NodeID, identity.NodeID) + } + result = append(result, &flow.Identity{ + IdentitySkeleton: allEpochParticipants[i].IdentitySkeleton, + DynamicIdentity: identity.Dynamic, + }) + } + return result, nil +} From b1d07029615537b20ec1d1f92c67f56eb759151d Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Tue, 11 Jul 2023 21:55:30 +0300 Subject: [PATCH 20/46] Added godoc for badger.ProtocolState --- storage/badger/protocol_state.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/storage/badger/protocol_state.go b/storage/badger/protocol_state.go index 78c30f19f73..5bae436cd4d 100644 --- a/storage/badger/protocol_state.go +++ b/storage/badger/protocol_state.go @@ -104,7 +104,11 @@ func (s *ProtocolState) byBlockID(blockID flow.Identifier) func(*badger.Txn) (*f } } -func newRichProtocolStateEntry(protocolState flow.ProtocolStateEntry, +// newRichProtocolStateEntry constructs a rich protocol state entry from a protocol state entry. +// It queries and fills in epoch setups and commits for previous and current epochs and possibly next epoch. +// No errors are expected during normal operation. 
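newRichProtocolStateEntry, declared next, fills in the setup and commit events; the other half of the enrichment is reassembling full identities by pairing the static IdentitySkeleton taken from the setup events with the DynamicIdentity persisted in the entry. A compressed sketch of that pairing, using simplified stand-in types rather than the real flow ones:

package main

import "fmt"

// Stand-ins for the split identity: the static half originates from the epoch
// setup event, the dynamic half lives in the stored protocol state entry.
type identitySkeleton struct {
	NodeID        string
	Address       string
	InitialWeight uint64
}

type dynamicIdentity struct {
	Weight  uint64
	Ejected bool
}

// fullIdentity mirrors the embedding used by the patches: one struct that is
// simply the two halves glued back together.
type fullIdentity struct {
	identitySkeleton
	dynamicIdentity
}

func main() {
	sk := identitySkeleton{NodeID: "a1b2", Address: "node.example.org:3569", InitialWeight: 100}
	dyn := dynamicIdentity{Weight: 0, Ejected: false} // registered for the next epoch, not yet active
	id := fullIdentity{identitySkeleton: sk, dynamicIdentity: dyn}
	fmt.Printf("%s weight=%d ejected=%t\n", id.NodeID, id.Weight, id.Ejected)
}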
+func newRichProtocolStateEntry( + protocolState flow.ProtocolStateEntry, setups storage.EpochSetups, commits storage.EpochCommits, ) (*flow.RichProtocolStateEntry, error) { @@ -138,7 +142,6 @@ func newRichProtocolStateEntry(protocolState flow.ProtocolStateEntry, // if next epoch has been already committed, fill in data for it as well. if protocolState.NextEpochProtocolState != nil { nextEpochProtocolState := *protocolState.NextEpochProtocolState - nextEpochSetup, err := setups.ByID(nextEpochProtocolState.CurrentEpochEventIDs.SetupID) if err != nil { return nil, fmt.Errorf("could not retrieve next epoch setup: %w", err) @@ -167,6 +170,9 @@ func newRichProtocolStateEntry(protocolState flow.ProtocolStateEntry, return result, nil } +// buildIdentityTable builds identity table for current epoch combining data from previous, current epoch setups and dynamic identities +// that are stored in protocol state. It also performs sanity checks to make sure that data is consistent. +// No errors are expected during normal operation. func buildIdentityTable( dynamicIdentities flow.DynamicIdentityEntryList, previousEpochSetup, currentEpochSetup *flow.EpochSetup, From 3840c658dde5bf5105513c340f544ff0deeecb0e Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Tue, 11 Jul 2023 22:02:25 +0300 Subject: [PATCH 21/46] More godoc updates --- storage/badger/protocol_state.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/storage/badger/protocol_state.go b/storage/badger/protocol_state.go index 5bae436cd4d..5088d8fecf3 100644 --- a/storage/badger/protocol_state.go +++ b/storage/badger/protocol_state.go @@ -14,6 +14,8 @@ import ( ) // ProtocolState implements persistent storage for storing entities of protocol state. +// Protocol state uses an embedded cache without storing capabilities(store happens on first retrieval) to avoid unnecessary +// operations and to speed up access to frequently used protocol states. type ProtocolState struct { db *badger.DB cache *Cache From 21af7680920f6a39dbd8da5f590f57eff487b3f7 Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Thu, 13 Jul 2023 12:57:25 +0300 Subject: [PATCH 22/46] Updated fixtures, added basic test for storage. 
Fixed issue with ordering identities --- .../export_report.json | 4 +- storage/badger/protocol_state.go | 2 +- storage/badger/protocol_state_test.go | 44 ++++++++++++++++ utils/unittest/fixtures.go | 52 +++++++++++++++++++ 4 files changed, 99 insertions(+), 3 deletions(-) create mode 100644 storage/badger/protocol_state_test.go diff --git a/cmd/util/cmd/execution-state-extract/export_report.json b/cmd/util/cmd/execution-state-extract/export_report.json index b3bd9916676..fb20b73a6bf 100644 --- a/cmd/util/cmd/execution-state-extract/export_report.json +++ b/cmd/util/cmd/execution-state-extract/export_report.json @@ -1,6 +1,6 @@ { "EpochCounter": 0, - "PreviousStateCommitment": "d1890f89344a5a1b5b47b0e1ee81a7620214849e59aac83509f36bcf84424c27", - "CurrentStateCommitment": "d1890f89344a5a1b5b47b0e1ee81a7620214849e59aac83509f36bcf84424c27", + "PreviousStateCommitment": "cfe71623d99214a7e27b3cbff0ff4b899031daa8be82336b9fd1558048cdde99", + "CurrentStateCommitment": "cfe71623d99214a7e27b3cbff0ff4b899031daa8be82336b9fd1558048cdde99", "ReportSucceeded": true } \ No newline at end of file diff --git a/storage/badger/protocol_state.go b/storage/badger/protocol_state.go index 5088d8fecf3..72aa5232ea0 100644 --- a/storage/badger/protocol_state.go +++ b/storage/badger/protocol_state.go @@ -180,7 +180,7 @@ func buildIdentityTable( previousEpochSetup, currentEpochSetup *flow.EpochSetup, ) (flow.IdentityList, error) { allEpochParticipants := append(previousEpochSetup.Participants, currentEpochSetup.Participants...) - allEpochParticipants.Sort(order.Canonical) + allEpochParticipants = allEpochParticipants.Sort(order.Canonical) // sanity check: size of identities should be equal to previous and current epoch participants combined if len(allEpochParticipants) != len(dynamicIdentities) { return nil, fmt.Errorf("invalid number of identities in protocol state: expected %d, got %d", len(allEpochParticipants), len(dynamicIdentities)) diff --git a/storage/badger/protocol_state_test.go b/storage/badger/protocol_state_test.go new file mode 100644 index 00000000000..9f2452d5ba8 --- /dev/null +++ b/storage/badger/protocol_state_test.go @@ -0,0 +1,44 @@ +package badger + +import ( + "github.com/dgraph-io/badger/v2" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage/badger/transaction" + "github.com/onflow/flow-go/utils/unittest" + "github.com/stretchr/testify/require" + "testing" +) + +func TestProtocolStateStoreRetrieve(t *testing.T) { + unittest.RunWithBadgerDB(t, func(db *badger.DB) { + metrics := metrics.NewNoopCollector() + + setups := NewEpochSetups(metrics, db) + commits := NewEpochCommits(metrics, db) + store := NewProtocolState(metrics, setups, commits, db, DefaultCacheSize) + + expected := unittest.ProtocolStateFixture() + protocolStateID := expected.ID() + + // store protocol state and auxiliary info + err := transaction.Update(db, func(tx *transaction.Tx) error { + + err := setups.StoreTx(expected.PreviousEpochSetup)(tx) + require.NoError(t, err) + err = setups.StoreTx(expected.CurrentEpochSetup)(tx) + require.NoError(t, err) + err = commits.StoreTx(expected.PreviousEpochCommit)(tx) + require.NoError(t, err) + err = commits.StoreTx(expected.CurrentEpochCommit)(tx) + require.NoError(t, err) + + return store.StoreTx(protocolStateID, &expected.ProtocolStateEntry)(tx) + }) + require.NoError(t, err) + + // fetch protocol state + actual, err := store.ByID(protocolStateID) + require.NoError(t, err) + require.Equal(t, expected, actual) + }) +} diff --git a/utils/unittest/fixtures.go 
b/utils/unittest/fixtures.go index cde27bfb369..a349d9b295b 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -2474,3 +2474,55 @@ func ChunkExecutionDataFixture(t *testing.T, minSize int, opts ...func(*executio size *= 2 } } + +func ProtocolStateFixture(options ...func(*flow.RichProtocolStateEntry)) *flow.RichProtocolStateEntry { + prevEpochSetup := EpochSetupFixture() + prevEpochCommit := EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = prevEpochSetup.Counter + }) + currentEpochSetup := EpochSetupFixture(func(setup *flow.EpochSetup) { + setup.Counter = prevEpochSetup.Counter + 1 + }) + currentEpochCommit := EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = currentEpochSetup.Counter + }) + + allIdentities := append(prevEpochSetup.Participants, currentEpochSetup.Participants...) + allIdentities = allIdentities.Sort(order.Canonical) + + var dynamicIdentities flow.DynamicIdentityEntryList + for _, identity := range allIdentities { + dynamicIdentities = append(dynamicIdentities, &flow.DynamicIdentityEntry{ + NodeID: identity.NodeID, + Dynamic: identity.DynamicIdentity, + }) + } + + entry := &flow.RichProtocolStateEntry{ + ProtocolStateEntry: flow.ProtocolStateEntry{ + CurrentEpochEventIDs: flow.EventIDs{ + SetupID: currentEpochSetup.ID(), + CommitID: currentEpochCommit.ID(), + }, + PreviousEpochEventIDs: flow.EventIDs{ + SetupID: prevEpochSetup.ID(), + CommitID: prevEpochCommit.ID(), + }, + Identities: dynamicIdentities, + InvalidStateTransitionAttempted: false, + NextEpochProtocolState: nil, + }, + CurrentEpochSetup: currentEpochSetup, + CurrentEpochCommit: currentEpochCommit, + PreviousEpochSetup: prevEpochSetup, + PreviousEpochCommit: prevEpochCommit, + Identities: allIdentities, + NextEpochProtocolState: nil, + } + + for _, option := range options { + option(entry) + } + + return entry +} From ba61c289a3c160a9786d612e302965af63d4186d Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Thu, 13 Jul 2023 13:39:20 +0300 Subject: [PATCH 23/46] Updated tests to fully cover building of RichProtocolStateEntry --- storage/badger/protocol_state.go | 2 +- storage/badger/protocol_state_test.go | 61 +++++++++++++++++++++++++-- utils/unittest/fixtures.go | 48 +++++++++++++++++++++ 3 files changed, 106 insertions(+), 5 deletions(-) diff --git a/storage/badger/protocol_state.go b/storage/badger/protocol_state.go index 72aa5232ea0..19f26816f3b 100644 --- a/storage/badger/protocol_state.go +++ b/storage/badger/protocol_state.go @@ -152,7 +152,7 @@ func newRichProtocolStateEntry( if err != nil { return nil, fmt.Errorf("could not retrieve next epoch commit: %w", err) } - nextEpochIdentityTable, err := buildIdentityTable(protocolState.Identities, result.CurrentEpochSetup, nextEpochSetup) + nextEpochIdentityTable, err := buildIdentityTable(nextEpochProtocolState.Identities, result.CurrentEpochSetup, nextEpochSetup) if err != nil { return nil, fmt.Errorf("could not build next epoch identity table: %w", err) } diff --git a/storage/badger/protocol_state_test.go b/storage/badger/protocol_state_test.go index 9f2452d5ba8..4fb5254db8e 100644 --- a/storage/badger/protocol_state_test.go +++ b/storage/badger/protocol_state_test.go @@ -2,14 +2,18 @@ package badger import ( "github.com/dgraph-io/badger/v2" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage/badger/transaction" "github.com/onflow/flow-go/utils/unittest" + 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "testing" ) -func TestProtocolStateStoreRetrieve(t *testing.T) { +// TestProtocolStateStorage tests if the protocol state is stored, retrieved and indexed correctly +func TestProtocolStateStorage(t *testing.T) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { metrics := metrics.NewNoopCollector() @@ -17,22 +21,29 @@ func TestProtocolStateStoreRetrieve(t *testing.T) { commits := NewEpochCommits(metrics, db) store := NewProtocolState(metrics, setups, commits, db, DefaultCacheSize) - expected := unittest.ProtocolStateFixture() + expected := unittest.ProtocolStateFixture(unittest.WithNextEpochProtocolState()) protocolStateID := expected.ID() + blockID := unittest.IdentifierFixture() // store protocol state and auxiliary info err := transaction.Update(db, func(tx *transaction.Tx) error { - + // store epoch events to be able to retrieve them later err := setups.StoreTx(expected.PreviousEpochSetup)(tx) require.NoError(t, err) err = setups.StoreTx(expected.CurrentEpochSetup)(tx) require.NoError(t, err) + err = setups.StoreTx(expected.NextEpochProtocolState.CurrentEpochSetup)(tx) + require.NoError(t, err) err = commits.StoreTx(expected.PreviousEpochCommit)(tx) require.NoError(t, err) err = commits.StoreTx(expected.CurrentEpochCommit)(tx) require.NoError(t, err) + err = commits.StoreTx(expected.NextEpochProtocolState.CurrentEpochCommit)(tx) + require.NoError(t, err) - return store.StoreTx(protocolStateID, &expected.ProtocolStateEntry)(tx) + err = store.StoreTx(protocolStateID, &expected.ProtocolStateEntry)(tx) + require.NoError(t, err) + return store.Index(blockID, protocolStateID)(tx) }) require.NoError(t, err) @@ -40,5 +51,47 @@ func TestProtocolStateStoreRetrieve(t *testing.T) { actual, err := store.ByID(protocolStateID) require.NoError(t, err) require.Equal(t, expected, actual) + + assertRichProtocolStateValidity(t, actual) + + // fetch protocol state by block ID + actualByBlockID, err := store.ByBlockID(blockID) + require.NoError(t, err) + require.Equal(t, expected, actualByBlockID) + + assertRichProtocolStateValidity(t, actualByBlockID) }) } + +// assertRichProtocolStateValidity checks if RichProtocolState holds its invariant and is correctly populated by storage layer. +func assertRichProtocolStateValidity(t *testing.T, state *flow.RichProtocolStateEntry) { + // invariant: CurrentEpochSetup and CurrentEpochCommit are for the same epoch. Never nil. + assert.Equal(t, state.CurrentEpochSetup.Counter, state.CurrentEpochCommit.Counter, "current epoch setup and commit should be for the same epoch") + assert.Equal(t, state.CurrentEpochSetup.Counter, state.PreviousEpochSetup.Counter+1, "current epoch setup should be next after previous epoch") + + // invariant: CurrentEpochSetup and CurrentEpochCommit IDs are the equal to the ID of the protocol state entry. Never nil. + assert.Equal(t, state.CurrentEpochSetup.ID(), state.ProtocolStateEntry.CurrentEpochEventIDs.SetupID, "epoch setup should be for correct event ID") + assert.Equal(t, state.CurrentEpochCommit.ID(), state.ProtocolStateEntry.CurrentEpochEventIDs.CommitID, "epoch commit should be for correct event ID") + + // invariant: PreviousEpochSetup and PreviousEpochCommit are for the same epoch. Never nil. + assert.Equal(t, state.PreviousEpochSetup.Counter, state.PreviousEpochCommit.Counter, "previous epoch setup and commit should be for the same epoch") + + // invariant: PreviousEpochSetup and PreviousEpochCommit IDs are the equal to the ID of the protocol state entry. 
Never nil. + assert.Equal(t, state.PreviousEpochSetup.ID(), state.ProtocolStateEntry.PreviousEpochEventIDs.SetupID, "epoch setup should be for correct event ID") + assert.Equal(t, state.PreviousEpochCommit.ID(), state.ProtocolStateEntry.PreviousEpochEventIDs.CommitID, "epoch commit should be for correct event ID") + + // invariant: Identities is a full identity table for the current epoch. Identities are sorted in canonical order. Never nil. + allIdentities := append(state.PreviousEpochSetup.Participants, state.CurrentEpochSetup.Participants...).Sort(order.Canonical) + assert.Equal(t, allIdentities, state.Identities, "identities should be a full identity table for the current epoch") + + for i, identity := range state.ProtocolStateEntry.Identities { + assert.Equal(t, identity.NodeID, allIdentities[i].NodeID, "identity node ID should match") + } + + nextEpochState := state.NextEpochProtocolState + if nextEpochState == nil { + return + } + // invariant: NextEpochProtocolState is a protocol state for the next epoch. Can be nil. + assertRichProtocolStateValidity(t, nextEpochState) +} diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index a349d9b295b..a1093e5a1a8 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -2475,6 +2475,10 @@ func ChunkExecutionDataFixture(t *testing.T, minSize int, opts ...func(*executio } } +// ProtocolStateFixture creates a fixture with correctly structured data that passes basic sanity checks. +// Epoch setup and commit counters are set to match. +// Identities are constructed from setup events. +// Identities are sorted in canonical order. func ProtocolStateFixture(options ...func(*flow.RichProtocolStateEntry)) *flow.RichProtocolStateEntry { prevEpochSetup := EpochSetupFixture() prevEpochCommit := EpochCommitFixture(func(commit *flow.EpochCommit) { @@ -2526,3 +2530,47 @@ func ProtocolStateFixture(options ...func(*flow.RichProtocolStateEntry)) *flow.R return entry } + +// WithNextEpochProtocolState creates a fixture with correctly structured data for next epoch. +func WithNextEpochProtocolState() func(entry *flow.RichProtocolStateEntry) { + return func(entry *flow.RichProtocolStateEntry) { + nextEpochSetup := EpochSetupFixture(func(setup *flow.EpochSetup) { + setup.Counter = entry.CurrentEpochSetup.Counter + 1 + }) + nextEpochCommit := EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = nextEpochSetup.Counter + }) + + allIdentities := append(entry.CurrentEpochSetup.Participants, nextEpochSetup.Participants...) 
+ allIdentities = allIdentities.Sort(order.Canonical) + + var dynamicIdentities flow.DynamicIdentityEntryList + for _, identity := range allIdentities { + dynamicIdentities = append(dynamicIdentities, &flow.DynamicIdentityEntry{ + NodeID: identity.NodeID, + Dynamic: identity.DynamicIdentity, + }) + } + + entry.ProtocolStateEntry.NextEpochProtocolState = &flow.ProtocolStateEntry{ + CurrentEpochEventIDs: flow.EventIDs{ + SetupID: nextEpochSetup.ID(), + CommitID: nextEpochCommit.ID(), + }, + PreviousEpochEventIDs: entry.CurrentEpochEventIDs, + Identities: dynamicIdentities, + InvalidStateTransitionAttempted: false, + NextEpochProtocolState: nil, + } + + entry.NextEpochProtocolState = &flow.RichProtocolStateEntry{ + ProtocolStateEntry: *entry.ProtocolStateEntry.NextEpochProtocolState, + CurrentEpochSetup: nextEpochSetup, + CurrentEpochCommit: nextEpochCommit, + PreviousEpochSetup: entry.CurrentEpochSetup, + PreviousEpochCommit: entry.CurrentEpochCommit, + Identities: allIdentities, + NextEpochProtocolState: nil, + } + } +} From b66d0e17a4d5f564991964cc77f9676866170aee Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Thu, 13 Jul 2023 13:46:11 +0300 Subject: [PATCH 24/46] Added extra test for badger operations. Linted --- storage/badger/operation/protocol_state.go | 1 + .../badger/operation/protocol_state_test.go | 39 +++++++++++++++++++ storage/badger/protocol_state.go | 3 +- storage/badger/protocol_state_test.go | 8 ++-- 4 files changed, 47 insertions(+), 4 deletions(-) create mode 100644 storage/badger/operation/protocol_state_test.go diff --git a/storage/badger/operation/protocol_state.go b/storage/badger/operation/protocol_state.go index 424e5fead32..072fdde7a46 100644 --- a/storage/badger/operation/protocol_state.go +++ b/storage/badger/operation/protocol_state.go @@ -2,6 +2,7 @@ package operation import ( "github.com/dgraph-io/badger/v2" + "github.com/onflow/flow-go/model/flow" ) diff --git a/storage/badger/operation/protocol_state_test.go b/storage/badger/operation/protocol_state_test.go new file mode 100644 index 00000000000..883b60df4a2 --- /dev/null +++ b/storage/badger/operation/protocol_state_test.go @@ -0,0 +1,39 @@ +package operation + +import ( + "testing" + + "github.com/dgraph-io/badger/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestInsertProtocolState tests if basic badger operations on ProtocolState work as expected. 
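The test that follows exercises the two-level keying the storage layer relies on: an entry is inserted under its own content-derived ID, and a separate index maps a block ID to that entry ID, so retrieval by block goes through two lookups. A toy sketch of the indirection, with maps standing in for badger and hypothetical names throughout:

package main

import (
	"errors"
	"fmt"
)

// protocolStateStore is a toy stand-in for the two indices used here:
// entries keyed by their own ID, plus a blockID -> entryID index.
type protocolStateStore struct {
	byID      map[string]string // entryID -> entry payload (stand-in)
	byBlockID map[string]string // blockID -> entryID
}

func newStore() *protocolStateStore {
	return &protocolStateStore{byID: map[string]string{}, byBlockID: map[string]string{}}
}

func (s *protocolStateStore) Store(entryID, entry string) { s.byID[entryID] = entry }

func (s *protocolStateStore) Index(blockID, entryID string) { s.byBlockID[blockID] = entryID }

// ByBlockID resolves the index first, then the entry itself.
func (s *protocolStateStore) ByBlockID(blockID string) (string, error) {
	entryID, ok := s.byBlockID[blockID]
	if !ok {
		return "", errors.New("no protocol state indexed for block")
	}
	entry, ok := s.byID[entryID]
	if !ok {
		return "", errors.New("indexed protocol state entry is missing")
	}
	return entry, nil
}

func main() {
	s := newStore()
	s.Store("entry-1", "protocol state payload")
	s.Index("block-A", "entry-1")
	s.Index("block-B", "entry-1") // blocks with an unchanged state can point at the same entry
	fmt.Println(s.ByBlockID("block-B"))
}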
+func TestInsertProtocolState(t *testing.T) { + unittest.RunWithBadgerDB(t, func(db *badger.DB) { + expected := &unittest.ProtocolStateFixture().ProtocolStateEntry + + protocolStateID := expected.ID() + err := db.Update(InsertProtocolState(protocolStateID, expected)) + require.Nil(t, err) + + var actual flow.ProtocolStateEntry + err = db.View(RetrieveProtocolState(protocolStateID, &actual)) + require.Nil(t, err) + + assert.Equal(t, expected, &actual) + + blockID := unittest.IdentifierFixture() + err = db.Update(IndexProtocolState(blockID, protocolStateID)) + require.Nil(t, err) + + var actualProtocolStateID flow.Identifier + err = db.View(LookupProtocolState(blockID, &actualProtocolStateID)) + require.Nil(t, err) + + assert.Equal(t, protocolStateID, actualProtocolStateID) + }) +} diff --git a/storage/badger/protocol_state.go b/storage/badger/protocol_state.go index 19f26816f3b..610b9101c4e 100644 --- a/storage/badger/protocol_state.go +++ b/storage/badger/protocol_state.go @@ -2,10 +2,11 @@ package badger import ( "fmt" + "github.com/dgraph-io/badger/v2" - "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" diff --git a/storage/badger/protocol_state_test.go b/storage/badger/protocol_state_test.go index 4fb5254db8e..c980ca99a51 100644 --- a/storage/badger/protocol_state_test.go +++ b/storage/badger/protocol_state_test.go @@ -1,15 +1,17 @@ package badger import ( + "testing" + "github.com/dgraph-io/badger/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage/badger/transaction" "github.com/onflow/flow-go/utils/unittest" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "testing" ) // TestProtocolStateStorage tests if the protocol state is stored, retrieved and indexed correctly From 617925ad85e8905908e07f9fafaf3b4838b61af5 Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Thu, 13 Jul 2023 15:39:47 +0300 Subject: [PATCH 25/46] Updated godoc --- model/flow/protocol_state.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/model/flow/protocol_state.go b/model/flow/protocol_state.go index 01e269beccf..0ef2f64086a 100644 --- a/model/flow/protocol_state.go +++ b/model/flow/protocol_state.go @@ -8,8 +8,11 @@ type DynamicIdentityEntry struct { type DynamicIdentityEntryList []*DynamicIdentityEntry -// ProtocolStateEntry is the main structure that will be saved in the database. -// It contains the minimal protocol state that has to be persisted. +// ProtocolStateEntry holds information about the protocol state at some point in time. +// It allows to reconstruct the state of identity table using epoch setup events and dynamic identities. +// It tracks attempts of invalid state transitions. +// It also holds information about the next epoch, if it has been already committed. +// This structure is used to persist protocol state in the database. type ProtocolStateEntry struct { // setup and commit event IDs for current epoch. 
CurrentEpochEventIDs EventIDs From 895a5a40b59ae889d7906e06c5e69fc96b20daaa Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Mon, 31 Jul 2023 20:56:09 +0300 Subject: [PATCH 26/46] Updated protocol state store only canonicaly sorted identiies --- model/flow/protocol_state.go | 13 +++++++++++++ storage/badger/protocol_state.go | 14 +++++++++++--- storage/badger/protocol_state_test.go | 24 ++++++++++++++++++++++++ 3 files changed, 48 insertions(+), 3 deletions(-) diff --git a/model/flow/protocol_state.go b/model/flow/protocol_state.go index 0ef2f64086a..c4491819488 100644 --- a/model/flow/protocol_state.go +++ b/model/flow/protocol_state.go @@ -19,6 +19,7 @@ type ProtocolStateEntry struct { // setup and commit event IDs for previous epoch. PreviousEpochEventIDs EventIDs // Part of identity table that can be changed during the epoch. + // Always sorted in canonical order. Identities DynamicIdentityEntryList // InvalidStateTransitionAttempted encodes whether an invalid state transition // has been detected in this fork. When this happens, epoch fallback is triggered @@ -69,3 +70,15 @@ func (e *ProtocolStateEntry) ID() Identifier { } return MakeID(body) } + +// Sorted returns whether the list is sorted by the input ordering. +func (ll DynamicIdentityEntryList) Sorted(less IdentifierOrder) bool { + for i := 0; i < len(ll)-1; i++ { + a := ll[i] + b := ll[i+1] + if !less(a.NodeID, b.NodeID) { + return false + } + } + return true +} diff --git a/storage/badger/protocol_state.go b/storage/badger/protocol_state.go index 610b9101c4e..e53fd27489c 100644 --- a/storage/badger/protocol_state.go +++ b/storage/badger/protocol_state.go @@ -2,9 +2,7 @@ package badger import ( "fmt" - "github.com/dgraph-io/badger/v2" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module" @@ -58,7 +56,17 @@ func NewProtocolState(collector module.CacheMetrics, // StoreTx allows us to store protocol state as part of a DB tx, while still going through the caching layer. func (s *ProtocolState) StoreTx(id flow.Identifier, protocolState *flow.ProtocolStateEntry) func(*transaction.Tx) error { - return transaction.WithTx(operation.InsertProtocolState(id, protocolState)) + return func(tx *transaction.Tx) error { + if !protocolState.Identities.Sorted(order.IdentifierCanonical) { + return fmt.Errorf("sanity check failed: identities are not sorted") + } + if protocolState.NextEpochProtocolState != nil { + if !protocolState.NextEpochProtocolState.Identities.Sorted(order.IdentifierCanonical) { + return fmt.Errorf("sanity check failed: next epoch identities are not sorted") + } + } + return transaction.WithTx(operation.InsertProtocolState(id, protocolState))(tx) + } } // Index indexes the protocol state by block ID. diff --git a/storage/badger/protocol_state_test.go b/storage/badger/protocol_state_test.go index c980ca99a51..2a02f1209bb 100644 --- a/storage/badger/protocol_state_test.go +++ b/storage/badger/protocol_state_test.go @@ -65,6 +65,30 @@ func TestProtocolStateStorage(t *testing.T) { }) } +// TestProtocolStateStoreInvalidProtocolState tests that storing protocol state which has unsorted identities fails for +// current and next epoch protocol states. 
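The rejection the next test expects comes from the Sorted check added above: node IDs must be strictly increasing under the canonical ordering, which also rules out duplicates. A self-contained sketch of the same check over raw 32-byte identifiers, with bytes.Compare standing in for order.IdentifierCanonical:

package main

import (
	"bytes"
	"fmt"
)

type nodeID [32]byte

// isCanonicallySorted reports whether ids are in strictly increasing byte
// order, the invariant StoreTx now enforces before persisting identities.
func isCanonicallySorted(ids []nodeID) bool {
	for i := 0; i+1 < len(ids); i++ {
		if bytes.Compare(ids[i][:], ids[i+1][:]) >= 0 {
			return false
		}
	}
	return true
}

func main() {
	a, b := nodeID{0x01}, nodeID{0x02}
	fmt.Println(isCanonicallySorted([]nodeID{a, b})) // true
	fmt.Println(isCanonicallySorted([]nodeID{b, a})) // false, so the store would reject it
}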
+func TestProtocolStateStoreInvalidProtocolState(t *testing.T) { + unittest.RunWithBadgerDB(t, func(db *badger.DB) { + metrics := metrics.NewNoopCollector() + setups := NewEpochSetups(metrics, db) + commits := NewEpochCommits(metrics, db) + store := NewProtocolState(metrics, setups, commits, db, DefaultCacheSize) + invalid := unittest.ProtocolStateFixture().ProtocolStateEntry + // swap first and second elements to break canonical order + invalid.Identities[0], invalid.Identities[1] = invalid.Identities[1], invalid.Identities[0] + + err := transaction.Update(db, store.StoreTx(invalid.ID(), &invalid)) + require.Error(t, err) + + invalid = unittest.ProtocolStateFixture(unittest.WithNextEpochProtocolState()).ProtocolStateEntry + // swap first and second elements to break canonical order + invalid.NextEpochProtocolState.Identities[0], invalid.NextEpochProtocolState.Identities[1] = invalid.NextEpochProtocolState.Identities[1], invalid.NextEpochProtocolState.Identities[0] + + err = transaction.Update(db, store.StoreTx(invalid.ID(), &invalid)) + require.Error(t, err) + }) +} + // assertRichProtocolStateValidity checks if RichProtocolState holds its invariant and is correctly populated by storage layer. func assertRichProtocolStateValidity(t *testing.T, state *flow.RichProtocolStateEntry) { // invariant: CurrentEpochSetup and CurrentEpochCommit are for the same epoch. Never nil. From 1a1b0e459a15a4bc35be9b47a6003a57b14a2f59 Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Mon, 31 Jul 2023 21:38:34 +0300 Subject: [PATCH 27/46] Updated ProtocolStateEntry to store union of previous and current epoch identities, removed duplicates. Updated tests and documentation --- .../cmd/execution-state-extract/export_report.json | 4 ++-- model/flow/identity.go | 2 +- model/flow/protocol_state.go | 9 +++++---- storage/badger/protocol_state.go | 6 ++++-- storage/badger/protocol_state_test.go | 7 +++---- utils/unittest/fixtures.go | 11 +++++------ 6 files changed, 20 insertions(+), 19 deletions(-) diff --git a/cmd/util/cmd/execution-state-extract/export_report.json b/cmd/util/cmd/execution-state-extract/export_report.json index 4c8484e4396..091c0b7ffd0 100644 --- a/cmd/util/cmd/execution-state-extract/export_report.json +++ b/cmd/util/cmd/execution-state-extract/export_report.json @@ -1,6 +1,6 @@ { "EpochCounter": 0, - "PreviousStateCommitment": "1c9f9d343cb8d4610e0b2c1eb74d6ea2f2f8aef2d666281dc22870e3efaa607b", - "CurrentStateCommitment": "1c9f9d343cb8d4610e0b2c1eb74d6ea2f2f8aef2d666281dc22870e3efaa607b", + "PreviousStateCommitment": "f057d2597eb52d2c925e182f9b35446acabe74de5797b097cc01dfd2b6585d58", + "CurrentStateCommitment": "f057d2597eb52d2c925e182f9b35446acabe74de5797b097cc01dfd2b6585d58", "ReportSucceeded": true } \ No newline at end of file diff --git a/model/flow/identity.go b/model/flow/identity.go index 2dc3d4dccd3..08e3cb22222 100644 --- a/model/flow/identity.go +++ b/model/flow/identity.go @@ -571,7 +571,7 @@ func (il IdentityList) SamplePct(pct float64) (IdentityList, error) { // Union returns a new identity list containing every identity that occurs in // either `il`, or `other`, or both. There are no duplicates in the output, // where duplicates are identities with the same node ID. -// The returned IdentityList is sorted +// The returned IdentityList is sorted in canonical order. 
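Union, shown next, is what patch 27 leans on to merge previous- and current-epoch participants without duplicates. A generic sketch of the same merge, under the assumption that canonical order is ascending order of node IDs: keep the first occurrence of each node ID, so entries from the receiver win, then sort the result.

package main

import (
	"fmt"
	"sort"
)

type participant struct {
	NodeID  string
	Address string
}

// union keeps the first occurrence of every NodeID (entries from il shadow
// entries from other) and returns the merged list in ascending NodeID order.
func union(il, other []participant) []participant {
	seen := make(map[string]struct{}, len(il)+len(other))
	var out []participant
	for _, p := range append(append([]participant{}, il...), other...) {
		if _, dup := seen[p.NodeID]; dup {
			continue
		}
		seen[p.NodeID] = struct{}{}
		out = append(out, p)
	}
	sort.Slice(out, func(i, j int) bool { return out[i].NodeID < out[j].NodeID })
	return out
}

func main() {
	current := []participant{{"b", "b.new:3569"}, {"c", "c:3569"}}
	previous := []participant{{"a", "a:3569"}, {"b", "b.old:3569"}}
	// current-first union: the duplicate "b" keeps its current-epoch address.
	fmt.Printf("%+v\n", union(current, previous))
}

The receiver-wins behaviour is why patch 28 later flips the call to currentEpochSetup.Participants.Union(previousEpochSetup.Participants), so a participant present in both epochs is represented by its current-epoch record.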
func (il IdentityList) Union(other IdentityList) IdentityList { maxLen := len(il) + len(other) diff --git a/model/flow/protocol_state.go b/model/flow/protocol_state.go index c4491819488..a8ee513b619 100644 --- a/model/flow/protocol_state.go +++ b/model/flow/protocol_state.go @@ -34,10 +34,11 @@ type ProtocolStateEntry struct { // Using this structure instead of ProtocolStateEntry allows us to avoid querying // the database for epoch setups and commits and full identity table. // It holds several invariants, such as: -// - CurrentEpochSetup and CurrentEpochCommit are for the same epoch. Never nil. -// - PreviousEpochSetup and PreviousEpochCommit are for the same epoch. Never nil. -// - Identities is a full identity table for the current epoch. Identities are sorted in canonical order. Never nil. -// - NextEpochProtocolState is a protocol state for the next epoch. Can be nil. +// - CurrentEpochSetup and CurrentEpochCommit are for the same epoch. Never nil. +// - PreviousEpochSetup and PreviousEpochCommit are for the same epoch. Never nil. +// - Identities is a full identity table for the current epoch. +// Identities are sorted in canonical order. Without duplicates. Never nil. +// - NextEpochProtocolState is a protocol state for the next epoch. Can be nil. type RichProtocolStateEntry struct { ProtocolStateEntry diff --git a/storage/badger/protocol_state.go b/storage/badger/protocol_state.go index e53fd27489c..0e4410fa32a 100644 --- a/storage/badger/protocol_state.go +++ b/storage/badger/protocol_state.go @@ -2,7 +2,9 @@ package badger import ( "fmt" + "github.com/dgraph-io/badger/v2" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module" @@ -188,8 +190,8 @@ func buildIdentityTable( dynamicIdentities flow.DynamicIdentityEntryList, previousEpochSetup, currentEpochSetup *flow.EpochSetup, ) (flow.IdentityList, error) { - allEpochParticipants := append(previousEpochSetup.Participants, currentEpochSetup.Participants...) - allEpochParticipants = allEpochParticipants.Sort(order.Canonical) + // produce a unique set for current and previous epoch participants + allEpochParticipants := previousEpochSetup.Participants.Union(currentEpochSetup.Participants) // sanity check: size of identities should be equal to previous and current epoch participants combined if len(allEpochParticipants) != len(dynamicIdentities) { return nil, fmt.Errorf("invalid number of identities in protocol state: expected %d, got %d", len(allEpochParticipants), len(dynamicIdentities)) diff --git a/storage/badger/protocol_state_test.go b/storage/badger/protocol_state_test.go index 2a02f1209bb..46ee3955c29 100644 --- a/storage/badger/protocol_state_test.go +++ b/storage/badger/protocol_state_test.go @@ -8,7 +8,6 @@ import ( "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage/badger/transaction" "github.com/onflow/flow-go/utils/unittest" @@ -106,9 +105,9 @@ func assertRichProtocolStateValidity(t *testing.T, state *flow.RichProtocolState assert.Equal(t, state.PreviousEpochSetup.ID(), state.ProtocolStateEntry.PreviousEpochEventIDs.SetupID, "epoch setup should be for correct event ID") assert.Equal(t, state.PreviousEpochCommit.ID(), state.ProtocolStateEntry.PreviousEpochEventIDs.CommitID, "epoch commit should be for correct event ID") - // invariant: Identities is a full identity table for the current epoch. 
Identities are sorted in canonical order. Never nil. - allIdentities := append(state.PreviousEpochSetup.Participants, state.CurrentEpochSetup.Participants...).Sort(order.Canonical) - assert.Equal(t, allIdentities, state.Identities, "identities should be a full identity table for the current epoch") + // invariant: Identities is a full identity table for the current epoch. Identities are sorted in canonical order. Without duplicates. Never nil. + allIdentities := state.PreviousEpochSetup.Participants.Union(state.CurrentEpochSetup.Participants) + assert.Equal(t, allIdentities, state.Identities, "identities should be a full identity table for the current epoch, without duplicates") for i, identity := range state.ProtocolStateEntry.Identities { assert.Equal(t, identity.NodeID, allIdentities[i].NodeID, "identity node ID should match") diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index 35622e6c98d..2033ed8630d 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -2532,14 +2532,14 @@ func ProtocolStateFixture(options ...func(*flow.RichProtocolStateEntry)) *flow.R }) currentEpochSetup := EpochSetupFixture(func(setup *flow.EpochSetup) { setup.Counter = prevEpochSetup.Counter + 1 + // reuse same participant for current epoch + setup.Participants[1] = prevEpochSetup.Participants[1] }) currentEpochCommit := EpochCommitFixture(func(commit *flow.EpochCommit) { commit.Counter = currentEpochSetup.Counter }) - allIdentities := append(prevEpochSetup.Participants, currentEpochSetup.Participants...) - allIdentities = allIdentities.Sort(order.Canonical) - + allIdentities := prevEpochSetup.Participants.Union(currentEpochSetup.Participants) var dynamicIdentities flow.DynamicIdentityEntryList for _, identity := range allIdentities { dynamicIdentities = append(dynamicIdentities, &flow.DynamicIdentityEntry{ @@ -2582,13 +2582,12 @@ func WithNextEpochProtocolState() func(entry *flow.RichProtocolStateEntry) { return func(entry *flow.RichProtocolStateEntry) { nextEpochSetup := EpochSetupFixture(func(setup *flow.EpochSetup) { setup.Counter = entry.CurrentEpochSetup.Counter + 1 + setup.Participants[1] = entry.CurrentEpochSetup.Participants[1] }) nextEpochCommit := EpochCommitFixture(func(commit *flow.EpochCommit) { commit.Counter = nextEpochSetup.Counter }) - - allIdentities := append(entry.CurrentEpochSetup.Participants, nextEpochSetup.Participants...) - allIdentities = allIdentities.Sort(order.Canonical) + allIdentities := entry.CurrentEpochSetup.Participants.Union(nextEpochSetup.Participants) var dynamicIdentities flow.DynamicIdentityEntryList for _, identity := range allIdentities { From fe8ce9fe2150929723521e73bf5e0145ecfb321a Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Tue, 1 Aug 2023 10:54:30 +0300 Subject: [PATCH 28/46] Updated how epoch participants are constructured in storage layer. 
Changed ordering from prev + current epoch to current + prev epoch --- storage/badger/protocol_state.go | 2 +- storage/badger/protocol_state_test.go | 51 ++++++++++++++++++++++++++- utils/unittest/fixtures.go | 11 +++--- 3 files changed, 58 insertions(+), 6 deletions(-) diff --git a/storage/badger/protocol_state.go b/storage/badger/protocol_state.go index 0e4410fa32a..310c693bbd5 100644 --- a/storage/badger/protocol_state.go +++ b/storage/badger/protocol_state.go @@ -191,7 +191,7 @@ func buildIdentityTable( previousEpochSetup, currentEpochSetup *flow.EpochSetup, ) (flow.IdentityList, error) { // produce a unique set for current and previous epoch participants - allEpochParticipants := previousEpochSetup.Participants.Union(currentEpochSetup.Participants) + allEpochParticipants := currentEpochSetup.Participants.Union(previousEpochSetup.Participants) // sanity check: size of identities should be equal to previous and current epoch participants combined if len(allEpochParticipants) != len(dynamicIdentities) { return nil, fmt.Errorf("invalid number of identities in protocol state: expected %d, got %d", len(allEpochParticipants), len(dynamicIdentities)) diff --git a/storage/badger/protocol_state_test.go b/storage/badger/protocol_state_test.go index 46ee3955c29..be1f23a141c 100644 --- a/storage/badger/protocol_state_test.go +++ b/storage/badger/protocol_state_test.go @@ -88,6 +88,55 @@ func TestProtocolStateStoreInvalidProtocolState(t *testing.T) { }) } +// TestProtocolStateMergeParticipants tests that merging participants between epochs works correctly. We always take participants +// from current epoch and additionally add participants from previous epoch if they are not present in current epoch. +// If there is participant in previous and current epochs we should see it only once in the merged list and the entity has to be from current epoch. +func TestProtocolStateMergeParticipants(t *testing.T) { + unittest.RunWithBadgerDB(t, func(db *badger.DB) { + metrics := metrics.NewNoopCollector() + + setups := NewEpochSetups(metrics, db) + commits := NewEpochCommits(metrics, db) + store := NewProtocolState(metrics, setups, commits, db, DefaultCacheSize) + + stateEntry := unittest.ProtocolStateFixture() + require.Equal(t, stateEntry.CurrentEpochSetup.Participants[1], stateEntry.PreviousEpochSetup.Participants[1]) + // change address of participant in current epoch, so we can distinguish it from the one in previous epoch + // when performing assertion. 
+ newAddress := "123" + nodeID := stateEntry.CurrentEpochSetup.Participants[1].NodeID + stateEntry.CurrentEpochSetup.Participants[1].Address = newAddress + stateEntry.CurrentEpochEventIDs.SetupID = stateEntry.CurrentEpochSetup.ID() + protocolStateID := stateEntry.ID() + + // store protocol state and auxiliary info + err := transaction.Update(db, func(tx *transaction.Tx) error { + // store epoch events to be able to retrieve them later + err := setups.StoreTx(stateEntry.PreviousEpochSetup)(tx) + require.NoError(t, err) + err = setups.StoreTx(stateEntry.CurrentEpochSetup)(tx) + require.NoError(t, err) + err = commits.StoreTx(stateEntry.PreviousEpochCommit)(tx) + require.NoError(t, err) + err = commits.StoreTx(stateEntry.CurrentEpochCommit)(tx) + require.NoError(t, err) + + return store.StoreTx(protocolStateID, &stateEntry.ProtocolStateEntry)(tx) + }) + require.NoError(t, err) + + // fetch protocol state + actual, err := store.ByID(protocolStateID) + require.NoError(t, err) + require.Equal(t, stateEntry, actual) + + assertRichProtocolStateValidity(t, actual) + identity, ok := actual.Identities.ByNodeID(nodeID) + require.True(t, ok) + require.Equal(t, newAddress, identity.Address) + }) +} + // assertRichProtocolStateValidity checks if RichProtocolState holds its invariant and is correctly populated by storage layer. func assertRichProtocolStateValidity(t *testing.T, state *flow.RichProtocolStateEntry) { // invariant: CurrentEpochSetup and CurrentEpochCommit are for the same epoch. Never nil. @@ -106,7 +155,7 @@ func assertRichProtocolStateValidity(t *testing.T, state *flow.RichProtocolState assert.Equal(t, state.PreviousEpochCommit.ID(), state.ProtocolStateEntry.PreviousEpochEventIDs.CommitID, "epoch commit should be for correct event ID") // invariant: Identities is a full identity table for the current epoch. Identities are sorted in canonical order. Without duplicates. Never nil. 
- allIdentities := state.PreviousEpochSetup.Participants.Union(state.CurrentEpochSetup.Participants) + allIdentities := state.CurrentEpochSetup.Participants.Union(state.PreviousEpochSetup.Participants) assert.Equal(t, allIdentities, state.Identities, "identities should be a full identity table for the current epoch, without duplicates") for i, identity := range state.ProtocolStateEntry.Identities { diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index 2033ed8630d..10a72b53f28 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -2533,13 +2533,14 @@ func ProtocolStateFixture(options ...func(*flow.RichProtocolStateEntry)) *flow.R currentEpochSetup := EpochSetupFixture(func(setup *flow.EpochSetup) { setup.Counter = prevEpochSetup.Counter + 1 // reuse same participant for current epoch - setup.Participants[1] = prevEpochSetup.Participants[1] + sameParticipant := *prevEpochSetup.Participants[1] + setup.Participants[1] = &sameParticipant }) currentEpochCommit := EpochCommitFixture(func(commit *flow.EpochCommit) { commit.Counter = currentEpochSetup.Counter }) - allIdentities := prevEpochSetup.Participants.Union(currentEpochSetup.Participants) + allIdentities := currentEpochSetup.Participants.Union(prevEpochSetup.Participants) var dynamicIdentities flow.DynamicIdentityEntryList for _, identity := range allIdentities { dynamicIdentities = append(dynamicIdentities, &flow.DynamicIdentityEntry{ @@ -2582,12 +2583,14 @@ func WithNextEpochProtocolState() func(entry *flow.RichProtocolStateEntry) { return func(entry *flow.RichProtocolStateEntry) { nextEpochSetup := EpochSetupFixture(func(setup *flow.EpochSetup) { setup.Counter = entry.CurrentEpochSetup.Counter + 1 - setup.Participants[1] = entry.CurrentEpochSetup.Participants[1] + // reuse same participant for current epoch + sameParticipant := *entry.CurrentEpochSetup.Participants[1] + setup.Participants[1] = &sameParticipant }) nextEpochCommit := EpochCommitFixture(func(commit *flow.EpochCommit) { commit.Counter = nextEpochSetup.Counter }) - allIdentities := entry.CurrentEpochSetup.Participants.Union(nextEpochSetup.Participants) + allIdentities := nextEpochSetup.Participants.Union(entry.CurrentEpochSetup.Participants) var dynamicIdentities flow.DynamicIdentityEntryList for _, identity := range allIdentities { From 9b6f11c1338eb7f273ccff7ddde096afc6223793 Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Mon, 7 Aug 2023 14:00:09 +0300 Subject: [PATCH 29/46] Refactored creation of RichProtocolStateEntry. Made it more flexible. Added fixture for root protocol state. Added support for previous epoch and next epochs to be nil. --- model/flow/protocol_state.go | 85 ++++++++++++++++++++++ storage/badger/protocol_state.go | 101 +++++++++----------------- storage/badger/protocol_state_test.go | 64 ++++++++++++++-- utils/unittest/fixtures.go | 42 +++++++++++ 4 files changed, 218 insertions(+), 74 deletions(-) diff --git a/model/flow/protocol_state.go b/model/flow/protocol_state.go index a8ee513b619..6e4ba0bd20c 100644 --- a/model/flow/protocol_state.go +++ b/model/flow/protocol_state.go @@ -1,5 +1,7 @@ package flow +import "fmt" + // DynamicIdentityEntry encapsulates nodeID and dynamic portion of identity. type DynamicIdentityEntry struct { NodeID Identifier @@ -51,6 +53,56 @@ type RichProtocolStateEntry struct { NextEpochProtocolState *RichProtocolStateEntry } +// NewRichProtocolStateEntry constructs a rich protocol state entry from a protocol state entry and additional data. 
+// No errors are expected during normal operation. +func NewRichProtocolStateEntry( + protocolState ProtocolStateEntry, + previousEpochSetup *EpochSetup, + previousEpochCommit *EpochCommit, + currentEpochSetup *EpochSetup, + currentEpochCommit *EpochCommit, + nextEpochSetup *EpochSetup, + nextEpochCommit *EpochCommit, +) (*RichProtocolStateEntry, error) { + result := &RichProtocolStateEntry{ + ProtocolStateEntry: protocolState, + CurrentEpochSetup: currentEpochSetup, + CurrentEpochCommit: currentEpochCommit, + PreviousEpochSetup: previousEpochSetup, + PreviousEpochCommit: previousEpochCommit, + Identities: nil, + NextEpochProtocolState: nil, + } + + var err error + result.Identities, err = buildIdentityTable(protocolState.Identities, result.PreviousEpochSetup, result.CurrentEpochSetup) + if err != nil { + return nil, fmt.Errorf("could not build identity table: %w", err) + } + + // if next epoch has been already committed, fill in data for it as well. + if protocolState.NextEpochProtocolState != nil { + nextEpochProtocolState := *protocolState.NextEpochProtocolState + nextEpochIdentityTable, err := buildIdentityTable(nextEpochProtocolState.Identities, result.CurrentEpochSetup, nextEpochSetup) + if err != nil { + return nil, fmt.Errorf("could not build next epoch identity table: %w", err) + } + + // fill identities for next epoch + result.NextEpochProtocolState = &RichProtocolStateEntry{ + ProtocolStateEntry: nextEpochProtocolState, + CurrentEpochSetup: nextEpochSetup, + CurrentEpochCommit: nextEpochCommit, + PreviousEpochSetup: result.CurrentEpochSetup, // previous epoch setup is current epoch setup + PreviousEpochCommit: result.CurrentEpochCommit, // previous epoch setup is current epoch setup + Identities: nextEpochIdentityTable, + NextEpochProtocolState: nil, // always nil + } + } + + return result, nil +} + // ID returns hash of entry by hashing all fields. func (e *ProtocolStateEntry) ID() Identifier { if e == nil { @@ -83,3 +135,36 @@ func (ll DynamicIdentityEntryList) Sorted(less IdentifierOrder) bool { } return true } + +// buildIdentityTable builds identity table for current epoch combining data from previous, current epoch setups and dynamic identities +// that are stored in protocol state. It also performs sanity checks to make sure that data is consistent. +// No errors are expected during normal operation. 
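buildIdentityTable, declared below, depends on both inputs sharing one canonical order so it can pair them index by index; the two sanity checks (equal length, and matching node ID at every position) turn a silent misalignment into an explicit error. A stripped-down sketch of that aligned pairing, with stand-in types:

package main

import (
	"errors"
	"fmt"
)

type skeletonEntry struct {
	NodeID  string
	Address string // static part (stand-in)
}

type dynamicEntry struct {
	NodeID string
	Weight uint64 // dynamic part (stand-in)
}

type fullEntry struct {
	skeletonEntry
	Weight uint64
}

// zipAligned pairs skeletons with dynamic entries positionally. Any length or
// node-ID mismatch means the two views of the identity table are inconsistent,
// so it is reported as an error instead of producing a garbled table.
func zipAligned(skeletons []skeletonEntry, dynamics []dynamicEntry) ([]fullEntry, error) {
	if len(skeletons) != len(dynamics) {
		return nil, fmt.Errorf("mismatching sizes: %d skeletons vs %d dynamic entries", len(skeletons), len(dynamics))
	}
	out := make([]fullEntry, 0, len(skeletons))
	for i, sk := range skeletons {
		if sk.NodeID != dynamics[i].NodeID {
			return nil, errors.New("node IDs are not aligned: inputs must share one canonical order")
		}
		out = append(out, fullEntry{skeletonEntry: sk, Weight: dynamics[i].Weight})
	}
	return out, nil
}

func main() {
	sks := []skeletonEntry{{"a", "a:3569"}, {"b", "b:3569"}}
	dyn := []dynamicEntry{{"a", 100}, {"b", 0}}
	table, err := zipAligned(sks, dyn)
	fmt.Println(table, err)
}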
+func buildIdentityTable( + dynamicIdentities DynamicIdentityEntryList, + previousEpochSetup, currentEpochSetup *EpochSetup, +) (IdentityList, error) { + var previousEpochParticipants IdentityList + if previousEpochSetup != nil { + previousEpochParticipants = previousEpochSetup.Participants + } + // produce a unique set for current and previous epoch participants + allEpochParticipants := currentEpochSetup.Participants.Union(previousEpochParticipants) + // sanity check: size of identities should be equal to previous and current epoch participants combined + if len(allEpochParticipants) != len(dynamicIdentities) { + return nil, fmt.Errorf("invalid number of identities in protocol state: expected %d, got %d", len(allEpochParticipants), len(dynamicIdentities)) + } + + // build full identity table for current epoch + var result IdentityList + for i, identity := range dynamicIdentities { + // sanity check: identities should be sorted in canonical order + if identity.NodeID != allEpochParticipants[i].NodeID { + return nil, fmt.Errorf("identites in protocol state are not in canonical order: expected %s, got %s", allEpochParticipants[i].NodeID, identity.NodeID) + } + result = append(result, &Identity{ + IdentitySkeleton: allEpochParticipants[i].IdentitySkeleton, + DynamicIdentity: identity.Dynamic, + }) + } + return result, nil +} diff --git a/storage/badger/protocol_state.go b/storage/badger/protocol_state.go index 310c693bbd5..84630f5cb4f 100644 --- a/storage/badger/protocol_state.go +++ b/storage/badger/protocol_state.go @@ -125,89 +125,56 @@ func newRichProtocolStateEntry( setups storage.EpochSetups, commits storage.EpochCommits, ) (*flow.RichProtocolStateEntry, error) { - result := &flow.RichProtocolStateEntry{ - ProtocolStateEntry: protocolState, - } - + var ( + previousEpochSetup *flow.EpochSetup + previousEpochCommit *flow.EpochCommit + nextEpochSetup *flow.EpochSetup + nextEpochCommit *flow.EpochCommit + err error + ) // query and fill in epoch setups and commits for previous and current epochs - var err error - result.PreviousEpochSetup, err = setups.ByID(protocolState.PreviousEpochEventIDs.SetupID) - if err != nil { - return nil, fmt.Errorf("could not retrieve previous epoch setup: %w", err) - } - result.PreviousEpochCommit, err = commits.ByID(protocolState.PreviousEpochEventIDs.CommitID) - if err != nil { - return nil, fmt.Errorf("could not retrieve previous epoch commit: %w", err) + if protocolState.PreviousEpochEventIDs.SetupID != flow.ZeroID { + previousEpochSetup, err = setups.ByID(protocolState.PreviousEpochEventIDs.SetupID) + if err != nil { + return nil, fmt.Errorf("could not retrieve previous epoch setup: %w", err) + } + previousEpochCommit, err = commits.ByID(protocolState.PreviousEpochEventIDs.CommitID) + if err != nil { + return nil, fmt.Errorf("could not retrieve previous epoch commit: %w", err) + } } - result.CurrentEpochSetup, err = setups.ByID(protocolState.CurrentEpochEventIDs.SetupID) + + currentEpochSetup, err := setups.ByID(protocolState.CurrentEpochEventIDs.SetupID) if err != nil { return nil, fmt.Errorf("could not retrieve current epoch setup: %w", err) } - result.CurrentEpochCommit, err = commits.ByID(protocolState.CurrentEpochEventIDs.CommitID) + currentEpochCommit, err := commits.ByID(protocolState.CurrentEpochEventIDs.CommitID) if err != nil { return nil, fmt.Errorf("could not retrieve current epoch commit: %w", err) } - result.Identities, err = buildIdentityTable(protocolState.Identities, result.PreviousEpochSetup, result.CurrentEpochSetup) - if err != nil { - 
return nil, fmt.Errorf("could not build identity table: %w", err) - } // if next epoch has been already committed, fill in data for it as well. if protocolState.NextEpochProtocolState != nil { nextEpochProtocolState := *protocolState.NextEpochProtocolState - nextEpochSetup, err := setups.ByID(nextEpochProtocolState.CurrentEpochEventIDs.SetupID) + nextEpochSetup, err = setups.ByID(nextEpochProtocolState.CurrentEpochEventIDs.SetupID) if err != nil { return nil, fmt.Errorf("could not retrieve next epoch setup: %w", err) } - nextEpochCommit, err := commits.ByID(nextEpochProtocolState.CurrentEpochEventIDs.CommitID) - if err != nil { - return nil, fmt.Errorf("could not retrieve next epoch commit: %w", err) - } - nextEpochIdentityTable, err := buildIdentityTable(nextEpochProtocolState.Identities, result.CurrentEpochSetup, nextEpochSetup) - if err != nil { - return nil, fmt.Errorf("could not build next epoch identity table: %w", err) - } - - // fill identities for next epoch - result.NextEpochProtocolState = &flow.RichProtocolStateEntry{ - ProtocolStateEntry: nextEpochProtocolState, - CurrentEpochSetup: nextEpochSetup, - CurrentEpochCommit: nextEpochCommit, - PreviousEpochSetup: result.CurrentEpochSetup, // previous epoch setup is current epoch setup - PreviousEpochCommit: result.CurrentEpochCommit, // previous epoch setup is current epoch setup - Identities: nextEpochIdentityTable, - NextEpochProtocolState: nil, // always nil + if nextEpochProtocolState.CurrentEpochEventIDs.CommitID != flow.ZeroID { + nextEpochCommit, err = commits.ByID(nextEpochProtocolState.CurrentEpochEventIDs.CommitID) + if err != nil { + return nil, fmt.Errorf("could not retrieve next epoch commit: %w", err) + } } } - return result, nil -} - -// buildIdentityTable builds identity table for current epoch combining data from previous, current epoch setups and dynamic identities -// that are stored in protocol state. It also performs sanity checks to make sure that data is consistent. -// No errors are expected during normal operation. 
-func buildIdentityTable( - dynamicIdentities flow.DynamicIdentityEntryList, - previousEpochSetup, currentEpochSetup *flow.EpochSetup, -) (flow.IdentityList, error) { - // produce a unique set for current and previous epoch participants - allEpochParticipants := currentEpochSetup.Participants.Union(previousEpochSetup.Participants) - // sanity check: size of identities should be equal to previous and current epoch participants combined - if len(allEpochParticipants) != len(dynamicIdentities) { - return nil, fmt.Errorf("invalid number of identities in protocol state: expected %d, got %d", len(allEpochParticipants), len(dynamicIdentities)) - } - - // build full identity table for current epoch - var result flow.IdentityList - for i, identity := range dynamicIdentities { - // sanity check: identities should be sorted in canonical order - if identity.NodeID != allEpochParticipants[i].NodeID { - return nil, fmt.Errorf("identites in protocol state are not in canonical order: expected %s, got %s", allEpochParticipants[i].NodeID, identity.NodeID) - } - result = append(result, &flow.Identity{ - IdentitySkeleton: allEpochParticipants[i].IdentitySkeleton, - DynamicIdentity: identity.Dynamic, - }) - } - return result, nil + return flow.NewRichProtocolStateEntry( + protocolState, + previousEpochSetup, + previousEpochCommit, + currentEpochSetup, + currentEpochCommit, + nextEpochSetup, + nextEpochCommit, + ) } diff --git a/storage/badger/protocol_state_test.go b/storage/badger/protocol_state_test.go index be1f23a141c..962c7cb802b 100644 --- a/storage/badger/protocol_state_test.go +++ b/storage/badger/protocol_state_test.go @@ -137,25 +137,74 @@ func TestProtocolStateMergeParticipants(t *testing.T) { }) } +// TestProtocolStateRootSnapshot tests that storing and retrieving root protocol state(in case of bootstrap) works as expected. +func TestProtocolStateRootSnapshot(t *testing.T) { + unittest.RunWithBadgerDB(t, func(db *badger.DB) { + metrics := metrics.NewNoopCollector() + + setups := NewEpochSetups(metrics, db) + commits := NewEpochCommits(metrics, db) + store := NewProtocolState(metrics, setups, commits, db, DefaultCacheSize) + expected := unittest.RootProtocolStateFixture() + + protocolStateID := expected.ID() + blockID := unittest.IdentifierFixture() + + // store protocol state and auxiliary info + err := transaction.Update(db, func(tx *transaction.Tx) error { + // store epoch events to be able to retrieve them later + err := setups.StoreTx(expected.CurrentEpochSetup)(tx) + require.NoError(t, err) + err = commits.StoreTx(expected.CurrentEpochCommit)(tx) + require.NoError(t, err) + + err = store.StoreTx(protocolStateID, &expected.ProtocolStateEntry)(tx) + require.NoError(t, err) + return store.Index(blockID, protocolStateID)(tx) + }) + require.NoError(t, err) + + // fetch protocol state + actual, err := store.ByID(protocolStateID) + require.NoError(t, err) + require.Equal(t, expected, actual) + + assertRichProtocolStateValidity(t, actual) + + // fetch protocol state by block ID + actualByBlockID, err := store.ByBlockID(blockID) + require.NoError(t, err) + require.Equal(t, expected, actualByBlockID) + + assertRichProtocolStateValidity(t, actualByBlockID) + }) +} + // assertRichProtocolStateValidity checks if RichProtocolState holds its invariant and is correctly populated by storage layer. func assertRichProtocolStateValidity(t *testing.T, state *flow.RichProtocolStateEntry) { // invariant: CurrentEpochSetup and CurrentEpochCommit are for the same epoch. Never nil. 
assert.Equal(t, state.CurrentEpochSetup.Counter, state.CurrentEpochCommit.Counter, "current epoch setup and commit should be for the same epoch") - assert.Equal(t, state.CurrentEpochSetup.Counter, state.PreviousEpochSetup.Counter+1, "current epoch setup should be next after previous epoch") // invariant: CurrentEpochSetup and CurrentEpochCommit IDs are the equal to the ID of the protocol state entry. Never nil. assert.Equal(t, state.CurrentEpochSetup.ID(), state.ProtocolStateEntry.CurrentEpochEventIDs.SetupID, "epoch setup should be for correct event ID") assert.Equal(t, state.CurrentEpochCommit.ID(), state.ProtocolStateEntry.CurrentEpochEventIDs.CommitID, "epoch commit should be for correct event ID") - // invariant: PreviousEpochSetup and PreviousEpochCommit are for the same epoch. Never nil. - assert.Equal(t, state.PreviousEpochSetup.Counter, state.PreviousEpochCommit.Counter, "previous epoch setup and commit should be for the same epoch") + var previousEpochParticipants flow.IdentityList + // invariant: PreviousEpochSetup and PreviousEpochCommit should be present if respective ID is not zero. + if state.PreviousEpochEventIDs.SetupID != flow.ZeroID { + // invariant: PreviousEpochSetup and PreviousEpochCommit are for the same epoch. Never nil. + assert.Equal(t, state.CurrentEpochSetup.Counter, state.PreviousEpochSetup.Counter+1, "current epoch setup should be next after previous epoch") + assert.Equal(t, state.PreviousEpochSetup.Counter, state.PreviousEpochCommit.Counter, "previous epoch setup and commit should be for the same epoch") - // invariant: PreviousEpochSetup and PreviousEpochCommit IDs are the equal to the ID of the protocol state entry. Never nil. - assert.Equal(t, state.PreviousEpochSetup.ID(), state.ProtocolStateEntry.PreviousEpochEventIDs.SetupID, "epoch setup should be for correct event ID") - assert.Equal(t, state.PreviousEpochCommit.ID(), state.ProtocolStateEntry.PreviousEpochEventIDs.CommitID, "epoch commit should be for correct event ID") + // invariant: PreviousEpochSetup and PreviousEpochCommit IDs are the equal to the ID of the protocol state entry. Never nil. + assert.Equal(t, state.PreviousEpochSetup.ID(), state.ProtocolStateEntry.PreviousEpochEventIDs.SetupID, "epoch setup should be for correct event ID") + assert.Equal(t, state.PreviousEpochCommit.ID(), state.ProtocolStateEntry.PreviousEpochEventIDs.CommitID, "epoch commit should be for correct event ID") + + previousEpochParticipants = state.PreviousEpochSetup.Participants + } // invariant: Identities is a full identity table for the current epoch. Identities are sorted in canonical order. Without duplicates. Never nil. - allIdentities := state.CurrentEpochSetup.Participants.Union(state.PreviousEpochSetup.Participants) + allIdentities := state.CurrentEpochSetup.Participants.Union(previousEpochParticipants) assert.Equal(t, allIdentities, state.Identities, "identities should be a full identity table for the current epoch, without duplicates") for i, identity := range state.ProtocolStateEntry.Identities { @@ -166,6 +215,7 @@ func assertRichProtocolStateValidity(t *testing.T, state *flow.RichProtocolState if nextEpochState == nil { return } + // invariant: NextEpochProtocolState is a protocol state for the next epoch. Can be nil. 
assertRichProtocolStateValidity(t, nextEpochState) } diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index 10a72b53f28..15b0259be3d 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -2521,6 +2521,48 @@ func ChunkExecutionDataFixture(t *testing.T, minSize int, opts ...func(*executio } } +// RootProtocolStateFixture creates a fixture with correctly structured data for root protocol state. +// This can be useful for testing bootstrap when there is no previous epoch. +func RootProtocolStateFixture() *flow.RichProtocolStateEntry { + currentEpochSetup := EpochSetupFixture(func(setup *flow.EpochSetup) { + setup.Counter = 1 + }) + currentEpochCommit := EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = currentEpochSetup.Counter + }) + + allIdentities := currentEpochSetup.Participants + var dynamicIdentities flow.DynamicIdentityEntryList + for _, identity := range allIdentities { + dynamicIdentities = append(dynamicIdentities, &flow.DynamicIdentityEntry{ + NodeID: identity.NodeID, + Dynamic: identity.DynamicIdentity, + }) + } + + return &flow.RichProtocolStateEntry{ + ProtocolStateEntry: flow.ProtocolStateEntry{ + CurrentEpochEventIDs: flow.EventIDs{ + SetupID: currentEpochSetup.ID(), + CommitID: currentEpochCommit.ID(), + }, + PreviousEpochEventIDs: flow.EventIDs{ + SetupID: flow.ZeroID, + CommitID: flow.ZeroID, + }, + Identities: dynamicIdentities, + InvalidStateTransitionAttempted: false, + NextEpochProtocolState: nil, + }, + CurrentEpochSetup: currentEpochSetup, + CurrentEpochCommit: currentEpochCommit, + PreviousEpochSetup: nil, + PreviousEpochCommit: nil, + Identities: allIdentities, + NextEpochProtocolState: nil, + } +} + // ProtocolStateFixture creates a fixture with correctly structured data that passes basic sanity checks. // Epoch setup and commit counters are set to match. // Identities are constructed from setup events. From ab7859a7e71821e5d4bd83dd46f0bf24c0df9439 Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Mon, 7 Aug 2023 17:51:13 +0300 Subject: [PATCH 30/46] Changed RichProtocolStateEntry to hold a pointer of embedded type --- model/flow/protocol_state.go | 6 +++--- storage/badger/protocol_state.go | 4 ++-- storage/badger/protocol_state_test.go | 10 +++++----- utils/unittest/fixtures.go | 6 +++--- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/model/flow/protocol_state.go b/model/flow/protocol_state.go index 6e4ba0bd20c..8fb841f52b8 100644 --- a/model/flow/protocol_state.go +++ b/model/flow/protocol_state.go @@ -42,7 +42,7 @@ type ProtocolStateEntry struct { // Identities are sorted in canonical order. Without duplicates. Never nil. // - NextEpochProtocolState is a protocol state for the next epoch. Can be nil. type RichProtocolStateEntry struct { - ProtocolStateEntry + *ProtocolStateEntry CurrentEpochSetup *EpochSetup CurrentEpochCommit *EpochCommit @@ -56,7 +56,7 @@ type RichProtocolStateEntry struct { // NewRichProtocolStateEntry constructs a rich protocol state entry from a protocol state entry and additional data. // No errors are expected during normal operation. func NewRichProtocolStateEntry( - protocolState ProtocolStateEntry, + protocolState *ProtocolStateEntry, previousEpochSetup *EpochSetup, previousEpochCommit *EpochCommit, currentEpochSetup *EpochSetup, @@ -82,7 +82,7 @@ func NewRichProtocolStateEntry( // if next epoch has been already committed, fill in data for it as well. 
if protocolState.NextEpochProtocolState != nil { - nextEpochProtocolState := *protocolState.NextEpochProtocolState + nextEpochProtocolState := protocolState.NextEpochProtocolState nextEpochIdentityTable, err := buildIdentityTable(nextEpochProtocolState.Identities, result.CurrentEpochSetup, nextEpochSetup) if err != nil { return nil, fmt.Errorf("could not build next epoch identity table: %w", err) diff --git a/storage/badger/protocol_state.go b/storage/badger/protocol_state.go index 84630f5cb4f..b7bf4faa469 100644 --- a/storage/badger/protocol_state.go +++ b/storage/badger/protocol_state.go @@ -39,7 +39,7 @@ func NewProtocolState(collector module.CacheMetrics, if err != nil { return nil, err } - result, err := newRichProtocolStateEntry(protocolStateEntry, epochSetups, epochCommits) + result, err := newRichProtocolStateEntry(&protocolStateEntry, epochSetups, epochCommits) if err != nil { return nil, fmt.Errorf("could not create rich protocol state entry: %w", err) } @@ -121,7 +121,7 @@ func (s *ProtocolState) byBlockID(blockID flow.Identifier) func(*badger.Txn) (*f // It queries and fills in epoch setups and commits for previous and current epochs and possibly next epoch. // No errors are expected during normal operation. func newRichProtocolStateEntry( - protocolState flow.ProtocolStateEntry, + protocolState *flow.ProtocolStateEntry, setups storage.EpochSetups, commits storage.EpochCommits, ) (*flow.RichProtocolStateEntry, error) { diff --git a/storage/badger/protocol_state_test.go b/storage/badger/protocol_state_test.go index 962c7cb802b..8f235daafa0 100644 --- a/storage/badger/protocol_state_test.go +++ b/storage/badger/protocol_state_test.go @@ -42,7 +42,7 @@ func TestProtocolStateStorage(t *testing.T) { err = commits.StoreTx(expected.NextEpochProtocolState.CurrentEpochCommit)(tx) require.NoError(t, err) - err = store.StoreTx(protocolStateID, &expected.ProtocolStateEntry)(tx) + err = store.StoreTx(protocolStateID, expected.ProtocolStateEntry)(tx) require.NoError(t, err) return store.Index(blockID, protocolStateID)(tx) }) @@ -76,14 +76,14 @@ func TestProtocolStateStoreInvalidProtocolState(t *testing.T) { // swap first and second elements to break canonical order invalid.Identities[0], invalid.Identities[1] = invalid.Identities[1], invalid.Identities[0] - err := transaction.Update(db, store.StoreTx(invalid.ID(), &invalid)) + err := transaction.Update(db, store.StoreTx(invalid.ID(), invalid)) require.Error(t, err) invalid = unittest.ProtocolStateFixture(unittest.WithNextEpochProtocolState()).ProtocolStateEntry // swap first and second elements to break canonical order invalid.NextEpochProtocolState.Identities[0], invalid.NextEpochProtocolState.Identities[1] = invalid.NextEpochProtocolState.Identities[1], invalid.NextEpochProtocolState.Identities[0] - err = transaction.Update(db, store.StoreTx(invalid.ID(), &invalid)) + err = transaction.Update(db, store.StoreTx(invalid.ID(), invalid)) require.Error(t, err) }) } @@ -121,7 +121,7 @@ func TestProtocolStateMergeParticipants(t *testing.T) { err = commits.StoreTx(stateEntry.CurrentEpochCommit)(tx) require.NoError(t, err) - return store.StoreTx(protocolStateID, &stateEntry.ProtocolStateEntry)(tx) + return store.StoreTx(protocolStateID, stateEntry.ProtocolStateEntry)(tx) }) require.NoError(t, err) @@ -158,7 +158,7 @@ func TestProtocolStateRootSnapshot(t *testing.T) { err = commits.StoreTx(expected.CurrentEpochCommit)(tx) require.NoError(t, err) - err = store.StoreTx(protocolStateID, &expected.ProtocolStateEntry)(tx) + err = 
store.StoreTx(protocolStateID, expected.ProtocolStateEntry)(tx) require.NoError(t, err) return store.Index(blockID, protocolStateID)(tx) }) diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index 15b0259be3d..035699fb271 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -2541,7 +2541,7 @@ func RootProtocolStateFixture() *flow.RichProtocolStateEntry { } return &flow.RichProtocolStateEntry{ - ProtocolStateEntry: flow.ProtocolStateEntry{ + ProtocolStateEntry: &flow.ProtocolStateEntry{ CurrentEpochEventIDs: flow.EventIDs{ SetupID: currentEpochSetup.ID(), CommitID: currentEpochCommit.ID(), @@ -2592,7 +2592,7 @@ func ProtocolStateFixture(options ...func(*flow.RichProtocolStateEntry)) *flow.R } entry := &flow.RichProtocolStateEntry{ - ProtocolStateEntry: flow.ProtocolStateEntry{ + ProtocolStateEntry: &flow.ProtocolStateEntry{ CurrentEpochEventIDs: flow.EventIDs{ SetupID: currentEpochSetup.ID(), CommitID: currentEpochCommit.ID(), @@ -2654,7 +2654,7 @@ func WithNextEpochProtocolState() func(entry *flow.RichProtocolStateEntry) { } entry.NextEpochProtocolState = &flow.RichProtocolStateEntry{ - ProtocolStateEntry: *entry.ProtocolStateEntry.NextEpochProtocolState, + ProtocolStateEntry: entry.ProtocolStateEntry.NextEpochProtocolState, CurrentEpochSetup: nextEpochSetup, CurrentEpochCommit: nextEpochCommit, PreviousEpochSetup: entry.CurrentEpochSetup, From 3bec0548cfc3d4af5411b15a1209d1abf193caa4 Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Mon, 7 Aug 2023 17:56:25 +0300 Subject: [PATCH 31/46] Linted --- storage/badger/operation/protocol_state_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/badger/operation/protocol_state_test.go b/storage/badger/operation/protocol_state_test.go index 883b60df4a2..1f29e1b7b49 100644 --- a/storage/badger/operation/protocol_state_test.go +++ b/storage/badger/operation/protocol_state_test.go @@ -14,7 +14,7 @@ import ( // TestInsertProtocolState tests if basic badger operations on ProtocolState work as expected. func TestInsertProtocolState(t *testing.T) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := &unittest.ProtocolStateFixture().ProtocolStateEntry + expected := unittest.ProtocolStateFixture().ProtocolStateEntry protocolStateID := expected.ID() err := db.Update(InsertProtocolState(protocolStateID, expected)) From a97c4549b0b19c436cd6c50da1ea0f5e02d1d194 Mon Sep 17 00:00:00 2001 From: Yurii Oleksyshyn Date: Tue, 8 Aug 2023 20:46:36 +0300 Subject: [PATCH 32/46] Updated how identities are created based on epoch state. Updated tests, fixtures, docs --- model/flow/protocol_state.go | 57 ++++++++++++---- model/flow/protocol_state_test.go | 96 +++++++++++++++++++++++++++ storage/badger/protocol_state_test.go | 8 ++- utils/unittest/fixtures.go | 29 ++------ 4 files changed, 153 insertions(+), 37 deletions(-) create mode 100644 model/flow/protocol_state_test.go diff --git a/model/flow/protocol_state.go b/model/flow/protocol_state.go index 8fb841f52b8..23f7c7d5b86 100644 --- a/model/flow/protocol_state.go +++ b/model/flow/protocol_state.go @@ -75,15 +75,25 @@ func NewRichProtocolStateEntry( } var err error - result.Identities, err = buildIdentityTable(protocolState.Identities, result.PreviousEpochSetup, result.CurrentEpochSetup) - if err != nil { - return nil, fmt.Errorf("could not build identity table: %w", err) - } - // if next epoch has been already committed, fill in data for it as well. 
if protocolState.NextEpochProtocolState != nil { + // if next epoch is available, it means that we have observed epoch setup event and we are not anymore in staking phase, + // so we need to build the identity table using current and next epoch setup events. + result.Identities, err = buildIdentityTable( + protocolState.Identities, + currentEpochSetup.Participants, + nextEpochSetup.Participants, + ) + if err != nil { + return nil, fmt.Errorf("could not build identity table for setup/commit phase: %w", err) + } + nextEpochProtocolState := protocolState.NextEpochProtocolState - nextEpochIdentityTable, err := buildIdentityTable(nextEpochProtocolState.Identities, result.CurrentEpochSetup, nextEpochSetup) + nextEpochIdentityTable, err := buildIdentityTable( + nextEpochProtocolState.Identities, + nextEpochSetup.Participants, + currentEpochSetup.Participants, + ) if err != nil { return nil, fmt.Errorf("could not build next epoch identity table: %w", err) } @@ -98,6 +108,21 @@ func NewRichProtocolStateEntry( Identities: nextEpochIdentityTable, NextEpochProtocolState: nil, // always nil } + } else { + // if next epoch is not yet created, it means that we are in staking phase, + // so we need to build the identity table using previous and current epoch setup events. + var otherIdentities IdentityList + if previousEpochSetup != nil { + otherIdentities = previousEpochSetup.Participants + } + result.Identities, err = buildIdentityTable( + protocolState.Identities, + currentEpochSetup.Participants, + otherIdentities, + ) + if err != nil { + return nil, fmt.Errorf("could not build identity table for staking phase: %w", err) + } } return result, nil @@ -141,14 +166,10 @@ func (ll DynamicIdentityEntryList) Sorted(less IdentifierOrder) bool { // No errors are expected during normal operation. func buildIdentityTable( dynamicIdentities DynamicIdentityEntryList, - previousEpochSetup, currentEpochSetup *EpochSetup, + coreIdentities, otherIdentities IdentityList, ) (IdentityList, error) { - var previousEpochParticipants IdentityList - if previousEpochSetup != nil { - previousEpochParticipants = previousEpochSetup.Participants - } // produce a unique set for current and previous epoch participants - allEpochParticipants := currentEpochSetup.Participants.Union(previousEpochParticipants) + allEpochParticipants := coreIdentities.Union(otherIdentities) // sanity check: size of identities should be equal to previous and current epoch participants combined if len(allEpochParticipants) != len(dynamicIdentities) { return nil, fmt.Errorf("invalid number of identities in protocol state: expected %d, got %d", len(allEpochParticipants), len(dynamicIdentities)) @@ -168,3 +189,15 @@ func buildIdentityTable( } return result, nil } + +// DynamicIdentityEntryListFromIdentities converts IdentityList to DynamicIdentityEntryList. 
+func DynamicIdentityEntryListFromIdentities(identities IdentityList) DynamicIdentityEntryList { + dynamicIdentities := make(DynamicIdentityEntryList, 0, len(identities)) + for _, identity := range identities { + dynamicIdentities = append(dynamicIdentities, &DynamicIdentityEntry{ + NodeID: identity.NodeID, + Dynamic: identity.DynamicIdentity, + }) + } + return dynamicIdentities +} diff --git a/model/flow/protocol_state_test.go b/model/flow/protocol_state_test.go new file mode 100644 index 00000000000..9b89cf886d6 --- /dev/null +++ b/model/flow/protocol_state_test.go @@ -0,0 +1,96 @@ +package flow_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestNewRichProtocolStateEntry checks that NewRichProtocolStateEntry creates valid identity tables depending on the state +// of epoch which is derived from the protocol state entry. +func TestNewRichProtocolStateEntry(t *testing.T) { + t.Run("staking-root-protocol-state", func(t *testing.T) { + currentEpochSetup := unittest.EpochSetupFixture() + currentEpochCommit := unittest.EpochCommitFixture() + stateEntry := &flow.ProtocolStateEntry{ + CurrentEpochEventIDs: flow.EventIDs{ + SetupID: currentEpochSetup.ID(), + CommitID: currentEpochCommit.ID(), + }, + PreviousEpochEventIDs: flow.EventIDs{}, + Identities: flow.DynamicIdentityEntryListFromIdentities(currentEpochSetup.Participants), + InvalidStateTransitionAttempted: false, + NextEpochProtocolState: nil, + } + entry, err := flow.NewRichProtocolStateEntry( + stateEntry, + nil, + nil, + currentEpochSetup, + currentEpochCommit, + nil, + nil, + ) + assert.NoError(t, err) + assert.Equal(t, currentEpochSetup.Participants, entry.Identities, "should be equal to current epoch setup participants") + }) + t.Run("staking-phase", func(t *testing.T) { + stateEntry := unittest.ProtocolStateFixture() + richEntry, err := flow.NewRichProtocolStateEntry( + stateEntry.ProtocolStateEntry, + stateEntry.PreviousEpochSetup, + stateEntry.PreviousEpochCommit, + stateEntry.CurrentEpochSetup, + stateEntry.CurrentEpochCommit, + nil, + nil, + ) + assert.NoError(t, err) + expectedIdentities := stateEntry.CurrentEpochSetup.Participants.Union(stateEntry.PreviousEpochSetup.Participants) + assert.Equal(t, expectedIdentities, richEntry.Identities, "should be equal to current epoch setup participants + previous epoch setup participants") + assert.Nil(t, richEntry.NextEpochProtocolState) + }) + t.Run("setup-phase", func(t *testing.T) { + stateEntry := unittest.ProtocolStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichProtocolStateEntry) { + entry.NextEpochProtocolState.CurrentEpochCommit = nil + entry.NextEpochProtocolState.CurrentEpochEventIDs.CommitID = flow.ZeroID + }) + + richEntry, err := flow.NewRichProtocolStateEntry( + stateEntry.ProtocolStateEntry, + stateEntry.PreviousEpochSetup, + stateEntry.PreviousEpochCommit, + stateEntry.CurrentEpochSetup, + stateEntry.CurrentEpochCommit, + stateEntry.NextEpochProtocolState.CurrentEpochSetup, + nil, + ) + assert.NoError(t, err) + expectedIdentities := stateEntry.CurrentEpochSetup.Participants.Union(stateEntry.NextEpochProtocolState.CurrentEpochSetup.Participants) + assert.Equal(t, expectedIdentities, richEntry.Identities, "should be equal to current epoch setup participants + next epoch setup participants") + assert.Nil(t, richEntry.NextEpochProtocolState.CurrentEpochCommit) + expectedIdentities = 
stateEntry.NextEpochProtocolState.CurrentEpochSetup.Participants.Union(stateEntry.CurrentEpochSetup.Participants) + assert.Equal(t, expectedIdentities, richEntry.NextEpochProtocolState.Identities, "should be equal to next epoch setup participants + current epoch setup participants") + }) + t.Run("commit-phase", func(t *testing.T) { + stateEntry := unittest.ProtocolStateFixture(unittest.WithNextEpochProtocolState()) + + richEntry, err := flow.NewRichProtocolStateEntry( + stateEntry.ProtocolStateEntry, + stateEntry.PreviousEpochSetup, + stateEntry.PreviousEpochCommit, + stateEntry.CurrentEpochSetup, + stateEntry.CurrentEpochCommit, + stateEntry.NextEpochProtocolState.CurrentEpochSetup, + stateEntry.NextEpochProtocolState.CurrentEpochCommit, + ) + assert.NoError(t, err) + expectedIdentities := stateEntry.CurrentEpochSetup.Participants.Union(stateEntry.NextEpochProtocolState.CurrentEpochSetup.Participants) + assert.Equal(t, expectedIdentities, richEntry.Identities, "should be equal to current epoch setup participants + next epoch setup participants") + expectedIdentities = stateEntry.NextEpochProtocolState.CurrentEpochSetup.Participants.Union(stateEntry.CurrentEpochSetup.Participants) + assert.Equal(t, expectedIdentities, richEntry.NextEpochProtocolState.Identities, "should be equal to next epoch setup participants + current epoch setup participants") + }) +} diff --git a/storage/badger/protocol_state_test.go b/storage/badger/protocol_state_test.go index 8f235daafa0..a2a8a6203dd 100644 --- a/storage/badger/protocol_state_test.go +++ b/storage/badger/protocol_state_test.go @@ -204,7 +204,13 @@ func assertRichProtocolStateValidity(t *testing.T, state *flow.RichProtocolState } // invariant: Identities is a full identity table for the current epoch. Identities are sorted in canonical order. Without duplicates. Never nil. 
- allIdentities := state.CurrentEpochSetup.Participants.Union(previousEpochParticipants) + var allIdentities flow.IdentityList + if state.NextEpochProtocolState != nil { + allIdentities = state.CurrentEpochSetup.Participants.Union(state.NextEpochProtocolState.CurrentEpochSetup.Participants) + } else { + allIdentities = state.CurrentEpochSetup.Participants.Union(previousEpochParticipants) + } + assert.Equal(t, allIdentities, state.Identities, "identities should be a full identity table for the current epoch, without duplicates") for i, identity := range state.ProtocolStateEntry.Identities { diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index 035699fb271..27d7c8b3391 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -2532,13 +2532,6 @@ func RootProtocolStateFixture() *flow.RichProtocolStateEntry { }) allIdentities := currentEpochSetup.Participants - var dynamicIdentities flow.DynamicIdentityEntryList - for _, identity := range allIdentities { - dynamicIdentities = append(dynamicIdentities, &flow.DynamicIdentityEntry{ - NodeID: identity.NodeID, - Dynamic: identity.DynamicIdentity, - }) - } return &flow.RichProtocolStateEntry{ ProtocolStateEntry: &flow.ProtocolStateEntry{ @@ -2550,7 +2543,7 @@ func RootProtocolStateFixture() *flow.RichProtocolStateEntry { SetupID: flow.ZeroID, CommitID: flow.ZeroID, }, - Identities: dynamicIdentities, + Identities: flow.DynamicIdentityEntryListFromIdentities(allIdentities), InvalidStateTransitionAttempted: false, NextEpochProtocolState: nil, }, @@ -2583,13 +2576,6 @@ func ProtocolStateFixture(options ...func(*flow.RichProtocolStateEntry)) *flow.R }) allIdentities := currentEpochSetup.Participants.Union(prevEpochSetup.Participants) - var dynamicIdentities flow.DynamicIdentityEntryList - for _, identity := range allIdentities { - dynamicIdentities = append(dynamicIdentities, &flow.DynamicIdentityEntry{ - NodeID: identity.NodeID, - Dynamic: identity.DynamicIdentity, - }) - } entry := &flow.RichProtocolStateEntry{ ProtocolStateEntry: &flow.ProtocolStateEntry{ @@ -2601,7 +2587,7 @@ func ProtocolStateFixture(options ...func(*flow.RichProtocolStateEntry)) *flow.R SetupID: prevEpochSetup.ID(), CommitID: prevEpochCommit.ID(), }, - Identities: dynamicIdentities, + Identities: flow.DynamicIdentityEntryListFromIdentities(allIdentities), InvalidStateTransitionAttempted: false, NextEpochProtocolState: nil, }, @@ -2634,13 +2620,8 @@ func WithNextEpochProtocolState() func(entry *flow.RichProtocolStateEntry) { }) allIdentities := nextEpochSetup.Participants.Union(entry.CurrentEpochSetup.Participants) - var dynamicIdentities flow.DynamicIdentityEntryList - for _, identity := range allIdentities { - dynamicIdentities = append(dynamicIdentities, &flow.DynamicIdentityEntry{ - NodeID: identity.NodeID, - Dynamic: identity.DynamicIdentity, - }) - } + entry.Identities = entry.CurrentEpochSetup.Participants.Union(nextEpochSetup.Participants) + entry.ProtocolStateEntry.Identities = flow.DynamicIdentityEntryListFromIdentities(entry.Identities) entry.ProtocolStateEntry.NextEpochProtocolState = &flow.ProtocolStateEntry{ CurrentEpochEventIDs: flow.EventIDs{ @@ -2648,7 +2629,7 @@ func WithNextEpochProtocolState() func(entry *flow.RichProtocolStateEntry) { CommitID: nextEpochCommit.ID(), }, PreviousEpochEventIDs: entry.CurrentEpochEventIDs, - Identities: dynamicIdentities, + Identities: flow.DynamicIdentityEntryListFromIdentities(allIdentities), InvalidStateTransitionAttempted: false, NextEpochProtocolState: nil, } From 
7556712602e0855d568bd2c262901c7854a77c16 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Tue, 22 Aug 2023 15:01:01 -0700 Subject: [PATCH 33/46] Apply suggestions from code review Co-authored-by: Jordan Schalm --- model/flow/protocol_state.go | 2 +- storage/badger/operation/protocol_state.go | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/model/flow/protocol_state.go b/model/flow/protocol_state.go index 23f7c7d5b86..ce7cfb60bb7 100644 --- a/model/flow/protocol_state.go +++ b/model/flow/protocol_state.go @@ -37,7 +37,7 @@ type ProtocolStateEntry struct { // the database for epoch setups and commits and full identity table. // It holds several invariants, such as: // - CurrentEpochSetup and CurrentEpochCommit are for the same epoch. Never nil. -// - PreviousEpochSetup and PreviousEpochCommit are for the same epoch. Never nil. +// - PreviousEpochSetup and PreviousEpochCommit are for the same epoch. Can be nil. // - Identities is a full identity table for the current epoch. // Identities are sorted in canonical order. Without duplicates. Never nil. // - NextEpochProtocolState is a protocol state for the next epoch. Can be nil. diff --git a/storage/badger/operation/protocol_state.go b/storage/badger/operation/protocol_state.go index 072fdde7a46..3534a5b4679 100644 --- a/storage/badger/operation/protocol_state.go +++ b/storage/badger/operation/protocol_state.go @@ -7,21 +7,33 @@ import ( ) // InsertProtocolState inserts a protocol state by ID. +// Error returns: +// - storage.ErrAlreadyExists if the key already exists in the database. +// - generic error in case of unexpected failure from the database layer or encoding failure. func InsertProtocolState(protocolStateID flow.Identifier, protocolState *flow.ProtocolStateEntry) func(*badger.Txn) error { return insert(makePrefix(codeProtocolState, protocolStateID), protocolState) } // RetrieveProtocolState retrieves a protocol state by ID. +// Error returns: +// - storage.ErrNotFound if the key does not exist in the database +// - generic error in case of unexpected failure from the database layer func RetrieveProtocolState(protocolStateID flow.Identifier, protocolState *flow.ProtocolStateEntry) func(*badger.Txn) error { return retrieve(makePrefix(codeProtocolState, protocolStateID), protocolState) } // IndexProtocolState indexes a protocol state by block ID. +// Error returns: +// - storage.ErrAlreadyExists if the key already exists in the database. +// - generic error in case of unexpected failure from the database layer or encoding failure. func IndexProtocolState(blockID flow.Identifier, protocolStateID flow.Identifier) func(*badger.Txn) error { return insert(makePrefix(codeProtocolStateByBlockID, blockID), protocolStateID) } // LookupProtocolState finds protocol state ID by block ID. 
+// Error returns: +// - storage.ErrNotFound if the key does not exist in the database +// - generic error in case of unexpected failure from the database layer func LookupProtocolState(blockID flow.Identifier, protocolStateID *flow.Identifier) func(*badger.Txn) error { return retrieve(makePrefix(codeProtocolStateByBlockID, blockID), protocolStateID) } From 1ef613ac7e1539840f494064358351c97261ce68 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Wed, 23 Aug 2023 19:57:34 -0700 Subject: [PATCH 34/46] minor goDoc updates --- consensus/hotstuff/committees/consensus_committee.go | 11 ++++------- model/flow/epoch.go | 4 ++-- model/flow/protocol_state.go | 11 ++++++++--- 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/consensus/hotstuff/committees/consensus_committee.go b/consensus/hotstuff/committees/consensus_committee.go index 4b110a9b895..33f51d61063 100644 --- a/consensus/hotstuff/committees/consensus_committee.go +++ b/consensus/hotstuff/committees/consensus_committee.go @@ -21,11 +21,10 @@ import ( // staticEpochInfo contains leader selection and the initial committee for one epoch. // This data structure must not be mutated after construction. type staticEpochInfo struct { - firstView uint64 // first view of the epoch (inclusive) - finalView uint64 // final view of the epoch (inclusive) - randomSource []byte // random source of epoch - leaders *leader.LeaderSelection // pre-computed leader selection for the epoch - // TODO: should use identity skeleton https://github.com/dapperlabs/flow-go/issues/6232 + firstView uint64 // first view of the epoch (inclusive) + finalView uint64 // final view of the epoch (inclusive) + randomSource []byte // random source of epoch + leaders *leader.LeaderSelection // pre-computed leader selection for the epoch initialCommittee flow.IdentitySkeletonList initialCommitteeMap map[flow.Identifier]*flow.IdentitySkeleton weightThresholdForQC uint64 // computed based on initial committee weights @@ -84,7 +83,6 @@ func newStaticEpochInfo(epoch protocol.Epoch) (*staticEpochInfo, error) { // * lasts until the next spork (estimated 6 months) // * has the same static committee as the last committed epoch func newEmergencyFallbackEpoch(lastCommittedEpoch *staticEpochInfo) (*staticEpochInfo, error) { - rng, err := prg.New(lastCommittedEpoch.randomSource, prg.ConsensusLeaderSelection, nil) if err != nil { return nil, fmt.Errorf("could not create rng from seed: %w", err) @@ -131,7 +129,6 @@ var _ hotstuff.Replicas = (*Consensus)(nil) var _ hotstuff.DynamicCommittee = (*Consensus)(nil) func NewConsensusCommittee(state protocol.State, me flow.Identifier) (*Consensus, error) { - com := &Consensus{ state: state, me: me, diff --git a/model/flow/epoch.go b/model/flow/epoch.go index f7ce38bb1ee..e2373b1b4af 100644 --- a/model/flow/epoch.go +++ b/model/flow/epoch.go @@ -69,8 +69,8 @@ type EpochSetup struct { DKGPhase2FinalView uint64 // the final view of DKG phase 2 DKGPhase3FinalView uint64 // the final view of DKG phase 3 FinalView uint64 // the final view of the epoch - Participants IdentityList // all participants of the epoch - Assignments AssignmentList // cluster assignment for the epoch + Participants IdentityList // all participants of the epoch in canonical order + Assignments AssignmentList // cluster assignment for the epoch with node IDs for each cluster in canonical order RandomSource []byte // source of randomness for epoch-specific setup tasks } diff --git a/model/flow/protocol_state.go b/model/flow/protocol_state.go index 
ce7cfb60bb7..97ddeae9419 100644
--- a/model/flow/protocol_state.go
+++ b/model/flow/protocol_state.go
@@ -10,11 +10,16 @@ type DynamicIdentityEntry struct {
 
 type DynamicIdentityEntryList []*DynamicIdentityEntry
 
-// ProtocolStateEntry holds information about the protocol state at some point in time.
-// It allows to reconstruct the state of identity table using epoch setup events and dynamic identities.
-// It tracks attempts of invalid state transitions.
+// ProtocolStateEntry represents a snapshot of the identity table (i.e. the set of all nodes authorized to
+// be part of the network) at some point in time. It allows reconstructing the state of the identity table using
+// epoch setup events and dynamic identities. It tracks attempts of invalid state transitions.
 // It also holds information about the next epoch, if it has been already committed.
 // This structure is used to persist protocol state in the database.
+//
+// Note that the current implementation does not store the identity table directly. Instead, we store
+// the original events that constituted the _initial_ identity table at the beginning of the epoch
+// plus some modifiers. We intend to restructure this code soon.
+// TODO: https://github.com/onflow/flow-go/issues/4649
 type ProtocolStateEntry struct {
 	// setup and commit event IDs for current epoch.
 	CurrentEpochEventIDs EventIDs

From 60be9c4af95e40979429db56946eefeaa8e9893a Mon Sep 17 00:00:00 2001
From: Alexander Hentschel
Date: Wed, 23 Aug 2023 20:11:54 -0700
Subject: [PATCH 35/46] removed unnecessary whitespaces

---
 .../hotstuff/votecollector/staking_vote_processor_test.go | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/consensus/hotstuff/votecollector/staking_vote_processor_test.go b/consensus/hotstuff/votecollector/staking_vote_processor_test.go
index d82059b3405..082aee074e0 100644
--- a/consensus/hotstuff/votecollector/staking_vote_processor_test.go
+++ b/consensus/hotstuff/votecollector/staking_vote_processor_test.go
@@ -268,9 +268,7 @@ func TestStakingVoteProcessorV2_BuildVerifyQC(t *testing.T) {
 	}).Sort(order.Canonical)
 
 	leader := stakingSigners[0]
-
-	block := helper.MakeBlock(helper.WithBlockView(view),
-		helper.WithBlockProposer(leader.NodeID))
+	block := helper.MakeBlock(helper.WithBlockView(view), helper.WithBlockProposer(leader.NodeID))
 
 	committee := &mockhotstuff.DynamicCommittee{}
 	committee.On("IdentitiesByEpoch", block.View).Return(stakingSigners.ToSkeleton(), nil)

From be31002af4c5100c58f5e8ef84b7eafbb5f0eeff Mon Sep 17 00:00:00 2001
From: Alexander Hentschel
Date: Thu, 24 Aug 2023 15:45:27 -0700
Subject: [PATCH 36/46] refined goDoc

---
 model/flow/protocol_state.go | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/model/flow/protocol_state.go b/model/flow/protocol_state.go
index 97ddeae9419..ad1a14e3ebc 100644
--- a/model/flow/protocol_state.go
+++ b/model/flow/protocol_state.go
@@ -82,6 +82,16 @@ func NewRichProtocolStateEntry(
 	var err error
 	// if next epoch has been already committed, fill in data for it as well.
 	if protocolState.NextEpochProtocolState != nil {
+		// sanity check consistency of input data
+		if protocolState.NextEpochProtocolState.CurrentEpochEventIDs.SetupID != nextEpochSetup.ID() {
+			return nil, fmt.Errorf("inconsistent EpochSetup for constructing RichProtocolStateEntry, next protocol state states ID %v while input event has ID %v",
+				protocolState.NextEpochProtocolState.CurrentEpochEventIDs.SetupID, nextEpochSetup.ID())
+		}
+		if protocolState.NextEpochProtocolState.CurrentEpochEventIDs.CommitID != nextEpochCommit.ID() {
+			return nil, fmt.Errorf("inconsistent EpochCommit for constructing RichProtocolStateEntry, next protocol state states ID %v while input event has ID %v",
+				protocolState.NextEpochProtocolState.CurrentEpochEventIDs.CommitID, nextEpochCommit.ID())
+		}
+
 		// if next epoch is available, it means that we have observed epoch setup event and we are not anymore in staking phase,
 		// so we need to build the identity table using current and next epoch setup events.
 		result.Identities, err = buildIdentityTable(
@@ -166,8 +176,11 @@ func (ll DynamicIdentityEntryList) Sorted(less IdentifierOrder) bool {
 	return true
 }
 
-// buildIdentityTable builds identity table for current epoch combining data from previous, current epoch setups and dynamic identities
-// that are stored in protocol state. It also performs sanity checks to make sure that data is consistent.
+// buildIdentityTable builds identity table for current epoch combining data from:
+// - the current epoch's Dynamic Identities
+// - and previous + current EpochSetup events
+//
+// It also performs sanity checks to make sure that the data is consistent.
 // No errors are expected during normal operation.
 func buildIdentityTable(
 	dynamicIdentities DynamicIdentityEntryList,
@@ -176,6 +189,7 @@ func buildIdentityTable(
 	// produce a unique set for current and previous epoch participants
 	allEpochParticipants := coreIdentities.Union(otherIdentities)
 	// sanity check: size of identities should be equal to previous and current epoch participants combined
+	// This is because
 	if len(allEpochParticipants) != len(dynamicIdentities) {
 		return nil, fmt.Errorf("invalid number of identities in protocol state: expected %d, got %d", len(allEpochParticipants), len(dynamicIdentities))
 	}

From 9a80526c1287ddebd290b5a409377b56562a9ed7 Mon Sep 17 00:00:00 2001
From: Alexander Hentschel
Date: Thu, 24 Aug 2023 17:04:20 -0700
Subject: [PATCH 37/46] refined goDoc part 2

---
 model/flow/identity.go       |  1 +
 model/flow/protocol_state.go | 25 +++++++++++++++----------
 2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/model/flow/identity.go b/model/flow/identity.go
index 63447f18dd4..eeedbb4aa18 100644
--- a/model/flow/identity.go
+++ b/model/flow/identity.go
@@ -570,6 +570,7 @@ func (il IdentityList) SamplePct(pct float64) (IdentityList, error) {
 // Union returns a new identity list containing every identity that occurs in
 // either `il`, or `other`, or both. There are no duplicates in the output,
 // where duplicates are identities with the same node ID.
+// Receiver `il` and/or method input `other` can be nil or empty.
 // The returned IdentityList is sorted in canonical order.
func (il IdentityList) Union(other IdentityList) IdentityList { maxLen := len(il) + len(other) diff --git a/model/flow/protocol_state.go b/model/flow/protocol_state.go index ad1a14e3ebc..6f7468c34bd 100644 --- a/model/flow/protocol_state.go +++ b/model/flow/protocol_state.go @@ -176,27 +176,32 @@ func (ll DynamicIdentityEntryList) Sorted(less IdentifierOrder) bool { return true } -// buildIdentityTable builds identity table for current epoch combining data from: -// - the current epoch's Dynamic Identities -// - and previous + current EpochSetup events +// buildIdentityTable constructs the full identity table for the target epoch by combining data from: +// 1. The target epoch's Dynamic Identities. +// 2. The target epoch's IdentitySkeletons +// (recorded in EpochSetup event and immutable throughout the epoch). +// 3. [optional] An adjacent epoch's IdentitySkeletons (can be empty or nil), as recorded in the +// adjacent epoch's setup event. For a target epoch N, the epochs N-1 and N+1 are defined to be +// adjacent. Adjacent epochs do not _necessarily_ exist (e.g. consider a spork comprising only +// a single epoch), in which case this input is nil or empty. // // It also performs sanity checks to make sure that the data is consistent. // No errors are expected during normal operation. func buildIdentityTable( - dynamicIdentities DynamicIdentityEntryList, - coreIdentities, otherIdentities IdentityList, + targetEpochDynamicIdentities DynamicIdentityEntryList, + targetEpochIdentitySkeletons IdentityList, // TODO: change to `IdentitySkeletonList` + adjacentEpochIdentitySkeletons IdentityList, // TODO: change to `IdentitySkeletonList` ) (IdentityList, error) { // produce a unique set for current and previous epoch participants - allEpochParticipants := coreIdentities.Union(otherIdentities) + allEpochParticipants := targetEpochIdentitySkeletons.Union(adjacentEpochIdentitySkeletons) // sanity check: size of identities should be equal to previous and current epoch participants combined - // This is because - if len(allEpochParticipants) != len(dynamicIdentities) { - return nil, fmt.Errorf("invalid number of identities in protocol state: expected %d, got %d", len(allEpochParticipants), len(dynamicIdentities)) + if len(allEpochParticipants) != len(targetEpochDynamicIdentities) { + return nil, fmt.Errorf("invalid number of identities in protocol state: expected %d, got %d", len(allEpochParticipants), len(targetEpochDynamicIdentities)) } // build full identity table for current epoch var result IdentityList - for i, identity := range dynamicIdentities { + for i, identity := range targetEpochDynamicIdentities { // sanity check: identities should be sorted in canonical order if identity.NodeID != allEpochParticipants[i].NodeID { return nil, fmt.Errorf("identites in protocol state are not in canonical order: expected %s, got %s", allEpochParticipants[i].NodeID, identity.NodeID) From e69609a82e7b1e72efeabeeacf097a972aec0a39 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Thu, 24 Aug 2023 17:49:04 -0700 Subject: [PATCH 38/46] extended goDoc for tests --- model/flow/protocol_state_test.go | 18 ++++++++++++++++++ utils/unittest/fixtures.go | 23 ++++++++++++++++------- 2 files changed, 34 insertions(+), 7 deletions(-) diff --git a/model/flow/protocol_state_test.go b/model/flow/protocol_state_test.go index 9b89cf886d6..d57a4e61a0c 100644 --- a/model/flow/protocol_state_test.go +++ b/model/flow/protocol_state_test.go @@ -12,6 +12,9 @@ import ( // TestNewRichProtocolStateEntry checks that 
NewRichProtocolStateEntry creates valid identity tables depending on the state // of epoch which is derived from the protocol state entry. func TestNewRichProtocolStateEntry(t *testing.T) { + // Conditions right after a spork: + // * no previous epoch exists from the perspective of the freshly-sporked protocol state + // * network is currently in the staking phase for the next epoch, hence no service events for the next epoch exist t.Run("staking-root-protocol-state", func(t *testing.T) { currentEpochSetup := unittest.EpochSetupFixture() currentEpochCommit := unittest.EpochCommitFixture() @@ -37,6 +40,11 @@ func TestNewRichProtocolStateEntry(t *testing.T) { assert.NoError(t, err) assert.Equal(t, currentEpochSetup.Participants, entry.Identities, "should be equal to current epoch setup participants") }) + + // Common situation during the staking phase for epoch N+1 + // * we are currently in Epoch N + // * previous epoch N-1 is known (specifically EpochSetup and EpochCommit events) + // * network is currently in the staking phase for the next epoch, hence no service events for the next epoch exist t.Run("staking-phase", func(t *testing.T) { stateEntry := unittest.ProtocolStateFixture() richEntry, err := flow.NewRichProtocolStateEntry( @@ -53,6 +61,11 @@ func TestNewRichProtocolStateEntry(t *testing.T) { assert.Equal(t, expectedIdentities, richEntry.Identities, "should be equal to current epoch setup participants + previous epoch setup participants") assert.Nil(t, richEntry.NextEpochProtocolState) }) + + // Common situation during the epoch setup phase for epoch N+1 + // * we are currently in Epoch N + // * previous epoch N-1 is known (specifically EpochSetup and EpochCommit events) + // * network is currently in the setup phase for the next epoch, i.e. EpochSetup event (starting setup phase) has already been observed t.Run("setup-phase", func(t *testing.T) { stateEntry := unittest.ProtocolStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichProtocolStateEntry) { entry.NextEpochProtocolState.CurrentEpochCommit = nil @@ -75,6 +88,11 @@ func TestNewRichProtocolStateEntry(t *testing.T) { expectedIdentities = stateEntry.NextEpochProtocolState.CurrentEpochSetup.Participants.Union(stateEntry.CurrentEpochSetup.Participants) assert.Equal(t, expectedIdentities, richEntry.NextEpochProtocolState.Identities, "should be equal to next epoch setup participants + current epoch setup participants") }) + + // Common situation during the epoch commit phase for epoch N+1 + // * we are currently in Epoch N + // * previous epoch N-1 is known (specifically EpochSetup and EpochCommit events) + // * The network has completed the epoch setup phase, i.e. published the EpochSetup and EpochCommit events for epoch N+1. 
t.Run("commit-phase", func(t *testing.T) { stateEntry := unittest.ProtocolStateFixture(unittest.WithNextEpochProtocolState()) diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index 27d7c8b3391..107a78cc610 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -10,12 +10,10 @@ import ( "time" "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/require" - "github.com/onflow/cadence" + "github.com/stretchr/testify/require" sdk "github.com/onflow/flow-go-sdk" - hotstuff "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" @@ -2556,10 +2554,17 @@ func RootProtocolStateFixture() *flow.RichProtocolStateEntry { } } -// ProtocolStateFixture creates a fixture with correctly structured data that passes basic sanity checks. -// Epoch setup and commit counters are set to match. -// Identities are constructed from setup events. -// Identities are sorted in canonical order. +// ProtocolStateFixture creates a fixture with correctly structured data. The returned Identity Table +// represents the common situation during the staking phase of Epoch N+1: +// - we are currently in Epoch N +// - previous epoch N-1 is known (specifically EpochSetup and EpochCommit events) +// - network is currently in the staking phase to setup the next epoch, hence no service +// events for the next epoch exist +// +// In particular, the following consistency requirements hold: +// - Epoch setup and commit counters are set to match. +// - Identities are constructed from setup events. +// - Identities are sorted in canonical order. func ProtocolStateFixture(options ...func(*flow.RichProtocolStateEntry)) *flow.RichProtocolStateEntry { prevEpochSetup := EpochSetupFixture() prevEpochCommit := EpochCommitFixture(func(commit *flow.EpochCommit) { @@ -2607,6 +2612,10 @@ func ProtocolStateFixture(options ...func(*flow.RichProtocolStateEntry)) *flow.R } // WithNextEpochProtocolState creates a fixture with correctly structured data for next epoch. +// The resulting Identity Table represents the common situation during the epoch commit phase for Epoch N+1: +// - We are currently in Epoch N. +// - The previous epoch N-1 is known (specifically EpochSetup and EpochCommit events). +// - The network has completed the epoch setup phase, i.e. published the EpochSetup and EpochCommit events for epoch N+1. func WithNextEpochProtocolState() func(entry *flow.RichProtocolStateEntry) { return func(entry *flow.RichProtocolStateEntry) { nextEpochSetup := EpochSetupFixture(func(setup *flow.EpochSetup) { From 2fea77adfa1cfe8222d4d089f741e5d135b76a9b Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Thu, 24 Aug 2023 18:11:18 -0700 Subject: [PATCH 39/46] added TODOs reflecting review comments --- model/flow/protocol_state_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/model/flow/protocol_state_test.go b/model/flow/protocol_state_test.go index d57a4e61a0c..8fbce1e6fb2 100644 --- a/model/flow/protocol_state_test.go +++ b/model/flow/protocol_state_test.go @@ -89,6 +89,8 @@ func TestNewRichProtocolStateEntry(t *testing.T) { assert.Equal(t, expectedIdentities, richEntry.NextEpochProtocolState.Identities, "should be equal to next epoch setup participants + current epoch setup participants") }) + // TODO: include test for epoch setup phase where no prior epoch exist (i.e. 
first epoch setup phase after spork) + // Common situation during the epoch commit phase for epoch N+1 // * we are currently in Epoch N // * previous epoch N-1 is known (specifically EpochSetup and EpochCommit events) @@ -111,4 +113,7 @@ func TestNewRichProtocolStateEntry(t *testing.T) { expectedIdentities = stateEntry.NextEpochProtocolState.CurrentEpochSetup.Participants.Union(stateEntry.CurrentEpochSetup.Participants) assert.Equal(t, expectedIdentities, richEntry.NextEpochProtocolState.Identities, "should be equal to next epoch setup participants + current epoch setup participants") }) + + // TODO: include test for epoch commit phase where no prior epoch exist (i.e. first epoch commit phase after spork) + } From 7a16b08fb7968621143476d803c2e3cf8f75bbee Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Thu, 24 Aug 2023 18:19:09 -0700 Subject: [PATCH 40/46] fixed merge artifacts. --- module/signature/signer_indices_test.go | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/module/signature/signer_indices_test.go b/module/signature/signer_indices_test.go index b134e93b08a..3357d092cca 100644 --- a/module/signature/signer_indices_test.go +++ b/module/signature/signer_indices_test.go @@ -355,25 +355,14 @@ func Test_DecodeSignerIndicesToIdentities(t *testing.T) { decodedSigners, err := signature.DecodeSignerIndicesToIdentities(identities.ToSkeleton(), signerIndices) require.NoError(t, err) -<<<<<<< HEAD - slices.SortFunc(signers, func(lhs, rhs *flow.IdentitySkeleton) bool { - return order.IdentifierCanonical(lhs.NodeID, rhs.NodeID) - }) - - slices.SortFunc(decodedSigners, func(lhs, rhs *flow.IdentitySkeleton) bool { - return order.IdentifierCanonical(lhs.NodeID, rhs.NodeID) - }) - -======= - // Note that sampling from `identities` generates an _unordered_ list `signers`. Though, - // this is fine, as `EncodeSignersToIndices` as no ordering requirement on its input `signers`. + // Note that sampling from `identities` generates an _unordered_ list `signers`. + // This is fine, as `EncodeSignersToIndices` has no ordering requirement on its input `signers`. // Nevertheless, note that the output of `DecodeSignerIndicesToIdentities` is _always_ canonically // ordered. Therefore, we need to order the input `signers` (so far unordered) before comparing it - // to the decoded output (canonically ordered) + // to the decoded output (canonically ordered). slices.SortFunc(signers, func(lhs, rhs *flow.IdentitySkeleton) bool { return order.IdentifierCanonical(lhs.NodeID, rhs.NodeID) }) ->>>>>>> feature/dynamic-protocol-state require.Equal(t, signers, decodedSigners) }) } From 279a4642fe393ade5b1e7d5801277d08c002d8fa Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Thu, 24 Aug 2023 19:31:27 -0700 Subject: [PATCH 41/46] refined and extended goDoc --- storage/badger/protocol_state.go | 7 ++++--- storage/badger/protocol_state_test.go | 5 +++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/storage/badger/protocol_state.go b/storage/badger/protocol_state.go index b7bf4faa469..fc6a7161f48 100644 --- a/storage/badger/protocol_state.go +++ b/storage/badger/protocol_state.go @@ -24,13 +24,14 @@ type ProtocolState struct { var _ storage.ProtocolState = (*ProtocolState)(nil) -// NewProtocolState Creates ProtocolState instance which is a database of protocol state entries -// which supports storing, caching and retrieving by ID and additionally indexed block ID. 
+// NewProtocolState creates a ProtocolState instance, which is a database of protocol state entries. +// It supports storing, caching and retrieving by ID or the additionally indexed block ID. func NewProtocolState(collector module.CacheMetrics, epochSetups storage.EpochSetups, epochCommits storage.EpochCommits, db *badger.DB, - cacheSize uint) *ProtocolState { + cacheSize uint, +) *ProtocolState { retrieve := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { protocolStateID := key.(flow.Identifier) var protocolStateEntry flow.ProtocolStateEntry diff --git a/storage/badger/protocol_state_test.go b/storage/badger/protocol_state_test.go index a2a8a6203dd..18bcb6d71f1 100644 --- a/storage/badger/protocol_state_test.go +++ b/storage/badger/protocol_state_test.go @@ -90,7 +90,7 @@ func TestProtocolStateStoreInvalidProtocolState(t *testing.T) { // TestProtocolStateMergeParticipants tests that merging participants between epochs works correctly. We always take participants // from current epoch and additionally add participants from previous epoch if they are not present in current epoch. -// If there is participant in previous and current epochs we should see it only once in the merged list and the entity has to be from current epoch. +// If the same participant is in the previous and current epochs, we should see it only once in the merged list and the dynamic portion has to be from current epoch. func TestProtocolStateMergeParticipants(t *testing.T) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { metrics := metrics.NewNoopCollector() @@ -137,7 +137,8 @@ func TestProtocolStateMergeParticipants(t *testing.T) { }) } -// TestProtocolStateRootSnapshot tests that storing and retrieving root protocol state(in case of bootstrap) works as expected. +// TestProtocolStateRootSnapshot tests that storing and retrieving root protocol state (in case of bootstrap) works as expected. +// Specifically, this means that no prior epoch exists (situation after a spork) from the perspective of the freshly-sporked network. func TestProtocolStateRootSnapshot(t *testing.T) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { metrics := metrics.NewNoopCollector() From 3f89749b6a549eb32834b66cf6abbc78ffa261d3 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Thu, 24 Aug 2023 19:45:19 -0700 Subject: [PATCH 42/46] more goDoc extensions --- storage/badger/protocol_state.go | 37 +++++++++++++++++++++++--------- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/storage/badger/protocol_state.go b/storage/badger/protocol_state.go index fc6a7161f48..91ab3302de9 100644 --- a/storage/badger/protocol_state.go +++ b/storage/badger/protocol_state.go @@ -14,9 +14,10 @@ import ( "github.com/onflow/flow-go/storage/badger/transaction" ) -// ProtocolState implements persistent storage for storing entities of protocol state. +// ProtocolState implements persistent storage for storing identity table instances. // Protocol state uses an embedded cache without storing capabilities(store happens on first retrieval) to avoid unnecessary -// operations and to speed up access to frequently used protocol states. +// operations and to speed up access to frequently used identity tables. +// TODO: update naming to IdentityTable type ProtocolState struct { db *badger.DB cache *Cache @@ -24,7 +25,7 @@ type ProtocolState struct { var _ storage.ProtocolState = (*ProtocolState)(nil) -// NewProtocolState creates a ProtocolState instance, which is a database of protocol state entries. 
+// NewProtocolState creates a ProtocolState instance, which is a database of identity table instances. // It supports storing, caching and retrieving by ID or the additionally indexed block ID. func NewProtocolState(collector module.CacheMetrics, epochSetups storage.EpochSetups, @@ -42,7 +43,7 @@ func NewProtocolState(collector module.CacheMetrics, } result, err := newRichProtocolStateEntry(&protocolStateEntry, epochSetups, epochCommits) if err != nil { - return nil, fmt.Errorf("could not create rich protocol state entry: %w", err) + return nil, fmt.Errorf("could not create rich identity table entry: %w", err) } return result, nil } @@ -57,7 +58,10 @@ func NewProtocolState(collector module.CacheMetrics, } } -// StoreTx allows us to store protocol state as part of a DB tx, while still going through the caching layer. +// StoreTx allows us to store an identity table as part of a DB tx, while still going through the caching layer. +// Per convention, the given Identity Table must be in canonical order, otherwise an exception is returned. +// Expected error returns during normal operations: +// - storage.ErrAlreadyExists if an Identity Table with the given id is already stored func (s *ProtocolState) StoreTx(id flow.Identifier, protocolState *flow.ProtocolStateEntry) func(*transaction.Tx) error { return func(tx *transaction.Tx) error { if !protocolState.Identities.Sorted(order.IdentifierCanonical) { @@ -72,31 +76,40 @@ func (s *ProtocolState) StoreTx(id flow.Identifier, protocolState *flow.Protocol } } -// Index indexes the protocol state by block ID. +// Index indexes the identity table by block ID. +// Error returns: +// - storage.ErrAlreadyExists if an identity table for the given blockID has already been indexed func (s *ProtocolState) Index(blockID flow.Identifier, protocolStateID flow.Identifier) func(*transaction.Tx) error { return func(tx *transaction.Tx) error { err := transaction.WithTx(operation.IndexProtocolState(blockID, protocolStateID))(tx) if err != nil { - return fmt.Errorf("could not index protocol state for block (%x): %w", blockID[:], err) + return fmt.Errorf("could not index identity table for block (%x): %w", blockID[:], err) } return nil } } -// ByID returns the protocol state by its ID. +// ByID retrieves the identity table by its ID. +// Error returns: +// - storage.ErrNotFound if no identity table with the given ID exists func (s *ProtocolState) ByID(id flow.Identifier) (*flow.RichProtocolStateEntry, error) { tx := s.db.NewTransaction(false) defer tx.Discard() return s.byID(id)(tx) } -// ByBlockID returns the protocol state by block ID. +// ByBlockID retrieves the identity table by the respective block ID. +// TODO: clarify whether the blockID is the block that defines this identity table or the _child_ block where the identity table is applied. CAUTION: surface for bugs! +// Error returns: +// - storage.ErrNotFound if no identity table for the given blockID exists func (s *ProtocolState) ByBlockID(blockID flow.Identifier) (*flow.RichProtocolStateEntry, error) { tx := s.db.NewTransaction(false) defer tx.Discard() return s.byBlockID(blockID)(tx) } +// byID retrieves the identity table by its ID. 
+// Error returns:
+//   - storage.ErrNotFound if no identity table with the given ID exists
 func (s *ProtocolState) byID(protocolStateID flow.Identifier) func(*badger.Txn) (*flow.RichProtocolStateEntry, error) {
 	return func(tx *badger.Txn) (*flow.RichProtocolStateEntry, error) {
 		val, err := s.cache.Get(protocolStateID)(tx)
@@ -107,12 +120,16 @@ func (s *ProtocolState) byID(protocolStateID flow.Identifier) func(*badger.Txn)
 	}
 }
 
+// byBlockID retrieves the identity table by the respective block ID.
+// TODO: clarify whether the blockID is the block that defines this identity table or the _child_ block where the identity table is applied. CAUTION: surface for bugs!
+// Error returns:
+//   - storage.ErrNotFound if no identity table for the given blockID exists
 func (s *ProtocolState) byBlockID(blockID flow.Identifier) func(*badger.Txn) (*flow.RichProtocolStateEntry, error) {
 	return func(tx *badger.Txn) (*flow.RichProtocolStateEntry, error) {
 		var protocolStateID flow.Identifier
 		err := operation.LookupProtocolState(blockID, &protocolStateID)(tx)
 		if err != nil {
-			return nil, fmt.Errorf("could not lookup protocol state ID for block (%x): %w", blockID[:], err)
+			return nil, fmt.Errorf("could not lookup identity table ID for block (%x): %w", blockID[:], err)
 		}
 		return s.byID(protocolStateID)(tx)
 	}

From bd549897f434e5d8057dcd345422d047d808063f Mon Sep 17 00:00:00 2001
From: Alexander Hentschel
Date: Thu, 24 Aug 2023 19:47:03 -0700
Subject: [PATCH 43/46] extended `badger.ProtocolState` interface documentation

---
 storage/protocol_state.go | 22 ++++++++++++++++++----
 1 file changed, 18 insertions(+), 4 deletions(-)

diff --git a/storage/protocol_state.go b/storage/protocol_state.go
index e2e7aa90096..6f35ccbe41d 100644
--- a/storage/protocol_state.go
+++ b/storage/protocol_state.go
@@ -6,13 +6,27 @@ import (
 )
 
 // ProtocolState represents persistent storage for protocol state entries.
+// TODO: update naming to IdentityTable
 type ProtocolState interface {
-	// StoreTx allows us to store protocol state as part of a DB tx, while still going through the caching layer.
+	// StoreTx allows us to store an identity table as part of a DB tx, while still going through the caching layer.
+	// Per convention, the given Identity Table must be in canonical order, otherwise an exception is returned.
+	// Expected error returns during normal operations:
+	//   - storage.ErrAlreadyExists if an Identity Table with the given id is already stored
 	StoreTx(id flow.Identifier, protocolState *flow.ProtocolStateEntry) func(*transaction.Tx) error
-	// Index indexes the protocol state by block ID.
+
+	// Index indexes the identity table by block ID.
+	// Error returns:
+	//   - storage.ErrAlreadyExists if an identity table for the given blockID has already been indexed
 	Index(blockID flow.Identifier, protocolStateID flow.Identifier) func(*transaction.Tx) error
-	// ByID returns the protocol state by its ID.
+
+	// ByID retrieves the identity table by its ID.
+	// Error returns:
+	//   - storage.ErrNotFound if no identity table with the given ID exists
 	ByID(id flow.Identifier) (*flow.RichProtocolStateEntry, error)
-	// ByBlockID returns the protocol state by block ID.
+
+	// ByBlockID retrieves the identity table by the respective block ID.
+	// TODO: clarify whether the blockID is the block that defines this identity table or the _child_ block where the identity table is applied. CAUTION: surface for bugs!
+	// Error returns:
+	//   - storage.ErrNotFound if no identity table for the given blockID exists
 	ByBlockID(blockID flow.Identifier) (*flow.RichProtocolStateEntry, error)
 }

From 170109a4ca073930677e9f6585bd9d0e10b64d71 Mon Sep 17 00:00:00 2001
From: Jordan Schalm
Date: Fri, 25 Aug 2023 14:19:06 -0700
Subject: [PATCH 44/46] remove unused fn WaitForServerStart

---
 engine/access/rpc/engine.go | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/engine/access/rpc/engine.go b/engine/access/rpc/engine.go
index 0223b4d3c5d..eea6dc5d17c 100644
--- a/engine/access/rpc/engine.go
+++ b/engine/access/rpc/engine.go
@@ -131,17 +131,6 @@ func NewBuilder(log zerolog.Logger,
 	return builder, nil
 }
 
-func WaitForServerStart(server component.Component) component.ComponentWorker {
-	return func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
-		select {
-		case <-ctx.Done():
-		case <-server.Ready():
-			ready()
-		}
-		<-server.Done()
-	}
-}
-
 // shutdownWorker is a worker routine which shuts down all servers when the context is cancelled.
 func (e *Engine) shutdownWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
 	ready()

From f91d5c86794e574b2c7e72dc0979bceaa7058b72 Mon Sep 17 00:00:00 2001
From: Yurii Oleksyshyn
Date: Thu, 7 Sep 2023 10:39:51 +0300
Subject: [PATCH 45/46] Update model/flow/protocol_state.go

Co-authored-by: Alexander Hentschel
---
 model/flow/protocol_state.go | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/model/flow/protocol_state.go b/model/flow/protocol_state.go
index 6f7468c34bd..dbde3881103 100644
--- a/model/flow/protocol_state.go
+++ b/model/flow/protocol_state.go
@@ -92,8 +92,19 @@ func NewRichProtocolStateEntry(
 			protocolState.NextEpochProtocolState.CurrentEpochEventIDs.CommitID, nextEpochCommit.ID())
 	}
 
+	// sanity check consistency of input data
+	if protocolState.NextEpochProtocolState.CurrentEpochEventIDs.SetupID != nextEpochSetup.ID() {
+		return nil, fmt.Errorf("inconsistent EpochSetup for constructing RichProtocolStateEntry, next protocol state states ID %v while input event has ID %v",
+			protocolState.NextEpochProtocolState.CurrentEpochEventIDs.SetupID, nextEpochSetup.ID())
+	}
+	if protocolState.NextEpochProtocolState.CurrentEpochEventIDs.CommitID != nextEpochCommit.ID() {
+		return nil, fmt.Errorf("inconsistent EpochCommit for constructing RichProtocolStateEntry, next protocol state states ID %v while input event has ID %v",
+			protocolState.NextEpochProtocolState.CurrentEpochEventIDs.CommitID, nextEpochCommit.ID())
+	}
+
 	// if next epoch is available, it means that we have observed epoch setup event and we are not anymore in staking phase,
 	// so we need to build the identity table using current and next epoch setup events.
+	// so we need to build the identity table using current and next epoch setup events.
 	result.Identities, err = buildIdentityTable(
 		protocolState.Identities,
 		currentEpochSetup.Participants,

From ddaf7ed2f82b0191fe2d1ae840cbefa80910ec02 Mon Sep 17 00:00:00 2001
From: Yurii Oleksyshyn
Date: Thu, 7 Sep 2023 23:14:39 +0300
Subject: [PATCH 46/46] Fixed sanity check

---
 model/flow/protocol_state.go | 18 +++++------------
 1 file changed, 5 insertions(+), 13 deletions(-)

diff --git a/model/flow/protocol_state.go b/model/flow/protocol_state.go
index dbde3881103..d3eb0316f84 100644
--- a/model/flow/protocol_state.go
+++ b/model/flow/protocol_state.go
@@ -87,19 +87,11 @@ func NewRichProtocolStateEntry(
 			return nil, fmt.Errorf("inconsistent EpochSetup for constructing RichProtocolStateEntry, next protocol state states ID %v while input event has ID %v",
 				protocolState.NextEpochProtocolState.CurrentEpochEventIDs.SetupID, nextEpochSetup.ID())
 		}
-		if protocolState.NextEpochProtocolState.CurrentEpochEventIDs.CommitID != nextEpochCommit.ID() {
-			return nil, fmt.Errorf("inconsistent EpochCommit for constructing RichProtocolStateEntry, next protocol state states ID %v while input event has ID %v",
-				protocolState.NextEpochProtocolState.CurrentEpochEventIDs.CommitID, nextEpochCommit.ID())
-		}
-
-	// sanity check consistency of input data
-	if protocolState.NextEpochProtocolState.CurrentEpochEventIDs.SetupID != nextEpochSetup.ID() {
-		return nil, fmt.Errorf("inconsistent EpochSetup for constructing RichProtocolStateEntry, next protocol state states ID %v while input event has ID %v",
-			protocolState.NextEpochProtocolState.CurrentEpochEventIDs.SetupID, nextEpochSetup.ID())
-	}
-	if protocolState.NextEpochProtocolState.CurrentEpochEventIDs.CommitID != nextEpochCommit.ID() {
-		return nil, fmt.Errorf("inconsistent EpochCommit for constructing RichProtocolStateEntry, next protocol state states ID %v while input event has ID %v",
-			protocolState.NextEpochProtocolState.CurrentEpochEventIDs.CommitID, nextEpochCommit.ID())
+	if protocolState.NextEpochProtocolState.CurrentEpochEventIDs.CommitID != ZeroID {
+		if protocolState.NextEpochProtocolState.CurrentEpochEventIDs.CommitID != nextEpochCommit.ID() {
+			return nil, fmt.Errorf("inconsistent EpochCommit for constructing RichProtocolStateEntry, next protocol state states ID %v while input event has ID %v",
+				protocolState.NextEpochProtocolState.CurrentEpochEventIDs.CommitID, nextEpochCommit.ID())
+		}
 	}
 
 	// if next epoch is available, it means that we have observed epoch setup event and we are not anymore in staking phase,
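
The participant-merge rule exercised by TestProtocolStateMergeParticipants above (one entry per node; for nodes present in both epochs the current-epoch entry, including its dynamic portion, wins; nodes only known to the adjacent epoch are appended) can be summarized by the following self-contained Go sketch. The types and the helper are simplified stand-ins for illustration only, not the repository's flow.Identity structures or its actual buildIdentityTable implementation.

package main

import "fmt"

// identity is a simplified stand-in for the protocol's split between a static
// skeleton (NodeID) and dynamic fields (Weight, Ejected).
type identity struct {
	NodeID  string
	Weight  uint64
	Ejected bool
}

// mergeParticipants keeps every current-epoch entry unchanged (so its dynamic
// portion wins) and appends adjacent-epoch participants that are not already present.
func mergeParticipants(current, adjacent []identity) []identity {
	merged := make([]identity, 0, len(current)+len(adjacent))
	seen := make(map[string]struct{}, len(current))
	for _, id := range current {
		merged = append(merged, id)
		seen[id.NodeID] = struct{}{}
	}
	for _, id := range adjacent {
		if _, ok := seen[id.NodeID]; !ok {
			merged = append(merged, id)
		}
	}
	return merged
}

func main() {
	current := []identity{{NodeID: "A", Weight: 100}, {NodeID: "B", Weight: 100}}
	previous := []identity{{NodeID: "B", Weight: 0, Ejected: true}, {NodeID: "C", Weight: 100}}
	// "B" appears once and keeps its current-epoch dynamic state; "C" is appended.
	fmt.Println(mergeParticipants(current, previous))
}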
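
The goDoc added in patches 42 and 43 distinguishes documented sentinel errors (storage.ErrNotFound, storage.ErrAlreadyExists) from exceptions. A minimal caller-side sketch of how those conventions are meant to be consumed is given below; the function protocolStateForBlock and its error messages are hypothetical and only assume the ByBlockID signature and the sentinels documented above.

package example

import (
	"errors"
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// protocolStateForBlock is a hypothetical caller. It treats storage.ErrNotFound
// as the only benign outcome documented for ByBlockID and escalates anything else.
func protocolStateForBlock(db storage.ProtocolState, blockID flow.Identifier) (*flow.RichProtocolStateEntry, error) {
	entry, err := db.ByBlockID(blockID)
	if err != nil {
		if errors.Is(err, storage.ErrNotFound) {
			// expected during normal operations: no identity table indexed for this block yet
			return nil, fmt.Errorf("identity table for block %x not indexed: %w", blockID, err)
		}
		// all other errors are unexpected and should be treated as exceptions
		return nil, fmt.Errorf("unexpected failure retrieving identity table for block %x: %w", blockID, err)
	}
	return entry, nil
}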