diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index de3a927d46..e30143489a 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -40,7 +40,7 @@ Build stage Docker image:
     - docker tag $IMAGE_NAME:$CI_COMMIT_SHA $DOCKER_REPO_INFRA_STAGE:$CI_COMMIT_SHA
     - $DOCKER_LOGIN_TO_INFRA_STAGE_REPO && docker push $DOCKER_REPO_INFRA_STAGE:$CI_COMMIT_SHA
   only:
-    - gm/exporter-alan-merge
+    - alan/no-fork

 # +---------------------+
@@ -84,7 +84,7 @@ Deploy nodes to hetzner stage:
     - .k8/hetzner-stage/scripts/deploy-cluster-65--68.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
     - .k8/hetzner-stage/scripts/deploy-cluster-69--72.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
   only:
-    - stage
+    - alan/no-fork

 Deploy exporter to hetzner stage:
   stage: deploy
@@ -101,7 +101,7 @@ Deploy exporter to hetzner stage:
     - kubectl config get-contexts
    - .k8/hetzner-stage/scripts/deploy-holesky-exporters.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT
   only:
-    - gm/exporter-alan-merge
+    - alan/no-fork

 # +---------------+
 # |     Prod      |
diff --git a/Dockerfile b/Dockerfile
index 1fbf4d8651..2d2c665e07 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -66,3 +66,4 @@ EXPOSE 5678 5000 4000/udp
 ENV GODEBUG="netdns=go"

 #ENTRYPOINT ["/go/bin/ssvnode"]
+
diff --git a/cli/operator/node.go b/cli/operator/node.go
index db0cba2fce..c23f180829 100644
--- a/cli/operator/node.go
+++ b/cli/operator/node.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"encoding/base64"
 	"fmt"
-	"github.com/ssvlabs/ssv/exporter/exporter_message"
 	"log"
 	"math/big"
 	"net/http"
@@ -30,6 +29,7 @@ import (
 	"github.com/ssvlabs/ssv/eth/localevents"
 	exporterapi "github.com/ssvlabs/ssv/exporter/api"
 	"github.com/ssvlabs/ssv/exporter/api/decided"
+	"github.com/ssvlabs/ssv/exporter/convert"
 	ibftstorage "github.com/ssvlabs/ssv/ibft/storage"
 	ssv_identity "github.com/ssvlabs/ssv/identity"
 	"github.com/ssvlabs/ssv/logging"
@@ -262,6 +262,7 @@ var StartNodeCmd = &cobra.Command{
 		cfg.SSVOptions.ValidatorOptions.Beacon = consensusClient
 		cfg.SSVOptions.ValidatorOptions.BeaconSigner = keyManager
 		cfg.SSVOptions.ValidatorOptions.ValidatorsMap = validatorsMap
+		cfg.SSVOptions.ValidatorOptions.NetworkConfig = networkConfig

 		cfg.SSVOptions.ValidatorOptions.OperatorDataStore = operatorDataStore
 		cfg.SSVOptions.ValidatorOptions.RegistryStorage = nodeStorage
@@ -272,20 +273,20 @@ var StartNodeCmd = &cobra.Command{
 			ws := exporterapi.NewWsServer(cmd.Context(), nil, http.NewServeMux(), cfg.WithPing)
 			cfg.SSVOptions.WS = ws
 			cfg.SSVOptions.WsAPIPort = cfg.WsAPIPort
-			cfg.SSVOptions.ValidatorOptions.NewDecidedHandler = decided.NewStreamPublisher(logger, ws, cfg.SSVOptions.ValidatorOptions.UseNewExporterAPI)
+			cfg.SSVOptions.ValidatorOptions.NewDecidedHandler = decided.NewStreamPublisher(logger, ws)
 		}

 		cfg.SSVOptions.ValidatorOptions.DutyRoles = []spectypes.BeaconRole{spectypes.BNRoleAttester} // TODO could be better to set in other place

-		storageRoles := []exporter_message.RunnerRole{
-			exporter_message.RoleCommittee,
-			exporter_message.RoleAttester,
-			exporter_message.RoleProposer,
-			exporter_message.RoleSyncCommittee,
-			exporter_message.RoleAggregator,
-			exporter_message.RoleSyncCommitteeContribution,
-			exporter_message.RoleValidatorRegistration,
-			exporter_message.RoleVoluntaryExit,
+		storageRoles := []convert.RunnerRole{
+			convert.RoleCommittee,
+			convert.RoleAttester,
+			convert.RoleProposer,
+			convert.RoleSyncCommittee,
+			convert.RoleAggregator,
+			convert.RoleSyncCommitteeContribution,
+			convert.RoleValidatorRegistration,
+			convert.RoleVoluntaryExit,
 		}

 		storageMap := ibftstorage.NewStores()
@@ -304,7 +305,7 @@ var StartNodeCmd = &cobra.Command{
 		cfg.SSVOptions.ValidatorController = validatorCtrl
 		cfg.SSVOptions.ValidatorStore = validatorStore

-		operatorNode = operator.New(logger, cfg.SSVOptions, slotTickerProvider)
+		operatorNode = operator.New(logger, cfg.SSVOptions, slotTickerProvider, storageMap)

 		if cfg.MetricsAPIPort > 0 {
 			go startMetricsHandler(cmd.Context(), logger, db, metricsReporter, cfg.MetricsAPIPort, cfg.EnableProfile)
@@ -559,8 +560,6 @@ func setupSSVNetwork(logger *zap.Logger) (networkconfig.NetworkConfig, error) {
 		return networkconfig.NetworkConfig{}, err
 	}

-	types.SetDefaultDomain(networkConfig.Domain)
-
 	nodeType := "light"
 	if cfg.SSVOptions.ValidatorOptions.FullNode {
 		nodeType = "full"
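The exporter's per-role QBFT stores are now keyed by convert.RunnerRole. A minimal sketch of how the storageMap above could be assembled in a single call, using the NewStoresFromRoles helper that appears later in this patch (illustrative wiring, not the committed code):

    package main

    import (
        "github.com/ssvlabs/ssv/exporter/convert"
        ibftstorage "github.com/ssvlabs/ssv/ibft/storage"
        "github.com/ssvlabs/ssv/storage/basedb"
    )

    // buildStores registers one QBFT store per runner role, mirroring the
    // storageRoles slice configured in StartNodeCmd.
    func buildStores(db basedb.Database) *ibftstorage.QBFTStores {
        return ibftstorage.NewStoresFromRoles(db,
            convert.RoleCommittee,
            convert.RoleAttester,
            convert.RoleProposer,
            convert.RoleSyncCommittee,
            convert.RoleAggregator,
            convert.RoleSyncCommitteeContribution,
            convert.RoleValidatorRegistration,
            convert.RoleVoluntaryExit,
        )
    }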
condition: %w", err) } @@ -91,7 +112,10 @@ func ParseAndExtractDutyInfo(conditionLog string, corruptedValidatorIndex string } func DetermineLeader(dutySlot phase0.Slot, committee []*types.CommitteeMember) types.OperatorID { - leader := qbft.RoundRobinProposer(&qbft.State{Height: qbft.Height(dutySlot), CommitteeMember: committee[0]}, qbft.FirstRound) + share := &types.Operator{ + Committee: committee, + } + leader := qbft.RoundRobinProposer(&qbft.State{Height: qbft.Height(dutySlot), Share: share}, qbft.FirstRound) return leader } @@ -139,7 +163,7 @@ func processNonCorruptedOperatorLogs(ctx context.Context, logger *zap.Logger, cl msgType: types.SSVConsensusMsgType, consensusMsgType: qbft.ProposalMsgType, signer: corruptedOperator, - error: failedVerifySigLog, + error: verifySignatureErr, }, { role: types.BNRoleAttester.String(), @@ -157,7 +181,7 @@ func processNonCorruptedOperatorLogs(ctx context.Context, logger *zap.Logger, cl msgType: types.SSVConsensusMsgType, consensusMsgType: qbft.RoundChangeMsgType, signer: corruptedOperator, - error: failedVerifySigLog, + error: verifySignatureErr, }, { role: types.BNRoleAttester.String(), @@ -166,7 +190,7 @@ func processNonCorruptedOperatorLogs(ctx context.Context, logger *zap.Logger, cl msgType: types.SSVConsensusMsgType, consensusMsgType: qbft.PrepareMsgType, signer: corruptedOperator, - error: failedVerifySigLog, + error: verifySignatureErr, }, // TODO: handle decided failed signature } @@ -179,7 +203,7 @@ func processNonCorruptedOperatorLogs(ctx context.Context, logger *zap.Logger, cl msgType: types.SSVConsensusMsgType, consensusMsgType: qbft.PrepareMsgType, signer: corruptedOperator, - error: failedVerifySigLog, + error: verifySignatureErr, }, { role: types.BNRoleAttester.String(), @@ -188,7 +212,7 @@ func processNonCorruptedOperatorLogs(ctx context.Context, logger *zap.Logger, cl msgType: types.SSVConsensusMsgType, consensusMsgType: qbft.CommitMsgType, signer: corruptedOperator, - error: failedVerifySigLog, + error: verifySignatureErr, }, // TODO: handle decided failed signature } @@ -222,11 +246,10 @@ func matchSingleConditionLog(ctx context.Context, logger *zap.Logger, cli Docker } filteredLogs := res.Grep(first) - logger.Info("matched", zap.Int("count", len(filteredLogs)), zap.String("target", target), zap.Strings("match_string", first)) if len(filteredLogs) != 1 { - return fmt.Errorf("found non matching messages on (1) %v, want %v got %v", target, 1, len(filteredLogs)) + return fmt.Errorf("found non matching messages on %v, want %v got %v", target, 1, len(filteredLogs)) } return nil @@ -265,7 +288,7 @@ func matchDualConditionLog(ctx context.Context, logger *zap.Logger, cli DockerCL logger.Info("matched", zap.Int("count", len(filteredLogs)), zap.String("target", target), zap.Strings("match_string", fail)) if len(filteredLogs) != 1 { - return fmt.Errorf("found non matching messages on (3) %v, want %v got %v", target, 1, len(filteredLogs)) + return fmt.Errorf("found non matching messages on %v, want %v got %v", target, 1, len(filteredLogs)) } } diff --git a/e2e/run.sh b/e2e/run.sh index 9cb23c07af..882a221c99 100755 --- a/e2e/run.sh +++ b/e2e/run.sh @@ -52,6 +52,8 @@ save_logs() { export BEACON_NODE_URL=http://prod-standalone-holesky.bloxinfra.com:5052 export EXECUTION_NODE_URL=ws://prod-standalone-holesky.bloxinfra.com:8548/ws +export BEACON_NODE_URL=http://bn-h-2.stage.bloxinfra.com:3502/ +export EXECUTION_NODE_URL=ws://bn-h-2.stage.bloxinfra.com:8557/ws # Step 1: Start the beacon_proxy and ssv-node services docker compose up -d --build 
diff --git a/e2e/run.sh b/e2e/run.sh
index 9cb23c07af..882a221c99 100755
--- a/e2e/run.sh
+++ b/e2e/run.sh
@@ -52,6 +52,8 @@ save_logs() {

 export BEACON_NODE_URL=http://prod-standalone-holesky.bloxinfra.com:5052
 export EXECUTION_NODE_URL=ws://prod-standalone-holesky.bloxinfra.com:8548/ws
+export BEACON_NODE_URL=http://bn-h-2.stage.bloxinfra.com:3502/
+export EXECUTION_NODE_URL=ws://bn-h-2.stage.bloxinfra.com:8557/ws

 # Step 1: Start the beacon_proxy and ssv-node services
 docker compose up -d --build beacon_proxy ssv-node-1 ssv-node-2 ssv-node-3 ssv-node-4
diff --git a/eth/eventhandler/handlers.go b/eth/eventhandler/handlers.go
index 014629b3ae..3371fe312d 100644
--- a/eth/eventhandler/handlers.go
+++ b/eth/eventhandler/handlers.go
@@ -5,7 +5,8 @@ import (
 	"encoding/hex"
 	"errors"
 	"fmt"
-	"github.com/ssvlabs/ssv/exporter/exporter_message"
+
+	"github.com/ssvlabs/ssv/exporter/convert"

 	"github.com/attestantio/go-eth2-client/spec/phase0"
 	ethcommon "github.com/ethereum/go-ethereum/common"
@@ -373,8 +374,8 @@ func (eh *EventHandler) handleValidatorRemoved(txn basedb.Txn, event *contract.C
 		return emptyPK, &MalformedEventError{Err: ErrShareBelongsToDifferentOwner}
 	}

-	removeDecidedMessages := func(role exporter_message.RunnerRole, store qbftstorage.QBFTStore) error {
-		messageID := exporter_message.NewMsgID(eh.networkConfig.Domain, share.ValidatorPubKey[:], role)
+	removeDecidedMessages := func(role convert.RunnerRole, store qbftstorage.QBFTStore) error {
+		messageID := convert.NewMsgID(eh.networkConfig.Domain, share.ValidatorPubKey[:], role)
 		return store.CleanAllInstances(logger, messageID[:])
 	}
 	err := eh.storageMap.Each(removeDecidedMessages)
@@ -511,10 +512,6 @@ func (eh *EventHandler) handleValidatorExited(txn basedb.Txn, event *contract.Co
 		return nil, &MalformedEventError{Err: ErrShareBelongsToDifferentOwner}
 	}

-	if !share.BelongsToOperator(eh.operatorDataStore.GetOperatorID()) {
-		return nil, nil
-	}
-
 	if share.BeaconMetadata == nil {
 		return nil, nil
 	}
@@ -523,10 +520,14 @@ func (eh *EventHandler) handleValidatorExited(txn basedb.Txn, event *contract.Co
 	copy(pk[:], share.ValidatorPubKey[:])

 	ed := &duties.ExitDescriptor{
+		OwnValidator:   false,
 		PubKey:         pk,
 		ValidatorIndex: share.BeaconMetadata.Index,
 		BlockNumber:    event.Raw.BlockNumber,
 	}
+	if share.BelongsToOperator(eh.operatorDataStore.GetOperatorID()) {
+		ed.OwnValidator = true
+	}

 	return ed, nil
 }
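handleValidatorExited now returns an ExitDescriptor for every exited validator and only flags whether the share belongs to this operator, instead of bailing out early for foreign shares. The two-step assignment above is equivalent to initializing the field directly; a compact rewrite for illustration (not the committed code):

    ed := &duties.ExitDescriptor{
        // BelongsToOperator already yields the boolean that the
        // "set false, then flip to true" sequence computes.
        OwnValidator:   share.BelongsToOperator(eh.operatorDataStore.GetOperatorID()),
        PubKey:         pk,
        ValidatorIndex: share.BeaconMetadata.Index,
        BlockNumber:    event.Raw.BlockNumber,
    }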
diff --git a/eth/executionclient/execution_client.go b/eth/executionclient/execution_client.go
index 3965cc64c6..f9571ecb6e 100644
--- a/eth/executionclient/execution_client.go
+++ b/eth/executionclient/execution_client.go
@@ -221,9 +221,6 @@ func (ec *ExecutionClient) StreamLogs(ctx context.Context, fromBlock uint64) <-c

 // Healthy returns if execution client is currently healthy: responds to requests and not in the syncing state.
 func (ec *ExecutionClient) Healthy(ctx context.Context) error {
-	// TODO ALAN: revert
-	return nil
-
 	if ec.isClosed() {
 		return ErrClosed
 	}
diff --git a/exporter/api/msg.go b/exporter/api/msg.go
index 922ca56b37..c62234ef4f 100644
--- a/exporter/api/msg.go
+++ b/exporter/api/msg.go
@@ -40,24 +40,24 @@ type ParticipantsAPI struct {
 }

 // NewParticipantsAPIMsg creates a new message in a new format from the given message.
-func NewParticipantsAPIMsg(msgs ...qbftstorage.ParticipantsRangeEntry) Message {
-	data, err := ParticipantsAPIData(msgs...)
+func NewParticipantsAPIMsg(msg qbftstorage.ParticipantsRangeEntry) Message {
+	data, err := ParticipantsAPIData(msg)
 	if err != nil {
 		return Message{
 			Type: TypeParticipants,
 			Data: []string{},
 		}
 	}

-	identifier := specqbft.ControllerIdToMessageID(msgs[0].Identifier[:])
+	identifier := specqbft.ControllerIdToMessageID(msg.Identifier[:])
 	pkv := identifier.GetDutyExecutorID()

 	return Message{
 		Type: TypeDecided,
 		Filter: MessageFilter{
 			PublicKey: hex.EncodeToString(pkv),
-			From:      uint64(msgs[0].Slot),
-			To:        uint64(msgs[len(msgs)-1].Slot),
-			Role:      msgs[0].Identifier.GetRoleType().String(),
+			From:      uint64(msg.Slot),
+			To:        uint64(msg.Slot),
+			Role:      msg.Identifier.GetRoleType().String(),
 		},
 		Data: data,
 	}
@@ -100,7 +100,7 @@ type MessageFilter struct {
 	To uint64 `json:"to"`
 	// Role is the duty type, optional as it's relevant for IBFT data
 	Role string `json:"role,omitempty"`
-	// PublicKeys is optional, used for fetching decided messages or information about specific validator/operator
+	// PublicKey is optional, used for fetching decided messages or information about specific validator/operator
 	PublicKey string `json:"publicKey,omitempty"`
 }
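NewParticipantsAPIMsg now takes exactly one ParticipantsRangeEntry, so the filter's From/To window always collapses to that entry's slot. Callers that previously batched a whole range would emit one message per entry instead; a hypothetical call site (the entries slice and the out channel are assumptions for illustration):

    for _, entry := range entries {
        // one API message per participants entry;
        // Filter.From == Filter.To == uint64(entry.Slot)
        out <- NewParticipantsAPIMsg(entry)
    }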
diff --git a/exporter/api/query_handlers.go b/exporter/api/query_handlers.go
index eec14ae72a..8c91f5c731 100644
--- a/exporter/api/query_handlers.go
+++ b/exporter/api/query_handlers.go
@@ -3,15 +3,15 @@ package api
 import (
 	"encoding/hex"
 	"fmt"
+
 	"github.com/attestantio/go-eth2-client/spec/phase0"
 	spectypes "github.com/ssvlabs/ssv-spec/types"
-	"github.com/ssvlabs/ssv/exporter/exporter_message"
-	"github.com/ssvlabs/ssv/logging/fields"
-	"github.com/ssvlabs/ssv/protocol/v2/message"
-	"github.com/ssvlabs/ssv/protocol/v2/types"
 	"go.uber.org/zap"

+	"github.com/ssvlabs/ssv/exporter/convert"
 	"github.com/ssvlabs/ssv/ibft/storage"
+	"github.com/ssvlabs/ssv/logging/fields"
+	"github.com/ssvlabs/ssv/protocol/v2/message"
 )

 const (
@@ -47,7 +47,7 @@ func HandleUnknownQuery(logger *zap.Logger, nm *NetworkMessage) {
 }

 // HandleParticipantsQuery handles TypeParticipants queries.
-func HandleParticipantsQuery(logger *zap.Logger, qbftStorage *storage.QBFTStores, nm *NetworkMessage) {
+func HandleParticipantsQuery(logger *zap.Logger, qbftStorage *storage.QBFTStores, nm *NetworkMessage, domain spectypes.DomainType) {
 	logger.Debug("handles query request",
 		zap.Uint64("from", nm.Msg.Filter.From),
 		zap.Uint64("to", nm.Msg.Filter.To),
@@ -72,16 +72,16 @@ func HandleParticipantsQuery(logger *zap.Logger, qbftStorage *storage.QBFTStores
 		nm.Msg = res
 		return
 	}
-	runnerRole := exporter_message.RunnerRole(beaconRole)
+	runnerRole := convert.RunnerRole(beaconRole)

 	roleStorage := qbftStorage.Get(runnerRole)
 	if roleStorage == nil {
-		logger.Warn("role storage doesn't exist", fields.Role(spectypes.RunnerRole(runnerRole)))
+		logger.Warn("role storage doesn't exist", fields.ExporterRole(runnerRole))
 		res.Data = []string{"internal error - role storage doesn't exist", beaconRole.String()}
 		nm.Msg = res
 		return
 	}

-	msgID := exporter_message.NewMsgID(types.GetDefaultDomain(), pkRaw, runnerRole)
+	msgID := convert.NewMsgID(domain, pkRaw, runnerRole)
 	from := phase0.Slot(nm.Msg.Filter.From)
 	to := phase0.Slot(nm.Msg.Filter.To)
 	participantsList, err := roleStorage.GetParticipantsInRange(msgID, from, to)
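HandleParticipantsQuery no longer reaches for the process-global default domain (types.GetDefaultDomain, removed elsewhere in this patch); the caller injects the network's domain type explicitly. A hypothetical call site (the networkConfig variable is an assumption):

    // the domain now travels with the call instead of via types.SetDefaultDomain
    HandleParticipantsQuery(logger, qbftStorage, nm, networkConfig.Domain)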
diff --git a/exporter/api/query_handlers_test.go b/exporter/api/query_handlers_test.go
index d70629131e..d05f0f9b43 100644
--- a/exporter/api/query_handlers_test.go
+++ b/exporter/api/query_handlers_test.go
@@ -2,15 +2,10 @@ package api

 import (
 	"crypto/rsa"
-	"github.com/ssvlabs/ssv/exporter/exporter_message"
 	"math"
 	"testing"

 	"github.com/attestantio/go-eth2-client/spec/phase0"
-	"github.com/ssvlabs/ssv/logging"
-	"github.com/ssvlabs/ssv/storage/kv"
-	"github.com/ssvlabs/ssv/utils/rsaencryption"
-
 	"github.com/herumi/bls-eth-go-binary/bls"
 	"github.com/pkg/errors"
 	specqbft "github.com/ssvlabs/ssv-spec/qbft"
@@ -18,11 +13,15 @@ import (
 	"github.com/stretchr/testify/require"
 	"go.uber.org/zap"

+	"github.com/ssvlabs/ssv/exporter/convert"
 	qbftstorage "github.com/ssvlabs/ssv/ibft/storage"
+	"github.com/ssvlabs/ssv/logging"
+	"github.com/ssvlabs/ssv/networkconfig"
 	"github.com/ssvlabs/ssv/operator/storage"
 	protocoltesting "github.com/ssvlabs/ssv/protocol/v2/testing"
-	"github.com/ssvlabs/ssv/protocol/v2/types"
 	"github.com/ssvlabs/ssv/storage/basedb"
+	"github.com/ssvlabs/ssv/storage/kv"
+	"github.com/ssvlabs/ssv/utils/rsaencryption"
 )

 func TestHandleUnknownQuery(t *testing.T) {
@@ -88,11 +87,12 @@ func TestHandleDecidedQuery(t *testing.T) {
 	db, l, done := newDBAndLoggerForTest(logger)
 	defer done()

-	roles := []exporter_message.RunnerRole{
-		exporter_message.RoleCommittee,
-		exporter_message.RoleProposer,
-		exporter_message.RoleAggregator,
-		exporter_message.RoleSyncCommitteeContribution,
+	roles := []convert.RunnerRole{
+		convert.RoleAttester,
+		convert.RoleCommittee,
+		convert.RoleProposer,
+		convert.RoleAggregator,
+		convert.RoleSyncCommitteeContribution,
 		// skipping spectypes.BNRoleSyncCommitteeContribution to test non-existing storage
 	}
 	_, ibftStorage := newStorageForTest(db, l, roles...)
@@ -104,76 +104,80 @@ func TestHandleDecidedQuery(t *testing.T) {
 		oids = append(oids, o.OperatorID)
 	}

-	role := exporter_message.RoleCommittee
-	pk := sks[1].GetPublicKey()
-	decided250Seq, err := protocoltesting.CreateMultipleStoredInstances(rsaKeys, specqbft.Height(0), specqbft.Height(250), func(height specqbft.Height) ([]spectypes.OperatorID, *specqbft.Message) {
-		id := exporter_message.NewMsgID(types.GetDefaultDomain(), pk.Serialize(), role)
-		return oids, &specqbft.Message{
-			MsgType:    specqbft.CommitMsgType,
-			Height:     height,
-			Round:      1,
-			Identifier: id[:],
-			Root:       [32]byte{0x1, 0x2, 0x3},
+	for _, role := range roles {
+		pk := sks[1].GetPublicKey()
+		networkConfig, err := networkconfig.GetNetworkConfigByName(networkconfig.HoleskyStage.Name)
+		require.NoError(t, err)
+		decided250Seq, err := protocoltesting.CreateMultipleStoredInstances(rsaKeys, specqbft.Height(0), specqbft.Height(250), func(height specqbft.Height) ([]spectypes.OperatorID, *specqbft.Message) {
+			id := convert.NewMsgID(networkConfig.Domain, pk.Serialize(), role)
+			return oids, &specqbft.Message{
+				MsgType:    specqbft.CommitMsgType,
+				Height:     height,
+				Round:      1,
+				Identifier: id[:],
+				Root:       [32]byte{0x1, 0x2, 0x3},
+			}
+		})
+		require.NoError(t, err)
+
+		// save decided
+		for _, d := range decided250Seq {
+			require.NoError(t, ibftStorage.Get(role).SaveInstance(d))
+			require.NoError(t, ibftStorage.Get(role).SaveParticipants(convert.MessageID(d.DecidedMessage.SSVMessage.MsgID),
+				phase0.Slot(d.State.Height),
+				d.DecidedMessage.OperatorIDs),
+			)
 		}
-	})
-	require.NoError(t, err)
-
-	// save decided
-	for _, d := range decided250Seq {
-		require.NoError(t, ibftStorage.Get(role).SaveInstance(d))
-		require.NoError(t, ibftStorage.Get(role).SaveParticipants(exporter_message.MessageID(d.DecidedMessage.SSVMessage.MsgID),
-			phase0.Slot(d.State.Height),
-			d.DecidedMessage.OperatorIDs),
-		)
-	}

-	t.Run("valid range", func(t *testing.T) {
-		nm := newDecidedAPIMsg(pk.SerializeToHexStr(), spectypes.BNRoleAttester, 0, 250)
-		HandleParticipantsQuery(l, ibftStorage, nm)
-		require.NotNil(t, nm.Msg.Data)
-		msgs, ok := nm.Msg.Data.([]*SignedMessageAPI)
-		require.True(t, ok, "expected []*SignedMessageAPI, got %+v", nm.Msg.Data)
-		require.Equal(t, 251, len(msgs)) // seq 0 - 250
-	})
-
-	t.Run("invalid range", func(t *testing.T) {
-		nm := newDecidedAPIMsg(pk.SerializeToHexStr(), spectypes.BNRoleAttester, 400, 404)
-		HandleParticipantsQuery(l, ibftStorage, nm)
-		require.NotNil(t, nm.Msg.Data)
-		data, ok := nm.Msg.Data.([]string)
-		require.True(t, ok)
-		require.Equal(t, []string{"no messages"}, data)
-	})
-
-	t.Run("non-existing validator", func(t *testing.T) {
-		nm := newDecidedAPIMsg("xxx", spectypes.BNRoleAttester, 400, 404)
-		HandleParticipantsQuery(l, ibftStorage, nm)
-		require.NotNil(t, nm.Msg.Data)
-		errs, ok := nm.Msg.Data.([]string)
-		require.True(t, ok)
-		require.Equal(t, "internal error - could not read validator key", errs[0])
-	})
-
-	t.Run("non-existing role", func(t *testing.T) {
-		nm := newDecidedAPIMsg(pk.SerializeToHexStr(), math.MaxUint64, 0, 250)
-		HandleParticipantsQuery(l, ibftStorage, nm)
-		require.NotNil(t, nm.Msg.Data)
-		errs, ok := nm.Msg.Data.([]string)
-		require.True(t, ok)
-		require.Equal(t, "role doesn't exist", errs[0])
-	})
-
-	t.Run("non-existing storage", func(t *testing.T) {
-		nm := newDecidedAPIMsg(pk.SerializeToHexStr(), spectypes.BNRoleSyncCommitteeContribution, 0, 250)
-		HandleParticipantsQuery(l, ibftStorage, nm)
-		require.NotNil(t, nm.Msg.Data)
-		errs, ok := nm.Msg.Data.([]string)
-		require.True(t, ok)
require.Equal(t, "internal error - role storage doesn't exist", errs[0]) - }) + t.Run("valid range", func(t *testing.T) { + nm := newParticipantsAPIMsg(pk.SerializeToHexStr(), spectypes.BNRoleAttester, 0, 250) + HandleParticipantsQuery(l, ibftStorage, nm, networkConfig.Domain) + require.NotNil(t, nm.Msg.Data) + msgs, ok := nm.Msg.Data.([]*ParticipantsAPI) + + require.True(t, ok, "expected []*ParticipantsAPI, got %+v", nm.Msg.Data) + require.Equal(t, 251, len(msgs)) // seq 0 - 250 + }) + + t.Run("invalid range", func(t *testing.T) { + nm := newParticipantsAPIMsg(pk.SerializeToHexStr(), spectypes.BNRoleAttester, 400, 404) + HandleParticipantsQuery(l, ibftStorage, nm, networkConfig.Domain) + require.NotNil(t, nm.Msg.Data) + data, ok := nm.Msg.Data.([]string) + require.True(t, ok) + require.Equal(t, []string{"no messages"}, data) + }) + + t.Run("non-existing validator", func(t *testing.T) { + nm := newParticipantsAPIMsg("xxx", spectypes.BNRoleAttester, 400, 404) + HandleParticipantsQuery(l, ibftStorage, nm, networkConfig.Domain) + require.NotNil(t, nm.Msg.Data) + errs, ok := nm.Msg.Data.([]string) + require.True(t, ok) + require.Equal(t, "internal error - could not read validator key", errs[0]) + }) + + t.Run("non-existing role", func(t *testing.T) { + nm := newParticipantsAPIMsg(pk.SerializeToHexStr(), math.MaxUint64, 0, 250) + HandleParticipantsQuery(l, ibftStorage, nm, networkConfig.Domain) + require.NotNil(t, nm.Msg.Data) + errs, ok := nm.Msg.Data.([]string) + require.True(t, ok) + require.Equal(t, "role doesn't exist", errs[0]) + }) + + t.Run("non-existing storage", func(t *testing.T) { + nm := newParticipantsAPIMsg(pk.SerializeToHexStr(), spectypes.BNRoleSyncCommitteeContribution, 0, 250) + HandleParticipantsQuery(l, ibftStorage, nm, networkConfig.Domain) + require.NotNil(t, nm.Msg.Data) + errs, ok := nm.Msg.Data.([]string) + require.True(t, ok) + require.Equal(t, "internal error - role storage doesn't exist", errs[0]) + }) + } } -func newDecidedAPIMsg(pk string, role spectypes.BeaconRole, from, to uint64) *NetworkMessage { +func newParticipantsAPIMsg(pk string, role spectypes.BeaconRole, from, to uint64) *NetworkMessage { return &NetworkMessage{ Msg: Message{ Type: TypeDecided, @@ -199,7 +203,7 @@ func newDBAndLoggerForTest(logger *zap.Logger) (basedb.Database, *zap.Logger, fu } } -func newStorageForTest(db basedb.Database, logger *zap.Logger, roles ...exporter_message.RunnerRole) (storage.Storage, *qbftstorage.QBFTStores) { +func newStorageForTest(db basedb.Database, logger *zap.Logger, roles ...convert.RunnerRole) (storage.Storage, *qbftstorage.QBFTStores) { sExporter, err := storage.NewNodeStorage(logger, db) if err != nil { panic(err) diff --git a/exporter/convert/message.go b/exporter/convert/message.go new file mode 100644 index 0000000000..0a11f6d8f3 --- /dev/null +++ b/exporter/convert/message.go @@ -0,0 +1,63 @@ +package convert + +import ( + "encoding/binary" + "encoding/hex" + spectypes "github.com/ssvlabs/ssv-spec/types" +) + +const ( + domainSize = 4 + domainStartPos = 0 + roleTypeSize = 4 + roleTypeStartPos = domainStartPos + domainSize + dutyExecutorIDSize = 48 + dutyExecutorIDStartPos = roleTypeStartPos + roleTypeSize +) + +// MessageID is used to identify and route messages to the right validator and Runner +type MessageID [56]byte + +func (msg MessageID) GetDomain() []byte { + return msg[domainStartPos : domainStartPos+domainSize] +} + +func (msg MessageID) GetDutyExecutorID() []byte { + return msg[dutyExecutorIDStartPos : dutyExecutorIDStartPos+dutyExecutorIDSize] +} 
+
+func (msg MessageID) GetRoleType() RunnerRole {
+	roleByts := msg[roleTypeStartPos : roleTypeStartPos+roleTypeSize]
+	return RunnerRole(binary.LittleEndian.Uint32(roleByts))
+}
+
+func NewMsgID(domain spectypes.DomainType, pk []byte, role RunnerRole) MessageID {
+	roleByts := make([]byte, 4)
+	binary.LittleEndian.PutUint32(roleByts, uint32(role))
+
+	return newMessageID(domain[:], pk, roleByts)
+}
+
+func (msgID MessageID) String() string {
+	return hex.EncodeToString(msgID[:])
+}
+
+func MessageIDFromBytes(mid []byte) MessageID {
+	if len(mid) < domainSize+dutyExecutorIDSize+roleTypeSize {
+		return MessageID{}
+	}
+	return newMessageID(
+		mid[domainStartPos:domainStartPos+domainSize],
+		mid[dutyExecutorIDStartPos:dutyExecutorIDStartPos+dutyExecutorIDSize],
+		mid[roleTypeStartPos:roleTypeStartPos+roleTypeSize],
+	)
+}
+
+func newMessageID(domain, dutyExecutorID, roleByts []byte) MessageID {
+	mid := MessageID{}
+	copy(mid[domainStartPos:domainStartPos+domainSize], domain[:])
+	copy(mid[roleTypeStartPos:roleTypeStartPos+roleTypeSize], roleByts)
+	prefixLen := dutyExecutorIDSize - len(dutyExecutorID)
+	copy(mid[dutyExecutorIDStartPos+prefixLen:dutyExecutorIDStartPos+dutyExecutorIDSize], dutyExecutorID)
+	return mid
+}
diff --git a/exporter/convert/roles.go b/exporter/convert/roles.go
new file mode 100644
index 0000000000..d854e1a936
--- /dev/null
+++ b/exporter/convert/roles.go
@@ -0,0 +1,61 @@
+package convert
+
+type RunnerRole int32
+
+const (
+	RoleAttester RunnerRole = iota
+	RoleAggregator
+	RoleProposer
+	RoleSyncCommitteeContribution
+	RoleSyncCommittee
+
+	RoleValidatorRegistration
+	RoleVoluntaryExit
+	RoleCommittee
+)
+
+// String returns name of the runner role
+func (r RunnerRole) String() string {
+	switch r {
+	case RoleAttester:
+		return "ATTESTER"
+	case RoleAggregator:
+		return "AGGREGATOR"
+	case RoleProposer:
+		return "PROPOSER"
+	case RoleSyncCommittee:
+		return "SYNC_COMMITTEE"
+	case RoleSyncCommitteeContribution:
+		return "SYNC_COMMITTEE_CONTRIBUTION"
+	case RoleValidatorRegistration:
+		return "VALIDATOR_REGISTRATION"
+	case RoleVoluntaryExit:
+		return "VOLUNTARY_EXIT"
+	case RoleCommittee:
+		return "COMMITTEE"
+	default:
+		return "UNDEFINED"
+	}
+}
+
+// ToBeaconRole returns name of the beacon role
+func (r RunnerRole) ToBeaconRole() string {
+	switch r {
+	case RoleAttester:
+		return "ATTESTER"
+	case RoleAggregator:
+		return "AGGREGATOR"
+	case RoleProposer:
+		return "PROPOSER"
+	case RoleSyncCommittee:
+		return "SYNC_COMMITTEE"
+	case RoleSyncCommitteeContribution:
+		return "SYNC_COMMITTEE_CONTRIBUTION"
+	case RoleValidatorRegistration:
+		return "VALIDATOR_REGISTRATION"
+	case RoleVoluntaryExit:
+		return "VOLUNTARY_EXIT"
+	default:
+		return "UNDEFINED"
+	}
+}
diff --git a/ibft/storage/store.go b/ibft/storage/store.go
index bd12dc4a76..41e2bae95e 100644
--- a/ibft/storage/store.go
+++ b/ibft/storage/store.go
@@ -4,13 +4,12 @@ import (
 	"encoding/binary"
 	"fmt"

 	"github.com/attestantio/go-eth2-client/spec/phase0"
-	spectypes "github.com/ssvlabs/ssv-spec/types"
-	"github.com/ssvlabs/ssv/exporter/exporter_message"
-
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	specqbft "github.com/ssvlabs/ssv-spec/qbft"
+	spectypes "github.com/ssvlabs/ssv-spec/types"
+	"github.com/ssvlabs/ssv/exporter/convert"
 	"go.uber.org/zap"

 	"github.com/ssvlabs/ssv/protocol/v2/qbft/instance"
@@ -155,15 +154,19 @@ func (i *ibftStorage) CleanAllInstances(logger *zap.Logger, msgID []byte) error
 	return nil
 }

-func (i *ibftStorage) SaveParticipants(identifier exporter_message.MessageID, slot phase0.Slot, operators []spectypes.OperatorID) error {
-	if err := i.save(encodeOperators(operators), participantsKey, identifier[:], uInt64ToByteSlice(uint64(slot))); err != nil {
+func (i *ibftStorage) SaveParticipants(identifier convert.MessageID, slot phase0.Slot, operators []spectypes.OperatorID) error {
+	bytes, err := encodeOperators(operators)
+	if err != nil {
+		return err
+	}
+	if err := i.save(bytes, participantsKey, identifier[:], uInt64ToByteSlice(uint64(slot))); err != nil {
 		return fmt.Errorf("could not save participants: %w", err)
 	}
 	return nil
 }

-func (i *ibftStorage) GetParticipantsInRange(identifier exporter_message.MessageID, from, to phase0.Slot) ([]qbftstorage.ParticipantsRangeEntry, error) {
+func (i *ibftStorage) GetParticipantsInRange(identifier convert.MessageID, from, to phase0.Slot) ([]qbftstorage.ParticipantsRangeEntry, error) {
 	participantsRange := make([]qbftstorage.ParticipantsRangeEntry, 0)

 	for slot := from; slot <= to; slot++ {
@@ -186,7 +189,7 @@ func (i *ibftStorage) GetParticipantsInRange(identifier exporter_message.Message
 	return participantsRange, nil
 }

-func (i *ibftStorage) GetParticipants(identifier exporter_message.MessageID, slot phase0.Slot) ([]spectypes.OperatorID, error) {
+func (i *ibftStorage) GetParticipants(identifier convert.MessageID, slot phase0.Slot) ([]spectypes.OperatorID, error) {
 	val, found, err := i.get(participantsKey, identifier[:], uInt64ToByteSlice(uint64(slot)))
 	if err != nil {
 		return nil, err
@@ -238,13 +241,16 @@ func uInt64ToByteSlice(n uint64) []byte {
 	return b
 }

-func encodeOperators(operators []spectypes.OperatorID) []byte {
+func encodeOperators(operators []spectypes.OperatorID) ([]byte, error) {
+	if len(operators) != 4 && len(operators) != 7 && len(operators) != 13 {
+		return nil, fmt.Errorf("invalid operators list size: %d", len(operators))
+	}
 	encoded := make([]byte, len(operators)*8)
 	for i, v := range operators {
 		binary.BigEndian.PutUint64(encoded[i*8:], v)
 	}

-	return encoded
+	return encoded, nil
 }

 func decodeOperators(encoded []byte) []spectypes.OperatorID {
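encodeOperators now rejects any operator set that is not a valid SSV committee size (4, 7, or 13), and each ID occupies exactly 8 big-endian bytes, so decoding is a simple length/8 walk. A round-trip sketch using the unexported functions (it would have to live inside this storage package):

    ops := []spectypes.OperatorID{1, 2, 3, 4} // 4-of-4 committee
    encoded, err := encodeOperators(ops)      // 32 bytes: 4 IDs * 8 bytes each
    if err != nil {
        return err // any size other than 4, 7 or 13 fails here
    }
    decoded := decodeOperators(encoded) // -> [1 2 3 4]
    _ = decoded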
diff --git a/ibft/storage/store_test.go b/ibft/storage/store_test.go
index 5a80f0da13..4397505b46 100644
--- a/ibft/storage/store_test.go
+++ b/ibft/storage/store_test.go
@@ -11,9 +11,10 @@ import (
 	specqbft "github.com/ssvlabs/ssv-spec/qbft"
 	spectypes "github.com/ssvlabs/ssv-spec/types"
 	"github.com/ssvlabs/ssv-spec/types/testingutils"
+	"github.com/ssvlabs/ssv/logging"
+	"github.com/ssvlabs/ssv/networkconfig"
 	qbftstorage "github.com/ssvlabs/ssv/protocol/v2/qbft/storage"
-	"github.com/ssvlabs/ssv/protocol/v2/types"
 	"github.com/ssvlabs/ssv/storage/basedb"
 	"github.com/ssvlabs/ssv/storage/kv"
 )
@@ -21,7 +22,7 @@ import (
 func TestCleanInstances(t *testing.T) {
 	ks := testingutils.Testing4SharesSet()
 	logger := logging.TestLogger(t)
-	msgID := spectypes.NewMsgID(types.GetDefaultDomain(), []byte("pk"), spectypes.RoleCommittee)
+	msgID := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, []byte("pk"), spectypes.RoleCommittee)

 	storage, err := newTestIbftStorage(logger, "test")
 	require.NoError(t, err)
@@ -56,7 +57,7 @@ func TestCleanInstances(t *testing.T) {
 	require.NoError(t, storage.SaveHighestInstance(generateInstance(msgID, specqbft.Height(msgsCount))))

 	// add different msgID
-	differMsgID := spectypes.NewMsgID(types.GetDefaultDomain(), []byte("differ_pk"), spectypes.RoleCommittee)
+	differMsgID := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, []byte("differ_pk"), spectypes.RoleCommittee)
 	require.NoError(t, storage.SaveInstance(generateInstance(differMsgID, specqbft.Height(1))))
 	require.NoError(t, storage.SaveHighestInstance(generateInstance(differMsgID, specqbft.Height(msgsCount))))
 	require.NoError(t, storage.SaveHighestAndHistoricalInstance(generateInstance(differMsgID, specqbft.Height(1))))
@@ -91,7 +92,7 @@ func TestCleanInstances(t *testing.T) {
 }

 func TestSaveAndFetchLastState(t *testing.T) {
-	identifier := spectypes.NewMsgID(types.GetDefaultDomain(), []byte("pk"), spectypes.RoleCommittee)
+	identifier := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, []byte("pk"), spectypes.RoleCommittee)

 	instance := &qbftstorage.StoredInstance{
 		State: &specqbft.State{
@@ -129,7 +130,7 @@ func TestSaveAndFetchLastState(t *testing.T) {
 }

 func TestSaveAndFetchState(t *testing.T) {
-	identifier := spectypes.NewMsgID(types.GetDefaultDomain(), []byte("pk"), spectypes.RoleCommittee)
+	identifier := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, []byte("pk"), spectypes.RoleCommittee)

 	instance := &qbftstorage.StoredInstance{
 		State: &specqbft.State{
@@ -185,14 +186,21 @@ func TestEncodeDecodeOperators(t *testing.T) {
 		input   []uint64
 		encoded []byte
 	}{
-		{[]uint64{0x0123456789ABCDEF, 0xFEDCBA9876543210}, []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10}},
-		{[]uint64{0, 1, 2, 3}, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3}},
-		{[]uint64{}, []byte{}},
+		// Valid sizes: 4
+		{[]uint64{0x0123456789ABCDEF, 0xFEDCBA9876543210, 0x1122334455667788, 0x8877665544332211},
+			[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11}},
+		// Valid sizes: 7
+		{[]uint64{1, 2, 3, 4, 5, 6, 7},
+			[]byte{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7}},
+		// Valid sizes: 13
+		{[]uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
+			[]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 12}},
 	}

 	for i, tc := range testCases {
 		t.Run(fmt.Sprintf("Case %d", i+1), func(t *testing.T) {
-			encoded := encodeOperators(tc.input)
+			encoded, err := encodeOperators(tc.input)
+			require.NoError(t, err)
 			require.Equal(t, tc.encoded, encoded)

 			decoded := decodeOperators(encoded)
diff --git a/ibft/storage/stores.go b/ibft/storage/stores.go
index 11fad128ad..07550b0dc0 100644
--- a/ibft/storage/stores.go
+++ b/ibft/storage/stores.go
@@ -2,24 +2,23 @@ package storage

 import (
 	"github.com/cornelk/hashmap"
-	"github.com/ssvlabs/ssv/exporter/exporter_message"
-
+	"github.com/ssvlabs/ssv/exporter/convert"
 	qbftstorage "github.com/ssvlabs/ssv/protocol/v2/qbft/storage"
 	"github.com/ssvlabs/ssv/storage/basedb"
 )

 // QBFTStores wraps sync map with cast functions to qbft store
 type QBFTStores struct {
-	m *hashmap.Map[exporter_message.RunnerRole, qbftstorage.QBFTStore]
+	m *hashmap.Map[convert.RunnerRole, qbftstorage.QBFTStore]
 }

 func NewStores() *QBFTStores {
 	return &QBFTStores{
-		m: hashmap.New[exporter_message.RunnerRole, qbftstorage.QBFTStore](),
+		m: hashmap.New[convert.RunnerRole, qbftstorage.QBFTStore](),
 	}
 }

-func NewStoresFromRoles(db basedb.Database, roles ...exporter_message.RunnerRole) *QBFTStores {
+func NewStoresFromRoles(db basedb.Database, roles ...convert.RunnerRole) *QBFTStores {
 	stores := NewStores()
 	for _, role := range roles {
 		stores.Add(role, New(db, role.String()))
@@ -28,7 +27,7 @@ func NewStoresFromRoles(db basedb.Database, roles ...exporter_message.RunnerRole
 }

 // Get store from sync map by role type
-func (qs *QBFTStores) Get(role exporter_message.RunnerRole) qbftstorage.QBFTStore {
+func (qs *QBFTStores) Get(role convert.RunnerRole) qbftstorage.QBFTStore {
 	s, ok := qs.m.Get(role)
 	if !ok {
 		return nil
@@ -37,14 +36,14 @@ func (qs *QBFTStores) Get(role exporter_message.RunnerRole) qbftstorage.QBFTStor
 }

 // Add store to sync map by role as a key
-func (qs *QBFTStores) Add(role exporter_message.RunnerRole, store qbftstorage.QBFTStore) {
+func (qs *QBFTStores) Add(role convert.RunnerRole, store qbftstorage.QBFTStore) {
 	qs.m.Set(role, store)
 }

 // Each iterates over all stores and calls the given function
-func (qs *QBFTStores) Each(f func(role exporter_message.RunnerRole, store qbftstorage.QBFTStore) error) error {
+func (qs *QBFTStores) Each(f func(role convert.RunnerRole, store qbftstorage.QBFTStore) error) error {
 	var err error
-	qs.m.Range(func(role exporter_message.RunnerRole, store qbftstorage.QBFTStore) bool {
+	qs.m.Range(func(role convert.RunnerRole, store qbftstorage.QBFTStore) bool {
 		err = f(role, store)
 		return err == nil
 	})
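QBFTStores.Each still stops at the first error (the Range callback returns err == nil), which is what handleValidatorRemoved above relies on when wiping decided instances across all roles. A usage sketch (msgID construction elided):

    err := stores.Each(func(role convert.RunnerRole, store qbftstorage.QBFTStore) error {
        // returning a non-nil error aborts iteration over the remaining roles
        return store.CleanAllInstances(logger, msgID[:])
    })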
diff --git a/integration/qbft/tests/scenario_test.go b/integration/qbft/tests/scenario_test.go
index 737b6a08ab..e415669dc8 100644
--- a/integration/qbft/tests/scenario_test.go
+++ b/integration/qbft/tests/scenario_test.go
@@ -81,7 +81,7 @@ func (s *Scenario) Run(t *testing.T, role spectypes.BeaconRole) {
 			//validating state of validator after invoking duties
 			for id, validationFunc := range s.ValidationFunctions {
-				identifier := spectypes.NewMsgID(types.GetDefaultDomain(), getKeySet(s.Committee).ValidatorPK.Serialize(), spectypes.MapDutyToRunnerRole(role))
+				identifier := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, getKeySet(s.Committee).ValidatorPK.Serialize(), spectypes.MapDutyToRunnerRole(role))
 				//getting stored state of validator
 				var storedInstance *protocolstorage.StoredInstance
 				for {
@@ -189,7 +189,7 @@ func createValidator(t *testing.T, pCtx context.Context, id spectypes.OperatorID
 	options := protocolvalidator.Options{
 		Storage:       newStores(logger),
 		Network:       node,
-		BeaconNetwork: networkconfig.TestNetwork.Beacon,
+		NetworkConfig: networkconfig.TestNetwork,
 		SSVShare: &types.SSVShare{
 			Share: *testingShare(keySet, id),
 			Metadata: types.Metadata{
diff --git a/integration/qbft/tests/setup_test.go b/integration/qbft/tests/setup_test.go
index c74d6d1857..f2bacbf0e9 100644
--- a/integration/qbft/tests/setup_test.go
+++ b/integration/qbft/tests/setup_test.go
@@ -5,14 +5,12 @@ import (
 	"testing"

 	spectypes "github.com/ssvlabs/ssv-spec/types"
-	"github.com/ssvlabs/ssv-spec/types/testingutils"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/zap"

 	"github.com/ssvlabs/ssv/logging"
 	"github.com/ssvlabs/ssv/network"
 	p2pv1 "github.com/ssvlabs/ssv/network/p2p"
-	"github.com/ssvlabs/ssv/protocol/v2/types"
 )

 const (
@@ -40,8 +38,6 @@ func TestMain(m *testing.M) {

 	logger := zap.L().Named("integration-tests")

-	types.SetDefaultDomain(testingutils.TestingSSVDomainType)
-
 	ln, err := p2pv1.CreateAndStartLocalNet(ctx, logger, p2pv1.LocalNetOptions{
 		Nodes:        maxSupportedCommittee,
 		MinConnected: maxSupportedQuorum,
diff --git a/integration/qbft/tests/temp_testing_beacon_network.go b/integration/qbft/tests/temp_testing_beacon_network.go
index 3df29c07fd..c9a1c44c19 100644
--- a/integration/qbft/tests/temp_testing_beacon_network.go
+++ b/integration/qbft/tests/temp_testing_beacon_network.go
@@ -1,6 +1,13 @@
 package tests

 import (
+	"github.com/attestantio/go-eth2-client/api"
+	"github.com/attestantio/go-eth2-client/spec"
+	"github.com/attestantio/go-eth2-client/spec/altair"
+	"github.com/attestantio/go-eth2-client/spec/bellatrix"
+	"github.com/attestantio/go-eth2-client/spec/phase0"
+	ssz "github.com/ferranbt/fastssz"
+	spectypes "github.com/ssvlabs/ssv-spec/types"
 	spectestingutils "github.com/ssvlabs/ssv-spec/types/testingutils"
 	"github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon"
 )
@@ -14,6 +21,63 @@ func (bn *TestingBeaconNodeWrapped) SetSyncCommitteeAggregatorRootHexes(roots ma
 	bn.bn.SetSyncCommitteeAggregatorRootHexes(roots)
 }

+func (bn *TestingBeaconNodeWrapped) GetBroadcastedRoots() []phase0.Root {
+	return bn.bn.BroadcastedRoots
+}
+
+func (bn *TestingBeaconNodeWrapped) GetBeaconNode() *spectestingutils.TestingBeaconNode {
+	return bn.bn
+}
+
+func (bn *TestingBeaconNodeWrapped) GetAttestationData(slot phase0.Slot, committeeIndex phase0.CommitteeIndex) (*phase0.AttestationData, spec.DataVersion, error) {
+	return bn.bn.GetAttestationData(slot, committeeIndex)
+}
+func (bn *TestingBeaconNodeWrapped) DomainData(epoch phase0.Epoch, domain phase0.DomainType) (phase0.Domain, error) {
+	return bn.bn.DomainData(epoch, domain)
+}
+func (bn *TestingBeaconNodeWrapped) SyncCommitteeSubnetID(index phase0.CommitteeIndex) (uint64, error) {
+	return bn.bn.SyncCommitteeSubnetID(index)
+}
+func (bn *TestingBeaconNodeWrapped) IsSyncCommitteeAggregator(proof []byte) (bool, error) {
+	return bn.bn.IsSyncCommitteeAggregator(proof)
+}
+func (bn *TestingBeaconNodeWrapped) GetSyncCommitteeContribution(slot phase0.Slot, selectionProofs []phase0.BLSSignature, subnetIDs []uint64) (ssz.Marshaler, spec.DataVersion, error) {
+	return bn.bn.GetSyncCommitteeContribution(slot, selectionProofs, subnetIDs)
+}
+func (bn *TestingBeaconNodeWrapped) SubmitAggregateSelectionProof(slot phase0.Slot, committeeIndex phase0.CommitteeIndex, committeeLength uint64, index phase0.ValidatorIndex, slotSig []byte) (ssz.Marshaler, spec.DataVersion, error) {
+	return bn.bn.SubmitAggregateSelectionProof(slot, committeeIndex, committeeLength, index, slotSig)
+}
+func (bn *TestingBeaconNodeWrapped) GetBeaconNetwork() spectypes.BeaconNetwork {
+	return bn.bn.GetBeaconNetwork()
+}
+func (bn *TestingBeaconNodeWrapped) GetBeaconBlock(slot phase0.Slot, graffiti, randao []byte) (ssz.Marshaler, spec.DataVersion, error) {
+	return bn.bn.GetBeaconBlock(slot, graffiti, randao)
+}
+func (bn *TestingBeaconNodeWrapped) SubmitValidatorRegistration(pubkey []byte, feeRecipient bellatrix.ExecutionAddress, sig phase0.BLSSignature) error {
+	return bn.bn.SubmitValidatorRegistration(pubkey, feeRecipient, sig)
+}
+func (bn *TestingBeaconNodeWrapped) SubmitVoluntaryExit(voluntaryExit *phase0.SignedVoluntaryExit) error {
+	return bn.bn.SubmitVoluntaryExit(voluntaryExit)
+}
+func (bn *TestingBeaconNodeWrapped) SubmitAttestations(attestations []*phase0.Attestation) error {
+	return bn.bn.SubmitAttestations(attestations)
+}
+func (bn *TestingBeaconNodeWrapped) SubmitSyncMessages(msgs []*altair.SyncCommitteeMessage) error {
+	return bn.bn.SubmitSyncMessages(msgs)
+}
+func (bn *TestingBeaconNodeWrapped) SubmitBlindedBeaconBlock(block *api.VersionedBlindedProposal, sig phase0.BLSSignature) error {
+	return bn.bn.SubmitBlindedBeaconBlock(block, sig)
+}
+func (bn *TestingBeaconNodeWrapped) SubmitSignedContributionAndProof(contribution *altair.SignedContributionAndProof) error {
+	return bn.bn.SubmitSignedContributionAndProof(contribution)
+}
+func (bn *TestingBeaconNodeWrapped) SubmitSignedAggregateSelectionProof(msg *phase0.SignedAggregateAndProof) error {
+	return bn.bn.SubmitSignedAggregateSelectionProof(msg)
+}
+func (bn *TestingBeaconNodeWrapped) SubmitBeaconBlock(block *api.VersionedProposal, sig phase0.BLSSignature) error {
+	return bn.bn.SubmitBeaconBlock(block, sig)
+}
+
 func NewTestingBeaconNodeWrapped() beacon.BeaconNode {
 	bnw := &TestingBeaconNodeWrapped{}
 	bnw.bn = spectestingutils.NewTestingBeaconNode()
diff --git a/logging/fields/fields.go b/logging/fields/fields.go
index f8eb80c512..bbbad37a73 100644
--- a/logging/fields/fields.go
+++ b/logging/fields/fields.go
@@ -3,6 +3,7 @@ package fields
 import (
 	"encoding/hex"
 	"fmt"
+	"github.com/ssvlabs/ssv/exporter/convert"
 	"net"
 	"net/url"
 	"strconv"
@@ -57,6 +58,7 @@ const (
 	FieldDomain              = "domain"
 	FieldDuration            = "duration"
 	FieldDuties              = "duties"
+	FieldDutyExecutorID      = "duty_executor_id"
 	FieldDutyID              = "duty_id"
 	FieldENR                 = "enr"
 	FieldEpoch               = "epoch"
@@ -81,7 +83,6 @@ const (
 	FieldPubKey              = "pubkey"
 	FieldRole                = "role"
 	FieldRound               = "round"
-	FieldSenderID            = "sender_id"
 	FieldSlot                = "slot"
 	FieldStartTimeUnixMilli  = "start_time_unix_milli"
 	FieldSubmissionTime      = "submission_time"
@@ -130,8 +131,8 @@ func Validator(pubKey []byte) zapcore.Field {
 	return zap.Stringer(FieldValidator, stringer.HexStringer{Val: pubKey})
 }

-func SenderID(senderID []byte) zapcore.Field {
-	return zap.Stringer(FieldSenderID, stringer.HexStringer{Val: senderID})
+func DutyExecutorID(senderID []byte) zapcore.Field {
+	return zap.Stringer(FieldDutyExecutorID, stringer.HexStringer{Val: senderID})
 }

 func AddressURL(val url.URL) zapcore.Field {
@@ -233,6 +234,9 @@ func BeaconRole(val spectypes.BeaconRole) zap.Field {
 func Role(val spectypes.RunnerRole) zap.Field {
 	return zap.Stringer(FieldRole, val)
 }
+func ExporterRole(val convert.RunnerRole) zap.Field {
+	return zap.Stringer(FieldRole, val)
+}

 func MessageID(val spectypes.MessageID) zap.Field {
 	return zap.Stringer(FieldMessageID, val)
diff --git a/message/validation/common_checks.go b/message/validation/common_checks.go
index 3068302ddc..9c17c6d948 100644
--- a/message/validation/common_checks.go
+++ b/message/validation/common_checks.go
@@ -13,12 +13,14 @@ func (mv *messageValidator) committeeRole(role spectypes.RunnerRole) bool {
 }

 func (mv *messageValidator) validateSlotTime(messageSlot phase0.Slot, role spectypes.RunnerRole, receivedAt time.Time) error {
-	if mv.earlyMessage(messageSlot, receivedAt) {
-		return ErrEarlyMessage
+	if earliness := mv.messageEarliness(messageSlot, receivedAt); earliness > clockErrorTolerance {
+		e := ErrEarlySlotMessage
+		e.got = fmt.Sprintf("early by %v", earliness)
+		return e
 	}

-	if lateness := mv.lateMessage(messageSlot, role, receivedAt); lateness > 0 {
-		e := ErrLateMessage
+	if lateness := mv.messageLateness(messageSlot, role, receivedAt); lateness > clockErrorTolerance {
+		e := ErrLateSlotMessage
 		e.got = fmt.Sprintf("late by %v", lateness)
 		return e
 	}
@@ -26,57 +28,90 @@ func (mv *messageValidator) validateSlotTime(messageSlot phase0.Slot, role spect
 	return nil
 }

-func (mv *messageValidator) earlyMessage(slot phase0.Slot, receivedAt time.Time) bool {
-	return mv.netCfg.Beacon.GetSlotEndTime(mv.netCfg.Beacon.EstimatedSlotAtTime(receivedAt.Unix())).
-		Add(-clockErrorTolerance).Before(mv.netCfg.Beacon.GetSlotStartTime(slot))
+// messageEarliness returns how early message is or 0 if it's not
+func (mv *messageValidator) messageEarliness(slot phase0.Slot, receivedAt time.Time) time.Duration {
+	return mv.netCfg.Beacon.GetSlotStartTime(slot).Sub(receivedAt)
 }

-func (mv *messageValidator) lateMessage(slot phase0.Slot, role spectypes.RunnerRole, receivedAt time.Time) time.Duration {
+// messageLateness returns how late message is or 0 if it's not
+func (mv *messageValidator) messageLateness(slot phase0.Slot, role spectypes.RunnerRole, receivedAt time.Time) time.Duration {
 	var ttl phase0.Slot
 	switch role {
 	case spectypes.RoleProposer, spectypes.RoleSyncCommitteeContribution:
 		ttl = 1 + lateSlotAllowance
 	case spectypes.RoleCommittee, spectypes.RoleAggregator:
-		ttl = 32 + lateSlotAllowance
+		ttl = phase0.Slot(mv.netCfg.Beacon.SlotsPerEpoch()) + lateSlotAllowance
 	case spectypes.RoleValidatorRegistration, spectypes.RoleVoluntaryExit:
 		return 0
 	}

 	deadline := mv.netCfg.Beacon.GetSlotStartTime(slot + ttl).
-		Add(lateMessageMargin).Add(clockErrorTolerance)
+		Add(lateMessageMargin)

-	return mv.netCfg.Beacon.GetSlotStartTime(mv.netCfg.Beacon.EstimatedSlotAtTime(receivedAt.Unix())).
-		Sub(deadline)
+	return receivedAt.Sub(deadline)
 }

 func (mv *messageValidator) validateDutyCount(
-	validatorIndices []phase0.ValidatorIndex,
-	state *SignerState,
 	msgID spectypes.MessageID,
-	newDutyInSameEpoch bool,
+	msgSlot phase0.Slot,
+	validatorIndexCount int,
+	signerStateBySlot *OperatorState,
 ) error {
-	var dutyLimit int
+	dutyCount := signerStateBySlot.DutyCount(mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot))
+
+	dutyLimit, exists := mv.dutyLimit(msgID, msgSlot, validatorIndexCount)
+	if !exists {
+		return nil
+	}
+
+	if dutyCount >= dutyLimit {
+		err := ErrTooManyDutiesPerEpoch
+		err.got = fmt.Sprintf("%v (role %v)", dutyCount, msgID.GetRoleType())
+		err.want = fmt.Sprintf("less than %v", dutyLimit)
+		return err
+	}
+
+	return nil
+}

+func (mv *messageValidator) dutyLimit(msgID spectypes.MessageID, slot phase0.Slot, validatorIndexCount int) (int, bool) {
 	switch msgID.GetRoleType() {
-	case spectypes.RoleAggregator, spectypes.RoleValidatorRegistration, spectypes.RoleVoluntaryExit:
-		dutyLimit = 2
+	case spectypes.RoleVoluntaryExit:
+		pk := phase0.BLSPubKey{}
+		copy(pk[:], msgID.GetDutyExecutorID())
+
+		return mv.dutyStore.VoluntaryExit.GetDutyCount(slot, pk), true
+
+	case spectypes.RoleAggregator, spectypes.RoleValidatorRegistration:
+		return 2, true

 	case spectypes.RoleCommittee:
-		dutyLimit = 2 * len(validatorIndices)
+		return 2 * validatorIndexCount, true

 	default:
-		return nil
+		return 0, false
 	}
+}

-	if sameSlot := !newDutyInSameEpoch; sameSlot {
-		dutyLimit++
+func (mv *messageValidator) validateBeaconDuty(
+	role spectypes.RunnerRole,
+	slot phase0.Slot,
+	indices []phase0.ValidatorIndex,
+) error {
+	// Rule: For a proposal duty message, we check if the validator is assigned to it
+	if role == spectypes.RoleProposer {
+		epoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(slot)
+		if mv.dutyStore.Proposer.ValidatorDuty(epoch, slot, indices[0]) == nil {
+			return ErrNoDuty
+		}
 	}

-	if state.EpochDuties >= dutyLimit {
-		err := ErrTooManyDutiesPerEpoch
-		err.got = fmt.Sprintf("%v (role %v)", state.EpochDuties, msgID.GetRoleType())
-		err.want = fmt.Sprintf("less than %v", dutyLimit)
-		return err
+	// Rule: For a sync committee aggregation duty message, we check if the validator is assigned to it
+	if role == spectypes.RoleSyncCommitteeContribution {
+		period := mv.netCfg.Beacon.EstimatedSyncCommitteePeriodAtEpoch(mv.netCfg.Beacon.EstimatedEpochAtSlot(slot))
+		if mv.dutyStore.SyncCommittee.Duty(period, indices[0]) == nil {
+			return ErrNoDuty
+		}
 	}

 	return nil
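messageEarliness and messageLateness now return plain signed durations measured against the raw receive time, and validateSlotTime only rejects once the skew exceeds clockErrorTolerance. A self-contained sketch of the earliness arithmetic (the 50ms tolerance here is illustrative, not the repo's constant):

    package main

    import (
        "fmt"
        "time"
    )

    // messageEarliness mirrors the new check: positive when the message
    // arrived before its slot's start time.
    func messageEarliness(slotStart, receivedAt time.Time) time.Duration {
        return slotStart.Sub(receivedAt)
    }

    func main() {
        slotStart := time.Date(2024, 6, 1, 12, 0, 0, 0, time.UTC)
        tolerance := 50 * time.Millisecond // illustrative clockErrorTolerance

        fmt.Println(messageEarliness(slotStart, slotStart.Add(-20*time.Millisecond)) > tolerance) // false: accepted
        fmt.Println(messageEarliness(slotStart, slotStart.Add(-2*time.Second)) > tolerance)       // true: ErrEarlySlotMessage
    }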
diff --git a/message/validation/consensus_state.go b/message/validation/consensus_state.go
index 2f87f83146..4c9ba16f3d 100644
--- a/message/validation/consensus_state.go
+++ b/message/validation/consensus_state.go
@@ -3,37 +3,94 @@ package validation
 import (
 	"sync"

+	"github.com/attestantio/go-eth2-client/spec/phase0"
 	spectypes "github.com/ssvlabs/ssv-spec/types"
 )

 // consensusID uniquely identifies a public key and role pair to keep track of state.
 type consensusID struct {
-	SenderID string
-	Role     spectypes.RunnerRole
+	DutyExecutorID string
+	Role           spectypes.RunnerRole
 }

 // consensusState keeps track of the signers for a given public key and role.
 type consensusState struct {
-	signers map[spectypes.OperatorID]*SignerState
-	mu      sync.Mutex
+	state           map[spectypes.OperatorID]*OperatorState
+	storedSlotCount phase0.Slot
+	mu              sync.Mutex
 }

-// GetSignerState retrieves the state for the given signer.
-// Returns nil if the signer is not found.
-func (cs *consensusState) GetSignerState(signer spectypes.OperatorID) *SignerState {
+func (cs *consensusState) GetOrCreate(signer spectypes.OperatorID) *OperatorState {
 	cs.mu.Lock()
 	defer cs.mu.Unlock()

-	return cs.signers[signer]
+	if _, ok := cs.state[signer]; !ok {
+		cs.state[signer] = newOperatorState(cs.storedSlotCount)
+	}
+
+	return cs.state[signer]
 }

-// CreateSignerState initializes and sets a new SignerState for the given signer.
-func (cs *consensusState) CreateSignerState(signer spectypes.OperatorID) *SignerState {
-	signerState := &SignerState{}
+type OperatorState struct {
+	mu              sync.RWMutex
+	state           []*SignerState // the slice index is slot % storedSlotCount
+	maxSlot         phase0.Slot
+	maxEpoch        phase0.Epoch
+	lastEpochDuties int
+	prevEpochDuties int
+}

-	cs.mu.Lock()
-	cs.signers[signer] = signerState
-	cs.mu.Unlock()
+func newOperatorState(size phase0.Slot) *OperatorState {
+	return &OperatorState{
+		state: make([]*SignerState, size),
+	}
+}
+
+func (os *OperatorState) Get(slot phase0.Slot) *SignerState {
+	os.mu.RLock()
+	defer os.mu.RUnlock()
+
+	s := os.state[int(slot)%len(os.state)]
+	if s == nil || s.Slot != slot {
+		return nil
+	}
+
+	return s
+}
+
+func (os *OperatorState) Set(slot phase0.Slot, epoch phase0.Epoch, state *SignerState) {
+	os.mu.Lock()
+	defer os.mu.Unlock()
+
+	os.state[int(slot)%len(os.state)] = state
+	if slot > os.maxSlot {
+		os.maxSlot = slot
+	}
+	if epoch > os.maxEpoch {
+		os.maxEpoch = epoch
+		os.prevEpochDuties = os.lastEpochDuties
+		os.lastEpochDuties = 1
+	} else {
+		os.lastEpochDuties++
+	}
+}
+
+func (os *OperatorState) MaxSlot() phase0.Slot {
+	os.mu.RLock()
+	defer os.mu.RUnlock()
+
+	return os.maxSlot
+}
+
+func (os *OperatorState) DutyCount(epoch phase0.Epoch) int {
+	os.mu.RLock()
+	defer os.mu.RUnlock()

-	return signerState
+	if epoch == os.maxEpoch {
+		return os.lastEpochDuties
+	}
+	if epoch == os.maxEpoch-1 {
+		return os.prevEpochDuties
+	}
+	return 0 // unused because messages from too old epochs must be rejected in advance
 }
diff --git a/message/validation/consensus_state_test.go b/message/validation/consensus_state_test.go
new file mode 100644
index 0000000000..e244588901
--- /dev/null
+++ b/message/validation/consensus_state_test.go
@@ -0,0 +1,96 @@
+package validation
+
+import (
+	"testing"
+
+	"github.com/attestantio/go-eth2-client/spec/phase0"
+	"github.com/stretchr/testify/require"
+)
+
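OperatorState stores SignerStates in a fixed-size ring indexed by slot % storedSlotCount; Get re-checks the stored Slot so an entry from storedSlotCount slots ago never aliases a fresh one, and DutyCount only tracks the current and previous epoch. A sketch of the bucket rule (the ring size of 32 is an assumption for illustration):

    package main

    import "fmt"

    func main() {
        const ringSize = 32 // assumed storedSlotCount
        bucket := func(slot uint64) int { return int(slot % ringSize) }

        // Slots 5 and 37 collide in the ring. Because Get compares the stored
        // SignerState.Slot with the requested slot, the stale slot-5 entry
        // reads as nil once slot 37 overwrites the bucket.
        fmt.Println(bucket(5), bucket(37)) // 5 5
    }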
+func TestOperatorState(t *testing.T) {
+	t.Run("TestNewOperatorState", func(t *testing.T) {
+		size := phase0.Slot(10)
+		os := newOperatorState(size)
+		require.NotNil(t, os)
+		require.Equal(t, len(os.state), int(size))
+	})
+
+	t.Run("TestGetAndSet", func(t *testing.T) {
+		size := phase0.Slot(10)
+		os := newOperatorState(size)
+
+		slot := phase0.Slot(5)
+		epoch := phase0.Epoch(1)
+		signerState := &SignerState{Slot: slot}
+
+		os.Set(slot, epoch, signerState)
+		retrievedState := os.Get(slot)
+
+		require.NotNil(t, retrievedState)
+		require.Equal(t, retrievedState.Slot, slot)
+	})
+
+	t.Run("TestGetInvalidSlot", func(t *testing.T) {
+		size := phase0.Slot(10)
+		os := newOperatorState(size)
+
+		slot := phase0.Slot(5)
+		retrievedState := os.Get(slot)
+
+		require.Nil(t, retrievedState)
+	})
+
+	t.Run("TestMaxSlot", func(t *testing.T) {
+		size := phase0.Slot(10)
+		os := newOperatorState(size)
+
+		slot := phase0.Slot(5)
+		epoch := phase0.Epoch(1)
+		signerState := &SignerState{Slot: slot}
+
+		os.Set(slot, epoch, signerState)
+		require.Equal(t, os.MaxSlot(), slot)
+	})
+
+	t.Run("TestDutyCount", func(t *testing.T) {
+		size := phase0.Slot(10)
+		os := newOperatorState(size)
+
+		slot := phase0.Slot(5)
+		epoch := phase0.Epoch(1)
+		signerState := &SignerState{Slot: slot}
+
+		os.Set(slot, epoch, signerState)
+
+		require.Equal(t, os.DutyCount(epoch), 1)
+		require.Equal(t, os.DutyCount(epoch-1), 0)
+
+		slot2 := phase0.Slot(6)
+		epoch2 := phase0.Epoch(2)
+		signerState2 := &SignerState{Slot: slot2}
+
+		os.Set(slot2, epoch2, signerState2)
+
+		require.Equal(t, os.DutyCount(epoch2), 1)
+		require.Equal(t, os.DutyCount(epoch), 1)
+		require.Equal(t, os.DutyCount(epoch-1), 0)
+	})
+
+	t.Run("TestIncrementLastEpochDuties", func(t *testing.T) {
+		size := phase0.Slot(10)
+		os := newOperatorState(size)
+
+		slot := phase0.Slot(5)
+		epoch := phase0.Epoch(1)
+		signerState := &SignerState{Slot: slot}
+
+		os.Set(slot, epoch, signerState)
+		require.Equal(t, os.DutyCount(epoch), 1)
+
+		slot2 := phase0.Slot(6)
+		signerState2 := &SignerState{Slot: slot2}
+		os.Set(slot2, epoch, signerState2)
+
+		require.Equal(t, os.DutyCount(epoch), 2)
+	})
+}
diff --git a/message/validation/consensus_validation.go b/message/validation/consensus_validation.go
index 8f9bf7cb16..fbfc09de7d 100644
--- a/message/validation/consensus_validation.go
+++ b/message/validation/consensus_validation.go
@@ -4,15 +4,16 @@ package validation

 import (
 	"bytes"
+	"crypto/sha256"
+	"encoding/binary"
 	"encoding/hex"
 	"fmt"
 	"time"

-	specqbft "github.com/ssvlabs/ssv-spec/qbft"
-	spectypes "github.com/ssvlabs/ssv-spec/types"
-
 	"github.com/attestantio/go-eth2-client/spec/phase0"
 	"github.com/ssvlabs/ssv-spec-pre-cc/types"
+	specqbft "github.com/ssvlabs/ssv-spec/qbft"
+	spectypes "github.com/ssvlabs/ssv-spec/types"

 	"github.com/ssvlabs/ssv/protocol/v2/message"
 	"github.com/ssvlabs/ssv/protocol/v2/qbft/roundtimer"
@@ -21,8 +22,7 @@ import (

 func (mv *messageValidator) validateConsensusMessage(
 	signedSSVMessage *spectypes.SignedSSVMessage,
-	committee []spectypes.OperatorID,
-	validatorIndices []phase0.ValidatorIndex,
+	committeeInfo CommitteeInfo,
 	receivedAt time.Time,
 ) (*specqbft.Message, error) {
 	ssvMessage := signedSSVMessage.SSVMessage
@@ -43,17 +43,17 @@ func (mv *messageValidator) validateConsensusMessage(

 	mv.metrics.ConsensusMsgType(consensusMessage.MsgType, len(signedSSVMessage.GetOperatorIDs()))

-	if err := mv.validateConsensusMessageSemantics(signedSSVMessage, consensusMessage, committee); err != nil {
+	if err := mv.validateConsensusMessageSemantics(signedSSVMessage, consensusMessage, committeeInfo.operatorIDs); err != nil {
consensusMessage, committeeInfo.operatorIDs); err != nil { return consensusMessage, err } state := mv.consensusState(signedSSVMessage.SSVMessage.GetID()) - if err := mv.validateQBFTLogic(signedSSVMessage, consensusMessage, committee, receivedAt, state); err != nil { + if err := mv.validateQBFTLogic(signedSSVMessage, consensusMessage, committeeInfo.operatorIDs, receivedAt, state); err != nil { return consensusMessage, err } - if err := mv.validateQBFTMessageByDutyLogic(signedSSVMessage, consensusMessage, validatorIndices, receivedAt, state); err != nil { + if err := mv.validateQBFTMessageByDutyLogic(signedSSVMessage, consensusMessage, committeeInfo.indices, receivedAt, state); err != nil { return consensusMessage, err } @@ -68,7 +68,9 @@ func (mv *messageValidator) validateConsensusMessage( } } - mv.updateConsensusState(signedSSVMessage, consensusMessage, state) + if err := mv.updateConsensusState(signedSSVMessage, consensusMessage, state); err != nil { + return consensusMessage, err + } return consensusMessage, nil } @@ -80,13 +82,17 @@ func (mv *messageValidator) validateConsensusMessageSemantics( ) error { signers := signedSSVMessage.GetOperatorIDs() quorumSize, _ := ssvtypes.ComputeQuorumAndPartialQuorum(len(committee)) + msgType := consensusMessage.MsgType + if len(signers) > 1 { - if consensusMessage.MsgType != specqbft.CommitMsgType { + // Rule: Decided msg with different type than Commit + if msgType != specqbft.CommitMsgType { e := ErrNonDecidedWithMultipleSigners e.got = len(signers) return e } + // Rule: Number of signers must be >= quorum size if uint64(len(signers)) < quorumSize { e := ErrDecidedNotEnoughSigners e.want = quorumSize @@ -96,8 +102,8 @@ func (mv *messageValidator) validateConsensusMessageSemantics( } if len(signedSSVMessage.FullData) != 0 { - if consensusMessage.MsgType == specqbft.PrepareMsgType || - consensusMessage.MsgType == specqbft.CommitMsgType && len(signedSSVMessage.GetOperatorIDs()) == 1 { + // Rule: Prepare or commit messages must not have full data + if msgType == specqbft.PrepareMsgType || (msgType == specqbft.CommitMsgType && len(signers) == 1) { return ErrPrepareOrCommitWithFullData } @@ -108,21 +114,25 @@ func (mv *messageValidator) validateConsensusMessageSemantics( return e } + // Rule: Full data hash must match root if hashedFullData != consensusMessage.Root { return ErrInvalidHash } } - if !mv.validConsensusMsgType(consensusMessage.MsgType) { + // Rule: Consensus message type must be valid + if !mv.validConsensusMsgType(msgType) { return ErrUnknownQBFTMessageType } + // Rule: Round must not be zero if consensusMessage.Round == specqbft.NoRound { - e := ErrInvalidRound + e := ErrZeroRound e.got = specqbft.NoRound return e } + // Rule: consensus message must have the same identifier as the ssv message's identifier if !bytes.Equal(consensusMessage.Identifier, signedSSVMessage.SSVMessage.MsgID[:]) { e := ErrMismatchedIdentifier e.want = hex.EncodeToString(signedSSVMessage.SSVMessage.MsgID[:]) @@ -145,6 +155,7 @@ func (mv *messageValidator) validateQBFTLogic( state *consensusState, ) error { if consensusMessage.MsgType == specqbft.ProposalMsgType { + // Rule: Signer must be the leader leader := mv.roundRobinProposer(consensusMessage.Height, consensusMessage.Round, committee) if signedSSVMessage.GetOperatorIDs()[0] != leader { err := ErrSignerNotLeader @@ -156,32 +167,58 @@ func (mv *messageValidator) validateQBFTLogic( msgSlot := phase0.Slot(consensusMessage.Height) for _, signer := range signedSSVMessage.GetOperatorIDs() { - signerState := 
state.GetSignerState(signer) + signerStateBySlot := state.GetOrCreate(signer) + signerState := signerStateBySlot.Get(msgSlot) if signerState == nil { continue } - // It should be checked after ErrNonDecidedWithMultipleSigners - signerCount := len(signedSSVMessage.GetOperatorIDs()) - if signerCount > 1 { - if _, ok := signerState.SeenDecidedLengths[signerCount]; ok { - return ErrDecidedWithSameNumberOfSigners + if len(signedSSVMessage.GetOperatorIDs()) == 1 { + // Rule: Ignore if peer already advanced to a later round. Only for non-decided messages + if consensusMessage.Round < signerState.Round { + // Signers aren't allowed to decrease their round. + // If they've sent a future message due to clock error, + // they'd have to wait for the next slot/round to be accepted. + err := ErrRoundAlreadyAdvanced + err.want = signerState.Round + err.got = consensusMessage.Round + return err } - } - if msgSlot == signerState.Slot && consensusMessage.Round == signerState.Round { - if len(signedSSVMessage.FullData) != 0 && signerState.ProposalData != nil && !bytes.Equal(signerState.ProposalData, signedSSVMessage.FullData) { - return ErrDuplicatedProposalWithDifferentData + if consensusMessage.Round == signerState.Round { + // Rule: Peer must not send two proposals with different data + if len(signedSSVMessage.FullData) != 0 && signerState.ProposalData != nil && !bytes.Equal(signerState.ProposalData, signedSSVMessage.FullData) { + return ErrDuplicatedProposalWithDifferentData + } + + // Rule: Peer must send only 1 proposal, 1 prepare, 1 commit, and 1 round-change per round + limits := maxMessageCounts() + if err := signerState.MessageCounts.ValidateConsensusMessage(signedSSVMessage, consensusMessage, limits); err != nil { + return err + } } - - limits := maxMessageCounts(len(committee)) - if err := signerState.MessageCounts.ValidateConsensusMessage(signedSSVMessage, consensusMessage, limits); err != nil { + } else if len(signedSSVMessage.GetOperatorIDs()) > 1 { + // Rule: Decided msg can't have the same signers as previously sent for the same duty + encodedOperators, err := encodeOperators(signedSSVMessage.GetOperatorIDs()) + if err != nil { return err } + + if _, ok := signerState.SeenSigners[encodedOperators]; ok { + return ErrDecidedWithSameSigners + } } } - return mv.roundBelongsToAllowedSpread(signedSSVMessage, consensusMessage, receivedAt) + if len(signedSSVMessage.GetOperatorIDs()) == 1 { + // Rule: Round must be within the allowed spread around the estimated round (at most 2 rounds behind or 1 ahead). Only for non-decided messages + if err := mv.roundBelongsToAllowedSpread(signedSSVMessage, consensusMessage, receivedAt); err != nil { + return err + } + } + + return nil } func (mv *messageValidator) validateQBFTMessageByDutyLogic( @@ -191,7 +228,22 @@ func (mv *messageValidator) validateQBFTMessageByDutyLogic( receivedAt time.Time, state *consensusState, ) error { + // Rule: Height must not be "old". I.e., signer must not have already advanced to a later slot. 
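+ // Committee runners are exempt below: a committee may have duties for several slots in flight at once (committee and aggregation messages stay valid for up to 34 slots), so their heights are not strictly monotonic per signer.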
+ if signedSSVMessage.SSVMessage.MsgID.GetRoleType() != spectypes.RoleCommittee { // Rule only for validator runners + for _, signer := range signedSSVMessage.GetOperatorIDs() { + signerStateBySlot := state.GetOrCreate(signer) + if maxSlot := signerStateBySlot.MaxSlot(); maxSlot > phase0.Slot(consensusMessage.Height) { + e := ErrSlotAlreadyAdvanced + e.got = consensusMessage.Height + e.want = maxSlot + return e + } + } + } + role := signedSSVMessage.SSVMessage.GetID().GetRoleType() + + // Rule: Duty role has consensus (true except for ValidatorRegistration and VoluntaryExit) if role == spectypes.RoleValidatorRegistration || role == spectypes.RoleVoluntaryExit { e := ErrUnexpectedConsensusMessage e.got = role @@ -203,21 +255,27 @@ func (mv *messageValidator) validateQBFTMessageByDutyLogic( return err } + // Rule: current slot(height) must be between duty's starting slot and: + // - duty's starting slot + 34 (committee and aggregation) + // - duty's starting slot + 3 (other types) if err := mv.validateSlotTime(msgSlot, role, receivedAt); err != nil { return err } + // Rule: valid number of duties per epoch: + // - 2 for aggregation, voluntary exit and validator registration + // - 2*V for Committee duty (where V is the number of validators in the cluster) (if no validator is doing sync committee in this epoch) + // - else, accept for _, signer := range signedSSVMessage.GetOperatorIDs() { - signerState := state.GetSignerState(signer) - if signerState == nil { - continue - } - - if err := mv.validNumberOfCommitteeDutiesPerEpoch(signedSSVMessage, validatorIndices, signerState, msgSlot); err != nil { + signerStateBySlot := state.GetOrCreate(signer) + if err := mv.validateDutyCount(signedSSVMessage.SSVMessage.GetID(), msgSlot, len(validatorIndices), signerStateBySlot); err != nil { return err } } + // Rule: Round cut-offs for roles: + // - 12 (committee and aggregation) + // - 6 (other types) if maxRound := mv.maxRound(role); consensusMessage.Round > maxRound { err := ErrRoundTooHigh err.got = fmt.Sprintf("%v (%v role)", consensusMessage.Round, message.RunnerRoleToString(role)) @@ -228,52 +286,54 @@ func (mv *messageValidator) validateQBFTMessageByDutyLogic( return nil } -func (mv *messageValidator) updateConsensusState(signedSSVMessage *spectypes.SignedSSVMessage, consensusMessage *specqbft.Message, state *consensusState) { +func (mv *messageValidator) updateConsensusState(signedSSVMessage *spectypes.SignedSSVMessage, consensusMessage *specqbft.Message, consensusState *consensusState) error { msgSlot := phase0.Slot(consensusMessage.Height) + msgEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) for _, signer := range signedSSVMessage.GetOperatorIDs() { - signerState := state.GetSignerState(signer) + stateBySlot := consensusState.GetOrCreate(signer) + signerState := stateBySlot.Get(msgSlot) if signerState == nil { - signerState = state.CreateSignerState(signer) - } - if msgSlot > signerState.Slot { - newEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) > mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) - signerState.ResetSlot(msgSlot, consensusMessage.Round, newEpoch) - } else if msgSlot == signerState.Slot && consensusMessage.Round > signerState.Round { - signerState.ResetRound(consensusMessage.Round) - } - - if len(signedSSVMessage.FullData) != 0 && signerState.ProposalData == nil { - signerState.ProposalData = signedSSVMessage.FullData + signerState = NewSignerState(phase0.Slot(consensusMessage.Height), consensusMessage.Round) + stateBySlot.Set(msgSlot, msgEpoch, signerState) 
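+ // Note: registering the new state via Set also counts one more duty for msgEpoch (see OperatorState.Set), which the per-epoch duty-count rule relies on.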
+ } else { + if consensusMessage.Round > signerState.Round { + signerState.Reset(phase0.Slot(consensusMessage.Height), consensusMessage.Round) + } } - signerCount := len(signedSSVMessage.GetOperatorIDs()) - if signerCount > 1 { - signerState.SeenDecidedLengths[signerCount] = struct{}{} + if err := mv.processSignerState(signedSSVMessage, consensusMessage, signerState); err != nil { + return err } - - signerState.MessageCounts.RecordConsensusMessage(signedSSVMessage, consensusMessage) } + + return nil } -func (mv *messageValidator) validNumberOfCommitteeDutiesPerEpoch( - signedSSVMessage *spectypes.SignedSSVMessage, - validatorIndices []phase0.ValidatorIndex, - signerState *SignerState, - msgSlot phase0.Slot, -) error { - newDutyInSameEpoch := false - if msgSlot > signerState.Slot && mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) == mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) { - newDutyInSameEpoch = true +func (mv *messageValidator) processSignerState(signedSSVMessage *spectypes.SignedSSVMessage, consensusMessage *specqbft.Message, signerState *SignerState) error { + if len(signedSSVMessage.FullData) != 0 && consensusMessage.MsgType == specqbft.ProposalMsgType { + signerState.ProposalData = signedSSVMessage.FullData } - if err := mv.validateDutyCount(validatorIndices, signerState, signedSSVMessage.SSVMessage.GetID(), newDutyInSameEpoch); err != nil { - return err + signerCount := len(signedSSVMessage.GetOperatorIDs()) + if signerCount > 1 { + encodedOperators, err := encodeOperators(signedSSVMessage.GetOperatorIDs()) + if err != nil { + // encodeOperators must never return an error here + return ErrEncodeOperators + } + + signerState.SeenSigners[encodedOperators] = struct{}{} } + signerState.MessageCounts.RecordConsensusMessage(signedSSVMessage, consensusMessage) return nil } +func (mv *messageValidator) maxSlotsInState() phase0.Slot { + return phase0.Slot(mv.netCfg.SlotsPerEpoch()) + lateSlotAllowance +} + func (mv *messageValidator) validateJustifications(message *specqbft.Message) error { pj, err := message.GetPrepareJustifications() if err != nil { @@ -282,6 +342,7 @@ func (mv *messageValidator) validateJustifications(message *specqbft.Message) er return e } + // Rule: Can only exist for Proposal messages if len(pj) != 0 && message.MsgType != specqbft.ProposalMsgType { e := ErrUnexpectedPrepareJustifications e.got = message.MsgType @@ -295,6 +356,7 @@ func (mv *messageValidator) validateJustifications(message *specqbft.Message) er return e } + // Rule: Can only exist for Proposal or Round-Change messages if len(rcj) != 0 && message.MsgType != specqbft.ProposalMsgType && message.MsgType != specqbft.RoundChangeMsgType { e := ErrUnexpectedRoundChangeJustifications e.got = message.MsgType @@ -304,28 +366,6 @@ func (mv *messageValidator) validateJustifications(message *specqbft.Message) er return nil } -func (mv *messageValidator) validateBeaconDuty( - role spectypes.RunnerRole, - slot phase0.Slot, - indices []phase0.ValidatorIndex, -) error { - if role == spectypes.RoleProposer { - epoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(slot) - if mv.dutyStore != nil && mv.dutyStore.Proposer.ValidatorDuty(epoch, slot, indices[0]) == nil { - return ErrNoDuty - } - } - - if role == spectypes.RoleSyncCommitteeContribution { - period := mv.netCfg.Beacon.EstimatedSyncCommitteePeriodAtEpoch(mv.netCfg.Beacon.EstimatedEpochAtSlot(slot)) - if mv.dutyStore != nil && mv.dutyStore.SyncCommittee.Duty(period, indices[0]) == nil { - return ErrNoDuty - } - } - - return nil -} - func (mv *messageValidator) maxRound(role 
spectypes.RunnerRole) specqbft.Round { switch role { case spectypes.RoleCommittee, spectypes.RoleAggregator: // TODO: check if value for aggregator is correct as there are messages on stage exceeding the limit @@ -338,12 +378,12 @@ func (mv *messageValidator) maxRound(role spectypes.RunnerRole) specqbft.Round { } func (mv *messageValidator) currentEstimatedRound(sinceSlotStart time.Duration) specqbft.Round { - if currentQuickRound := specqbft.FirstRound + specqbft.Round(sinceSlotStart/roundtimer.QuickTimeout); currentQuickRound <= specqbft.Round(roundtimer.QuickTimeoutThreshold) { + if currentQuickRound := specqbft.FirstRound + specqbft.Round(sinceSlotStart/roundtimer.QuickTimeout); currentQuickRound <= roundtimer.QuickTimeoutThreshold { return currentQuickRound } - sinceFirstSlowRound := sinceSlotStart - (time.Duration(specqbft.Round(roundtimer.QuickTimeoutThreshold)) * roundtimer.QuickTimeout) - estimatedRound := specqbft.Round(roundtimer.QuickTimeoutThreshold) + specqbft.FirstRound + specqbft.Round(sinceFirstSlowRound/roundtimer.SlowTimeout) + sinceFirstSlowRound := sinceSlotStart - (time.Duration(roundtimer.QuickTimeoutThreshold) * roundtimer.QuickTimeout) + estimatedRound := roundtimer.QuickTimeoutThreshold + specqbft.FirstRound + specqbft.Round(sinceFirstSlowRound/roundtimer.SlowTimeout) return estimatedRound } @@ -378,7 +418,7 @@ func (mv *messageValidator) roundBelongsToAllowedSpread( role := signedSSVMessage.SSVMessage.GetID().GetRoleType() if consensusMessage.Round < lowestAllowed || consensusMessage.Round > highestAllowed { - e := ErrEstimatedRoundTooFar + e := ErrEstimatedRoundNotInAllowedSpread e.got = fmt.Sprintf("%v (%v role)", consensusMessage.Round, message.RunnerRoleToString(role)) e.want = fmt.Sprintf("between %v and %v (%v role) / %v passed", lowestAllowed, highestAllowed, message.RunnerRoleToString(role), sinceSlotStart) return e @@ -396,3 +436,14 @@ func (mv *messageValidator) roundRobinProposer(height specqbft.Height, round spe index := (firstRoundIndex + int(round) - int(specqbft.FirstRound)) % len(committee) return committee[index] } + +func encodeOperators(operators []spectypes.OperatorID) ([sha256.Size]byte, error) { + buf := new(bytes.Buffer) + for _, operator := range operators { + if err := binary.Write(buf, binary.LittleEndian, operator); err != nil { + return [sha256.Size]byte{}, err + } + } + hash := sha256.Sum256(buf.Bytes()) + return hash, nil +} diff --git a/message/validation/consensus_validation_test.go b/message/validation/consensus_validation_test.go index a81c6b89f7..7207a53f74 100644 --- a/message/validation/consensus_validation_test.go +++ b/message/validation/consensus_validation_test.go @@ -70,27 +70,27 @@ func TestMessageValidator_currentEstimatedRound(t *testing.T) { { name: "QuickTimeout*9 - expected first+8 round", sinceSlotStart: roundtimer.QuickTimeout * time.Duration(roundtimer.QuickTimeoutThreshold+1), - want: specqbft.Round(roundtimer.QuickTimeoutThreshold) + 1, + want: roundtimer.QuickTimeoutThreshold + 1, }, { name: "QuickTimeout*10 - expected first+8 round", sinceSlotStart: roundtimer.QuickTimeout * time.Duration(roundtimer.QuickTimeoutThreshold+2), - want: specqbft.Round(roundtimer.QuickTimeoutThreshold + 1), + want: roundtimer.QuickTimeoutThreshold + 1, }, { name: "(QuickTimeout*8 + SlowTimeout) - expected first+9 round", sinceSlotStart: roundtimer.QuickTimeout*time.Duration(roundtimer.QuickTimeoutThreshold) + roundtimer.SlowTimeout, - want: specqbft.Round(roundtimer.QuickTimeoutThreshold + 2), + want: 
roundtimer.QuickTimeoutThreshold + 2, }, { name: "(QuickTimeout*8 + SlowTimeout*2) - expected first+10 round", sinceSlotStart: roundtimer.QuickTimeout*time.Duration(roundtimer.QuickTimeoutThreshold) + roundtimer.SlowTimeout*2, - want: specqbft.Round(roundtimer.QuickTimeoutThreshold + 3), + want: roundtimer.QuickTimeoutThreshold + 3, }, { name: "(QuickTimeout*8 + SlowTimeout*3) - expected first+11 round", sinceSlotStart: roundtimer.QuickTimeout*time.Duration(roundtimer.QuickTimeoutThreshold) + roundtimer.SlowTimeout*3, - want: specqbft.Round(roundtimer.QuickTimeoutThreshold + 4), + want: roundtimer.QuickTimeoutThreshold + 4, }, } diff --git a/message/validation/const.go b/message/validation/const.go index 609219bbbf..c56c315891 100644 --- a/message/validation/const.go +++ b/message/validation/const.go @@ -7,20 +7,56 @@ import ( const ( // lateMessageMargin is the duration past a message's TTL in which it is still considered valid. lateMessageMargin = time.Second * 3 - // clockErrorTolerance is the maximum amount of clock error we expect to see between nodes. - clockErrorTolerance = time.Millisecond * 50 - - maxConsensusMsgSize = 8388608 // TODO: calculate new value - maxPartialSignatureMsgSize = 1952 - maxPayloadSize = maxConsensusMsgSize - maxSignedMsgSize = 4 + 56 + maxPayloadSize // Max possible MsgType + MsgID + Data - maxEncodedMsgSize = maxSignedMsgSize + maxSignedMsgSize/10 // 10% for encoding overhead - allowedRoundsInFuture = 1 - allowedRoundsInPast = 2 - lateSlotAllowance = 2 - rsaSignatureSize = 256 - blsSignatureSize = 96 - syncCommitteeSize = 512 - maxSignaturesInSyncCommitteeContribution = 13 + clockErrorTolerance = time.Millisecond * 50 + allowedRoundsInFuture = 1 + allowedRoundsInPast = 2 + lateSlotAllowance = 2 + syncCommitteeSize = 512 + rsaSignatureSize = 256 + operatorIDSize = 8 // uint64 + slotSize = 8 // uint64 + validatorIndexSize = 8 // uint64 + identifierSize = 56 + rootSize = 32 + maxSignatures = 13 +) + +const ( + qbftMsgTypeSize = 8 // uint64 + heightSize = 8 // uint64 + roundSize = 8 // uint64 + maxNoJustificationSize = 3616 // from KB + max1JustificationSize = 50624 // from KB + maxConsensusMsgSize = qbftMsgTypeSize + heightSize + roundSize + identifierSize + rootSize + roundSize + maxSignatures*(maxNoJustificationSize+max1JustificationSize) +) + +const ( + partialSignatureSize = 96 + partialSignatureMsgSize = partialSignatureSize + rootSize + operatorIDSize + validatorIndexSize + maxPartialSignatureMessages = 1000 + partialSigMsgTypeSize = 8 // uint64 + maxPartialSignatureMsgsSize = partialSigMsgTypeSize + slotSize + maxPartialSignatureMessages*partialSignatureMsgSize ) + +const ( + msgTypeSize = 8 // uint64 + maxSignaturesSize = maxSignatures * rsaSignatureSize + maxOperatorIDSize = maxSignatures * operatorIDSize + maxFullDataSize = 5243144 // from spectypes.SignedSSVMessage +) + +var ( + maxPayloadDataSize = max(maxConsensusMsgSize, maxPartialSignatureMsgsSize) // not const because of max TODO: const after Go 1.21 + maxSignedMsgSize = maxSignaturesSize + maxOperatorIDSize + msgTypeSize + identifierSize + maxPayloadDataSize + maxFullDataSize + maxEncodedMsgSize = maxSignedMsgSize + maxSignedMsgSize/10 // 10% for encoding overhead +) + +// TODO: delete after updating to Go 1.21 +func max(a, b int) int { + if a > b { + return a + } + + return b +} diff --git a/message/validation/errors.go b/message/validation/errors.go index 441abff7ed..06c0713f32 100644 --- a/message/validation/errors.go +++ b/message/validation/errors.go @@ -52,31 +52,36 @@ func (e Error) 
Text() string { } var ( - ErrGenesisSSVMessage = Error{text: "genesis ssv message"} - ErrGenesisSignedSSVMessage = Error{text: "genesis signed ssv message"} - ErrEmptyData = Error{text: "empty data"} ErrWrongDomain = Error{text: "wrong domain"} ErrNoShareMetadata = Error{text: "share has no metadata"} ErrUnknownValidator = Error{text: "unknown validator"} ErrValidatorLiquidated = Error{text: "validator is liquidated"} ErrValidatorNotAttesting = Error{text: "validator is not attesting"} - ErrEarlyMessage = Error{text: "early message"} - ErrLateMessage = Error{text: "late message"} - ErrTooManySameTypeMessagesPerRound = Error{text: "too many messages of same type per round"} - ErrRoundTooHigh = Error{text: "round is too high for this role" /*, reject: true*/} // TODO: enable reject + ErrEarlySlotMessage = Error{text: "message was sent before slot starts"} + ErrLateSlotMessage = Error{text: "current time is above duty's start +34(committee and aggregator) or +3(else) slots"} + ErrSlotAlreadyAdvanced = Error{text: "signer has already advanced to a later slot"} + ErrRoundAlreadyAdvanced = Error{text: "signer has already advanced to a later round"} + ErrDecidedWithSameSigners = Error{text: "decided with same signers"} + ErrPubSubDataTooBig = Error{text: "pub-sub message data too big"} + ErrIncorrectTopic = Error{text: "incorrect topic"} + ErrNonExistentCommitteeID = Error{text: "committee ID doesn't exist"} + ErrRoundTooHigh = Error{text: "round is too high for this role"} + ErrValidatorIndexMismatch = Error{text: "partial signature validator index not found"} + ErrTooManyDutiesPerEpoch = Error{text: "too many duties per epoch"} + ErrNoDuty = Error{text: "no duty for this epoch"} + ErrEstimatedRoundNotInAllowedSpread = Error{text: "message round is too far from estimated"} + ErrEmptyData = Error{text: "empty data", reject: true} + ErrMismatchedIdentifier = Error{text: "identifier mismatch", reject: true} ErrSignatureVerification = Error{text: "signature verification", reject: true} ErrPubSubMessageHasNoData = Error{text: "pub-sub message has no data", reject: true} - ErrPubSubDataTooBig = Error{text: "pub-sub message data too big", reject: true} ErrMalformedPubSubMessage = Error{text: "pub-sub message is malformed", reject: true} ErrNilSignedSSVMessage = Error{text: "signed ssv message is nil", reject: true} ErrNilSSVMessage = Error{text: "ssv message is nil", reject: true} - ErrIncorrectTopic = Error{text: "incorrect topic", reject: true} ErrSSVDataTooBig = Error{text: "ssv message data too big", reject: true} ErrInvalidRole = Error{text: "invalid role", reject: true} ErrUnexpectedConsensusMessage = Error{text: "unexpected consensus message for this role", reject: true} ErrNoSigners = Error{text: "no signers", reject: true} ErrWrongRSASignatureSize = Error{text: "wrong RSA signature size", reject: true} - ErrEmptySignature = Error{text: "empty signature", reject: true} ErrZeroSigner = Error{text: "zero signer ID", reject: true} ErrSignerNotInCommittee = Error{text: "signer is not in committee", reject: true} ErrDuplicatedSigner = Error{text: "signer is duplicated", reject: true} @@ -85,7 +90,6 @@ var ( ErrInconsistentSigners = Error{text: "signer is not expected", reject: true} ErrInvalidHash = Error{text: "root doesn't match full data hash", reject: true} ErrFullDataHash = Error{text: "couldn't hash root", reject: true} - ErrEstimatedRoundTooFar = Error{text: "message round is too far from estimated"} ErrUndecodableMessageData = Error{text: "message data could not be decoded", 
reject: true} ErrEventMessage = Error{text: "unexpected event message", reject: true} ErrDKGMessage = Error{text: "unexpected DKG message", reject: true} @@ -100,23 +104,20 @@ var ( ErrUnexpectedPrepareJustifications = Error{text: "prepare justifications unexpected for this message type", reject: true} ErrMalformedRoundChangeJustifications = Error{text: "malformed round change justifications", reject: true} ErrUnexpectedRoundChangeJustifications = Error{text: "round change justifications unexpected for this message type", reject: true} - ErrTooManyDutiesPerEpoch = Error{text: "too many duties per epoch", reject: true} - ErrNoDuty = Error{text: "no duty for this epoch", reject: true} ErrDeserializePublicKey = Error{text: "deserialize public key", reject: true} ErrNoPartialSignatureMessages = Error{text: "no partial signature messages", reject: true} - ErrNonExistentCommitteeID = Error{text: "committee ID doesn't exist", reject: true} ErrNoValidators = Error{text: "no validators for this committee ID", reject: true} - ErrValidatorIndexMismatch = Error{text: "partial signature validator index not found", reject: true} ErrNoSignatures = Error{text: "no signatures", reject: true} - ErrSignatureOperatorIDLengthMismatch = Error{text: "signature and operator ID length mismatch", reject: true} + ErrSignersAndSignaturesWithDifferentLength = Error{text: "signature and operator ID length mismatch", reject: true} ErrPartialSigOneSigner = Error{text: "partial signature message must have only one signer", reject: true} ErrPrepareOrCommitWithFullData = Error{text: "prepare or commit with full data", reject: true} - ErrMismatchedIdentifier = Error{text: "identifier mismatch", reject: true} ErrFullDataNotInConsensusMessage = Error{text: "full data not in consensus message", reject: true} - ErrTooManyPartialSignatureMessages = Error{text: "too many partial signature messages", reject: true} ErrTripleValidatorIndexInPartialSignatures = Error{text: "triple validator index in partial signatures", reject: true} - ErrInvalidRound = Error{text: "invalid round", reject: true} - ErrDecidedWithSameNumberOfSigners = Error{text: "decided with same number of signers", reject: true} + ErrZeroRound = Error{text: "zero round", reject: true} + ErrDuplicatedMessage = Error{text: "message is duplicated", reject: true} + ErrInvalidPartialSignatureTypeCount = Error{text: "sent more partial signature messages of a certain type than allowed", reject: true} + ErrTooManyPartialSignatureMessages = Error{text: "too many partial signature messages", reject: true} + ErrEncodeOperators = Error{text: "encode operators", reject: true} ) func (mv *messageValidator) handleValidationError(peerID peer.ID, decodedMessage *queue.DecodedSSVMessage, err error) pubsub.ValidationResult { diff --git a/message/validation/genesis/consensus_validation.go b/message/validation/genesis/consensus_validation.go index 10304941c8..85bae515e7 100644 --- a/message/validation/genesis/consensus_validation.go +++ b/message/validation/genesis/consensus_validation.go @@ -128,7 +128,7 @@ func (mv *messageValidator) validateConsensusMessage( newEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) > mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) signerState.ResetSlot(msgSlot, msgRound, newEpoch) } else if msgSlot == signerState.Slot && msgRound > signerState.Round { - signerState.ResetRound(msgRound) + signerState.Reset(msgRound) } if mv.hasFullData(signedMsg) && signerState.ProposalData == nil { diff --git a/message/validation/genesis/qbft_config.go 
b/message/validation/genesis/qbft_config.go deleted file mode 100644 index 97944a7696..0000000000 --- a/message/validation/genesis/qbft_config.go +++ /dev/null @@ -1,53 +0,0 @@ -package validation - -import ( - specqbft "github.com/ssvlabs/ssv-spec-pre-cc/qbft" - genesisspectypes "github.com/ssvlabs/ssv-spec-pre-cc/types" - spectypes "github.com/ssvlabs/ssv-spec-pre-cc/types" - - "github.com/ssvlabs/ssv/protocol/v2/qbft/roundtimer" - qbftstorage "github.com/ssvlabs/ssv/protocol/v2/qbft/storage" -) - -// qbftConfig is used in message validation and has no signature verification. -type qbftConfig struct { - domain spectypes.DomainType -} - -func newQBFTConfig(domain spectypes.DomainType) qbftConfig { - return qbftConfig{ - domain: domain, - } -} - -func (q qbftConfig) GetSigner() genesisspectypes.BeaconSigner { - panic("should not be called") -} - -func (q qbftConfig) GetSignatureDomainType() spectypes.DomainType { - return q.domain -} - -func (q qbftConfig) GetValueCheckF() specqbft.ProposedValueCheckF { - panic("should not be called") -} - -func (q qbftConfig) GetProposerF() specqbft.ProposerF { - panic("should not be called") -} - -func (q qbftConfig) GetNetwork() specqbft.Network { - panic("should not be called") -} - -func (q qbftConfig) GetStorage() qbftstorage.QBFTStore { - panic("should not be called") -} - -func (q qbftConfig) GetTimer() roundtimer.Timer { - panic("should not be called") -} - -func (q qbftConfig) VerifySignatures() bool { - return false -} diff --git a/message/validation/genesis/signer_state.go b/message/validation/genesis/signer_state.go index c7f5c437e3..f6fb7a1b1f 100644 --- a/message/validation/genesis/signer_state.go +++ b/message/validation/genesis/signer_state.go @@ -35,9 +35,9 @@ func (s *SignerState) ResetSlot(slot phase0.Slot, round specqbft.Round, newEpoch } } -// ResetRound resets the state's round, message counts, and proposal data to the given values. +// Reset resets the state's round, message counts, and proposal data to the given values. // It also updates the start time to the current time. -func (s *SignerState) ResetRound(round specqbft.Round) { +func (s *SignerState) Reset(round specqbft.Round) { s.Start = time.Now() s.Round = round s.MessageCounts = MessageCounts{} diff --git a/message/validation/logger_fields.go b/message/validation/logger_fields.go index dfbe4d4dec..9bf7f88a8c 100644 --- a/message/validation/logger_fields.go +++ b/message/validation/logger_fields.go @@ -21,7 +21,7 @@ type ConsensusFields struct { // LoggerFields provides details about a message. It's used for logging and metrics. type LoggerFields struct { - SenderID []byte + DutyExecutorID []byte Role spectypes.RunnerRole SSVMessageType spectypes.MsgType Slot phase0.Slot @@ -31,7 +31,7 @@ type LoggerFields struct { // AsZapFields returns zap logging fields for the descriptor. 
func (d LoggerFields) AsZapFields() []zapcore.Field { result := []zapcore.Field{ - fields.SenderID(d.SenderID), + fields.DutyExecutorID(d.DutyExecutorID), fields.Role(d.Role), zap.String("ssv_message_type", ssvmessage.MsgTypeToString(d.SSVMessageType)), fields.Slot(d.Slot), @@ -56,17 +56,21 @@ func (mv *messageValidator) buildLoggerFields(decodedMessage *queue.DecodedSSVMe return descriptor } - descriptor.SenderID = decodedMessage.GetID().GetDutyExecutorID() + descriptor.DutyExecutorID = decodedMessage.GetID().GetDutyExecutorID() descriptor.Role = decodedMessage.GetID().GetRoleType() descriptor.SSVMessageType = decodedMessage.GetType() switch m := decodedMessage.Body.(type) { case *specqbft.Message: - descriptor.Slot = phase0.Slot(m.Height) - descriptor.Consensus.Round = m.Round - descriptor.Consensus.QBFTMessageType = m.MsgType + if m != nil { + descriptor.Slot = phase0.Slot(m.Height) + descriptor.Consensus.Round = m.Round + descriptor.Consensus.QBFTMessageType = m.MsgType + } case *spectypes.PartialSignatureMessages: - descriptor.Slot = m.Slot + if m != nil { + descriptor.Slot = m.Slot + } } return descriptor diff --git a/message/validation/message_counts.go b/message/validation/message_counts.go index 13bdd88461..d1b105d781 100644 --- a/message/validation/message_counts.go +++ b/message/validation/message_counts.go @@ -15,19 +15,17 @@ type MessageCounts struct { Proposal int Prepare int Commit int - Decided int RoundChange int PostConsensus int } // String provides a formatted representation of the MessageCounts. func (c *MessageCounts) String() string { - return fmt.Sprintf("pre-consensus: %v, proposal: %v, prepare: %v, commit: %v, decided: %v, round change: %v, post-consensus: %v", + return fmt.Sprintf("pre-consensus: %v, proposal: %v, prepare: %v, commit: %v, round change: %v, post-consensus: %v", c.PreConsensus, c.Proposal, c.Prepare, c.Commit, - c.Decided, c.RoundChange, c.PostConsensus, ) @@ -39,34 +37,28 @@ func (c *MessageCounts) ValidateConsensusMessage(signedSSVMessage *spectypes.Sig switch msg.MsgType { case specqbft.ProposalMsgType: if c.Proposal >= limits.Proposal { - err := ErrTooManySameTypeMessagesPerRound + err := ErrDuplicatedMessage err.got = fmt.Sprintf("proposal, having %v", c.String()) return err } case specqbft.PrepareMsgType: if c.Prepare >= limits.Prepare { - err := ErrTooManySameTypeMessagesPerRound + err := ErrDuplicatedMessage err.got = fmt.Sprintf("prepare, having %v", c.String()) return err } case specqbft.CommitMsgType: if len(signedSSVMessage.GetOperatorIDs()) == 1 { if c.Commit >= limits.Commit { - err := ErrTooManySameTypeMessagesPerRound + err := ErrDuplicatedMessage err.got = fmt.Sprintf("commit, having %v", c.String()) return err } } - if len(signedSSVMessage.GetOperatorIDs()) > 1 { - if c.Decided >= limits.Decided { - err := ErrTooManySameTypeMessagesPerRound - err.got = fmt.Sprintf("decided, having %v", c.String()) - return err - } - } case specqbft.RoundChangeMsgType: if c.RoundChange >= limits.RoundChange { - err := ErrTooManySameTypeMessagesPerRound + err := ErrDuplicatedMessage + err.got = fmt.Sprintf("round change, having %v", c.String()) return err } @@ -83,13 +75,13 @@ func (c *MessageCounts) ValidatePartialSignatureMessage(m *spectypes.PartialSign switch m.Type { case spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig, spectypes.VoluntaryExitPartialSig: if c.PreConsensus > limits.PreConsensus { - err := ErrTooManySameTypeMessagesPerRound + err := 
ErrInvalidPartialSignatureTypeCount err.got = fmt.Sprintf("pre-consensus, having %v", c.String()) return err } case spectypes.PostConsensusPartialSig: if c.PostConsensus > limits.PostConsensus { - err := ErrTooManySameTypeMessagesPerRound + err := ErrInvalidPartialSignatureTypeCount err.got = fmt.Sprintf("post-consensus, having %v", c.String()) return err } @@ -108,13 +100,8 @@ func (c *MessageCounts) RecordConsensusMessage(signedSSVMessage *spectypes.Signe case specqbft.PrepareMsgType: c.Prepare++ case specqbft.CommitMsgType: - switch { - case len(signedSSVMessage.GetOperatorIDs()) == 1: + if len(signedSSVMessage.GetOperatorIDs()) == 1 { c.Commit++ - case len(signedSSVMessage.GetOperatorIDs()) > 1: - c.Decided++ - default: - panic("unexpected signers length 0") // 0 length should be checked before } case specqbft.RoundChangeMsgType: c.RoundChange++ @@ -136,21 +123,13 @@ func (c *MessageCounts) RecordPartialSignatureMessage(messages *spectypes.Partia } // maxMessageCounts is the maximum number of acceptable messages from a signer within a slot & round. -func maxMessageCounts(committeeSize int) MessageCounts { - maxDecided := maxDecidedCount(committeeSize) - +func maxMessageCounts() MessageCounts { return MessageCounts{ PreConsensus: 1, Proposal: 1, Prepare: 1, Commit: 1, - Decided: maxDecided, RoundChange: 1, PostConsensus: 1, } } - -func maxDecidedCount(committeeSize int) int { - f := (committeeSize - 1) / 3 - return committeeSize * (f + 1) // N * (f + 1) -} diff --git a/message/validation/partial_validation.go b/message/validation/partial_validation.go index 54d85f071b..ab0df08c5f 100644 --- a/message/validation/partial_validation.go +++ b/message/validation/partial_validation.go @@ -6,18 +6,16 @@ import ( "fmt" "time" + "github.com/attestantio/go-eth2-client/spec/phase0" specqbft "github.com/ssvlabs/ssv-spec/qbft" "github.com/ssvlabs/ssv-spec/types" spectypes "github.com/ssvlabs/ssv-spec/types" - - "github.com/attestantio/go-eth2-client/spec/phase0" "golang.org/x/exp/slices" ) func (mv *messageValidator) validatePartialSignatureMessage( signedSSVMessage *spectypes.SignedSSVMessage, - committee []spectypes.OperatorID, - validatorIndices []phase0.ValidatorIndex, + committeeInfo CommitteeInfo, receivedAt time.Time, ) ( *spectypes.PartialSignatureMessages, @@ -25,10 +23,10 @@ func (mv *messageValidator) validatePartialSignatureMessage( ) { ssvMessage := signedSSVMessage.SSVMessage - if len(ssvMessage.Data) > maxPartialSignatureMsgSize { + if len(ssvMessage.Data) > maxPartialSignatureMsgsSize { e := ErrSSVDataTooBig e.got = len(ssvMessage.Data) - e.want = maxPartialSignatureMsgSize + e.want = maxPartialSignatureMsgsSize return nil, e } @@ -39,13 +37,13 @@ func (mv *messageValidator) validatePartialSignatureMessage( return nil, e } - if err := mv.validatePartialSignatureMessageSemantics(signedSSVMessage, partialSignatureMessages, validatorIndices); err != nil { + if err := mv.validatePartialSignatureMessageSemantics(signedSSVMessage, partialSignatureMessages, committeeInfo.indices); err != nil { return nil, err } msgID := ssvMessage.GetID() state := mv.consensusState(msgID) - if err := mv.validatePartialSigMessagesByDutyLogic(signedSSVMessage, partialSignatureMessages, committee, validatorIndices, receivedAt, state); err != nil { + if err := mv.validatePartialSigMessagesByDutyLogic(signedSSVMessage, partialSignatureMessages, committeeInfo, receivedAt, state); err != nil { return nil, err } @@ -57,7 +55,7 @@ func (mv *messageValidator) validatePartialSignatureMessage( return 
partialSignatureMessages, e } - mv.updatePartialSignatureState(partialSignatureMessages, state.GetSignerState(signer)) + mv.updatePartialSignatureState(partialSignatureMessages, state, signer) return partialSignatureMessages, nil } @@ -69,41 +67,52 @@ func (mv *messageValidator) validatePartialSignatureMessageSemantics( ) error { role := signedSSVMessage.SSVMessage.GetID().GetRoleType() + // Rule: Partial Signature message must have 1 signer signers := signedSSVMessage.GetOperatorIDs() if len(signers) != 1 { return ErrPartialSigOneSigner } + signer := signers[0] + + // Rule: Partial signature message must not have full data if len(signedSSVMessage.FullData) > 0 { return ErrFullDataNotInConsensusMessage } + // Rule: Valid signature type if !mv.validPartialSigMsgType(partialSignatureMessages.Type) { e := ErrInvalidPartialSignatureType e.got = partialSignatureMessages.Type return e } + // Rule: Partial signature type must match expected type: + // - PostConsensusPartialSig, for Committee duty + // - RandaoPartialSig or PostConsensusPartialSig for Proposer + // - SelectionProofPartialSig or PostConsensusPartialSig for Aggregator + // - SelectionProofPartialSig or PostConsensusPartialSig for Sync committee contribution + // - ValidatorRegistrationPartialSig for Validator Registration + // - VoluntaryExitPartialSig for Voluntary Exit if !mv.partialSignatureTypeMatchesRole(partialSignatureMessages.Type, role) { return ErrPartialSignatureTypeRoleMismatch } + // Rule: Partial signature message must have at least one signature if len(partialSignatureMessages.Messages) == 0 { return ErrNoPartialSignatureMessages } for _, message := range partialSignatureMessages.Messages { - if len(message.PartialSignature) == 0 { - return ErrEmptySignature - } - - if message.Signer != signers[0] { + // Rule: Partial signature signer must be consistent + if message.Signer != signer { err := ErrInconsistentSigners - err.got = signers[0] + err.got = signer err.want = message.Signer return err } + // Rule: Validator index must match with validatorPK or one of CommitteeID's validators if !slices.Contains(validatorIndices, message.ValidatorIndex) { e := ErrValidatorIndexMismatch e.got = message.ValidatorIndex @@ -118,54 +127,69 @@ func (mv *messageValidator) validatePartialSignatureMessageSemantics( func (mv *messageValidator) validatePartialSigMessagesByDutyLogic( signedSSVMessage *spectypes.SignedSSVMessage, partialSignatureMessages *spectypes.PartialSignatureMessages, - committee []spectypes.OperatorID, - validatorIndices []phase0.ValidatorIndex, + committeeInfo CommitteeInfo, receivedAt time.Time, state *consensusState, ) error { role := signedSSVMessage.SSVMessage.GetID().GetRoleType() messageSlot := partialSignatureMessages.Slot - - if err := mv.validateBeaconDuty(signedSSVMessage.SSVMessage.GetID().GetRoleType(), messageSlot, validatorIndices); err != nil { - return err + signer := signedSSVMessage.GetOperatorIDs()[0] + signerStateBySlot := state.GetOrCreate(signer) + + // Rule: Height must not be "old". I.e., signer must not have already advanced to a later slot. 
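+ // A MaxSlot of zero is treated as "no state recorded for this signer yet", so the check below is skipped in that case.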
+ if signedSSVMessage.SSVMessage.MsgID.GetRoleType() != types.RoleCommittee { // Rule only for validator runners + maxSlot := signerStateBySlot.MaxSlot() + if maxSlot != 0 && maxSlot > partialSignatureMessages.Slot { + e := ErrSlotAlreadyAdvanced + e.got = partialSignatureMessages.Slot + e.want = maxSlot + return e + } } - signer := signedSSVMessage.GetOperatorIDs()[0] - signerState := state.GetSignerState(signer) - if signerState == nil { - signerState = state.CreateSignerState(signer) + if err := mv.validateBeaconDuty(signedSSVMessage.SSVMessage.GetID().GetRoleType(), messageSlot, committeeInfo.indices); err != nil { + return err } - if signerState != nil && messageSlot == signerState.Slot { - limits := maxMessageCounts(len(committee)) + if signerState := signerStateBySlot.Get(messageSlot); signerState != nil && signerState.Slot == messageSlot { + // Rule: peer must send only: + // - 1 PostConsensusPartialSig, for Committee duty + // - 1 RandaoPartialSig and 1 PostConsensusPartialSig for Proposer + // - 1 SelectionProofPartialSig and 1 PostConsensusPartialSig for Aggregator + // - 1 SelectionProofPartialSig and 1 PostConsensusPartialSig for Sync committee contribution + // - 1 ValidatorRegistrationPartialSig for Validator Registration + // - 1 VoluntaryExitPartialSig for Voluntary Exit + limits := maxMessageCounts() if err := signerState.MessageCounts.ValidatePartialSignatureMessage(partialSignatureMessages, limits); err != nil { return err } } + // Rule: current slot must be between duty's starting slot and: + // - duty's starting slot + 34 (committee and aggregation) + // - duty's starting slot + 3 (other duties) if err := mv.validateSlotTime(messageSlot, role, receivedAt); err != nil { return err } - messageEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(messageSlot) - stateEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) - newDutyInSameEpoch := false - if messageSlot > signerState.Slot && messageEpoch == stateEpoch { - newDutyInSameEpoch = true - } - - if err := mv.validateDutyCount(validatorIndices, signerState, signedSSVMessage.SSVMessage.GetID(), newDutyInSameEpoch); err != nil { + clusterValidatorCount := len(committeeInfo.indices) + // Rule: valid number of duties per epoch: + // - 2 for aggregation, voluntary exit and validator registration + // - 2*V for Committee duty (where V is the number of validators in the cluster) (if no validator is doing sync committee in this epoch) + // - else, accept + if err := mv.validateDutyCount(signedSSVMessage.SSVMessage.GetID(), messageSlot, clusterValidatorCount, signerStateBySlot); err != nil { return err } partialSignatureMessageCount := len(partialSignatureMessages.Messages) - clusterValidatorCount := len(validatorIndices) if signedSSVMessage.SSVMessage.MsgID.GetRoleType() == spectypes.RoleCommittee { + // Rule: The number of signatures must be <= min(2*V, V + SYNC_COMMITTEE_SIZE) where V is the number of validators assigned to the cluster if partialSignatureMessageCount > min(2*clusterValidatorCount, clusterValidatorCount+syncCommitteeSize) { return ErrTooManyPartialSignatureMessages } + // Rule: a ValidatorIndex can't appear more than 2 times in the []*PartialSignatureMessage list validatorIndexCount := make(map[phase0.ValidatorIndex]int) for _, message := range partialSignatureMessages.Messages { validatorIndexCount[message.ValidatorIndex]++ @@ -174,13 +198,15 @@ func (mv *messageValidator) validatePartialSigMessagesByDutyLogic( } } } else if signedSSVMessage.SSVMessage.MsgID.GetRoleType() == 
types.RoleSyncCommitteeContribution { - if partialSignatureMessageCount > maxSignaturesInSyncCommitteeContribution { + // Rule: The number of signatures must be <= MaxSignaturesInSyncCommitteeContribution for the sync committee contribution duty + if partialSignatureMessageCount > maxSignatures { e := ErrTooManyPartialSignatureMessages e.got = partialSignatureMessageCount e.want = maxSignatures return e } } else if partialSignatureMessageCount > 1 { + // Rule: The number of signatures must be 1 for the other types of duties e := ErrTooManyPartialSignatureMessages e.got = partialSignatureMessageCount e.want = 1 @@ -189,10 +215,19 @@ return nil } -func (mv *messageValidator) updatePartialSignatureState(partialSignatureMessages *spectypes.PartialSignatureMessages, signerState *SignerState) { - if partialSignatureMessages.Slot > signerState.Slot { - newEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(partialSignatureMessages.Slot) > mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) - signerState.ResetSlot(partialSignatureMessages.Slot, specqbft.FirstRound, newEpoch) +func (mv *messageValidator) updatePartialSignatureState( + partialSignatureMessages *spectypes.PartialSignatureMessages, + state *consensusState, + signer spectypes.OperatorID, +) { + stateBySlot := state.GetOrCreate(signer) + messageSlot := partialSignatureMessages.Slot + messageEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(messageSlot) + + signerState := stateBySlot.Get(messageSlot) + if signerState == nil || signerState.Slot != messageSlot { + signerState = NewSignerState(messageSlot, specqbft.FirstRound) + stateBySlot.Set(messageSlot, messageEpoch, signerState) } signerState.MessageCounts.RecordPartialSignatureMessage(partialSignatureMessages) diff --git a/message/validation/pubsub_validation.go b/message/validation/pubsub_validation.go index 063dccfe57..12f5421d39 100644 --- a/message/validation/pubsub_validation.go +++ b/message/validation/pubsub_validation.go @@ -5,10 +5,12 @@ import ( ) func (mv *messageValidator) validatePubSubMessage(pMsg *pubsub.Message) error { + // Rule: Pubsub.Message.Message.Data must not be empty if len(pMsg.GetData()) == 0 { return ErrPubSubMessageHasNoData } + // Rule: Pubsub.Message.Message.Data size upper limit if len(pMsg.GetData()) > maxEncodedMsgSize { e := ErrPubSubDataTooBig e.got = len(pMsg.GetData()) diff --git a/message/validation/signed_ssv_message.go b/message/validation/signed_ssv_message.go index 09bdb8e960..30bdba1647 100644 --- a/message/validation/signed_ssv_message.go +++ b/message/validation/signed_ssv_message.go @@ -5,29 +5,17 @@ import ( "encoding/hex" "fmt" - genesisspectypes "github.com/ssvlabs/ssv-spec-pre-cc/types" - spectypes "github.com/ssvlabs/ssv-spec/types" - pubsub "github.com/libp2p/go-libp2p-pubsub" + spectypes "github.com/ssvlabs/ssv-spec/types" "golang.org/x/exp/slices" - "github.com/ssvlabs/ssv/network/commons" ssvmessage "github.com/ssvlabs/ssv/protocol/v2/message" ) func (mv *messageValidator) decodeSignedSSVMessage(pMsg *pubsub.Message) (*spectypes.SignedSSVMessage, error) { + // Rule: Pubsub.Message.Message.Data decoding signedSSVMessage := &spectypes.SignedSSVMessage{} if err := signedSSVMessage.Decode(pMsg.GetData()); err != nil { - genesisSignedSSVMessage := &genesisspectypes.SignedSSVMessage{} - if err := genesisSignedSSVMessage.Decode(pMsg.GetData()); err == nil { - return nil, ErrGenesisSignedSSVMessage - } - - genesisSSVMessage := &genesisspectypes.SSVMessage{} - if err := 
genesisSSVMessage.Decode(pMsg.GetData()); err == nil { - return nil, ErrGenesisSSVMessage - } - e := ErrMalformedPubSubMessage e.innerErr = err return nil, e @@ -37,25 +25,43 @@ func (mv *messageValidator) decodeSignedSSVMessage(pMsg *pubsub.Message) (*spect } func (mv *messageValidator) validateSignedSSVMessage(signedSSVMessage *spectypes.SignedSSVMessage) error { + // Rule: SignedSSVMessage cannot be nil if signedSSVMessage == nil { return ErrNilSignedSSVMessage } - signers := signedSSVMessage.GetOperatorIDs() - - if len(signers) == 0 { + // Rule: Must have at least one signer + if len(signedSSVMessage.GetOperatorIDs()) == 0 { return ErrNoSigners } - if !slices.IsSorted(signers) { + // Rule: Must have at least one signature + if len(signedSSVMessage.Signatures) == 0 { + return ErrNoSignatures + } + + // Rule: Signature size + for _, signature := range signedSSVMessage.Signatures { + if len(signature) != rsaSignatureSize { + e := ErrWrongRSASignatureSize + e.got = len(signature) + return e + } + } + + // Rule: Signers must be sorted + if !slices.IsSorted(signedSSVMessage.GetOperatorIDs()) { return ErrSignersNotSorted } var prevSigner spectypes.OperatorID - for _, signer := range signers { + for _, signer := range signedSSVMessage.GetOperatorIDs() { + // Rule: Signer can't be zero if signer == 0 { return ErrZeroSigner } + + // Rule: Signers must be unique // This check assumes that signers is sorted, so this rule should be after the check for ErrSignersNotSorted. if signer == prevSigner { return ErrDuplicatedSigner @@ -63,30 +69,14 @@ func (mv *messageValidator) validateSignedSSVMessage(signedSSVMessage *spectypes prevSigner = signer } - signatures := signedSSVMessage.Signatures - - if len(signatures) == 0 { - return ErrNoSignatures - } - - for _, signature := range signatures { - if len(signature) == 0 { - return ErrEmptySignature - } - - if len(signature) != rsaSignatureSize { - e := ErrWrongRSASignatureSize - e.got = len(signature) - return e - } - } - - if len(signers) != len(signatures) { - e := ErrSignatureOperatorIDLengthMismatch - e.got = fmt.Sprintf("%d/%d", len(signers), len(signatures)) + // Rule: Len(Signers) must be equal to Len(Signatures) + if len(signedSSVMessage.GetOperatorIDs()) != len(signedSSVMessage.Signatures) { + e := ErrSignersAndSignaturesWithDifferentLength + e.got = fmt.Sprintf("%d/%d", len(signedSSVMessage.GetOperatorIDs()), len(signedSSVMessage.Signatures)) return e } + // Rule: SSVMessage cannot be nil ssvMessage := signedSSVMessage.SSVMessage if ssvMessage == nil { return ErrNilSSVMessage @@ -95,50 +85,51 @@ func (mv *messageValidator) validateSignedSSVMessage(signedSSVMessage *spectypes return nil } -func (mv *messageValidator) validateSSVMessage(ssvMessage *spectypes.SSVMessage, topic string) error { +func (mv *messageValidator) validateSSVMessage(ssvMessage *spectypes.SSVMessage) error { mv.metrics.SSVMessageType(ssvMessage.MsgType) + // Rule: SSVMessage.Data must not be empty if len(ssvMessage.Data) == 0 { return ErrEmptyData } + // SSVMessage.Data must respect the size limit + if len(ssvMessage.Data) > maxPayloadDataSize { + err := ErrSSVDataTooBig + err.got = len(ssvMessage.Data) + err.want = maxPayloadDataSize + return err + } + switch ssvMessage.MsgType { case spectypes.SSVConsensusMsgType, spectypes.SSVPartialSignatureMsgType: break case ssvmessage.SSVEventMsgType: + // Rule: Event message return ErrEventMessage case spectypes.DKGMsgType: + // Rule: DKG message return ErrDKGMessage default: + // Unknown message type e := ErrUnknownSSVMessageType 
e.got = ssvMessage.MsgType return e } - if !bytes.Equal(ssvMessage.MsgID.GetDomain(), mv.netCfg.Domain[:]) { + // Rule: If domain is different from self domain + if !bytes.Equal(ssvMessage.GetID().GetDomain(), mv.netCfg.Domain[:]) { err := ErrWrongDomain err.got = hex.EncodeToString(ssvMessage.MsgID.GetDomain()) err.want = hex.EncodeToString(mv.netCfg.Domain[:]) return err } + // Rule: If role is invalid if !mv.validRole(ssvMessage.GetID().GetRoleType()) { return ErrInvalidRole } - if !mv.topicMatches(ssvMessage, topic) { - e := ErrIncorrectTopic - e.got = topic - return e - } - - if len(ssvMessage.Data) > maxPayloadSize { - err := ErrSSVDataTooBig - err.got = len(ssvMessage.Data) - err.want = maxPayloadSize - return err - } - return nil } @@ -156,19 +147,8 @@ func (mv *messageValidator) validRole(roleType spectypes.RunnerRole) bool { } } -// topicMatches checks if the message was sent on the right topic. -func (mv *messageValidator) topicMatches(ssvMessage *spectypes.SSVMessage, topic string) bool { - var topics []string - if mv.committeeRole(ssvMessage.GetID().GetRoleType()) { - cid := spectypes.CommitteeID(ssvMessage.GetID().GetDutyExecutorID()[16:]) - topics = commons.CommitteeTopicID(cid) - } else { - topics = commons.ValidatorTopicID(ssvMessage.GetID().GetDutyExecutorID()) - } - return slices.Contains(topics, commons.GetTopicBaseName(topic)) -} - func (mv *messageValidator) belongsToCommittee(operatorIDs []spectypes.OperatorID, committee []spectypes.OperatorID) error { + // Rule: Signers must belong to validator committee or CommitteeID for _, signer := range operatorIDs { if !slices.Contains(committee, signer) { e := ErrSignerNotInCommittee diff --git a/message/validation/signer_state.go b/message/validation/signer_state.go index 061d29745f..a8dab8c02c 100644 --- a/message/validation/signer_state.go +++ b/message/validation/signer_state.go @@ -3,42 +3,34 @@ package validation // signer_state.go describes state of a signer. import ( - specqbft "github.com/ssvlabs/ssv-spec/qbft" + "crypto/sha256" "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/ssvlabs/ssv-spec/qbft" ) -// SignerState represents the state of a signer, including its start time, slot, round, -// message counts, proposal data, and the number of duties performed in the current epoch. +// SignerState represents the state of a signer, including its slot, round, +// message counts, proposal data, and the decided signer sets seen so far. type SignerState struct { - Slot phase0.Slot - Round specqbft.Round - MessageCounts MessageCounts - ProposalData []byte - EpochDuties int - SeenDecidedLengths map[int]struct{} + Slot phase0.Slot // index stores slot modulo, so we also need to store slot here + Round specqbft.Round + MessageCounts MessageCounts + ProposalData []byte + SeenSigners map[[sha256.Size]byte]struct{} } -// ResetSlot resets the state's slot, round, message counts, and proposal data to the given values. -// It also updates the start time to the current time and increments the epoch duties count if it's a new epoch. -func (s *SignerState) ResetSlot(slot phase0.Slot, round specqbft.Round, newEpoch bool) { - s.Slot = slot - s.Round = round - s.MessageCounts = MessageCounts{} - s.ProposalData = nil - if newEpoch { - s.EpochDuties = 1 - } else { - s.EpochDuties++ - } - s.SeenDecidedLengths = make(map[int]struct{}) +func NewSignerState(slot phase0.Slot, round specqbft.Round) *SignerState { + s := &SignerState{} + s.Reset(slot, round) + return s } -// ResetRound resets the state's round, message counts, and proposal data to the given values. +// Reset resets the state's slot, round, message counts, proposal data, and seen signers. 
-// It also updates the start time to the current time. -func (s *SignerState) ResetRound(round specqbft.Round) { +func (s *SignerState) Reset(slot phase0.Slot, round specqbft.Round) { + s.Slot = slot s.Round = round s.MessageCounts = MessageCounts{} s.ProposalData = nil - s.SeenDecidedLengths = make(map[int]struct{}) + s.SeenSigners = make(map[[sha256.Size]byte]struct{}) } diff --git a/message/validation/validation.go b/message/validation/validation.go index ccdcd25780..093eb90e51 100644 --- a/message/validation/validation.go +++ b/message/validation/validation.go @@ -6,6 +6,7 @@ package validation import ( "context" "encoding/hex" + "fmt" "sync" "time" @@ -14,9 +15,11 @@ import ( "github.com/libp2p/go-libp2p/core/peer" spectypes "github.com/ssvlabs/ssv-spec/types" "go.uber.org/zap" + "golang.org/x/exp/slices" "github.com/ssvlabs/ssv/message/signatureverifier" "github.com/ssvlabs/ssv/monitoring/metricsreporter" + "github.com/ssvlabs/ssv/network/commons" "github.com/ssvlabs/ssv/networkconfig" "github.com/ssvlabs/ssv/operator/duties/dutystore" "github.com/ssvlabs/ssv/protocol/v2/ssv/queue" @@ -91,9 +94,6 @@ func (mv *messageValidator) Validate(_ context.Context, peerID peer.ID, pmsg *pu reportDone := mv.reportPubSubMetrics(pmsg) defer reportDone() - // TODO: Alan revert blind accept - return mv.validateSelf(pmsg) - decodedMessage, err := mv.handlePubsubMessage(pmsg, time.Now()) if err != nil { return mv.handleValidationError(peerID, decodedMessage, err) @@ -118,21 +118,28 @@ func (mv *messageValidator) handlePubsubMessage(pMsg *pubsub.Message, receivedAt } func (mv *messageValidator) handleSignedSSVMessage(signedSSVMessage *spectypes.SignedSSVMessage, topic string, receivedAt time.Time) (*queue.DecodedSSVMessage, error) { + decodedMessage := &queue.DecodedSSVMessage{ + SignedSSVMessage: signedSSVMessage, + } + if err := mv.validateSignedSSVMessage(signedSSVMessage); err != nil { - return nil, err + return decodedMessage, err } - if err := mv.validateSSVMessage(signedSSVMessage.SSVMessage, topic); err != nil { - return nil, err + decodedMessage.SSVMessage = signedSSVMessage.SSVMessage + + if err := mv.validateSSVMessage(signedSSVMessage.SSVMessage); err != nil { + return decodedMessage, err } - committee, validatorIndices, err := mv.getCommitteeAndValidatorIndices(signedSSVMessage.SSVMessage.GetID()) + // TODO: leverage the ValidatorStore to keep track of committees' indices and return them in Committee methods (which already return a Committee struct that we should add an Indices filter to): https://github.com/ssvlabs/ssv/pull/1393#discussion_r1667681686 + committeeInfo, err := mv.getCommitteeAndValidatorIndices(signedSSVMessage.SSVMessage.GetID()) if err != nil { - return nil, err + return decodedMessage, err } - if err := mv.belongsToCommittee(signedSSVMessage.GetOperatorIDs(), committee); err != nil { - return nil, err + if err := mv.committeeChecks(signedSSVMessage, committeeInfo, topic); err != nil { + return decodedMessage, err } validationMu := mv.obtainValidationLock(signedSSVMessage.SSVMessage.GetID()) @@ -140,98 +147,119 @@ func (mv *messageValidator) handleSignedSSVMessage(signedSSVMessage *spectypes.S validationMu.Lock() defer validationMu.Unlock() - decodedMessage := &queue.DecodedSSVMessage{ - SignedSSVMessage: signedSSVMessage, - SSVMessage: signedSSVMessage.SSVMessage, - } - switch signedSSVMessage.SSVMessage.MsgType { case spectypes.SSVConsensusMsgType: - consensusMessage, err := mv.validateConsensusMessage(signedSSVMessage, committee, validatorIndices, receivedAt) + 
consensusMessage, err := mv.validateConsensusMessage(signedSSVMessage, committeeInfo, receivedAt) + decodedMessage.Body = consensusMessage if err != nil { - return nil, err + return decodedMessage, err } - decodedMessage.Body = consensusMessage case spectypes.SSVPartialSignatureMsgType: - partialSignatureMessages, err := mv.validatePartialSignatureMessage(signedSSVMessage, committee, validatorIndices, receivedAt) + partialSignatureMessages, err := mv.validatePartialSignatureMessage(signedSSVMessage, committeeInfo, receivedAt) + decodedMessage.Body = partialSignatureMessages if err != nil { - return nil, err + return decodedMessage, err } - decodedMessage.Body = partialSignatureMessages default: - panic("message type assertion should have been done") + panic("unreachable: message type assertion should have been done") } return decodedMessage, nil } +func (mv *messageValidator) committeeChecks(signedSSVMessage *spectypes.SignedSSVMessage, committeeInfo CommitteeInfo, topic string) error { + if err := mv.belongsToCommittee(signedSSVMessage.GetOperatorIDs(), committeeInfo.operatorIDs); err != nil { + return err + } + + // Rule: Check if message was sent in the correct topic + messageTopics := commons.CommitteeTopicID(committeeInfo.committeeID) + topicBaseName := commons.GetTopicBaseName(topic) + if !slices.Contains(messageTopics, topicBaseName) { + e := ErrIncorrectTopic + e.got = fmt.Sprintf("topic %v / base name %v", topic, topicBaseName) + e.want = messageTopics + return e + } + + return nil +} + func (mv *messageValidator) obtainValidationLock(messageID spectypes.MessageID) *sync.Mutex { // Lock this SSV message ID to prevent concurrent access to the same state. mv.validationMutex.Lock() + // TODO: make sure that we check that message ID exists in advance mutex, ok := mv.validationLocks[messageID] if !ok { mutex = &sync.Mutex{} mv.validationLocks[messageID] = mutex + // TODO: Clean the map when mutex won't be needed anymore. Now it's a mutex leak... } mv.validationMutex.Unlock() return mutex } -func (mv *messageValidator) getCommitteeAndValidatorIndices(msgID spectypes.MessageID) ([]spectypes.OperatorID, []phase0.ValidatorIndex, error) { +type CommitteeInfo struct { + operatorIDs []spectypes.OperatorID + indices []phase0.ValidatorIndex + committeeID spectypes.CommitteeID +} + +func (mv *messageValidator) getCommitteeAndValidatorIndices(msgID spectypes.MessageID) (CommitteeInfo, error) { if mv.committeeRole(msgID.GetRoleType()) { // TODO: add metrics and logs for committee role committeeID := spectypes.CommitteeID(msgID.GetDutyExecutorID()[16:]) - committee := mv.validatorStore.Committee(committeeID) // TODO: consider passing whole senderID + + // Rule: Cluster does not exist + committee := mv.validatorStore.Committee(committeeID) // TODO: consider passing whole duty executor ID if committee == nil { e := ErrNonExistentCommitteeID e.got = hex.EncodeToString(committeeID[:]) - return nil, nil, e - } - - validatorIndices := make([]phase0.ValidatorIndex, 0) - for _, v := range committee.Validators { - if v.BeaconMetadata != nil { - validatorIndices = append(validatorIndices, v.BeaconMetadata.Index) - } + return CommitteeInfo{}, e } - if len(validatorIndices) == 0 { - return nil, nil, ErrNoValidators + if len(committee.Indices) == 0 { + return CommitteeInfo{}, ErrNoValidators } - return committee.Operators, validatorIndices, nil + return CommitteeInfo{ + operatorIDs: committee.Operators, + indices: committee.Indices, + committeeID: committeeID, + }, nil } - // #TODO fixme. 
can be not only publicKey, but also committeeID publicKey, err := ssvtypes.DeserializeBLSPublicKey(msgID.GetDutyExecutorID()) if err != nil { e := ErrDeserializePublicKey e.innerErr = err - return nil, nil, e + return CommitteeInfo{}, e } validator := mv.validatorStore.Validator(publicKey.Serialize()) if validator == nil { e := ErrUnknownValidator e.got = publicKey.SerializeToHexStr() - return nil, nil, e + return CommitteeInfo{}, e } + // Rule: If validator is liquidated if validator.Liquidated { - return nil, nil, ErrValidatorLiquidated + return CommitteeInfo{}, ErrValidatorLiquidated } if validator.BeaconMetadata == nil { - return nil, nil, ErrNoShareMetadata + return CommitteeInfo{}, ErrNoShareMetadata } + // Rule: If validator is not active if !validator.IsAttesting(mv.netCfg.Beacon.EstimatedCurrentEpoch()) { e := ErrValidatorNotAttesting e.got = validator.BeaconMetadata.Status.String() - return nil, nil, e + return CommitteeInfo{}, e } var operators []spectypes.OperatorID @@ -239,7 +267,11 @@ func (mv *messageValidator) getCommitteeAndValidatorIndices(msgID spectypes.Mess operators = append(operators, c.Signer) } - return operators, []phase0.ValidatorIndex{validator.BeaconMetadata.Index}, nil + return CommitteeInfo{ + operatorIDs: operators, + indices: []phase0.ValidatorIndex{validator.BeaconMetadata.Index}, + committeeID: validator.CommitteeID(), + }, nil } func (mv *messageValidator) consensusState(messageID spectypes.MessageID) *consensusState { @@ -247,13 +279,14 @@ func (mv *messageValidator) consensusState(messageID spectypes.MessageID) *conse defer mv.consensusStateIndexMu.Unlock() id := consensusID{ - SenderID: string(messageID.GetDutyExecutorID()), - Role: messageID.GetRoleType(), + DutyExecutorID: string(messageID.GetDutyExecutorID()), + Role: messageID.GetRoleType(), } if _, ok := mv.consensusStateIndex[id]; !ok { cs := &consensusState{ - signers: make(map[spectypes.OperatorID]*SignerState), + state: make(map[spectypes.OperatorID]*OperatorState), + storedSlotCount: phase0.Slot(mv.netCfg.Beacon.SlotsPerEpoch()) * 2, // store last two epochs to calculate duty count } mv.consensusStateIndex[id] = cs } diff --git a/message/validation/validation_test.go b/message/validation/validation_test.go index 393cc17e1c..5f50382610 100644 --- a/message/validation/validation_test.go +++ b/message/validation/validation_test.go @@ -19,7 +19,7 @@ import ( spectestingutils "github.com/ssvlabs/ssv-spec/types/testingutils" "github.com/stretchr/testify/require" eth2types "github.com/wealdtech/go-eth2-types/v2" - gomock "go.uber.org/mock/gomock" + "go.uber.org/mock/gomock" "go.uber.org/zap/zaptest" "golang.org/x/exp/maps" "golang.org/x/exp/slices" @@ -119,7 +119,7 @@ func Test_ValidateSSVMessage(t *testing.T) { encodedCommitteeID := append(bytes.Repeat([]byte{0}, 16), committeeID[:]...) 
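(Editor's aside: the setup above packs the committee ID into the low 32 bytes of a 48-byte duty executor ID, and the tests below repeatedly undo that with GetDutyExecutorID()[16:]. A minimal sketch of the round-trip, assuming, as the test code implies, a 16-byte zero prefix, a 32-byte spectypes.CommitteeID, and Go 1.20+ slice-to-array conversion; the helper names are hypothetical and not part of this patch:)

    package sketch

    import (
        "bytes"

        spectypes "github.com/ssvlabs/ssv-spec/types"
    )

    // encodeCommitteeID mirrors the test setup above:
    // append(bytes.Repeat([]byte{0}, 16), committeeID[:]...)
    func encodeCommitteeID(cid spectypes.CommitteeID) []byte {
        return append(bytes.Repeat([]byte{0}, 16), cid[:]...)
    }

    // decodeCommitteeID mirrors the recurring lookup in the tests:
    // spectypes.CommitteeID(msgID.GetDutyExecutorID()[16:]).
    // It panics if dutyExecutorID holds fewer than 48 bytes.
    func decodeCommitteeID(dutyExecutorID []byte) spectypes.CommitteeID {
        return spectypes.CommitteeID(dutyExecutorID[16:])
    }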
committeeIdentifier := spectypes.NewMsgID(netCfg.Domain, encodedCommitteeID, committeeRole) - //nonCommitteeIdentifier := spectypes.NewMsgID(netCfg.Domain, ks.ValidatorPK.Serialize(), committeeRole) + nonCommitteeIdentifier := spectypes.NewMsgID(netCfg.Domain, ks.ValidatorPK.Serialize(), nonCommitteeRole) // Message validation happy flow, messages are not ignored or rejected and there are no errors t.Run("happy flow", func(t *testing.T) { @@ -129,7 +129,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage := generateSignedMessage(ks, committeeIdentifier, slot) receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.NoError(t, err) }) @@ -144,28 +144,31 @@ func Test_ValidateSSVMessage(t *testing.T) { msgID := committeeIdentifier state := validator.consensusState(msgID) for i := spectypes.OperatorID(1); i <= 4; i++ { - signerState := state.GetSignerState(i) - require.Nil(t, signerState) + signerState := state.GetOrCreate(i) + require.NotNil(t, signerState) } signedSSVMessage := generateSignedMessage(ks, msgID, slot) receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.NoError(t, err) _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) - require.ErrorContains(t, err, ErrTooManySameTypeMessagesPerRound.Error()) + require.ErrorContains(t, err, ErrDuplicatedMessage.Error()) + + stateBySlot := state.GetOrCreate(1) + require.NotNil(t, stateBySlot) - state1 := state.GetSignerState(1) - require.NotNil(t, state1) - require.EqualValues(t, height, state1.Slot) - require.EqualValues(t, 1, state1.Round) - require.EqualValues(t, MessageCounts{Proposal: 1}, state1.MessageCounts) + storedState := stateBySlot.Get(slot) + require.NotNil(t, storedState) + require.EqualValues(t, height, storedState.Slot) + require.EqualValues(t, 1, storedState.Round) + require.EqualValues(t, MessageCounts{Proposal: 1}, storedState.MessageCounts) for i := spectypes.OperatorID(2); i <= 4; i++ { - signerState := state.GetSignerState(i) - require.Nil(t, signerState) + require.NotNil(t, state.GetOrCreate(i)) } signedSSVMessage = generateSignedMessage(ks, msgID, slot, func(message *specqbft.Message) { @@ -177,13 +180,14 @@ func Test_ValidateSSVMessage(t *testing.T) { _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.NoError(t, err) - require.NotNil(t, state1) - require.EqualValues(t, height, state1.Slot) - require.EqualValues(t, 2, state1.Round) - require.EqualValues(t, MessageCounts{Prepare: 1}, state1.MessageCounts) + storedState = stateBySlot.Get(slot) + require.NotNil(t, storedState) + require.EqualValues(t, height, storedState.Slot) + require.EqualValues(t, 2, storedState.Round) + require.EqualValues(t, MessageCounts{Prepare: 1}, storedState.MessageCounts) _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) - require.ErrorContains(t, err, ErrTooManySameTypeMessagesPerRound.Error()) + require.ErrorContains(t, 
err, ErrDuplicatedMessage.Error()) signedSSVMessage = generateSignedMessage(ks, msgID, slot+1, func(message *specqbft.Message) { message.MsgType = specqbft.CommitMsgType @@ -191,21 +195,21 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.FullData = nil _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt.Add(netCfg.Beacon.SlotDurationSec())) require.NoError(t, err) - require.NotNil(t, state1) - require.EqualValues(t, height+1, state1.Slot) - require.EqualValues(t, 1, state1.Round) - require.EqualValues(t, MessageCounts{Commit: 1}, state1.MessageCounts) + + storedState = stateBySlot.Get(phase0.Slot(height) + 1) + require.NotNil(t, storedState) + require.EqualValues(t, 1, storedState.Round) + require.EqualValues(t, MessageCounts{Commit: 1}, storedState.MessageCounts) _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt.Add(netCfg.Beacon.SlotDurationSec())) - require.ErrorContains(t, err, ErrTooManySameTypeMessagesPerRound.Error()) + require.ErrorContains(t, err, ErrDuplicatedMessage.Error()) signedSSVMessage = generateMultiSignedMessage(ks, msgID, slot+1) _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt.Add(netCfg.Beacon.SlotDurationSec())) require.NoError(t, err) - require.NotNil(t, state1) - require.EqualValues(t, height+1, state1.Slot) - require.EqualValues(t, 1, state1.Round) - require.EqualValues(t, MessageCounts{Commit: 1, Decided: 1}, state1.MessageCounts) + require.NotNil(t, stateBySlot) + require.EqualValues(t, 1, storedState.Round) + require.EqualValues(t, MessageCounts{Commit: 1}, storedState.MessageCounts) }) // Send a pubsub message with no data should cause an error @@ -228,8 +232,8 @@ func Test_ValidateSSVMessage(t *testing.T) { slot := netCfg.Beacon.FirstSlotAtEpoch(1) - topic := commons.GetTopicFullName(commons.CommitteeTopicID(committeeIdentifier[:])[0]) - msgSize := 10_000_000 + commons.MessageOffset + topic := commons.GetTopicFullName(commons.CommitteeTopicID(committeeID)[0]) + msgSize := maxSignedMsgSize*2 + commons.MessageOffset pmsg := &pubsub.Message{ Message: &pspb.Message{ @@ -253,7 +257,7 @@ func Test_ValidateSSVMessage(t *testing.T) { slot := netCfg.Beacon.FirstSlotAtEpoch(1) - topic := commons.GetTopicFullName(commons.CommitteeTopicID(committeeIdentifier[:])[0]) + topic := commons.GetTopicFullName(commons.CommitteeTopicID(committeeID)[0]) pmsg := &pubsub.Message{ Message: &pspb.Message{ Data: bytes.Repeat([]byte{1}, 1+commons.MessageOffset), @@ -278,7 +282,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.SSVMessage.Data = bytes.Repeat([]byte{1}, 500) receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorContains(t, err, ErrUndecodableMessageData.Error()) @@ -294,7 +298,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.SSVMessage.Data = []byte{} receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorIs(t, err, 
ErrEmptyData) @@ -311,16 +315,16 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage := generateSignedMessage(ks, committeeIdentifier, slot) - const tooBigMsgSize = maxPayloadSize * 2 + tooBigMsgSize := maxPayloadDataSize * 2 signedSSVMessage.SSVMessage.Data = bytes.Repeat([]byte{1}, tooBigMsgSize) receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) expectedErr := ErrSSVDataTooBig expectedErr.got = tooBigMsgSize - expectedErr.want = maxPayloadSize + expectedErr.want = maxPayloadDataSize require.ErrorIs(t, err, expectedErr) }) @@ -331,10 +335,10 @@ func Test_ValidateSSVMessage(t *testing.T) { slot := netCfg.Beacon.FirstSlotAtEpoch(1) signedSSVMessage := generateSignedMessage(ks, committeeIdentifier, slot) - signedSSVMessage.SSVMessage.Data = bytes.Repeat([]byte{1}, maxPayloadSize) + signedSSVMessage.SSVMessage.Data = bytes.Repeat([]byte{1}, maxPayloadDataSize) receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorContains(t, err, ErrUndecodableMessageData.Error()) @@ -349,7 +353,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage := generateSignedMessage(ks, committeeIdentifier, slot) signedSSVMessage.SSVMessage.MsgType = math.MaxUint64 - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, time.Now()) require.ErrorContains(t, err, ErrUnknownSSVMessageType.Error()) }) @@ -398,7 +402,7 @@ func Test_ValidateSSVMessage(t *testing.T) { unknownIdentifier := spectypes.NewMsgID(netCfg.Domain, unknownCommitteeID, committeeRole) signedSSVMessage := generateSignedMessage(ks, unknownIdentifier, slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, time.Now()) expectedErr := ErrNonExistentCommitteeID expectedErr.got = hex.EncodeToString(unknownCommitteeID[16:]) @@ -448,7 +452,7 @@ func Test_ValidateSSVMessage(t *testing.T) { badIdentifier := spectypes.NewMsgID(netCfg.Domain, shares.active.ValidatorPubKey[:], spectypes.RoleValidatorRegistration) signedSSVMessage := generateSignedMessage(ks, badIdentifier, slot) - topicID := commons.ValidatorTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(committeeID)[0] receivedAt := netCfg.Beacon.GetSlotStartTime(slot) _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) expectedErr := ErrUnexpectedConsensusMessage @@ -458,7 +462,6 @@ func Test_ValidateSSVMessage(t *testing.T) { badIdentifier = spectypes.NewMsgID(netCfg.Domain, shares.active.ValidatorPubKey[:], 
spectypes.RoleVoluntaryExit) signedSSVMessage = generateSignedMessage(ks, badIdentifier, slot) - topicID = commons.ValidatorTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) expectedErr.got = spectypes.RoleVoluntaryExit require.ErrorIs(t, err, expectedErr) @@ -538,7 +541,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.FullData = spectestingutils.TestingQBFTFullData receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.ValidatorTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(committeeID)[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.NoError(t, err) }) @@ -573,7 +576,7 @@ func Test_ValidateSSVMessage(t *testing.T) { identifier := spectypes.NewMsgID(netCfg.Domain, ks.ValidatorPK.Serialize(), role) signedSSVMessage := generateSignedMessage(ks, identifier, slot) - topicID := commons.ValidatorTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(committeeID)[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, netCfg.Beacon.GetSlotStartTime(slot)) require.NoError(t, err) @@ -598,7 +601,7 @@ func Test_ValidateSSVMessage(t *testing.T) { identifier := spectypes.NewMsgID(netCfg.Domain, ks.ValidatorPK.Serialize(), spectypes.RoleProposer) signedSSVMessage := generateSignedMessage(ks, identifier, slot) - topicID := commons.ValidatorTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(committeeID)[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, netCfg.Beacon.GetSlotStartTime(slot)) require.ErrorContains(t, err, ErrNoDuty.Error()) @@ -614,12 +617,12 @@ func Test_ValidateSSVMessage(t *testing.T) { slot := netCfg.Beacon.FirstSlotAtEpoch(1) msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) - for i := 0; i < 13; i++ { + for i := 0; i < 1000; i++ { msg.Messages = append(msg.Messages, msg.Messages[0]) } _, err := msg.Encode() - require.ErrorContains(t, err, "max expected 13 and 14 found") + require.ErrorContains(t, err, "max expected 1000 and 1001 found") }) // Get error when receiving message from operator who is not affiliated with the validator @@ -643,7 +646,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.FullData = spectestingutils.TestingQBFTFullData receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorContains(t, err, ErrSignerNotInCommittee.Error()) }) @@ -673,7 +676,7 @@ func Test_ValidateSSVMessage(t *testing.T) { partialSigSSVMessage.OperatorIDs = []spectypes.OperatorID{2} receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.ValidatorTopicID(partialSigSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(committeeID)[0] _, err = validator.handleSignedSSVMessage(partialSigSSVMessage, topicID, receivedAt) expectedErr := ErrInconsistentSigners expectedErr.got = spectypes.OperatorID(2) @@ -692,7 +695,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage := 
spectestingutils.SignedSSVMessageWithSigner(1, ks.OperatorKeys[1], spectestingutils.SSVMsgAggregator(nil, messages)) receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.ValidatorTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(committeeID)[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorIs(t, err, ErrNoPartialSignatureMessages) }) @@ -742,13 +745,13 @@ func Test_ValidateSSVMessage(t *testing.T) { encodedMessages, err := messages.Encode() require.NoError(t, err) - senderID := shares.active.ValidatorPubKey[:] + dutyExecutorID := shares.active.ValidatorPubKey[:] if validator.committeeRole(role) { - senderID = encodedCommitteeID + dutyExecutorID = encodedCommitteeID } ssvMessage := &spectypes.SSVMessage{ MsgType: spectypes.SSVPartialSignatureMsgType, - MsgID: spectypes.NewMsgID(spectestingutils.TestingSSVDomainType, senderID, role), + MsgID: spectypes.NewMsgID(spectestingutils.TestingSSVDomainType, dutyExecutorID, role), Data: encodedMessages, } @@ -756,11 +759,7 @@ func Test_ValidateSSVMessage(t *testing.T) { receivedAt := netCfg.Beacon.GetSlotStartTime(spectestingutils.TestingDutySlot) - getTopics := commons.ValidatorTopicID - if validator.committeeRole(role) { - getTopics = commons.CommitteeTopicID - } - topicID := getTopics(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(committeeID)[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.NoError(t, err) @@ -789,7 +788,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage := spectestingutils.SignedSSVMessageWithSigner(1, ks.OperatorKeys[1], ssvMessage) receivedAt := netCfg.Beacon.GetSlotStartTime(spectestingutils.TestingDutySlot) - topicID := commons.ValidatorTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(committeeID)[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorContains(t, err, ErrInvalidPartialSignatureType.Error()) }) @@ -835,10 +834,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage := spectestingutils.SignedSSVMessageWithSigner(1, ks.OperatorKeys[1], ssvMessage) receivedAt := netCfg.Beacon.GetSlotStartTime(spectestingutils.TestingDutySlot) - topicID := commons.ValidatorTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] - if validator.committeeRole(role) { - topicID = commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] - } + topicID := commons.CommitteeTopicID(committeeID)[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorContains(t, err, ErrPartialSignatureTypeRoleMismatch.Error()) }) @@ -857,7 +853,7 @@ func Test_ValidateSSVMessage(t *testing.T) { }) receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) expectedErr := ErrUnknownQBFTMessageType require.ErrorIs(t, err, expectedErr) @@ -872,52 +868,45 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.Signatures = [][]byte{{0x1}} receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := 
commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0]
+		topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0]
 		_, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt)
 		require.ErrorContains(t, err, ErrWrongRSASignatureSize.Error())
 	})
 
-	// Initialize signature tests
-	t.Run("zero signature", func(t *testing.T) {
+	// Get error when receiving a message with an empty list of signers
+	t.Run("no signers", func(t *testing.T) {
 		validator := New(netCfg, validatorStore, dutyStore, signatureVerifier).(*messageValidator)
 
 		slot := netCfg.Beacon.FirstSlotAtEpoch(1)
+		signedSSVMessage := generateSignedMessage(ks, committeeIdentifier, slot)
+		signedSSVMessage.OperatorIDs = nil
 
-		// Get error when receiving a consensus message with a zero signature
-		t.Run("consensus message", func(t *testing.T) {
-			signedSSVMessage := generateSignedMessage(ks, committeeIdentifier, slot)
-			signedSSVMessage.Signatures = [][]byte{{}}
-
-			receivedAt := netCfg.Beacon.GetSlotStartTime(slot)
-			topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0]
-			_, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt)
-			require.ErrorIs(t, err, ErrEmptySignature)
-		})
-
-		// Get error when receiving a partial signature message with a zero signature
-		t.Run("partial signature message", func(t *testing.T) {
-			partialSigSSVMessage := spectestingutils.SignPartialSigSSVMessage(ks, spectestingutils.SSVMsgAggregator(nil, spectestingutils.PostConsensusAggregatorMsg(ks.Shares[1], 1)))
-			partialSigSSVMessage.Signatures = [][]byte{{}}
-
-			receivedAt := netCfg.Beacon.GetSlotStartTime(slot)
-			topicID := commons.ValidatorTopicID(partialSigSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0]
-			_, err = validator.handleSignedSSVMessage(partialSigSSVMessage, topicID, receivedAt)
-			require.ErrorIs(t, err, ErrEmptySignature)
-		})
+		receivedAt := netCfg.Beacon.GetSlotStartTime(slot)
+		topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0]
+		_, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt)
+		require.ErrorIs(t, err, ErrNoSigners)
 	})
 
-	// Get error when receiving a message with an empty list of signers
-	t.Run("no signers", func(t *testing.T) {
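(Editor's aside: the next case exercises the signer-membership rule from belongsToCommittee above: every signer must appear in the committee's operator list, so a fifth operator ID is rejected on membership before any signature is verified. A minimal sketch of that rule; the helper name is hypothetical and not part of this patch:)

    package sketch

    import (
        spectypes "github.com/ssvlabs/ssv-spec/types"
        "golang.org/x/exp/slices"
    )

    // signersInCommittee reports whether every signer is one of the
    // committee's operators, mirroring the loop in belongsToCommittee.
    func signersInCommittee(signers, committee []spectypes.OperatorID) bool {
        for _, signer := range signers {
            if !slices.Contains(committee, signer) {
                return false
            }
        }
        return true
    }

+	// Get error when receiving a message with more signers than committee size.
+	// The extra signer cannot belong to the committee, so the expected error is ErrSignerNotInCommittee.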
+ t.Run("more signers than committee size", func(t *testing.T) { validator := New(netCfg, validatorStore, dutyStore, signatureVerifier).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) signedSSVMessage := generateSignedMessage(ks, committeeIdentifier, slot) - signedSSVMessage.OperatorIDs = nil + signedSSVMessage.OperatorIDs = []spectypes.OperatorID{1, 2, 3, 4, 5} + signedSSVMessage.Signatures = [][]byte{ + signedSSVMessage.Signatures[0], + signedSSVMessage.Signatures[0], + signedSSVMessage.Signatures[0], + signedSSVMessage.Signatures[0], + signedSSVMessage.Signatures[0], + } receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) - require.ErrorIs(t, err, ErrNoSigners) + require.ErrorContains(t, err, ErrSignerNotInCommittee.Error()) }) // Get error when receiving a consensus message with zero signer @@ -929,7 +918,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.OperatorIDs = []spectypes.OperatorID{0} receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorIs(t, err, ErrZeroSigner) }) @@ -943,7 +932,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.OperatorIDs = []spectypes.OperatorID{1, 2, 2} receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorIs(t, err, ErrDuplicatedSigner) }) @@ -957,7 +946,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.OperatorIDs = []spectypes.OperatorID{3, 2, 1} receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorIs(t, err, ErrSignersNotSorted) }) @@ -971,10 +960,10 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.OperatorIDs = []spectypes.OperatorID{1, 2, 3, 4} receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) - require.ErrorContains(t, err, ErrSignatureOperatorIDLengthMismatch.Error()) + require.ErrorContains(t, err, ErrSignersAndSignaturesWithDifferentLength.Error()) }) // Get error when receiving message from less than quorum size amount of signers @@ -987,7 +976,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.Signatures = 
signedSSVMessage.Signatures[:2] receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorContains(t, err, ErrDecidedNotEnoughSigners.Error()) @@ -1003,7 +992,7 @@ func Test_ValidateSSVMessage(t *testing.T) { }) receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) expectedErr := ErrNonDecidedWithMultipleSigners @@ -1032,21 +1021,17 @@ func Test_ValidateSSVMessage(t *testing.T) { for role, receivedAt := range tests { role, receivedAt := role, receivedAt t.Run(message.RunnerRoleToString(role), func(t *testing.T) { - senderID := shares.active.ValidatorPubKey[:] + dutyExecutorID := shares.active.ValidatorPubKey[:] if validator.committeeRole(role) { - senderID = encodedCommitteeID + dutyExecutorID = encodedCommitteeID } - msgID := spectypes.NewMsgID(netCfg.Domain, senderID, role) + msgID := spectypes.NewMsgID(netCfg.Domain, dutyExecutorID, role) signedSSVMessage := generateSignedMessage(ks, msgID, slot) - getTopics := commons.ValidatorTopicID - if validator.committeeRole(role) { - getTopics = commons.CommitteeTopicID - } - topicID := getTopics(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(committeeID)[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) - require.ErrorContains(t, err, ErrLateMessage.Error()) + require.ErrorContains(t, err, ErrLateSlotMessage.Error()) }) } }) @@ -1059,10 +1044,10 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage := generateSignedMessage(ks, committeeIdentifier, slot) receivedAt := netCfg.Beacon.GetSlotStartTime(slot - 1) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) - require.ErrorIs(t, err, ErrEarlyMessage) + require.ErrorContains(t, err, ErrEarlySlotMessage.Error()) }) // Send message from non-leader acting as a leader should receive an error @@ -1074,7 +1059,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.OperatorIDs = []spectypes.OperatorID{2} receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorContains(t, err, ErrSignerNotLeader.Error()) }) @@ -1090,7 +1075,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.OperatorIDs = []spectypes.OperatorID{2} receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := 
commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorContains(t, err, ErrMalformedPrepareJustifications.Error()) @@ -1113,7 +1098,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.FullData = nil receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorContains(t, err, ErrUnexpectedPrepareJustifications.Error()) @@ -1136,7 +1121,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.FullData = nil receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorContains(t, err, ErrUnexpectedRoundChangeJustifications.Error()) @@ -1154,7 +1139,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.FullData = nil receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorContains(t, err, ErrMalformedRoundChangeJustifications.Error()) @@ -1170,7 +1155,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.FullData = []byte{1} receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) expectedErr := ErrInvalidHash @@ -1186,7 +1171,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage := generateSignedMessage(ks, committeeIdentifier, slot) receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.NoError(t, err) @@ -1208,19 +1193,20 @@ func Test_ValidateSSVMessage(t *testing.T) { slot := netCfg.Beacon.FirstSlotAtEpoch(1) - signedSSVMessage := generateSignedMessage(ks, committeeIdentifier, slot, func(message *specqbft.Message) { + identifier := spectypes.NewMsgID(netCfg.Domain, ks.ValidatorPK.Serialize(), spectypes.RoleProposer) + signedSSVMessage := generateSignedMessage(ks, identifier, slot, func(message *specqbft.Message) { message.MsgType = specqbft.PrepareMsgType }) signedSSVMessage.FullData = nil receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := 
commons.CommitteeTopicID(committeeID)[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.NoError(t, err) _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) - expectedErr := ErrTooManySameTypeMessagesPerRound - expectedErr.got = "prepare, having pre-consensus: 0, proposal: 0, prepare: 1, commit: 0, decided: 0, round change: 0, post-consensus: 0" + expectedErr := ErrDuplicatedMessage + expectedErr.got = "prepare, having pre-consensus: 0, proposal: 0, prepare: 1, commit: 0, round change: 0, post-consensus: 0" require.ErrorIs(t, err, expectedErr) }) @@ -1236,13 +1222,13 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.FullData = nil receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.NoError(t, err) _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) - expectedErr := ErrTooManySameTypeMessagesPerRound - expectedErr.got = "commit, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 1, decided: 0, round change: 0, post-consensus: 0" + expectedErr := ErrDuplicatedMessage + expectedErr.got = "commit, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 1, round change: 0, post-consensus: 0" require.ErrorIs(t, err, expectedErr) }) @@ -1258,18 +1244,18 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.FullData = nil receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.NoError(t, err) _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) - expectedErr := ErrTooManySameTypeMessagesPerRound - expectedErr.got = "round change, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 0, decided: 0, round change: 1, post-consensus: 0" + expectedErr := ErrDuplicatedMessage + expectedErr.got = "round change, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 0, round change: 1, post-consensus: 0" require.ErrorIs(t, err, expectedErr) }) - // Decided with same number of signers should receive an error - t.Run("decided with same number of signers", func(t *testing.T) { + // Decided with same signers should receive an error + t.Run("decided with same signers", func(t *testing.T) { validator := New(netCfg, validatorStore, dutyStore, signatureVerifier).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -1280,13 +1266,59 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.FullData = nil receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.NoError(t, err) _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) - require.ErrorIs(t, err, ErrDecidedWithSameNumberOfSigners) + 
require.ErrorIs(t, err, ErrDecidedWithSameSigners) + }) + + // Send message with a slot lower than in the previous message + t.Run("slot already advanced", func(t *testing.T) { + validator := New(netCfg, validatorStore, dutyStore, signatureVerifier).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signedSSVMessage := generateSignedMessage(ks, nonCommitteeIdentifier, slot, func(message *specqbft.Message) { + message.Height = 8 + }) + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot) + topicID := commons.CommitteeTopicID(committeeID)[0] + _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) + require.NoError(t, err) + + signedSSVMessage = generateSignedMessage(ks, nonCommitteeIdentifier, slot, func(message *specqbft.Message) { + message.Height = 4 + }) + + _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) + require.ErrorContains(t, err, ErrSlotAlreadyAdvanced.Error()) + }) + + // Send message with a round lower than in the previous message + t.Run("round already advanced", func(t *testing.T) { + validator := New(netCfg, validatorStore, dutyStore, signatureVerifier).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signedSSVMessage := generateSignedMessage(ks, committeeIdentifier, slot, func(message *specqbft.Message) { + message.Round = 5 + }) + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(5 * roundtimer.QuickTimeout) + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] + _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) + require.NoError(t, err) + + signedSSVMessage = generateSignedMessage(ks, committeeIdentifier, slot, func(message *specqbft.Message) { + message.Round = 1 + }) + + _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) + require.ErrorContains(t, err, ErrRoundAlreadyAdvanced.Error()) }) // Receive message from a round that is too high for that epoch should receive an error @@ -1310,23 +1342,19 @@ func Test_ValidateSSVMessage(t *testing.T) { for role, round := range tests { role, round := role, round t.Run(message.RunnerRoleToString(role), func(t *testing.T) { - senderID := shares.active.ValidatorPubKey[:] + dutyExecutorID := shares.active.ValidatorPubKey[:] if validator.committeeRole(role) { - senderID = encodedCommitteeID + dutyExecutorID = encodedCommitteeID } - msgID := spectypes.NewMsgID(netCfg.Domain, senderID, role) + msgID := spectypes.NewMsgID(netCfg.Domain, dutyExecutorID, role) signedSSVMessage := generateSignedMessage(ks, msgID, slot, func(message *specqbft.Message) { message.MsgType = specqbft.PrepareMsgType message.Round = round }) signedSSVMessage.FullData = nil - getTopics := commons.ValidatorTopicID - if validator.committeeRole(role) { - getTopics = commons.CommitteeTopicID - } - topicID := getTopics(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(committeeID)[0] sinceSlotStart := time.Duration(0) for validator.currentEstimatedRound(sinceSlotStart) != round { @@ -1335,8 +1363,8 @@ func Test_ValidateSSVMessage(t *testing.T) { receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(sinceSlotStart) _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) - if validator.lateMessage(slot, role, receivedAt) > 0 { - require.ErrorContains(t, err, ErrLateMessage.Error()) + if validator.messageLateness(slot, role, receivedAt) > 0 { + 
require.ErrorContains(t, err, ErrLateSlotMessage.Error()) } else { require.ErrorContains(t, err, ErrRoundTooHigh.Error()) } @@ -1354,7 +1382,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.SSVMessage.MsgType = message.SSVEventMsgType receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorIs(t, err, ErrEventMessage) @@ -1370,7 +1398,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.SSVMessage.MsgType = spectypes.DKGMsgType receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorIs(t, err, ErrDKGMessage) @@ -1385,7 +1413,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage := generateSignedMessage(ks, committeeIdentifier, slot) receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorContains(t, err, ErrSignatureVerification.Error()) @@ -1433,8 +1461,8 @@ func Test_ValidateSSVMessage(t *testing.T) { require.ErrorContains(t, err, ErrNilSSVMessage.Error()) }) - // Receive wrong round (0) - t.Run("wrong round", func(t *testing.T) { + // Receive zero round + t.Run("zero round", func(t *testing.T) { validator := New(netCfg, validatorStore, dutyStore, signatureVerifier).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -1444,9 +1472,9 @@ func Test_ValidateSSVMessage(t *testing.T) { }) receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) - require.ErrorContains(t, err, ErrInvalidRound.Error()) + require.ErrorContains(t, err, ErrZeroRound.Error()) }) // Receive a message with no signatures @@ -1459,7 +1487,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.Signatures = [][]byte{} receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorContains(t, err, ErrNoSignatures.Error()) }) @@ -1477,7 +1505,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.SSVMessage.MsgID = committeeIdentifier receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := 
commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorContains(t, err, ErrMismatchedIdentifier.Error()) }) @@ -1493,7 +1521,7 @@ func Test_ValidateSSVMessage(t *testing.T) { }) receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorContains(t, err, ErrPrepareOrCommitWithFullData.Error()) @@ -1514,7 +1542,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.FullData = []byte{1} receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.ValidatorTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(committeeID)[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorIs(t, err, ErrFullDataNotInConsensusMessage) }) @@ -1530,7 +1558,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage.Signatures = append(signedSSVMessage.Signatures, signedSSVMessage.Signatures[0]) receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.ValidatorTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(committeeID)[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorIs(t, err, ErrPartialSigOneSigner) }) @@ -1559,7 +1587,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage := spectestingutils.SignPartialSigSSVMessage(ks, ssvMessage) receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorContains(t, err, ErrTooManyPartialSignatureMessages.Error()) }) @@ -1588,7 +1616,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage := spectestingutils.SignPartialSigSSVMessage(ks, ssvMessage) receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorContains(t, err, ErrTripleValidatorIndexInPartialSignatures.Error()) }) @@ -1615,7 +1643,7 @@ func Test_ValidateSSVMessage(t *testing.T) { signedSSVMessage := spectestingutils.SignPartialSigSSVMessage(ks, ssvMessage) receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - topicID := commons.CommitteeTopicID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID())[0] + topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) require.ErrorContains(t, err, ErrValidatorIndexMismatch.Error()) }) diff --git a/network/network.go b/network/network.go index e9b94b9bcf..1db826c7df 100644 --- a/network/network.go 
+++ b/network/network.go @@ -37,6 +37,8 @@ type P2PNetwork interface { SubscribeAll(logger *zap.Logger) error // SubscribeRandoms subscribes to random subnets SubscribeRandoms(logger *zap.Logger, numSubnets int) error + // UpdateScoreParams will update the scoring parameters of GossipSub + UpdateScoreParams(logger *zap.Logger) } // GetValidatorStats returns stats of validators, including the following: diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index cceac7b4c4..e63f8ef338 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -313,6 +313,30 @@ func (n *p2pNetwork) UpdateSubnets(logger *zap.Logger) { } } +// UpdateScoreParams updates the scoring parameters once per epoch through the call of n.topicsCtrl.UpdateScoreParams +func (n *p2pNetwork) UpdateScoreParams(logger *zap.Logger) { + // TODO: this is a temporary solution to update the score parameters periodically. + // But, we should use an appropriate trigger for the UpdateScoreParams function that should be + // called once a validator is added or removed from the network + + logger = logger.Named(logging.NameP2PNetwork) + + // Create ticker + oneEpochDuration := n.cfg.Network.Beacon.SlotDurationSec() * time.Duration(n.cfg.Network.Beacon.SlotsPerEpoch()) + ticker := time.NewTicker(oneEpochDuration) + defer ticker.Stop() + + // Run immediately and then once every epoch + for ; true; <-ticker.C { + err := n.topicsCtrl.UpdateScoreParams(logger) + if err != nil { + logger.Debug("score parameters update failed", zap.Error(err)) + } else { + logger.Debug("updated score parameters successfully") + } + } +} + // getMaxPeers returns max peers of the given topic. func (n *p2pNetwork) getMaxPeers(topic string) int { if len(topic) == 0 { diff --git a/network/p2p/p2p_setup.go b/network/p2p/p2p_setup.go index 82105d8c21..14499ca623 100644 --- a/network/p2p/p2p_setup.go +++ b/network/p2p/p2p_setup.go @@ -28,6 +28,7 @@ import ( "github.com/ssvlabs/ssv/network/records" "github.com/ssvlabs/ssv/network/streams" "github.com/ssvlabs/ssv/network/topics" + "github.com/ssvlabs/ssv/utils/commons" ) const ( @@ -172,9 +173,7 @@ func (n *p2pNetwork) setupPeerServices(logger *zap.Logger) error { domain := "0x" + hex.EncodeToString(n.cfg.Network.Domain[:]) self := records.NewNodeInfo(domain) self.Metadata = &records.NodeMetadata{ - // TODO: (Alan) revert - // NodeVersion: commons.GetNodeVersion(), - NodeVersion: "ALANTEST", + NodeVersion: commons.GetNodeVersion(), Subnets: records.Subnets(n.subnets).String(), } getPrivKey := func() crypto.PrivKey { @@ -301,7 +300,7 @@ func (n *p2pNetwork) setupPubsub(logger *zap.Logger) error { // run GC every 3 minutes to clear old messages async.RunEvery(n.ctx, time.Minute*3, midHandler.GC) - _, tc, err := topics.NewPubSub(n.ctx, logger, cfg, n.metrics) + _, tc, err := topics.NewPubSub(n.ctx, logger, cfg, n.metrics, n.nodeStorage.ValidatorStore()) if err != nil { return errors.Wrap(err, "could not setup pubsub") } diff --git a/network/p2p/p2p_sync.go b/network/p2p/p2p_sync.go index 8c13fceec8..9a5f9c9028 100644 --- a/network/p2p/p2p_sync.go +++ b/network/p2p/p2p_sync.go @@ -188,8 +188,8 @@ func allPeersFilter(id peer.ID) bool { func waitSubsetOfPeers( logger *zap.Logger, - getSubsetOfPeers func(logger *zap.Logger, vpk spectypes.ValidatorPK, maxPeers int, filter func(peer.ID) bool) (peers []peer.ID, err error), - vpk spectypes.ValidatorPK, + getSubsetOfPeers func(logger *zap.Logger, senderID []byte, maxPeers int, filter func(peer.ID) bool) (peers []peer.ID, err error), + senderID []byte, minPeers, maxPeers int, 
timeout time.Duration, filter func(peer.ID) bool, @@ -207,7 +207,7 @@ func waitSubsetOfPeers( // Wait for minPeers with a deadline. deadline := time.Now().Add(timeout) for { - peers, err := getSubsetOfPeers(logger, vpk, maxPeers, filter) + peers, err := getSubsetOfPeers(logger, senderID, maxPeers, filter) if err != nil { return nil, err } diff --git a/network/p2p/p2p_test.go b/network/p2p/p2p_test.go index df0dfcbfcf..e075752729 100644 --- a/network/p2p/p2p_test.go +++ b/network/p2p/p2p_test.go @@ -1,15 +1,9 @@ package p2pv1 import ( + "bytes" "context" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" "encoding/hex" - "encoding/pem" - "fmt" "sync" "sync/atomic" "testing" @@ -32,65 +26,6 @@ import ( p2pprotocol "github.com/ssvlabs/ssv/protocol/v2/p2p" ) -func TestRSAUsage(t *testing.T) { - privateKey, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err) - - testMessage := []byte("message") - - hash := sha256.Sum256(testMessage) - - signature, err := rsa.SignPKCS1v15(nil, privateKey, crypto.SHA256, hash[:]) - require.NoError(t, err) - - publicKey := &privateKey.PublicKey - - pubKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey) - if err != nil { - fmt.Println("Error marshalling public key:", err) - return - } - - pubPEM := pem.EncodeToMemory(&pem.Block{ - Type: "RSA PUBLIC KEY", - Bytes: pubKeyBytes, - }) - - const operatorID = spectypes.OperatorID(0x12345678) - sig := [256]byte{} - copy(sig[:], signature) - signedSSVMsg := &spectypes.SignedSSVMessage{ - Signature: sig, - OperatorID: operatorID, - Data: testMessage, - } - encodedSignedSSVMessage, err := signedSSVMsg.Encode() - require.NoError(t, err) - - decodedMsg := &spectypes.SignedSSVMessage{} - err = decodedMsg.Decode(encodedSignedSSVMessage) - require.NoError(t, err) - - require.NoError(t, err) - require.Equal(t, operatorID, decodedMsg.OperatorID) - require.Equal(t, sig, decodedMsg.Signature) - - messageHash := sha256.Sum256(decodedMsg.Data) - - block, rest := pem.Decode(pubPEM) - require.NotNil(t, block) - require.Empty(t, rest, "extra data after PEM decoding") - - pub, err := x509.ParsePKIXPublicKey(block.Bytes) - require.NoError(t, err) - - rsaPubKey, ok := pub.(*rsa.PublicKey) - require.True(t, ok) - - require.NoError(t, rsa.VerifyPKCS1v15(rsaPubKey, crypto.SHA256, messageHash[:], decodedMsg.Signature[:])) - require.Equal(t, testMessage, decodedMsg.Data) -} - func TestGetMaxPeers(t *testing.T) { n := &p2pNetwork{ cfg: &Config{MaxPeers: 40, TopicMaxPeers: 8}, @@ -99,6 +34,7 @@ func TestGetMaxPeers(t *testing.T) { require.Equal(t, 40, n.getMaxPeers("")) require.Equal(t, 8, n.getMaxPeers("100")) } + func TestP2pNetwork_SubscribeBroadcast(t *testing.T) { n := 4 ctx, cancel := context.WithCancel(context.Background()) @@ -128,8 +64,8 @@ func TestP2pNetwork_SubscribeBroadcast(t *testing.T) { go func() { defer wg.Done() - msgID1, msg1 := dummyMsgAttester(t, pks[0], 1) - msgID3, msg3 := dummyMsgAttester(t, pks[0], 3) + msgID1, msg1 := dummyMsgCommittee(t, pks[0], 1) + msgID3, msg3 := dummyMsgCommittee(t, pks[0], 3) require.NoError(t, node1.Broadcast(msgID1, msg1)) <-time.After(time.Millisecond * 10) require.NoError(t, node2.Broadcast(msgID3, msg3)) @@ -141,9 +77,9 @@ func TestP2pNetwork_SubscribeBroadcast(t *testing.T) { go func() { defer wg.Done() - msgID1, msg1 := dummyMsgAttester(t, pks[0], 1) - msgID2, msg2 := dummyMsgAttester(t, pks[1], 2) - msgID3, msg3 := dummyMsgAttester(t, pks[0], 3) + msgID1, msg1 := dummyMsgCommittee(t, pks[0], 1) + msgID2, msg2 := dummyMsgCommittee(t, pks[1], 2) + 
msgID3, msg3 := dummyMsgCommittee(t, pks[0], 3) require.NoError(t, err) time.Sleep(time.Millisecond * 10) require.NoError(t, node1.Broadcast(msgID2, msg2)) @@ -199,7 +135,8 @@ func TestP2pNetwork_Stream(t *testing.T) { pk, err := hex.DecodeString(pkHex) require.NoError(t, err) - mid := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, spectypes.BNRoleAttester) + + mid := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, spectypes.RoleCommittee) rounds := []specqbft.Round{ 1, 1, 1, 1, 2, 2, @@ -261,7 +198,7 @@ func TestWaitSubsetOfPeers(t *testing.T) { // The mock function increments the number of peers by 1 for each call, up to maxPeers peersCount := 0 start := time.Now() - mockGetSubsetOfPeers := func(logger *zap.Logger, vpk spectypes.ValidatorPK, maxPeers int, filter func(peer.ID) bool) (peers []peer.ID, err error) { + mockGetSubsetOfPeers := func(logger *zap.Logger, senderID []byte, maxPeers int, filter func(peer.ID) bool) (peers []peer.ID, err error) { if tt.minPeers == 0 { return []peer.ID{}, nil } @@ -274,7 +211,7 @@ func TestWaitSubsetOfPeers(t *testing.T) { return peers, nil } - peers, err := waitSubsetOfPeers(logger, mockGetSubsetOfPeers, vpk, tt.minPeers, tt.maxPeers, tt.timeout, nil) + peers, err := waitSubsetOfPeers(logger, mockGetSubsetOfPeers, vpk[:], tt.minPeers, tt.maxPeers, tt.timeout, nil) if err != nil && err.Error() != tt.expectedErr { t.Errorf("waitSubsetOfPeers() error = %v, wantErr %v", err, tt.expectedErr) return @@ -296,7 +233,7 @@ func (n *p2pNetwork) LastDecided(logger *zap.Logger, mid spectypes.MessageID) ([ return nil, p2pprotocol.ErrNetworkIsNotReady } pid, maxPeers := commons.ProtocolID(p2pprotocol.LastDecidedProtocol) - peers, err := waitSubsetOfPeers(logger, n.getSubsetOfPeers, mid.GetPubKey(), minPeers, maxPeers, waitTime, allPeersFilter) + peers, err := waitSubsetOfPeers(logger, n.getSubsetOfPeers, mid.GetDutyExecutorID(), minPeers, maxPeers, waitTime, allPeersFilter) if err != nil { return nil, errors.Wrap(err, "could not get subset of peers") } @@ -313,18 +250,14 @@ func registerHandler(logger *zap.Logger, node network.P2PNetwork, mid spectypes. 
Protocol: p2pprotocol.LastDecidedProtocol, Handler: func(message *spectypes.SSVMessage) (*spectypes.SSVMessage, error) { atomic.AddInt64(counter, 1) - sm := specqbft.SignedMessage{ - Signature: make([]byte, 96), - Signers: []spectypes.OperatorID{1, 2, 3}, - Message: specqbft.Message{ - MsgType: specqbft.CommitMsgType, - Height: height, - Round: round, - Identifier: mid[:], - Root: [32]byte{1, 2, 3}, - }, + qbftMessage := specqbft.Message{ + MsgType: specqbft.CommitMsgType, + Height: height, + Round: round, + Identifier: mid[:], + Root: [32]byte{1, 2, 3}, } - data, err := sm.Encode() + data, err := qbftMessage.Encode() if err != nil { errors <- err return nil, err @@ -369,12 +302,12 @@ func createNetworkAndSubscribe(t *testing.T, ctx context.Context, options LocalN } for _, node := range ln.Nodes { wg.Add(1) - go func(node network.P2PNetwork, vpk []byte) { + go func(node network.P2PNetwork, vpk spectypes.ValidatorPK) { defer wg.Done() if err := node.Subscribe(vpk); err != nil { logger.Warn("could not subscribe to topic", zap.Error(err)) } - }(node, vpk) + }(node, spectypes.ValidatorPK(vpk)) } } wg.Wait() @@ -388,7 +321,7 @@ func createNetworkAndSubscribe(t *testing.T, ctx context.Context, options LocalN for _, node := range ln.Nodes { peers := make([]peer.ID, 0) for len(peers) < 2 { - peers, err = node.Peers(vpk) + peers, err = node.Peers(spectypes.ValidatorPK(vpk)) if err != nil { return nil, nil, err } @@ -409,38 +342,37 @@ func (r *dummyRouter) Route(_ context.Context, _ *queue.DecodedSSVMessage) { atomic.AddUint64(&r.count, 1) } -func dummyMsg(t *testing.T, pkHex string, height int, role spectypes.BeaconRole) (spectypes.MessageID, *spectypes.SignedSSVMessage) { +func dummyMsg(t *testing.T, pkHex string, height int, role spectypes.RunnerRole) (spectypes.MessageID, *spectypes.SignedSSVMessage) { pk, err := hex.DecodeString(pkHex) require.NoError(t, err) + id := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, role) - signedMsg := &specqbft.SignedMessage{ - Message: specqbft.Message{ - MsgType: specqbft.CommitMsgType, - Round: 2, - Identifier: id[:], - Height: specqbft.Height(height), - Root: [32]byte{0x1, 0x2, 0x3}, - }, - Signature: []byte("sVV0fsvqQlqliKv/ussGIatxpe8LDWhc9uoaM5WpjbiYvvxUr1eCpz0ja7UT1PGNDdmoGi6xbMC1g/ozhAt4uCdpy0Xdfqbv"), - Signers: []spectypes.OperatorID{1, 3, 4}, + qbftMsg := &specqbft.Message{ + MsgType: specqbft.CommitMsgType, + Round: 2, + Identifier: id[:], + Height: specqbft.Height(height), + Root: [32]byte{0x1, 0x2, 0x3}, } - data, err := signedMsg.Encode() + data, err := qbftMsg.Encode() require.NoError(t, err) + ssvMsg := &spectypes.SSVMessage{ MsgType: spectypes.SSVConsensusMsgType, MsgID: id, Data: data, } + signedSSVMsg, err := spectypes.SSVMessageToSignedSSVMessage(ssvMsg, 1, dummySignSSVMessage) require.NoError(t, err) return id, signedSSVMsg } -func dummyMsgAttester(t *testing.T, pkHex string, height int) (spectypes.MessageID, *spectypes.SignedSSVMessage) { - return dummyMsg(t, pkHex, height, spectypes.BNRoleAttester) +func dummyMsgCommittee(t *testing.T, pkHex string, height int) (spectypes.MessageID, *spectypes.SignedSSVMessage) { + return dummyMsg(t, pkHex, height, spectypes.RoleCommittee) } -func dummySignSSVMessage(data []byte) ([256]byte, error) { - return [256]byte{}, nil +func dummySignSSVMessage(msg *spectypes.SSVMessage) ([]byte, error) { + // dummy 256-byte zero signature (repeating an empty slice would return an empty slice) + return bytes.Repeat([]byte{0}, 256), nil } diff --git a/network/p2p/p2p_validation_test.go b/network/p2p/p2p_validation_test.go index 8dd2a1f757..6681dd95dd 100644 ---
a/network/p2p/p2p_validation_test.go +++ b/network/p2p/p2p_validation_test.go @@ -52,9 +52,9 @@ func TestP2pNetwork_MessageValidation(t *testing.T) { // Create a MessageValidator to accept/reject/ignore messages according to their role type. const ( - acceptedRole = spectypes.BNRoleProposer - ignoredRole = spectypes.BNRoleAttester - rejectedRole = spectypes.BNRoleSyncCommittee + acceptedRole = spectypes.RoleProposer + ignoredRole = spectypes.RoleAggregator + rejectedRole = spectypes.RoleSyncCommitteeContribution ) messageValidators := make([]*MockMessageValidator, nodeCount) var mtx sync.Mutex @@ -69,16 +69,14 @@ func TestP2pNetwork_MessageValidation(t *testing.T) { peer := vNet.NodeByPeerID(p) signedSSVMsg := &spectypes.SignedSSVMessage{} require.NoError(t, signedSSVMsg.Decode(pmsg.GetData())) - msg, err := signedSSVMsg.GetSSVMessageFromData() - require.NoError(t, err) - decodedMsg, err := queue.DecodeSSVMessage(msg) + decodedMsg, err := queue.DecodeSignedSSVMessage(signedSSVMsg) require.NoError(t, err) pmsg.ValidatorData = decodedMsg mtx.Lock() // Validation according to role. var validation pubsub.ValidationResult - switch msg.MsgID.GetRoleType() { + switch signedSSVMsg.SSVMessage.MsgID.GetRoleType() { case acceptedRole: messageValidators[i].Accepted[peer.Index]++ messageValidators[i].TotalAccepted++ @@ -117,9 +115,9 @@ func TestP2pNetwork_MessageValidation(t *testing.T) { // Prepare a pool of broadcasters. mu := sync.Mutex{} height := atomic.Int64{} - roleBroadcasts := map[spectypes.BeaconRole]int{} + roleBroadcasts := map[spectypes.RunnerRole]int{} broadcasters := pool.New().WithErrors().WithContext(ctx) - broadcaster := func(node *VirtualNode, roles ...spectypes.BeaconRole) { + broadcaster := func(node *VirtualNode, roles ...spectypes.RunnerRole) { broadcasters.Go(func(ctx context.Context) error { for i := 0; i < 50; i++ { role := roles[i%len(roles)] @@ -267,17 +265,13 @@ type MockMessageValidator struct { } func (v *MockMessageValidator) ValidatorForTopic(topic string) func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { - return v.ValidatePubsubMessage + return v.Validate } -func (v *MockMessageValidator) ValidatePubsubMessage(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { +func (v *MockMessageValidator) Validate(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { return v.ValidateFunc(ctx, p, pmsg) } -func (v *MockMessageValidator) ValidateSSVMessage(ssvMessage *queue.DecodedSSVMessage) (*queue.DecodedSSVMessage, validation.Descriptor, error) { - panic("not implemented") // TODO: Implement -} - type NodeIndex int type VirtualNode struct { diff --git a/network/p2p/test_utils.go b/network/p2p/test_utils.go index 876cc3c4d7..148b55452b 100644 --- a/network/p2p/test_utils.go +++ b/network/p2p/test_utils.go @@ -12,6 +12,7 @@ import ( "go.uber.org/zap" "golang.org/x/sync/errgroup" + "github.com/ssvlabs/ssv/message/signatureverifier" "github.com/ssvlabs/ssv/message/validation" "github.com/ssvlabs/ssv/monitoring/metricsreporter" "github.com/ssvlabs/ssv/network" @@ -22,12 +23,14 @@ import ( "github.com/ssvlabs/ssv/network/testing" "github.com/ssvlabs/ssv/networkconfig" operatordatastore "github.com/ssvlabs/ssv/operator/datastore" + "github.com/ssvlabs/ssv/operator/duties/dutystore" + "github.com/ssvlabs/ssv/operator/storage" registrystorage "github.com/ssvlabs/ssv/registry/storage" + "github.com/ssvlabs/ssv/storage/basedb" + "github.com/ssvlabs/ssv/storage/kv" "github.com/ssvlabs/ssv/utils/format" 
) -// TODO: (Alan) might have to rename this file back to test_utils.go if non-test files require it. - // LocalNet holds the nodes in the local network type LocalNet struct { NodeKeys []testing.NodeKeys @@ -131,6 +134,19 @@ func (ln *LocalNet) NewTestP2pNetwork(ctx context.Context, nodeIndex int, keys t panic(err) } + db, err := kv.NewInMemory(logger, basedb.Options{}) + if err != nil { + return nil, err + } + + nodeStorage, err := storage.NewNodeStorage(logger, db) + if err != nil { + return nil, err + } + + dutyStore := dutystore.New() + signatureVerifier := signatureverifier.NewSignatureVerifier(nodeStorage) + cfg := NewNetConfig(keys, format.OperatorID(operatorPubkey), ln.Bootnode, testing.RandomTCPPort(12001, 12999), ln.udpRand.Next(13001, 13999), options.Nodes) cfg.Ctx = ctx cfg.Subnets = "00000000000000000000020000000000" // PAY ATTENTION: future test scenarios that use more than one eth validator will need this field to be set dynamically @@ -139,7 +155,12 @@ func (ln *LocalNet) NewTestP2pNetwork(ctx context.Context, nodeIndex int, keys t RegisteredOperatorPublicKeyPEMs: []string{}, } cfg.Metrics = nil - cfg.MessageValidator = nil //validation.NewMessageValidator(networkconfig.TestNetwork) + cfg.MessageValidator = validation.New( + networkconfig.TestNetwork, + nodeStorage.ValidatorStore(), + dutyStore, + signatureVerifier, + ) cfg.Network = networkconfig.TestNetwork if options.TotalValidators > 0 { cfg.GetValidatorStats = func() (uint64, uint64, uint64, error) { @@ -159,7 +180,13 @@ func (ln *LocalNet) NewTestP2pNetwork(ctx context.Context, nodeIndex int, keys t if options.MessageValidatorProvider != nil { cfg.MessageValidator = options.MessageValidatorProvider(nodeIndex) } else { - cfg.MessageValidator = nil //validation.NewMessageValidator(networkconfig.TestNetwork, validation.WithSelfAccept(selfPeerID, true)) + cfg.MessageValidator = validation.New( + networkconfig.TestNetwork, + nodeStorage.ValidatorStore(), + dutyStore, + signatureVerifier, + validation.WithSelfAccept(selfPeerID, true), + ) } if options.PeerScoreInspector != nil && options.PeerScoreInspectorInterval > 0 { diff --git a/network/peers/connections/filters.go b/network/peers/connections/filters.go index 56c561736b..162ca6e823 100644 --- a/network/peers/connections/filters.go +++ b/network/peers/connections/filters.go @@ -1,16 +1,12 @@ package connections import ( - "time" - "github.com/libp2p/go-libp2p/core/peer" "github.com/pkg/errors" "github.com/ssvlabs/ssv/network/records" ) -var AllowedDifference = 30 * time.Second - // NetworkIDFilter determines whether we will connect to the given node by the network ID func NetworkIDFilter(networkID string) HandshakeFilter { return func(sender peer.ID, ni *records.NodeInfo) error { diff --git a/network/peers/connections/handshaker.go b/network/peers/connections/handshaker.go index 66e277c645..1dd68a9d08 100644 --- a/network/peers/connections/handshaker.go +++ b/network/peers/connections/handshaker.go @@ -2,7 +2,6 @@ package connections import ( "context" - "strings" "time" libp2pnetwork "github.com/libp2p/go-libp2p/core/network" @@ -153,11 +152,6 @@ func (h *handshaker) verifyTheirNodeInfo(logger *zap.Logger, sender peer.ID, ni zap.String("networkID", ni.GetNodeInfo().NetworkID), ) - // TODO: (Alan) revert - if !strings.Contains(ni.Metadata.NodeVersion, "ALANTEST") { - return errors.New("non Alan node version is not supported") - } - return nil } diff --git a/network/topics/controller.go b/network/topics/controller.go index 2a2489570d..88c629ec19 100644 ---
a/network/topics/controller.go +++ b/network/topics/controller.go @@ -2,6 +2,7 @@ package topics import ( "context" + "fmt" "io" "strconv" "time" @@ -32,6 +33,8 @@ type Controller interface { Topics() []string // Broadcast publishes the message on the given topic Broadcast(topicName string, data []byte, timeout time.Duration) error + // UpdateScoreParams refreshes the score params for every subscribed topic + UpdateScoreParams(logger *zap.Logger) error io.Closer } @@ -104,6 +107,34 @@ func (ctrl *topicsCtrl) onNewTopic(logger *zap.Logger) onTopicJoined { } } +func (ctrl *topicsCtrl) UpdateScoreParams(logger *zap.Logger) error { + if ctrl.scoreParamsFactory == nil { + return fmt.Errorf("scoreParamsFactory is not set") + } + errs := "" + topics := ctrl.ps.GetTopics() + for _, topicName := range topics { + topic := ctrl.container.Get(topicName) + if topic == nil { + errs = errs + fmt.Sprintf("topic %s is not ready; ", topicName) + continue + } + p := ctrl.scoreParamsFactory(topicName) + if p == nil { + errs = errs + fmt.Sprintf("score params for topic %s is nil; ", topicName) + continue + } + if err := topic.SetScoreParams(p); err != nil { + errs = errs + fmt.Sprintf("could not set score params for topic %s: %v; ", topicName, err) + continue + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", errs) + } + return nil +} + // Close implements io.Closer func (ctrl *topicsCtrl) Close() error { topics := ctrl.ps.GetTopics() diff --git a/network/topics/params/message_rate.go b/network/topics/params/message_rate.go new file mode 100644 index 0000000000..b5e01bd480 --- /dev/null +++ b/network/topics/params/message_rate.go @@ -0,0 +1,135 @@ +package params + +import ( + "math" + + "github.com/ssvlabs/ssv/registry/storage" +) + +// Ethereum parameters +const ( + EthereumValidators = 1000000.0 // TODO: get from network? + SyncCommitteeSize = 512.0 // TODO: get from network? + EstimatedAttestationCommitteeSize = EthereumValidators / 2048.0 + AggregatorProbability = 16.0 / EstimatedAttestationCommitteeSize + ProposalProbability = 1.0 / EthereumValidators + SyncCommitteeProbability = SyncCommitteeSize / EthereumValidators + SyncCommitteeAggProb = SyncCommitteeProbability * 16.0 / (SyncCommitteeSize / 4.0) + MaxValidatorsPerCommittee = 560.0 + SlotsPerEpoch = 32.0 // TODO: get from network?
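+ // Worked example (approximate): with EthereumValidators = 1,000,000, EstimatedAttestationCommitteeSize ≈ 488.3, so AggregatorProbability ≈ 16/488.3 ≈ 0.033. + // A committee has at most one attestation duty per slot, so its attestation-induced committee duties per epoch are capped at SlotsPerEpoch: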
+ MaxAttestationDutiesPerEpochForCommittee = SlotsPerEpoch + SingleSCDutiesLimit = 0 +) + +// Expected number of messages per duty step + +func consensusMessages(n int) int { + return 1 + n + n + 2 // 1 Proposal + n Prepares + n Commits + 2 Decideds (average) +} + +func partialSignatureMessages(n int) int { + return n +} + +func dutyWithPreConsensus(n int) int { + // Pre-Consensus + Consensus + Post-Consensus + return partialSignatureMessages(n) + consensusMessages(n) + partialSignatureMessages(n) +} + +func dutyWithoutPreConsensus(n int) int { + // Consensus + Post-Consensus + return consensusMessages(n) + partialSignatureMessages(n) +} + +// Expected number of committee duties per epoch due to attestations +func expectedNumberOfCommitteeDutiesPerEpochDueToAttestation(numValidators int) float64 { + k := float64(numValidators) + n := SlotsPerEpoch + + // Probability that all validators are not assigned to slot i + probabilityAllNotOnSlotI := math.Pow((n-1)/n, k) + // Probability that at least one validator is assigned to slot i + probabilityAtLeastOneOnSlotI := 1 - probabilityAllNotOnSlotI + // Expected value for duty existence ({0,1}) on slot i + expectedDutyExistenceOnSlotI := 0*probabilityAllNotOnSlotI + 1*probabilityAtLeastOneOnSlotI + // Expected number of duties per epoch + expectedNumberOfDutiesPerEpoch := n * expectedDutyExistenceOnSlotI + + return expectedNumberOfDutiesPerEpoch +} + +// Expected committee duties per epoch that are due to only sync committee beacon duties +func expectedSingleSCCommitteeDutiesPerEpoch(numValidators int) float64 { + // Probability that a validator is not in sync committee + chanceOfNotBeingInSyncCommittee := 1.0 - SyncCommitteeProbability + // Probability that all validators are not in sync committee + chanceThatAllValidatorsAreNotInSyncCommittee := math.Pow(chanceOfNotBeingInSyncCommittee, float64(numValidators)) + // Probability that at least one validator is in sync committee + chanceOfAtLeastOneValidatorBeingInSyncCommittee := 1.0 - chanceThatAllValidatorsAreNotInSyncCommittee + + // Expected number of slots with no attestation duty + expectedSlotsWithNoDuty := SlotsPerEpoch - expectedNumberOfCommitteeDutiesPerEpochDueToAttestationCached(numValidators) + + // Expected number of committee duties per epoch created due to only sync committee duties + return chanceOfAtLeastOneValidatorBeingInSyncCommittee * expectedSlotsWithNoDuty +} + +// Cache costly calculations + +func generateCachedValues(generator func(int) float64, threshold int) []float64 { + results := make([]float64, 0, threshold) + + for i := 0; i < threshold; i++ { + results = append(results, generator(i)) + } + + return results +} + +var generatedExpectedNumberOfCommitteeDutiesPerEpochDueToAttestation = generateCachedValues(expectedNumberOfCommitteeDutiesPerEpochDueToAttestation, MaxValidatorsPerCommittee) + +func expectedNumberOfCommitteeDutiesPerEpochDueToAttestationCached(numValidators int) float64 { + // If the committee has more validators than our computed cache, we return the limit value + if numValidators >= MaxValidatorsPerCommittee { + return MaxAttestationDutiesPerEpochForCommittee + } + + return generatedExpectedNumberOfCommitteeDutiesPerEpochDueToAttestation[numValidators] +} + +var generatedExpectedSingleSCCommitteeDutiesPerEpoch = generateCachedValues(expectedSingleSCCommitteeDutiesPerEpoch, MaxValidatorsPerCommittee) +
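+// At the cache threshold (MaxValidatorsPerCommittee = 560), an attestation duty is expected in virtually every slot, since 32*(1-(31/32)^560) is effectively 32; hence the cached getters fall back to MaxAttestationDutiesPerEpochForCommittee and SingleSCDutiesLimit for larger committees.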
+func expectedSingleSCCommitteeDutiesPerEpochCached(numValidators int) float64 { + // If the committee has more validators than our computed cache, we return the limit value + if numValidators >= MaxValidatorsPerCommittee { + return SingleSCDutiesLimit + } + + return generatedExpectedSingleSCCommitteeDutiesPerEpoch[numValidators] +} + +// Calculates the message rate for a topic given its committees' configurations (number of operators and number of validators) +func calculateMessageRateForTopic(committees []*storage.Committee) float64 { + if len(committees) == 0 { + return 0 + } + + totalMsgRate := 0.0 + + for _, committee := range committees { + committeeSize := len(committee.Operators) + numValidators := len(committee.Validators) + + totalMsgRate += expectedNumberOfCommitteeDutiesPerEpochDueToAttestationCached(numValidators) * float64(dutyWithoutPreConsensus(committeeSize)) + totalMsgRate += expectedSingleSCCommitteeDutiesPerEpochCached(numValidators) * float64(dutyWithoutPreConsensus(committeeSize)) + totalMsgRate += float64(numValidators) * AggregatorProbability * float64(dutyWithPreConsensus(committeeSize)) + totalMsgRate += float64(numValidators) * SlotsPerEpoch * ProposalProbability * float64(dutyWithPreConsensus(committeeSize)) + totalMsgRate += float64(numValidators) * SlotsPerEpoch * SyncCommitteeAggProb * float64(dutyWithPreConsensus(committeeSize)) + } + + // Convert the per-epoch message count into a per-second rate + totalEpochSeconds := float64(SlotsPerEpoch * 12) + totalMsgRate = totalMsgRate / totalEpochSeconds + + return totalMsgRate +} diff --git a/network/topics/params/message_rate_test.go b/network/topics/params/message_rate_test.go new file mode 100644 index 0000000000..eef3a0b0e8 --- /dev/null +++ b/network/topics/params/message_rate_test.go @@ -0,0 +1,85 @@ +package params + +import ( + "testing" + + "github.com/attestantio/go-eth2-client/spec/phase0" + spectypes "github.com/ssvlabs/ssv-spec/types" + "github.com/ssvlabs/ssv/protocol/v2/types" + "github.com/ssvlabs/ssv/registry/storage" + "github.com/stretchr/testify/require" +) + +func createTestingValidators(n int) []*types.SSVShare { + ret := make([]*types.SSVShare, 0) + for i := 1; i <= n; i++ { + ret = append(ret, &types.SSVShare{ + Share: spectypes.Share{ + ValidatorIndex: phase0.ValidatorIndex(i), + }, + }) + } + return ret +} + +func createTestingSingleCommittees(n int) []*storage.Committee { + ret := make([]*storage.Committee, 0) + for i := 0; i < n; i++ { + opRef := uint64(i*4 + 1) + ret = append(ret, &storage.Committee{ + Operators: []uint64{opRef, opRef + 1, opRef + 2, opRef + 3}, + Validators: createTestingValidators(1), + }) + } + return ret +} + +func TestCalculateMessageRateForTopic(t *testing.T) { + type args struct { + committees []*storage.Committee + } + tests := []struct { + name string + args args + want float64 + }{ + { + name: "Case 1", + args: args{ + committees: []*storage.Committee{ + { + Operators: []uint64{1, 2, 3, 4}, + Validators: createTestingValidators(500), + }, + { + Operators: []uint64{5, 6, 7, 8}, + Validators: createTestingValidators(500), + }, + }, + }, + want: 4.2242497530608745, + }, + { + name: "Case 2", + args: args{ + committees: createTestingSingleCommittees(10000), + }, + want: 414.1089067500509, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + msgRate := calculateMessageRateForTopic(tt.args.committees) + require.InDelta(t, tt.want, msgRate, tt.want*0.001) + }) + } +} diff --git
a/network/topics/params/scores_test.go b/network/topics/params/scores_test.go index a51e25199d..cfd0df6613 100644 --- a/network/topics/params/scores_test.go +++ b/network/topics/params/scores_test.go @@ -19,24 +19,27 @@ func TestTopicScoreParams(t *testing.T) { { "subnet topic 1k validators", func() *Options { - opts := NewSubnetTopicOpts(1000, 128) - return &opts + validators := 1000 + opts := NewSubnetTopicOpts(validators, 128, createTestingSingleCommittees(validators)) + return opts }, nil, }, { "subnet topic 10k validators", func() *Options { - opts := NewSubnetTopicOpts(10000, 128) - return &opts + validators := 10_000 + opts := NewSubnetTopicOpts(validators, 128, createTestingSingleCommittees(validators)) + return opts }, nil, }, { "subnet topic 51k validators", func() *Options { - opts := NewSubnetTopicOpts(51000, 128) - return &opts + validators := 51_000 + opts := NewSubnetTopicOpts(validators, 128, createTestingSingleCommittees(validators)) + return opts }, nil, }, @@ -50,7 +53,7 @@ func TestTopicScoreParams(t *testing.T) { raw, err := json.Marshal(&opts) require.NoError(t, err) t.Logf("[%s] using opts:\n%s", test.name, string(raw)) - topicScoreParams, err := TopicParams(*opts) + topicScoreParams, err := TopicParams(opts) require.NoError(t, err) require.NotNil(t, topicScoreParams) // raw, err = json.MarshalIndent(topicScoreParams, "", "\t") diff --git a/network/topics/params/topic_score.go b/network/topics/params/topic_score.go index 89c801edf6..bc3f92e1bb 100644 --- a/network/topics/params/topic_score.go +++ b/network/topics/params/topic_score.go @@ -6,6 +6,7 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/pkg/errors" + "github.com/ssvlabs/ssv/registry/storage" ) const ( @@ -35,11 +36,6 @@ const ( // P4 invalidMessageDecayEpochs = time.Duration(100) maxInvalidMessagesAllowed = 20 - - // Message rate - clusterConsensusReductionFactor = 0.15 - messageRatePerValidatorPerSecond = 600.0 / 10000.0 - msgsPerValidatorPerSecond = messageRatePerValidatorPerSecond * clusterConsensusReductionFactor ) var ( @@ -161,8 +157,8 @@ func (o *Options) maxScore() float64 { } // NewOpts creates new TopicOpts instance -func NewOpts(activeValidators, subnets int) Options { - return Options{ +func NewOpts(activeValidators, subnets int) *Options { + return &Options{ Network: NetworkOpts{ ActiveValidators: activeValidators, Subnets: subnets, @@ -172,8 +168,7 @@ func NewOpts(activeValidators, subnets int) Options { } // NewSubnetTopicOpts creates new TopicOpts for a subnet topic -func NewSubnetTopicOpts(activeValidators, subnets int) Options { - +func NewSubnetTopicOpts(activeValidators, subnets int, committees []*storage.Committee) *Options { // Create options with default values opts := NewOpts(activeValidators, subnets) opts.defaults() @@ -181,9 +176,8 @@ func NewSubnetTopicOpts(activeValidators, subnets int) Options { // Set topic weight with equal weights opts.Topic.TopicWeight = opts.Network.TotalTopicsWeight / float64(opts.Network.Subnets) - // Set expected message rate based on stage metrics - validatorsPerSubnet := float64(opts.Network.ActiveValidators) / float64(opts.Network.Subnets) - opts.Topic.ExpectedMsgRate = validatorsPerSubnet * msgsPerValidatorPerSecond + // Set the expected message rate for the topic + opts.Topic.ExpectedMsgRate = calculateMessageRateForTopic(committees) return opts } @@ -191,7 +185,7 @@ func NewSubnetTopicOpts(activeValidators, subnets int) Options { // TopicParams creates pubsub.TopicScoreParams from the given TopicOpts // implementation is based on 
ETH2.0, with alignments to ssv: // https://gist.github.com/blacktemplar/5c1862cb3f0e32a1a7fb0b25e79e6e2c -func TopicParams(opts Options) (*pubsub.TopicScoreParams, error) { +func TopicParams(opts *Options) (*pubsub.TopicScoreParams, error) { // Validate options if err := opts.validate(); err != nil { return nil, err diff --git a/network/topics/pubsub.go b/network/topics/pubsub.go index 8ef1ab725c..58904f6562 100644 --- a/network/topics/pubsub.go +++ b/network/topics/pubsub.go @@ -17,6 +17,7 @@ import ( "github.com/ssvlabs/ssv/network/commons" "github.com/ssvlabs/ssv/network/peers" "github.com/ssvlabs/ssv/network/topics/params" + "github.com/ssvlabs/ssv/registry/storage" ) const ( @@ -107,8 +108,12 @@ func (cfg *PubSubConfig) initScoring() { } } +type CommitteesProvider interface { + Committees() []*storage.Committee +} + // NewPubSub creates a new pubsub router and the necessary components -func NewPubSub(ctx context.Context, logger *zap.Logger, cfg *PubSubConfig, metrics Metrics) (*pubsub.PubSub, Controller, error) { +func NewPubSub(ctx context.Context, logger *zap.Logger, cfg *PubSubConfig, metrics Metrics, committeesProvider CommitteesProvider) (*pubsub.PubSub, Controller, error) { if err := cfg.init(); err != nil { return nil, nil, err } @@ -164,7 +169,7 @@ func NewPubSub(ctx context.Context, logger *zap.Logger, cfg *PubSubConfig, metri return 100, 100, 10, nil } } - topicScoreFactory = topicScoreParams(logger, cfg) + topicScoreFactory = topicScoreParams(logger, cfg, committeesProvider) } if cfg.MsgIDHandler != nil { diff --git a/network/topics/scoring.go b/network/topics/scoring.go index a9be3d569a..129185edd8 100644 --- a/network/topics/scoring.go +++ b/network/topics/scoring.go @@ -6,6 +6,7 @@ import ( "github.com/ssvlabs/ssv/logging/fields" "github.com/ssvlabs/ssv/network/commons" + "github.com/ssvlabs/ssv/registry/storage" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/peer" @@ -95,8 +96,10 @@ func scoreInspector(logger *zap.Logger, scoreIdx peers.ScoreIndex, logFrequency } // topicScoreParams factory for creating scoring params for topics -func topicScoreParams(logger *zap.Logger, cfg *PubSubConfig) func(string) *pubsub.TopicScoreParams { +func topicScoreParams(logger *zap.Logger, cfg *PubSubConfig, committeesProvider CommitteesProvider) func(string) *pubsub.TopicScoreParams { return func(t string) *pubsub.TopicScoreParams { + + // Get validator stats totalValidators, activeValidators, myValidators, err := cfg.GetValidatorStats() if err != nil { logger.Debug("could not read stats: active validators") @@ -105,7 +108,24 @@ func topicScoreParams(logger *zap.Logger, cfg *PubSubConfig) func(string) *pubsu logger := logger.With(zap.String("topic", t), zap.Uint64("totalValidators", totalValidators), zap.Uint64("activeValidators", activeValidators), zap.Uint64("myValidators", myValidators)) logger.Debug("got validator stats for score params") - opts := params.NewSubnetTopicOpts(int(totalValidators), commons.Subnets()) + + // Get committees + committees := committeesProvider.Committees() + topicCommittees := filterCommitteesForTopic(t, committees) + + // Log + validatorsInTopic := 0 + for _, committee := range topicCommittees { + validatorsInTopic += len(committee.Validators) + } + committeesInTopic := len(topicCommittees) + logger = logger.With(zap.Int("committees in topic", committeesInTopic), zap.Int("validators in topic", validatorsInTopic)) + logger.Debug("got filtered committees for score params") + + // Create topic options + opts := 
params.NewSubnetTopicOpts(int(totalValidators), commons.Subnets(), topicCommittees) + + // Generate topic parameters tp, err := params.TopicParams(opts) if err != nil { logger.Debug("ignoring topic score params", zap.Error(err)) @@ -114,3 +134,22 @@ func topicScoreParams(logger *zap.Logger, cfg *PubSubConfig) func(string) *pubsu return tp } } + +// Returns a new committee list with only the committees that belong to the given topic +func filterCommitteesForTopic(topic string, committees []*storage.Committee) []*storage.Committee { + + topicCommittees := make([]*storage.Committee, 0) + + for _, committee := range committees { + // Get topic + subnet := commons.CommitteeSubnet(committee.ID) + committeeTopic := commons.SubnetTopicID(subnet) + committeeTopicFullName := commons.GetTopicFullName(committeeTopic) + + // If it belongs to the topic, add it + if topic == committeeTopicFullName { + topicCommittees = append(topicCommittees, committee) + } + } + return topicCommittees +} diff --git a/networkconfig/config.go b/networkconfig/config.go index 3485c8bbf9..65a93396b2 100644 --- a/networkconfig/config.go +++ b/networkconfig/config.go @@ -6,7 +6,7 @@ import ( "math/big" "time" - spec "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/ssvlabs/ssv-spec/types" "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon" @@ -34,10 +34,12 @@ type NetworkConfig struct { Name string Beacon beacon.BeaconNetwork Domain spectypes.DomainType - GenesisEpoch spec.Epoch + GenesisEpoch phase0.Epoch RegistrySyncOffset *big.Int RegistryContractAddr string // TODO: ethcommon.Address Bootnodes []string + + AlanForkEpoch phase0.Epoch } func (n NetworkConfig) String() string { @@ -49,6 +51,11 @@ func (n NetworkConfig) String() string { return string(b) } +func (n NetworkConfig) AlanForked(slot phase0.Slot) bool { + epoch := n.Beacon.EstimatedEpochAtSlot(slot) + return epoch >= n.AlanForkEpoch +} + // ForkVersion returns the fork version of the network. 
func (n NetworkConfig) ForkVersion() [4]byte { return n.Beacon.ForkVersion() diff --git a/networkconfig/test-network.go b/networkconfig/test-network.go index 335a6a868a..81b334683d 100644 --- a/networkconfig/test-network.go +++ b/networkconfig/test-network.go @@ -10,7 +10,7 @@ import ( var TestNetwork = NetworkConfig{ Name: "testnet", - Beacon: beacon.NewNetwork(spectypes.PraterNetwork), + Beacon: beacon.NewNetwork(spectypes.BeaconTestNetwork), Domain: spectypes.JatoTestnet, GenesisEpoch: 152834, RegistrySyncOffset: new(big.Int).SetInt64(9015219), diff --git a/operator/duties/attester.go b/operator/duties/attester.go index a33c96ad4f..677fdab979 100644 --- a/operator/duties/attester.go +++ b/operator/duties/attester.go @@ -7,6 +7,7 @@ import ( eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec/phase0" + genesisspectypes "github.com/ssvlabs/ssv-spec-pre-cc/types" spectypes "github.com/ssvlabs/ssv-spec/types" "go.uber.org/zap" @@ -27,8 +28,6 @@ func NewAttesterHandler(duties *dutystore.Duties[eth2apiv1.AttesterDuty]) *Attes duties: duties, } h.fetchCurrentEpoch = true - // TODO: (Alan) genesis support - //h.fetchFirst = true return h } @@ -67,13 +66,15 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { h.fetchNextEpoch = true + next := h.ticker.Next() for { select { case <-ctx.Done(): return - case <-h.ticker.Next(): + case <-next: slot := h.ticker.Slot() + next = h.ticker.Next() currentEpoch := h.network.Beacon.EstimatedEpochAtSlot(slot) buildStr := fmt.Sprintf("e%v-s%v-#%v", currentEpoch, slot, slot%32+1) h.logger.Debug("🛠 ticker event", zap.String("epoch_slot_pos", buildStr)) @@ -82,19 +83,7 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { if h.indicesChanged { h.duties.ResetEpoch(currentEpoch) h.indicesChanged = false - // TODO: (Alan) genesis support - //h.processFetching(ctx, currentEpoch, slot) - //h.processExecution(currentEpoch, slot) } - // TODO: (Alan) genesis support - //else { - //h.processExecution(currentEpoch, slot) - //if h.indicesChanged { - // h.duties.ResetEpoch(currentEpoch) - // h.indicesChanged = false - //} - //h.processFetching(ctx, currentEpoch, slot) - //} h.processFetching(ctx, currentEpoch, slot) slotsPerEpoch := h.network.Beacon.SlotsPerEpoch() @@ -118,15 +107,12 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { // reset current epoch duties if reorgEvent.Previous { h.duties.ResetEpoch(currentEpoch) - // TODO: (Alan) genesis support - //h.fetchFirst = true h.fetchCurrentEpoch = true if h.shouldFetchNexEpoch(reorgEvent.Slot) { h.duties.ResetEpoch(currentEpoch + 1) h.fetchNextEpoch = true } - // TODO: (Alan) genesis support h.processFetching(ctx, currentEpoch, reorgEvent.Slot) } else if reorgEvent.Current { // reset & re-fetch next epoch duties if in appropriate slot range, @@ -191,17 +177,27 @@ func (h *AttesterHandler) processExecution(epoch phase0.Epoch, slot phase0.Slot) return } - // range over duties and execute - toExecute := make([]*spectypes.BeaconDuty, 0, len(duties)*2) + if !h.network.AlanForked(slot) { + toExecute := make([]*genesisspectypes.Duty, 0, len(duties)*2) + for _, d := range duties { + if h.shouldExecute(d) { + toExecute = append(toExecute, h.toGenesisSpecDuty(d, genesisspectypes.BNRoleAttester)) + toExecute = append(toExecute, h.toGenesisSpecDuty(d, genesisspectypes.BNRoleAggregator)) + } + } + + h.dutiesExecutor.ExecuteGenesisDuties(h.logger, toExecute) + return + } + + toExecute := make([]*spectypes.BeaconDuty, 0, len(duties)) for _, d := range 
duties { if h.shouldExecute(d) { - // TODO: (Alan) genesis support - //toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleAttester)) toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleAggregator)) } } - h.executeDuties(h.logger, toExecute) + h.dutiesExecutor.ExecuteDuties(h.logger, toExecute) } func (h *AttesterHandler) fetchAndProcessDuties(ctx context.Context, epoch phase0.Epoch) error { @@ -209,6 +205,7 @@ func (h *AttesterHandler) fetchAndProcessDuties(ctx context.Context, epoch phase indices := indicesFromShares(h.validatorProvider.SelfParticipatingValidators(epoch)) if len(indices) == 0 { + h.logger.Debug("no active validators for epoch", fields.Epoch(epoch)) return nil } @@ -262,6 +259,19 @@ func (h *AttesterHandler) toSpecDuty(duty *eth2apiv1.AttesterDuty, role spectype } } +func (h *AttesterHandler) toGenesisSpecDuty(duty *eth2apiv1.AttesterDuty, role genesisspectypes.BeaconRole) *genesisspectypes.Duty { + return &genesisspectypes.Duty{ + Type: role, + PubKey: duty.PubKey, + Slot: duty.Slot, + ValidatorIndex: duty.ValidatorIndex, + CommitteeIndex: duty.CommitteeIndex, + CommitteeLength: duty.CommitteeLength, + CommitteesAtSlot: duty.CommitteesAtSlot, + ValidatorCommitteeIndex: duty.ValidatorCommitteeIndex, + } +} + func (h *AttesterHandler) shouldExecute(duty *eth2apiv1.AttesterDuty) bool { currentSlot := h.network.Beacon.EstimatedCurrentSlot() // execute task if slot already began and not pass 1 epoch diff --git a/operator/duties/attester_genesis_test.go b/operator/duties/attester_genesis_test.go new file mode 100644 index 0000000000..5ec5af2913 --- /dev/null +++ b/operator/duties/attester_genesis_test.go @@ -0,0 +1,972 @@ +package duties + +import ( + "context" + "testing" + "time" + + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/cornelk/hashmap" + genesisspectypes "github.com/ssvlabs/ssv-spec-pre-cc/types" + spectypes "github.com/ssvlabs/ssv-spec/types" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + "github.com/ssvlabs/ssv/beacon/goclient" + "github.com/ssvlabs/ssv/operator/duties/dutystore" + "github.com/ssvlabs/ssv/protocol/v2/types" +) + +func setupAttesterGenesisDutiesMock( + s *Scheduler, + dutiesMap *hashmap.Map[phase0.Epoch, []*eth2apiv1.AttesterDuty], + waitForDuties *SafeValue[bool], +) (chan struct{}, chan []*genesisspectypes.Duty) { + fetchDutiesCall := make(chan struct{}) + executeDutiesCall := make(chan []*genesisspectypes.Duty) + + s.beaconNode.(*MockBeaconNode).EXPECT().AttesterDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*eth2apiv1.AttesterDuty, error) { + if waitForDuties.Get() { + fetchDutiesCall <- struct{}{} + } + duties, _ := dutiesMap.Get(epoch) + return duties, nil + }).AnyTimes() + + getShares := func(epoch phase0.Epoch) []*types.SSVShare { + uniqueIndices := make(map[phase0.ValidatorIndex]bool) + + duties, _ := dutiesMap.Get(epoch) + for _, d := range duties { + uniqueIndices[d.ValidatorIndex] = true + } + + shares := make([]*types.SSVShare, 0, len(uniqueIndices)) + for index := range uniqueIndices { + share := &types.SSVShare{ + Share: spectypes.Share{ + ValidatorIndex: index, + }, + } + shares = append(shares, share) + } + + return shares + } + s.validatorProvider.(*MockValidatorProvider).EXPECT().SelfParticipatingValidators(gomock.Any()).DoAndReturn(getShares).AnyTimes() + 
s.validatorProvider.(*MockValidatorProvider).EXPECT().ParticipatingValidators(gomock.Any()).DoAndReturn(getShares).AnyTimes() + + s.beaconNode.(*MockBeaconNode).EXPECT().SubmitBeaconCommitteeSubscriptions(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + + return fetchDutiesCall, executeDutiesCall +} + +func expectedExecutedGenesisAttesterDuties(handler *AttesterHandler, duties []*eth2apiv1.AttesterDuty) []*genesisspectypes.Duty { + expectedDuties := make([]*genesisspectypes.Duty, 0) + for _, d := range duties { + expectedDuties = append(expectedDuties, handler.toGenesisSpecDuty(d, genesisspectypes.BNRoleAttester)) + expectedDuties = append(expectedDuties, handler.toGenesisSpecDuty(d, genesisspectypes.BNRoleAggregator)) + } + return expectedDuties +} + +func TestScheduler_Attester_Genesis_Same_Slot(t *testing.T) { + var ( + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) + currentSlot = &SafeValue[phase0.Slot]{} + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() + waitForDuties = &SafeValue[bool]{} + forkEpoch = goclient.FarFutureEpoch + ) + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 3}, + Slot: phase0.Slot(1), + ValidatorIndex: phase0.ValidatorIndex(1), + }, + }) + currentSlot.Set(phase0.Slot(1)) + + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) + fetchDutiesCall, executeDutiesCall := setupAttesterGenesisDutiesMock(scheduler, dutiesMap, waitForDuties) + startFn() + + duties, _ := dutiesMap.Get(phase0.Epoch(0)) + expected := expectedExecutedGenesisAttesterDuties(handler, duties) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + startTime := time.Now() + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + require.Less(t, scheduler.network.Beacon.SlotDurationSec()/3, time.Since(startTime)) + + // Stop scheduler & wait for graceful exit. 
+ cancel() + require.NoError(t, schedulerPool.Wait()) +} + +func TestScheduler_Attester_Genesis_Diff_Slots(t *testing.T) { + var ( + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) + currentSlot = &SafeValue[phase0.Slot]{} + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() + waitForDuties = &SafeValue[bool]{} + forkEpoch = goclient.FarFutureEpoch + ) + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 3}, + Slot: phase0.Slot(2), + ValidatorIndex: phase0.ValidatorIndex(1), + }, + }) + currentSlot.Set(phase0.Slot(0)) + + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) + fetchDutiesCall, executeDutiesCall := setupAttesterGenesisDutiesMock(scheduler, dutiesMap, waitForDuties) + startFn() + + ticker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + currentSlot.Set(phase0.Slot(1)) + ticker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + currentSlot.Set(phase0.Slot(2)) + duties, _ := dutiesMap.Get(phase0.Epoch(0)) + expected := expectedExecutedGenesisAttesterDuties(handler, duties) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + // Stop scheduler & wait for graceful exit. + cancel() + require.NoError(t, schedulerPool.Wait()) +} + +func TestScheduler_Attester_Genesis_Indices_Changed(t *testing.T) { + var ( + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) + currentSlot = &SafeValue[phase0.Slot]{} + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() + waitForDuties = &SafeValue[bool]{} + forkEpoch = goclient.FarFutureEpoch + ) + currentSlot.Set(phase0.Slot(0)) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) + fetchDutiesCall, executeDutiesCall := setupAttesterGenesisDutiesMock(scheduler, dutiesMap, waitForDuties) + startFn() + + // STEP 1: wait for no action to be taken + mockTicker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 2: trigger a change in active indices + scheduler.indicesChg <- struct{}{} + // no execution should happen in slot 0 + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 3}, + Slot: phase0.Slot(0), + ValidatorIndex: phase0.ValidatorIndex(1), + }, + { + PubKey: phase0.BLSPubKey{1, 2, 4}, + Slot: phase0.Slot(1), + ValidatorIndex: phase0.ValidatorIndex(2), + }, + { + PubKey: phase0.BLSPubKey{1, 2, 5}, + Slot: phase0.Slot(2), + ValidatorIndex: phase0.ValidatorIndex(3), + }, + }) + + // STEP 3: wait for attester duties to be fetched again + currentSlot.Set(phase0.Slot(1)) + mockTicker.Send(currentSlot.Get()) + waitForDuties.Set(true) + waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + // no execution should happen in slot 1 + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 4: wait for attester duties to be executed + currentSlot.Set(phase0.Slot(2)) + duties, _ := dutiesMap.Get(phase0.Epoch(0)) + expected := 
expectedExecutedGenesisAttesterDuties(handler, []*eth2apiv1.AttesterDuty{duties[2]}) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + mockTicker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + // Stop scheduler & wait for graceful exit. + cancel() + require.NoError(t, schedulerPool.Wait()) +} + +func TestScheduler_Attester_Genesis_Multiple_Indices_Changed_Same_Slot(t *testing.T) { + var ( + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) + currentSlot = &SafeValue[phase0.Slot]{} + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() + waitForDuties = &SafeValue[bool]{} + forkEpoch = goclient.FarFutureEpoch + ) + currentSlot.Set(phase0.Slot(0)) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) + fetchDutiesCall, executeDutiesCall := setupAttesterGenesisDutiesMock(scheduler, dutiesMap, waitForDuties) + startFn() + + // STEP 1: wait for no action to be taken + mockTicker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 2: wait for no action to be taken + currentSlot.Set(phase0.Slot(1)) + mockTicker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 3: trigger a change in active indices + scheduler.indicesChg <- struct{}{} + duties, _ := dutiesMap.Get(phase0.Epoch(0)) + dutiesMap.Set(phase0.Epoch(0), append(duties, &eth2apiv1.AttesterDuty{ + PubKey: phase0.BLSPubKey{1, 2, 3}, + Slot: phase0.Slot(3), + ValidatorIndex: phase0.ValidatorIndex(1), + })) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 4: trigger a change in active indices in the same slot + scheduler.indicesChg <- struct{}{} + duties, _ = dutiesMap.Get(phase0.Epoch(0)) + dutiesMap.Set(phase0.Epoch(0), append(duties, &eth2apiv1.AttesterDuty{ + PubKey: phase0.BLSPubKey{1, 2, 4}, + Slot: phase0.Slot(4), + ValidatorIndex: phase0.ValidatorIndex(2), + })) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 5: wait for attester duties to be fetched + currentSlot.Set(phase0.Slot(2)) + mockTicker.Send(currentSlot.Get()) + waitForDuties.Set(true) + waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 6: wait for attester duties to be executed + currentSlot.Set(phase0.Slot(3)) + duties, _ = dutiesMap.Get(phase0.Epoch(0)) + expected := expectedExecutedGenesisAttesterDuties(handler, []*eth2apiv1.AttesterDuty{duties[0]}) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + mockTicker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + // STEP 7: wait for attester duties to be executed + currentSlot.Set(phase0.Slot(4)) + duties, _ = dutiesMap.Get(phase0.Epoch(0)) + expected = expectedExecutedGenesisAttesterDuties(handler, []*eth2apiv1.AttesterDuty{duties[1]}) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + mockTicker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + // Stop scheduler & wait for graceful exit. + cancel() + require.NoError(t, schedulerPool.Wait()) +} +
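+// NOTE: forkEpoch is pinned to goclient.FarFutureEpoch throughout these tests, so AlanForked(slot) should stay false and duties are expected to execute through the pre-fork (genesis) path via ExecuteGenesisDuties. +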
+// reorg previous dependent root changed +func TestScheduler_Attester_Genesis_Reorg_Previous_Epoch_Transition(t *testing.T) { + var ( + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) + currentSlot = &SafeValue[phase0.Slot]{} + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() + waitForDuties = &SafeValue[bool]{} + forkEpoch = goclient.FarFutureEpoch + ) + currentSlot.Set(phase0.Slot(63)) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) + fetchDutiesCall, executeDutiesCall := setupAttesterGenesisDutiesMock(scheduler, dutiesMap, waitForDuties) + startFn() + + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 3}, + Slot: phase0.Slot(66), + ValidatorIndex: phase0.ValidatorIndex(1), + }, + }) + + // STEP 1: wait for attester duties to be fetched for next epoch + mockTicker.Send(currentSlot.Get()) + waitForDuties.Set(true) + waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 2: trigger head event + e := &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ + Slot: currentSlot.Get(), + CurrentDutyDependentRoot: phase0.Root{0x01}, + PreviousDutyDependentRoot: phase0.Root{0x01}, + }, + } + scheduler.HandleHeadEvent(logger)(e) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 3: Ticker with no action + currentSlot.Set(phase0.Slot(64)) + mockTicker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 4: trigger reorg on epoch transition + e = &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ + Slot: currentSlot.Get(), + PreviousDutyDependentRoot: phase0.Root{0x02}, + }, + } + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 3}, + Slot: phase0.Slot(67), + ValidatorIndex: phase0.ValidatorIndex(1), + }, + }) + scheduler.HandleHeadEvent(logger)(e) + waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 5: wait for attester duties to be fetched again for the current epoch + currentSlot.Set(phase0.Slot(65)) + mockTicker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 6: The first assigned duty should not be executed + currentSlot.Set(phase0.Slot(66)) + mockTicker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 7: The second assigned duty should be executed + currentSlot.Set(phase0.Slot(67)) + duties, _ := dutiesMap.Get(phase0.Epoch(2)) + expected := expectedExecutedGenesisAttesterDuties(handler, duties) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + mockTicker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + // Stop scheduler & wait for graceful exit.
+ cancel() + require.NoError(t, schedulerPool.Wait()) +} + +// reorg previous dependent root changed and the indices changed as well +func TestScheduler_Attester_Genesis_Reorg_Previous_Epoch_Transition_Indices_Changed(t *testing.T) { + var ( + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) + currentSlot = &SafeValue[phase0.Slot]{} + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() + waitForDuties = &SafeValue[bool]{} + forkEpoch = goclient.FarFutureEpoch + ) + currentSlot.Set(phase0.Slot(63)) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) + fetchDutiesCall, executeDutiesCall := setupAttesterGenesisDutiesMock(scheduler, dutiesMap, waitForDuties) + startFn() + + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 3}, + Slot: phase0.Slot(66), + ValidatorIndex: phase0.ValidatorIndex(1), + }, + }) + + // STEP 1: wait for attester duties to be fetched for next epoch + mockTicker.Send(currentSlot.Get()) + waitForDuties.Set(true) + waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 2: trigger head event + e := &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ + Slot: currentSlot.Get(), + CurrentDutyDependentRoot: phase0.Root{0x01}, + PreviousDutyDependentRoot: phase0.Root{0x01}, + }, + } + scheduler.HandleHeadEvent(logger)(e) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 3: Ticker with no action + currentSlot.Set(phase0.Slot(64)) + mockTicker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 4: trigger reorg on epoch transition + e = &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ + Slot: currentSlot.Get(), + PreviousDutyDependentRoot: phase0.Root{0x02}, + }, + } + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 3}, + Slot: phase0.Slot(67), + ValidatorIndex: phase0.ValidatorIndex(1), + }, + }) + scheduler.HandleHeadEvent(logger)(e) + waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 5: trigger indices change + scheduler.indicesChg <- struct{}{} + duties, _ := dutiesMap.Get(phase0.Epoch(2)) + dutiesMap.Set(phase0.Epoch(2), append(duties, &eth2apiv1.AttesterDuty{ + PubKey: phase0.BLSPubKey{1, 2, 4}, + Slot: phase0.Slot(67), + ValidatorIndex: phase0.ValidatorIndex(2), + })) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 6: wait for attester duties to be fetched again for the current epoch + currentSlot.Set(phase0.Slot(65)) + mockTicker.Send(currentSlot.Get()) + waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 7: The first assigned duty should not be executed + currentSlot.Set(phase0.Slot(66)) + mockTicker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 8: The second assigned duty should be executed + currentSlot.Set(phase0.Slot(67)) + duties, _ = dutiesMap.Get(phase0.Epoch(2)) + expected := expectedExecutedGenesisAttesterDuties(handler, duties) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + mockTicker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout,
expected) + + // Stop scheduler & wait for graceful exit. + cancel() + require.NoError(t, schedulerPool.Wait()) +} + +// reorg previous dependent root changed +func TestScheduler_Attester_Genesis_Reorg_Previous(t *testing.T) { + var ( + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) + currentSlot = &SafeValue[phase0.Slot]{} + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() + waitForDuties = &SafeValue[bool]{} + forkEpoch = goclient.FarFutureEpoch + ) + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 3}, + Slot: phase0.Slot(35), + ValidatorIndex: phase0.ValidatorIndex(1), + }, + }) + currentSlot.Set(phase0.Slot(32)) + + // STEP 1: wait for attester duties to be fetched (handle initial duties) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) + fetchDutiesCall, executeDutiesCall := setupAttesterGenesisDutiesMock(scheduler, dutiesMap, waitForDuties) + startFn() + + mockTicker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 2: trigger head event + e := &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ + Slot: currentSlot.Get(), + PreviousDutyDependentRoot: phase0.Root{0x01}, + }, + } + scheduler.HandleHeadEvent(logger)(e) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 3: Ticker with no action + currentSlot.Set(phase0.Slot(33)) + waitForDuties.Set(true) + mockTicker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 4: trigger reorg + e = &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ + Slot: currentSlot.Get(), + PreviousDutyDependentRoot: phase0.Root{0x02}, + }, + } + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 3}, + Slot: phase0.Slot(36), + ValidatorIndex: phase0.ValidatorIndex(1), + }, + }) + scheduler.HandleHeadEvent(logger)(e) + waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 5: wait for no action to be taken + currentSlot.Set(phase0.Slot(34)) + mockTicker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 6: The first assigned duty should not be executed + currentSlot.Set(phase0.Slot(35)) + mockTicker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 7: The second assigned duty should be executed + currentSlot.Set(phase0.Slot(36)) + duties, _ := dutiesMap.Get(phase0.Epoch(1)) + expected := expectedExecutedGenesisAttesterDuties(handler, duties) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + mockTicker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + // Stop scheduler & wait for graceful exit.
+	cancel()
+	require.NoError(t, schedulerPool.Wait())
+}
+
+// reorg previous dependent root changed and the indices changed in the same slot
+func TestScheduler_Attester_Genesis_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T) {
+	var (
+		handler       = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]())
+		currentSlot   = &SafeValue[phase0.Slot]{}
+		dutiesMap     = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]()
+		waitForDuties = &SafeValue[bool]{}
+		forkEpoch     = goclient.FarFutureEpoch
+	)
+	dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{
+		{
+			PubKey:         phase0.BLSPubKey{1, 2, 3},
+			Slot:           phase0.Slot(35),
+			ValidatorIndex: phase0.ValidatorIndex(1),
+		},
+	})
+	currentSlot.Set(phase0.Slot(32))
+
+	// STEP 1: wait for attester duties to be fetched (handle initial duties)
+	scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch)
+	fetchDutiesCall, executeDutiesCall := setupAttesterGenesisDutiesMock(scheduler, dutiesMap, waitForDuties)
+	startFn()
+
+	mockTicker.Send(currentSlot.Get())
+	waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 2: trigger head event
+	e := &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
+			Slot:                      currentSlot.Get(),
+			PreviousDutyDependentRoot: phase0.Root{0x01},
+		},
+	}
+	scheduler.HandleHeadEvent(logger)(e)
+	waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 3: Ticker with no action
+	currentSlot.Set(phase0.Slot(33))
+	waitForDuties.Set(true)
+	mockTicker.Send(currentSlot.Get())
+	waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 4: trigger reorg
+	e = &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
+			Slot:                      currentSlot.Get(),
+			PreviousDutyDependentRoot: phase0.Root{0x02},
+		},
+	}
+	dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{
+		{
+			PubKey:         phase0.BLSPubKey{1, 2, 3},
+			Slot:           phase0.Slot(36),
+			ValidatorIndex: phase0.ValidatorIndex(1),
+		},
+	})
+	scheduler.HandleHeadEvent(logger)(e)
+	waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 5: trigger indices change
+	scheduler.indicesChg <- struct{}{}
+	duties, _ := dutiesMap.Get(phase0.Epoch(1))
+	dutiesMap.Set(phase0.Epoch(1), append(duties, &eth2apiv1.AttesterDuty{
+		PubKey:         phase0.BLSPubKey{1, 2, 4},
+		Slot:           phase0.Slot(36),
+		ValidatorIndex: phase0.ValidatorIndex(2),
+	}))
+	waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 6: wait for attester duties to be fetched again for the current epoch
+	currentSlot.Set(phase0.Slot(34))
+	mockTicker.Send(currentSlot.Get())
+	waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 7: The first assigned duty should not be executed
+	currentSlot.Set(phase0.Slot(35))
+	mockTicker.Send(currentSlot.Get())
+	waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 8: The second assigned duty and the duty added by the indices change should be executed
+	currentSlot.Set(phase0.Slot(36))
+	duties, _ = dutiesMap.Get(phase0.Epoch(1))
+	expected := expectedExecutedGenesisAttesterDuties(handler, duties)
+	setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected))
+
+	mockTicker.Send(currentSlot.Get())
+	waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected)
+
+	// Stop scheduler & wait for graceful exit.
+	cancel()
+	require.NoError(t, schedulerPool.Wait())
+}
+
+// reorg current dependent root changed
+func TestScheduler_Attester_Genesis_Reorg_Current(t *testing.T) {
+	var (
+		handler       = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]())
+		currentSlot   = &SafeValue[phase0.Slot]{}
+		dutiesMap     = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]()
+		waitForDuties = &SafeValue[bool]{}
+		forkEpoch     = goclient.FarFutureEpoch
+	)
+	currentSlot.Set(phase0.Slot(47))
+	scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch)
+	fetchDutiesCall, executeDutiesCall := setupAttesterGenesisDutiesMock(scheduler, dutiesMap, waitForDuties)
+	startFn()
+
+	dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{
+		{
+			PubKey:         phase0.BLSPubKey{1, 2, 3},
+			Slot:           phase0.Slot(64),
+			ValidatorIndex: phase0.ValidatorIndex(1),
+		},
+	})
+
+	// STEP 1: wait for attester duties to be fetched for next epoch
+	waitForDuties.Set(true)
+	mockTicker.Send(currentSlot.Get())
+	waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 2: trigger head event
+	e := &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
+			Slot:                     currentSlot.Get(),
+			CurrentDutyDependentRoot: phase0.Root{0x01},
+		},
+	}
+	scheduler.HandleHeadEvent(logger)(e)
+	waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 3: Ticker with no action
+	currentSlot.Set(phase0.Slot(48))
+	mockTicker.Send(currentSlot.Get())
+	waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 4: trigger reorg
+	e = &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
+			Slot:                     currentSlot.Get(),
+			CurrentDutyDependentRoot: phase0.Root{0x02},
+		},
+	}
+	dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{
+		{
+			PubKey:         phase0.BLSPubKey{1, 2, 3},
+			Slot:           phase0.Slot(65),
+			ValidatorIndex: phase0.ValidatorIndex(1),
+		},
+	})
+	scheduler.HandleHeadEvent(logger)(e)
+	waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 5: wait for attester duties to be fetched again for the next epoch
+	currentSlot.Set(phase0.Slot(49))
+	mockTicker.Send(currentSlot.Get())
+	waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 6: skip to the next epoch
+	currentSlot.Set(phase0.Slot(50))
+	for slot := currentSlot.Get(); slot < 64; slot++ {
+		mockTicker.Send(slot)
+		waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+		currentSlot.Set(slot + 1)
+	}
+
+	// STEP 7: The first assigned duty should not be executed
+	// slot = 64
+	mockTicker.Send(currentSlot.Get())
+	waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 8: The second assigned duty should be executed
+	currentSlot.Set(phase0.Slot(65))
+	duties, _ := dutiesMap.Get(phase0.Epoch(2))
+	expected := expectedExecutedGenesisAttesterDuties(handler, duties)
+	setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected))
+
+	mockTicker.Send(currentSlot.Get())
+	waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected)
+
+	// Stop scheduler & wait for graceful exit.
+	cancel()
+	require.NoError(t, schedulerPool.Wait())
+}
+
+// reorg current dependent root changed including indices change in the same slot
+func TestScheduler_Attester_Genesis_Reorg_Current_Indices_Changed(t *testing.T) {
+	var (
+		handler       = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]())
+		currentSlot   = &SafeValue[phase0.Slot]{}
+		dutiesMap     = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]()
+		waitForDuties = &SafeValue[bool]{}
+		forkEpoch     = goclient.FarFutureEpoch
+	)
+	currentSlot.Set(phase0.Slot(47))
+	scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch)
+	fetchDutiesCall, executeDutiesCall := setupAttesterGenesisDutiesMock(scheduler, dutiesMap, waitForDuties)
+	startFn()
+
+	dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{
+		{
+			PubKey:         phase0.BLSPubKey{1, 2, 3},
+			Slot:           phase0.Slot(64),
+			ValidatorIndex: phase0.ValidatorIndex(1),
+		},
+	})
+
+	// STEP 1: wait for attester duties to be fetched for next epoch
+	waitForDuties.Set(true)
+	mockTicker.Send(currentSlot.Get())
+	waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 2: trigger head event
+	e := &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
+			Slot:                     currentSlot.Get(),
+			CurrentDutyDependentRoot: phase0.Root{0x01},
+		},
+	}
+	scheduler.HandleHeadEvent(logger)(e)
+	waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 3: Ticker with no action
+	currentSlot.Set(phase0.Slot(48))
+	mockTicker.Send(currentSlot.Get())
+	waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 4: trigger reorg
+	e = &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
+			Slot:                     currentSlot.Get(),
+			CurrentDutyDependentRoot: phase0.Root{0x02},
+		},
+	}
+	dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{
+		{
+			PubKey:         phase0.BLSPubKey{1, 2, 3},
+			Slot:           phase0.Slot(65),
+			ValidatorIndex: phase0.ValidatorIndex(1),
+		},
+	})
+	scheduler.HandleHeadEvent(logger)(e)
+	waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 5: trigger indices change
+	scheduler.indicesChg <- struct{}{}
+	duties, _ := dutiesMap.Get(phase0.Epoch(2))
+	dutiesMap.Set(phase0.Epoch(2), append(duties, &eth2apiv1.AttesterDuty{
+		PubKey:         phase0.BLSPubKey{1, 2, 4},
+		Slot:           phase0.Slot(65),
+		ValidatorIndex: phase0.ValidatorIndex(2),
+	}))
+	waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 6: wait for attester duties to be fetched again for the next epoch due to indices change
+	currentSlot.Set(phase0.Slot(49))
+	mockTicker.Send(currentSlot.Get())
+	waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 7: skip to the next epoch
+	currentSlot.Set(phase0.Slot(50))
+	for slot := currentSlot.Get(); slot < 64; slot++ {
+		mockTicker.Send(slot)
+		waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+		currentSlot.Set(slot + 1)
+	}
+
+	// STEP 8: The first assigned duty should not be executed
+	// slot = 64
+	mockTicker.Send(currentSlot.Get())
+	waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 9: The second assigned duty and the duty added by the indices change should be executed
+	currentSlot.Set(phase0.Slot(65))
+	duties, _ = dutiesMap.Get(phase0.Epoch(2))
+	expected := expectedExecutedGenesisAttesterDuties(handler, duties)
+	setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected))
+
+	mockTicker.Send(currentSlot.Get())
+	waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected)
+
+	// Stop scheduler & wait for graceful exit.
+	cancel()
+	require.NoError(t, schedulerPool.Wait())
+}
+
+func TestScheduler_Attester_Genesis_Early_Block(t *testing.T) {
+	var (
+		handler       = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]())
+		currentSlot   = &SafeValue[phase0.Slot]{}
+		dutiesMap     = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]()
+		waitForDuties = &SafeValue[bool]{}
+		forkEpoch     = goclient.FarFutureEpoch
+	)
+	dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{
+		{
+			PubKey:         phase0.BLSPubKey{1, 2, 3},
+			Slot:           phase0.Slot(2),
+			ValidatorIndex: phase0.ValidatorIndex(1),
+		},
+	})
+	currentSlot.Set(phase0.Slot(0))
+
+	// STEP 1: wait for attester duties to be fetched (handle initial duties)
+	scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch)
+	fetchDutiesCall, executeDutiesCall := setupAttesterGenesisDutiesMock(scheduler, dutiesMap, waitForDuties)
+	startFn()
+
+	mockTicker.Send(currentSlot.Get())
+	waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 2: wait for no action to be taken
+	currentSlot.Set(phase0.Slot(1))
+	mockTicker.Send(currentSlot.Get())
+	waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 3: wait for attester duties to be executed faster than 1/3 of the slot duration
+	startTime := time.Now()
+	currentSlot.Set(phase0.Slot(2))
+	mockTicker.Send(currentSlot.Get())
+	duties, _ := dutiesMap.Get(phase0.Epoch(0))
+	expected := expectedExecutedGenesisAttesterDuties(handler, duties)
+	setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected))
+
+	// STEP 4: trigger head event (block arrival)
+	e := &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
+			Slot: currentSlot.Get(),
+		},
+	}
+	scheduler.HandleHeadEvent(logger)(e)
+	waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected)
+	require.Less(t, time.Since(startTime), scheduler.network.Beacon.SlotDurationSec()/3)
+
+	// Stop scheduler & wait for graceful exit.
+	cancel()
+	require.NoError(t, schedulerPool.Wait())
+}
+
+func TestScheduler_Attester_Genesis_Start_In_The_End_Of_The_Epoch(t *testing.T) {
+	var (
+		handler       = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]())
+		currentSlot   = &SafeValue[phase0.Slot]{}
+		dutiesMap     = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]()
+		waitForDuties = &SafeValue[bool]{}
+		forkEpoch     = goclient.FarFutureEpoch
+	)
+	currentSlot.Set(phase0.Slot(31))
+	scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch)
+	fetchDutiesCall, executeDutiesCall := setupAttesterGenesisDutiesMock(scheduler, dutiesMap, waitForDuties)
+	startFn()
+
+	dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{
+		{
+			PubKey:         phase0.BLSPubKey{1, 2, 3},
+			Slot:           phase0.Slot(32),
+			ValidatorIndex: phase0.ValidatorIndex(1),
+		},
+	})
+
+	// STEP 1: wait for attester duties to be fetched for the next epoch
+	waitForDuties.Set(true)
+	mockTicker.Send(currentSlot.Get())
+	waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 2: wait for attester duties to be executed
+	currentSlot.Set(phase0.Slot(32))
+	duties, _ := dutiesMap.Get(phase0.Epoch(1))
+	expected := expectedExecutedGenesisAttesterDuties(handler, duties)
+	setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected))
+
+	mockTicker.Send(currentSlot.Get())
+	waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected)
+
+	// Stop scheduler & wait for graceful exit.
+	cancel()
+	require.NoError(t, schedulerPool.Wait())
+}
+
+func TestScheduler_Attester_Genesis_Fetch_Execute_Next_Epoch_Duty(t *testing.T) {
+	var (
+		handler       = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]())
+		currentSlot   = &SafeValue[phase0.Slot]{}
+		dutiesMap     = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]()
+		waitForDuties = &SafeValue[bool]{}
+		forkEpoch     = goclient.FarFutureEpoch
+	)
+	currentSlot.Set(phase0.Slot(13))
+	scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch)
+	fetchDutiesCall, executeDutiesCall := setupAttesterGenesisDutiesMock(scheduler, dutiesMap, waitForDuties)
+	startFn()
+
+	dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{
+		{
+			PubKey:         phase0.BLSPubKey{1, 2, 3},
+			Slot:           phase0.Slot(32),
+			ValidatorIndex: phase0.ValidatorIndex(1),
+		},
+	})
+
+	// STEP 1: wait for no action to be taken
+	mockTicker.Send(currentSlot.Get())
+	waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 2: wait for no action to be taken
+	currentSlot.Set(phase0.Slot(14))
+	mockTicker.Send(currentSlot.Get())
+	waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 3: wait for duties to be fetched for the next epoch
+	currentSlot.Set(phase0.Slot(15))
+	waitForDuties.Set(true)
+	mockTicker.Send(currentSlot.Get())
+	waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
+
+	// STEP 4: wait for attester duties to be executed
+	currentSlot.Set(phase0.Slot(32))
+	duties, _ := dutiesMap.Get(phase0.Epoch(1))
+	expected := expectedExecutedGenesisAttesterDuties(handler, duties)
+	setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected))
+
+	mockTicker.Send(currentSlot.Get())
+	waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected)
+
+	// Stop scheduler & wait for
graceful exit. + cancel() + require.NoError(t, schedulerPool.Wait()) +} diff --git a/operator/duties/attester_test.go b/operator/duties/attester_test.go index 1fd33fc8dd..73525d7a01 100644 --- a/operator/duties/attester_test.go +++ b/operator/duties/attester_test.go @@ -8,13 +8,11 @@ import ( eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/cornelk/hashmap" - "github.com/stretchr/testify/require" - gomock "go.uber.org/mock/gomock" - spectypes "github.com/ssvlabs/ssv-spec/types" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ssvlabs/ssv/operator/duties/dutystore" - "github.com/ssvlabs/ssv/operator/duties/mocks" "github.com/ssvlabs/ssv/protocol/v2/types" ) @@ -26,7 +24,7 @@ func setupAttesterDutiesMock( fetchDutiesCall := make(chan struct{}) executeDutiesCall := make(chan []*spectypes.BeaconDuty) - s.beaconNode.(*mocks.MockBeaconNode).EXPECT().AttesterDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + s.beaconNode.(*MockBeaconNode).EXPECT().AttesterDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*eth2apiv1.AttesterDuty, error) { if waitForDuties.Get() { fetchDutiesCall <- struct{}{} @@ -55,10 +53,10 @@ func setupAttesterDutiesMock( return shares } - s.validatorProvider.(*mocks.MockValidatorProvider).EXPECT().SelfParticipatingValidators(gomock.Any()).DoAndReturn(getShares).AnyTimes() - s.validatorProvider.(*mocks.MockValidatorProvider).EXPECT().ParticipatingValidators(gomock.Any()).DoAndReturn(getShares).AnyTimes() + s.validatorProvider.(*MockValidatorProvider).EXPECT().SelfParticipatingValidators(gomock.Any()).DoAndReturn(getShares).AnyTimes() + s.validatorProvider.(*MockValidatorProvider).EXPECT().ParticipatingValidators(gomock.Any()).DoAndReturn(getShares).AnyTimes() - s.beaconNode.(*mocks.MockBeaconNode).EXPECT().SubmitBeaconCommitteeSubscriptions(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + s.beaconNode.(*MockBeaconNode).EXPECT().SubmitBeaconCommitteeSubscriptions(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() return fetchDutiesCall, executeDutiesCall } @@ -66,8 +64,6 @@ func setupAttesterDutiesMock( func expectedExecutedAttesterDuties(handler *AttesterHandler, duties []*eth2apiv1.AttesterDuty) []*spectypes.BeaconDuty { expectedDuties := make([]*spectypes.BeaconDuty, 0) for _, d := range duties { - // TODO: (Alan) genesis support - //expectedDuties = append(expectedDuties, handler.toSpecDuty(d, spectypes.BNRoleAttester)) expectedDuties = append(expectedDuties, handler.toSpecDuty(d, spectypes.BNRoleAggregator)) } return expectedDuties @@ -79,6 +75,7 @@ func TestScheduler_Attester_Same_Slot(t *testing.T) { currentSlot = &SafeValue[phase0.Slot]{} dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) ) dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { @@ -88,11 +85,11 @@ func TestScheduler_Attester_Same_Slot(t *testing.T) { }, }) currentSlot.Set(phase0.Slot(1)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap, waitForDuties) startFn() - // STEP 1: wait for attester duties to be fetched 
and executed at the same slot duties, _ := dutiesMap.Get(phase0.Epoch(0)) expected := expectedExecutedAttesterDuties(handler, duties) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) @@ -111,6 +108,7 @@ func TestScheduler_Attester_Diff_Slots(t *testing.T) { currentSlot = &SafeValue[phase0.Slot]{} dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) ) dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { @@ -119,22 +117,19 @@ func TestScheduler_Attester_Diff_Slots(t *testing.T) { ValidatorIndex: phase0.ValidatorIndex(1), }, }) - currentSlot.Set(phase0.Slot(0)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap, waitForDuties) startFn() - // STEP 1: wait for attester duties to be fetched ticker.Send(currentSlot.Get()) waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) - // STEP 2: wait for no action to be taken currentSlot.Set(phase0.Slot(1)) ticker.Send(currentSlot.Get()) waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) - // STEP 3: wait for attester duties to be executed currentSlot.Set(phase0.Slot(2)) duties, _ := dutiesMap.Get(phase0.Epoch(0)) expected := expectedExecutedAttesterDuties(handler, duties) @@ -154,9 +149,10 @@ func TestScheduler_Attester_Indices_Changed(t *testing.T) { currentSlot = &SafeValue[phase0.Slot]{} dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) ) currentSlot.Set(phase0.Slot(0)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap, waitForDuties) startFn() @@ -214,9 +210,10 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { currentSlot = &SafeValue[phase0.Slot]{} dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) ) currentSlot.Set(phase0.Slot(0)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap, waitForDuties) startFn() @@ -285,9 +282,10 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition(t *testing.T) { currentSlot = &SafeValue[phase0.Slot]{} dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) ) currentSlot.Set(phase0.Slot(63)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := 
setupAttesterDutiesMock(scheduler, dutiesMap, waitForDuties) startFn() @@ -368,9 +366,10 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *t currentSlot = &SafeValue[phase0.Slot]{} dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) ) currentSlot.Set(phase0.Slot(63)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap, waitForDuties) startFn() @@ -462,6 +461,7 @@ func TestScheduler_Attester_Reorg_Previous(t *testing.T) { currentSlot = &SafeValue[phase0.Slot]{} dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) ) dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { @@ -470,10 +470,10 @@ func TestScheduler_Attester_Reorg_Previous(t *testing.T) { ValidatorIndex: phase0.ValidatorIndex(1), }, }) + currentSlot.Set(phase0.Slot(32)) // STEP 1: wait for attester duties to be fetched (handle initial duties) - currentSlot.Set(phase0.Slot(32)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap, waitForDuties) startFn() @@ -513,7 +513,7 @@ func TestScheduler_Attester_Reorg_Previous(t *testing.T) { scheduler.HandleHeadEvent(logger)(e) waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) - // STEP 5: wait for attester duties to be fetched again for the current epoch + // STEP 5: wait for no action to be taken currentSlot.Set(phase0.Slot(34)) mockTicker.Send(currentSlot.Get()) waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) @@ -544,6 +544,7 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T currentSlot = &SafeValue[phase0.Slot]{} dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) ) dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { @@ -552,10 +553,10 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T ValidatorIndex: phase0.ValidatorIndex(1), }, }) + currentSlot.Set(phase0.Slot(32)) // STEP 1: wait for attester duties to be fetched (handle initial duties) - currentSlot.Set(phase0.Slot(32)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap, waitForDuties) startFn() @@ -636,9 +637,10 @@ func TestScheduler_Attester_Reorg_Current(t *testing.T) { currentSlot = &SafeValue[phase0.Slot]{} dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) ) currentSlot.Set(phase0.Slot(47)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool, 
startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap, waitForDuties) startFn() @@ -726,9 +728,10 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) { currentSlot = &SafeValue[phase0.Slot]{} dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) ) currentSlot.Set(phase0.Slot(47)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap, waitForDuties) startFn() @@ -805,7 +808,7 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) { mockTicker.Send(currentSlot.Get()) waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) - // STEP 8: The second assigned duty should be executed + // STEP 9: The second assigned duty should be executed currentSlot.Set(phase0.Slot(65)) duties, _ = dutiesMap.Get(phase0.Epoch(2)) expected := expectedExecutedAttesterDuties(handler, duties) @@ -825,6 +828,7 @@ func TestScheduler_Attester_Early_Block(t *testing.T) { currentSlot = &SafeValue[phase0.Slot]{} dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) ) dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { @@ -833,10 +837,10 @@ func TestScheduler_Attester_Early_Block(t *testing.T) { ValidatorIndex: phase0.ValidatorIndex(1), }, }) + currentSlot.Set(phase0.Slot(0)) // STEP 1: wait for attester duties to be fetched (handle initial duties) - currentSlot.Set(phase0.Slot(0)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap, waitForDuties) startFn() @@ -877,9 +881,10 @@ func TestScheduler_Attester_Start_In_The_End_Of_The_Epoch(t *testing.T) { currentSlot = &SafeValue[phase0.Slot]{} dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) ) currentSlot.Set(phase0.Slot(31)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap, waitForDuties) startFn() @@ -916,9 +921,10 @@ func TestScheduler_Attester_Fetch_Execute_Next_Epoch_Duty(t *testing.T) { currentSlot = &SafeValue[phase0.Slot]{} dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) ) currentSlot.Set(phase0.Slot(13)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, 
mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap, waitForDuties) startFn() @@ -939,7 +945,7 @@ func TestScheduler_Attester_Fetch_Execute_Next_Epoch_Duty(t *testing.T) { mockTicker.Send(currentSlot.Get()) waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) - // STEP 2: wait for no action to be taken + // STEP 2: wait for duties to be fetched for the next epoch currentSlot.Set(phase0.Slot(15)) waitForDuties.Set(true) mockTicker.Send(currentSlot.Get()) diff --git a/operator/duties/base_handler.go b/operator/duties/base_handler.go index ebf44fdb6c..c03114f775 100644 --- a/operator/duties/base_handler.go +++ b/operator/duties/base_handler.go @@ -3,7 +3,6 @@ package duties import ( "context" - spectypes "github.com/ssvlabs/ssv-spec/types" "go.uber.org/zap" "github.com/ssvlabs/ssv/networkconfig" @@ -12,29 +11,34 @@ import ( //go:generate mockgen -package=duties -destination=./base_handler_mock.go -source=./base_handler.go -// ExecuteDutiesFunc is a non-blocking functions which executes the given duties. -type ExecuteDutiesFunc func(logger *zap.Logger, duties []*spectypes.BeaconDuty) - -// ExecuteCommitteeDutiesFunc is a non-blocking function which executes the given committee duties. -type ExecuteCommitteeDutiesFunc func(logger *zap.Logger, duties committeeDutiesMap) - type dutyHandler interface { - Setup(string, *zap.Logger, BeaconNode, ExecutionClient, networkconfig.NetworkConfig, ValidatorProvider, ValidatorController, ExecuteDutiesFunc, ExecuteCommitteeDutiesFunc, slotticker.Provider, chan ReorgEvent, chan struct{}) + Setup( + name string, + logger *zap.Logger, + beaconNode BeaconNode, + executionClient ExecutionClient, + network networkconfig.NetworkConfig, + validatorProvider ValidatorProvider, + validatorController ValidatorController, + dutiesExecutor DutiesExecutor, + slotTickerProvider slotticker.Provider, + reorgEvents chan ReorgEvent, + indicesChange chan struct{}, + ) HandleDuties(context.Context) HandleInitialDuties(context.Context) Name() string } type baseHandler struct { - logger *zap.Logger - beaconNode BeaconNode - executionClient ExecutionClient - network networkconfig.NetworkConfig - validatorProvider ValidatorProvider - validatorController ValidatorController - executeDuties ExecuteDutiesFunc - executeCommitteeDuties ExecuteCommitteeDutiesFunc - ticker slotticker.SlotTicker + logger *zap.Logger + beaconNode BeaconNode + executionClient ExecutionClient + network networkconfig.NetworkConfig + validatorProvider ValidatorProvider + validatorController ValidatorController + dutiesExecutor DutiesExecutor + ticker slotticker.SlotTicker reorg chan ReorgEvent indicesChange chan struct{} @@ -50,8 +54,7 @@ func (h *baseHandler) Setup( network networkconfig.NetworkConfig, validatorProvider ValidatorProvider, validatorController ValidatorController, - executeDuties ExecuteDutiesFunc, - executeCommitteeDuties ExecuteCommitteeDutiesFunc, + dutiesExecutor DutiesExecutor, slotTickerProvider slotticker.Provider, reorgEvents chan ReorgEvent, indicesChange chan struct{}, @@ -62,8 +65,7 @@ func (h *baseHandler) Setup( h.network = network h.validatorProvider = validatorProvider h.validatorController = validatorController - h.executeDuties = executeDuties - h.executeCommitteeDuties = executeCommitteeDuties + h.dutiesExecutor = dutiesExecutor h.ticker = slotTickerProvider() h.reorg = reorgEvents h.indicesChange = 
indicesChange diff --git a/operator/duties/base_handler_mock.go b/operator/duties/base_handler_mock.go index 2c4a1f7c07..a39f148be9 100644 --- a/operator/duties/base_handler_mock.go +++ b/operator/duties/base_handler_mock.go @@ -81,13 +81,13 @@ func (mr *MockdutyHandlerMockRecorder) Name() *gomock.Call { } // Setup mocks base method. -func (m *MockdutyHandler) Setup(arg0 string, arg1 *zap.Logger, arg2 BeaconNode, arg3 ExecutionClient, arg4 networkconfig.NetworkConfig, arg5 ValidatorProvider, arg6 ValidatorController, arg7 ExecuteDutiesFunc, arg8 ExecuteCommitteeDutiesFunc, arg9 slotticker.Provider, arg10 chan ReorgEvent, arg11 chan struct{}) { +func (m *MockdutyHandler) Setup(name string, logger *zap.Logger, beaconNode BeaconNode, executionClient ExecutionClient, network networkconfig.NetworkConfig, validatorProvider ValidatorProvider, validatorController ValidatorController, dutyExecutor DutiesExecutor, slotTickerProvider slotticker.Provider, reorgEvents chan ReorgEvent, indicesChange chan struct{}) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Setup", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11) + m.ctrl.Call(m, "Setup", name, logger, beaconNode, executionClient, network, validatorProvider, validatorController, dutyExecutor, slotTickerProvider, reorgEvents, indicesChange) } // Setup indicates an expected call of Setup. -func (mr *MockdutyHandlerMockRecorder) Setup(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11 any) *gomock.Call { +func (mr *MockdutyHandlerMockRecorder) Setup(name, logger, beaconNode, executionClient, network, validatorProvider, validatorController, dutyExecutor, slotTickerProvider, reorgEvents, indicesChange any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Setup", reflect.TypeOf((*MockdutyHandler)(nil).Setup), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Setup", reflect.TypeOf((*MockdutyHandler)(nil).Setup), name, logger, beaconNode, executionClient, network, validatorProvider, validatorController, dutyExecutor, slotTickerProvider, reorgEvents, indicesChange) } diff --git a/operator/duties/committee.go b/operator/duties/committee.go index 9f0f7b7f1f..e564b030fc 100644 --- a/operator/duties/committee.go +++ b/operator/duties/committee.go @@ -19,12 +19,16 @@ type CommitteeHandler struct { attDuties *dutystore.Duties[eth2apiv1.AttesterDuty] syncDuties *dutystore.SyncCommitteeDuties + + validatorCommitteeIDs map[phase0.ValidatorIndex]spectypes.CommitteeID } func NewCommitteeHandler(attDuties *dutystore.Duties[eth2apiv1.AttesterDuty], syncDuties *dutystore.SyncCommitteeDuties) *CommitteeHandler { h := &CommitteeHandler{ attDuties: attDuties, syncDuties: syncDuties, + + validatorCommitteeIDs: make(map[phase0.ValidatorIndex]spectypes.CommitteeID), } return h @@ -38,18 +42,28 @@ func (h *CommitteeHandler) HandleDuties(ctx context.Context) { h.logger.Info("starting duty handler") defer h.logger.Info("duty handler exited") + next := h.ticker.Next() for { select { case <-ctx.Done(): return - case <-h.ticker.Next(): + case <-next: slot := h.ticker.Slot() + next = h.ticker.Next() epoch := h.network.Beacon.EstimatedEpochAtSlot(slot) period := h.network.Beacon.EstimatedSyncCommitteePeriodAtEpoch(epoch) buildStr := fmt.Sprintf("p%v-e%v-s%v-#%v", period, epoch, slot, slot%32+1) - h.logger.Debug("🛠 ticker event", zap.String("period_epoch_slot_pos", buildStr)) + if !h.network.AlanForked(slot) { + 
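+				// Alan fork not yet active at this slot: log and skip committee duty processing until the fork epoch is reached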
h.logger.Debug("🛠 ticker event", + zap.String("period_epoch_slot_pos", buildStr), + zap.String("status", "alan not forked yet"), + ) + continue + } + + h.logger.Debug("🛠 ticker event", zap.String("period_epoch_slot_pos", buildStr)) h.processExecution(period, epoch, slot) case <-h.reorg: @@ -69,37 +83,37 @@ func (h *CommitteeHandler) processExecution(period uint64, epoch phase0.Epoch, s } committeeMap := h.buildCommitteeDuties(attDuties, syncDuties, epoch, slot) - h.executeCommitteeDuties(h.logger, committeeMap) + h.dutiesExecutor.ExecuteCommitteeDuties(h.logger, committeeMap) } func (h *CommitteeHandler) buildCommitteeDuties(attDuties []*eth2apiv1.AttesterDuty, syncDuties []*eth2apiv1.SyncCommitteeDuty, epoch phase0.Epoch, slot phase0.Slot) committeeDutiesMap { - // TODO: tmp solution to get committee id fast - vcmap := make(map[phase0.ValidatorIndex]spectypes.CommitteeID) + // NOTE: Instead of getting validators using duties one by one, we are getting all validators for the slot at once. + // This approach reduces contention and improves performance, as multiple individual calls would be slower. vs := h.validatorProvider.SelfParticipatingValidators(epoch) for _, v := range vs { - vcmap[v.ValidatorIndex] = v.CommitteeID() + h.validatorCommitteeIDs[v.ValidatorIndex] = v.CommitteeID() } committeeMap := make(committeeDutiesMap) for _, d := range attDuties { if h.shouldExecuteAtt(d) { specDuty := h.toSpecAttDuty(d, spectypes.BNRoleAttester) - h.appendBeaconDuty(committeeMap, vcmap, specDuty) + h.appendBeaconDuty(committeeMap, specDuty) } } for _, d := range syncDuties { if h.shouldExecuteSync(d, slot) { specDuty := h.toSpecSyncDuty(d, slot, spectypes.BNRoleSyncCommittee) - h.appendBeaconDuty(committeeMap, vcmap, specDuty) + h.appendBeaconDuty(committeeMap, specDuty) } } return committeeMap } -func (h *CommitteeHandler) appendBeaconDuty(m committeeDutiesMap, vcmap map[phase0.ValidatorIndex]spectypes.CommitteeID, beaconDuty *spectypes.BeaconDuty) { - committeeID, ok := vcmap[beaconDuty.ValidatorIndex] +func (h *CommitteeHandler) appendBeaconDuty(m committeeDutiesMap, beaconDuty *spectypes.BeaconDuty) { + committeeID, ok := h.validatorCommitteeIDs[beaconDuty.ValidatorIndex] if !ok { h.logger.Error("can't find validator committeeID in validator store", zap.Uint64("validator_index", uint64(beaconDuty.ValidatorIndex))) return diff --git a/operator/duties/committee_test.go b/operator/duties/committee_test.go index 0a37f9f097..10b54f6abe 100644 --- a/operator/duties/committee_test.go +++ b/operator/duties/committee_test.go @@ -10,10 +10,9 @@ import ( "github.com/cornelk/hashmap" spectypes "github.com/ssvlabs/ssv-spec/types" "github.com/stretchr/testify/require" - gomock "go.uber.org/mock/gomock" + "go.uber.org/mock/gomock" "github.com/ssvlabs/ssv/operator/duties/dutystore" - "github.com/ssvlabs/ssv/operator/duties/mocks" mocknetwork "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon/mocks" ssvtypes "github.com/ssvlabs/ssv/protocol/v2/types" ) @@ -55,7 +54,7 @@ func setupCommitteeDutiesMock( }, ).AnyTimes() - s.beaconNode.(*mocks.MockBeaconNode).EXPECT().AttesterDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + s.beaconNode.(*MockBeaconNode).EXPECT().AttesterDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*eth2apiv1.AttesterDuty, error) { if waitForDuties.Get() { fetchDutiesCall <- struct{}{} @@ -64,7 +63,7 @@ func setupCommitteeDutiesMock( return duties, nil }).AnyTimes() - 
s.beaconNode.(*mocks.MockBeaconNode).EXPECT().SyncCommitteeDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + s.beaconNode.(*MockBeaconNode).EXPECT().SyncCommitteeDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*eth2apiv1.SyncCommitteeDuty, error) { if waitForDuties.Get() { fetchDutiesCall <- struct{}{} @@ -74,16 +73,16 @@ func setupCommitteeDutiesMock( return duties, nil }).AnyTimes() - s.validatorProvider.(*mocks.MockValidatorProvider).EXPECT().SelfParticipatingValidators(gomock.Any()).Return(activeShares).AnyTimes() - s.validatorProvider.(*mocks.MockValidatorProvider).EXPECT().ParticipatingValidators(gomock.Any()).Return(activeShares).AnyTimes() + s.validatorProvider.(*MockValidatorProvider).EXPECT().SelfParticipatingValidators(gomock.Any()).Return(activeShares).AnyTimes() + s.validatorProvider.(*MockValidatorProvider).EXPECT().ParticipatingValidators(gomock.Any()).Return(activeShares).AnyTimes() - s.validatorController.(*mocks.MockValidatorController).EXPECT().AllActiveIndices(gomock.Any(), gomock.Any()).DoAndReturn( + s.validatorController.(*MockValidatorController).EXPECT().AllActiveIndices(gomock.Any(), gomock.Any()).DoAndReturn( func(epoch phase0.Epoch, afterInit bool) []phase0.ValidatorIndex { return indicesFromShares(activeShares) }).AnyTimes() - s.beaconNode.(*mocks.MockBeaconNode).EXPECT().SubmitBeaconCommitteeSubscriptions(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - s.beaconNode.(*mocks.MockBeaconNode).EXPECT().SubmitSyncCommitteeSubscriptions(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + s.beaconNode.(*MockBeaconNode).EXPECT().SubmitBeaconCommitteeSubscriptions(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + s.beaconNode.(*MockBeaconNode).EXPECT().SubmitSyncCommitteeSubscriptions(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() return fetchDutiesCall, executeDutiesCall } @@ -94,6 +93,7 @@ func TestScheduler_Committee_Same_Slot_Attester_Only(t *testing.T) { attHandler = NewAttesterHandler(dutyStore.Attester) syncHandler = NewSyncCommitteeHandler(dutyStore.SyncCommittee) commHandler = NewCommitteeHandler(dutyStore.Attester, dutyStore.SyncCommittee) + alanForkEpoch = phase0.Epoch(0) currentSlot = &SafeValue[phase0.Slot]{} waitForDuties = &SafeValue[bool]{} attDuties = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() @@ -116,7 +116,7 @@ func TestScheduler_Committee_Same_Slot_Attester_Only(t *testing.T) { }) currentSlot.Set(phase0.Slot(1)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocksCommittee(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot, alanForkEpoch) fetchDutiesCall, executeDutiesCall := setupCommitteeDutiesMock(scheduler, activeShares, attDuties, syncDuties, waitForDuties) startFn() @@ -145,6 +145,7 @@ func TestScheduler_Committee_Same_Slot_SyncCommittee_Only(t *testing.T) { attHandler = NewAttesterHandler(dutyStore.Attester) syncHandler = NewSyncCommitteeHandler(dutyStore.SyncCommittee) commHandler = NewCommitteeHandler(dutyStore.Attester, dutyStore.SyncCommittee) + alanForkEpoch = phase0.Epoch(0) currentSlot = &SafeValue[phase0.Slot]{} waitForDuties = &SafeValue[bool]{} attDuties = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() @@ -166,7 +167,7 @@ func 
TestScheduler_Committee_Same_Slot_SyncCommittee_Only(t *testing.T) { }) currentSlot.Set(phase0.Slot(1)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocksCommittee(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot, alanForkEpoch) fetchDutiesCall, executeDutiesCall := setupCommitteeDutiesMock(scheduler, activeShares, attDuties, syncDuties, waitForDuties) startFn() @@ -195,6 +196,7 @@ func TestScheduler_Committee_Same_Slot(t *testing.T) { attHandler = NewAttesterHandler(dutyStore.Attester) syncHandler = NewSyncCommitteeHandler(dutyStore.SyncCommittee) commHandler = NewCommitteeHandler(dutyStore.Attester, dutyStore.SyncCommittee) + alanForkEpoch = phase0.Epoch(0) currentSlot = &SafeValue[phase0.Slot]{} waitForDuties = &SafeValue[bool]{} attDuties = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() @@ -223,7 +225,7 @@ func TestScheduler_Committee_Same_Slot(t *testing.T) { }) currentSlot.Set(phase0.Slot(1)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocksCommittee(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot, alanForkEpoch) fetchDutiesCall, executeDutiesCall := setupCommitteeDutiesMock(scheduler, activeShares, attDuties, syncDuties, waitForDuties) startFn() @@ -253,6 +255,7 @@ func TestScheduler_Committee_Diff_Slot_Attester_Only(t *testing.T) { attHandler = NewAttesterHandler(dutyStore.Attester) syncHandler = NewSyncCommitteeHandler(dutyStore.SyncCommittee) commHandler = NewCommitteeHandler(dutyStore.Attester, dutyStore.SyncCommittee) + alanForkEpoch = phase0.Epoch(0) currentSlot = &SafeValue[phase0.Slot]{} waitForDuties = &SafeValue[bool]{} attDuties = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() @@ -275,7 +278,7 @@ func TestScheduler_Committee_Diff_Slot_Attester_Only(t *testing.T) { }) // STEP 1: wait for attester duties to be fetched using handle initial duties - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocksCommittee(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot, alanForkEpoch) fetchDutiesCall, executeDutiesCall := setupCommitteeDutiesMock(scheduler, activeShares, attDuties, syncDuties, waitForDuties) startFn() @@ -309,6 +312,7 @@ func TestScheduler_Committee_Indices_Changed_Attester_Only(t *testing.T) { attHandler = NewAttesterHandler(dutyStore.Attester) syncHandler = NewSyncCommitteeHandler(dutyStore.SyncCommittee) commHandler = NewCommitteeHandler(dutyStore.Attester, dutyStore.SyncCommittee) + alanForkEpoch = phase0.Epoch(0) currentSlot = &SafeValue[phase0.Slot]{} waitForDuties = &SafeValue[bool]{} attDuties = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() @@ -341,7 +345,7 @@ func TestScheduler_Committee_Indices_Changed_Attester_Only(t *testing.T) { } ) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocksCommittee(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, 
startFn := setupSchedulerAndMocks(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot, alanForkEpoch) fetchDutiesCall, executeDutiesCall := setupCommitteeDutiesMock(scheduler, activeShares, attDuties, syncDuties, waitForDuties) startFn() @@ -406,6 +410,7 @@ func TestScheduler_Committee_Indices_Changed_Attester_Only_2(t *testing.T) { attHandler = NewAttesterHandler(dutyStore.Attester) syncHandler = NewSyncCommitteeHandler(dutyStore.SyncCommittee) commHandler = NewCommitteeHandler(dutyStore.Attester, dutyStore.SyncCommittee) + alanForkEpoch = phase0.Epoch(0) currentSlot = &SafeValue[phase0.Slot]{} waitForDuties = &SafeValue[bool]{} attDuties = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() @@ -438,7 +443,7 @@ func TestScheduler_Committee_Indices_Changed_Attester_Only_2(t *testing.T) { } ) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocksCommittee(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot, alanForkEpoch) fetchDutiesCall, executeDutiesCall := setupCommitteeDutiesMock(scheduler, activeShares, attDuties, syncDuties, waitForDuties) startFn() @@ -503,6 +508,7 @@ func TestScheduler_Committee_Indices_Changed_Attester_Only_3(t *testing.T) { attHandler = NewAttesterHandler(dutyStore.Attester) syncHandler = NewSyncCommitteeHandler(dutyStore.SyncCommittee) commHandler = NewCommitteeHandler(dutyStore.Attester, dutyStore.SyncCommittee) + alanForkEpoch = phase0.Epoch(0) currentSlot = &SafeValue[phase0.Slot]{} waitForDuties = &SafeValue[bool]{} attDuties = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() @@ -535,7 +541,7 @@ func TestScheduler_Committee_Indices_Changed_Attester_Only_3(t *testing.T) { }) // STEP 1: wait for attester duties to be fetched using handle initial duties - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocksCommittee(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot, alanForkEpoch) fetchDutiesCall, executeDutiesCall := setupCommitteeDutiesMock(scheduler, activeShares, attDuties, syncDuties, waitForDuties) startFn() @@ -591,6 +597,7 @@ func TestScheduler_Committee_Reorg_Previous_Epoch_Transition_Attester_only(t *te attHandler = NewAttesterHandler(dutyStore.Attester) syncHandler = NewSyncCommitteeHandler(dutyStore.SyncCommittee) commHandler = NewCommitteeHandler(dutyStore.Attester, dutyStore.SyncCommittee) + alanForkEpoch = phase0.Epoch(0) currentSlot = &SafeValue[phase0.Slot]{} waitForDuties = &SafeValue[bool]{} attDuties = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() @@ -608,7 +615,7 @@ func TestScheduler_Committee_Reorg_Previous_Epoch_Transition_Attester_only(t *te ) currentSlot.Set(phase0.Slot(63)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocksCommittee(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot, alanForkEpoch) fetchDutiesCall, executeDutiesCall := setupCommitteeDutiesMock(scheduler, activeShares, attDuties, syncDuties, waitForDuties) startFn() @@ -686,6 +693,7 
@@ func TestScheduler_Committee_Reorg_Previous_Epoch_Transition_Indices_Changed_Att attHandler = NewAttesterHandler(dutyStore.Attester) syncHandler = NewSyncCommitteeHandler(dutyStore.SyncCommittee) commHandler = NewCommitteeHandler(dutyStore.Attester, dutyStore.SyncCommittee) + alanForkEpoch = phase0.Epoch(0) currentSlot = &SafeValue[phase0.Slot]{} waitForDuties = &SafeValue[bool]{} attDuties = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() @@ -711,7 +719,7 @@ func TestScheduler_Committee_Reorg_Previous_Epoch_Transition_Indices_Changed_Att ) currentSlot.Set(phase0.Slot(63)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocksCommittee(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot, alanForkEpoch) fetchDutiesCall, executeDutiesCall := setupCommitteeDutiesMock(scheduler, activeShares, attDuties, syncDuties, waitForDuties) startFn() @@ -798,6 +806,7 @@ func TestScheduler_Committee_Reorg_Previous_Attester_only(t *testing.T) { attHandler = NewAttesterHandler(dutyStore.Attester) syncHandler = NewSyncCommitteeHandler(dutyStore.SyncCommittee) commHandler = NewCommitteeHandler(dutyStore.Attester, dutyStore.SyncCommittee) + alanForkEpoch = phase0.Epoch(0) currentSlot = &SafeValue[phase0.Slot]{} waitForDuties = &SafeValue[bool]{} attDuties = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() @@ -824,7 +833,7 @@ func TestScheduler_Committee_Reorg_Previous_Attester_only(t *testing.T) { // STEP 1: wait for attester duties to be fetched using handle initial duties currentSlot.Set(phase0.Slot(32)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocksCommittee(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot, alanForkEpoch) fetchDutiesCall, executeDutiesCall := setupCommitteeDutiesMock(scheduler, activeShares, attDuties, syncDuties, waitForDuties) startFn() @@ -889,3 +898,87 @@ func TestScheduler_Committee_Reorg_Previous_Attester_only(t *testing.T) { func TestScheduler_Committee_Early_Block(t *testing.T) { t.Skip("TODO") } + +func TestScheduler_Committee_Fork_Attester_only(t *testing.T) { + var ( + dutyStore = dutystore.New() + attHandler = NewAttesterHandler(dutyStore.Attester) + syncHandler = NewSyncCommitteeHandler(dutyStore.SyncCommittee) + commHandler = NewCommitteeHandler(dutyStore.Attester, dutyStore.SyncCommittee) + alanForkEpoch = phase0.Epoch(2) + currentSlot = &SafeValue[phase0.Slot]{} + waitForDuties = &SafeValue[bool]{} + attDuties = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() + syncDuties = hashmap.New[uint64, []*eth2apiv1.SyncCommitteeDuty]() + activeShares = []*ssvtypes.SSVShare{{ + Share: spectypes.Share{ + Committee: []*spectypes.ShareMember{ + {Signer: 1}, {Signer: 2}, {Signer: 3}, {Signer: 4}, + }, + ValidatorIndex: 1, + }, + }} + ) + attDuties.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 3}, + Slot: phase0.Slot(1), + ValidatorIndex: phase0.ValidatorIndex(1), + }, + }) + attDuties.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 3}, + Slot: phase0.Slot(64), + ValidatorIndex: phase0.ValidatorIndex(1), + }, + }) + + currentSlot.Set(phase0.Slot(1)) 
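+
+	// this test crosses the Alan fork boundary (alanForkEpoch = 2): the epoch-0 duty at slot 1
+	// is expected to run through the pre-fork genesis attester flow, while the epoch-2 duty at
+	// slot 64 is expected to run through the post-fork committee flow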
+	scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{attHandler, syncHandler, commHandler}, currentSlot, alanForkEpoch)
+	fetchAttesterDutiesCall, executeAttesterDutiesCall := setupAttesterGenesisDutiesMock(scheduler, attDuties, waitForDuties)
+	_, _ = setupSyncCommitteeDutiesMock(scheduler, activeShares, syncDuties, waitForDuties)
+	fetchDutiesCall, executeDutiesCall := setupCommitteeDutiesMock(scheduler, activeShares, attDuties, syncDuties, waitForDuties)
+	startFn()
+
+	aDuties, _ := attDuties.Get(0)
+	aExpected := expectedExecutedGenesisAttesterDuties(attHandler, aDuties)
+	setExecuteGenesisDutyFunc(scheduler, executeAttesterDutiesCall, len(aExpected))
+
+	startTime := time.Now()
+	ticker.Send(currentSlot.Get())
+	waitForGenesisDutiesExecution(t, logger, fetchAttesterDutiesCall, executeAttesterDutiesCall, timeout, aExpected)
+
+	// validate that execution waited at least 1/3 of the slot duration
+	require.Less(t, scheduler.network.Beacon.SlotDurationSec()/3, time.Since(startTime))
+
+	// skip to the next epoch
+	currentSlot.Set(phase0.Slot(2))
+	for slot := currentSlot.Get(); slot < 47; slot++ {
+		ticker.Send(slot)
+		waitForNoActionGenesis(t, logger, fetchAttesterDutiesCall, executeAttesterDutiesCall, timeout)
+		currentSlot.Set(slot + 1)
+	}
+
+	// wait for duties to be fetched for the fork epoch
+	currentSlot.Set(phase0.Slot(47))
+	waitForDuties.Set(true)
+	ticker.Send(currentSlot.Get())
+	waitForGenesisDutiesFetch(t, logger, fetchAttesterDutiesCall, executeAttesterDutiesCall, timeout)
+
+	currentSlot.Set(phase0.Slot(64))
+	aDuties, _ = attDuties.Get(2)
+	committeeMap := commHandler.buildCommitteeDuties(aDuties, nil, 2, currentSlot.Get())
+	setExecuteDutyFuncs(scheduler, executeDutiesCall, len(committeeMap))
+
+	startTime = time.Now()
+	ticker.Send(currentSlot.Get())
+	waitForDutiesExecutionCommittee(t, logger, fetchDutiesCall, executeDutiesCall, timeout, committeeMap)
+
+	// validate that execution waited at least 1/3 of the slot duration
+	require.Less(t, scheduler.network.Beacon.SlotDurationSec()/3, time.Since(startTime))
+
+	// Stop scheduler & wait for graceful exit.
+	// Stop scheduler & wait for graceful exit.
+	cancel()
+	require.NoError(t, schedulerPool.Wait())
+}
diff --git a/operator/duties/dutystore/store.go b/operator/duties/dutystore/store.go
index 53dbfaefcc..1b1ff4032e 100644
--- a/operator/duties/dutystore/store.go
+++ b/operator/duties/dutystore/store.go
@@ -8,6 +8,7 @@ type Store struct {
 	Attester      *Duties[eth2apiv1.AttesterDuty]
 	Proposer      *Duties[eth2apiv1.ProposerDuty]
 	SyncCommittee *SyncCommitteeDuties
+	VoluntaryExit *VoluntaryExitDuties
 }

 func New() *Store {
@@ -15,5 +16,6 @@ func New() *Store {
 		Attester:      NewDuties[eth2apiv1.AttesterDuty](),
 		Proposer:      NewDuties[eth2apiv1.ProposerDuty](),
 		SyncCommittee: NewSyncCommitteeDuties(),
+		VoluntaryExit: NewVoluntaryExit(),
 	}
 }
diff --git a/operator/duties/dutystore/voluntary_exit.go b/operator/duties/dutystore/voluntary_exit.go
new file mode 100644
index 0000000000..d742753645
--- /dev/null
+++ b/operator/duties/dutystore/voluntary_exit.go
@@ -0,0 +1,51 @@
+package dutystore
+
+import (
+	"sync"
+
+	"github.com/attestantio/go-eth2-client/spec/phase0"
+)
+
+type VoluntaryExitDuties struct {
+	mu sync.RWMutex
+	m  map[phase0.Slot]map[phase0.BLSPubKey]int
+}
+
+func NewVoluntaryExit() *VoluntaryExitDuties {
+	return &VoluntaryExitDuties{
+		m: make(map[phase0.Slot]map[phase0.BLSPubKey]int),
+	}
+}
+
+func (d *VoluntaryExitDuties) GetDutyCount(slot phase0.Slot, pk phase0.BLSPubKey) int {
+	d.mu.RLock()
+	defer d.mu.RUnlock()
+
+	v, ok := d.m[slot]
+	if !ok {
+		return 0
+	}
+
+	return v[pk]
+}
+
+func (d *VoluntaryExitDuties) AddDuty(slot phase0.Slot, pk phase0.BLSPubKey) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	v, ok := d.m[slot]
+	if !ok {
+		d.m[slot] = map[phase0.BLSPubKey]int{
+			pk: 1,
+		}
+	} else {
+		v[pk]++
+	}
+}
+
+func (d *VoluntaryExitDuties) RemoveSlot(slot phase0.Slot) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	delete(d.m, slot)
+}
diff --git a/operator/duties/proposer.go b/operator/duties/proposer.go
index 06ac4b5f86..1f29702da8 100644
--- a/operator/duties/proposer.go
+++ b/operator/duties/proposer.go
@@ -54,13 +54,15 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) {
 	h.logger.Info("starting duty handler")
 	defer h.logger.Info("duty handler exited")

+	next := h.ticker.Next()
 	for {
 		select {
 		case <-ctx.Done():
 			return

-		case <-h.ticker.Next():
+		case <-next:
 			slot := h.ticker.Slot()
+			next = h.ticker.Next()
 			currentEpoch := h.network.Beacon.EstimatedEpochAtSlot(slot)
 			buildStr := fmt.Sprintf("e%v-s%v-#%v", currentEpoch, slot, slot%32+1)
 			h.logger.Debug("🛠 ticker event", zap.String("epoch_slot_pos", buildStr))
@@ -139,7 +141,7 @@ func (h *ProposerHandler) processExecution(epoch phase0.Epoch, slot phase0.Slot)
 			toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleProposer))
 		}
 	}
-	h.executeDuties(h.logger, toExecute)
+	h.dutiesExecutor.ExecuteDuties(h.logger, toExecute)
 }

 func (h *ProposerHandler) fetchAndProcessDuties(ctx context.Context, epoch phase0.Epoch) error {
@@ -147,6 +149,7 @@ func (h *ProposerHandler) fetchAndProcessDuties(ctx context.Context, epoch phase

 	allIndices := indicesFromShares(h.validatorProvider.ParticipatingValidators(epoch))
 	if len(allIndices) == 0 {
+		h.logger.Debug("no active validators for epoch", fields.Epoch(epoch))
 		return nil
 	}
diff --git a/operator/duties/proposer_test.go b/operator/duties/proposer_test.go
index c2007062bf..1316004b0d 100644
--- a/operator/duties/proposer_test.go
+++ b/operator/duties/proposer_test.go
@@ -8,12 +8,12 @@ import (
 	"github.com/attestantio/go-eth2-client/spec/phase0"
 	"github.com/cornelk/hashmap"
 	"github.com/stretchr/testify/require"
-	gomock
"go.uber.org/mock/gomock" + "go.uber.org/mock/gomock" spectypes "github.com/ssvlabs/ssv-spec/types" + "github.com/ssvlabs/ssv/beacon/goclient" "github.com/ssvlabs/ssv/operator/duties/dutystore" - "github.com/ssvlabs/ssv/operator/duties/mocks" "github.com/ssvlabs/ssv/protocol/v2/types" ) @@ -21,7 +21,7 @@ func setupProposerDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[phase0.Epoch, fetchDutiesCall := make(chan struct{}) executeDutiesCall := make(chan []*spectypes.BeaconDuty) - s.beaconNode.(*mocks.MockBeaconNode).EXPECT().ProposerDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + s.beaconNode.(*MockBeaconNode).EXPECT().ProposerDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*eth2apiv1.ProposerDuty, error) { fetchDutiesCall <- struct{}{} duties, _ := dutiesMap.Get(epoch) @@ -49,8 +49,8 @@ func setupProposerDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[phase0.Epoch, return shares } - s.validatorProvider.(*mocks.MockValidatorProvider).EXPECT().SelfParticipatingValidators(gomock.Any()).DoAndReturn(getShares).AnyTimes() - s.validatorProvider.(*mocks.MockValidatorProvider).EXPECT().ParticipatingValidators(gomock.Any()).DoAndReturn(getShares).AnyTimes() + s.validatorProvider.(*MockValidatorProvider).EXPECT().SelfParticipatingValidators(gomock.Any()).DoAndReturn(getShares).AnyTimes() + s.validatorProvider.(*MockValidatorProvider).EXPECT().ParticipatingValidators(gomock.Any()).DoAndReturn(getShares).AnyTimes() return fetchDutiesCall, executeDutiesCall } @@ -70,7 +70,7 @@ func TestScheduler_Proposer_Same_Slot(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.Set(phase0.Slot(0)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, goclient.FarFutureEpoch) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) startFn() @@ -103,7 +103,7 @@ func TestScheduler_Proposer_Diff_Slots(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.Set(phase0.Slot(0)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, goclient.FarFutureEpoch) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) startFn() @@ -146,7 +146,7 @@ func TestScheduler_Proposer_Indices_Changed(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.Set(phase0.Slot(0)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, goclient.FarFutureEpoch) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) startFn() @@ -209,7 +209,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.Set(phase0.Slot(0)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + 
scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, goclient.FarFutureEpoch) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) startFn() @@ -290,7 +290,7 @@ func TestScheduler_Proposer_Reorg_Current(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.Set(phase0.Slot(34)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, goclient.FarFutureEpoch) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) startFn() @@ -366,7 +366,7 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.Set(phase0.Slot(34)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, goclient.FarFutureEpoch) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) startFn() diff --git a/operator/duties/scheduler.go b/operator/duties/scheduler.go index 9ccd259e31..8297d726b4 100644 --- a/operator/duties/scheduler.go +++ b/operator/duties/scheduler.go @@ -16,9 +16,9 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prysmaticlabs/prysm/v4/async/event" "github.com/sourcegraph/conc/pool" - "go.uber.org/zap" - + genesisspectypes "github.com/ssvlabs/ssv-spec-pre-cc/types" spectypes "github.com/ssvlabs/ssv-spec/types" + "go.uber.org/zap" "github.com/ssvlabs/ssv/beacon/goclient" "github.com/ssvlabs/ssv/logging" @@ -29,7 +29,7 @@ import ( "github.com/ssvlabs/ssv/protocol/v2/types" ) -//go:generate mockgen -package=mocks -destination=./mocks/scheduler.go -source=./scheduler.go +//go:generate mockgen -package=duties -destination=./scheduler_mock.go -source=./scheduler.go var slotDelayHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ Name: "slot_ticker_delay_milliseconds", @@ -50,9 +50,18 @@ const ( blockPropagationDelay = 200 * time.Millisecond ) -type SlotTicker interface { - Next() <-chan time.Time - Slot() phase0.Slot +// DutiesExecutor is an interface for executing duties. +type DutiesExecutor interface { + ExecuteGenesisDuties(logger *zap.Logger, duties []*genesisspectypes.Duty) + ExecuteDuties(logger *zap.Logger, duties []*spectypes.BeaconDuty) + ExecuteCommitteeDuties(logger *zap.Logger, duties committeeDutiesMap) +} + +// DutyExecutor is an interface for executing duty. 
+type DutyExecutor interface { + ExecuteGenesisDuty(logger *zap.Logger, duty *genesisspectypes.Duty) + ExecuteDuty(logger *zap.Logger, duty *spectypes.BeaconDuty) + ExecuteCommitteeDuty(logger *zap.Logger, committeeID spectypes.CommitteeID, duty *spectypes.CommitteeDuty) } type BeaconNode interface { @@ -80,33 +89,28 @@ type ValidatorController interface { AllActiveIndices(epoch phase0.Epoch, afterInit bool) []phase0.ValidatorIndex } -type ExecuteDutyFunc func(logger *zap.Logger, duty *spectypes.BeaconDuty) -type ExecuteCommitteeDutyFunc func(logger *zap.Logger, committeeID spectypes.CommitteeID, duty *spectypes.CommitteeDuty) - type SchedulerOptions struct { - Ctx context.Context - BeaconNode BeaconNode - ExecutionClient ExecutionClient - Network networkconfig.NetworkConfig - ValidatorProvider ValidatorProvider - ValidatorController ValidatorController - ExecuteDuty ExecuteDutyFunc - ExecuteCommitteeDuty ExecuteCommitteeDutyFunc - IndicesChg chan struct{} - ValidatorExitCh <-chan ExitDescriptor - SlotTickerProvider slotticker.Provider - DutyStore *dutystore.Store + Ctx context.Context + BeaconNode BeaconNode + ExecutionClient ExecutionClient + Network networkconfig.NetworkConfig + ValidatorProvider ValidatorProvider + ValidatorController ValidatorController + DutyExecutor DutyExecutor + IndicesChg chan struct{} + ValidatorExitCh <-chan ExitDescriptor + SlotTickerProvider slotticker.Provider + DutyStore *dutystore.Store } type Scheduler struct { - beaconNode BeaconNode - executionClient ExecutionClient - network networkconfig.NetworkConfig - validatorProvider ValidatorProvider - validatorController ValidatorController - slotTickerProvider slotticker.Provider - executeDuty ExecuteDutyFunc - executeCommitteeDuty ExecuteCommitteeDutyFunc + beaconNode BeaconNode + executionClient ExecutionClient + network networkconfig.NetworkConfig + validatorProvider ValidatorProvider + validatorController ValidatorController + slotTickerProvider slotticker.Provider + dutyExecutor DutyExecutor handlers []dutyHandler blockPropagateDelay time.Duration @@ -130,22 +134,21 @@ func NewScheduler(opts *SchedulerOptions) *Scheduler { } s := &Scheduler{ - beaconNode: opts.BeaconNode, - executionClient: opts.ExecutionClient, - network: opts.Network, - slotTickerProvider: opts.SlotTickerProvider, - executeDuty: opts.ExecuteDuty, - executeCommitteeDuty: opts.ExecuteCommitteeDuty, - validatorProvider: opts.ValidatorProvider, - validatorController: opts.ValidatorController, - indicesChg: opts.IndicesChg, - blockPropagateDelay: blockPropagationDelay, + beaconNode: opts.BeaconNode, + executionClient: opts.ExecutionClient, + network: opts.Network, + slotTickerProvider: opts.SlotTickerProvider, + dutyExecutor: opts.DutyExecutor, + validatorProvider: opts.ValidatorProvider, + validatorController: opts.ValidatorController, + indicesChg: opts.IndicesChg, + blockPropagateDelay: blockPropagationDelay, handlers: []dutyHandler{ NewAttesterHandler(dutyStore.Attester), NewProposerHandler(dutyStore.Proposer), NewSyncCommitteeHandler(dutyStore.SyncCommittee), - NewVoluntaryExitHandler(opts.ValidatorExitCh), + NewVoluntaryExitHandler(dutyStore.VoluntaryExit, opts.ValidatorExitCh), NewCommitteeHandler(dutyStore.Attester, dutyStore.SyncCommittee), }, @@ -195,8 +198,7 @@ func (s *Scheduler) Start(ctx context.Context, logger *zap.Logger) error { s.network, s.validatorProvider, s.validatorController, - s.ExecuteDuties, - s.ExecuteCommitteeDuties, + s, s.slotTickerProvider, reorgCh, indicesChangeCh, @@ -369,6 +371,24 @@ func (s *Scheduler) 
HandleHeadEvent(logger *zap.Logger) func(event *eth2apiv1.Ev } } +func (s *Scheduler) ExecuteGenesisDuties(logger *zap.Logger, duties []*genesisspectypes.Duty) { + for _, duty := range duties { + duty := duty + logger := s.loggerWithGenesisDutyContext(logger, duty) + slotDelay := time.Since(s.network.Beacon.GetSlotStartTime(duty.Slot)) + if slotDelay >= 100*time.Millisecond { + logger.Debug("⚠️ late duty execution", zap.Int64("slot_delay", slotDelay.Milliseconds())) + } + slotDelayHistogram.Observe(float64(slotDelay.Milliseconds())) + go func() { + if duty.Type == genesisspectypes.BNRoleAttester || duty.Type == genesisspectypes.BNRoleSyncCommittee { + s.waitOneThirdOrValidBlock(duty.Slot) + } + s.dutyExecutor.ExecuteGenesisDuty(logger, duty) + }() + } +} + // ExecuteDuties tries to execute the given duties func (s *Scheduler) ExecuteDuties(logger *zap.Logger, duties []*spectypes.BeaconDuty) { for _, duty := range duties { @@ -383,7 +403,7 @@ func (s *Scheduler) ExecuteDuties(logger *zap.Logger, duties []*spectypes.Beacon if duty.Type == spectypes.BNRoleAttester || duty.Type == spectypes.BNRoleSyncCommittee { s.waitOneThirdOrValidBlock(duty.Slot) } - s.executeDuty(logger, duty) + s.dutyExecutor.ExecuteDuty(logger, duty) }() } } @@ -401,11 +421,23 @@ func (s *Scheduler) ExecuteCommitteeDuties(logger *zap.Logger, duties committeeD slotDelayHistogram.Observe(float64(slotDelay.Milliseconds())) go func() { s.waitOneThirdOrValidBlock(duty.Slot) - s.executeCommitteeDuty(logger, committeeID, duty) + s.dutyExecutor.ExecuteCommitteeDuty(logger, committeeID, duty) }() } } +// loggerWithGenesisDutyContext returns an instance of logger with the given genesis duty's information +func (s *Scheduler) loggerWithGenesisDutyContext(logger *zap.Logger, duty *genesisspectypes.Duty) *zap.Logger { + return logger. + With(zap.Stringer(fields.FieldRole, duty.Type)). + With(zap.Uint64("committee_index", uint64(duty.CommitteeIndex))). + With(fields.CurrentSlot(s.network.Beacon.EstimatedCurrentSlot())). + With(fields.Slot(duty.Slot)). + With(fields.Epoch(s.network.Beacon.EstimatedEpochAtSlot(duty.Slot))). + With(fields.PubKey(duty.PubKey[:])). + With(fields.StartTimeUnixMilli(s.network.Beacon.GetSlotStartTime(duty.Slot))) +} + // loggerWithDutyContext returns an instance of logger with the given duty's information func (s *Scheduler) loggerWithDutyContext(logger *zap.Logger, duty *spectypes.BeaconDuty) *zap.Logger { return logger. diff --git a/operator/duties/mocks/scheduler.go b/operator/duties/scheduler_mock.go similarity index 67% rename from operator/duties/mocks/scheduler.go rename to operator/duties/scheduler_mock.go index 9e0ae16b4a..e0bada43ef 100644 --- a/operator/duties/mocks/scheduler.go +++ b/operator/duties/scheduler_mock.go @@ -3,75 +3,144 @@ // // Generated by this command: // -// mockgen -package=mocks -destination=./mocks/scheduler.go -source=./scheduler.go +// mockgen -package=duties -destination=./scheduler_mock.go -source=./scheduler.go // -// Package mocks is a generated GoMock package. -package mocks +// Package duties is a generated GoMock package. 
+package duties import ( context "context" big "math/big" reflect "reflect" - time "time" client "github.com/attestantio/go-eth2-client" v1 "github.com/attestantio/go-eth2-client/api/v1" phase0 "github.com/attestantio/go-eth2-client/spec/phase0" types "github.com/ethereum/go-ethereum/core/types" - types0 "github.com/ssvlabs/ssv/protocol/v2/types" + types0 "github.com/ssvlabs/ssv-spec-pre-cc/types" + types1 "github.com/ssvlabs/ssv-spec/types" + types2 "github.com/ssvlabs/ssv/protocol/v2/types" gomock "go.uber.org/mock/gomock" + zap "go.uber.org/zap" ) -// MockSlotTicker is a mock of SlotTicker interface. -type MockSlotTicker struct { +// MockDutiesExecutor is a mock of DutiesExecutor interface. +type MockDutiesExecutor struct { ctrl *gomock.Controller - recorder *MockSlotTickerMockRecorder + recorder *MockDutiesExecutorMockRecorder } -// MockSlotTickerMockRecorder is the mock recorder for MockSlotTicker. -type MockSlotTickerMockRecorder struct { - mock *MockSlotTicker +// MockDutiesExecutorMockRecorder is the mock recorder for MockDutiesExecutor. +type MockDutiesExecutorMockRecorder struct { + mock *MockDutiesExecutor } -// NewMockSlotTicker creates a new mock instance. -func NewMockSlotTicker(ctrl *gomock.Controller) *MockSlotTicker { - mock := &MockSlotTicker{ctrl: ctrl} - mock.recorder = &MockSlotTickerMockRecorder{mock} +// NewMockDutiesExecutor creates a new mock instance. +func NewMockDutiesExecutor(ctrl *gomock.Controller) *MockDutiesExecutor { + mock := &MockDutiesExecutor{ctrl: ctrl} + mock.recorder = &MockDutiesExecutorMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSlotTicker) EXPECT() *MockSlotTickerMockRecorder { +func (m *MockDutiesExecutor) EXPECT() *MockDutiesExecutorMockRecorder { return m.recorder } -// Next mocks base method. -func (m *MockSlotTicker) Next() <-chan time.Time { +// ExecuteCommitteeDuties mocks base method. +func (m *MockDutiesExecutor) ExecuteCommitteeDuties(logger *zap.Logger, duties committeeDutiesMap) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Next") - ret0, _ := ret[0].(<-chan time.Time) - return ret0 + m.ctrl.Call(m, "ExecuteCommitteeDuties", logger, duties) } -// Next indicates an expected call of Next. -func (mr *MockSlotTickerMockRecorder) Next() *gomock.Call { +// ExecuteCommitteeDuties indicates an expected call of ExecuteCommitteeDuties. +func (mr *MockDutiesExecutorMockRecorder) ExecuteCommitteeDuties(logger, duties any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockSlotTicker)(nil).Next)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteCommitteeDuties", reflect.TypeOf((*MockDutiesExecutor)(nil).ExecuteCommitteeDuties), logger, duties) } -// Slot mocks base method. -func (m *MockSlotTicker) Slot() phase0.Slot { +// ExecuteDuties mocks base method. +func (m *MockDutiesExecutor) ExecuteDuties(logger *zap.Logger, duties []*types1.BeaconDuty) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Slot") - ret0, _ := ret[0].(phase0.Slot) - return ret0 + m.ctrl.Call(m, "ExecuteDuties", logger, duties) +} + +// ExecuteDuties indicates an expected call of ExecuteDuties. +func (mr *MockDutiesExecutorMockRecorder) ExecuteDuties(logger, duties any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteDuties", reflect.TypeOf((*MockDutiesExecutor)(nil).ExecuteDuties), logger, duties) +} + +// ExecuteGenesisDuties mocks base method. 
+func (m *MockDutiesExecutor) ExecuteGenesisDuties(logger *zap.Logger, duties []*types0.Duty) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ExecuteGenesisDuties", logger, duties) +} + +// ExecuteGenesisDuties indicates an expected call of ExecuteGenesisDuties. +func (mr *MockDutiesExecutorMockRecorder) ExecuteGenesisDuties(logger, duties any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteGenesisDuties", reflect.TypeOf((*MockDutiesExecutor)(nil).ExecuteGenesisDuties), logger, duties) +} + +// MockDutyExecutor is a mock of DutyExecutor interface. +type MockDutyExecutor struct { + ctrl *gomock.Controller + recorder *MockDutyExecutorMockRecorder +} + +// MockDutyExecutorMockRecorder is the mock recorder for MockDutyExecutor. +type MockDutyExecutorMockRecorder struct { + mock *MockDutyExecutor +} + +// NewMockDutyExecutor creates a new mock instance. +func NewMockDutyExecutor(ctrl *gomock.Controller) *MockDutyExecutor { + mock := &MockDutyExecutor{ctrl: ctrl} + mock.recorder = &MockDutyExecutorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDutyExecutor) EXPECT() *MockDutyExecutorMockRecorder { + return m.recorder +} + +// ExecuteCommitteeDuty mocks base method. +func (m *MockDutyExecutor) ExecuteCommitteeDuty(logger *zap.Logger, committeeID types1.CommitteeID, duty *types1.CommitteeDuty) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ExecuteCommitteeDuty", logger, committeeID, duty) +} + +// ExecuteCommitteeDuty indicates an expected call of ExecuteCommitteeDuty. +func (mr *MockDutyExecutorMockRecorder) ExecuteCommitteeDuty(logger, committeeID, duty any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteCommitteeDuty", reflect.TypeOf((*MockDutyExecutor)(nil).ExecuteCommitteeDuty), logger, committeeID, duty) +} + +// ExecuteDuty mocks base method. +func (m *MockDutyExecutor) ExecuteDuty(logger *zap.Logger, duty *types1.BeaconDuty) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ExecuteDuty", logger, duty) +} + +// ExecuteDuty indicates an expected call of ExecuteDuty. +func (mr *MockDutyExecutorMockRecorder) ExecuteDuty(logger, duty any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteDuty", reflect.TypeOf((*MockDutyExecutor)(nil).ExecuteDuty), logger, duty) +} + +// ExecuteGenesisDuty mocks base method. +func (m *MockDutyExecutor) ExecuteGenesisDuty(logger *zap.Logger, duty *types0.Duty) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ExecuteGenesisDuty", logger, duty) } -// Slot indicates an expected call of Slot. -func (mr *MockSlotTickerMockRecorder) Slot() *gomock.Call { +// ExecuteGenesisDuty indicates an expected call of ExecuteGenesisDuty. +func (mr *MockDutyExecutorMockRecorder) ExecuteGenesisDuty(logger, duty any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Slot", reflect.TypeOf((*MockSlotTicker)(nil).Slot)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteGenesisDuty", reflect.TypeOf((*MockDutyExecutor)(nil).ExecuteGenesisDuty), logger, duty) } // MockBeaconNode is a mock of BeaconNode interface. @@ -246,10 +315,10 @@ func (m *MockValidatorProvider) EXPECT() *MockValidatorProviderMockRecorder { } // ParticipatingValidators mocks base method. 
-func (m *MockValidatorProvider) ParticipatingValidators(epoch phase0.Epoch) []*types0.SSVShare { +func (m *MockValidatorProvider) ParticipatingValidators(epoch phase0.Epoch) []*types2.SSVShare { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ParticipatingValidators", epoch) - ret0, _ := ret[0].([]*types0.SSVShare) + ret0, _ := ret[0].([]*types2.SSVShare) return ret0 } @@ -260,10 +329,10 @@ func (mr *MockValidatorProviderMockRecorder) ParticipatingValidators(epoch any) } // SelfParticipatingValidators mocks base method. -func (m *MockValidatorProvider) SelfParticipatingValidators(epoch phase0.Epoch) []*types0.SSVShare { +func (m *MockValidatorProvider) SelfParticipatingValidators(epoch phase0.Epoch) []*types2.SSVShare { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SelfParticipatingValidators", epoch) - ret0, _ := ret[0].([]*types0.SSVShare) + ret0, _ := ret[0].([]*types2.SSVShare) return ret0 } @@ -274,10 +343,10 @@ func (mr *MockValidatorProviderMockRecorder) SelfParticipatingValidators(epoch a } // Validator mocks base method. -func (m *MockValidatorProvider) Validator(pubKey []byte) *types0.SSVShare { +func (m *MockValidatorProvider) Validator(pubKey []byte) *types2.SSVShare { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Validator", pubKey) - ret0, _ := ret[0].(*types0.SSVShare) + ret0, _ := ret[0].(*types2.SSVShare) return ret0 } diff --git a/operator/duties/scheduler_test.go b/operator/duties/scheduler_test.go index b2cf5196fe..04e6a41ea8 100644 --- a/operator/duties/scheduler_test.go +++ b/operator/duties/scheduler_test.go @@ -9,14 +9,14 @@ import ( "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/prysmaticlabs/prysm/v4/async/event" "github.com/sourcegraph/conc/pool" + genesisspectypes "github.com/ssvlabs/ssv-spec-pre-cc/types" spectypes "github.com/ssvlabs/ssv-spec/types" "github.com/stretchr/testify/require" - gomock "go.uber.org/mock/gomock" + "go.uber.org/mock/gomock" "go.uber.org/zap" "github.com/ssvlabs/ssv/logging" "github.com/ssvlabs/ssv/networkconfig" - "github.com/ssvlabs/ssv/operator/duties/mocks" "github.com/ssvlabs/ssv/operator/slotticker" mockslotticker "github.com/ssvlabs/ssv/operator/slotticker/mocks" mocknetwork "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon/mocks" @@ -73,7 +73,7 @@ type mockSlotTickerService struct { event.Feed } -func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *SafeValue[phase0.Slot]) ( +func setupSchedulerAndMocks(t *testing.T, handlers []dutyHandler, currentSlot *SafeValue[phase0.Slot], alanForkEpoch phase0.Epoch) ( *Scheduler, *zap.Logger, *mockSlotTickerService, @@ -89,13 +89,15 @@ func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *Safe ctx, cancel := context.WithCancel(context.Background()) logger := logging.TestLogger(t) - mockBeaconNode := mocks.NewMockBeaconNode(ctrl) - mockExecutionClient := mocks.NewMockExecutionClient(ctrl) - mockValidatorProvider := mocks.NewMockValidatorProvider(ctrl) - mockValidatorController := mocks.NewMockValidatorController(ctrl) + mockBeaconNode := NewMockBeaconNode(ctrl) + mockExecutionClient := NewMockExecutionClient(ctrl) + mockValidatorProvider := NewMockValidatorProvider(ctrl) + mockValidatorController := NewMockValidatorController(ctrl) + mockDutyExecutor := NewMockDutyExecutor(ctrl) mockSlotService := &mockSlotTickerService{} mockNetworkConfig := networkconfig.NetworkConfig{ - Beacon: mocknetwork.NewMockBeaconNetwork(ctrl), + Beacon: mocknetwork.NewMockBeaconNetwork(ctrl), + AlanForkEpoch: alanForkEpoch, } opts := &SchedulerOptions{ @@ -105,6 
+107,7 @@ func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *Safe Network: mockNetworkConfig, ValidatorProvider: mockValidatorProvider, ValidatorController: mockValidatorController, + DutyExecutor: mockDutyExecutor, SlotTickerProvider: func() slotticker.SlotTicker { ticker := NewMockSlotTicker() mockSlotService.Subscribe(ticker.Subscribe()) @@ -115,7 +118,7 @@ func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *Safe s := NewScheduler(opts) s.blockPropagateDelay = 1 * time.Millisecond s.indicesChg = make(chan struct{}) - s.handlers = []dutyHandler{handler} + s.handlers = handlers mockBeaconNode.EXPECT().Events(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) @@ -162,114 +165,54 @@ func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *Safe return s, logger, mockSlotService, timeout, cancel, schedulerPool, startFunction } -func setupSchedulerAndMocksCommittee(t *testing.T, handlers []dutyHandler, currentSlot *SafeValue[phase0.Slot]) ( - *Scheduler, - *zap.Logger, - *mockSlotTickerService, - time.Duration, - context.CancelFunc, - *pool.ContextPool, - func(), -) { - ctrl := gomock.NewController(t) - // A 200ms timeout ensures the test passes, even with mockSlotTicker overhead. - timeout := 200 * time.Millisecond - - ctx, cancel := context.WithCancel(context.Background()) - logger := logging.TestLogger(t) - - mockBeaconNode := mocks.NewMockBeaconNode(ctrl) - mockExecutionClient := mocks.NewMockExecutionClient(ctrl) - mockValidatorProvider := mocks.NewMockValidatorProvider(ctrl) - mockValidatorController := mocks.NewMockValidatorController(ctrl) - mockSlotService := &mockSlotTickerService{} - mockNetworkConfig := networkconfig.NetworkConfig{ - Beacon: mocknetwork.NewMockBeaconNetwork(ctrl), - } - - opts := &SchedulerOptions{ - Ctx: ctx, - BeaconNode: mockBeaconNode, - ExecutionClient: mockExecutionClient, - Network: mockNetworkConfig, - ValidatorProvider: mockValidatorProvider, - ValidatorController: mockValidatorController, - SlotTickerProvider: func() slotticker.SlotTicker { - ticker := NewMockSlotTicker() - mockSlotService.Subscribe(ticker.Subscribe()) - return ticker - }, - } - - s := NewScheduler(opts) - s.blockPropagateDelay = 1 * time.Millisecond - s.indicesChg = make(chan struct{}) - s.handlers = handlers +func setExecuteDutyFunc(s *Scheduler, executeDutiesCall chan []*spectypes.BeaconDuty, executeDutiesCallSize int) { + executeDutiesBuffer := make(chan *spectypes.BeaconDuty, executeDutiesCallSize) - mockBeaconNode.EXPECT().Events(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + s.dutyExecutor.(*MockDutyExecutor).EXPECT().ExecuteDuty(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( + func(logger *zap.Logger, duty *spectypes.BeaconDuty) error { + logger.Debug("🏃 Executing duty", zap.Any("duty", duty)) + executeDutiesBuffer <- duty + + // Check if all expected duties have been received + if len(executeDutiesBuffer) == executeDutiesCallSize { + // Build the array of duties + var duties []*spectypes.BeaconDuty + for i := 0; i < executeDutiesCallSize; i++ { + d := <-executeDutiesBuffer + duties = append(duties, d) + } - mockNetworkConfig.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().MinGenesisTime().Return(uint64(0)).AnyTimes() - mockNetworkConfig.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().SlotDurationSec().Return(150 * time.Millisecond).AnyTimes() - mockNetworkConfig.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().SlotsPerEpoch().Return(uint64(32)).AnyTimes() - 
mockNetworkConfig.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().GetSlotStartTime(gomock.Any()).DoAndReturn( - func(slot phase0.Slot) time.Time { - return time.Now() - }, - ).AnyTimes() - mockNetworkConfig.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().EstimatedEpochAtSlot(gomock.Any()).DoAndReturn( - func(slot phase0.Slot) phase0.Epoch { - return phase0.Epoch(uint64(slot) / s.network.SlotsPerEpoch()) + // Send the array of duties to executeDutiesCall + executeDutiesCall <- duties + } + return nil }, ).AnyTimes() +} - s.network.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().EstimatedCurrentSlot().DoAndReturn( - func() phase0.Slot { - return currentSlot.Get() - }, - ).AnyTimes() +func setExecuteGenesisDutyFunc(s *Scheduler, executeDutiesCall chan []*genesisspectypes.Duty, executeDutiesCallSize int) { + executeDutiesBuffer := make(chan *genesisspectypes.Duty, executeDutiesCallSize) + + s.dutyExecutor.(*MockDutyExecutor).EXPECT().ExecuteGenesisDuty(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( + func(logger *zap.Logger, duty *genesisspectypes.Duty) error { + logger.Debug("🏃 Executing duty", zap.Any("duty", duty)) + executeDutiesBuffer <- duty + + // Check if all expected duties have been received + if len(executeDutiesBuffer) == executeDutiesCallSize { + // Build the array of duties + var duties []*genesisspectypes.Duty + for i := 0; i < executeDutiesCallSize; i++ { + d := <-executeDutiesBuffer + duties = append(duties, d) + } - s.network.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().EstimatedCurrentEpoch().DoAndReturn( - func() phase0.Epoch { - return phase0.Epoch(uint64(currentSlot.Get()) / s.network.SlotsPerEpoch()) + // Send the array of duties to executeDutiesCall + executeDutiesCall <- duties + } + return nil }, ).AnyTimes() - - s.network.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().EpochsPerSyncCommitteePeriod().Return(uint64(256)).AnyTimes() - - // Create a pool to wait for the scheduler to finish. 
- schedulerPool := pool.New().WithErrors().WithContext(ctx) - - startFunction := func() { - err := s.Start(ctx, logger) - require.NoError(t, err) - - schedulerPool.Go(func(ctx context.Context) error { - return s.Wait() - }) - } - - return s, logger, mockSlotService, timeout, cancel, schedulerPool, startFunction -} - -func setExecuteDutyFunc(s *Scheduler, executeDutiesCall chan []*spectypes.BeaconDuty, executeDutiesCallSize int) { - executeDutiesBuffer := make(chan *spectypes.BeaconDuty, executeDutiesCallSize) - s.executeDuty = func(logger *zap.Logger, duty *spectypes.BeaconDuty) { - logger.Debug("🏃 Executing duty", zap.Any("duty", duty)) - executeDutiesBuffer <- duty - - // Check if all expected duties have been received - if len(executeDutiesBuffer) == executeDutiesCallSize { - // Build the array of duties - var duties []*spectypes.BeaconDuty - for i := 0; i < executeDutiesCallSize; i++ { - d := <-executeDutiesBuffer - duties = append(duties, d) - } - - // Send the array of duties to executeDutiesCall - executeDutiesCall <- duties - } - } } func setExecuteDutyFuncs(s *Scheduler, executeDutiesCall chan committeeDutiesMap, executeDutiesCallSize int) { @@ -278,32 +221,50 @@ func setExecuteDutyFuncs(s *Scheduler, executeDutiesCall chan committeeDutiesMap *spectypes.CommitteeDuty } executeDutiesBuffer := make(chan *committeeDuty, executeDutiesCallSize) - s.executeDuty = func(logger *zap.Logger, duty *spectypes.BeaconDuty) { - logger.Debug("🏃 Executing duty", zap.Any("duty", duty)) - } - s.executeCommitteeDuty = func(logger *zap.Logger, committeeID spectypes.CommitteeID, duty *spectypes.CommitteeDuty) { - logger.Debug("🏃 Executing committee duty", zap.Any("duty", duty)) - executeDutiesBuffer <- &committeeDuty{CommitteeID: committeeID, CommitteeDuty: duty} - - // Check if all expected duties have been received - if len(executeDutiesBuffer) == executeDutiesCallSize { - // Build the array of duties - duties := make(committeeDutiesMap) - for i := 0; i < executeDutiesCallSize; i++ { - d := <-executeDutiesBuffer - - if _, ok := duties[d.CommitteeID]; !ok { - duties[d.CommitteeID] = d.CommitteeDuty + + s.dutyExecutor.(*MockDutyExecutor).EXPECT().ExecuteDuty(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( + func(logger *zap.Logger, duty *spectypes.BeaconDuty) error { + logger.Debug("🏃 Executing duty", zap.Any("duty", duty)) + return nil + }, + ).AnyTimes() + + s.dutyExecutor.(*MockDutyExecutor).EXPECT().ExecuteCommitteeDuty(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( + func(logger *zap.Logger, committeeID spectypes.CommitteeID, duty *spectypes.CommitteeDuty) { + logger.Debug("🏃 Executing committee duty", zap.Any("duty", duty)) + executeDutiesBuffer <- &committeeDuty{CommitteeID: committeeID, CommitteeDuty: duty} + + // Check if all expected duties have been received + if len(executeDutiesBuffer) == executeDutiesCallSize { + // Build the array of duties + duties := make(committeeDutiesMap) + for i := 0; i < executeDutiesCallSize; i++ { + d := <-executeDutiesBuffer + + if _, ok := duties[d.CommitteeID]; !ok { + duties[d.CommitteeID] = d.CommitteeDuty + } } + + // Send the array of duties to executeDutiesCall + executeDutiesCall <- duties } + }, + ).AnyTimes() +} - // Send the array of duties to executeDutiesCall - executeDutiesCall <- duties - } +func waitForDutiesFetch(t *testing.T, logger *zap.Logger, fetchDutiesCall chan struct{}, executeDutiesCall chan []*spectypes.BeaconDuty, timeout time.Duration) { + select { + case <-fetchDutiesCall: + logger.Debug("duties fetched") + 
case <-executeDutiesCall: + require.FailNow(t, "unexpected execute duty call") + case <-time.After(timeout): + require.FailNow(t, "timed out waiting for duties to be fetched") } } -func waitForDutiesFetch(t *testing.T, logger *zap.Logger, fetchDutiesCall chan struct{}, executeDutiesCall chan []*spectypes.BeaconDuty, timeout time.Duration) { +func waitForGenesisDutiesFetch(t *testing.T, logger *zap.Logger, fetchDutiesCall chan struct{}, executeDutiesCall chan []*genesisspectypes.Duty, timeout time.Duration) { select { case <-fetchDutiesCall: logger.Debug("duties fetched") @@ -325,6 +286,17 @@ func waitForNoAction(t *testing.T, logger *zap.Logger, fetchDutiesCall chan stru } } +func waitForNoActionGenesis(t *testing.T, logger *zap.Logger, fetchDutiesCall chan struct{}, executeDutiesCall chan []*genesisspectypes.Duty, timeout time.Duration) { + select { + case <-fetchDutiesCall: + require.FailNow(t, "unexpected duties call") + case <-executeDutiesCall: + require.FailNow(t, "unexpected execute duty call") + case <-time.After(timeout): + // No action as expected. + } +} + func waitForDutiesExecution(t *testing.T, logger *zap.Logger, fetchDutiesCall chan struct{}, executeDutiesCall chan []*spectypes.BeaconDuty, timeout time.Duration, expectedDuties []*spectypes.BeaconDuty) { select { case <-fetchDutiesCall: @@ -349,6 +321,30 @@ func waitForDutiesExecution(t *testing.T, logger *zap.Logger, fetchDutiesCall ch } } +func waitForGenesisDutiesExecution(t *testing.T, logger *zap.Logger, fetchDutiesCall chan struct{}, executeDutiesCall chan []*genesisspectypes.Duty, timeout time.Duration, expectedDuties []*genesisspectypes.Duty) { + select { + case <-fetchDutiesCall: + require.FailNow(t, "unexpected duties call") + case duties := <-executeDutiesCall: + logger.Debug("duties executed", zap.Any("duties", duties)) + logger.Debug("expected duties", zap.Any("duties", expectedDuties)) + require.Len(t, duties, len(expectedDuties)) + for _, e := range expectedDuties { + found := false + for _, d := range duties { + if e.Type == d.Type && e.PubKey == d.PubKey && e.ValidatorIndex == d.ValidatorIndex && e.Slot == d.Slot { + found = true + break + } + } + require.True(t, found) + } + + case <-time.After(timeout): + require.FailNow(t, "timed out waiting for duty to be executed") + } +} + func waitForDutiesFetchCommittee(t *testing.T, logger *zap.Logger, fetchDutiesCall chan struct{}, executeDutiesCall chan committeeDutiesMap, timeout time.Duration) { select { case <-fetchDutiesCall: @@ -428,8 +424,8 @@ func TestScheduler_Run(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) logger := logging.TestLogger(t) - mockBeaconNode := mocks.NewMockBeaconNode(ctrl) - mockValidatorProvider := mocks.NewMockValidatorProvider(ctrl) + mockBeaconNode := NewMockBeaconNode(ctrl) + mockValidatorProvider := NewMockValidatorProvider(ctrl) mockTicker := mockslotticker.NewMockSlotTicker(ctrl) // create multiple mock duty handlers mockDutyHandler1 := NewMockdutyHandler(ctrl) @@ -457,7 +453,7 @@ func TestScheduler_Run(t *testing.T) { // setup mock duty handler expectations for _, mockDutyHandler := range s.handlers { - mockDutyHandler.(*MockdutyHandler).EXPECT().Setup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1) + mockDutyHandler.(*MockdutyHandler).EXPECT().Setup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), 
gomock.Any(), gomock.Any()).Times(1) mockDutyHandler.(*MockdutyHandler).EXPECT().HandleDuties(gomock.Any()). DoAndReturn(func(ctx context.Context) { <-ctx.Done() @@ -481,8 +477,8 @@ func TestScheduler_Regression_IndicesChangeStuck(t *testing.T) { defer cancel() logger := logging.TestLogger(t) - mockBeaconNode := mocks.NewMockBeaconNode(ctrl) - mockValidatorProvider := mocks.NewMockValidatorProvider(ctrl) + mockBeaconNode := NewMockBeaconNode(ctrl) + mockValidatorProvider := NewMockValidatorProvider(ctrl) mockTicker := mockslotticker.NewMockSlotTicker(ctrl) // create multiple mock duty handlers diff --git a/operator/duties/sync_committee.go b/operator/duties/sync_committee.go index df3d336c5b..5dd0d81c00 100644 --- a/operator/duties/sync_committee.go +++ b/operator/duties/sync_committee.go @@ -8,6 +8,7 @@ import ( eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec/phase0" + genesisspectypes "github.com/ssvlabs/ssv-spec-pre-cc/types" spectypes "github.com/ssvlabs/ssv-spec/types" "go.uber.org/zap" @@ -67,13 +68,15 @@ func (h *SyncCommitteeHandler) HandleDuties(ctx context.Context) { h.fetchNextPeriod = true } + next := h.ticker.Next() for { select { case <-ctx.Done(): return - case <-h.ticker.Next(): + case <-next: slot := h.ticker.Slot() + next = h.ticker.Next() epoch := h.network.Beacon.EstimatedEpochAtSlot(slot) period := h.network.Beacon.EstimatedSyncCommitteePeriodAtEpoch(epoch) buildStr := fmt.Sprintf("p%v-e%v-s%v-#%v", period, epoch, slot, slot%32+1) @@ -165,15 +168,27 @@ func (h *SyncCommitteeHandler) processExecution(period uint64, slot phase0.Slot) return } - toExecute := make([]*spectypes.BeaconDuty, 0, len(duties)*2) + if !h.network.AlanForked(slot) { + toExecute := make([]*genesisspectypes.Duty, 0, len(duties)*2) + for _, d := range duties { + if h.shouldExecute(d, slot) { + toExecute = append(toExecute, h.toGenesisSpecDuty(d, slot, genesisspectypes.BNRoleSyncCommittee)) + toExecute = append(toExecute, h.toGenesisSpecDuty(d, slot, genesisspectypes.BNRoleSyncCommitteeContribution)) + } + } + + h.dutiesExecutor.ExecuteGenesisDuties(h.logger, toExecute) + return + } + + toExecute := make([]*spectypes.BeaconDuty, 0, len(duties)) for _, d := range duties { if h.shouldExecute(d, slot) { - // TODO: (Alan) genesis support - //toExecute = append(toExecute, h.toSpecDuty(d, slot, spectypes.BNRoleSyncCommittee)) toExecute = append(toExecute, h.toSpecDuty(d, slot, spectypes.BNRoleSyncCommitteeContribution)) } } - h.executeDuties(h.logger, toExecute) + + h.dutiesExecutor.ExecuteDuties(h.logger, toExecute) } func (h *SyncCommitteeHandler) fetchAndProcessDuties(ctx context.Context, period uint64, waitForInitial bool) error { @@ -187,6 +202,7 @@ func (h *SyncCommitteeHandler) fetchAndProcessDuties(ctx context.Context, period allActiveIndices := h.validatorController.AllActiveIndices(firstEpoch, waitForInitial) if len(allActiveIndices) == 0 { + h.logger.Debug("no active validators for period", fields.Epoch(currentEpoch), zap.Uint64("period", period)) return nil } @@ -245,6 +261,20 @@ func (h *SyncCommitteeHandler) prepareDutiesResultLog(period uint64, duties []*e fields.Duration(start)) } +func (h *SyncCommitteeHandler) toGenesisSpecDuty(duty *eth2apiv1.SyncCommitteeDuty, slot phase0.Slot, role genesisspectypes.BeaconRole) *genesisspectypes.Duty { + indices := make([]uint64, len(duty.ValidatorSyncCommitteeIndices)) + for i, index := range duty.ValidatorSyncCommitteeIndices { + indices[i] = uint64(index) + } + return &genesisspectypes.Duty{ + Type: 
role,
+		PubKey:                        duty.PubKey,
+		Slot:                          slot, // the duty scheduler executes the duty at this slot
+		ValidatorIndex:                duty.ValidatorIndex,
+		ValidatorSyncCommitteeIndices: indices,
+	}
+}
+
 func (h *SyncCommitteeHandler) toSpecDuty(duty *eth2apiv1.SyncCommitteeDuty, slot phase0.Slot, role spectypes.BeaconRole) *spectypes.BeaconDuty {
 	indices := make([]uint64, len(duty.ValidatorSyncCommitteeIndices))
 	for i, index := range duty.ValidatorSyncCommitteeIndices {
diff --git a/operator/duties/sync_committee_genesis_test.go b/operator/duties/sync_committee_genesis_test.go
new file mode 100644
index 0000000000..7c23376e95
--- /dev/null
+++ b/operator/duties/sync_committee_genesis_test.go
@@ -0,0 +1,673 @@
+package duties
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	v1 "github.com/attestantio/go-eth2-client/api/v1"
+	"github.com/attestantio/go-eth2-client/spec/phase0"
+	"github.com/cornelk/hashmap"
+	genesisspectypes "github.com/ssvlabs/ssv-spec-pre-cc/types"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/mock/gomock"
+
+	spectypes "github.com/ssvlabs/ssv-spec/types"
+
+	"github.com/ssvlabs/ssv/beacon/goclient"
+	"github.com/ssvlabs/ssv/operator/duties/dutystore"
+	mocknetwork "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon/mocks"
+	ssvtypes "github.com/ssvlabs/ssv/protocol/v2/types"
+)
+
+func setupSyncCommitteeGenesisDutiesMock(
+	s *Scheduler,
+	activeShares []*ssvtypes.SSVShare,
+	dutiesMap *hashmap.Map[uint64, []*v1.SyncCommitteeDuty],
+	waitForDuties *SafeValue[bool],
+) (chan struct{}, chan []*genesisspectypes.Duty) {
+	fetchDutiesCall := make(chan struct{})
+	executeDutiesCall := make(chan []*genesisspectypes.Duty)
+
+	s.network.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().EstimatedSyncCommitteePeriodAtEpoch(gomock.Any()).DoAndReturn(
+		func(epoch phase0.Epoch) uint64 {
+			return uint64(epoch) / s.network.Beacon.EpochsPerSyncCommitteePeriod()
+		},
+	).AnyTimes()
+
+	s.network.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().FirstEpochOfSyncPeriod(gomock.Any()).DoAndReturn(
+		func(period uint64) phase0.Epoch {
+			return phase0.Epoch(period * s.network.Beacon.EpochsPerSyncCommitteePeriod())
+		},
+	).AnyTimes()
+
+	s.network.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().LastSlotOfSyncPeriod(gomock.Any()).DoAndReturn(
+		func(period uint64) phase0.Slot {
+			lastEpoch := s.network.Beacon.FirstEpochOfSyncPeriod(period+1) - 1
+			// If we are in the sync committee that ends at slot x we do not generate a message during slot x-1
+			// as it will never be included, hence -2.
+ return s.network.Beacon.GetEpochFirstSlot(lastEpoch+1) - 2 + }, + ).AnyTimes() + + s.network.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().GetEpochFirstSlot(gomock.Any()).DoAndReturn( + func(epoch phase0.Epoch) phase0.Slot { + return phase0.Slot(uint64(epoch) * s.network.Beacon.SlotsPerEpoch()) + }, + ).AnyTimes() + + s.beaconNode.(*MockBeaconNode).EXPECT().SyncCommitteeDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*v1.SyncCommitteeDuty, error) { + if waitForDuties.Get() { + fetchDutiesCall <- struct{}{} + } + period := s.network.Beacon.EstimatedSyncCommitteePeriodAtEpoch(epoch) + duties, _ := dutiesMap.Get(period) + return duties, nil + }).AnyTimes() + + s.validatorProvider.(*MockValidatorProvider).EXPECT().SelfParticipatingValidators(gomock.Any()).Return(activeShares).AnyTimes() + s.validatorProvider.(*MockValidatorProvider).EXPECT().ParticipatingValidators(gomock.Any()).Return(activeShares).AnyTimes() + + s.validatorController.(*MockValidatorController).EXPECT().AllActiveIndices(gomock.Any(), gomock.Any()).DoAndReturn( + func(epoch phase0.Epoch, afterInit bool) []phase0.ValidatorIndex { + return indicesFromShares(activeShares) + }).AnyTimes() + + s.beaconNode.(*MockBeaconNode).EXPECT().SubmitSyncCommitteeSubscriptions(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + + return fetchDutiesCall, executeDutiesCall +} + +func expectedExecutedGenesisSyncCommitteeDuties(handler *SyncCommitteeHandler, duties []*v1.SyncCommitteeDuty, slot phase0.Slot) []*genesisspectypes.Duty { + expectedDuties := make([]*genesisspectypes.Duty, 0) + for _, d := range duties { + expectedDuties = append(expectedDuties, handler.toGenesisSpecDuty(d, slot, genesisspectypes.BNRoleSyncCommittee)) + expectedDuties = append(expectedDuties, handler.toGenesisSpecDuty(d, slot, genesisspectypes.BNRoleSyncCommitteeContribution)) + } + return expectedDuties +} + +func TestScheduler_SyncCommittee_Genesis_Same_Period(t *testing.T) { + var ( + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) + currentSlot = &SafeValue[phase0.Slot]{} + waitForDuties = &SafeValue[bool]{} + forkEpoch = goclient.FarFutureEpoch + dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() + activeShares = []*ssvtypes.SSVShare{{ + Share: spectypes.Share{ + Committee: []*spectypes.ShareMember{ + {Signer: 1}, {Signer: 2}, {Signer: 3}, {Signer: 4}, + }, + ValidatorIndex: 1, + }, + }} + ) + dutiesMap.Set(0, []*v1.SyncCommitteeDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 3}, + ValidatorIndex: phase0.ValidatorIndex(1), + }, + }) + + // STEP 1: wait for sync committee duties to be fetched (handle initial duties) + currentSlot.Set(phase0.Slot(1)) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) + fetchDutiesCall, executeDutiesCall := setupSyncCommitteeGenesisDutiesMock(scheduler, activeShares, dutiesMap, waitForDuties) + startFn() + + // STEP 1: wait for sync committee duties to be fetched and executed at the same slot + duties, _ := dutiesMap.Get(0) + expected := expectedExecutedGenesisSyncCommitteeDuties(handler, duties, currentSlot.Get()) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + // STEP 2: expect sync committee duties to be executed at the same period + 
currentSlot.Set(phase0.Slot(2)) + duties, _ = dutiesMap.Get(0) + expected = expectedExecutedGenesisSyncCommitteeDuties(handler, duties, currentSlot.Get()) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + // STEP 3: expect sync committee duties to be executed at the last slot of the period + currentSlot.Set(scheduler.network.Beacon.LastSlotOfSyncPeriod(0)) + duties, _ = dutiesMap.Get(0) + expected = expectedExecutedGenesisSyncCommitteeDuties(handler, duties, currentSlot.Get()) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + // STEP 4: expect no action to be taken as we are in the next period + firstSlotOfNextPeriod := scheduler.network.Beacon.GetEpochFirstSlot(scheduler.network.Beacon.FirstEpochOfSyncPeriod(1)) + currentSlot.Set(firstSlotOfNextPeriod) + ticker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // Stop scheduler & wait for graceful exit. + cancel() + require.NoError(t, schedulerPool.Wait()) +} + +func TestScheduler_SyncCommittee_Genesis_Current_Next_Periods(t *testing.T) { + var ( + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) + currentSlot = &SafeValue[phase0.Slot]{} + waitForDuties = &SafeValue[bool]{} + forkEpoch = goclient.FarFutureEpoch + dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() + activeShares = []*ssvtypes.SSVShare{ + { + Share: spectypes.Share{ + Committee: []*spectypes.ShareMember{ + {Signer: 1}, {Signer: 2}, {Signer: 3}, {Signer: 4}, + }, + ValidatorIndex: 1, + }, + }, + { + Share: spectypes.Share{ + Committee: []*spectypes.ShareMember{ + {Signer: 1}, {Signer: 2}, {Signer: 3}, {Signer: 4}, + }, + ValidatorIndex: 2, + }, + }, + } + ) + dutiesMap.Set(0, []*v1.SyncCommitteeDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 3}, + ValidatorIndex: phase0.ValidatorIndex(1), + }, + }) + dutiesMap.Set(1, []*v1.SyncCommitteeDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 4}, + ValidatorIndex: phase0.ValidatorIndex(2), + }, + }) + + // STEP 1: wait for sync committee duties to be fetched (handle initial duties) + currentSlot.Set(phase0.Slot(256*32 - 49)) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) + fetchDutiesCall, executeDutiesCall := setupSyncCommitteeGenesisDutiesMock(scheduler, activeShares, dutiesMap, waitForDuties) + startFn() + + duties, _ := dutiesMap.Get(0) + expected := expectedExecutedGenesisSyncCommitteeDuties(handler, duties, currentSlot.Get()) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + // STEP 2: wait for sync committee duties to be executed + currentSlot.Set(phase0.Slot(256*32 - 48)) + duties, _ = dutiesMap.Get(0) + expected = expectedExecutedGenesisSyncCommitteeDuties(handler, duties, currentSlot.Get()) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + // STEP 3: wait for sync committee duties to be executed + 
currentSlot.Set(phase0.Slot(256*32 - 47)) + duties, _ = dutiesMap.Get(0) + expected = expectedExecutedGenesisSyncCommitteeDuties(handler, duties, currentSlot.Get()) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + // ... + + // STEP 4: new period, wait for sync committee duties to be executed + currentSlot.Set(phase0.Slot(256 * 32)) + duties, _ = dutiesMap.Get(1) + expected = expectedExecutedGenesisSyncCommitteeDuties(handler, duties, currentSlot.Get()) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + // Stop scheduler & wait for graceful exit. + cancel() + require.NoError(t, schedulerPool.Wait()) +} + +func TestScheduler_SyncCommittee_Genesis_Indices_Changed(t *testing.T) { + var ( + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) + currentSlot = &SafeValue[phase0.Slot]{} + waitForDuties = &SafeValue[bool]{} + forkEpoch = goclient.FarFutureEpoch + dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() + activeShares = []*ssvtypes.SSVShare{ + { + Share: spectypes.Share{ + Committee: []*spectypes.ShareMember{ + {Signer: 1}, {Signer: 2}, {Signer: 3}, {Signer: 4}, + }, + ValidatorIndex: 1, + }, + }, + { + Share: spectypes.Share{ + Committee: []*spectypes.ShareMember{ + {Signer: 1}, {Signer: 2}, {Signer: 3}, {Signer: 4}, + }, + ValidatorIndex: 2, + }, + }, + } + ) + currentSlot.Set(phase0.Slot(256*32 - 3)) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) + fetchDutiesCall, executeDutiesCall := setupSyncCommitteeGenesisDutiesMock(scheduler, activeShares, dutiesMap, waitForDuties) + startFn() + + dutiesMap.Set(1, []*v1.SyncCommitteeDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 3}, + ValidatorIndex: phase0.ValidatorIndex(1), + }, + }) + + // STEP 1: wait for sync committee duties to be fetched for next period + waitForDuties.Set(true) + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 2: trigger a change in active indices + scheduler.indicesChg <- struct{}{} + duties, _ := dutiesMap.Get(1) + dutiesMap.Set(1, append(duties, &v1.SyncCommitteeDuty{ + PubKey: phase0.BLSPubKey{1, 2, 4}, + ValidatorIndex: phase0.ValidatorIndex(2), + })) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 3: wait for sync committee duties to be fetched again + currentSlot.Set(phase0.Slot(256*32 - 2)) + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 4: no action should be taken + currentSlot.Set(phase0.Slot(256*32 - 1)) + ticker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 5: execute duties + currentSlot.Set(phase0.Slot(256 * 32)) + duties, _ = dutiesMap.Get(1) + expected := expectedExecutedGenesisSyncCommitteeDuties(handler, duties, currentSlot.Get()) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, 
timeout, expected) + + // Stop scheduler & wait for graceful exit. + cancel() + require.NoError(t, schedulerPool.Wait()) +} + +func TestScheduler_SyncCommittee_Genesis_Multiple_Indices_Changed_Same_Slot(t *testing.T) { + var ( + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) + currentSlot = &SafeValue[phase0.Slot]{} + waitForDuties = &SafeValue[bool]{} + forkEpoch = goclient.FarFutureEpoch + dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() + activeShares = []*ssvtypes.SSVShare{ + { + Share: spectypes.Share{ + Committee: []*spectypes.ShareMember{ + {Signer: 1}, {Signer: 2}, {Signer: 3}, {Signer: 4}, + }, + ValidatorIndex: 1, + }, + }, + { + Share: spectypes.Share{ + Committee: []*spectypes.ShareMember{ + {Signer: 1}, {Signer: 2}, {Signer: 3}, {Signer: 4}, + }, + ValidatorIndex: 2, + }, + }, + } + ) + currentSlot.Set(phase0.Slot(256*32 - 3)) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) + fetchDutiesCall, executeDutiesCall := setupSyncCommitteeGenesisDutiesMock(scheduler, activeShares, dutiesMap, waitForDuties) + startFn() + + // STEP 1: wait for no action to be taken + ticker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 2: trigger a change in active indices + scheduler.indicesChg <- struct{}{} + dutiesMap.Set(1, []*v1.SyncCommitteeDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 3}, + ValidatorIndex: phase0.ValidatorIndex(1), + }, + }) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 3: trigger a change in active indices + scheduler.indicesChg <- struct{}{} + duties, _ := dutiesMap.Get(1) + dutiesMap.Set(1, append(duties, &v1.SyncCommitteeDuty{ + PubKey: phase0.BLSPubKey{1, 2, 4}, + ValidatorIndex: phase0.ValidatorIndex(2), + })) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 4: wait for sync committee duties to be fetched again + currentSlot.Set(phase0.Slot(256*32 - 2)) + waitForDuties.Set(true) + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 5: no action should be taken + currentSlot.Set(phase0.Slot(256*32 - 1)) + ticker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 6: The first assigned duty should not be executed, but the second one should + currentSlot.Set(phase0.Slot(256 * 32)) + duties, _ = dutiesMap.Get(1) + expected := expectedExecutedGenesisSyncCommitteeDuties(handler, duties, currentSlot.Get()) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + // Stop scheduler & wait for graceful exit. 
+ cancel() + require.NoError(t, schedulerPool.Wait()) +} + +// reorg current dependent root changed +func TestScheduler_SyncCommittee_Genesis_Reorg_Current(t *testing.T) { + var ( + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) + currentSlot = &SafeValue[phase0.Slot]{} + waitForDuties = &SafeValue[bool]{} + forkEpoch = goclient.FarFutureEpoch + dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() + activeShares = []*ssvtypes.SSVShare{ + { + Share: spectypes.Share{ + Committee: []*spectypes.ShareMember{ + {Signer: 1}, {Signer: 2}, {Signer: 3}, {Signer: 4}, + }, + ValidatorIndex: 1, + }, + }, + { + Share: spectypes.Share{ + Committee: []*spectypes.ShareMember{ + {Signer: 1}, {Signer: 2}, {Signer: 3}, {Signer: 4}, + }, + ValidatorIndex: 2, + }, + }, + } + ) + currentSlot.Set(phase0.Slot(256*32 - 3)) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) + fetchDutiesCall, executeDutiesCall := setupSyncCommitteeGenesisDutiesMock(scheduler, activeShares, dutiesMap, waitForDuties) + startFn() + + dutiesMap.Set(1, []*v1.SyncCommitteeDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 3}, + ValidatorIndex: phase0.ValidatorIndex(1), + }, + }) + + // STEP 1: wait for sync committee duties to be fetched and executed at the same slot + waitForDuties.Set(true) + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 2: trigger head event + e := &v1.Event{ + Data: &v1.HeadEvent{ + Slot: currentSlot.Get(), + CurrentDutyDependentRoot: phase0.Root{0x01}, + }, + } + scheduler.HandleHeadEvent(logger)(e) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 3: Ticker with no action + currentSlot.Set(phase0.Slot(256*32 - 2)) + ticker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 4: trigger reorg + e = &v1.Event{ + Data: &v1.HeadEvent{ + Slot: currentSlot.Get(), + CurrentDutyDependentRoot: phase0.Root{0x02}, + }, + } + dutiesMap.Set(1, []*v1.SyncCommitteeDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 4}, + ValidatorIndex: phase0.ValidatorIndex(2), + }, + }) + scheduler.HandleHeadEvent(logger)(e) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 5: wait for sync committee duties to be fetched again for the current epoch + currentSlot.Set(phase0.Slot(256*32 - 1)) + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 6: The first assigned duty should not be executed, but the second one should + currentSlot.Set(phase0.Slot(256 * 32)) + duties, _ := dutiesMap.Get(1) + expected := expectedExecutedGenesisSyncCommitteeDuties(handler, duties, currentSlot.Get()) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + // Stop scheduler & wait for graceful exit. 
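The magic slot numbers in these tests follow from the mainnet presets: 32 slots per epoch and 256 epochs per sync committee period, so one period spans 256*32 = 8192 slots. Slot 256*32 is therefore the first slot of period 1, and slots such as 256*32 - 3 or 256*32 - 49 sit at the tail of period 0, where duties for the upcoming period are fetched ahead of the boundary. A worked sketch of the arithmetic (the constants are from the Ethereum consensus spec; the function name is illustrative):

```go
package main

import "fmt"

const (
	slotsPerEpoch                = 32                                           // mainnet preset
	epochsPerSyncCommitteePeriod = 256                                          // mainnet preset
	slotsPerPeriod               = slotsPerEpoch * epochsPerSyncCommitteePeriod // 8192
)

// syncCommitteePeriod returns the sync committee period a slot belongs to.
func syncCommitteePeriod(slot uint64) uint64 { return slot / slotsPerPeriod }

func main() {
	fmt.Println(syncCommitteePeriod(256*32 - 3)) // 0: tail of the current period
	fmt.Println(syncCommitteePeriod(256 * 32))   // 1: first slot of the next period
}
```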
+ cancel() + require.NoError(t, schedulerPool.Wait()) +} + +// reorg current dependent root changed including indices change in the same slot +func TestScheduler_SyncCommittee_Genesis_Reorg_Current_Indices_Changed(t *testing.T) { + var ( + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) + currentSlot = &SafeValue[phase0.Slot]{} + waitForDuties = &SafeValue[bool]{} + forkEpoch = goclient.FarFutureEpoch + dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() + activeShares = []*ssvtypes.SSVShare{ + { + Share: spectypes.Share{ + Committee: []*spectypes.ShareMember{ + {Signer: 1}, {Signer: 2}, {Signer: 3}, {Signer: 4}, + }, + ValidatorIndex: 1, + }, + }, + { + Share: spectypes.Share{ + Committee: []*spectypes.ShareMember{ + {Signer: 1}, {Signer: 2}, {Signer: 3}, {Signer: 4}, + }, + ValidatorIndex: 2, + }, + }, + { + Share: spectypes.Share{ + Committee: []*spectypes.ShareMember{ + {Signer: 1}, {Signer: 2}, {Signer: 3}, {Signer: 4}, + }, + ValidatorIndex: 3, + }, + }, + } + ) + currentSlot.Set(phase0.Slot(256*32 - 3)) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) + fetchDutiesCall, executeDutiesCall := setupSyncCommitteeGenesisDutiesMock(scheduler, activeShares, dutiesMap, waitForDuties) + startFn() + + dutiesMap.Set(1, []*v1.SyncCommitteeDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 3}, + ValidatorIndex: phase0.ValidatorIndex(1), + }, + }) + + // STEP 1: wait for sync committee duties to be fetched and executed at the same slot + waitForDuties.Set(true) + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 2: trigger head event + e := &v1.Event{ + Data: &v1.HeadEvent{ + Slot: currentSlot.Get(), + CurrentDutyDependentRoot: phase0.Root{0x01}, + }, + } + scheduler.HandleHeadEvent(logger)(e) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 3: Ticker with no action + currentSlot.Set(phase0.Slot(256*32 - 2)) + ticker.Send(currentSlot.Get()) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 4: trigger reorg + e = &v1.Event{ + Data: &v1.HeadEvent{ + Slot: currentSlot.Get(), + CurrentDutyDependentRoot: phase0.Root{0x02}, + }, + } + dutiesMap.Set(1, []*v1.SyncCommitteeDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 4}, + ValidatorIndex: phase0.ValidatorIndex(2), + }, + }) + scheduler.HandleHeadEvent(logger)(e) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 5: trigger a change in active indices + scheduler.indicesChg <- struct{}{} + duties, _ := dutiesMap.Get(1) + dutiesMap.Set(1, append(duties, &v1.SyncCommitteeDuty{ + PubKey: phase0.BLSPubKey{1, 2, 5}, + ValidatorIndex: phase0.ValidatorIndex(3), + })) + waitForNoActionGenesis(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 6: wait for sync committee duties to be fetched again for the current epoch + currentSlot.Set(phase0.Slot(256*32 - 1)) + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + waitForGenesisDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) + + // STEP 7: The first assigned duty should not be executed, but the second and the one added by the indices change should + currentSlot.Set(phase0.Slot(256 * 32)) + duties, _ = dutiesMap.Get(1) + expected := expectedExecutedGenesisSyncCommitteeDuties(handler, duties, currentSlot.Get())
+ setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + // Stop scheduler & wait for graceful exit. + cancel() + require.NoError(t, schedulerPool.Wait()) +} + +func TestScheduler_SyncCommittee_Genesis_Early_Block(t *testing.T) { + var ( + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) + currentSlot = &SafeValue[phase0.Slot]{} + waitForDuties = &SafeValue[bool]{} + forkEpoch = goclient.FarFutureEpoch + dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() + activeShares = []*ssvtypes.SSVShare{{ + Share: spectypes.Share{ + Committee: []*spectypes.ShareMember{ + {Signer: 1}, {Signer: 2}, {Signer: 3}, {Signer: 4}, + }, + ValidatorIndex: 1, + }, + }} + ) + dutiesMap.Set(0, []*v1.SyncCommitteeDuty{ + { + PubKey: phase0.BLSPubKey{1, 2, 3}, + ValidatorIndex: phase0.ValidatorIndex(1), + }, + }) + + currentSlot.Set(phase0.Slot(0)) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) + fetchDutiesCall, executeDutiesCall := setupSyncCommitteeGenesisDutiesMock(scheduler, activeShares, dutiesMap, waitForDuties) + startFn() + + duties, _ := dutiesMap.Get(0) + expected := expectedExecutedGenesisSyncCommitteeDuties(handler, duties, currentSlot.Get()) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + // STEP 1: wait for sync committee duties to be fetched and executed at the same slot + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + // STEP 2: expect sync committee duties to be executed at the same period + currentSlot.Set(phase0.Slot(1)) + duties, _ = dutiesMap.Get(0) + expected = expectedExecutedGenesisSyncCommitteeDuties(handler, duties, currentSlot.Get()) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + ticker.Send(currentSlot.Get()) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + + // STEP 3: wait for sync committee duties to be executed faster than 1/3 of the slot duration + startTime := time.Now() + currentSlot.Set(phase0.Slot(2)) + duties, _ = dutiesMap.Get(0) + expected = expectedExecutedGenesisSyncCommitteeDuties(handler, duties, currentSlot.Get()) + setExecuteGenesisDutyFunc(scheduler, executeDutiesCall, len(expected)) + + ticker.Send(currentSlot.Get()) + + // STEP 4: trigger head event (block arrival) + e := &v1.Event{ + Data: &v1.HeadEvent{ + Slot: currentSlot.Get(), + }, + } + scheduler.HandleHeadEvent(logger)(e) + waitForGenesisDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) + require.Less(t, time.Since(startTime), scheduler.network.Beacon.SlotDurationSec()/3) + + // Stop scheduler & wait for graceful exit. 
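The Early_Block test pins down the timing contract: a sync committee duty for a slot normally waits until a third of the slot has elapsed, but a head event for that slot releases it immediately, which is why the genesis variant asserts execution in under SlotDurationSec()/3. (The post-fork variant of this test, later in this diff, flips the bound to require.Greater.) A minimal sketch of that wait, with illustrative names rather than the scheduler's real API:

```go
package main

import "time"

// waitThirdOrBlock returns as soon as the slot's block is observed, or after
// 1/3 of the slot duration as a fallback. Illustrative sketch only.
func waitThirdOrBlock(slotDuration time.Duration, blockArrived <-chan struct{}) {
	timer := time.NewTimer(slotDuration / 3)
	defer timer.Stop()
	select {
	case <-blockArrived: // head event: execute duties early
	case <-timer.C: // no block yet: execute at the 1/3 mark anyway
	}
}
```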
+ cancel() + require.NoError(t, schedulerPool.Wait()) +} diff --git a/operator/duties/sync_committee_test.go b/operator/duties/sync_committee_test.go index edcf9de404..2c10bb8fb3 100644 --- a/operator/duties/sync_committee_test.go +++ b/operator/duties/sync_committee_test.go @@ -9,12 +9,11 @@ import ( "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/cornelk/hashmap" "github.com/stretchr/testify/require" - gomock "go.uber.org/mock/gomock" + "go.uber.org/mock/gomock" spectypes "github.com/ssvlabs/ssv-spec/types" "github.com/ssvlabs/ssv/operator/duties/dutystore" - "github.com/ssvlabs/ssv/operator/duties/mocks" mocknetwork "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon/mocks" ssvtypes "github.com/ssvlabs/ssv/protocol/v2/types" ) @@ -55,7 +54,7 @@ func setupSyncCommitteeDutiesMock( }, ).AnyTimes() - s.beaconNode.(*mocks.MockBeaconNode).EXPECT().SyncCommitteeDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + s.beaconNode.(*MockBeaconNode).EXPECT().SyncCommitteeDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*v1.SyncCommitteeDuty, error) { if waitForDuties.Get() { fetchDutiesCall <- struct{}{} @@ -65,15 +64,15 @@ func setupSyncCommitteeDutiesMock( return duties, nil }).AnyTimes() - s.validatorProvider.(*mocks.MockValidatorProvider).EXPECT().SelfParticipatingValidators(gomock.Any()).Return(activeShares).AnyTimes() - s.validatorProvider.(*mocks.MockValidatorProvider).EXPECT().ParticipatingValidators(gomock.Any()).Return(activeShares).AnyTimes() + s.validatorProvider.(*MockValidatorProvider).EXPECT().SelfParticipatingValidators(gomock.Any()).Return(activeShares).AnyTimes() + s.validatorProvider.(*MockValidatorProvider).EXPECT().ParticipatingValidators(gomock.Any()).Return(activeShares).AnyTimes() - s.validatorController.(*mocks.MockValidatorController).EXPECT().AllActiveIndices(gomock.Any(), gomock.Any()).DoAndReturn( + s.validatorController.(*MockValidatorController).EXPECT().AllActiveIndices(gomock.Any(), gomock.Any()).DoAndReturn( func(epoch phase0.Epoch, afterInit bool) []phase0.ValidatorIndex { return indicesFromShares(activeShares) }).AnyTimes() - s.beaconNode.(*mocks.MockBeaconNode).EXPECT().SubmitSyncCommitteeSubscriptions(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + s.beaconNode.(*MockBeaconNode).EXPECT().SubmitSyncCommitteeSubscriptions(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() return fetchDutiesCall, executeDutiesCall } @@ -81,7 +80,9 @@ func setupSyncCommitteeDutiesMock( func expectedExecutedSyncCommitteeDuties(handler *SyncCommitteeHandler, duties []*v1.SyncCommitteeDuty, slot phase0.Slot) []*spectypes.BeaconDuty { expectedDuties := make([]*spectypes.BeaconDuty, 0) for _, d := range duties { - //expectedDuties = append(expectedDuties, handler.toSpecDuty(d, slot, spectypes.BNRoleSyncCommittee)) + if !handler.network.AlanForked(slot) { + expectedDuties = append(expectedDuties, handler.toSpecDuty(d, slot, spectypes.BNRoleSyncCommittee)) + } expectedDuties = append(expectedDuties, handler.toSpecDuty(d, slot, spectypes.BNRoleSyncCommitteeContribution)) } return expectedDuties @@ -92,6 +93,7 @@ func TestScheduler_SyncCommittee_Same_Period(t *testing.T) { handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SafeValue[phase0.Slot]{} waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() activeShares = []*ssvtypes.SSVShare{{ Share: spectypes.Share{ 
@@ -111,7 +113,7 @@ func TestScheduler_SyncCommittee_Same_Period(t *testing.T) { // STEP 1: wait for sync committee duties to be fetched (handle initial duties) currentSlot.Set(phase0.Slot(1)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, activeShares, dutiesMap, waitForDuties) startFn() @@ -157,6 +159,7 @@ func TestScheduler_SyncCommittee_Current_Next_Periods(t *testing.T) { handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SafeValue[phase0.Slot]{} waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() activeShares = []*ssvtypes.SSVShare{ { @@ -192,7 +195,7 @@ func TestScheduler_SyncCommittee_Current_Next_Periods(t *testing.T) { // STEP 1: wait for sync committee duties to be fetched (handle initial duties) currentSlot.Set(phase0.Slot(256*32 - 49)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, activeShares, dutiesMap, waitForDuties) startFn() @@ -242,6 +245,7 @@ func TestScheduler_SyncCommittee_Indices_Changed(t *testing.T) { handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SafeValue[phase0.Slot]{} waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() activeShares = []*ssvtypes.SSVShare{ { @@ -263,7 +267,7 @@ func TestScheduler_SyncCommittee_Indices_Changed(t *testing.T) { } ) currentSlot.Set(phase0.Slot(256*32 - 3)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, activeShares, dutiesMap, waitForDuties) startFn() @@ -318,6 +322,7 @@ func TestScheduler_SyncCommittee_Multiple_Indices_Changed_Same_Slot(t *testing.T handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SafeValue[phase0.Slot]{} waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() activeShares = []*ssvtypes.SSVShare{ { @@ -339,7 +344,7 @@ func TestScheduler_SyncCommittee_Multiple_Indices_Changed_Same_Slot(t *testing.T } ) currentSlot.Set(phase0.Slot(256*32 - 3)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, activeShares, dutiesMap, waitForDuties) startFn() @@ -398,6 +403,7 @@ func TestScheduler_SyncCommittee_Reorg_Current(t *testing.T) { handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SafeValue[phase0.Slot]{} 
waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() activeShares = []*ssvtypes.SSVShare{ { @@ -419,7 +425,7 @@ func TestScheduler_SyncCommittee_Reorg_Current(t *testing.T) { } ) currentSlot.Set(phase0.Slot(256*32 - 3)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, activeShares, dutiesMap, waitForDuties) startFn() @@ -491,6 +497,7 @@ func TestScheduler_SyncCommittee_Reorg_Current_Indices_Changed(t *testing.T) { handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SafeValue[phase0.Slot]{} waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() activeShares = []*ssvtypes.SSVShare{ { @@ -520,7 +527,7 @@ func TestScheduler_SyncCommittee_Reorg_Current_Indices_Changed(t *testing.T) { } ) currentSlot.Set(phase0.Slot(256*32 - 3)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, activeShares, dutiesMap, waitForDuties) startFn() @@ -597,11 +604,11 @@ func TestScheduler_SyncCommittee_Reorg_Current_Indices_Changed(t *testing.T) { } func TestScheduler_SyncCommittee_Early_Block(t *testing.T) { - t.Skip("TODO: Check if relevant") var ( handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SafeValue[phase0.Slot]{} waitForDuties = &SafeValue[bool]{} + forkEpoch = phase0.Epoch(0) dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() activeShares = []*ssvtypes.SSVShare{{ Share: spectypes.Share{ @@ -620,7 +627,7 @@ func TestScheduler_SyncCommittee_Early_Block(t *testing.T) { }) currentSlot.Set(phase0.Slot(0)) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, forkEpoch) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, activeShares, dutiesMap, waitForDuties) startFn() @@ -658,7 +665,7 @@ func TestScheduler_SyncCommittee_Early_Block(t *testing.T) { } scheduler.HandleHeadEvent(logger)(e) waitForDutiesExecution(t, logger, fetchDutiesCall, executeDutiesCall, timeout, expected) - require.Less(t, time.Since(startTime), scheduler.network.Beacon.SlotDurationSec()/3) + require.Greater(t, time.Since(startTime), scheduler.network.Beacon.SlotDurationSec()/3) // Stop scheduler & wait for graceful exit. 
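The validatorregistration.go and voluntary_exit.go hunks just below share one fix: ticker.Next() was previously called inside the select on every loop iteration, requesting a fresh channel each time; the handlers now obtain the channel once before the loop and re-arm it explicitly right after consuming a tick. A sketch of the pattern, under an assumed minimal ticker interface:

```go
package main

import (
	"context"
	"time"
)

// slotTicker is a stand-in for the scheduler's ticker; only the two methods
// the pattern needs are modeled here.
type slotTicker interface {
	Next() <-chan time.Time
	Slot() uint64
}

func handleDuties(ctx context.Context, ticker slotTicker) {
	next := ticker.Next() // obtain the channel once, before the loop
	for {
		select {
		case <-ctx.Done():
			return
		case <-next:
			slot := ticker.Slot()
			next = ticker.Next() // re-arm immediately, before any slow work
			_ = slot             // ... process duties for the slot ...
		}
	}
}
```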
cancel() diff --git a/operator/duties/validatorregistration.go b/operator/duties/validatorregistration.go index a0ea397b43..aaf91a5af8 100644 --- a/operator/duties/validatorregistration.go +++ b/operator/duties/validatorregistration.go @@ -29,13 +29,15 @@ func (h *ValidatorRegistrationHandler) HandleDuties(ctx context.Context) { // should be registered within validatorRegistrationEpochInterval epochs time in a corresponding slot registrationSlotInterval := h.network.SlotsPerEpoch() * validatorRegistrationEpochInterval + next := h.ticker.Next() for { select { case <-ctx.Done(): return - case <-h.ticker.Next(): + case <-next: slot := h.ticker.Slot() + next = h.ticker.Next() epoch := h.network.Beacon.EstimatedEpochAtSlot(slot) shares := h.validatorProvider.SelfParticipatingValidators(epoch + phase0.Epoch(validatorRegistrationEpochInterval)) @@ -48,7 +50,7 @@ func (h *ValidatorRegistrationHandler) HandleDuties(ctx context.Context) { pk := phase0.BLSPubKey{} copy(pk[:], share.ValidatorPubKey[:]) - h.executeDuties(h.logger, []*spectypes.BeaconDuty{{ + h.dutiesExecutor.ExecuteDuties(h.logger, []*spectypes.BeaconDuty{{ Type: spectypes.BNRoleValidatorRegistration, ValidatorIndex: share.ValidatorIndex, PubKey: pk, diff --git a/operator/duties/voluntary_exit.go b/operator/duties/voluntary_exit.go index 6570565228..c55d9e43bc 100644 --- a/operator/duties/voluntary_exit.go +++ b/operator/duties/voluntary_exit.go @@ -9,11 +9,13 @@ import ( "go.uber.org/zap" "github.com/ssvlabs/ssv/logging/fields" + "github.com/ssvlabs/ssv/operator/duties/dutystore" ) const voluntaryExitSlotsToPostpone = phase0.Slot(4) type ExitDescriptor struct { + OwnValidator bool PubKey phase0.BLSPubKey ValidatorIndex phase0.ValidatorIndex BlockNumber uint64 @@ -21,13 +23,15 @@ type ExitDescriptor struct { type VoluntaryExitHandler struct { baseHandler + duties *dutystore.VoluntaryExitDuties validatorExitCh <-chan ExitDescriptor dutyQueue []*spectypes.BeaconDuty blockSlots map[uint64]phase0.Slot } -func NewVoluntaryExitHandler(validatorExitCh <-chan ExitDescriptor) *VoluntaryExitHandler { +func NewVoluntaryExitHandler(duties *dutystore.VoluntaryExitDuties, validatorExitCh <-chan ExitDescriptor) *VoluntaryExitHandler { return &VoluntaryExitHandler{ + duties: duties, validatorExitCh: validatorExitCh, dutyQueue: make([]*spectypes.BeaconDuty, 0), blockSlots: map[uint64]phase0.Slot{}, @@ -42,13 +46,15 @@ func (h *VoluntaryExitHandler) HandleDuties(ctx context.Context) { h.logger.Info("starting duty handler") defer h.logger.Info("duty handler exited") + next := h.ticker.Next() for { select { case <-ctx.Done(): return - case <-h.ticker.Next(): + case <-next: currentSlot := h.ticker.Slot() + next = h.ticker.Next() h.logger.Debug("🛠 ticker event", fields.Slot(currentSlot)) @@ -63,9 +69,10 @@ func (h *VoluntaryExitHandler) HandleDuties(ctx context.Context) { } h.dutyQueue = pendingDuties + h.duties.RemoveSlot(currentSlot - phase0.Slot(h.network.SlotsPerEpoch())) if dutyCount := len(dutiesForExecution); dutyCount != 0 { - h.executeDuties(h.logger, dutiesForExecution) + h.dutiesExecutor.ExecuteDuties(h.logger, dutiesForExecution) h.logger.Debug("executed voluntary exit duties", fields.Slot(currentSlot), fields.Count(dutyCount)) @@ -92,6 +99,11 @@ func (h *VoluntaryExitHandler) HandleDuties(ctx context.Context) { ValidatorIndex: exitDescriptor.ValidatorIndex, } + h.duties.AddDuty(dutySlot, exitDescriptor.PubKey) + if !exitDescriptor.OwnValidator { + continue + } + h.dutyQueue = append(h.dutyQueue, duty) h.logger.Debug("🛠 scheduled duty for 
execution", diff --git a/operator/duties/voluntary_exit_test.go b/operator/duties/voluntary_exit_test.go index 9d1a107bab..48c8794497 100644 --- a/operator/duties/voluntary_exit_test.go +++ b/operator/duties/voluntary_exit_test.go @@ -10,21 +10,23 @@ import ( ethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/trie" "github.com/stretchr/testify/require" - gomock "go.uber.org/mock/gomock" + "go.uber.org/mock/gomock" spectypes "github.com/ssvlabs/ssv-spec/types" - "github.com/ssvlabs/ssv/operator/duties/mocks" + + "github.com/ssvlabs/ssv/beacon/goclient" + "github.com/ssvlabs/ssv/operator/duties/dutystore" mocknetwork "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon/mocks" ) func TestVoluntaryExitHandler_HandleDuties(t *testing.T) { exitCh := make(chan ExitDescriptor) - handler := NewVoluntaryExitHandler(exitCh) + handler := NewVoluntaryExitHandler(dutystore.NewVoluntaryExit(), exitCh) currentSlot := &SafeValue[phase0.Slot]{} currentSlot.Set(0) - scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, []dutyHandler{handler}, currentSlot, goclient.FarFutureEpoch) startFn() blockByNumberCalls := create1to1BlockSlotMapping(scheduler) @@ -37,21 +39,25 @@ func TestVoluntaryExitHandler_HandleDuties(t *testing.T) { const blockNumber = uint64(1) normalExit := ExitDescriptor{ + OwnValidator: true, PubKey: phase0.BLSPubKey{1, 2, 3}, ValidatorIndex: phase0.ValidatorIndex(1), BlockNumber: blockNumber, } sameBlockExit := ExitDescriptor{ + OwnValidator: true, PubKey: phase0.BLSPubKey{4, 5, 6}, ValidatorIndex: phase0.ValidatorIndex(2), BlockNumber: normalExit.BlockNumber, } newBlockExit := ExitDescriptor{ + OwnValidator: true, PubKey: phase0.BLSPubKey{1, 2, 3}, ValidatorIndex: phase0.ValidatorIndex(1), BlockNumber: normalExit.BlockNumber + 1, } pastBlockExit := ExitDescriptor{ + OwnValidator: true, PubKey: phase0.BLSPubKey{1, 2, 3}, ValidatorIndex: phase0.ValidatorIndex(1), BlockNumber: normalExit.BlockNumber + 4, @@ -139,7 +145,7 @@ func TestVoluntaryExitHandler_HandleDuties(t *testing.T) { func create1to1BlockSlotMapping(scheduler *Scheduler) *atomic.Uint64 { var blockByNumberCalls atomic.Uint64 - scheduler.executionClient.(*mocks.MockExecutionClient).EXPECT().BlockByNumber(gomock.Any(), gomock.Any()).DoAndReturn( + scheduler.executionClient.(*MockExecutionClient).EXPECT().BlockByNumber(gomock.Any(), gomock.Any()).DoAndReturn( func(ctx context.Context, blockNumber *big.Int) (*ethtypes.Block, error) { blockByNumberCalls.Add(1) expectedBlock := ethtypes.NewBlock(ðtypes.Header{Time: blockNumber.Uint64()}, nil, nil, nil, trie.NewStackTrie(nil)) diff --git a/operator/node.go b/operator/node.go index 7e50340b25..06dfe7ae6a 100644 --- a/operator/node.go +++ b/operator/node.go @@ -3,11 +3,6 @@ package operator import ( "context" "fmt" - "github.com/ssvlabs/ssv/exporter/exporter_message" - - storage2 "github.com/ssvlabs/ssv/registry/storage" - - "github.com/ssvlabs/ssv/network" "go.uber.org/zap" @@ -16,6 +11,7 @@ import ( qbftstorage "github.com/ssvlabs/ssv/ibft/storage" "github.com/ssvlabs/ssv/logging" "github.com/ssvlabs/ssv/logging/fields" + "github.com/ssvlabs/ssv/network" "github.com/ssvlabs/ssv/networkconfig" "github.com/ssvlabs/ssv/operator/duties" "github.com/ssvlabs/ssv/operator/duties/dutystore" @@ -24,6 +20,7 @@ import ( "github.com/ssvlabs/ssv/operator/storage" "github.com/ssvlabs/ssv/operator/validator" 
beaconprotocol "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon" + storage2 "github.com/ssvlabs/ssv/registry/storage" "github.com/ssvlabs/ssv/storage/basedb" ) @@ -72,23 +69,7 @@ type operatorNode struct { } // New is the constructor of operatorNode -func New(logger *zap.Logger, opts Options, slotTickerProvider slotticker.Provider) Node { - storageMap := qbftstorage.NewStores() - - roles := []exporter_message.RunnerRole{ - exporter_message.RoleCommittee, - exporter_message.RoleAttester, - exporter_message.RoleAggregator, - exporter_message.RoleProposer, - exporter_message.RoleSyncCommittee, - exporter_message.RoleSyncCommitteeContribution, - exporter_message.RoleValidatorRegistration, - exporter_message.RoleVoluntaryExit, - } - for _, role := range roles { - storageMap.Add(role, qbftstorage.New(opts.DB, role.String())) - } - +func New(logger *zap.Logger, opts Options, slotTickerProvider slotticker.Provider, qbftStorage *qbftstorage.QBFTStores) Node { node := &operatorNode{ context: opts.Context, validatorsCtrl: opts.ValidatorController, @@ -98,20 +79,19 @@ func New(logger *zap.Logger, opts Options, slotTickerProvider slotticker.Provide executionClient: opts.ExecutionClient, net: opts.P2PNetwork, storage: opts.ValidatorOptions.RegistryStorage, - qbftStorage: storageMap, + qbftStorage: qbftStorage, dutyScheduler: duties.NewScheduler(&duties.SchedulerOptions{ - Ctx: opts.Context, - BeaconNode: opts.BeaconNode, - ExecutionClient: opts.ExecutionClient, - Network: opts.Network, - ValidatorProvider: opts.ValidatorStore.WithOperatorID(opts.ValidatorOptions.OperatorDataStore.GetOperatorID), - ValidatorController: opts.ValidatorController, - IndicesChg: opts.ValidatorController.IndicesChangeChan(), - ValidatorExitCh: opts.ValidatorController.ValidatorExitChan(), - ExecuteDuty: opts.ValidatorController.ExecuteDuty, - ExecuteCommitteeDuty: opts.ValidatorController.ExecuteCommitteeDuty, - DutyStore: opts.DutyStore, - SlotTickerProvider: slotTickerProvider, + Ctx: opts.Context, + BeaconNode: opts.BeaconNode, + ExecutionClient: opts.ExecutionClient, + Network: opts.Network, + ValidatorProvider: opts.ValidatorStore.WithOperatorID(opts.ValidatorOptions.OperatorDataStore.GetOperatorID), + ValidatorController: opts.ValidatorController, + DutyExecutor: opts.ValidatorController, + IndicesChg: opts.ValidatorController.IndicesChangeChan(), + ValidatorExitCh: opts.ValidatorController.ValidatorExitChan(), + DutyStore: opts.DutyStore, + SlotTickerProvider: slotTickerProvider, }), feeRecipientCtrl: fee_recipient.NewController(&fee_recipient.ControllerOptions{ Ctx: opts.Context, @@ -166,6 +146,7 @@ func (n *operatorNode) Start(logger *zap.Logger) error { } } go n.net.UpdateSubnets(logger) + go n.net.UpdateScoreParams(logger) n.validatorsCtrl.StartValidators() go n.reportOperators(logger) @@ -196,7 +177,7 @@ func (n *operatorNode) handleQueryRequests(logger *zap.Logger, nm *api.NetworkMe zap.String("type", string(nm.Msg.Type))) switch nm.Msg.Type { case api.TypeDecided: - api.HandleParticipantsQuery(logger, n.qbftStorage, nm) + api.HandleParticipantsQuery(logger, n.qbftStorage, nm, n.network.Domain) case api.TypeError: api.HandleErrorQuery(logger, nm) default: diff --git a/operator/validator/controller.go b/operator/validator/controller.go index 0c4a79026c..32370efdf5 100644 --- a/operator/validator/controller.go +++ b/operator/validator/controller.go @@ -5,12 +5,12 @@ import ( "encoding/hex" "encoding/json" "fmt" - "github.com/ssvlabs/ssv/exporter/exporter_message" "sync" "time" - "golang.org/x/exp/maps" - 
"golang.org/x/exp/slices" + "github.com/ssvlabs/ssv/exporter/convert" + + genesisspectypes "github.com/ssvlabs/ssv-spec-pre-cc/types" "github.com/attestantio/go-eth2-client/spec/bellatrix" "github.com/attestantio/go-eth2-client/spec/phase0" @@ -21,14 +21,17 @@ import ( specqbft "github.com/ssvlabs/ssv-spec/qbft" specssv "github.com/ssvlabs/ssv-spec/ssv" spectypes "github.com/ssvlabs/ssv-spec/types" - protocolp2p "github.com/ssvlabs/ssv/protocol/v2/p2p" + "go.uber.org/zap" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" "github.com/ssvlabs/ssv/ibft/storage" "github.com/ssvlabs/ssv/logging" "github.com/ssvlabs/ssv/logging/fields" "github.com/ssvlabs/ssv/message/validation" "github.com/ssvlabs/ssv/network" + "github.com/ssvlabs/ssv/networkconfig" operatordatastore "github.com/ssvlabs/ssv/operator/datastore" "github.com/ssvlabs/ssv/operator/duties" nodestorage "github.com/ssvlabs/ssv/operator/storage" @@ -36,6 +39,7 @@ import ( beaconprotocol "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon" "github.com/ssvlabs/ssv/protocol/v2/message" p2pprotocol "github.com/ssvlabs/ssv/protocol/v2/p2p" + protocolp2p "github.com/ssvlabs/ssv/protocol/v2/p2p" "github.com/ssvlabs/ssv/protocol/v2/qbft" qbftcontroller "github.com/ssvlabs/ssv/protocol/v2/qbft/controller" "github.com/ssvlabs/ssv/protocol/v2/qbft/roundtimer" @@ -84,8 +88,8 @@ type ControllerOptions struct { Metrics validator.Metrics ValidatorStore registrystorage.ValidatorStore MessageValidator validation.MessageValidator - UseNewExporterAPI bool `yaml:"UseNewExporterAPI" env:"USE_NEW_EXPORTER_API" env-description:"Use new exporter API which is simpler and has no workarounds"` ValidatorsMap *validators.ValidatorsMap + NetworkConfig networkconfig.NetworkConfig // worker flags WorkersCount int `yaml:"MsgWorkersCount" env:"MSG_WORKERS_COUNT" env-default:"256" env-description:"Number of goroutines to use for message workers"` @@ -100,8 +104,6 @@ type Controller interface { CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex AllActiveIndices(epoch phase0.Epoch, afterInit bool) []phase0.ValidatorIndex GetValidator(pubKey spectypes.ValidatorPK) (*validator.Validator, bool) - ExecuteDuty(logger *zap.Logger, duty *spectypes.BeaconDuty) - ExecuteCommitteeDuty(logger *zap.Logger, committeeID spectypes.CommitteeID, duty *spectypes.CommitteeDuty) UpdateValidatorMetaDataLoop() StartNetworkHandlers() GetOperatorShares() []*ssvtypes.SSVShare @@ -119,6 +121,8 @@ type Controller interface { ReactivateCluster(owner common.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error UpdateFeeRecipient(owner, recipient common.Address) error ExitValidator(pubKey phase0.BLSPubKey, blockNumber uint64, validatorIndex phase0.ValidatorIndex) error + + duties.DutyExecutor } type committeeObserver struct { @@ -135,6 +139,7 @@ type Recipients interface { type SharesStorage interface { Get(txn basedb.Reader, pubKey []byte) *types.SSVShare List(txn basedb.Reader, filters ...registrystorage.SharesFilter) []*types.SSVShare + Range(txn basedb.Reader, fn func(*types.SSVShare) bool) UpdateValidatorMetadata(pk spectypes.ValidatorPK, metadata *beaconprotocol.ValidatorMetadata) error UpdateValidatorsMetadata(map[spectypes.ValidatorPK]*beaconprotocol.ValidatorMetadata) error } @@ -154,6 +159,7 @@ type controller struct { logger *zap.Logger metrics validator.Metrics + networkConfig networkconfig.NetworkConfig sharesStorage SharesStorage operatorsStorage registrystorage.Operators recipientsStorage Recipients @@ -207,9 +213,9 @@ func NewController(logger 
*zap.Logger, options ControllerOptions) Controller { sigVerifier := validator.NewSignatureVerifier() validatorOptions := validator.Options{ //TODO add vars + NetworkConfig: options.NetworkConfig, Network: options.Network, Beacon: options.Beacon, - BeaconNetwork: options.BeaconNetwork.GetNetwork(), Storage: options.StorageMap, //Share: nil, // set per validator Signer: options.BeaconSigner, @@ -242,6 +248,7 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { ctrl := controller{ logger: logger.Named(logging.NameController), metrics: metrics, + networkConfig: options.NetworkConfig, sharesStorage: options.RegistryStorage.Shares(), operatorsStorage: options.RegistryStorage, recipientsStorage: options.RegistryStorage, @@ -372,8 +379,9 @@ func (c *controller) handleWorkerMessages(msg *queue.DecodedSSVMessage) error { var ncv *committeeObserver item := c.getNonCommitteeValidators(msg.GetID()) if item == nil { - nonCommitteeOptions := validator.NonCommitteeOptions{ + committeeObserverOptions := validator.CommitteeObserverOptions{ Logger: c.logger, + NetworkConfig: c.networkConfig, ValidatorStore: c.validatorStore, Network: c.validatorOptions.Network, Storage: c.validatorOptions.Storage, @@ -382,7 +390,7 @@ func (c *controller) handleWorkerMessages(msg *queue.DecodedSSVMessage) error { NewDecidedHandler: c.validatorOptions.NewDecidedHandler, } ncv = &committeeObserver{ - CommitteeObserver: validator.NewNonCommitteeValidator(exporter_message.MessageID(msg.MsgID), nonCommitteeOptions), + CommitteeObserver: validator.NewCommitteeObserver(convert.MessageID(msg.MsgID), committeeObserverOptions), } ttlSlots := nonCommitteeValidatorTTLs[msg.MsgID.GetRoleType()] c.committeesObservers.Set( @@ -393,48 +401,36 @@ func (c *controller) handleWorkerMessages(msg *queue.DecodedSSVMessage) error { } else { ncv = item } - if err := c.handleConsensusMessages(msg, ncv); err != nil { - return err - } - if err := c.handlePostConsensusMessages(msg, ncv); err != nil { + if err := c.handleNonCommitteeMessages(msg, ncv); err != nil { return err } return nil } -func (c *controller) handleConsensusMessages(msg *queue.DecodedSSVMessage, ncv *committeeObserver) error { +func (c *controller) handleNonCommitteeMessages(msg *queue.DecodedSSVMessage, ncv *committeeObserver) error { c.committeesObserversMutex.Lock() defer c.committeesObserversMutex.Unlock() - if msg.MsgType != spectypes.SSVConsensusMsgType { - return nil - } - if msg.MsgID.GetRoleType() != spectypes.RoleCommittee { - return nil - } - - subMsg, ok := msg.Body.(*specqbft.Message) - if !ok || subMsg.MsgType != specqbft.ProposalMsgType { - return nil - } - ncv.OnProposalMsg(msg) - return nil -} + if msg.MsgType == spectypes.SSVConsensusMsgType { + // Process proposal messages for committee consensus only to get the roots + if msg.MsgID.GetRoleType() != spectypes.RoleCommittee { + return nil + } -func (c *controller) handlePostConsensusMessages(msg *queue.DecodedSSVMessage, ncv *committeeObserver) error { - if msg.MsgType != spectypes.SSVPartialSignatureMsgType { - return nil - } + subMsg, ok := msg.Body.(*specqbft.Message) + if !ok || subMsg.MsgType != specqbft.ProposalMsgType { + return nil + } - c.committeesObserversMutex.Lock() - defer c.committeesObserversMutex.Unlock() + return ncv.OnProposalMsg(msg) + } else if msg.MsgType == spectypes.SSVPartialSignatureMsgType { + pSigMessages := &spectypes.PartialSignatureMessages{} + if err := pSigMessages.Decode(msg.SignedSSVMessage.SSVMessage.GetData()); err != nil { + return err + } - 
pSigMessages := &spectypes.PartialSignatureMessages{} - if err := pSigMessages.Decode(msg.SignedSSVMessage.SSVMessage.GetData()); err != nil { - return err + return ncv.ProcessMessage(msg) } - - ncv.ProcessMessage(msg) return nil } @@ -448,17 +444,10 @@ func (c *controller) getNonCommitteeValidators(messageId spectypes.MessageID) *c // StartValidators loads all persisted shares and setup the corresponding validators func (c *controller) StartValidators() { - if c.validatorOptions.Exporter { - // There are no committee validators to setup. - close(c.committeeValidatorSetup) - - // Setup non-committee validators. - c.setupNonCommitteeValidators() - return - } - + // Load non-liquidated shares. shares := c.sharesStorage.List(nil, registrystorage.ByNotLiquidated()) if len(shares) == 0 { + close(c.committeeValidatorSetup) c.logger.Info("could not find validators") return } @@ -466,38 +455,52 @@ func (c *controller) StartValidators() { var ownShares []*ssvtypes.SSVShare var allPubKeys = make([][]byte, 0, len(shares)) for _, share := range shares { - if share.BelongsToOperator(c.operatorDataStore.GetOperatorID()) { + if c.operatorDataStore.GetOperatorID() != 0 && share.BelongsToOperator(c.operatorDataStore.GetOperatorID()) { ownShares = append(ownShares, share) } allPubKeys = append(allPubKeys, share.ValidatorPubKey[:]) } - // Setup committee validators. - inited, committees := c.setupValidators(ownShares) - if len(inited) == 0 { - // If no validators were started and therefore we're not subscribed to any subnets, - // then subscribe to a random subnet to participate in the network. - if err := c.network.SubscribeRandoms(c.logger, 1); err != nil { - c.logger.Error("failed to subscribe to random subnets", zap.Error(err)) + if c.validatorOptions.Exporter { + // There are no committee validators to setup. + close(c.committeeValidatorSetup) + } else { + // Setup committee validators. + inited, committees := c.setupValidators(ownShares) + if len(inited) == 0 { + // If no validators were started and therefore we're not subscribed to any subnets, + // then subscribe to a random subnet to participate in the network. + if err := c.network.SubscribeRandoms(c.logger, 1); err != nil { + c.logger.Error("failed to subscribe to random subnets", zap.Error(err)) + } } - } - close(c.committeeValidatorSetup) + close(c.committeeValidatorSetup) - // Start validators. - c.startValidators(inited, committees) + // Start validators. + c.startValidators(inited, committees) + } - // Fetch metadata for all validators. - start := time.Now() - err := beaconprotocol.UpdateValidatorsMetadata(c.logger, allPubKeys, c, c.beacon, c.onMetadataUpdated) - if err != nil { - c.logger.Error("failed to update validators metadata after setup", - zap.Int("shares", len(allPubKeys)), - fields.Took(time.Since(start)), - zap.Error(err)) - } else { - c.logger.Debug("updated validators metadata after setup", - zap.Int("shares", len(allPubKeys)), - fields.Took(time.Since(start))) + // Fetch metadata now if there is none. Otherwise, UpdateValidatorsMetadataLoop will handle it. 
+ var hasMetadata bool + for _, share := range shares { + if !share.Liquidated && share.HasBeaconMetadata() { + hasMetadata = true + break + } + } + if !hasMetadata { + start := time.Now() + err := beaconprotocol.UpdateValidatorsMetadata(c.logger, allPubKeys, c, c.beacon, c.onMetadataUpdated) + if err != nil { + c.logger.Error("failed to update validators metadata after setup", + zap.Int("shares", len(allPubKeys)), + fields.Took(time.Since(start)), + zap.Error(err)) + } else { + c.logger.Debug("updated validators metadata after setup", + zap.Int("shares", len(allPubKeys)), + fields.Took(time.Since(start))) + } } } @@ -567,28 +570,6 @@ func (c *controller) startValidators(validators []*validator.Validator, committe return started } -// setupNonCommitteeValidators trigger SyncHighestDecided for each validator -// to start consensus flow which would save the highest decided instance -// and sync any gaps (in protocol/v2/qbft/controller/decided.go). -func (c *controller) setupNonCommitteeValidators() { - nonCommitteeShares := c.sharesStorage.List(nil, registrystorage.ByNotLiquidated()) - if len(nonCommitteeShares) == 0 { - c.logger.Info("could not find non-committee validators") - return - } - - pubKeys := make([][]byte, 0, len(nonCommitteeShares)) - for _, validatorShare := range nonCommitteeShares { - pubKeys = append(pubKeys, validatorShare.ValidatorPubKey[:]) - } - if len(pubKeys) > 0 { - c.logger.Debug("updating metadata for non-committee validators", zap.Int("count", len(pubKeys))) - if err := beaconprotocol.UpdateValidatorsMetadata(c.logger, pubKeys, c, c.beacon, c.onMetadataUpdated); err != nil { - c.logger.Warn("could not update all validators", zap.Error(err)) - } - } -} - // StartNetworkHandlers init msg worker that handles network messages func (c *controller) StartNetworkHandlers() { // first, set stream handlers @@ -604,8 +585,6 @@ func (c *controller) StartNetworkHandlers() { // UpdateValidatorMetadata updates a given validator with metadata (implements ValidatorMetadataStorage) func (c *controller) UpdateValidatorMetadata(pk spectypes.ValidatorPK, metadata *beaconprotocol.ValidatorMetadata) error { - c.metadataLastUpdated[pk] = time.Now() - if metadata == nil { return errors.New("could not update empty metadata") } @@ -721,6 +700,10 @@ func (c *controller) GetValidator(pubKey spectypes.ValidatorPK) (*validator.Vali return c.validatorsMap.GetValidator(pubKey) } +func (c *controller) ExecuteGenesisDuty(logger *zap.Logger, duty *genesisspectypes.Duty) { + panic("implement me") +} + func (c *controller) ExecuteDuty(logger *zap.Logger, duty *spectypes.BeaconDuty) { // because we're using the same duty for more than 1 duty (e.g. attest + aggregator) there is an error in bls.Deserialize func for cgo pointer to pointer. 
// so we need to copy the pubkey val to avoid pointer @@ -728,7 +711,7 @@ func (c *controller) ExecuteDuty(logger *zap.Logger, duty *spectypes.BeaconDuty) copy(pk, duty.PubKey[:]) if v, ok := c.GetValidator(spectypes.ValidatorPK(pk)); ok { - ssvMsg, err := CreateDutyExecuteMsg(duty, pk, types.GetDefaultDomain()) + ssvMsg, err := CreateDutyExecuteMsg(duty, pk, c.networkConfig.Domain) if err != nil { logger.Error("could not create duty execute msg", zap.Error(err)) return @@ -738,6 +721,7 @@ func (c *controller) ExecuteDuty(logger *zap.Logger, duty *spectypes.BeaconDuty) logger.Error("could not decode duty execute msg", zap.Error(err)) return } + if pushed := v.Queues[duty.RunnerRole()].Q.TryPush(dec); !pushed { logger.Warn("dropping ExecuteDuty message because the queue is full") } @@ -751,7 +735,7 @@ func (c *controller) ExecuteCommitteeDuty(logger *zap.Logger, committeeID specty logger = logger.With(fields.Slot(duty.Slot), fields.Role(duty.RunnerRole())) if cm, ok := c.validatorsMap.GetCommittee(committeeID); ok { - ssvMsg, err := CreateCommitteeDutyExecuteMsg(duty, committeeID, types.GetDefaultDomain()) + ssvMsg, err := CreateCommitteeDutyExecuteMsg(duty, committeeID, c.networkConfig.Domain) if err != nil { logger.Error("could not create duty execute msg", zap.Error(err)) return @@ -831,11 +815,13 @@ func (c *controller) AllActiveIndices(epoch phase0.Epoch, afterInit bool) []phas if afterInit { <-c.committeeValidatorSetup } - shares := c.sharesStorage.List(nil, registrystorage.ByAttesting(epoch)) - indices := make([]phase0.ValidatorIndex, len(shares)) - for i, share := range shares { - indices[i] = share.BeaconMetadata.Index - } + var indices []phase0.ValidatorIndex + c.sharesStorage.Range(nil, func(share *ssvtypes.SSVShare) bool { + if share.IsAttesting(epoch) { + indices = append(indices, share.BeaconMetadata.Index) + } + return true + }) return indices } @@ -958,9 +944,9 @@ func (c *controller) onShareInit(share *ssvtypes.SSVShare) (*validator.Validator zap.String("committee_id", hex.EncodeToString(operator.CommitteeID[:])), }...) - committeRunnerFunc := SetupCommitteeRunners(ctx, logger, opts) + committeeRunnerFunc := SetupCommitteeRunners(ctx, opts) - vc = validator.NewCommittee(c.context, logger, c.beacon.GetBeaconNetwork(), operator, opts.SignatureVerifier, committeRunnerFunc) + vc = validator.NewCommittee(c.context, logger, c.beacon.GetBeaconNetwork(), operator, opts.SignatureVerifier, committeeRunnerFunc) vc.AddShare(&share.Share) c.validatorsMap.PutCommittee(operator.CommitteeID, vc) @@ -975,7 +961,6 @@ func (c *controller) onShareInit(share *ssvtypes.SSVShare) (*validator.Validator } func (c *controller) committeeMemberFromShare(share *ssvtypes.SSVShare) (*spectypes.CommitteeMember, error) { - operators := make([]*spectypes.Operator, len(share.Committee)) for i, cm := range share.Committee { opdata, found, err := c.operatorsStorage.GetOperatorData(nil, cm.Signer) @@ -1099,37 +1084,45 @@ func (c *controller) startCommittee(vc *validator.Committee) (bool, error) { // UpdateValidatorMetaDataLoop updates metadata of validators in an interval func (c *controller) UpdateValidatorMetaDataLoop() { - var interval = c.beacon.GetBeaconNetwork().SlotDurationSec() * 2 - - // Prepare share filters. - filters := []registrystorage.SharesFilter{} - - // Filter for validators who are not liquidated. - filters = append(filters, registrystorage.ByNotLiquidated()) - - // Filter for validators which haven't been updated recently. 
- filters = append(filters, func(s *ssvtypes.SSVShare) bool { - last, ok := c.metadataLastUpdated[s.ValidatorPubKey] - return !ok || time.Since(last) > c.metadataUpdateInterval - }) + const batchSize = 512 + var sleep = 2 * time.Second for { - time.Sleep(interval) - start := time.Now() - // Get the shares to fetch metadata for. - shares := c.sharesStorage.List(nil, filters...) - var pks [][]byte + start := time.Now() + var existingShares, newShares []*ssvtypes.SSVShare + c.sharesStorage.Range(nil, func(share *ssvtypes.SSVShare) bool { + if share.Liquidated { + return true + } + if share.BeaconMetadata == nil && share.MetadataLastUpdated().IsZero() { + newShares = append(newShares, share) + } else if time.Since(share.MetadataLastUpdated()) > c.metadataUpdateInterval { + existingShares = append(existingShares, share) + } + return len(newShares) < batchSize + }) + + // Combine validators up to batchSize, prioritizing the new ones. + shares := newShares + if remainder := batchSize - len(shares); remainder > 0 { + end := remainder + if end > len(existingShares) { + end = len(existingShares) + } + shares = append(shares, existingShares[:end]...) + } for _, share := range shares { - pks = append(pks, share.ValidatorPubKey[:]) - c.metadataLastUpdated[share.ValidatorPubKey] = time.Now() + share.SetMetadataLastUpdated(time.Now()) } - // TODO: continue if there is nothing to update. - - c.recentlyStartedValidators = 0 - if len(pks) > 0 { - err := beaconprotocol.UpdateValidatorsMetadata(c.logger, pks, c, c.beacon, c.onMetadataUpdated) + filteringTook := time.Since(start) + if len(shares) > 0 { + pubKeys := make([][]byte, len(shares)) + for i, s := range shares { + pubKeys[i] = s.ValidatorPubKey[:] + } + err := c.updateValidatorsMetadata(c.logger, pubKeys, c, c.beacon, c.onMetadataUpdated) if err != nil { c.logger.Warn("failed to update validators metadata", zap.Error(err)) continue @@ -1137,18 +1130,56 @@ func (c *controller) UpdateValidatorMetaDataLoop() { } c.logger.Debug("updated validators metadata", zap.Int("validators", len(shares)), + zap.Int("new_validators", len(newShares)), zap.Uint64("started_validators", c.recentlyStartedValidators), + zap.Duration("filtering_took", filteringTook), fields.Took(time.Since(start))) - // Notify DutyScheduler of new validators. - if c.recentlyStartedValidators > 0 { - select { - case c.indicesChange <- struct{}{}: - case <-time.After(interval): - c.logger.Warn("timed out while notifying DutyScheduler of new validators") - } + // Only sleep if there aren't more validators to fetch metadata for. + if len(shares) < batchSize { + time.Sleep(sleep) + } + } +} + +func (c *controller) updateValidatorsMetadata(logger *zap.Logger, pks [][]byte, storage beaconprotocol.ValidatorMetadataStorage, beacon beaconprotocol.BeaconNode, onMetadataUpdated func(pk spectypes.ValidatorPK, meta *beaconprotocol.ValidatorMetadata)) error { + // Fetch metadata for all validators. + c.recentlyStartedValidators = 0 + beforeUpdate := c.AllActiveIndices(c.beacon.GetBeaconNetwork().EstimatedCurrentEpoch(), false) + + err := beaconprotocol.UpdateValidatorsMetadata(logger, pks, storage, beacon, onMetadataUpdated) + if err != nil { + return errors.Wrap(err, "failed to update validators metadata") + } + + // Refresh duties if there are any new active validators. 
+ afterUpdate := c.AllActiveIndices(c.beacon.GetBeaconNetwork().EstimatedCurrentEpoch(), false) + if c.recentlyStartedValidators > 0 || hasNewValidators(beforeUpdate, afterUpdate) { + c.logger.Debug("new validators found after metadata update", + zap.Int("before", len(beforeUpdate)), + zap.Int("after", len(afterUpdate)), + zap.Uint64("started_validators", c.recentlyStartedValidators), + ) + select { + case c.indicesChange <- struct{}{}: + case <-time.After(2 * c.beacon.GetBeaconNetwork().SlotDurationSec()): + c.logger.Warn("timed out while notifying DutyScheduler of new validators") } } + return nil +} + +func hasNewValidators(before []phase0.ValidatorIndex, after []phase0.ValidatorIndex) bool { + m := make(map[phase0.ValidatorIndex]struct{}) + for _, v := range before { + m[v] = struct{}{} + } + for _, v := range after { + if _, ok := m[v]; !ok { + return true + } + } + return false } // TODO alan: use spec when they fix bugs @@ -1169,7 +1200,7 @@ func TempBeaconVoteValueCheckF( } if bv.Source.Epoch >= bv.Target.Epoch { - return errors.New("attestation data source > target") + return errors.New("attestation data source >= target") } // attestationData := &phase0.AttestationData{ @@ -1187,45 +1218,59 @@ func TempBeaconVoteValueCheckF( } } -func SetupCommitteeRunners(ctx context.Context, logger *zap.Logger, options validator.Options) func(slot phase0.Slot, shares map[phase0.ValidatorIndex]*spectypes.Share) *runner.CommitteeRunner { - // TODO: alan fix domains (from netCfg) - domainType := spectypes.GenesisMainnet +func SetupCommitteeRunners( + ctx context.Context, + options validator.Options, +) func(slot phase0.Slot, shares map[phase0.ValidatorIndex]*spectypes.Share) *runner.CommitteeRunner { buildController := func(role spectypes.RunnerRole, valueCheckF specqbft.ProposedValueCheckF) *qbftcontroller.Controller { config := &qbft.Config{ BeaconSigner: options.Signer, OperatorSigner: options.OperatorSigner, SigningPK: options.SSVShare.ValidatorPubKey[:], // TODO right val? SignatureVerifier: options.SignatureVerifier, - Domain: domainType, + Domain: options.NetworkConfig.Domain, ValueCheckF: nil, // sets per role type ProposerF: func(state *specqbft.State, round specqbft.Round) spectypes.OperatorID { leader := specqbft.RoundRobinProposer(state, round) //logger.Debug("leader", zap.Int("operator_id", int(leader))) return leader }, - Storage: options.Storage.Get(exporter_message.RunnerRole(role)), + Storage: options.Storage.Get(convert.RunnerRole(role)), Network: options.Network, - Timer: roundtimer.New(ctx, options.BeaconNetwork, role, nil), + Timer: roundtimer.New(ctx, options.NetworkConfig.Beacon, role, nil), SignatureVerification: true, } config.ValueCheckF = valueCheckF - identifier := spectypes.NewMsgID(spectypes.GenesisMainnet, options.Operator.CommitteeID[:], role) + identifier := spectypes.NewMsgID(options.NetworkConfig.Domain, options.Operator.CommitteeID[:], role) qbftCtrl := qbftcontroller.NewController(identifier[:], options.Operator, config, options.FullNode) return qbftCtrl } return func(slot phase0.Slot, shares map[phase0.ValidatorIndex]*spectypes.Share) *runner.CommitteeRunner { // Create a committee runner. 
- epoch := options.BeaconNetwork.GetBeaconNetwork().EstimatedEpochAtSlot(slot) + epoch := options.NetworkConfig.Beacon.GetBeaconNetwork().EstimatedEpochAtSlot(slot) valCheck := TempBeaconVoteValueCheckF(options.Signer, slot, options.SSVShare.Share.SharePubKey, epoch) // TODO: (Alan) fix slashing check (committee is not 1 pubkey) - crunner := runner.NewCommitteeRunner(options.BeaconNetwork.GetBeaconNetwork(), shares, buildController(spectypes.RoleCommittee, valCheck), options.Beacon, options.Network, options.Signer, options.OperatorSigner, valCheck) + crunner := runner.NewCommitteeRunner( + options.NetworkConfig, + shares, + buildController(spectypes.RoleCommittee, valCheck), + options.Beacon, + options.Network, + options.Signer, + options.OperatorSigner, + valCheck, + ) return crunner.(*runner.CommitteeRunner) } } // SetupRunners initializes duty runners for the given validator -func SetupRunners(ctx context.Context, logger *zap.Logger, options validator.Options) runner.ValidatorDutyRunners { +func SetupRunners( + ctx context.Context, + logger *zap.Logger, + options validator.Options, +) runner.ValidatorDutyRunners { if options.SSVShare == nil || options.SSVShare.BeaconMetadata == nil { logger.Error("missing validator metadata", zap.String("validator", hex.EncodeToString(options.SSVShare.ValidatorPubKey[:]))) return runner.ValidatorDutyRunners{} // TODO need to find better way to fix it @@ -1240,13 +1285,12 @@ func SetupRunners(ctx context.Context, logger *zap.Logger, options validator.Opt spectypes.RoleVoluntaryExit, } - domainType := ssvtypes.GetDefaultDomain() buildController := func(role spectypes.RunnerRole, valueCheckF specqbft.ProposedValueCheckF) *qbftcontroller.Controller { config := &qbft.Config{ BeaconSigner: options.Signer, OperatorSigner: options.OperatorSigner, SigningPK: options.SSVShare.ValidatorPubKey[:], // TODO right val? 
- Domain: domainType, + Domain: options.NetworkConfig.Domain, SignatureVerifier: options.SignatureVerifier, ValueCheckF: nil, // sets per role type ProposerF: func(state *specqbft.State, round specqbft.Round) spectypes.OperatorID { @@ -1254,14 +1298,14 @@ func SetupRunners(ctx context.Context, logger *zap.Logger, options validator.Opt //logger.Debug("leader", zap.Int("operator_id", int(leader))) return leader }, - Storage: options.Storage.Get(exporter_message.RunnerRole(role)), + Storage: options.Storage.Get(convert.RunnerRole(role)), Network: options.Network, - Timer: roundtimer.New(ctx, options.BeaconNetwork, role, nil), + Timer: roundtimer.New(ctx, options.NetworkConfig.Beacon, role, nil), SignatureVerification: true, } config.ValueCheckF = valueCheckF - identifier := spectypes.NewMsgID(ssvtypes.GetDefaultDomain(), options.SSVShare.Share.ValidatorPubKey[:], role) + identifier := spectypes.NewMsgID(options.NetworkConfig.Domain, options.SSVShare.Share.ValidatorPubKey[:], role) qbftCtrl := qbftcontroller.NewController(identifier[:], options.Operator, config, options.FullNode) return qbftCtrl } @@ -1273,30 +1317,30 @@ func SetupRunners(ctx context.Context, logger *zap.Logger, options validator.Opt for _, role := range runnersType { switch role { //case spectypes.BNRoleAttester: - // valCheck := specssv.AttesterValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey) + // valCheck := specssv.AttesterValueCheckF(options.Signer, options.NetworkConfig.Beacon.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey) // qbftCtrl := buildController(spectypes.BNRoleAttester, valCheck) - // runners[role] = runner.NewAttesterRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, options.OperatorSigner, valCheck, 0) + // runners[role] = runner.NewAttesterRunner(options.NetworkConfig.Beacon.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, options.OperatorSigner, valCheck, 0) case spectypes.RoleProposer: - proposedValueCheck := specssv.ProposerValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey) + proposedValueCheck := specssv.ProposerValueCheckF(options.Signer, options.NetworkConfig.Beacon.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey) qbftCtrl := buildController(spectypes.RoleProposer, proposedValueCheck) - runners[role] = runner.NewProposerRunner(options.BeaconNetwork.GetBeaconNetwork(), shareMap, qbftCtrl, options.Beacon, options.Network, options.Signer, options.OperatorSigner, proposedValueCheck, 0) + runners[role] = runner.NewProposerRunner(options.NetworkConfig.Beacon.GetBeaconNetwork(), shareMap, qbftCtrl, options.Beacon, options.Network, options.Signer, options.OperatorSigner, proposedValueCheck, 0) case spectypes.RoleAggregator: - aggregatorValueCheckF := specssv.AggregatorValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) + aggregatorValueCheckF := specssv.AggregatorValueCheckF(options.Signer, options.NetworkConfig.Beacon.GetBeaconNetwork(), 
options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) qbftCtrl := buildController(spectypes.RoleAggregator, aggregatorValueCheckF) - runners[role] = runner.NewAggregatorRunner(options.BeaconNetwork.GetBeaconNetwork(), shareMap, qbftCtrl, options.Beacon, options.Network, options.Signer, options.OperatorSigner, aggregatorValueCheckF, 0) + runners[role] = runner.NewAggregatorRunner(options.NetworkConfig.Beacon.GetBeaconNetwork(), shareMap, qbftCtrl, options.Beacon, options.Network, options.Signer, options.OperatorSigner, aggregatorValueCheckF, 0) //case spectypes.BNRoleSyncCommittee: - //syncCommitteeValueCheckF := specssv.SyncCommitteeValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) + //syncCommitteeValueCheckF := specssv.SyncCommitteeValueCheckF(options.Signer, options.NetworkConfig.Beacon.GetBeaconNetwork(), options.SSVShare.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) //qbftCtrl := buildController(spectypes.BNRoleSyncCommittee, syncCommitteeValueCheckF) - //runners[role] = runner.NewSyncCommitteeRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, options.OperatorSigner, syncCommitteeValueCheckF, 0) + //runners[role] = runner.NewSyncCommitteeRunner(options.NetworkConfig.Beacon.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, options.OperatorSigner, syncCommitteeValueCheckF, 0) case spectypes.RoleSyncCommitteeContribution: - syncCommitteeContributionValueCheckF := specssv.SyncCommitteeContributionValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) + syncCommitteeContributionValueCheckF := specssv.SyncCommitteeContributionValueCheckF(options.Signer, options.NetworkConfig.Beacon.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) qbftCtrl := buildController(spectypes.RoleSyncCommitteeContribution, syncCommitteeContributionValueCheckF) - runners[role] = runner.NewSyncCommitteeAggregatorRunner(options.BeaconNetwork.GetBeaconNetwork(), shareMap, qbftCtrl, options.Beacon, options.Network, options.Signer, options.OperatorSigner, syncCommitteeContributionValueCheckF, 0) + runners[role] = runner.NewSyncCommitteeAggregatorRunner(options.NetworkConfig.Beacon.GetBeaconNetwork(), shareMap, qbftCtrl, options.Beacon, options.Network, options.Signer, options.OperatorSigner, syncCommitteeContributionValueCheckF, 0) case spectypes.RoleValidatorRegistration: qbftCtrl := buildController(spectypes.RoleValidatorRegistration, nil) - runners[role] = runner.NewValidatorRegistrationRunner(options.BeaconNetwork.GetBeaconNetwork(), shareMap, qbftCtrl, options.Beacon, options.Network, options.Signer, options.OperatorSigner) + runners[role] = runner.NewValidatorRegistrationRunner(options.NetworkConfig.Beacon.GetBeaconNetwork(), shareMap, qbftCtrl, options.Beacon, options.Network, options.Signer, options.OperatorSigner) case spectypes.RoleVoluntaryExit: - runners[role] = runner.NewVoluntaryExitRunner(options.BeaconNetwork.GetBeaconNetwork(), shareMap, options.Beacon, options.Network, options.Signer, options.OperatorSigner) + runners[role] = runner.NewVoluntaryExitRunner(options.NetworkConfig.Beacon.GetBeaconNetwork(), shareMap, options.Beacon, options.Network, options.Signer, options.OperatorSigner) } } return runners 
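For context on the wiring above: SetupRunners and buildController are now parameterized by options.NetworkConfig, which carries both the SSV domain (consumed by spectypes.NewMsgID and qbft.Config.Domain) and the beacon network (consumed by the value checks and the round timer), replacing the removed package-global default domain. A minimal sketch of the resulting call shape, assuming it sits in the same package as SetupRunners; buildRunnersSketch and the choice of networkconfig.TestNetwork are illustrative, not part of this change:

    // Illustrative sketch only (not in this diff). A caller picks the network
    // once; SetupRunners then derives every runner's message identifiers and
    // QBFT domain from it, with no mutable global state involved.
    func buildRunnersSketch(ctx context.Context, logger *zap.Logger, opts validator.Options) runner.ValidatorDutyRunners {
    	opts.NetworkConfig = networkconfig.TestNetwork // bundles Domain and Beacon
    	return SetupRunners(ctx, logger, opts)         // MsgIDs use opts.NetworkConfig.Domain
    }
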
diff --git a/operator/validator/controller_test.go b/operator/validator/controller_test.go index b7d44d0460..8d6055034c 100644 --- a/operator/validator/controller_test.go +++ b/operator/validator/controller_test.go @@ -16,11 +16,12 @@ import ( "github.com/herumi/bls-eth-go-binary/bls" "github.com/pkg/errors" "github.com/stretchr/testify/require" - gomock "go.uber.org/mock/gomock" + "go.uber.org/mock/gomock" "go.uber.org/zap" specqbft "github.com/ssvlabs/ssv-spec/qbft" spectypes "github.com/ssvlabs/ssv-spec/types" + "github.com/ssvlabs/ssv/ekm" ibftstorage "github.com/ssvlabs/ssv/ibft/storage" "github.com/ssvlabs/ssv/logging" @@ -90,11 +91,11 @@ func TestNewController(t *testing.T) { require.IsType(t, &controller{}, control) } -func TestSetupNonCommitteeValidators(t *testing.T) { +func TestSetupValidatorsExporter(t *testing.T) { passedEpoch := phase0.Epoch(1) operators := buildOperators(t) - operatorDataStore := operatordatastore.New(buildOperatorData(1, "67Ce5c69260bd819B4e0AD13f4b873074D479811")) + operatorDataStore := operatordatastore.New(buildOperatorData(0, "67Ce5c69260bd819B4e0AD13f4b873074D479811")) recipientData := buildFeeRecipient("67Ce5c69260bd819B4e0AD13f4b873074D479811", "45E668aba4b7fc8761331EC3CE77584B7A99A51A") secretKey := &bls.SecretKey{} @@ -102,18 +103,8 @@ func TestSetupNonCommitteeValidators(t *testing.T) { require.NoError(t, secretKey.SetHexString(sk1Str)) require.NoError(t, secretKey2.SetHexString(sk2Str)) - firstValidator := &validator.Validator{ - DutyRunners: runner.ValidatorDutyRunners{}, - Storage: ibftstorage.NewStores(), - Share: &types.SSVShare{ - Share: spectypes.Share{ - ValidatorPubKey: spectypes.ValidatorPK(secretKey.GetPublicKey().Serialize()), - }, - }, - } - bcResponse := map[phase0.ValidatorIndex]*eth2apiv1.Validator{ - 0: { + 2: { Balance: 0, Status: 3, Index: 2, @@ -122,9 +113,18 @@ func TestSetupNonCommitteeValidators(t *testing.T) { PublicKey: phase0.BLSPubKey(secretKey.GetPublicKey().Serialize()), }, }, + 3: { + Balance: 0, + Status: 3, + Index: 3, + Validator: &phase0.Validator{ + ActivationEpoch: passedEpoch, + PublicKey: phase0.BLSPubKey(secretKey2.GetPublicKey().Serialize()), + }, + }, } - sharesSlice := []*types.SSVShare{ + sharesWithMetadata := []*types.SSVShare{ { Share: spectypes.Share{ Committee: operators, @@ -156,35 +156,83 @@ func TestSetupNonCommitteeValidators(t *testing.T) { }, }, } + _ = sharesWithMetadata + + sharesWithoutMetadata := []*types.SSVShare{ + { + Share: spectypes.Share{ + //OperatorID: 1, + Committee: operators, + ValidatorPubKey: spectypes.ValidatorPK(secretKey.GetPublicKey().Serialize()), + }, + Metadata: types.Metadata{ + Liquidated: false, + }, + }, + { + Share: spectypes.Share{ + //OperatorID: 2, + Committee: operators, + ValidatorPubKey: spectypes.ValidatorPK(secretKey2.GetPublicKey().Serialize()), + }, + Metadata: types.Metadata{ + Liquidated: false, + }, + }, + } + _ = sharesWithoutMetadata testCases := []struct { name string shareStorageListResponse []*types.SSVShare + expectMetadataFetch bool syncHighestDecidedResponse error getValidatorDataResponse error }{ - {"no shares of non committee", nil, nil, nil}, - {"set up non committee validators", sharesSlice, nil, nil}, - {"fail to sync highest decided", sharesSlice, errors.New("failed to sync highest decided"), nil}, - {"fail to update validators metadata", sharesSlice, nil, errors.New("could not update all validators")}, + {"no shares of non committee", nil, false, nil, nil}, + {"set up non committee validators", sharesWithMetadata, false, nil, nil}, + {"set 
up non committee validators without metadata", sharesWithoutMetadata, true, nil, nil}, + {"fail to sync highest decided", sharesWithMetadata, false, errors.New("failed to sync highest decided"), nil}, + {"fail to update validators metadata", sharesWithMetadata, false, nil, errors.New("could not update all validators")}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { ctrl, logger, sharesStorage, network, _, recipientStorage, bc := setupCommonTestComponents(t) defer ctrl.Finish() - testValidatorsMap := map[spectypes.ValidatorPK]*validator.Validator{ - (spectypes.ValidatorPK)(secretKey.GetPublicKey().Serialize()): firstValidator, - } - mockValidatorsMap := validators.New(context.TODO(), validators.WithInitialState(testValidatorsMap, nil)) + mockValidatorsMap := validators.New(context.TODO()) if tc.shareStorageListResponse == nil { sharesStorage.EXPECT().List(gomock.Any(), gomock.Any()).Return(tc.shareStorageListResponse).Times(1) } else { - sharesStorage.EXPECT().Get(gomock.Any(), gomock.Any()).Return(sharesSlice[0]).AnyTimes() - bc.EXPECT().GetValidatorData(gomock.Any()).Return(bcResponse, tc.getValidatorDataResponse).Times(1) + sharesStorage.EXPECT().Get(gomock.Any(), gomock.Any()).DoAndReturn(func(_ basedb.Reader, pubKey []byte) *types.SSVShare { + for _, share := range tc.shareStorageListResponse { + if hex.EncodeToString(share.Share.ValidatorPubKey[:]) == hex.EncodeToString(pubKey) { + return share + } + } + return nil + }).AnyTimes() sharesStorage.EXPECT().List(gomock.Any(), gomock.Any()).Return(tc.shareStorageListResponse).AnyTimes() - sharesStorage.EXPECT().UpdateValidatorMetadata(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + sharesStorage.EXPECT().Range(gomock.Any(), gomock.Any()).DoAndReturn(func(_ basedb.Reader, fn func(*types.SSVShare) bool) { + for _, share := range tc.shareStorageListResponse { + if !fn(share) { + break + } + } + }).AnyTimes() + if tc.expectMetadataFetch { + bc.EXPECT().GetValidatorData(gomock.Any()).Return(bcResponse, tc.getValidatorDataResponse).Times(1) + sharesStorage.EXPECT().UpdateValidatorMetadata(gomock.Any(), gomock.Any()).DoAndReturn(func(pk string, metadata *beacon.ValidatorMetadata) error { + for _, share := range tc.shareStorageListResponse { + if hex.EncodeToString(share.Share.ValidatorPubKey[:]) == pk { + share.Metadata.BeaconMetadata = metadata + } + } + return nil + }).Times(len(tc.shareStorageListResponse)) + bc.EXPECT().GetBeaconNetwork().Return(networkconfig.Mainnet.Beacon.GetBeaconNetwork()).AnyTimes() + } sharesStorage.EXPECT().UpdateValidatorsMetadata(gomock.Any()).Return(nil).AnyTimes() recipientStorage.EXPECT().GetRecipientData(gomock.Any(), gomock.Any()).Return(recipientData, true, nil).Times(0) } @@ -193,19 +241,21 @@ func TestSetupNonCommitteeValidators(t *testing.T) { return true, nil } controllerOptions := MockControllerOptions{ - beacon: bc, - network: network, - operatorDataStore: operatorDataStore, - sharesStorage: sharesStorage, - recipientsStorage: recipientStorage, - validatorsMap: mockValidatorsMap, - validatorOptions: validator.Options{}, + beacon: bc, + network: network, + operatorDataStore: operatorDataStore, + sharesStorage: sharesStorage, + recipientsStorage: recipientStorage, + validatorsMap: mockValidatorsMap, + validatorOptions: validator.Options{ + Exporter: true, + }, metrics: validator.NopMetrics{}, metadataLastUpdated: map[spectypes.ValidatorPK]time.Time{}, } ctr := setupController(logger, controllerOptions) ctr.validatorStartFunc = validatorStartFunc - 
ctr.setupNonCommitteeValidators() + ctr.StartValidators() }) } } @@ -637,7 +687,7 @@ func TestSetupValidators(t *testing.T) { operatorStorage: opStorage, validatorsMap: mockValidatorsMap, validatorOptions: validator.Options{ - BeaconNetwork: networkconfig.TestNetwork.Beacon, + NetworkConfig: networkconfig.TestNetwork, Storage: storageMap, }, metadataLastUpdated: metadataLastMap, @@ -1025,21 +1075,22 @@ func TestGetIndices(t *testing.T) { func setupController(logger *zap.Logger, opts MockControllerOptions) controller { return controller{ - metadataUpdateInterval: 0, - logger: logger, - beacon: opts.beacon, - network: opts.network, - metrics: opts.metrics, - beaconSigner: opts.signer, - ibftStorageMap: opts.StorageMap, - operatorDataStore: opts.operatorDataStore, - operatorsStorage: opts.operatorStorage, - sharesStorage: opts.sharesStorage, - validatorsMap: opts.validatorsMap, - context: context.Background(), - validatorOptions: opts.validatorOptions, - recipientsStorage: opts.recipientsStorage, - messageRouter: newMessageRouter(logger), + metadataUpdateInterval: 0, + logger: logger, + beacon: opts.beacon, + network: opts.network, + metrics: opts.metrics, + ibftStorageMap: opts.StorageMap, + operatorDataStore: opts.operatorDataStore, + sharesStorage: opts.sharesStorage, + operatorsStorage: opts.operatorStorage, + validatorsMap: opts.validatorsMap, + context: context.Background(), + validatorOptions: opts.validatorOptions, + recipientsStorage: opts.recipientsStorage, + messageRouter: newMessageRouter(logger), + committeeValidatorSetup: make(chan struct{}), + indicesChange: make(chan struct{}, 32), messageWorker: worker.NewWorker(logger, &worker.Config{ Ctx: context.Background(), WorkersCount: 1, diff --git a/operator/validator/mocks/controller.go b/operator/validator/mocks/controller.go index 8eaec89f8e..0f199d4716 100644 --- a/operator/validator/mocks/controller.go +++ b/operator/validator/mocks/controller.go @@ -392,6 +392,18 @@ func (mr *MockSharesStorageMockRecorder) List(txn any, filters ...any) *gomock.C return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockSharesStorage)(nil).List), varargs...) } +// Range mocks base method. +func (m *MockSharesStorage) Range(txn basedb.Reader, fn func(*types0.SSVShare) bool) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Range", txn, fn) +} + +// Range indicates an expected call of Range. +func (mr *MockSharesStorageMockRecorder) Range(txn, fn interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Range", reflect.TypeOf((*MockSharesStorage)(nil).Range), txn, fn) +} + // UpdateValidatorMetadata mocks base method. 
func (m *MockSharesStorage) UpdateValidatorMetadata(pk types.ValidatorPK, metadata *beacon.ValidatorMetadata) error { m.ctrl.T.Helper() diff --git a/operator/validator/task_executor_test.go b/operator/validator/task_executor_test.go index 836c857069..7ea0d184a3 100644 --- a/operator/validator/task_executor_test.go +++ b/operator/validator/task_executor_test.go @@ -9,11 +9,11 @@ import ( v1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/ethereum/go-ethereum/common" "github.com/herumi/bls-eth-go-binary/bls" - "github.com/stretchr/testify/require" - gomock "go.uber.org/mock/gomock" - spectypes "github.com/ssvlabs/ssv-spec/types" "github.com/ssvlabs/ssv-spec/types/testingutils" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + ibftstorage "github.com/ssvlabs/ssv/ibft/storage" "github.com/ssvlabs/ssv/networkconfig" operatordatastore "github.com/ssvlabs/ssv/operator/datastore" @@ -178,7 +178,7 @@ func TestController_ReactivateCluster(t *testing.T) { validatorsMap: mockValidatorsMap, validatorOptions: validator.Options{ Storage: storageMap, - BeaconNetwork: networkconfig.TestNetwork.Beacon, + NetworkConfig: networkconfig.TestNetwork, }, metrics: validator.NopMetrics{}, metadataLastUpdated: map[spectypes.ValidatorPK]time.Time{}, diff --git a/protocol/v2/p2p/network.go b/protocol/v2/p2p/network.go index 093fbc4b37..3823f28faa 100644 --- a/protocol/v2/p2p/network.go +++ b/protocol/v2/p2p/network.go @@ -34,7 +34,7 @@ type Broadcaster interface { } // RequestHandler handles p2p requests -type RequestHandler func(*spectypes.SSVMessage) (*spectypes.SSVMessage, error) +type RequestHandler func(ssvMessage *spectypes.SSVMessage) (*spectypes.SSVMessage, error) // CombineRequestHandlers combines multiple handlers into a single handler func CombineRequestHandlers(handlers ...RequestHandler) RequestHandler { diff --git a/protocol/v2/qbft/controller/controller_test.go b/protocol/v2/qbft/controller/controller_test.go index 75b99dcbdd..858e52cbf5 100644 --- a/protocol/v2/qbft/controller/controller_test.go +++ b/protocol/v2/qbft/controller/controller_test.go @@ -44,11 +44,17 @@ func TestController_OnTimeoutWithRoundCheck(t *testing.T) { Timer: roundtimer.NewTestingTimer(), } + identifier := make([]byte, 56) + identifier[0] = 1 + identifier[1] = 2 + identifier[2] = 3 + identifier[3] = 4 + share := spectestingutils.TestingCommitteeMember(keySet) inst := instance.NewInstance( testConfig, share, - []byte{1, 2, 3, 4}, + identifier, specqbft.FirstHeight, ) diff --git a/protocol/v2/qbft/instance/instance.go b/protocol/v2/qbft/instance/instance.go index a16d1d49ed..268ece9730 100644 --- a/protocol/v2/qbft/instance/instance.go +++ b/protocol/v2/qbft/instance/instance.go @@ -3,15 +3,14 @@ package instance import ( "encoding/base64" "encoding/json" - "github.com/ssvlabs/ssv-spec-pre-cc/types" "sync" "time" "github.com/pkg/errors" + spectypes "github.com/ssvlabs/ssv-spec/types" "go.uber.org/zap" specqbft "github.com/ssvlabs/ssv-spec/qbft" - spectypes "github.com/ssvlabs/ssv-spec/types" "github.com/ssvlabs/ssv/logging/fields" "github.com/ssvlabs/ssv/protocol/v2/qbft" ) @@ -40,10 +39,11 @@ func NewInstance( ) *Instance { var name = "" if len(identifier) == 56 { - name = types.MessageID(identifier).GetRoleType().String() + name = spectypes.MessageID(identifier).GetRoleType().String() } else { name = base64.StdEncoding.EncodeToString(identifier) } + return &Instance{ State: &specqbft.State{ CommitteeMember: committeeMember, @@ -62,6 +62,15 @@ func NewInstance( } } +// TODO remove +func 
messageIDFromBytes(mid []byte) spectypes.MessageID { + if len(mid) < 56 { + return spectypes.MessageID{} + } + + return spectypes.MessageID(mid) +} + func (i *Instance) ForceStop() { i.forceStop = true } @@ -73,8 +82,6 @@ func (i *Instance) Start(logger *zap.Logger, value []byte, height specqbft.Heigh i.bumpToRound(specqbft.FirstRound) i.State.Height = height i.metrics.StartStage() - i.started = time.Now() - i.config.GetTimer().TimeoutForRound(height, specqbft.FirstRound) logger = logger.With( diff --git a/protocol/v2/qbft/instance/prepare.go b/protocol/v2/qbft/instance/prepare.go index 1ea1d2f56e..77ac07a8a8 100644 --- a/protocol/v2/qbft/instance/prepare.go +++ b/protocol/v2/qbft/instance/prepare.go @@ -2,8 +2,6 @@ package instance import ( "bytes" - "time" - "github.com/pkg/errors" "go.uber.org/zap" @@ -53,10 +51,7 @@ func (i *Instance) uponPrepare(logger *zap.Logger, signedPrepare *spectypes.Sign logger.Debug("🎯 got prepare quorum", fields.Round(i.State.Round), - zap.Any("prepare-signers", allSigners(prepareMsgContainer.MessagesForRound(i.State.Round))), - fields.Root(proposedRoot), - fields.QuorumTime(time.Since(i.started)), - ) + zap.Any("prepare-signers", allSigners(prepareMsgContainer.MessagesForRound(i.State.Round)))) commitMsg, err := CreateCommit(i.State, i.config, proposedRoot) if err != nil { diff --git a/protocol/v2/qbft/spectest/controller_type.go b/protocol/v2/qbft/spectest/controller_type.go index 4077d26276..9d9bf733e3 100644 --- a/protocol/v2/qbft/spectest/controller_type.go +++ b/protocol/v2/qbft/spectest/controller_type.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "github.com/ssvlabs/ssv/exporter/convert" "os" "path/filepath" "reflect" @@ -58,10 +59,10 @@ func RunControllerSpecTest(t *testing.T, test *spectests.ControllerSpecTest) { func generateController(logger *zap.Logger) *controller.Controller { identifier := []byte{1, 2, 3, 4} - config := qbfttesting.TestingConfig(logger, spectestingutils.Testing4SharesSet(), spectypes.RunnerRole(spectypes.RoleCommittee)) + config := qbfttesting.TestingConfig(logger, spectestingutils.Testing4SharesSet(), convert.RoleCommittee) return qbfttesting.NewTestingQBFTController( identifier[:], - spectestingutils.TestingOperator(spectestingutils.Testing4SharesSet()), + spectestingutils.TestingCommitteeMember(spectestingutils.Testing4SharesSet()), config, false, ) @@ -110,13 +111,13 @@ func testBroadcastedDecided( config *qbft.Config, identifier []byte, runData *spectests.RunInstanceData, - operators []*spectypes.CommitteeMember, + committee []*spectypes.Operator, ) { if runData.ExpectedDecidedState.BroadcastedDecided != nil { // test broadcasted broadcastedSignedMsgs := config.GetNetwork().(*spectestingutils.TestingNetwork).BroadcastedMsgs require.Greater(t, len(broadcastedSignedMsgs), 0) - require.NoError(t, spectestingutils.VerifyListOfSignedSSVMessages(broadcastedSignedMsgs, operators)) + require.NoError(t, spectestingutils.VerifyListOfSignedSSVMessages(broadcastedSignedMsgs, committee)) found := false for _, msg := range broadcastedSignedMsgs { @@ -158,7 +159,7 @@ func runInstanceWithData(t *testing.T, logger *zap.Logger, height specqbft.Heigh lastErr = err } - testBroadcastedDecided(t, contr.GetConfig().(*qbft.Config), contr.Identifier, runData, contr.Share.Committee) + testBroadcastedDecided(t, contr.GetConfig().(*qbft.Config), contr.Identifier, runData, contr.CommitteeMember.Committee) // test root r, err := contr.GetRoot() diff --git a/protocol/v2/qbft/spectest/create_msg_type.go 
b/protocol/v2/qbft/spectest/create_msg_type.go index 02354a6f29..8970fca845 100644 --- a/protocol/v2/qbft/spectest/create_msg_type.go +++ b/protocol/v2/qbft/spectest/create_msg_type.go @@ -48,8 +48,8 @@ func RunCreateMsg(t *testing.T, test *spectests.CreateMsgSpecTest) { func createCommit(test *spectests.CreateMsgSpecTest) (*spectypes.SignedSSVMessage, error) { ks := testingutils.Testing4SharesSet() state := &specqbft.State{ - Share: testingutils.TestingOperator(ks), - ID: []byte{1, 2, 3, 4}, + CommitteeMember: testingutils.TestingCommitteeMember(ks), + ID: []byte{1, 2, 3, 4}, } config := testingutils.TestingConfig(ks) @@ -59,8 +59,8 @@ func createCommit(test *spectests.CreateMsgSpecTest) (*spectypes.SignedSSVMessag func createPrepare(test *spectests.CreateMsgSpecTest) (*spectypes.SignedSSVMessage, error) { ks := testingutils.Testing4SharesSet() state := &specqbft.State{ - Share: testingutils.TestingOperator(ks), - ID: []byte{1, 2, 3, 4}, + CommitteeMember: testingutils.TestingCommitteeMember(ks), + ID: []byte{1, 2, 3, 4}, } config := testingutils.TestingConfig(ks) @@ -70,8 +70,8 @@ func createPrepare(test *spectests.CreateMsgSpecTest) (*spectypes.SignedSSVMessa func createProposal(test *spectests.CreateMsgSpecTest) (*spectypes.SignedSSVMessage, error) { ks := testingutils.Testing4SharesSet() state := &specqbft.State{ - Share: testingutils.TestingOperator(ks), - ID: []byte{1, 2, 3, 4}, + CommitteeMember: testingutils.TestingCommitteeMember(ks), + ID: []byte{1, 2, 3, 4}, } config := testingutils.TestingConfig(ks) @@ -81,7 +81,7 @@ func createProposal(test *spectests.CreateMsgSpecTest) (*spectypes.SignedSSVMess func createRoundChange(test *spectests.CreateMsgSpecTest) (*spectypes.SignedSSVMessage, error) { ks := testingutils.Testing4SharesSet() state := &specqbft.State{ - Share: testingutils.TestingOperator(ks), + CommitteeMember: testingutils.TestingCommitteeMember(ks), ID: []byte{1, 2, 3, 4}, PrepareContainer: qbft.NewMsgContainer(), } diff --git a/protocol/v2/qbft/spectest/msg_processing_type.go b/protocol/v2/qbft/spectest/msg_processing_type.go index c15d634c4d..eef4748d15 100644 --- a/protocol/v2/qbft/spectest/msg_processing_type.go +++ b/protocol/v2/qbft/spectest/msg_processing_type.go @@ -31,8 +31,8 @@ func RunMsgProcessing(t *testing.T, test *spectests.MsgProcessingSpecTest) { msgId := specqbft.ControllerIdToMessageID(test.Pre.State.ID) logger := logging.TestLogger(t) pre := instance.NewInstance( - qbfttesting.TestingConfig(logger, spectestingutils.KeySetForOperator(test.Pre.State.Share), msgId.GetRoleType()), - test.Pre.State.Share, + qbfttesting.TestingConfig(logger, spectestingutils.KeySetForCommitteeMember(test.Pre.State.CommitteeMember), msgId.GetRoleType()), + test.Pre.State.CommitteeMember, test.Pre.State.ID, test.Pre.State.Height, ) diff --git a/protocol/v2/qbft/spectest/qbft_mapping_test.go b/protocol/v2/qbft/spectest/qbft_mapping_test.go index 75b03c8388..486b3a208e 100644 --- a/protocol/v2/qbft/spectest/qbft_mapping_test.go +++ b/protocol/v2/qbft/spectest/qbft_mapping_test.go @@ -18,7 +18,6 @@ import ( "github.com/ssvlabs/ssv/protocol/v2/qbft/instance" protocoltesting "github.com/ssvlabs/ssv/protocol/v2/testing" - "github.com/ssvlabs/ssv/protocol/v2/types" ) func TestQBFTMapping(t *testing.T) { @@ -31,8 +30,6 @@ func TestQBFTMapping(t *testing.T) { panic(err.Error()) } - types.SetDefaultDomain(testingutils.TestingSSVDomainType) - for name, test := range untypedTests { name, test := name, test testName := strings.Split(name, "_")[1] @@ -100,12 +97,11 @@ func 
TestQBFTMapping(t *testing.T) { // a little trick we do to instantiate all the internal instance params - identifier := spectypes.MessageIDFromBytes(typedTest.Pre.State.ID) preByts, _ := typedTest.Pre.Encode() logger := logging.TestLogger(t) pre := instance.NewInstance( - testing2.TestingConfig(logger, testingutils.KeySetForOperator(typedTest.Pre.State.Share), identifier.GetRoleType()), - typedTest.Pre.State.Share, + testing2.TestingConfig(logger, testingutils.KeySetForCommitteeMember(typedTest.Pre.State.CommitteeMember), spectypes.RoleCommittee), + typedTest.Pre.State.CommitteeMember, typedTest.Pre.State.ID, typedTest.Pre.State.Height, ) diff --git a/protocol/v2/qbft/storage/ibft_store.go b/protocol/v2/qbft/storage/ibft_store.go index 0937b39b80..a7365981f2 100644 --- a/protocol/v2/qbft/storage/ibft_store.go +++ b/protocol/v2/qbft/storage/ibft_store.go @@ -2,10 +2,9 @@ package qbftstorage import ( "encoding/json" - spectypes "github.com/ssvlabs/ssv-spec/types" - "github.com/ssvlabs/ssv/exporter/exporter_message" - "github.com/attestantio/go-eth2-client/spec/phase0" + spectypes "github.com/ssvlabs/ssv-spec/types" + "github.com/ssvlabs/ssv/exporter/convert" "go.uber.org/zap" specqbft "github.com/ssvlabs/ssv-spec/qbft" @@ -30,7 +29,7 @@ func (si *StoredInstance) Decode(data []byte) error { type ParticipantsRangeEntry struct { Slot phase0.Slot Signers []spectypes.OperatorID - Identifier exporter_message.MessageID + Identifier convert.MessageID } // InstanceStore manages instance data. @@ -57,13 +56,13 @@ type InstanceStore interface { CleanAllInstances(logger *zap.Logger, msgID []byte) error // SaveParticipants save participants in quorum. - SaveParticipants(identifier exporter_message.MessageID, slot phase0.Slot, operators []spectypes.OperatorID) error + SaveParticipants(identifier convert.MessageID, slot phase0.Slot, operators []spectypes.OperatorID) error // GetParticipantsInRange returns participants in quorum for the given slot range. - GetParticipantsInRange(identifier exporter_message.MessageID, from, to phase0.Slot) ([]ParticipantsRangeEntry, error) + GetParticipantsInRange(identifier convert.MessageID, from, to phase0.Slot) ([]ParticipantsRangeEntry, error) // GetParticipants returns participants in quorum for the given slot. 
- GetParticipants(identifier exporter_message.MessageID, slot phase0.Slot) ([]spectypes.OperatorID, error) + GetParticipants(identifier convert.MessageID, slot phase0.Slot) ([]spectypes.OperatorID, error) } // QBFTStore is the store used by QBFT components diff --git a/protocol/v2/qbft/testing/storage.go b/protocol/v2/qbft/testing/storage.go index 77fd351cbf..87e14d62ae 100644 --- a/protocol/v2/qbft/testing/storage.go +++ b/protocol/v2/qbft/testing/storage.go @@ -2,9 +2,9 @@ package testing import ( "context" + "github.com/ssvlabs/ssv/exporter/convert" "sync" - spectypes "github.com/ssvlabs/ssv-spec/types" qbftstorage "github.com/ssvlabs/ssv/ibft/storage" "github.com/ssvlabs/ssv/storage/basedb" "github.com/ssvlabs/ssv/storage/kv" @@ -27,13 +27,13 @@ func getDB(logger *zap.Logger) basedb.Database { return db } -var allRoles = []spectypes.RunnerRole{ - spectypes.RoleCommittee, - spectypes.RoleProposer, - spectypes.RoleAggregator, - spectypes.RoleSyncCommitteeContribution, - spectypes.RoleValidatorRegistration, - spectypes.RoleVoluntaryExit, +var allRoles = []convert.RunnerRole{ + convert.RoleCommittee, + convert.RoleProposer, + convert.RoleAggregator, + convert.RoleSyncCommitteeContribution, + convert.RoleValidatorRegistration, + convert.RoleVoluntaryExit, } func TestingStores(logger *zap.Logger) *qbftstorage.QBFTStores { diff --git a/protocol/v2/qbft/testing/utils.go b/protocol/v2/qbft/testing/utils.go index 204147d993..e886b4f5a3 100644 --- a/protocol/v2/qbft/testing/utils.go +++ b/protocol/v2/qbft/testing/utils.go @@ -2,8 +2,8 @@ package testing import ( "bytes" - "github.com/pkg/errors" + "github.com/ssvlabs/ssv/exporter/convert" "github.com/ssvlabs/ssv/protocol/v2/qbft/roundtimer" "go.uber.org/zap" @@ -14,7 +14,7 @@ import ( "github.com/ssvlabs/ssv/protocol/v2/qbft/controller" ) -var TestingConfig = func(logger *zap.Logger, keySet *testingutils.TestKeySet, role types.RunnerRole) *qbft.Config { +var TestingConfig = func(logger *zap.Logger, keySet *testingutils.TestKeySet, role convert.RunnerRole) *qbft.Config { return &qbft.Config{ BeaconSigner: testingutils.NewTestingKeyManager(), OperatorSigner: testingutils.NewTestingOperatorSigner(keySet, 1), @@ -39,6 +39,7 @@ var TestingConfig = func(logger *zap.Logger, keySet *testingutils.TestKeySet, ro Timer: roundtimer.NewTestingTimer(), SignatureVerification: true, SignatureVerifier: testingutils.NewTestingVerifier(), + CutOffRound: testingutils.TestingCutOffRound, } } diff --git a/protocol/v2/ssv/runner/aggregator.go b/protocol/v2/ssv/runner/aggregator.go index c2208fd4c0..68e92fabf3 100644 --- a/protocol/v2/ssv/runner/aggregator.go +++ b/protocol/v2/ssv/runner/aggregator.go @@ -2,7 +2,6 @@ package runner import ( "crypto/sha256" - "encoding/hex" "encoding/json" "time" @@ -214,15 +213,7 @@ func (r *AggregatorRunner) ProcessPostConsensus(logger *zap.Logger, signedMsg *s } start := time.Now() - endSubmission := r.metrics.StartBeaconSubmission() - - logger = logger.With( - zap.Uint64s("signers", getPostConsensusSigners(r.GetState(), root)), - fields.PreConsensusTime(r.metrics.GetPreConsensusTime()), - fields.ConsensusTime(r.metrics.GetConsensusTime()), - fields.PostConsensusTime(r.metrics.GetPostConsensusTime()), - zap.String("block_root", hex.EncodeToString(msg.Message.Aggregate.Data.BeaconBlockRoot[:])), - ) + if err := r.GetBeaconNode().SubmitSignedAggregateSelectionProof(msg); err != nil { r.metrics.RoleSubmissionFailed() logger.Error("❌ could not submit to Beacon chain reconstructed contribution and proof", @@ -231,7 +222,6 @@ func (r 
*AggregatorRunner) ProcessPostConsensus(logger *zap.Logger, signedMsg *s return errors.Wrap(err, "could not submit to Beacon chain reconstructed signed aggregate") } - endSubmission() r.metrics.EndDutyFullFlow(r.GetState().RunningInstance.State.Round) r.metrics.RoleSubmitted() diff --git a/protocol/v2/ssv/runner/committee.go b/protocol/v2/ssv/runner/committee.go index e705911476..6ca1e6d04b 100644 --- a/protocol/v2/ssv/runner/committee.go +++ b/protocol/v2/ssv/runner/committee.go @@ -7,8 +7,6 @@ import ( "strconv" "time" - "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon" - "github.com/attestantio/go-eth2-client/spec/altair" "github.com/attestantio/go-eth2-client/spec/phase0" ssz "github.com/ferranbt/fastssz" @@ -20,6 +18,8 @@ import ( "go.uber.org/zap" "github.com/ssvlabs/ssv/logging/fields" + "github.com/ssvlabs/ssv/networkconfig" + "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon" "github.com/ssvlabs/ssv/protocol/v2/qbft/controller" ) @@ -34,6 +34,7 @@ import ( type CommitteeRunner struct { BaseRunner *BaseRunner + domain spectypes.DomainType beacon beacon.BeaconNode network specqbft.Network signer types.BeaconSigner @@ -42,12 +43,15 @@ type CommitteeRunner struct { stoppedValidators map[spectypes.ValidatorPK]struct{} + submittedDuties map[types.BeaconRole]map[phase0.ValidatorIndex]struct{} + started time.Time consensusDone time.Time postStarted time.Time } -func NewCommitteeRunner(beaconNetwork types.BeaconNetwork, +func NewCommitteeRunner( + networkConfig networkconfig.NetworkConfig, share map[phase0.ValidatorIndex]*types.Share, qbftController *controller.Controller, beacon beacon.BeaconNode, @@ -59,21 +63,29 @@ func NewCommitteeRunner(beaconNetwork types.BeaconNetwork, return &CommitteeRunner{ BaseRunner: &BaseRunner{ RunnerRoleType: types.RoleCommittee, - BeaconNetwork: beaconNetwork, + BeaconNetwork: networkConfig.Beacon.GetBeaconNetwork(), Share: share, QBFTController: qbftController, }, + domain: networkConfig.Domain, beacon: beacon, network: network, signer: signer, operatorSigner: operatorSigner, valCheck: valCheck, stoppedValidators: make(map[spectypes.ValidatorPK]struct{}), + submittedDuties: make(map[spectypes.BeaconRole]map[phase0.ValidatorIndex]struct{}), } } func (cr *CommitteeRunner) StartNewDuty(logger *zap.Logger, duty spectypes.Duty, quorum uint64) error { - return cr.BaseRunner.baseStartNewDuty(logger, cr, duty, quorum) + err := cr.BaseRunner.baseStartNewDuty(logger, cr, duty, quorum) + if err != nil { + return err + } + cr.submittedDuties[types.BNRoleAttester] = make(map[phase0.ValidatorIndex]struct{}) + cr.submittedDuties[types.BNRoleSyncCommittee] = make(map[phase0.ValidatorIndex]struct{}) + return nil } func (cr *CommitteeRunner) Encode() ([]byte, error) { @@ -98,6 +110,68 @@ func (cr *CommitteeRunner) GetRoot() ([32]byte, error) { return ret, nil } +func (cr *CommitteeRunner) MarshalJSON() ([]byte, error) { + type CommitteeAlias struct { + BaseRunner *BaseRunner + beacon beacon.BeaconNode + network specqbft.Network + signer types.BeaconSigner + operatorSigner types.OperatorSigner + valCheck specqbft.ProposedValueCheckF + } + + // Create object and marshal + alias := &CommitteeAlias{ + BaseRunner: cr.BaseRunner, + beacon: cr.beacon, + network: cr.network, + signer: cr.signer, + operatorSigner: cr.operatorSigner, + valCheck: cr.valCheck, + } + + byts, err := json.Marshal(alias) + + return byts, err +} + +func (cr *CommitteeRunner) UnmarshalJSON(data []byte) error { + type CommitteeAlias struct { + BaseRunner *BaseRunner + beacon beacon.BeaconNode + 
network specqbft.Network + signer types.BeaconSigner + operatorSigner types.OperatorSigner + valCheck specqbft.ProposedValueCheckF + // + //stoppedValidators map[spectypes.ValidatorPK]struct{} + // + //started time.Time + //consensusDone time.Time + //postStarted time.Time + } + + // Unmarshal the JSON data into the auxiliary struct + aux := &CommitteeAlias{} + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + // Assign fields + cr.BaseRunner = aux.BaseRunner + cr.beacon = aux.beacon + cr.network = aux.network + cr.signer = aux.signer + cr.operatorSigner = aux.operatorSigner + cr.valCheck = aux.valCheck + //cr.stoppedValidators = aux.stoppedValidators + //cr.started = aux.started + //cr.consensusDone = aux.consensusDone + //cr.postStarted = aux.postStarted + + return nil +} + func (cr *CommitteeRunner) GetBaseRunner() *BaseRunner { return cr.BaseRunner } @@ -154,7 +228,11 @@ func (cr *CommitteeRunner) ProcessConsensus(logger *zap.Logger, msg *types.Signe switch duty.Type { case types.BNRoleAttester: attestationData := constructAttestationData(beaconVote, duty) - + err = cr.GetSigner().IsAttestationSlashable(cr.GetBaseRunner().Share[duty.ValidatorIndex].SharePubKey, + attestationData) + if err != nil { + return errors.Wrap(err, "attempting to sign slashable attestation data") + } partialMsg, err := cr.BaseRunner.signBeaconObject(cr, duty, attestationData, duty.DutySlot(), types.DomainAttester) if err != nil { @@ -189,9 +267,11 @@ func (cr *CommitteeRunner) ProcessConsensus(logger *zap.Logger, msg *types.Signe ssvMsg := &types.SSVMessage{ MsgType: types.SSVPartialSignatureMsgType, - //TODO: The Domain will be updated after new Domain PR... Will be created after this PR is merged - MsgID: types.NewMsgID(types.GenesisMainnet, cr.GetBaseRunner().QBFTController.CommitteeMember.CommitteeID[:], - cr.BaseRunner.RunnerRoleType), + MsgID: types.NewMsgID( + cr.domain, + cr.GetBaseRunner().QBFTController.CommitteeMember.CommitteeID[:], + cr.BaseRunner.RunnerRoleType, + ), } ssvMsg.Data, err = postConsensusMsg.Encode() if err != nil { @@ -229,6 +309,13 @@ func (cr *CommitteeRunner) ProcessPostConsensus(logger *zap.Logger, signedMsg *t for i, msg := range signedMsg.Messages { indices[i] = int(msg.ValidatorIndex) } + + // Get unique roots to avoid repetition + rootSet := make(map[[32]byte]struct{}) + for _, root := range roots { + rootSet[root] = struct{}{} + } + logger.Debug("got post consensus", zap.Bool("quorum", quorum), fields.Slot(cr.BaseRunner.State.StartingDuty.DutySlot()), @@ -251,14 +338,32 @@ func (cr *CommitteeRunner) ProcessPostConsensus(logger *zap.Logger, signedMsg *t zap.String("total_consensus_time", strconv.FormatFloat(totalDuration.Seconds(), 'f', 5, 64)), } + // Get validator-root maps for attestations and sync committees, and the root-beacon object map attestationMap, committeeMap, beaconObjects, err := cr.expectedPostConsensusRootsAndBeaconObjects() if err != nil { return errors.Wrap(err, "could not get expected post consensus roots and beacon objects") } - for _, root := range roots { + var anyErr error + attestationsToSubmit := make(map[phase0.ValidatorIndex]*phase0.Attestation) + syncCommitteeMessagesToSubmit := make(map[phase0.ValidatorIndex]*altair.SyncCommitteeMessage) + + // For each root that got at least one quorum, find the duties associated to it and try to submit + for root := range rootSet { + + // Get validators related to the given root role, validators, found := findValidators(root, attestationMap, committeeMap) - // TODO: (Alan) revert? 
+ + if !found { + // Check if duty has terminated (runner has submitted for all duties) + if cr.HasSubmittedAllBeaconDuties(attestationMap, committeeMap) { + cr.BaseRunner.State.Finished = true + } + // All roots have quorum, so if we can't find validators for a root, it means we have a bug + // We assume it is safe to stop due to honest majority assumption + return errors.New("could not find validators for root") + } + logger.Debug("found validators for root", fields.Slot(cr.BaseRunner.State.StartingDuty.DutySlot()), zap.String("role", role.String()), @@ -266,15 +371,21 @@ func (cr *CommitteeRunner) ProcessPostConsensus(logger *zap.Logger, signedMsg *t zap.Any("validators", validators), ) - if !found { - // TODO error? - continue - } for _, validator := range validators { - validator := validator + + // Skip if no quorum - We know that a root has quorum but not necessarily for the validator + if !cr.BaseRunner.State.PostConsensusContainer.HasQuorum(validator, root) { + continue + } + // Skip if already submitted + if cr.HasSubmitted(role, validator) { + continue + } + + //validator := validator + // Reconstruct signature share := cr.BaseRunner.Share[validator] pubKey := share.ValidatorPubKey - vlogger := logger.With(zap.Int("validator_index", int(validator)), zap.String("pubkey", hex.EncodeToString(pubKey[:]))) vlogger = vlogger.With(durationFields...) @@ -291,80 +402,118 @@ func (cr *CommitteeRunner) ProcessPostConsensus(logger *zap.Logger, signedMsg *t fields.Slot(cr.BaseRunner.State.StartingDuty.DutySlot()), zap.Error(err), ) - // TODO: @GalRogozinski - // return errors.Wrap(err, "got post-consensus quorum but it has invalid signatures") + + anyErr = errors.Wrap(err, "got post-consensus quorum but it has invalid signatures") continue } specSig := phase0.BLSSignature{} copy(specSig[:], sig) - if role == types.BNRoleAttester { - att := beaconObjects[BeaconObjectID{Root: root, ValidatorIndex: validator}].(*phase0.Attestation) - att.Signature = specSig - // TODO: revert log - adr, err := att.Data.HashTreeRoot() - if err != nil { - return errors.Wrap(err, "failed to hash attestation data") - } - vlogger.Debug("submitting attestation", - zap.Any("attestation", att), - zap.String("attestation_data_root", hex.EncodeToString(adr[:])), - zap.String("signing_root", hex.EncodeToString(root[:])), - zap.String("signature", hex.EncodeToString(att.Signature[:])), - ) + // Get the beacon object related to root + if _, exists := beaconObjects[validator]; !exists { + anyErr = errors.Wrap(err, "could not find beacon object for validator") + continue + } + if _, exists := beaconObjects[validator][root]; !exists { + anyErr = errors.Wrap(err, "could not find beacon object for validator") + continue + } + + sszObject := beaconObjects[validator][root] - // broadcast - // TODO: (Alan) bulk submit instead of goroutine? 
(at least properly manage goroutines with wg) - go func() { - start := time.Now() - if err := cr.beacon.SubmitAttestations([]*phase0.Attestation{att}); err != nil { - vlogger.Error("could not submit to Beacon chain reconstructed attestation", - fields.Slot(att.Data.Slot), - zap.Error(err), - ) - - // TODO: @GalRogozinski - // return errors.Wrap(err, "could not submit to Beacon chain reconstructed attestation") - // continue - return - } - vlogger.Info("✅ successfully submitted attestation", - zap.String("block_root", hex.EncodeToString(att.Data.BeaconBlockRoot[:])), - fields.SubmissionTime(time.Since(start)), - fields.Height(cr.BaseRunner.QBFTController.Height), - fields.Round(cr.BaseRunner.State.RunningInstance.State.Round), - ) - }() - // TODO: like AttesterRunner - } else if role == types.BNRoleSyncCommittee { - syncMsg := beaconObjects[BeaconObjectID{Root: root, ValidatorIndex: validator}].(*altair.SyncCommitteeMessage) + // Store objects for multiple submission + if role == types.BNRoleSyncCommittee { + syncMsg := sszObject.(*altair.SyncCommitteeMessage) + // Insert signature syncMsg.Signature = specSig - // Broadcast - // TODO: (Alan) bulk submit instead of goroutine? - go func() { - start := time.Now() - if err := cr.beacon.SubmitSyncMessages([]*altair.SyncCommitteeMessage{syncMsg}); err != nil { - vlogger.Error("could not submit to Beacon chain reconstructed signed sync committee", - fields.Slot(syncMsg.Slot), - zap.Error(err), - ) - // TODO: @GalRogozinski - // return errors.Wrap(err, "could not submit to Beacon chain reconstructed signed sync committee") - // continue - return - } - vlogger.Debug("📢 submitted sync committee message", - fields.SubmissionTime(time.Since(start)), - fields.Slot(syncMsg.Slot), - ) - }() + + syncCommitteeMessagesToSubmit[validator] = syncMsg + + } else if role == types.BNRoleAttester { + att := sszObject.(*phase0.Attestation) + // Insert signature + att.Signature = specSig + + attestationsToSubmit[validator] = att } } } - cr.BaseRunner.State.Finished = true + // Submit multiple attestations + attestations := make([]*phase0.Attestation, 0) + for _, att := range attestationsToSubmit { + attestations = append(attestations, att) + } + if err := cr.beacon.SubmitAttestations(attestations); err != nil { + return errors.Wrap(err, "could not submit to Beacon chain reconstructed attestation") + } + // Record successful submissions + for validator := range attestationsToSubmit { + cr.RecordSubmission(types.BNRoleAttester, validator) + } + + // Submit multiple sync committee + syncCommitteeMessages := make([]*altair.SyncCommitteeMessage, 0) + for _, syncMsg := range syncCommitteeMessagesToSubmit { + syncCommitteeMessages = append(syncCommitteeMessages, syncMsg) + } + if err := cr.beacon.SubmitSyncMessages(syncCommitteeMessages); err != nil { + return errors.Wrap(err, "could not submit to Beacon chain reconstructed signed sync committee") + } + // Record successful submissions + for validator := range syncCommitteeMessagesToSubmit { + cr.RecordSubmission(types.BNRoleSyncCommittee, validator) + } + + if anyErr != nil { + return anyErr + } + + // Check if duty has terminated (runner has submitted for all duties) + if cr.HasSubmittedAllBeaconDuties(attestationMap, committeeMap) { + cr.BaseRunner.State.Finished = true + } return nil } +// HasSubmittedAllBeaconDuties -- Returns true if the runner has done submissions for all validators for the given slot +func (cr *CommitteeRunner) HasSubmittedAllBeaconDuties(attestationMap map[phase0.ValidatorIndex][32]byte, 
syncCommitteeMap map[phase0.ValidatorIndex][32]byte) bool { + // Expected total + expectedTotalSubmissions := len(attestationMap) + len(syncCommitteeMap) + + totalSubmissions := 0 + + // Add submitted attestation duties + for valIdx := range attestationMap { + if cr.HasSubmitted(types.BNRoleAttester, valIdx) { + totalSubmissions++ + } + } + // Add submitted sync committee duties + for valIdx := range syncCommitteeMap { + if cr.HasSubmitted(types.BNRoleSyncCommittee, valIdx) { + totalSubmissions++ + } + } + return totalSubmissions >= expectedTotalSubmissions +} + +// RecordSubmission -- Records a submission for the (role, validator index, slot) tuple +func (cr *CommitteeRunner) RecordSubmission(role types.BeaconRole, valIdx phase0.ValidatorIndex) { + if _, ok := cr.submittedDuties[role]; !ok { + cr.submittedDuties[role] = make(map[phase0.ValidatorIndex]struct{}) + } + cr.submittedDuties[role][valIdx] = struct{}{} +} + +// HasSubmitted -- Returns true if there is a record of submission for the (role, validator index, slot) tuple +func (cr *CommitteeRunner) HasSubmitted(role types.BeaconRole, valIdx phase0.ValidatorIndex) bool { + if _, ok := cr.submittedDuties[role]; !ok { + return false + } + _, ok := cr.submittedDuties[role][valIdx] + return ok +} + func findValidators( expectedRoot [32]byte, attestationMap map[phase0.ValidatorIndex][32]byte, @@ -383,9 +532,12 @@ func findValidators( // look for the expectedRoot in committeeMap for validator, root := range committeeMap { if root == expectedRoot { - return types.BNRoleSyncCommittee, []phase0.ValidatorIndex{validator}, true + validators = append(validators, validator) } } + if len(validators) > 0 { + return types.BNRoleSyncCommittee, validators, true + } return types.BNRoleUnknown, nil, false } @@ -400,19 +552,14 @@ func (cr CommitteeRunner) expectedPostConsensusRootsAndDomain() ([]ssz.HashRoot, return []ssz.HashRoot{}, types.DomainAttester, nil } -type BeaconObjectID struct { - Root [32]byte - ValidatorIndex phase0.ValidatorIndex -} - func (cr *CommitteeRunner) expectedPostConsensusRootsAndBeaconObjects() ( attestationMap map[phase0.ValidatorIndex][32]byte, syncCommitteeMap map[phase0.ValidatorIndex][32]byte, - beaconObjects map[BeaconObjectID]ssz.HashRoot, error error, + beaconObjects map[phase0.ValidatorIndex]map[[32]byte]ssz.HashRoot, error error, ) { attestationMap = make(map[phase0.ValidatorIndex][32]byte) syncCommitteeMap = make(map[phase0.ValidatorIndex][32]byte) - beaconObjects = make(map[BeaconObjectID]ssz.HashRoot) + beaconObjects = make(map[phase0.ValidatorIndex]map[[32]byte]ssz.HashRoot) duty := cr.BaseRunner.State.StartingDuty // TODO DecidedValue should be interface?? 
beaconVoteData := cr.BaseRunner.State.DecidedValue @@ -455,17 +602,15 @@ func (cr *CommitteeRunner) expectedPostConsensusRootsAndBeaconObjects() ( // Add to map attestationMap[beaconDuty.ValidatorIndex] = root - beaconObjects[BeaconObjectID{Root: root, ValidatorIndex: beaconDuty.ValidatorIndex}] = unSignedAtt + if _, ok := beaconObjects[beaconDuty.ValidatorIndex]; !ok { + beaconObjects[beaconDuty.ValidatorIndex] = make(map[[32]byte]ssz.HashRoot) + } + beaconObjects[beaconDuty.ValidatorIndex][root] = unSignedAtt case types.BNRoleSyncCommittee: - // Block root - blockRoot := types.SSZBytes(beaconVote.BlockRoot[:]) - blockRootSlice := [32]byte{} - copy(blockRootSlice[:], blockRoot) - // Sync committee beacon object syncMsg := &altair.SyncCommitteeMessage{ Slot: slot, - BeaconBlockRoot: phase0.Root(blockRootSlice), + BeaconBlockRoot: beaconVote.BlockRoot, ValidatorIndex: beaconDuty.ValidatorIndex, } @@ -474,6 +619,8 @@ func (cr *CommitteeRunner) expectedPostConsensusRootsAndBeaconObjects() ( if err != nil { continue } + // Eth root + blockRoot := types.SSZBytes(beaconVote.BlockRoot[:]) root, err := types.ComputeETHSigningRoot(blockRoot, domain) if err != nil { continue @@ -481,15 +628,19 @@ func (cr *CommitteeRunner) expectedPostConsensusRootsAndBeaconObjects() ( // Set root and beacon object syncCommitteeMap[beaconDuty.ValidatorIndex] = root - beaconObjects[BeaconObjectID{Root: root, ValidatorIndex: beaconDuty.ValidatorIndex}] = syncMsg + if _, ok := beaconObjects[beaconDuty.ValidatorIndex]; !ok { + beaconObjects[beaconDuty.ValidatorIndex] = make(map[[32]byte]ssz.HashRoot) + } + beaconObjects[beaconDuty.ValidatorIndex][root] = syncMsg } } return attestationMap, syncCommitteeMap, beaconObjects, nil } func (cr *CommitteeRunner) executeDuty(logger *zap.Logger, duty types.Duty) error { + slot := duty.DutySlot() //TODO committeeIndex is 0, is this correct? 
- attData, _, err := cr.GetBeaconNode().GetAttestationData(duty.DutySlot(), 0) + attData, _, err := cr.GetBeaconNode().GetAttestationData(slot, 0) if err != nil { return errors.Wrap(err, "failed to get attestation data") } diff --git a/protocol/v2/ssv/runner/proposer.go b/protocol/v2/ssv/runner/proposer.go index 10c6ff5e98..5396d0de3c 100644 --- a/protocol/v2/ssv/runner/proposer.go +++ b/protocol/v2/ssv/runner/proposer.go @@ -103,12 +103,10 @@ func (r *ProposerRunner) ProcessPreConsensus(logger *zap.Logger, signedMsg *spec r.BaseRunner.FallBackAndVerifyEachSignature(r.GetState().PreConsensusContainer, root, r.GetShare().Committee, r.GetShare().ValidatorIndex) return errors.Wrap(err, "got pre-consensus quorum but it has invalid signatures") } - r.metrics.EndPreConsensus() logger.Debug("🧩 reconstructed partial RANDAO signatures", zap.Uint64s("signers", getPreConsensusSigners(r.GetState(), root)), fields.PreConsensusTime(r.metrics.GetPreConsensusTime())) - r.metrics.StartBeaconData() start := time.Now() duty = r.GetState().StartingDuty.(*spectypes.BeaconDuty) obj, ver, err := r.GetBeaconNode().GetBeaconBlock(duty.Slot, r.GetShare().Graffiti, fullSig) diff --git a/protocol/v2/ssv/runner/runner.go b/protocol/v2/ssv/runner/runner.go index d4d3de4d64..046dc6caaa 100644 --- a/protocol/v2/ssv/runner/runner.go +++ b/protocol/v2/ssv/runner/runner.go @@ -1,6 +1,7 @@ package runner import ( + "encoding/json" "sync" "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon" @@ -66,6 +67,39 @@ type BaseRunner struct { highestDecidedSlot phase0.Slot } +func (b *BaseRunner) Encode() ([]byte, error) { + return json.Marshal(b) +} + +func (b *BaseRunner) Decode(data []byte) error { + return json.Unmarshal(data, &b) +} + +func (b *BaseRunner) MarshalJSON() ([]byte, error) { + type BaseRunnerAlias struct { + State *State + Share map[phase0.ValidatorIndex]*spectypes.Share + QBFTController *controller.Controller + BeaconNetwork spectypes.BeaconNetwork + RunnerRoleType spectypes.RunnerRole + highestDecidedSlot phase0.Slot + } + + // Create object and marshal + alias := &BaseRunnerAlias{ + State: b.State, + Share: b.Share, + QBFTController: b.QBFTController, + BeaconNetwork: b.BeaconNetwork, + RunnerRoleType: b.RunnerRoleType, + highestDecidedSlot: b.highestDecidedSlot, + } + + byts, err := json.Marshal(alias) + + return byts, err +} + // SetHighestDecidedSlot set highestDecidedSlot for base runner func (b *BaseRunner) SetHighestDecidedSlot(slot phase0.Slot) { b.highestDecidedSlot = slot diff --git a/protocol/v2/ssv/runner/runner_state.go b/protocol/v2/ssv/runner/runner_state.go index 16e40bb24a..eb5579a6d4 100644 --- a/protocol/v2/ssv/runner/runner_state.go +++ b/protocol/v2/ssv/runner/runner_state.go @@ -3,8 +3,8 @@ package runner import ( "crypto/sha256" "encoding/json" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/pkg/errors" specssv "github.com/ssvlabs/ssv-spec/ssv" spectypes "github.com/ssvlabs/ssv-spec/types" @@ -20,7 +20,7 @@ type State struct { RunningInstance *instance.Instance DecidedValue []byte //spectypes.Encoder // CurrentDuty is the duty the node pulled locally from the beacon node, might be different from decided duty - StartingDuty spectypes.Duty + StartingDuty spectypes.Duty `json:"StartingDuty,omitempty"` // flags Finished bool // Finished marked true when there is a full successful cycle (pre, consensus and post) with quorum } @@ -64,3 +64,73 @@ func (pcs *State) Encode() ([]byte, error) { func (pcs *State) Decode(data 
[]byte) error { return json.Unmarshal(data, &pcs) } + +func (pcs *State) MarshalJSON() ([]byte, error) { + // Create alias without duty + type StateAlias struct { + PreConsensusContainer *specssv.PartialSigContainer + PostConsensusContainer *specssv.PartialSigContainer + RunningInstance *instance.Instance + DecidedValue []byte + Finished bool + BeaconDuty *spectypes.BeaconDuty `json:"BeaconDuty,omitempty"` + CommitteeDuty *spectypes.CommitteeDuty `json:"CommitteeDuty,omitempty"` + } + + alias := &StateAlias{ + PreConsensusContainer: pcs.PreConsensusContainer, + PostConsensusContainer: pcs.PostConsensusContainer, + RunningInstance: pcs.RunningInstance, + DecidedValue: pcs.DecidedValue, + Finished: pcs.Finished, + } + + if pcs.StartingDuty != nil { + if BeaconDuty, ok := pcs.StartingDuty.(*spectypes.BeaconDuty); ok { + alias.BeaconDuty = BeaconDuty + } else if committeeDuty, ok := pcs.StartingDuty.(*spectypes.CommitteeDuty); ok { + alias.CommitteeDuty = committeeDuty + } else { + return nil, errors.New("can't marshal because BaseRunner.State.StartingDuty isn't BeaconDuty or CommitteeDuty") + } + } + byts, err := json.Marshal(alias) + + return byts, err +} + +func (pcs *State) UnmarshalJSON(data []byte) error { + + // Create alias without duty + type StateAlias struct { + PreConsensusContainer *specssv.PartialSigContainer + PostConsensusContainer *specssv.PartialSigContainer + RunningInstance *instance.Instance + DecidedValue []byte + Finished bool + BeaconDuty *spectypes.BeaconDuty `json:"BeaconDuty,omitempty"` + CommitteeDuty *spectypes.CommitteeDuty `json:"CommitteeDuty,omitempty"` + } + + aux := &StateAlias{} + + // Unmarshal the JSON data into the auxiliary struct + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + pcs.PreConsensusContainer = aux.PreConsensusContainer + pcs.PostConsensusContainer = aux.PostConsensusContainer + pcs.RunningInstance = aux.RunningInstance + pcs.DecidedValue = aux.DecidedValue + pcs.Finished = aux.Finished + + // Determine which type of duty was marshaled + if aux.BeaconDuty != nil { + pcs.StartingDuty = aux.BeaconDuty + } else if aux.CommitteeDuty != nil { + pcs.StartingDuty = aux.CommitteeDuty + } + + return nil +} diff --git a/protocol/v2/ssv/runner/sync_committee_aggregator.go b/protocol/v2/ssv/runner/sync_committee_aggregator.go index c64dd7a959..e3ee0176a4 100644 --- a/protocol/v2/ssv/runner/sync_committee_aggregator.go +++ b/protocol/v2/ssv/runner/sync_committee_aggregator.go @@ -3,7 +3,6 @@ package runner import ( "bytes" "crypto/sha256" - "encoding/hex" "encoding/json" "time" @@ -269,13 +268,6 @@ func (r *SyncCommitteeAggregatorRunner) ProcessPostConsensus(logger *zap.Logger, } submissionEnd := r.metrics.StartBeaconSubmission() - logger = logger.With( - zap.Uint64s("signers", getPostConsensusSigners(r.GetState(), root)), - fields.PreConsensusTime(r.metrics.GetPreConsensusTime()), - fields.ConsensusTime(r.metrics.GetConsensusTime()), - fields.PostConsensusTime(r.metrics.GetPostConsensusTime()), - zap.String("block_root", hex.EncodeToString(contribAndProof.Contribution.BeaconBlockRoot[:])), - ) if err := r.GetBeaconNode().SubmitSignedContributionAndProof(signedContribAndProof); err != nil { r.metrics.RoleSubmissionFailed() logger.Error("❌ could not submit to Beacon chain reconstructed contribution and proof", diff --git a/protocol/v2/ssv/runner/voluntary_exit.go b/protocol/v2/ssv/runner/voluntary_exit.go index a401fde61d..d9f48fc30c 100644 --- a/protocol/v2/ssv/runner/voluntary_exit.go +++ 
b/protocol/v2/ssv/runner/voluntary_exit.go @@ -4,7 +4,6 @@ import ( "crypto/sha256" "encoding/hex" "encoding/json" - "time" "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon" @@ -90,19 +89,13 @@ func (r *VoluntaryExitRunner) ProcessPreConsensus(logger *zap.Logger, signedMsg } specSig := phase0.BLSSignature{} copy(specSig[:], fullSig) - r.metrics.EndPreConsensus() // create SignedVoluntaryExit using VoluntaryExit created on r.executeDuty() and reconstructed signature signedVoluntaryExit := &phase0.SignedVoluntaryExit{ Message: r.voluntaryExit, Signature: specSig, } - submissionTime := time.Now() if err := r.beacon.SubmitVoluntaryExit(signedVoluntaryExit); err != nil { - logger.Error("failed to submit voluntary exit", - fields.SubmissionTime(time.Since(submissionTime)), - fields.QuorumTime(r.metrics.GetPreConsensusTime()), - zap.Error(err)) return errors.Wrap(err, "could not submit voluntary exit") } @@ -110,8 +103,6 @@ func (r *VoluntaryExitRunner) ProcessPreConsensus(logger *zap.Logger, signedMsg fields.Epoch(r.voluntaryExit.Epoch), zap.Uint64("validator_index", uint64(r.voluntaryExit.ValidatorIndex)), zap.String("signature", hex.EncodeToString(specSig[:])), - fields.SubmissionTime(time.Since(submissionTime)), - fields.QuorumTime(r.metrics.GetPreConsensusTime()), ) r.GetState().Finished = true diff --git a/protocol/v2/ssv/spectest/committee_msg_processing_type.go b/protocol/v2/ssv/spectest/committee_msg_processing_type.go new file mode 100644 index 0000000000..9c342ea897 --- /dev/null +++ b/protocol/v2/ssv/spectest/committee_msg_processing_type.go @@ -0,0 +1,194 @@ +package spectest + +import ( + "encoding/hex" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/pkg/errors" + spectypes "github.com/ssvlabs/ssv-spec/types" + spectestingutils "github.com/ssvlabs/ssv-spec/types/testingutils" + typescomparable "github.com/ssvlabs/ssv-spec/types/testingutils/comparable" + "github.com/ssvlabs/ssv/integration/qbft/tests" + "github.com/ssvlabs/ssv/logging" + "github.com/ssvlabs/ssv/protocol/v2/ssv/queue" + "github.com/ssvlabs/ssv/protocol/v2/ssv/validator" + protocoltesting "github.com/ssvlabs/ssv/protocol/v2/testing" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/ssvlabs/ssv-spec/ssv" + "github.com/ssvlabs/ssv-spec/types" +) + +type CommitteeSpecTest struct { + Name string + Committee *validator.Committee + Input []interface{} // Can be a types.Duty or a *types.SignedSSVMessage + PostDutyCommitteeRoot string + PostDutyCommittee spectypes.Root `json:"-"` // Field is ignored by encoding/json + OutputMessages []*types.PartialSignatureMessages + BeaconBroadcastedRoots []string + ExpectedError string +} + +func (test *CommitteeSpecTest) TestName() string { + return test.Name +} + +// RunAsPartOfMultiTest runs the test as part of a MultiCommitteeSpecTest +func (test *CommitteeSpecTest) RunAsPartOfMultiTest(t *testing.T) { + logger := logging.TestLogger(t) + lastErr := test.runPreTesting(logger) + + if len(test.ExpectedError) != 0 { + require.EqualError(t, lastErr, test.ExpectedError) + } else { + require.NoError(t, lastErr) + } + + broadcastedMsgs := make([]*types.SignedSSVMessage, 0) + broadcastedRoots := make([]phase0.Root, 0) + for _, runner := range test.Committee.Runners { + network := runner.GetNetwork().(*spectestingutils.TestingNetwork) + beaconNetwork := runner.GetBeaconNode().(*tests.TestingBeaconNodeWrapped) + broadcastedMsgs = append(broadcastedMsgs, network.BroadcastedMsgs...) 
+		broadcastedRoots = append(broadcastedRoots, beaconNetwork.GetBroadcastedRoots()...)
+	}
+
+	// test output message (in asynchronous order)
+	spectestingutils.ComparePartialSignatureOutputMessagesInAsynchronousOrder(t, test.OutputMessages, broadcastedMsgs, test.Committee.Operator.Committee)
+
+	// test beacon broadcasted msgs
+	spectestingutils.CompareBroadcastedBeaconMsgs(t, test.BeaconBroadcastedRoots, broadcastedRoots)
+
+	// post root
+	postRoot, err := test.Committee.GetRoot()
+	require.NoError(t, err)
+
+	if test.PostDutyCommitteeRoot != hex.EncodeToString(postRoot[:]) {
+		diff := dumpState(t, test.Name, test.Committee, test.PostDutyCommittee)
+		t.Errorf("post committee state not equal %s", diff)
+	}
+}
+
+// Run as an individual test
+func (test *CommitteeSpecTest) Run(t *testing.T) {
+	test.overrideStateComparison(t)
+	test.RunAsPartOfMultiTest(t)
+}
+
+func (test *CommitteeSpecTest) runPreTesting(logger *zap.Logger) error {
+
+	var lastErr error
+
+	for _, input := range test.Input {
+
+		var err error
+		switch input := input.(type) {
+		case spectypes.Duty:
+			err = test.Committee.StartDuty(logger, input.(*spectypes.CommitteeDuty))
+			if err != nil {
+				lastErr = err
+			}
+		case *spectypes.SignedSSVMessage:
+			msg, err := queue.DecodeSignedSSVMessage(input)
+			if err != nil {
+				return errors.Wrap(err, "failed to decode SignedSSVMessage")
+			}
+			err = test.Committee.ProcessMessage(logger, msg)
+			if err != nil {
+				lastErr = err
+			}
+		default:
+			panic("input is neither a duty nor a SignedSSVMessage")
+		}
+	}
+
+	return lastErr
+}
+
+func (test *CommitteeSpecTest) overrideStateComparison(t *testing.T) {
+	strType := reflect.TypeOf(test).String()
+	strType = strings.Replace(strType, "spectest.", "committee.", 1)
+	overrideStateComparisonCommitteeSpecTest(t, test, test.Name, strType)
+}
+
+func (test *CommitteeSpecTest) GetPostState(logger *zap.Logger) (interface{}, error) {
+	lastErr := test.runPreTesting(logger)
+	if lastErr != nil && len(test.ExpectedError) == 0 {
+		return nil, lastErr
+	}
+
+	return test.Committee, nil
+}
+
+type MultiCommitteeSpecTest struct {
+	Name  string
+	Tests []*CommitteeSpecTest
+}
+
+func (tests *MultiCommitteeSpecTest) TestName() string {
+	return tests.Name
+}
+
+func (tests *MultiCommitteeSpecTest) Run(t *testing.T) {
+	tests.overrideStateComparison(t)
+
+	for _, test := range tests.Tests {
+		t.Run(test.TestName(), func(t *testing.T) {
+			test.RunAsPartOfMultiTest(t)
+		})
+	}
+}
+
+// overrideStateComparison overrides the post state comparison for all tests in the multi test
+func (tests *MultiCommitteeSpecTest) overrideStateComparison(t *testing.T) {
+	testsName := strings.ReplaceAll(tests.TestName(), " ", "_")
+	for _, test := range tests.Tests {
+		path := filepath.Join(testsName, test.TestName())
+		strType := reflect.TypeOf(tests).String()
+		strType = strings.Replace(strType, "spectest.", "committee.", 1)
+		overrideStateComparisonCommitteeSpecTest(t, test, path, strType)
+	}
+}
+
+func (tests *MultiCommitteeSpecTest) GetPostState(logger *zap.Logger) (interface{}, error) {
+	ret := make(map[string]types.Root, len(tests.Tests))
+	for _, test := range tests.Tests {
+		err := test.runPreTesting(logger)
+		if err != nil && test.ExpectedError != err.Error() {
+			return nil, err
+		}
+		ret[test.Name] = test.Committee
+	}
+	return ret, nil
+}
+
+func overrideStateComparisonCommitteeSpecTest(t *testing.T, test *CommitteeSpecTest, name string, testType string) {
+	specCommittee := &ssv.Committee{}
+	specDir, err := protocoltesting.GetSpecDir("", filepath.Join("ssv", "spectest"))
+
require.NoError(t, err) + specCommittee, err = typescomparable.UnmarshalStateComparison(specDir, name, testType, specCommittee) + + require.NoError(t, err) + committee := &validator.Committee{} + committee, err = typescomparable.UnmarshalStateComparison(specDir, name, testType, committee) + require.NoError(t, err) + + committee.Shares = specCommittee.Share + committee.Operator = &specCommittee.CommitteeMember + //for _, r := range committee.Runners { + // r.BaseRunner.BeaconNetwork = spectypes.BeaconTestNetwork + //} + + root, err := committee.GetRoot() + require.NoError(t, err) + + test.PostDutyCommitteeRoot = hex.EncodeToString(root[:]) + + test.PostDutyCommittee = committee +} diff --git a/protocol/v2/ssv/spectest/debug_states.go b/protocol/v2/ssv/spectest/debug_states.go new file mode 100644 index 0000000000..cc639b4d76 --- /dev/null +++ b/protocol/v2/ssv/spectest/debug_states.go @@ -0,0 +1,54 @@ +package spectest + +import ( + "encoding/json" + "fmt" + "os" + "testing" + + "github.com/google/go-cmp/cmp" + spectypes "github.com/ssvlabs/ssv-spec/types" + "github.com/stretchr/testify/require" +) + +var ( + DebugDumpState = false + dumpDir = "./debug" +) + +func dumpState(t *testing.T, + name string, + expect spectypes.Root, + actual spectypes.Root, +) string { + a, err := json.Marshal(expect) + require.NoError(t, err) + b, err := json.Marshal(actual) + require.NoError(t, err) + + // comparing jsons + diff := cmp.Diff(a, b, + cmp.FilterValues(func(x, y []byte) bool { + return json.Valid(x) && json.Valid(y) + }, cmp.Transformer("ParseJSON", func(in []byte) (out interface{}) { + if err := json.Unmarshal(in, &out); err != nil { + panic(err) // should never occur given previous filter to ensure valid JSON + } + return out + }), + ), + ) + + if len(diff) > 0 && DebugDumpState { + logJSON(t, fmt.Sprintf("test_%s_EXPECT", name), expect) + logJSON(t, fmt.Sprintf("test_%s_ACTUAL", name), actual) + } + return diff +} + +func logJSON(t *testing.T, name string, value interface{}) { + bytes, err := json.Marshal(value) + require.NoError(t, err) + err = os.WriteFile(fmt.Sprintf("%s/%s_test_serialized.json", dumpDir, name), bytes, 0644) + require.NoError(t, err) +} diff --git a/protocol/v2/ssv/spectest/msg_processing_type.go b/protocol/v2/ssv/spectest/msg_processing_type.go index efea66b89f..eb5c1c6f85 100644 --- a/protocol/v2/ssv/spectest/msg_processing_type.go +++ b/protocol/v2/ssv/spectest/msg_processing_type.go @@ -1,24 +1,31 @@ package spectest import ( + "context" "encoding/hex" + "encoding/json" "path/filepath" "reflect" "strings" "testing" - "github.com/google/go-cmp/cmp" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/pkg/errors" + specqbft "github.com/ssvlabs/ssv-spec/qbft" spectypes "github.com/ssvlabs/ssv-spec/types" spectestingutils "github.com/ssvlabs/ssv-spec/types/testingutils" typescomparable "github.com/ssvlabs/ssv-spec/types/testingutils/comparable" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - + "github.com/ssvlabs/ssv/integration/qbft/tests" "github.com/ssvlabs/ssv/logging" + "github.com/ssvlabs/ssv/networkconfig" + "github.com/ssvlabs/ssv/protocol/v2/qbft/controller" "github.com/ssvlabs/ssv/protocol/v2/ssv/queue" "github.com/ssvlabs/ssv/protocol/v2/ssv/runner" + ssvprotocoltesting "github.com/ssvlabs/ssv/protocol/v2/ssv/testing" "github.com/ssvlabs/ssv/protocol/v2/ssv/validator" protocoltesting "github.com/ssvlabs/ssv/protocol/v2/testing" + "github.com/stretchr/testify/require" + "go.uber.org/zap" ) type MsgProcessingSpecTest struct { @@ -26,6 
+33,7 @@ type MsgProcessingSpecTest struct {
 	Runner   runner.Runner
 	Duty     spectypes.Duty
 	Messages []*spectypes.SignedSSVMessage
+	DecidedSlashable        bool // DecidedSlashable makes the decided value slashable. Simulates consensus instances running in parallel.
 	PostDutyRunnerStateRoot string
 	PostDutyRunnerState     spectypes.Root `json:"-"` // Field is ignored by encoding/json
 	// OutputMessages compares pre/post signed partial sigs to output. We exclude consensus msgs as it's tested in consensus
@@ -45,8 +53,9 @@ func RunMsgProcessing(t *testing.T, test *MsgProcessingSpecTest) {
 	test.RunAsPartOfMultiTest(t, logger)
 }

-func (test *MsgProcessingSpecTest) runPreTesting(logger *zap.Logger) (*validator.Validator, error) {
+func (test *MsgProcessingSpecTest) runPreTesting(ctx context.Context, logger *zap.Logger) (*validator.Validator, *validator.Committee, error) {
 	var share *spectypes.Share
+	keySetMap := make(map[phase0.ValidatorIndex]*spectestingutils.TestKeySet)
 	if len(test.Runner.GetBaseRunner().Share) == 0 {
 		panic("No share in base runner for tests")
 	}
@@ -54,31 +63,69 @@ func (test *MsgProcessingSpecTest) runPreTesting(logger *zap.Logger) (*validator
 		share = validatorShare
 		break
 	}
-	v := protocoltesting.BaseValidator(logger, spectestingutils.KeySetForShare(share))
-	v.DutyRunners[test.Runner.GetBaseRunner().RunnerRoleType] = test.Runner
-	v.Network = test.Runner.GetNetwork()
-	var lastErr error
-	if !test.DontStartDuty {
-		lastErr = v.StartDuty(logger, test.Duty)
+	for valIdx, validatorShare := range test.Runner.GetBaseRunner().Share {
+		keySetMap[valIdx] = spectestingutils.KeySetForShare(validatorShare)
 	}
-	for _, msg := range test.Messages {
-		dmsg, err := queue.DecodeSignedSSVMessage(msg)
-		if err != nil {
-			lastErr = err
-			continue
+
+	var v *validator.Validator
+	var c *validator.Committee
+	var lastErr error
+	switch test.Runner.(type) {
+	case *runner.CommitteeRunner:
+		c = baseCommitteeWithRunnerSample(ctx, logger, keySetMap, test.Runner.(*runner.CommitteeRunner))
+
+		if test.DontStartDuty {
+			c.Runners[test.Duty.DutySlot()] = test.Runner.(*runner.CommitteeRunner)
+		} else {
+			lastErr = c.StartDuty(logger, test.Duty.(*spectypes.CommitteeDuty))
 		}
-		err = v.ProcessMessage(logger, dmsg)
-		if err != nil {
-			lastErr = err
+
+		for _, msg := range test.Messages {
+			dmsg, err := wrapSignedSSVMessageToDecodedSSVMessage(msg)
+			if err != nil {
+				lastErr = err
+				continue
+			}
+			err = c.ProcessMessage(logger, dmsg)
+			if err != nil {
+				lastErr = err
+			}
+			if test.DecidedSlashable && IsQBFTProposalMessage(msg) {
+				for _, validatorShare := range test.Runner.GetBaseRunner().Share {
+					test.Runner.GetSigner().(*spectestingutils.TestingKeyManager).AddSlashableDataRoot(validatorShare.
+ SharePubKey, spectestingutils.TestingAttestationDataRoot[:]) + } + } + } + + default: + v = ssvprotocoltesting.BaseValidator(logger, spectestingutils.KeySetForShare(share)) + v.DutyRunners[test.Runner.GetBaseRunner().RunnerRoleType] = test.Runner + v.Network = test.Runner.GetNetwork() + + if !test.DontStartDuty { + lastErr = v.StartDuty(logger, test.Duty) + } + for _, msg := range test.Messages { + dmsg, err := wrapSignedSSVMessageToDecodedSSVMessage(msg) + if err != nil { + lastErr = err + continue + } + err = v.ProcessMessage(logger, dmsg) + if err != nil { + lastErr = err + } } } - return v, lastErr + return v, c, lastErr } func (test *MsgProcessingSpecTest) RunAsPartOfMultiTest(t *testing.T, logger *zap.Logger) { - v, lastErr := test.runPreTesting(logger) + ctx := context.Background() + v, c, lastErr := test.runPreTesting(ctx, logger) if len(test.ExpectedError) != 0 { require.EqualError(t, lastErr, test.ExpectedError) @@ -86,23 +133,44 @@ func (test *MsgProcessingSpecTest) RunAsPartOfMultiTest(t *testing.T, logger *za require.NoError(t, lastErr) } + network := &spectestingutils.TestingNetwork{} + beaconNetwork := tests.NewTestingBeaconNodeWrapped() + var committee []*spectypes.Operator + + switch test.Runner.(type) { + case *runner.CommitteeRunner: + var runnerInstance *runner.CommitteeRunner + for _, runner := range c.Runners { + runnerInstance = runner + break + } + network = runnerInstance.GetNetwork().(*spectestingutils.TestingNetwork) + beaconNetwork = runnerInstance.GetBeaconNode().(*tests.TestingBeaconNodeWrapped) + committee = c.Operator.Committee + default: + network = v.Network.(*spectestingutils.TestingNetwork) + committee = v.Operator.Committee + beaconNetwork = test.Runner.GetBeaconNode() + } + // test output message - test.compareOutputMsgs(t, v) + spectestingutils.ComparePartialSignatureOutputMessages(t, test.OutputMessages, network.BroadcastedMsgs, committee) // test beacon broadcasted msgs - test.compareBroadcastedBeaconMsgs(t) + spectestingutils.CompareBroadcastedBeaconMsgs(t, test.BeaconBroadcastedRoots, beaconNetwork.(*tests.TestingBeaconNodeWrapped).GetBroadcastedRoots()) // post root postRoot, err := test.Runner.GetRoot() require.NoError(t, err) if test.PostDutyRunnerStateRoot != hex.EncodeToString(postRoot[:]) { - logger.Error("post runner state not equal", zap.String("state", cmp.Diff(test.Runner, test.PostDutyRunnerState, cmp.Exporter(func(p reflect.Type) bool { return true })))) + diff := dumpState(t, test.Name, test.Runner, test.PostDutyRunnerState) + logger.Error("post runner state not equal", zap.String("state", diff)) } } func (test *MsgProcessingSpecTest) compareBroadcastedBeaconMsgs(t *testing.T) { - broadcastedRoots := test.Runner.GetBeaconNode().(*spectestingutils.TestingBeaconNode).BroadcastedRoots + broadcastedRoots := test.Runner.GetBeaconNode().(*tests.TestingBeaconNodeWrapped).GetBroadcastedRoots() require.Len(t, broadcastedRoots, len(test.BeaconBroadcastedRoots)) for _, r1 := range test.BeaconBroadcastedRoots { found := false @@ -116,65 +184,6 @@ func (test *MsgProcessingSpecTest) compareBroadcastedBeaconMsgs(t *testing.T) { } } -func (test *MsgProcessingSpecTest) compareOutputMsgs(t *testing.T, v *validator.Validator) { - filterPartialSigs := func(messages []*spectypes.SSVMessage) []*spectypes.SSVMessage { - ret := make([]*spectypes.SSVMessage, 0) - for _, msg := range messages { - if msg.MsgType != spectypes.SSVPartialSignatureMsgType { - continue - } - ret = append(ret, msg) - } - return ret - } - broadcastedSignedMsgs := 
v.Network.(*spectestingutils.TestingNetwork).BroadcastedMsgs - require.NoError(t, spectestingutils.VerifyListOfSignedSSVMessages(broadcastedSignedMsgs, v.Operator.Committee)) - broadcastedMsgs := spectestingutils.ConvertBroadcastedMessagesToSSVMessages(broadcastedSignedMsgs) - broadcastedMsgs = filterPartialSigs(broadcastedMsgs) - require.Len(t, broadcastedMsgs, len(test.OutputMessages)) - index := 0 - for _, msg := range broadcastedMsgs { - if msg.MsgType != spectypes.SSVPartialSignatureMsgType { - continue - } - - msg1 := &spectypes.PartialSignatureMessages{} - require.NoError(t, msg1.Decode(msg.Data)) - msg2 := test.OutputMessages[index] - require.Len(t, msg1.Messages, len(msg2.Messages)) - - // messages are not guaranteed to be in order so we map them and then test all roots to be equal - roots := make(map[string]string) - for i, partialSigMsg2 := range msg2.Messages { - r2, err := partialSigMsg2.GetRoot() - require.NoError(t, err) - if _, found := roots[hex.EncodeToString(r2[:])]; !found { - roots[hex.EncodeToString(r2[:])] = "" - } else { - roots[hex.EncodeToString(r2[:])] = hex.EncodeToString(r2[:]) - } - - partialSigMsg1 := msg1.Messages[i] - r1, err := partialSigMsg1.GetRoot() - require.NoError(t, err) - - if _, found := roots[hex.EncodeToString(r1[:])]; !found { - roots[hex.EncodeToString(r1[:])] = "" - } else { - roots[hex.EncodeToString(r1[:])] = hex.EncodeToString(r1[:]) - } - } - for k, v := range roots { - require.EqualValues(t, k, v, "missing output msg") - } - - // test that slot is correct in broadcasted msg - require.EqualValues(t, msg1.Slot, msg2.Slot, "incorrect broadcasted slot") - - index++ - } -} - func (test *MsgProcessingSpecTest) overrideStateComparison(t *testing.T) { testType := reflect.TypeOf(test).String() testType = strings.Replace(testType, "spectest.", "tests.", 1) @@ -212,3 +221,159 @@ func overrideStateComparison(t *testing.T, test *MsgProcessingSpecTest, name str test.PostDutyRunnerStateRoot = hex.EncodeToString(root[:]) } + +var baseCommitteeWithRunnerSample = func( + ctx context.Context, + logger *zap.Logger, + keySetMap map[phase0.ValidatorIndex]*spectestingutils.TestKeySet, + runnerSample *runner.CommitteeRunner, +) *validator.Committee { + + var keySetSample *spectestingutils.TestKeySet + for _, ks := range keySetMap { + keySetSample = ks + break + } + + shareMap := make(map[phase0.ValidatorIndex]*spectypes.Share) + for valIdx, ks := range keySetMap { + shareMap[valIdx] = spectestingutils.TestingShare(ks, valIdx) + } + + createRunnerF := func(_ phase0.Slot, shareMap map[phase0.ValidatorIndex]*spectypes.Share) *runner.CommitteeRunner { + return runner.NewCommitteeRunner( + networkconfig.TestNetwork, + shareMap, + controller.NewController( + runnerSample.BaseRunner.QBFTController.Identifier, + runnerSample.BaseRunner.QBFTController.CommitteeMember, + runnerSample.BaseRunner.QBFTController.GetConfig(), + false, + ), + runnerSample.GetBeaconNode(), + runnerSample.GetNetwork(), + runnerSample.GetSigner(), + runnerSample.GetOperatorSigner(), + runnerSample.GetValCheckF(), + ).(*runner.CommitteeRunner) + } + + c := validator.NewCommittee( + ctx, + logger, + runnerSample.GetBaseRunner().BeaconNetwork, + spectestingutils.TestingCommitteeMember(keySetSample), + spectestingutils.NewTestingVerifier(), + createRunnerF, + ) + c.Shares = shareMap + + return c +} + +// wrapSignedSSVMessageToDecodedSSVMessage - wraps a SignedSSVMessage to a DecodedSSVMessage to pass the queue.DecodedSSVMessage to ProcessMessage +// In spec it accepts SignedSSVMessage, but in the 
protocol it accepts DecodedSSVMessage
+// Without handling the nil case in tests we get a panic in decodeSignedSSVMessage
+func wrapSignedSSVMessageToDecodedSSVMessage(msg *spectypes.SignedSSVMessage) (*queue.DecodedSSVMessage, error) {
+	var dmsg *queue.DecodedSSVMessage
+	var err error
+
+	if msg.SSVMessage == nil {
+		dmsg = &queue.DecodedSSVMessage{
+			SignedSSVMessage: msg,
+			SSVMessage:       &spectypes.SSVMessage{},
+		}
+		dmsg.SSVMessage.MsgType = spectypes.SSVConsensusMsgType
+	} else {
+		dmsg, err = queue.DecodeSignedSSVMessage(msg)
+	}
+
+	return dmsg, err
+}
+
+// Create alias without duty
+type MsgProcessingSpecTestAlias struct {
+	Name   string
+	Runner runner.Runner
+	// No duty from type types.Duty. It's an interface
+	Messages                []*spectypes.SignedSSVMessage
+	DecidedSlashable        bool
+	PostDutyRunnerStateRoot string
+	PostDutyRunnerState     spectypes.Root `json:"-"` // Field is ignored by encoding/json
+	OutputMessages          []*spectypes.PartialSignatureMessages
+	BeaconBroadcastedRoots  []string
+	DontStartDuty           bool // if set to true will not start a duty for the runner
+	ExpectedError           string
+	BeaconDuty              *spectypes.BeaconDuty    `json:"BeaconDuty,omitempty"`
+	CommitteeDuty           *spectypes.CommitteeDuty `json:"CommitteeDuty,omitempty"`
+}
+
+func (t *MsgProcessingSpecTest) MarshalJSON() ([]byte, error) {
+	alias := &MsgProcessingSpecTestAlias{
+		Name:                    t.Name,
+		Runner:                  t.Runner,
+		Messages:                t.Messages,
+		DecidedSlashable:        t.DecidedSlashable,
+		PostDutyRunnerStateRoot: t.PostDutyRunnerStateRoot,
+		PostDutyRunnerState:     t.PostDutyRunnerState,
+		OutputMessages:          t.OutputMessages,
+		BeaconBroadcastedRoots:  t.BeaconBroadcastedRoots,
+		DontStartDuty:           t.DontStartDuty,
+		ExpectedError:           t.ExpectedError,
+	}
+
+	if t.Duty != nil {
+		if beaconDuty, ok := t.Duty.(*spectypes.BeaconDuty); ok {
+			alias.BeaconDuty = beaconDuty
+		} else if committeeDuty, ok := t.Duty.(*spectypes.CommitteeDuty); ok {
+			alias.CommitteeDuty = committeeDuty
+		} else {
+			return nil, errors.New("can't marshal MsgProcessingSpecTest because t.Duty isn't BeaconDuty or CommitteeDuty")
+		}
+	}
+	byts, err := json.Marshal(alias)
+
+	return byts, err
+}
+
+func (t *MsgProcessingSpecTest) UnmarshalJSON(data []byte) error {
+	aux := &MsgProcessingSpecTestAlias{}
+	aux.Runner = &runner.CommitteeRunner{}
+	// Unmarshal the JSON data into the auxiliary struct
+	if err := json.Unmarshal(data, &aux); err != nil {
+		return err
+	}
+
+	t.Name = aux.Name
+	t.Runner = aux.Runner
+	t.DecidedSlashable = aux.DecidedSlashable
+	t.Messages = aux.Messages
+	t.PostDutyRunnerStateRoot = aux.PostDutyRunnerStateRoot
+	t.PostDutyRunnerState = aux.PostDutyRunnerState
+	t.OutputMessages = aux.OutputMessages
+	t.BeaconBroadcastedRoots = aux.BeaconBroadcastedRoots
+	t.DontStartDuty = aux.DontStartDuty
+	t.ExpectedError = aux.ExpectedError
+
+	// Determine which type of duty was marshaled
+	if aux.BeaconDuty != nil {
+		t.Duty = aux.BeaconDuty
+	} else if aux.CommitteeDuty != nil {
+		t.Duty = aux.CommitteeDuty
+	}
+
+	return nil
+}
+
+// IsQBFTProposalMessage checks if the message is a QBFT proposal message
+func IsQBFTProposalMessage(msg *spectypes.SignedSSVMessage) bool {
+	if msg.SSVMessage.MsgType == spectypes.SSVConsensusMsgType {
+		qbftMsg := specqbft.Message{}
+		err := qbftMsg.Decode(msg.SSVMessage.Data)
+		if err != nil {
+			panic("could not decode message")
+		}
+		return qbftMsg.MsgType == specqbft.ProposalMsgType
+	}
+	return false
+}
diff --git a/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go b/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go
index
d7d4741111..145c544d93 100644 --- a/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go +++ b/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go @@ -23,6 +23,7 @@ type StartNewRunnerDutySpecTest struct { Name string Runner runner.Runner Duty spectypes.Duty + Threshold uint64 PostDutyRunnerStateRoot string PostDutyRunnerState spectypes.Root `json:"-"` // Field is ignored by encoding/json OutputMessages []*spectypes.PartialSignatureMessages @@ -97,10 +98,11 @@ func (test *StartNewRunnerDutySpecTest) RunAsPartOfMultiTest(t *testing.T, logge // post root postRoot, err := test.Runner.GetRoot() require.NoError(t, err) + require.EqualValues(t, test.PostDutyRunnerStateRoot, hex.EncodeToString(postRoot[:])) if test.PostDutyRunnerStateRoot != hex.EncodeToString(postRoot[:]) { - diff := typescomparable.PrintDiff(test.Runner, test.PostDutyRunnerState) + diff := dumpState(t, test.Name, test.Runner, test.PostDutyRunnerState) require.EqualValues(t, test.PostDutyRunnerStateRoot, hex.EncodeToString(postRoot[:]), fmt.Sprintf("post runner state not equal\n%s\n", diff)) } } @@ -173,6 +175,6 @@ func overrideStateComparisonForStartNewRunnerDutySpecTest(t *testing.T, test *St } func (test *StartNewRunnerDutySpecTest) runPreTesting(logger *zap.Logger) error { - err := test.Runner.StartNewDuty(logger, test.Duty) + err := test.Runner.StartNewDuty(logger, test.Duty, test.Threshold) return err } diff --git a/protocol/v2/ssv/spectest/ssv_mapping_test.go b/protocol/v2/ssv/spectest/ssv_mapping_test.go index c3fe189c36..b271034629 100644 --- a/protocol/v2/ssv/spectest/ssv_mapping_test.go +++ b/protocol/v2/ssv/spectest/ssv_mapping_test.go @@ -1,13 +1,18 @@ package spectest import ( + "context" "encoding/json" + "fmt" "os" "reflect" "strings" "testing" + "github.com/attestantio/go-eth2-client/spec/phase0" + specssv "github.com/ssvlabs/ssv-spec/ssv" "github.com/ssvlabs/ssv-spec/ssv/spectest/tests" + "github.com/ssvlabs/ssv-spec/ssv/spectest/tests/committee" "github.com/ssvlabs/ssv-spec/ssv/spectest/tests/partialsigcontainer" "github.com/ssvlabs/ssv-spec/ssv/spectest/tests/runner/duties/newduty" "github.com/ssvlabs/ssv-spec/ssv/spectest/tests/runner/duties/synccommitteeaggregator" @@ -19,19 +24,22 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap" + "github.com/ssvlabs/ssv/exporter/convert" + tests2 "github.com/ssvlabs/ssv/integration/qbft/tests" "github.com/ssvlabs/ssv/logging" "github.com/ssvlabs/ssv/protocol/v2/qbft/controller" "github.com/ssvlabs/ssv/protocol/v2/qbft/instance" qbfttesting "github.com/ssvlabs/ssv/protocol/v2/qbft/testing" "github.com/ssvlabs/ssv/protocol/v2/ssv/runner" ssvtesting "github.com/ssvlabs/ssv/protocol/v2/ssv/testing" + "github.com/ssvlabs/ssv/protocol/v2/ssv/validator" protocoltesting "github.com/ssvlabs/ssv/protocol/v2/testing" - "github.com/ssvlabs/ssv/protocol/v2/types" ) func TestSSVMapping(t *testing.T) { - path, _ := os.Getwd() - jsonTests, err := protocoltesting.GetSpecTestJSON(path, "ssv") + path, err := os.Getwd() + require.NoError(t, err) + jsonTests, err := protocoltesting.GetSSVMappingSpecTestJSON(path, "ssv") require.NoError(t, err) logger := logging.TestLogger(t) @@ -41,7 +49,11 @@ func TestSSVMapping(t *testing.T) { panic(err.Error()) } - types.SetDefaultDomain(spectestingutils.TestingSSVDomainType) + // Set true if you need to check the post run states of actual and expected committees / runners + if DebugDumpState { + _ = os.RemoveAll(dumpDir) + os.Mkdir(dumpDir, 0755) + } for name, test := range untypedTests { name, test := name, test @@ 
-66,17 +78,12 @@ func prepareTest(t *testing.T, logger *zap.Logger, name string, test interface{} switch testType { case reflect.TypeOf(&tests.MsgProcessingSpecTest{}).String(): - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &MsgProcessingSpecTest{ - Runner: &runner.CommitteeRunner{}, - } // TODO: fix blinded test if strings.Contains(testName, "propose regular decide blinded") || strings.Contains(testName, "propose blinded decide regular") { logger.Info("skipping blinded block test", zap.String("test", testName)) return nil } - require.NoError(t, json.Unmarshal(byts, &typedTest)) + typedTest := msgProcessingSpecTestFromMap(t, test.(map[string]interface{})) return &runnable{ name: typedTest.TestName(), @@ -168,6 +175,32 @@ func prepareTest(t *testing.T, logger *zap.Logger, name string, test interface{} typedTest := &partialsigcontainer.PartialSigContainerTest{} require.NoError(t, json.Unmarshal(byts, &typedTest)) + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { + typedTest.Run(t) + }, + } + case reflect.TypeOf(&committee.CommitteeSpecTest{}).String(): + typedTest := committeeSpecTestFromMap(t, logger, test.(map[string]interface{})) + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { + typedTest.Run(t) + }, + } + case reflect.TypeOf(&committee.MultiCommitteeSpecTest{}).String(): + subtests := test.(map[string]interface{})["Tests"].([]interface{}) + typedTests := make([]*CommitteeSpecTest, 0) + for _, subtest := range subtests { + typedTests = append(typedTests, committeeSpecTestFromMap(t, logger, subtest.(map[string]interface{}))) + } + + typedTest := &MultiCommitteeSpecTest{ + Name: test.(map[string]interface{})["Name"].(string), + Tests: typedTests, + } + return &runnable{ name: typedTest.TestName(), test: func(t *testing.T) { @@ -239,6 +272,7 @@ func newRunnerDutySpecTestFromMap(t *testing.T, m map[string]interface{}) *Start Name: m["Name"].(string), Duty: testDuty, Runner: r, + Threshold: ks.Threshold, PostDutyRunnerStateRoot: m["PostDutyRunnerStateRoot"].(string), ExpectedError: m["ExpectedError"].(string), OutputMessages: outputMsgs, @@ -322,6 +356,7 @@ func msgProcessingSpecTestFromMap(t *testing.T, m map[string]interface{}) *MsgPr Duty: duty, Runner: r, Messages: msgs, + DecidedSlashable: m["DecidedSlashable"].(bool), PostDutyRunnerStateRoot: m["PostDutyRunnerStateRoot"].(string), DontStartDuty: m["DontStartDuty"].(bool), ExpectedError: m["ExpectedError"].(string), @@ -345,7 +380,7 @@ func fixRunnerForRun(t *testing.T, runnerMap map[string]interface{}, ks *spectes ret.GetBaseRunner().QBFTController = fixControllerForRun(t, logger, ret, ret.GetBaseRunner().QBFTController, ks) if ret.GetBaseRunner().State != nil { if ret.GetBaseRunner().State.RunningInstance != nil { - operator := spectestingutils.TestingOperator(ks) + operator := spectestingutils.TestingCommitteeMember(ks) ret.GetBaseRunner().State.RunningInstance = fixInstanceForRun(t, ret.GetBaseRunner().State.RunningInstance, ret.GetBaseRunner().QBFTController, operator) } } @@ -355,11 +390,11 @@ func fixRunnerForRun(t *testing.T, runnerMap map[string]interface{}, ks *spectes } func fixControllerForRun(t *testing.T, logger *zap.Logger, runner runner.Runner, contr *controller.Controller, ks *spectestingutils.TestKeySet) *controller.Controller { - config := qbfttesting.TestingConfig(logger, ks, spectypes.RoleCommittee) + config := qbfttesting.TestingConfig(logger, ks, convert.RoleCommittee) config.ValueCheckF = runner.GetValCheckF() newContr := 
controller.NewController( contr.Identifier, - contr.Share, + contr.CommitteeMember, config, false, ) @@ -370,13 +405,13 @@ func fixControllerForRun(t *testing.T, logger *zap.Logger, runner runner.Runner, if inst == nil { continue } - operator := spectestingutils.TestingOperator(ks) + operator := spectestingutils.TestingCommitteeMember(ks) newContr.StoredInstances[i] = fixInstanceForRun(t, inst, newContr, operator) } return newContr } -func fixInstanceForRun(t *testing.T, inst *instance.Instance, contr *controller.Controller, share *spectypes.Operator) *instance.Instance { +func fixInstanceForRun(t *testing.T, inst *instance.Instance, contr *controller.Controller, share *spectypes.CommitteeMember) *instance.Instance { newInst := instance.NewInstance( contr.GetConfig(), share, @@ -385,7 +420,7 @@ func fixInstanceForRun(t *testing.T, inst *instance.Instance, contr *controller. newInst.State.DecidedValue = inst.State.DecidedValue newInst.State.Decided = inst.State.Decided - newInst.State.Share = inst.State.Share + newInst.State.CommitteeMember = inst.State.CommitteeMember newInst.State.Round = inst.State.Round newInst.State.Height = inst.State.Height newInst.State.ProposalAcceptedForCurrentRound = inst.State.ProposalAcceptedForCurrentRound @@ -396,6 +431,7 @@ func fixInstanceForRun(t *testing.T, inst *instance.Instance, contr *controller. newInst.State.PrepareContainer = inst.State.PrepareContainer newInst.State.CommitContainer = inst.State.CommitContainer newInst.State.RoundChangeContainer = inst.State.RoundChangeContainer + newInst.StartValue = inst.StartValue return newInst } @@ -433,3 +469,109 @@ func baseRunnerForRole(logger *zap.Logger, role spectypes.RunnerRole, base *runn panic("unknown beacon role") } } + +func committeeSpecTestFromMap(t *testing.T, logger *zap.Logger, m map[string]interface{}) *CommitteeSpecTest { + committeeMap := m["Committee"].(map[string]interface{}) + + inputs := make([]interface{}, 0) + for _, input := range m["Input"].([]interface{}) { + byts, err := json.Marshal(input) + if err != nil { + panic(err) + } + + var getDecoder = func() *json.Decoder { + decoder := json.NewDecoder(strings.NewReader(string(byts))) + decoder.DisallowUnknownFields() + return decoder + } + + committeeDuty := &spectypes.CommitteeDuty{} + err = getDecoder().Decode(&committeeDuty) + if err == nil { + inputs = append(inputs, committeeDuty) + continue + } + + beaconDuty := &spectypes.BeaconDuty{} + err = getDecoder().Decode(&beaconDuty) + if err == nil { + inputs = append(inputs, beaconDuty) + continue + } + + msg := &spectypes.SignedSSVMessage{} + err = getDecoder().Decode(&msg) + if err == nil { + inputs = append(inputs, msg) + continue + } + + panic(fmt.Sprintf("Unsupported input: %T\n", input)) + } + + outputMsgs := make([]*spectypes.PartialSignatureMessages, 0) + require.NotNilf(t, m["OutputMessages"], "OutputMessages can't be nil") + for _, msg := range m["OutputMessages"].([]interface{}) { + byts, _ := json.Marshal(msg) + typedMsg := &spectypes.PartialSignatureMessages{} + require.NoError(t, json.Unmarshal(byts, typedMsg)) + outputMsgs = append(outputMsgs, typedMsg) + } + + beaconBroadcastedRoots := make([]string, 0) + if m["BeaconBroadcastedRoots"] != nil { + for _, r := range m["BeaconBroadcastedRoots"].([]interface{}) { + beaconBroadcastedRoots = append(beaconBroadcastedRoots, r.(string)) + } + } + + ctx := context.Background() // TODO refactor this + c := fixCommitteeForRun(t, ctx, logger, committeeMap) + + return &CommitteeSpecTest{ + Name: m["Name"].(string), + Committee: c, + 
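+		// Input above was built by trial decoding: each raw entry is re-marshaled to
+		// JSON and tried against the known concrete types in order, keeping the first
+		// that decodes cleanly with DisallowUnknownFields. A sketch of that pattern as
+		// a standalone helper (decodeInput is hypothetical, not part of this change):
+		//
+		//	func decodeInput(byts []byte) (interface{}, error) {
+		//		targets := []interface{}{&spectypes.CommitteeDuty{}, &spectypes.BeaconDuty{}, &spectypes.SignedSSVMessage{}}
+		//		for _, target := range targets {
+		//			dec := json.NewDecoder(strings.NewReader(string(byts)))
+		//			dec.DisallowUnknownFields()
+		//			if err := dec.Decode(target); err == nil {
+		//				return target, nil
+		//			}
+		//		}
+		//		return nil, errors.New("unsupported input type")
+		//	}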
Input: inputs, + PostDutyCommitteeRoot: m["PostDutyCommitteeRoot"].(string), + OutputMessages: outputMsgs, + BeaconBroadcastedRoots: beaconBroadcastedRoots, + ExpectedError: m["ExpectedError"].(string), + } +} + +func fixCommitteeForRun(t *testing.T, ctx context.Context, logger *zap.Logger, committeeMap map[string]interface{}) *validator.Committee { + byts, _ := json.Marshal(committeeMap) + specCommittee := &specssv.Committee{} + require.NoError(t, json.Unmarshal(byts, specCommittee)) + + c := validator.NewCommittee( + ctx, + logger, + tests2.NewTestingBeaconNodeWrapped().GetBeaconNetwork(), + &specCommittee.CommitteeMember, + testingutils.NewTestingVerifier(), + func(slot phase0.Slot, shareMap map[phase0.ValidatorIndex]*spectypes.Share) *runner.CommitteeRunner { + return ssvtesting.CommitteeRunnerWithShareMap(logger, shareMap).(*runner.CommitteeRunner) + }, + ) + tmpSsvCommittee := &validator.Committee{} + require.NoError(t, json.Unmarshal(byts, tmpSsvCommittee)) + + c.Runners = tmpSsvCommittee.Runners + + for slot := range c.Runners { + + var shareInstance *spectypes.Share + for _, share := range c.Runners[slot].BaseRunner.Share { + shareInstance = share + break + } + + fixedRunner := fixRunnerForRun(t, committeeMap["Runners"].(map[string]interface{})[fmt.Sprintf("%v", slot)].(map[string]interface{}), testingutils.KeySetForShare(shareInstance)) + c.Runners[slot] = fixedRunner.(*runner.CommitteeRunner) + } + c.Shares = specCommittee.Share + + return c +} diff --git a/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go b/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go index da35033bed..c18738c0f7 100644 --- a/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go +++ b/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go @@ -27,7 +27,7 @@ func RunSyncCommitteeAggProof(t *testing.T, test *synccommitteeaggregator.SyncCo ks := testingutils.Testing4SharesSet() share := testingutils.TestingShare(ks, testingutils.TestingValidatorIndex) logger := logging.TestLogger(t) - v := ssvtesting.BaseValidator(logger, keySetForShare(share)) + v := ssvtesting.BaseValidator(logger, testingutils.KeySetForShare(share)) r := v.DutyRunners[spectypes.RoleSyncCommitteeContribution] r.GetBeaconNode().(*tests.TestingBeaconNodeWrapped).SetSyncCommitteeAggregatorRootHexes(test.ProofRootsMap) @@ -71,16 +71,3 @@ func overrideStateComparisonForSyncCommitteeAggregatorProofSpecTest(t *testing.T test.PostDutyRunnerStateRoot = hex.EncodeToString(root[:]) } - -func keySetForShare(share *spectypes.Share) *testingutils.TestKeySet { - if share.Quorum == 5 { - return testingutils.Testing7SharesSet() - } - if share.Quorum == 7 { - return testingutils.Testing10SharesSet() - } - if share.Quorum == 9 { - return testingutils.Testing13SharesSet() - } - return testingutils.Testing4SharesSet() -} diff --git a/protocol/v2/ssv/testing/runner.go b/protocol/v2/ssv/testing/runner.go index 48d99c94fb..7639d1aece 100644 --- a/protocol/v2/ssv/testing/runner.go +++ b/protocol/v2/ssv/testing/runner.go @@ -1,7 +1,12 @@ package testing import ( + "bytes" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ssvlabs/ssv/integration/qbft/tests" + "github.com/ssvlabs/ssv/networkconfig" + "github.com/ssvlabs/ssv/operator/validator" "github.com/ssvlabs/ssv/protocol/v2/qbft/testing" "github.com/ssvlabs/ssv/protocol/v2/ssv/runner" "go.uber.org/zap" @@ -15,7 +20,16 @@ import ( var TestingHighestDecidedSlot = phase0.Slot(0) var CommitteeRunner = func(logger *zap.Logger, keySet 
*spectestingutils.TestKeySet) runner.Runner { - return baseRunner(logger, spectypes.RoleCommittee, specssv.BeaconVoteValueCheckF(spectestingutils.NewTestingKeyManager(), spectestingutils.TestingDutySlot, nil, spectestingutils.TestingDutyEpoch), keySet) + // TODO fixme ? + + return baseRunner(logger, spectypes.RoleCommittee, validator.TempBeaconVoteValueCheckF(spectestingutils.NewTestingKeyManager(), spectestingutils.TestingDutySlot, nil, spectestingutils.TestingDutyEpoch), keySet) + //return baseRunner(logger, spectypes.RoleCommittee, specssv.BeaconVoteValueCheckF(spectestingutils.NewTestingKeyManager(), spectestingutils.TestingDutySlot, nil, spectestingutils.TestingDutyEpoch), keySet) +} + +var CommitteeRunnerWithShareMap = func(logger *zap.Logger, shareMap map[phase0.ValidatorIndex]*spectypes.Share) runner.Runner { + // TODO fixme ? + return baseRunnerWithShareMap(logger, spectypes.RoleCommittee, validator.TempBeaconVoteValueCheckF(spectestingutils.NewTestingKeyManager(), spectestingutils.TestingDutySlot, nil, spectestingutils.TestingDutyEpoch), shareMap) + //return baseRunnerWithShareMap(logger, spectypes.RoleCommittee, specssv.BeaconVoteValueCheckF(spectestingutils.NewTestingKeyManager(), spectestingutils.TestingDutySlot, nil, spectestingutils.TestingDutyEpoch), shareMap) } //var AttesterRunner7Operators = func(keySet *spectestingutils.TestKeySet) runner.Runner { @@ -80,10 +94,11 @@ var baseRunner = func( config.OperatorSigner = opSigner config.SignatureVerifier = spectestingutils.NewTestingVerifier() - contr := specqbft.NewController( + contr := testing.NewTestingQBFTController( identifier[:], operator, config, + false, ) shareMap := make(map[phase0.ValidatorIndex]*spectypes.Share) @@ -91,22 +106,22 @@ var baseRunner = func( switch role { case spectypes.RoleCommittee: - return specssv.NewCommitteeRunner( - spectypes.BeaconTestNetwork, + return runner.NewCommitteeRunner( + networkconfig.TestNetwork, shareMap, contr, - spectestingutils.NewTestingBeaconNode(), + tests.NewTestingBeaconNodeWrapped(), net, km, opSigner, valCheck, ).(runner.Runner) case spectypes.RoleAggregator: - return specssv.NewAggregatorRunner( + return runner.NewAggregatorRunner( spectypes.BeaconTestNetwork, shareMap, contr, - spectestingutils.NewTestingBeaconNode(), + tests.NewTestingBeaconNodeWrapped(), net, km, opSigner, @@ -114,11 +129,11 @@ var baseRunner = func( TestingHighestDecidedSlot, ).(runner.Runner) case spectypes.RoleProposer: - return specssv.NewProposerRunner( + return runner.NewProposerRunner( spectypes.BeaconTestNetwork, shareMap, contr, - spectestingutils.NewTestingBeaconNode(), + tests.NewTestingBeaconNodeWrapped(), net, km, opSigner, @@ -126,11 +141,11 @@ var baseRunner = func( TestingHighestDecidedSlot, ).(runner.Runner) case spectypes.RoleSyncCommitteeContribution: - return specssv.NewSyncCommitteeAggregatorRunner( + return runner.NewSyncCommitteeAggregatorRunner( spectypes.BeaconTestNetwork, shareMap, contr, - spectestingutils.NewTestingBeaconNode(), + tests.NewTestingBeaconNodeWrapped(), net, km, opSigner, @@ -138,35 +153,36 @@ var baseRunner = func( TestingHighestDecidedSlot, ).(runner.Runner) case spectypes.RoleValidatorRegistration: - return specssv.NewValidatorRegistrationRunner( + return runner.NewValidatorRegistrationRunner( spectypes.BeaconTestNetwork, shareMap, - spectestingutils.NewTestingBeaconNode(), + contr, + tests.NewTestingBeaconNodeWrapped(), net, km, opSigner, ).(runner.Runner) case spectypes.RoleVoluntaryExit: - return specssv.NewVoluntaryExitRunner( + return 
runner.NewVoluntaryExitRunner( spectypes.BeaconTestNetwork, shareMap, - spectestingutils.NewTestingBeaconNode(), + tests.NewTestingBeaconNodeWrapped(), net, km, opSigner, ).(runner.Runner) case spectestingutils.UnknownDutyType: - ret := specssv.NewCommitteeRunner( - spectypes.BeaconTestNetwork, + ret := runner.NewCommitteeRunner( + networkconfig.TestNetwork, shareMap, contr, - spectestingutils.NewTestingBeaconNode(), + tests.NewTestingBeaconNodeWrapped(), net, km, opSigner, valCheck, ) - ret.(*specssv.CommitteeRunner).BaseRunner.RunnerRoleType = spectestingutils.UnknownDutyType + ret.(*runner.CommitteeRunner).BaseRunner.RunnerRoleType = spectestingutils.UnknownDutyType return ret.(runner.Runner) default: panic("unknown role type") @@ -238,3 +254,137 @@ var baseRunner = func( // } // return msgs //} + +var baseRunnerWithShareMap = func( + logger *zap.Logger, + role spectypes.RunnerRole, + valCheck specqbft.ProposedValueCheckF, + shareMap map[phase0.ValidatorIndex]*spectypes.Share, +) runner.Runner { + + var keySetInstance *spectestingutils.TestKeySet + for _, share := range shareMap { + keySetInstance = spectestingutils.KeySetForShare(share) + break + } + + // Identifier + var ownerID []byte + if role == spectypes.RoleCommittee { + committee := make([]uint64, 0) + for _, op := range keySetInstance.Committee() { + committee = append(committee, op.Signer) + } + committeeID := spectypes.GetCommitteeID(committee) + ownerID = bytes.Clone(committeeID[:]) + } else { + ownerID = spectestingutils.TestingValidatorPubKey[:] + } + identifier := spectypes.NewMsgID(spectestingutils.TestingSSVDomainType, ownerID, role) + + net := spectestingutils.NewTestingNetwork(1, keySetInstance.OperatorKeys[1]) + + km := spectestingutils.NewTestingKeyManager() + committeeMember := spectestingutils.TestingCommitteeMember(keySetInstance) + opSigner := spectestingutils.NewTestingOperatorSigner(keySetInstance, committeeMember.OperatorID) + + config := testing.TestingConfig(logger, keySetInstance, identifier.GetRoleType()) + config.ValueCheckF = valCheck + config.ProposerF = func(state *specqbft.State, round specqbft.Round) spectypes.OperatorID { + return 1 + } + config.Network = net + config.OperatorSigner = opSigner + config.SignatureVerifier = spectestingutils.NewTestingVerifier() + + contr := testing.NewTestingQBFTController( + identifier[:], + committeeMember, + config, + false, + ) + + switch role { + case spectypes.RoleCommittee: + return runner.NewCommitteeRunner( + networkconfig.TestNetwork, + shareMap, + contr, + tests.NewTestingBeaconNodeWrapped(), + net, + km, + opSigner, + valCheck, + ) + case spectypes.RoleAggregator: + return runner.NewAggregatorRunner( + spectypes.BeaconTestNetwork, + shareMap, + contr, + tests.NewTestingBeaconNodeWrapped(), + net, + km, + opSigner, + valCheck, + TestingHighestDecidedSlot, + ) + case spectypes.RoleProposer: + return runner.NewProposerRunner( + spectypes.BeaconTestNetwork, + shareMap, + contr, + tests.NewTestingBeaconNodeWrapped(), + net, + km, + opSigner, + valCheck, + TestingHighestDecidedSlot, + ) + case spectypes.RoleSyncCommitteeContribution: + return runner.NewSyncCommitteeAggregatorRunner( + spectypes.BeaconTestNetwork, + shareMap, + contr, + tests.NewTestingBeaconNodeWrapped(), + net, + km, + opSigner, + valCheck, + TestingHighestDecidedSlot, + ) + case spectypes.RoleValidatorRegistration: + return runner.NewValidatorRegistrationRunner( + spectypes.BeaconTestNetwork, + shareMap, + contr, + tests.NewTestingBeaconNodeWrapped(), + net, + km, + opSigner, + ) + case 
spectypes.RoleVoluntaryExit: + return runner.NewVoluntaryExitRunner( + spectypes.BeaconTestNetwork, + shareMap, + tests.NewTestingBeaconNodeWrapped(), + net, + km, + opSigner, + ) + case spectestingutils.UnknownDutyType: + ret := runner.NewCommitteeRunner( + networkconfig.TestNetwork, + shareMap, + contr, + tests.NewTestingBeaconNodeWrapped(), + net, + km, + opSigner, + valCheck, + ) + ret.(*runner.CommitteeRunner).BaseRunner.RunnerRoleType = spectestingutils.UnknownDutyType + return ret + default: + panic("unknown role type") + } +} diff --git a/protocol/v2/ssv/testing/validator.go b/protocol/v2/ssv/testing/validator.go index 2bc230d700..c16189e31e 100644 --- a/protocol/v2/ssv/testing/validator.go +++ b/protocol/v2/ssv/testing/validator.go @@ -3,13 +3,14 @@ package testing import ( "context" + "go.uber.org/zap" + "github.com/ssvlabs/ssv/integration/qbft/tests" "github.com/ssvlabs/ssv/networkconfig" "github.com/ssvlabs/ssv/protocol/v2/qbft/testing" "github.com/ssvlabs/ssv/protocol/v2/ssv/runner" "github.com/ssvlabs/ssv/protocol/v2/ssv/validator" "github.com/ssvlabs/ssv/protocol/v2/types" - "go.uber.org/zap" spectypes "github.com/ssvlabs/ssv-spec/types" spectestingutils "github.com/ssvlabs/ssv-spec/types/testingutils" @@ -23,13 +24,14 @@ var BaseValidator = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet cancel, validator.Options{ Network: spectestingutils.NewTestingNetwork(1, keySet.OperatorKeys[1]), + NetworkConfig: networkconfig.TestNetwork, Beacon: tests.NewTestingBeaconNodeWrapped(), - BeaconNetwork: networkconfig.TestNetwork.Beacon, Storage: testing.TestingStores(logger), SSVShare: &types.SSVShare{ Share: *spectestingutils.TestingShare(keySet, spectestingutils.TestingValidatorIndex), }, Signer: spectestingutils.NewTestingKeyManager(), + Operator: spectestingutils.TestingCommitteeMember(keySet), OperatorSigner: spectestingutils.NewTestingOperatorSigner(keySet, 1), SignatureVerifier: spectestingutils.NewTestingVerifier(), DutyRunners: map[spectypes.RunnerRole]runner.Runner{ diff --git a/protocol/v2/ssv/validator/committee.go b/protocol/v2/ssv/validator/committee.go index b1bd91b575..bd44211993 100644 --- a/protocol/v2/ssv/validator/committee.go +++ b/protocol/v2/ssv/validator/committee.go @@ -2,16 +2,17 @@ package validator import ( "context" + "crypto/sha256" "encoding/hex" + "encoding/json" "fmt" "sync" - "github.com/ssvlabs/ssv/logging/fields" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/pkg/errors" "github.com/ssvlabs/ssv-spec/qbft" spectypes "github.com/ssvlabs/ssv-spec/types" + "github.com/ssvlabs/ssv/logging/fields" "go.uber.org/zap" "github.com/ssvlabs/ssv/ibft/storage" @@ -51,21 +52,22 @@ func NewCommittee( operator *spectypes.CommitteeMember, verifier spectypes.SignatureVerifier, createRunnerFn func(slot phase0.Slot, shares map[phase0.ValidatorIndex]*spectypes.Share) *runner.CommitteeRunner, + // share map[phase0.ValidatorIndex]*spectypes.Share, // TODO Shouldn't we pass the shares map here the same way we do in spec? 
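+	// The TODO above asks whether the shares map should be injected here as in spec.
+	// Today the map starts empty and is filled after construction; a sketch of the
+	// current wiring (names assumed to be in the caller's scope):
+	//
+	//	c := NewCommittee(ctx, logger, beaconNetwork, operator, verifier, createRunnerFn)
+	//	for _, share := range shares {
+	//		c.AddShare(share)
+	//	}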
) *Committee { return &Committee{ logger: logger, BeaconNetwork: beaconNetwork, ctx: ctx, // TODO alan: drain maps - Queues: make(map[phase0.Slot]queueContainer), - Runners: make(map[phase0.Slot]*runner.CommitteeRunner), - Shares: make(map[phase0.ValidatorIndex]*spectypes.Share), + Queues: make(map[phase0.Slot]queueContainer), + Runners: make(map[phase0.Slot]*runner.CommitteeRunner), + Shares: make(map[phase0.ValidatorIndex]*spectypes.Share), + //Shares: share, HighestAttestingSlotMap: make(map[spectypes.ValidatorPK]phase0.Slot), Operator: operator, SignatureVerifier: verifier, CreateRunnerFn: createRunnerFn, } - } func (c *Committee) AddShare(share *spectypes.Share) { @@ -88,34 +90,40 @@ func (c *Committee) StartDuty(logger *zap.Logger, duty *spectypes.CommitteeDuty) c.logger.Debug("Starting committee duty runner", zap.Uint64("slot", uint64(duty.Slot))) c.mtx.Lock() defer c.mtx.Unlock() - // TODO alan : lock per slot? + + if len(duty.BeaconDuties) == 0 { + return errors.New("no beacon duties") + } if _, exists := c.Runners[duty.Slot]; exists { return errors.New(fmt.Sprintf("CommitteeRunner for slot %d already exists", duty.Slot)) } - var validatorToStopMap map[phase0.Slot]spectypes.ValidatorPK - //Filter old duties based on highest attesting slot - - duty, validatorToStopMap, highestAttestingSlotMap, err := FilterCommitteeDuty(logger, duty, c.HighestAttestingSlotMap) - if err != nil { - return errors.Wrap(err, "cannot filter committee duty") + validatorShares := make(map[phase0.ValidatorIndex]*spectypes.Share, len(duty.BeaconDuties)) + toRemove := make([]int, 0) + // Remove beacon duties that don't have a share + for i, bd := range duty.BeaconDuties { + share, ok := c.Shares[bd.ValidatorIndex] + if !ok { + toRemove = append(toRemove, i) + continue + } + validatorShares[bd.ValidatorIndex] = share } - c.HighestAttestingSlotMap = highestAttestingSlotMap - // Stop validators with old duties - c.stopDuties(logger, validatorToStopMap) - c.updateAttestingSlotMap(duty) - - if len(duty.BeaconDuties) == 0 { - logger.Debug("No beacon duties to run") - return nil + // Remove beacon duties that don't have a share + if len(toRemove) > 0 { + newDuties, err := removeIndices(duty.BeaconDuties, toRemove) + if err != nil { + logger.Warn("could not remove beacon duties", zap.Error(err), zap.Ints("indices", toRemove)) + } else { + duty.BeaconDuties = newDuties + } } - var sharesCopy = make(map[phase0.ValidatorIndex]*spectypes.Share, len(c.Shares)) - for k, v := range c.Shares { - sharesCopy[k] = v + if len(duty.BeaconDuties) == 0 { + return errors.New("CommitteeDuty has no valid beacon duties") } - r := c.CreateRunnerFn(duty.Slot, sharesCopy) + r := c.CreateRunnerFn(duty.Slot, validatorShares) // Set timeout function. 
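+	// A minimal sketch of a CreateRunnerFn a caller might supply, mirroring the
+	// testing helpers elsewhere in this change; networkConfig, qbftCtrl, beaconNode,
+	// net, signer, operatorSigner and valCheck are assumed to be in scope:
+	//
+	//	createRunnerFn := func(slot phase0.Slot, shares map[phase0.ValidatorIndex]*spectypes.Share) *runner.CommitteeRunner {
+	//		return runner.NewCommitteeRunner(networkConfig, shares, qbftCtrl, beaconNode, net, signer, operatorSigner, valCheck).(*runner.CommitteeRunner)
+	//	}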
r.GetBaseRunner().TimeoutF = c.onTimeout c.Runners[duty.Slot] = r @@ -140,20 +148,6 @@ func (c *Committee) StartDuty(logger *zap.Logger, duty *spectypes.CommitteeDuty) return c.Runners[duty.Slot].StartNewDuty(logger, duty, c.Operator.GetQuorum()) } -// NOT threadsafe -func (c *Committee) stopDuties(logger *zap.Logger, validatorToStopMap map[phase0.Slot]spectypes.ValidatorPK) { - for slot, validator := range validatorToStopMap { - r, exists := c.Runners[slot] - if exists { - logger.Debug("stopping duty for validator", - fields.DutyID(fields.FormatCommitteeDutyID(c.Operator.Committee, c.BeaconNetwork.EstimatedEpochAtSlot(slot), slot)), - zap.Uint64("slot", uint64(slot)), zap.String("validator", hex.EncodeToString(validator[:])), - ) - r.StopDuty(validator) - } - } -} - // NOT threadsafe func (c *Committee) stopValidator(logger *zap.Logger, validator spectypes.ValidatorPK) { for slot, runner := range c.Runners { @@ -197,52 +191,22 @@ func removeIndices(s []*spectypes.BeaconDuty, indicesToRemove []int) ([]*spectyp return result, nil } -// FilterCommitteeDuty filters the committee duties by the slots given per validator. -// It returns the filtered duties, the validators to stop and updated slot map. -// NOT threadsafe -func FilterCommitteeDuty(logger *zap.Logger, duty *spectypes.CommitteeDuty, slotMap map[spectypes.ValidatorPK]phase0.Slot) ( - *spectypes.CommitteeDuty, - map[phase0.Slot]spectypes.ValidatorPK, - map[spectypes.ValidatorPK]phase0.Slot, - error, -) { - validatorsToStop := make(map[phase0.Slot]spectypes.ValidatorPK) - indicesToRemove := make([]int, 0) - for i, beaconDuty := range duty.BeaconDuties { - validatorPK := spectypes.ValidatorPK(beaconDuty.PubKey) - slot, exists := slotMap[validatorPK] - if exists { - if slot < beaconDuty.Slot { - validatorsToStop[beaconDuty.Slot] = validatorPK - slotMap[validatorPK] = beaconDuty.Slot - } else { // else don't run duty with old slot - // remove the duty - logger.Debug("removing beacon duty from committeeduty", zap.Uint64("slot", uint64(beaconDuty.Slot)), zap.String("validator", hex.EncodeToString(beaconDuty.PubKey[:]))) - indicesToRemove = append(indicesToRemove, i) - } - } - } - - filteredDuties, err := removeIndices(duty.BeaconDuties, indicesToRemove) - if err != nil { - return nil, nil, nil, errors.Wrap(err, "cannot remove indices") - } - duty.BeaconDuties = filteredDuties - return duty, validatorsToStop, slotMap, err -} - // ProcessMessage processes Network Message of all types func (c *Committee) ProcessMessage(logger *zap.Logger, msg *queue.DecodedSSVMessage) error { // Validate message if msg.GetType() != message.SSVEventMsgType { if err := msg.SignedSSVMessage.Validate(); err != nil { - return errors.Wrap(err, "invalid signed message") + return errors.Wrap(err, "invalid SignedSSVMessage") } // Verify SignedSSVMessage's signature if err := c.SignatureVerifier.Verify(msg.SignedSSVMessage, c.Operator.Committee); err != nil { return errors.Wrap(err, "SignedSSVMessage has an invalid signature") } + + if err := c.validateMessage(msg.SignedSSVMessage.SSVMessage); err != nil { + return errors.Wrap(err, "Message invalid") + } } switch msg.GetType() { @@ -294,6 +258,64 @@ func (c *Committee) ProcessMessage(logger *zap.Logger, msg *queue.DecodedSSVMess } +func (c *Committee) Encode() ([]byte, error) { + return json.Marshal(c) +} + +func (c *Committee) Decode(data []byte) error { + return json.Unmarshal(data, &c) +} + +// GetRoot returns the state's deterministic root +func (c *Committee) GetRoot() ([32]byte, error) { + marshaledRoot, err := 
c.Encode() + if err != nil { + return [32]byte{}, errors.Wrap(err, "could not encode state") + } + ret := sha256.Sum256(marshaledRoot) + return ret, nil +} + +func (c *Committee) MarshalJSON() ([]byte, error) { + type CommitteeAlias struct { + Runners map[phase0.Slot]*runner.CommitteeRunner + CommitteeMember *spectypes.CommitteeMember + Share map[phase0.ValidatorIndex]*spectypes.Share + } + + // Create object and marshal + alias := &CommitteeAlias{ + Runners: c.Runners, + CommitteeMember: c.Operator, + Share: c.Shares, + } + + byts, err := json.Marshal(alias) + + return byts, err +} + +func (c *Committee) UnmarshalJSON(data []byte) error { + type CommitteeAlias struct { + Runners map[phase0.Slot]*runner.CommitteeRunner + Operator *spectypes.CommitteeMember + Shares map[phase0.ValidatorIndex]*spectypes.Share + } + + // Unmarshal the JSON data into the auxiliary struct + aux := &CommitteeAlias{} + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + // Assign fields + c.Runners = aux.Runners + c.Operator = aux.Operator + c.Shares = aux.Shares + + return nil +} + // updateAttestingSlotMap updates the highest attesting slot map from beacon duties func (c *Committee) updateAttestingSlotMap(duty *spectypes.CommitteeDuty) { for _, beaconDuty := range duty.BeaconDuties { @@ -308,3 +330,15 @@ func (c *Committee) updateAttestingSlotMap(duty *spectypes.CommitteeDuty) { } } } + +func (c *Committee) validateMessage(msg *spectypes.SSVMessage) error { + if !(c.Operator.CommitteeID.MessageIDBelongs(msg.GetID())) { + return errors.New("msg ID doesn't match committee ID") + } + + if len(msg.GetData()) == 0 { + return errors.New("msg data is invalid") + } + + return nil +} diff --git a/protocol/v2/ssv/validator/non_committee_validator.go b/protocol/v2/ssv/validator/non_committee_validator.go index 7c603e8682..60c101d2ca 100644 --- a/protocol/v2/ssv/validator/non_committee_validator.go +++ b/protocol/v2/ssv/validator/non_committee_validator.go @@ -2,14 +2,18 @@ package validator import ( "fmt" + "strconv" + "strings" + "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/herumi/bls-eth-go-binary/bls" specqbft "github.com/ssvlabs/ssv-spec/qbft" specssv "github.com/ssvlabs/ssv-spec/ssv" spectypes "github.com/ssvlabs/ssv-spec/types" - "github.com/ssvlabs/ssv/exporter/exporter_message" + "github.com/ssvlabs/ssv/exporter/convert" "github.com/ssvlabs/ssv/ibft/storage" "github.com/ssvlabs/ssv/logging/fields" + "github.com/ssvlabs/ssv/networkconfig" "github.com/ssvlabs/ssv/protocol/v2/qbft" qbftcontroller "github.com/ssvlabs/ssv/protocol/v2/qbft/controller" qbftctrl "github.com/ssvlabs/ssv/protocol/v2/qbft/controller" @@ -19,8 +23,6 @@ import ( registrystorage "github.com/ssvlabs/ssv/registry/storage" "go.uber.org/zap" "golang.org/x/exp/slices" - "strconv" - "strings" ) type CommitteeObserver struct { @@ -33,20 +35,21 @@ type CommitteeObserver struct { postConsensusContainer map[phase0.ValidatorIndex]*specssv.PartialSigContainer } -type NonCommitteeOptions struct { +type CommitteeObserverOptions struct { FullNode bool Logger *zap.Logger Network specqbft.Network Storage *storage.QBFTStores Operator *spectypes.CommitteeMember + NetworkConfig networkconfig.NetworkConfig NewDecidedHandler qbftctrl.NewDecidedHandler ValidatorStore registrystorage.ValidatorStore } -func NewNonCommitteeValidator(identifier exporter_message.MessageID, opts NonCommitteeOptions) *CommitteeObserver { +func NewCommitteeObserver(identifier convert.MessageID, opts CommitteeObserverOptions) *CommitteeObserver { // 
currently, only need domain & storage
 	config := &qbft.Config{
-		Domain:                types.GetDefaultDomain(),
+		Domain:                opts.NetworkConfig.Domain,
 		Storage:               opts.Storage.Get(identifier.GetRoleType()),
 		Network:               opts.Network,
 		SignatureVerification: true,
@@ -71,44 +74,40 @@
 	}
 }

-func (ncv *CommitteeObserver) ProcessMessage(msg *queue.DecodedSSVMessage) {
+func (ncv *CommitteeObserver) ProcessMessage(msg *queue.DecodedSSVMessage) error {
 	cid := spectypes.CommitteeID(msg.GetID().GetDutyExecutorID()[16:])
 	logger := ncv.logger.With(fields.CommitteeID(cid), fields.Role(msg.MsgID.GetRoleType()))

 	partialSigMessages := &spectypes.PartialSignatureMessages{}
 	if err := partialSigMessages.Decode(msg.SSVMessage.GetData()); err != nil {
-		logger.Debug("❗ failed to get partial signature message from network message", zap.Error(err))
-		return
+		return fmt.Errorf("failed to get partial signature message from network message: %w", err)
 	}

 	if partialSigMessages.Type != spectypes.PostConsensusPartialSig {
-		return
+		return fmt.Errorf("not processing message type %d", partialSigMessages.Type)
 	}

 	slot := partialSigMessages.Slot
 	logger = logger.With(fields.Slot(slot))

 	if err := partialSigMessages.Validate(); err != nil {
-		logger.Debug("❌ got invalid message", zap.Error(err))
+		return fmt.Errorf("got invalid message: %w", err)
 	}

 	quorums, err := ncv.processMessage(partialSigMessages)
 	if err != nil {
-		logger.Debug("❌ could not process SignedPartialSignatureMessage",
-			zap.Error(err))
-		return
+		return fmt.Errorf("could not process SignedPartialSignatureMessage: %w", err)
 	}

 	if len(quorums) == 0 {
-		return
+		return nil
 	}

 	for key, quorum := range quorums {
 		role := ncv.getRole(msg, key.Root)
 		validator := ncv.ValidatorStore.ValidatorByIndex(key.ValidatorIndex)
-		MsgID := exporter_message.NewMsgID(types.GetDefaultDomain(), validator.ValidatorPubKey[:], role)
+		MsgID := convert.NewMsgID(ncv.qbftController.GetConfig().GetSignatureDomainType(), validator.ValidatorPubKey[:], role)
 		if err := ncv.Storage.Get(MsgID.GetRoleType()).SaveParticipants(MsgID, slot, quorum); err != nil {
-			logger.Error("❌ could not save participants", zap.Error(err))
-			return
+			return fmt.Errorf("could not save participants: %w", err)
 		} else {
 			var operatorIDs []string
 			for _, share := range quorum {
@@ -129,17 +128,19 @@ func (ncv *CommitteeObserver) ProcessMessage(msg *queue.DecodedSSVMessage) {
 			})
 		}
 	}
+
+	return nil
 }

-func (ncv *CommitteeObserver) getRole(msg *queue.DecodedSSVMessage, root [32]byte) exporter_message.RunnerRole {
+func (ncv *CommitteeObserver) getRole(msg *queue.DecodedSSVMessage, root [32]byte) convert.RunnerRole {
 	if msg.MsgID.GetRoleType() == spectypes.RoleCommittee {
 		_, found := ncv.Roots[root]
 		if !found {
-			return exporter_message.RoleAttester
+			return convert.RoleAttester
 		}
-		return exporter_message.RoleSyncCommittee
+		return convert.RoleSyncCommittee
 	}
-	return exporter_message.RunnerRole(msg.MsgID.GetRoleType())
+	return convert.RunnerRole(msg.MsgID.GetRoleType())
 }

 // nonCommitteeInstanceContainerCapacity returns the capacity of InstanceContainer for non-committee validators
@@ -163,9 +164,6 @@ func (ncv *CommitteeObserver) processMessage(
 	for _, msg := range signedMsg.Messages {
 		validator := ncv.ValidatorStore.ValidatorByIndex(msg.ValidatorIndex)
-		if validator == nil {
-			panic("fuck my life")
-		}
 		container, ok := ncv.postConsensusContainer[msg.ValidatorIndex]
 		if !ok {
 			container = specssv.NewPartialSigContainer(validator.Quorum())
@@ -242,33 +240,22 @@ func (ncv
diff --git a/protocol/v2/ssv/validator/opts.go b/protocol/v2/ssv/validator/opts.go
index 5975cdf053..ff1ca67df1 100644
--- a/protocol/v2/ssv/validator/opts.go
+++ b/protocol/v2/ssv/validator/opts.go
@@ -1,11 +1,12 @@
 package validator
 
 import (
+	specqbft "github.com/ssvlabs/ssv-spec/qbft"
 	spectypes "github.com/ssvlabs/ssv-spec/types"
-	specqbft "github.com/ssvlabs/ssv-spec/qbft"
 
 	"github.com/ssvlabs/ssv/ibft/storage"
 	"github.com/ssvlabs/ssv/message/validation"
+	"github.com/ssvlabs/ssv/networkconfig"
 	"github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon"
 	qbftctrl "github.com/ssvlabs/ssv/protocol/v2/qbft/controller"
 	"github.com/ssvlabs/ssv/protocol/v2/ssv/runner"
@@ -18,9 +19,9 @@ const (
 
 // Options represents options that should be passed to a new instance of Validator.
 type Options struct {
+	NetworkConfig networkconfig.NetworkConfig
 	Network       specqbft.Network
 	Beacon        beacon.BeaconNode
-	BeaconNetwork beacon.BeaconNetwork
 	Storage       *storage.QBFTStores
 	SSVShare      *types.SSVShare
 	Operator      *spectypes.CommitteeMember
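// With BeaconNetwork removed from Options, callers hand the validator a full
// network configuration. A minimal sketch of the wiring, assuming the
// surrounding setup code; field values here are illustrative:
//
//	opts := validator.Options{
//		NetworkConfig: networkconfig.Mainnet,
//		Network:       p2pNetwork,
//		Beacon:        beaconNode,
//		Storage:       storageMap,
//	}
//	v := validator.NewValidator(ctx, cancel, opts)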
diff --git a/protocol/v2/ssv/validator/startup.go b/protocol/v2/ssv/validator/startup.go
index 1eedadd410..f5eb5aa7b7 100644
--- a/protocol/v2/ssv/validator/startup.go
+++ b/protocol/v2/ssv/validator/startup.go
@@ -6,11 +6,9 @@ import (
 	"github.com/pkg/errors"
 	"github.com/ssvlabs/ssv-spec/p2p"
 	spectypes "github.com/ssvlabs/ssv-spec/types"
-	"github.com/ssvlabs/ssv/logging"
-	"github.com/ssvlabs/ssv/protocol/v2/types"
-
 	"go.uber.org/zap"
 
+	"github.com/ssvlabs/ssv/logging"
 	"github.com/ssvlabs/ssv/logging/fields"
 )
 
@@ -42,7 +40,7 @@ func (v *Validator) Start(logger *zap.Logger) (started bool, err error) {
 			continue
 		}
 
-		identifier := spectypes.NewMsgID(types.GetDefaultDomain(), share.ValidatorPubKey[:], role)
+		identifier := spectypes.NewMsgID(v.NetworkConfig.Domain, share.ValidatorPubKey[:], role)
 		if ctrl := dutyRunner.GetBaseRunner().QBFTController; ctrl != nil {
 			highestInstance, err := ctrl.LoadHighestInstance(identifier[:])
 			if err != nil {
diff --git a/protocol/v2/ssv/validator/validator.go b/protocol/v2/ssv/validator/validator.go
index cb23cc9094..2acff6bf4c 100644
--- a/protocol/v2/ssv/validator/validator.go
+++ b/protocol/v2/ssv/validator/validator.go
@@ -15,6 +15,7 @@ import (
 	"github.com/ssvlabs/ssv/ibft/storage"
 	"github.com/ssvlabs/ssv/logging/fields"
 	"github.com/ssvlabs/ssv/message/validation"
+	"github.com/ssvlabs/ssv/networkconfig"
 	"github.com/ssvlabs/ssv/protocol/v2/message"
 	"github.com/ssvlabs/ssv/protocol/v2/ssv/queue"
 	"github.com/ssvlabs/ssv/protocol/v2/ssv/runner"
@@ -29,8 +30,9 @@ type Validator struct {
 	ctx    context.Context
 	cancel context.CancelFunc
 
-	DutyRunners runner.ValidatorDutyRunners
-	Network     specqbft.Network
+	NetworkConfig networkconfig.NetworkConfig
+	DutyRunners   runner.ValidatorDutyRunners
+	Network       specqbft.Network
 
 	Operator *spectypes.CommitteeMember
 	Share    *types.SSVShare
@@ -61,6 +63,7 @@ func NewValidator(pctx context.Context, cancel func(), options Options) *Validat
 		mtx:           &sync.RWMutex{},
 		ctx:           pctx,
 		cancel:        cancel,
+		NetworkConfig: options.NetworkConfig,
 		DutyRunners:   options.DutyRunners,
 		Network:       options.Network,
 		Storage:       options.Storage,
@@ -126,7 +129,7 @@ func (v *Validator) ProcessMessage(logger *zap.Logger, msg *queue.DecodedSSVMess
 	if msg.GetType() != message.SSVEventMsgType {
 		// Validate message
 		if err := msg.SignedSSVMessage.Validate(); err != nil {
-			return errors.Wrap(err, "invalid signed message")
+			return errors.Wrap(err, "invalid SignedSSVMessage")
 		}
 
 		// Verify SignedSSVMessage's signature
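// Deriving identifiers from the validator's own NetworkConfig (rather than a
// process-wide default domain) means validators for different networks can
// coexist in one process. A sketch, assuming networkconfig also exposes a
// Holesky config and that pk and role are in scope:
//
//	mainnetID := spectypes.NewMsgID(networkconfig.Mainnet.Domain, pk[:], role)
//	holeskyID := spectypes.NewMsgID(networkconfig.Holesky.Domain, pk[:], role)
//	// The two IDs differ in their domain prefix, so QBFT storage and message
//	// routing no longer collide between networks.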
diff --git a/protocol/v2/testing/test_utils.go b/protocol/v2/testing/test_utils.go
index d5c1675486..0df2e3aa0c 100644
--- a/protocol/v2/testing/test_utils.go
+++ b/protocol/v2/testing/test_utils.go
@@ -1,8 +1,13 @@
 package testing
 
 import (
+	"archive/tar"
+	"compress/gzip"
 	"crypto/rsa"
+	"encoding/json"
 	"fmt"
+	"io"
+	"log"
 	"os"
 	"path"
 	"path/filepath"
@@ -101,6 +106,37 @@ func SignMsg(t *testing.T, sks []*rsa.PrivateKey, signers []spectypes.OperatorID
 	return testingutils.MultiSignQBFTMsg(sks, signers, msg)
 }
 
+func GetSSVMappingSpecTestJSON(path string, module string) ([]byte, error) {
+	p, err := GetSpecDir(path, module)
+	if err != nil {
+		return nil, errors.Wrap(err, "could not get spec test dir")
+	}
+	gzPath := filepath.Join(p, "spectest", "generate", "tests.json.gz")
+	untypedTests := map[string]interface{}{}
+
+	file, err := os.Open(gzPath)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to open gzip file")
+	}
+	defer file.Close()
+
+	gzipReader, err := gzip.NewReader(file)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create gzip reader")
+	}
+	defer gzipReader.Close()
+
+	decompressedData, err := io.ReadAll(gzipReader)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to read decompressed data")
+	}
+
+	if err := json.Unmarshal(decompressedData, &untypedTests); err != nil {
+		return nil, errors.Wrap(err, "failed to unmarshal JSON")
+	}
+	return decompressedData, nil
+}
+
 func GetSpecTestJSON(path string, module string) ([]byte, error) {
 	p, err := GetSpecDir(path, module)
 	if err != nil {
@@ -208,3 +244,57 @@ func getGoModFile(path string) (*modfile.File, error) {
 	// parse go.mod
 	return modfile.Parse("go.mod", buf, nil)
 }
+
+func ExtractTarGz(gzipStream io.Reader) {
+	uncompressedStream, err := gzip.NewReader(gzipStream)
+	if err != nil {
+		log.Fatal("ExtractTarGz: NewReader failed")
+	}
+
+	tarReader := tar.NewReader(uncompressedStream)
+
+	for {
+		header, err := tarReader.Next()
+
+		if err == io.EOF {
+			break
+		}
+
+		if err != nil {
+			log.Fatalf("ExtractTarGz: Next() failed: %s", err.Error())
+		}
+
+		switch header.Typeflag {
+		case tar.TypeDir:
+			if err := os.Mkdir(header.Name, 0755); err != nil {
+				log.Fatalf("ExtractTarGz: Mkdir() failed: %s", err.Error())
+			}
+		case tar.TypeReg:
+			outFile, err := os.Create(header.Name)
+			if err != nil {
+				log.Fatalf("ExtractTarGz: Create() failed: %s", err.Error())
+			}
+			if _, err := io.Copy(outFile, tarReader); err != nil {
+				log.Fatalf("ExtractTarGz: Copy() failed: %s", err.Error())
+			}
+			outFile.Close()
+
+		default:
+			log.Fatalf(
+				"ExtractTarGz: unknown type: %v in %s",
+				header.Typeflag,
+				header.Name)
+		}
+
+	}
+}
+
+func unpackTestsJson(path string) error {
+	r, err := os.Open(fmt.Sprintf("%s.gz", path))
+	if err != nil {
+		return errors.Wrap(err, "could not open file")
+	}
+	ExtractTarGz(r)
+
+	return nil
+}
diff --git a/protocol/v2/types/domain.go b/protocol/v2/types/domain.go
deleted file mode 100644
index ae3af02bed..0000000000
--- a/protocol/v2/types/domain.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package types
-
-import (
-	spectypes "github.com/ssvlabs/ssv-spec/types"
-
-	"github.com/ssvlabs/ssv/networkconfig"
-)
-
-// TODO: get rid of singleton, pass domain as a parameter
-var (
-	domain = networkconfig.Mainnet.Domain
-)
-
-// GetDefaultDomain returns the global domain used across the system
-// DEPRECATED: use networkconfig.NetworkConfig.Domain instead
-func GetDefaultDomain() spectypes.DomainType {
-	return domain
-}
-
-// SetDefaultDomain updates the global domain used across the system
-// allows injecting domain for testnets
-// DEPRECATED: use networkconfig.NetworkConfig.Domain instead
-func SetDefaultDomain(d spectypes.DomainType) {
-	domain = d
-}
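// GetSSVMappingSpecTestJSON decompresses spectest/generate/tests.json.gz and
// returns the raw bytes; the json.Unmarshal into untypedTests only checks
// that the payload is well-formed. A sketch of a consuming test, where the
// module name "ssv-spec" follows the existing GetSpecTestJSON convention and
// is an assumption here:
//
//	jsonBytes, err := protocoltesting.GetSSVMappingSpecTestJSON("", "ssv-spec")
//	require.NoError(t, err)
//
//	var tests map[string]interface{}
//	require.NoError(t, json.Unmarshal(jsonBytes, &tests))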
diff --git a/protocol/v2/types/ssvshare.go b/protocol/v2/types/ssvshare.go
index dec5d914d8..16abb6c238 100644
--- a/protocol/v2/types/ssvshare.go
+++ b/protocol/v2/types/ssvshare.go
@@ -4,6 +4,7 @@ import (
 	"crypto/sha256"
 	"encoding/binary"
 	"sort"
+	"time"
 
 	"golang.org/x/exp/slices"
 
@@ -101,8 +102,8 @@ func ComputeClusterIDHash(address common.Address, operatorIds []uint64) []byte {
 }
 
 func ComputeQuorumAndPartialQuorum(committeeSize int) (quorum uint64, partialQuorum uint64) {
-	f := (committeeSize - 1) / 3
-	return uint64(f*2 + 1), uint64(f + 1)
+	f := ComputeF(committeeSize)
+	return f*2 + 1, f + 1
 }
 
 func ComputeF(committeeSize int) uint64 {
@@ -110,7 +111,7 @@ func ComputeF(committeeSize int) uint64 {
 }
 
 func ValidCommitteeSize(committeeSize int) bool {
-	f := (committeeSize - 1) / 3
+	f := ComputeF(committeeSize)
 	return (committeeSize-1)%3 == 0 && f >= 1 && f <= 4
 }
 
@@ -119,6 +120,17 @@ type Metadata struct {
 	BeaconMetadata *beaconprotocol.ValidatorMetadata
 	OwnerAddress   common.Address
 	Liquidated     bool
+
+	// lastUpdated is an internal field that can be used to track the last time the metadata was updated.
+	lastUpdated time.Time
+}
+
+func (m *Metadata) MetadataLastUpdated() time.Time {
+	return m.lastUpdated
+}
+
+func (m *Metadata) SetMetadataLastUpdated(t time.Time) {
+	m.lastUpdated = t
 }
 
 // Return a 32 bytes ID for the committee of operators
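// ComputeQuorumAndPartialQuorum now delegates to ComputeF, so the fault
// tolerance f = (committeeSize - 1) / 3 is defined in one place. Worked
// through for the committee sizes ValidCommitteeSize accepts, i.e. those
// with (n-1)%3 == 0 and 1 <= f <= 4:
//
//	n=4:  f=1, quorum=3, partialQuorum=2
//	n=7:  f=2, quorum=5, partialQuorum=3
//	n=10: f=3, quorum=7, partialQuorum=4
//	n=13: f=4, quorum=9, partialQuorum=5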
diff --git a/registry/storage/shares.go b/registry/storage/shares.go
index 628bbbc1d2..d8bcd923f2 100644
--- a/registry/storage/shares.go
+++ b/registry/storage/shares.go
@@ -6,6 +6,7 @@ import (
 	"encoding/hex"
 	"fmt"
 	"sync"
+	"time"
 
 	"github.com/attestantio/go-eth2-client/spec/phase0"
 	spectypes "github.com/ssvlabs/ssv-spec/types"
@@ -33,6 +34,10 @@ type Shares interface {
 	// List returns a list of shares, filtered by the given filters (if any).
 	List(txn basedb.Reader, filters ...SharesFilter) []*types.SSVShare
 
+	// Range calls the given function over each share.
+	// If the function returns false, the iteration stops.
+	Range(txn basedb.Reader, fn func(*types.SSVShare) bool)
+
 	// Save saves the given shares.
 	Save(txn basedb.ReadWriter, shares ...*types.SSVShare) error
 
@@ -201,6 +206,17 @@ Shares:
 	return shares
 }
 
+func (s *sharesStorage) Range(_ basedb.Reader, fn func(*types.SSVShare) bool) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	for _, share := range s.shares {
+		if !fn(share) {
+			break
+		}
+	}
+}
+
 func (s *sharesStorage) Save(rw basedb.ReadWriter, shares ...*types.SSVShare) error {
 	if len(shares) == 0 {
 		return nil
@@ -324,11 +340,16 @@ func (s *sharesStorage) Delete(rw basedb.ReadWriter, pubKey []byte) error {
 
 // UpdateValidatorMetadata updates the metadata of the given validator
 func (s *sharesStorage) UpdateValidatorMetadata(pk spectypes.ValidatorPK, metadata *beaconprotocol.ValidatorMetadata) error {
+	if metadata == nil {
+		return nil
+	}
+
 	share := s.Get(nil, pk[:])
 	if share == nil {
 		return nil
 	}
 
+	share.SetMetadataLastUpdated(time.Now())
 	share.BeaconMetadata = metadata
 	share.Share.ValidatorIndex = metadata.Index
diff --git a/registry/storage/validatorstore.go b/registry/storage/validatorstore.go
index 22c75cdb47..cf5cb65f00 100644
--- a/registry/storage/validatorstore.go
+++ b/registry/storage/validatorstore.go
@@ -45,6 +45,7 @@ type Committee struct {
 	ID         spectypes.CommitteeID
 	Operators  []spectypes.OperatorID
 	Validators []*types.SSVShare
+	Indices    []phase0.ValidatorIndex
 }
 
 // IsParticipating returns whether any validator in the committee should participate in the given epoch.
@@ -261,15 +262,18 @@ func (c *validatorStore) handleShareRemoved(pk spectypes.ValidatorPK) {
 		return
 	}
 	validators := make([]*types.SSVShare, 0, len(committee.Validators)-1)
+	indices := make([]phase0.ValidatorIndex, 0, len(committee.Validators)-1)
 	for _, validator := range committee.Validators {
 		if validator.ValidatorPubKey != pk {
 			validators = append(validators, validator)
+			indices = append(indices, validator.ValidatorIndex)
 		}
 	}
 	if len(validators) == 0 {
 		delete(c.byCommitteeID, committee.ID)
 	} else {
 		committee.Validators = validators
+		committee.Indices = indices
 	}
 
 	// Update byOperatorID
@@ -309,6 +313,13 @@ func (c *validatorStore) handleShareUpdated(share *types.SSVShare) {
 			break
 		}
 	}
+
+	for i, validator := range committee.Validators {
+		if validator.ValidatorPubKey == share.ValidatorPubKey {
+			committee.Indices[i] = share.ValidatorIndex
+			break
+		}
+	}
 }
 
 // Update byOperatorID
@@ -336,6 +347,7 @@ func buildCommittee(shares []*types.SSVShare) *Committee {
 		ID:         shares[0].CommitteeID(),
 		Operators:  make([]spectypes.OperatorID, 0, len(shares)),
 		Validators: shares,
+		Indices:    make([]phase0.ValidatorIndex, 0, len(shares)),
 	}
 
 	seenOperators := make(map[spectypes.OperatorID]struct{})
@@ -344,6 +356,7 @@ func buildCommittee(shares []*types.SSVShare) *Committee {
 		for _, shareMember := range share.Committee {
 			seenOperators[shareMember.Signer] = struct{}{}
 		}
+		committee.Indices = append(committee.Indices, share.ValidatorIndex)
 	}
 
 	committee.Operators = maps.Keys(seenOperators)
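// The new Shares.Range iterates shares in place under a read lock, avoiding
// the slice copy that List makes. A sketch of collecting the indices of
// non-liquidated shares; variable names are illustrative:
//
//	var active []phase0.ValidatorIndex
//	shares.Range(nil, func(s *types.SSVShare) bool {
//		if !s.Liquidated {
//			active = append(active, s.ValidatorIndex)
//		}
//		return true // returning false stops the iteration early
//	})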