From 6a181e3651a2880f4de082001e57f9ee3447b716 Mon Sep 17 00:00:00 2001 From: moshe-blox <89339422+moshe-blox@users.noreply.github.com> Date: Wed, 11 Sep 2024 16:00:26 +0300 Subject: [PATCH 01/35] fix: (differ) incorrect diffs (#1719) * fix: (differ) incorrect diffs * fix: funcs without param names * revert differ scope --- scripts/differ/diff.go | 97 +++++++++++++++++++++++++ scripts/differ/go.mod | 3 +- scripts/differ/go.sum | 6 +- scripts/differ/main.go | 14 ++-- scripts/differ/parser.go | 3 + scripts/differ/ui/package-lock.json | 105 +++++++++++++++------------- scripts/differ/ui/package.json | 5 +- scripts/differ/ui/pages/index.tsx | 103 +++++++++++++++------------ 8 files changed, 226 insertions(+), 110 deletions(-) create mode 100644 scripts/differ/diff.go diff --git a/scripts/differ/diff.go b/scripts/differ/diff.go new file mode 100644 index 0000000000..26c0af0a8e --- /dev/null +++ b/scripts/differ/diff.go @@ -0,0 +1,97 @@ +package main + +import ( + "bytes" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" +) + +// Diff returns the output of git diff between two given sources, excluding the diff header. +func Diff(leftName, rightName string, left, right []byte, contextLines int) ([]byte, error) { + expectChanges := !bytes.Equal(left, right) + + // Save the sources to temporary files. + dir, err := os.MkdirTemp("", "differ-git-diff") + if err != nil { + return nil, fmt.Errorf("failed to create temp dir: %w", err) + } + defer func() { + if err := os.RemoveAll(dir); err != nil { + fmt.Printf("failed to remove temp dir: %v", err) + } + }() + + leftPath := filepath.Join(dir, "left") + if err := os.WriteFile(leftPath, left, 0600); err != nil { + return nil, fmt.Errorf("failed to write left file: %w", err) + } + rightPath := filepath.Join(dir, "right") + if err := os.WriteFile(rightPath, right, 0600); err != nil { + return nil, fmt.Errorf("failed to write right file: %w", err) + } + + // Run git diff. + hasChanges := false + diff, err := exec.Command("git", "diff", "--no-index", fmt.Sprintf("-U%d", contextLines), leftPath, rightPath).CombinedOutput() // #nosec G204 + if err != nil { + var exitErr *exec.ExitError + if errors.As(err, &exitErr) { + hasChanges = exitErr.ExitCode() == 1 + } else { + return nil, fmt.Errorf("failed to execute git diff (%w): %s", err, string(diff)) + } + } + if expectChanges { + if !hasChanges { + return nil, fmt.Errorf("git diff claims no changes, but sources are different") + } + if len(diff) == 0 { + return nil, fmt.Errorf("git diff returned empty diff despite claiming changes") + } + } else { + if hasChanges { + return nil, fmt.Errorf("git diff claims changes, but sources are equal") + } + if len(diff) != 0 { + return nil, fmt.Errorf("git diff returned non-empty diff despite claiming no changes") + } + return nil, nil + } + + // Verify header. 
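+ // git diff --no-index emits a four-line header before the first hunk, for
+ // example (paths and blob hashes here are illustrative, not real output):
+ //   diff --git a/tmp/differ-git-diff/left b/tmp/differ-git-diff/right
+ //   index 1111111..2222222 100644
+ //   --- a/tmp/differ-git-diff/left
+ //   +++ b/tmp/differ-git-diff/right
+ // The prefix checks below assume exactly this shape and fail otherwise.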
+ const header1 = "diff --git " + const header2 = "index " + const header3 = "--- " + const header4 = "+++ " + + parts := bytes.SplitN(diff, []byte("\n"), 5) + if len(parts) != 5 { + return nil, fmt.Errorf("unexpected number of lines in git diff output: %d", len(parts)) + } + if !bytes.HasPrefix(parts[0], []byte(header1)) { + return nil, fmt.Errorf("unexpected header line 1 in git diff output: %q", parts[1]) + } + if !bytes.HasPrefix(parts[1], []byte(header2)) { + return nil, fmt.Errorf("unexpected header line 2 in git diff output: %q", parts[2]) + } + if !bytes.HasPrefix(parts[2], []byte(header3)) { + return nil, fmt.Errorf("unexpected header line 3 in git diff output: %q", parts[2]) + } + if !bytes.HasPrefix(parts[3], []byte(header4)) { + return nil, fmt.Errorf("unexpected header line 4 in git diff output: %q", parts[2]) + } + + // Reconstruct the diff without the header. + b := bytes.NewBuffer(nil) + fmt.Fprintf(b, "--- %s\n+++ %s\n", leftName, rightName) + b.Write(parts[4]) + diff = b.Bytes() + if len(diff) == 0 { + return nil, fmt.Errorf("empty diff") + } + + return diff, nil +} diff --git a/scripts/differ/go.mod b/scripts/differ/go.mod index 141e2a4aee..5117dcb831 100644 --- a/scripts/differ/go.mod +++ b/scripts/differ/go.mod @@ -5,10 +5,9 @@ go 1.19 require ( github.com/alecthomas/kong v0.7.1 github.com/aquasecurity/table v1.8.0 - github.com/aymanbagabas/go-udiff v0.1.2 github.com/cespare/xxhash/v2 v2.2.0 github.com/pkg/errors v0.9.1 - github.com/stretchr/testify v1.7.1 + github.com/stretchr/testify v1.8.0 golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 gopkg.in/yaml.v3 v3.0.1 ) diff --git a/scripts/differ/go.sum b/scripts/differ/go.sum index 997e40da0d..3a3d8d958e 100644 --- a/scripts/differ/go.sum +++ b/scripts/differ/go.sum @@ -4,8 +4,6 @@ github.com/alecthomas/kong v0.7.1/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqr github.com/alecthomas/repr v0.1.0 h1:ENn2e1+J3k09gyj2shc0dHr/yjaWSHRlrJ4DPMevDqE= github.com/aquasecurity/table v1.8.0 h1:9ntpSwrUfjrM6/YviArlx/ZBGd6ix8W+MtojQcM7tv0= github.com/aquasecurity/table v1.8.0/go.mod h1:eqOmvjjB7AhXFgFqpJUEE/ietg7RrMSJZXyTN8E/wZw= -github.com/aymanbagabas/go-udiff v0.1.2 h1:+GXvyNIKR4HdzSfJagEXgmK/rWjFoaOAQ1UwgeYP52c= -github.com/aymanbagabas/go-udiff v0.1.2/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -21,8 +19,10 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod 
h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= diff --git a/scripts/differ/main.go b/scripts/differ/main.go index 2626fbd1fe..34ec82238d 100644 --- a/scripts/differ/main.go +++ b/scripts/differ/main.go @@ -11,8 +11,6 @@ import ( "github.com/alecthomas/kong" "github.com/aquasecurity/table" - "github.com/aymanbagabas/go-udiff" - "github.com/aymanbagabas/go-udiff/myers" "github.com/cespare/xxhash/v2" "github.com/pkg/errors" "golang.org/x/exp/maps" @@ -135,16 +133,14 @@ func run() (changes int, err error) { if !approved { leftName := fmt.Sprintf("a/%s@%s", left.Path, left.Name) rightName := fmt.Sprintf("b/%s@%s", right.Path, right.Name) - edits := myers.ComputeEdits(left.Code, right.Code) - udiff.SortEdits(edits) - if len(edits) == 0 { + diff, err := Diff(leftName, rightName, []byte(left.Code), []byte(right.Code), 100) + if err != nil { + return 0, errors.Wrap(err, "failed to generate diff") + } + if len(diff) == 0 { // No changes. approved = true } else { - diff, err := udiff.ToUnifiedDiff(leftName, rightName, left.Code, edits) - if err != nil { - return 0, errors.Wrap(err, "failed to generate unified diff") - } fmt.Fprintf(diffFile, "diff --git %s %s\nindex %s..2222222\n%s", leftName, rightName, diffID, diff) } diff --git a/scripts/differ/parser.go b/scripts/differ/parser.go index fd77418dcc..9c1d9ed379 100644 --- a/scripts/differ/parser.go +++ b/scripts/differ/parser.go @@ -143,6 +143,9 @@ func (p *Parser) transformFuncDecl(funcDecl *ast.FuncDecl) { if funcDecl.Type.Params != nil { newList := []*ast.Field{} for _, field := range funcDecl.Type.Params.List { + if len(field.Names) == 0 { + continue + } if !p.containsIdent(field.Names[0].Name) { newList = append(newList, field) } diff --git a/scripts/differ/ui/package-lock.json b/scripts/differ/ui/package-lock.json index 431404e6cc..b725394cc3 100644 --- a/scripts/differ/ui/package-lock.json +++ b/scripts/differ/ui/package-lock.json @@ -1,5 +1,5 @@ { - "name": "diff-viewer", + "name": "ui", "lockfileVersion": 2, "requires": true, "packages": { @@ -8,10 +8,11 @@ "next": "latest", "prism-themes": "^1.9.0", "react": "^18.2.0", - "react-diff-view": "^3.0.3", + "react-diff-view": "^3.2.1", "react-dom": "^18.2.0", "refractor": "^3.6.0", - "styled-components": "^5.3.10" + "styled-components": "^5.3.10", + "unidiff": "^1.0.4" }, "devDependencies": { "@types/node": "^12.12.21", @@ -922,6 +923,14 @@ "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", "dev": true }, + "node_modules/diff": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.2.0.tgz", + "integrity": "sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A==", + "engines": { + "node": ">=0.3.1" + } + }, "node_modules/diff-match-patch": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/diff-match-patch/-/diff-match-patch-1.0.5.tgz", @@ -1044,6 +1053,11 @@ "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", "dev": true }, + "node_modules/gitdiff-parser": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/gitdiff-parser/-/gitdiff-parser-0.3.1.tgz", + "integrity": "sha512-YQJnY8aew65id8okGxKCksH3efDCJ9HzV7M9rsvd65habf39Pkh4cgYJ27AaoDMqo1X98pgNJhNMrm/kpV7UVQ==" + }, "node_modules/glob": { "version": "7.1.6", "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", @@ -1479,6 +1493,7 @@ "version": "4.1.1", 
"resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "dev": true, "engines": { "node": ">=0.10.0" } @@ -1710,23 +1725,6 @@ "node": ">=6" } }, - "node_modules/prop-types": { - "version": "15.8.1", - "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", - "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", - "peer": true, - "dependencies": { - "loose-envify": "^1.4.0", - "object-assign": "^4.1.1", - "react-is": "^16.13.1" - } - }, - "node_modules/prop-types/node_modules/react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", - "peer": true - }, "node_modules/property-information": { "version": "5.6.0", "resolved": "https://registry.npmjs.org/property-information/-/property-information-5.6.0.tgz", @@ -1771,18 +1769,19 @@ } }, "node_modules/react-diff-view": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/react-diff-view/-/react-diff-view-3.0.3.tgz", - "integrity": "sha512-orETYmQbptfMbOnbkSHH61Ew5RBTYWAO2M1MDx2ZvsEDHPygn6U8mWh7kUCm/z40YzVKmd3+8hpH872Y0uioIg==", + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/react-diff-view/-/react-diff-view-3.2.1.tgz", + "integrity": "sha512-JoDahgiyeReeH9W9lrI3Z4c4esbd/HNAOdThj6Pce/ZAukFBmXSbZ4Qv8ayo7yow+fTpRNfqtQ9gX5nArEi08w==", "dependencies": { "classnames": "^2.3.2", "diff-match-patch": "^1.0.5", + "gitdiff-parser": "^0.3.1", + "lodash": "^4.17.21", "shallow-equal": "^3.1.0", "warning": "^4.0.3" }, "peerDependencies": { - "prop-types": ">=15.6", - "react": ">=16.8" + "react": ">=16.14.0" } }, "node_modules/react-dom": { @@ -2130,6 +2129,14 @@ "node": ">=4.2.0" } }, + "node_modules/unidiff": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/unidiff/-/unidiff-1.0.4.tgz", + "integrity": "sha512-ynU0vsAXw0ir8roa+xPCUHmnJ5goc5BTM2Kuc3IJd8UwgaeRs7VSD5+eeaQL+xp1JtB92hu/Zy/Lgy7RZcr1pQ==", + "dependencies": { + "diff": "^5.1.0" + } + }, "node_modules/update-browserslist-db": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", @@ -2830,6 +2837,11 @@ "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", "dev": true }, + "diff": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.2.0.tgz", + "integrity": "sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A==" + }, "diff-match-patch": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/diff-match-patch/-/diff-match-patch-1.0.5.tgz", @@ -2925,6 +2937,11 @@ "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", "dev": true }, + "gitdiff-parser": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/gitdiff-parser/-/gitdiff-parser-0.3.1.tgz", + "integrity": "sha512-YQJnY8aew65id8okGxKCksH3efDCJ9HzV7M9rsvd65habf39Pkh4cgYJ27AaoDMqo1X98pgNJhNMrm/kpV7UVQ==" + }, "glob": { "version": "7.1.6", "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", @@ -3221,7 +3238,8 @@ "object-assign": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": 
"sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==" + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "dev": true }, "object-hash": { "version": "3.0.0", @@ -3360,25 +3378,6 @@ "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.27.0.tgz", "integrity": "sha512-t13BGPUlFDR7wRB5kQDG4jjl7XeuH6jbJGt11JHPL96qwsEHNX2+68tFXqc1/k+/jALsbSWJKUOT/hcYAZ5LkA==" }, - "prop-types": { - "version": "15.8.1", - "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", - "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", - "peer": true, - "requires": { - "loose-envify": "^1.4.0", - "object-assign": "^4.1.1", - "react-is": "^16.13.1" - }, - "dependencies": { - "react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", - "peer": true - } - } - }, "property-information": { "version": "5.6.0", "resolved": "https://registry.npmjs.org/property-information/-/property-information-5.6.0.tgz", @@ -3402,12 +3401,14 @@ } }, "react-diff-view": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/react-diff-view/-/react-diff-view-3.0.3.tgz", - "integrity": "sha512-orETYmQbptfMbOnbkSHH61Ew5RBTYWAO2M1MDx2ZvsEDHPygn6U8mWh7kUCm/z40YzVKmd3+8hpH872Y0uioIg==", + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/react-diff-view/-/react-diff-view-3.2.1.tgz", + "integrity": "sha512-JoDahgiyeReeH9W9lrI3Z4c4esbd/HNAOdThj6Pce/ZAukFBmXSbZ4Qv8ayo7yow+fTpRNfqtQ9gX5nArEi08w==", "requires": { "classnames": "^2.3.2", "diff-match-patch": "^1.0.5", + "gitdiff-parser": "^0.3.1", + "lodash": "^4.17.21", "shallow-equal": "^3.1.0", "warning": "^4.0.3" } @@ -3648,6 +3649,14 @@ "integrity": "sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==", "dev": true }, + "unidiff": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/unidiff/-/unidiff-1.0.4.tgz", + "integrity": "sha512-ynU0vsAXw0ir8roa+xPCUHmnJ5goc5BTM2Kuc3IJd8UwgaeRs7VSD5+eeaQL+xp1JtB92hu/Zy/Lgy7RZcr1pQ==", + "requires": { + "diff": "^5.1.0" + } + }, "update-browserslist-db": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", diff --git a/scripts/differ/ui/package.json b/scripts/differ/ui/package.json index e88024a35f..2dcf37ffed 100644 --- a/scripts/differ/ui/package.json +++ b/scripts/differ/ui/package.json @@ -10,10 +10,11 @@ "next": "latest", "prism-themes": "^1.9.0", "react": "^18.2.0", - "react-diff-view": "^3.0.3", + "react-diff-view": "^3.2.1", "react-dom": "^18.2.0", "refractor": "^3.6.0", - "styled-components": "^5.3.10" + "styled-components": "^5.3.10", + "unidiff": "^1.0.4" }, "devDependencies": { "@types/node": "^12.12.21", diff --git a/scripts/differ/ui/pages/index.tsx b/scripts/differ/ui/pages/index.tsx index 1617c89263..e2cc402d78 100644 --- a/scripts/differ/ui/pages/index.tsx +++ b/scripts/differ/ui/pages/index.tsx @@ -34,7 +34,7 @@ const Home: React.FC = () => { ); } - const files = parseDiff(diffString); + const files = parseDiff(diffString, { nearbySequences: "zip" }); const renderFile = ({ oldPath, @@ -57,54 +57,63 @@ const Home: React.FC = () => { const tokens = tokenize(hunks, options); return ( -
-
-
- +
+
+
+ + + + {oldPath.substring(oldPath.indexOf("@") + 1)} +
+
- -
-
-
- {oldPath.substring(0, oldPath.indexOf("@"))} -
-
- {newPath.substring(0, newPath.indexOf("@"))} +
+ {oldPath.substring(0, oldPath.indexOf("@"))} +
+
+ {newPath.substring(0, newPath.indexOf("@"))} +
{!isApproved && ( -
+
{ }; return ( -
-
+
+
{
-
{files.map(renderFile)}
+
+
{files.map(renderFile)}
+
); }; From 67ff75dffa0eefd1f58f08491e45f38ccd4201d1 Mon Sep 17 00:00:00 2001 From: Anton Korpusenko Date: Wed, 11 Sep 2024 18:08:30 +0300 Subject: [PATCH 02/35] chore: removed redundant slashableAttestationCheck (#1727) --- beacon/goclient/attest.go | 29 ----------------------------- beacon/goclient/attest_protect.go | 13 ------------- 2 files changed, 42 deletions(-) delete mode 100644 beacon/goclient/attest_protect.go diff --git a/beacon/goclient/attest.go b/beacon/goclient/attest.go index 2f9a454e01..7b8dab1dc0 100644 --- a/beacon/goclient/attest.go +++ b/beacon/goclient/attest.go @@ -9,8 +9,6 @@ import ( eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec" "github.com/attestantio/go-eth2-client/spec/phase0" - "github.com/pkg/errors" - spectypes "github.com/ssvlabs/ssv-spec/types" ) // AttesterDuties returns attester duties for a given epoch. @@ -49,32 +47,5 @@ func (gc *GoClient) GetAttestationData(slot phase0.Slot, committeeIndex phase0.C // SubmitAttestations implements Beacon interface func (gc *GoClient) SubmitAttestations(attestations []*phase0.Attestation) error { - - // TODO: better way to return error and not stop sending other attestations - for _, attestation := range attestations { - signingRoot, err := gc.getSigningRoot(attestation.Data) - if err != nil { - return errors.Wrap(err, "failed to get signing root") - } - - if err := gc.slashableAttestationCheck(gc.ctx, signingRoot); err != nil { - return errors.Wrap(err, "failed attestation slashing protection check") - } - } - return gc.client.SubmitAttestations(gc.ctx, attestations) } - -// getSigningRoot returns signing root -func (gc *GoClient) getSigningRoot(data *phase0.AttestationData) ([32]byte, error) { - epoch := gc.network.EstimatedEpochAtSlot(data.Slot) - domain, err := gc.DomainData(epoch, spectypes.DomainAttester) - if err != nil { - return [32]byte{}, err - } - root, err := gc.ComputeSigningRoot(data, domain) - if err != nil { - return [32]byte{}, err - } - return root, nil -} diff --git a/beacon/goclient/attest_protect.go b/beacon/goclient/attest_protect.go deleted file mode 100644 index 847f9469d9..0000000000 --- a/beacon/goclient/attest_protect.go +++ /dev/null @@ -1,13 +0,0 @@ -package goclient - -import ( - "context" -) - -// slashableAttestationCheck checks if an attestation is slashable by comparing it with the attesting -// history for the given public key in our DB. If it is not, we then update the history -// with new values and save it to the database. 
-func (gc *GoClient) slashableAttestationCheck(ctx context.Context, signingRoot [32]byte) error { - // TODO: Implement - return nil -} From e6e75b70f13b5247fcacb5e6c6c53132b70fea1a Mon Sep 17 00:00:00 2001 From: Anton Korpusenko Date: Wed, 11 Sep 2024 18:57:34 +0300 Subject: [PATCH 03/35] chore: removed attester and sync committee runners from post fork code (#1731) --- protocol/v2/ssv/runner/attester.go | 297 ----------------------- protocol/v2/ssv/runner/sync_committee.go | 279 --------------------- 2 files changed, 576 deletions(-) delete mode 100644 protocol/v2/ssv/runner/attester.go delete mode 100644 protocol/v2/ssv/runner/sync_committee.go diff --git a/protocol/v2/ssv/runner/attester.go b/protocol/v2/ssv/runner/attester.go deleted file mode 100644 index 0a984875ed..0000000000 --- a/protocol/v2/ssv/runner/attester.go +++ /dev/null @@ -1,297 +0,0 @@ -package runner - -// -//import ( -// "encoding/hex" -// "github.com/attestantio/go-eth2-client/spec/phase0" -// "github.com/prysmaticlabs/go-bitfield" -// specssv "github.com/ssvlabs/ssv-spec/ssv" -// spectypes "github.com/ssvlabs/ssv-spec/types" -// "github.com/ssvlabs/ssv/logging/fields" -// "github.com/ssvlabs/ssv/protocol/v2/qbft/controller" -// "github.com/ssvlabs/ssv/protocol/v2/ssv/runner/metrics" -// "go.uber.org/zap" -// "time" -//) -// -//type AttesterRunner struct { -// BaseRunner *BaseRunner -// -// beacon specssv.BeaconNode -// network specqbft.Network -// signer spectypes.BeaconSigner -// operatorSigner ssvtypes.OperatorSigner -// valCheck specqbft.ProposedValueCheckF -// -// started time.Time -// metrics metrics.ConsensusMetrics -//} -// -//func NewAttesterRunner( -// beaconNetwork spectypes.BeaconNetwork, -// share *spectypes.Share, -// qbftController *controller.Controller, -// beacon specssv.BeaconNode, -// network specqbft.Network, -// signer spectypes.BeaconSigner, -// operatorSigner ssvtypes.OperatorSigner, -// valCheck specqbft.ProposedValueCheckF, -// highestDecidedSlot phase0.Slot, -//) Runner { -// return &AttesterRunner{ -// BaseRunner: &BaseRunner{ -// RunnerRoleType: spectypes.BNRoleAttester, -// BeaconNetwork: beaconNetwork, -// Share: share, -// QBFTController: qbftController, -// highestDecidedSlot: highestDecidedSlot, -// }, -// -// beacon: beacon, -// network: network, -// signer: signer, -// operatorSigner: operatorSigner, -// valCheck: valCheck, -// -// metrics: metrics.NewConsensusMetrics(spectypes.RoleAttester), -// } -//} -// -//func (r *AttesterRunner) StartNewDuty(logger *zap.Logger, duty *spectypes.Duty) error { -// return r.BaseRunner.baseStartNewDuty(logger, r, duty) -//} -// -//// HasRunningDuty returns true if a duty is already running (StartNewDuty called and returned nil) -//func (r *AttesterRunner) HasRunningDuty() bool { -// return r.BaseRunner.hasRunningDuty() -//} -// -//func (r *AttesterRunner) ProcessPreConsensus(logger *zap.Logger, signedMsg *spectypes.SignedPartialSignatureMessage) error { -// return errors.New("no pre consensus sigs required for attester role") -//} -// -//func (r *AttesterRunner) ProcessConsensus(logger *zap.Logger, signedMsg *spectypes.SignedSSVMessage) error { -// decided, decidedValue, err := r.BaseRunner.baseConsensusMsgProcessing(logger, r, signedMsg) -// if err != nil { -// return errors.Wrap(err, "failed processing consensus message") -// } -// -// // Decided returns true only once so if it is true it must be for the current running instance -// if !decided { -// return nil -// } -// -// r.metrics.EndConsensus() -// r.metrics.StartPostConsensus() -// -// 
attestationData, err := decidedValue.GetAttestationData() -// if err != nil { -// return errors.Wrap(err, "could not get attestation data") -// } -// -// // specific duty sig -// msg, err := r.BaseRunner.signBeaconObject(r, attestationData, decidedValue.Duty.Slot, spectypes.DomainAttester) -// if err != nil { -// return errors.Wrap(err, "failed signing attestation data") -// } -// postConsensusMsg := &spectypes.PartialSignatureMessages{ -// Type: spectypes.PostConsensusPartialSig, -// Slot: decidedValue.Duty.Slot, -// Messages: []*spectypes.PartialSignatureMessage{msg}, -// } -// -// postSignedMsg, err := r.BaseRunner.signPostConsensusMsg(r, postConsensusMsg) -// if err != nil { -// return errors.Wrap(err, "could not sign post consensus msg") -// } -// -// data, err := postSignedMsg.Encode() -// if err != nil { -// return errors.Wrap(err, "failed to encode post consensus signature msg") -// } -// -// ssvMsg := &spectypes.SSVMessage{ -// MsgType: spectypes.SSVPartialSignatureMsgType, -// MsgID: spectypes.NewMsgID(r.BaseRunner.DomainTypeProvider.DomainType(), r.GetShare().ValidatorPubKey, r.BaseRunner.BeaconRoleType), -// Data: data, -// } -// -// msgToBroadcast, err := spectypes.SSVMessageToSignedSSVMessage(ssvMsg, r.BaseRunner.Share.OperatorID, r.operatorSigner.SignSSVMessage) -// if err != nil { -// return errors.Wrap(err, "could not create SignedSSVMessage from SSVMessage") -// } -// -// if err := r.GetNetwork().Broadcast(msgID, ssvMsg.GetID(), msgToBroadcast); err != nil { -// return errors.Wrap(err, "can't broadcast partial post consensus sig") -// } -// return nil -//} -// -//func (r *AttesterRunner) ProcessPostConsensus(logger *zap.Logger, signedMsg *spectypes.SignedPartialSignatureMessage) error { -// quorum, roots, err := r.BaseRunner.basePostConsensusMsgProcessing(logger, r, signedMsg) -// if err != nil { -// return errors.Wrap(err, "failed processing post consensus message") -// } -// -// duty := r.GetState().DecidedValue.Duty -// logger = logger.With(fields.Slot(duty.Slot)) -// logger.Debug("🧩 got partial signatures", -// zap.Uint64("signer", signedMsg.Signer)) -// -// if !quorum { -// return nil -// } -// -// r.metrics.EndPostConsensus() -// -// attestationData, err := r.GetState().DecidedValue.GetAttestationData() -// if err != nil { -// return errors.Wrap(err, "could not get attestation data") -// } -// -// for _, root := range roots { -// sig, err := r.GetState().ReconstructBeaconSig(r.GetState().PostConsensusContainer, root, r.GetShare().ValidatorPubKey) -// if err != nil { -// // If the reconstructed signature verification failed, fall back to verifying each partial signature -// for _, root := range roots { -// r.BaseRunner.FallBackAndVerifyEachSignature(r.GetState().PostConsensusContainer, root) -// } -// return errors.Wrap(err, "got post-consensus quorum but it has invalid signatures") -// } -// specSig := phase0.BLSSignature{} -// copy(specSig[:], sig) -// -// logger.Debug("🧩 reconstructed partial signatures", -// zap.Uint64s("signers", getPostConsensusSigners(r.GetState(), root))) -// -// aggregationBitfield := bitfield.NewBitlist(r.GetState().DecidedValue.Duty.CommitteeLength) -// aggregationBitfield.SetBitAt(duty.ValidatorCommitteeIndex, true) -// signedAtt := &phase0.Attestation{ -// Data: attestationData, -// Signature: specSig, -// AggregationBits: aggregationBitfield, -// } -// -// attestationSubmissionEnd := r.metrics.StartBeaconSubmission() -// consensusDuration := time.Since(r.started) -// -// // Submit it to the BN. 
-// start := time.Now() -// if err := r.beacon.SubmitAttestation(signedAtt); err != nil { -// r.metrics.RoleSubmissionFailed() -// logger.Error("❌ failed to submit attestation", zap.Error(err)) -// return errors.Wrap(err, "could not submit to Beacon chain reconstructed attestation") -// } -// -// attestationSubmissionEnd() -// r.metrics.EndDutyFullFlow(r.GetState().RunningInstance.State.Round) -// r.metrics.RoleSubmitted() -// -// logger.Info("✅ successfully submitted attestation", -// zap.String("block_root", hex.EncodeToString(signedAtt.Data.BeaconBlockRoot[:])), -// fields.ConsensusTime(consensusDuration), -// fields.SubmissionTime(time.Since(start)), -// fields.Height(r.BaseRunner.QBFTController.Height), -// fields.Round(r.GetState().RunningInstance.State.Round)) -// } -// r.GetState().Finished = true -// -// return nil -//} -// -//func (r *AttesterRunner) expectedPreConsensusRootsAndDomain() ([]ssz.HashRoot, phase0.DomainType, error) { -// return []ssz.HashRoot{}, spectypes.DomainError, errors.New("no expected pre consensus roots for attester") -//} -// -//// expectedPostConsensusRootsAndDomain an INTERNAL function, returns the expected post-consensus roots to sign -//func (r *AttesterRunner) expectedPostConsensusRootsAndDomain() ([]ssz.HashRoot, phase0.DomainType, error) { -// attestationData, err := r.GetState().DecidedValue.GetAttestationData() -// if err != nil { -// return nil, phase0.DomainType{}, errors.Wrap(err, "could not get attestation data") -// } -// -// return []ssz.HashRoot{attestationData}, spectypes.DomainAttester, nil -//} -// -//// executeDuty steps: -//// 1) get attestation data from BN -//// 2) start consensus on duty + attestation data -//// 3) Once consensus decides, sign partial attestation and broadcast -//// 4) collect 2f+1 partial sigs, reconstruct and broadcast valid attestation sig to the BN -//func (r *AttesterRunner) executeDuty(logger *zap.Logger, duty *spectypes.Duty) error { -// start := time.Now() -// attData, ver, err := r.GetBeaconNode().GetAttestationData(duty.Slot, duty.CommitteeIndex) -// if err != nil { -// return errors.Wrap(err, "failed to get attestation data") -// } -// logger = logger.With(zap.Duration("attestation_data_time", time.Since(start))) -// -// r.started = time.Now() -// -// r.metrics.StartDutyFullFlow() -// r.metrics.StartConsensus() -// -// attDataByts, err := attData.MarshalSSZ() -// if err != nil { -// return errors.Wrap(err, "could not marshal attestation data") -// } -// -// input := &spectypes.ValidatorConsensusData{ -// Duty: *duty, -// Version: ver, -// DataSSZ: attDataByts, -// } -// -// if err := r.BaseRunner.decide(logger, r, input); err != nil { -// return errors.Wrap(err, "can't start new duty runner instance for duty") -// } -// return nil -//} -// -//func (r *AttesterRunner) GetBaseRunner() *BaseRunner { -// return r.BaseRunner -//} -// -//func (r *AttesterRunner) GetNetwork() specqbft.Network { -// return r.network -//} -// -//func (r *AttesterRunner) GetBeaconNode() specssv.BeaconNode { -// return r.beacon -//} -// -//func (r *AttesterRunner) GetShare() *spectypes.Share { -// return r.BaseRunner.Share -//} -// -//func (r *AttesterRunner) GetState() *State { -// return r.BaseRunner.State -//} -// -//func (r *AttesterRunner) GetValCheckF() specqbft.ProposedValueCheckF { -// return r.valCheck -//} -// -//func (r *AttesterRunner) GetSigner() spectypes.BeaconSigner { -// return r.signer -//} -// -//// Encode returns the encoded struct in bytes or error -//func (r *AttesterRunner) Encode() ([]byte, error) { -// return 
json.Marshal(r) -//} -// -//// Decode returns error if decoding failed -//func (r *AttesterRunner) Decode(data []byte) error { -// return json.Unmarshal(data, &r) -//} -// -//// GetRoot returns the root used for signing and verification -//func (r *AttesterRunner) GetRoot() ([32]byte, error) { -// marshaledRoot, err := r.Encode() -// if err != nil { -// return [32]byte{}, errors.Wrap(err, "could not encode DutyRunnerState") -// } -// ret := sha256.Sum256(marshaledRoot) -// return ret, nil -//} diff --git a/protocol/v2/ssv/runner/sync_committee.go b/protocol/v2/ssv/runner/sync_committee.go deleted file mode 100644 index f6a1789ba6..0000000000 --- a/protocol/v2/ssv/runner/sync_committee.go +++ /dev/null @@ -1,279 +0,0 @@ -package runner - -//import ( -// "crypto/sha256" -// "encoding/hex" -// "encoding/json" -// -// "github.com/attestantio/go-eth2-client/spec/altair" -// "github.com/attestantio/go-eth2-client/spec/phase0" -// specqbft "github.com/ssvlabs/ssv-spec/qbft" -// specssv "github.com/ssvlabs/ssv-spec/ssv" -// spectypes "github.com/ssvlabs/ssv-spec/types" -// ssz "github.com/ferranbt/fastssz" -// "github.com/pkg/errors" -// "go.uber.org/zap" -// -// "github.com/ssvlabs/ssv/logging/fields" -// "github.com/ssvlabs/ssv/protocol/v2/qbft/controller" -// "github.com/ssvlabs/ssv/protocol/v2/ssv/runner/metrics" -//) -// -//type SyncCommitteeRunner struct { -// BaseRunner *BaseRunner -// -// beacon specssv.BeaconNode -// network specqbft.Network -// signer spectypes.BeaconSigner -// operatorSigner ssvtypes.OperatorSigner -// valCheck specqbft.ProposedValueCheckF -// -// metrics metrics.ConsensusMetrics -//} -// -//func NewSyncCommitteeRunner( -// beaconNetwork spectypes.BeaconNetwork, -// share map[phase0.ValidatorIndex]*spectypes.Share, -// qbftController *controller.Controller, -// beacon specssv.BeaconNode, -// network specqbft.Network, -// signer spectypes.BeaconSigner, -// operatorSigner ssvtypes.OperatorSigner, -// valCheck specqbft.ProposedValueCheckF, -// highestDecidedSlot phase0.Slot, -//) Runner { -// return &SyncCommitteeRunner{ -// BaseRunner: &BaseRunner{ -// RunnerRoleType: spectypes, -// BeaconNetwork: beaconNetwork, -// Share: share, -// QBFTController: qbftController, -// highestDecidedSlot: highestDecidedSlot, -// }, -// -// beacon: beacon, -// network: network, -// signer: signer, -// valCheck: valCheck, -// operatorSigner: operatorSigner, -// -// metrics: metrics.NewConsensusMetrics(spectypes.RoleSyncCommittee), -// } -//} -// -//func (r *SyncCommitteeRunner) StartNewDuty(logger *zap.Logger, duty *spectypes.Duty) error { -// return r.BaseRunner.baseStartNewDuty(logger, r, duty) -//} -// -//// HasRunningDuty returns true if a duty is already running (StartNewDuty called and returned nil) -//func (r *SyncCommitteeRunner) HasRunningDuty() bool { -// return r.BaseRunner.hasRunningDuty() -//} -// -//func (r *SyncCommitteeRunner) ProcessPreConsensus(logger *zap.Logger, signedMsg *spectypes.SignedPartialSignatureMessage) error { -// return errors.New("no pre consensus sigs required for sync committee role") -//} -// -//func (r *SyncCommitteeRunner) ProcessConsensus(logger *zap.Logger, signedMsg *spectypes.SignedSSVMessage) error { -// decided, decidedValue, err := r.BaseRunner.baseConsensusMsgProcessing(logger, r, signedMsg) -// if err != nil { -// return errors.Wrap(err, "failed processing consensus message") -// } -// -// // Decided returns true only once so if it is true it must be for the current running instance -// if !decided { -// return nil -// } -// -// 
r.metrics.EndConsensus() -// r.metrics.StartPostConsensus() -// -// // specific duty sig -// root, err := decidedValue.GetSyncCommitteeBlockRoot() -// if err != nil { -// return errors.Wrap(err, "could not get sync committee block root") -// } -// msg, err := r.BaseRunner.signBeaconObject(r, spectypes.SSZBytes(root[:]), decidedValue.Duty.Slot, spectypes.DomainSyncCommittee) -// if err != nil { -// return errors.Wrap(err, "failed signing attestation data") -// } -// postConsensusMsg := &spectypes.PartialSignatureMessages{ -// Type: spectypes.PostConsensusPartialSig, -// Slot: decidedValue.Duty.Slot, -// Messages: []*spectypes.PartialSignatureMessage{msg}, -// } -// -// postSignedMsg, err := r.BaseRunner.signPostConsensusMsg(r, postConsensusMsg) -// if err != nil { -// return errors.Wrap(err, "could not sign post consensus msg") -// } -// -// data, err := postSignedMsg.Encode() -// if err != nil { -// return errors.Wrap(err, "failed to encode post consensus signature msg") -// } -// -// ssvMsg := &spectypes.SSVMessage{ -// MsgType: spectypes.SSVPartialSignatureMsgType, -// MsgID: spectypes.NewMsgID(r.BaseRunner.DomainTypeProvider.DomainType(), r.GetShare().ValidatorPubKey, r.BaseRunner.BeaconRoleType), -// Data: data, -// } -// -// msgToBroadcast, err := spectypes.SSVMessageToSignedSSVMessage(ssvMsg, r.BaseRunner.Share.OperatorID, r.operatorSigner.SignSSVMessage) -// if err != nil { -// return errors.Wrap(err, "could not create SignedSSVMessage from SSVMessage") -// } -// -// if err := r.GetNetwork().Broadcast(msgID, ssvMsg.GetID(), msgToBroadcast); err != nil { -// return errors.Wrap(err, "can't broadcast partial post consensus sig") -// } -// return nil -//} -// -//func (r *SyncCommitteeRunner) ProcessPostConsensus(logger *zap.Logger, signedMsg *spectypes.SignedPartialSignatureMessage) error { -// quorum, roots, err := r.BaseRunner.basePostConsensusMsgProcessing(logger, r, signedMsg) -// if err != nil { -// return errors.Wrap(err, "failed processing post consensus message") -// } -// -// if !quorum { -// return nil -// } -// -// r.metrics.EndPostConsensus() -// -// blockRoot, err := r.GetState().DecidedValue.GetSyncCommitteeBlockRoot() -// if err != nil { -// return errors.Wrap(err, "could not get sync committee block root") -// } -// -// for _, root := range roots { -// sig, err := r.GetState().ReconstructBeaconSig(r.GetState().PostConsensusContainer, root, r.GetShare().ValidatorPubKey) -// if err != nil { -// // If the reconstructed signature verification failed, fall back to verifying each partial signature -// for _, root := range roots { -// r.BaseRunner.FallBackAndVerifyEachSignature(r.GetState().PostConsensusContainer, root) -// } -// return errors.Wrap(err, "got post-consensus quorum but it has invalid signatures") -// } -// specSig := phase0.BLSSignature{} -// copy(specSig[:], sig) -// -// msg := &altair.SyncCommitteeMessage{ -// Slot: r.GetState().DecidedValue.Duty.Slot, -// BeaconBlockRoot: blockRoot, -// ValidatorIndex: r.GetState().DecidedValue.Duty.ValidatorIndex, -// Signature: specSig, -// } -// -// messageSubmissionEnd := r.metrics.StartBeaconSubmission() -// -// if err := r.GetBeaconNode().SubmitSyncMessage(msg); err != nil { -// r.metrics.RoleSubmissionFailed() -// return errors.Wrap(err, "could not submit to Beacon chain reconstructed signed sync committee") -// } -// -// messageSubmissionEnd() -// r.metrics.EndDutyFullFlow(r.GetState().RunningInstance.State.Round) -// r.metrics.RoleSubmitted() -// -// logger.Info("✅ successfully submitted sync committee", -// 
fields.Slot(msg.Slot), -// zap.String("block_root", hex.EncodeToString(msg.BeaconBlockRoot[:])), -// fields.Height(r.BaseRunner.QBFTController.Height), -// fields.Round(r.GetState().RunningInstance.State.Round)) -// } -// r.GetState().Finished = true -// -// return nil -//} -// -//func (r *SyncCommitteeRunner) expectedPreConsensusRootsAndDomain() ([]ssz.HashRoot, phase0.DomainType, error) { -// return []ssz.HashRoot{}, spectypes.DomainError, errors.New("no expected pre consensus roots for sync committee") -//} -// -//// expectedPostConsensusRootsAndDomain an INTERNAL function, returns the expected post-consensus roots to sign -//func (r *SyncCommitteeRunner) expectedPostConsensusRootsAndDomain() ([]ssz.HashRoot, phase0.DomainType, error) { -// root, err := r.GetState().DecidedValue.GetSyncCommitteeBlockRoot() -// if err != nil { -// return nil, phase0.DomainType{}, errors.Wrap(err, "could not get sync committee block root") -// } -// -// return []ssz.HashRoot{spectypes.SSZBytes(root[:])}, spectypes.DomainSyncCommittee, nil -//} -// -//// executeDuty steps: -//// 1) get sync block root from BN -//// 2) start consensus on duty + block root data -//// 3) Once consensus decides, sign partial block root and broadcast -//// 4) collect 2f+1 partial sigs, reconstruct and broadcast valid sync committee sig to the BN -//func (r *SyncCommitteeRunner) executeDuty(logger *zap.Logger, duty *spectypes.Duty) error { -// // TODO - waitOneThirdOrValidBlock -// -// root, ver, err := r.GetBeaconNode().GetSyncMessageBlockRoot(duty.Slot) -// if err != nil { -// return errors.Wrap(err, "failed to get sync committee block root") -// } -// -// r.metrics.StartDutyFullFlow() -// r.metrics.StartConsensus() -// -// input := &spectypes.ValidatorConsensusData{ -// Duty: *duty, -// Version: ver, -// DataSSZ: root[:], -// } -// -// if err := r.BaseRunner.decide(logger, r, input); err != nil { -// return errors.Wrap(err, "can't start new duty runner instance for duty") -// } -// return nil -//} -// -//func (r *SyncCommitteeRunner) GetBaseRunner() *BaseRunner { -// return r.BaseRunner -//} -// -//func (r *SyncCommitteeRunner) GetNetwork() specqbft.Network { -// return r.network -//} -// -//func (r *SyncCommitteeRunner) GetBeaconNode() specssv.BeaconNode { -// return r.beacon -//} -// -//func (r *SyncCommitteeRunner) GetShare() *spectypes.Share { -// return r.BaseRunner.Share -//} -// -//func (r *SyncCommitteeRunner) GetState() *State { -// return r.BaseRunner.State -//} -// -//func (r *SyncCommitteeRunner) GetValCheckF() specqbft.ProposedValueCheckF { -// return r.valCheck -//} -// -//func (r *SyncCommitteeRunner) GetSigner() spectypes.BeaconSigner { -// return r.signer -//} -// -//// Encode returns the encoded struct in bytes or error -//func (r *SyncCommitteeRunner) Encode() ([]byte, error) { -// return json.Marshal(r) -//} -// -//// Decode returns error if decoding failed -//func (r *SyncCommitteeRunner) Decode(data []byte) error { -// return json.Unmarshal(data, &r) -//} -// -//// GetRoot returns the root used for signing and verification -//func (r *SyncCommitteeRunner) GetRoot() ([32]byte, error) { -// marshaledRoot, err := r.Encode() -// if err != nil { -// return [32]byte{}, errors.Wrap(err, "could not encode DutyRunnerState") -// } -// ret := sha256.Sum256(marshaledRoot) -// return ret, nil -//} From 1e245d45914f3f9abf2fb6673b0e09476b01a646 Mon Sep 17 00:00:00 2001 From: MatheusFranco99 <48058141+MatheusFranco99@users.noreply.github.com> Date: Sun, 15 Sep 2024 15:20:09 +0100 Subject: [PATCH 04/35] feat: (p2p) drop 
bad or irrelevant peers (#1707) * Add the BadPeersCollector structure * Make scoreInspector register bad peers * Add BadPeersCollector to p2pNetwork and disconnect from bad peers * Add feature to disconnect from irrelevant peers * Add log and treat error * Refactor BadPeersCollector to GossipSubScoreIndex. Include it in PeersIndex * Fix comment * Improve comments * Add left change * Add fork epoch to test * Debug: add logs * Debug: reduce score inspect interval * Debug: Update connMng interval * Only perform further disconnections if couldn't disconnect yet * Return score on IsBad call * Log current number of peers * Debug: decrease the function interval * Debug: change fork epoch * Revert debug changes * Add check in ConnGater * Add check in HandshakeFilter * added basic UT * set all scores at once. * Fix lint issue. Deprecate unused methods * Update network/peers/conn_manager.go Co-authored-by: moshe-blox <89339422+moshe-blox@users.noreply.github.com> * Update network/peers/conn_manager.go Co-authored-by: moshe-blox <89339422+moshe-blox@users.noreply.github.com> * Update network/peers/conn_manager.go Co-authored-by: moshe-blox <89339422+moshe-blox@users.noreply.github.com> * Update network/peers/conn_manager.go Co-authored-by: moshe-blox <89339422+moshe-blox@users.noreply.github.com> * rename GossipSubScore to GossipScore * Disconnect from bad peers regardless of amount of peers * Fix GossipSub -> Gossip names * also preventing outbound dials to bad peers * minor refactors --------- Co-authored-by: y0sher Co-authored-by: moshe-blox <89339422+moshe-blox@users.noreply.github.com> Co-authored-by: moshe-blox --- network/p2p/p2p.go | 36 +++++++-- network/p2p/p2p_setup.go | 17 ++++- network/peers/conn_manager.go | 90 +++++++++++++++++++---- network/peers/conn_manager_test.go | 2 +- network/peers/connections/conn_gater.go | 15 +++- network/peers/connections/filters.go | 12 +++ network/peers/gossip_score_index_test.go | 93 ++++++++++++++++++++++++ network/peers/gossipsub_score_index.go | 59 +++++++++++++++ network/peers/index.go | 11 +++ network/peers/peers_index.go | 41 ++++++++--- network/peers/subnets.go | 3 +- network/topics/controller_test.go | 2 +- network/topics/pubsub.go | 4 +- network/topics/scoring.go | 25 +++++-- 14 files changed, 363 insertions(+), 47 deletions(-) create mode 100644 network/peers/gossip_score_index_test.go create mode 100644 network/peers/gossipsub_score_index.go diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index df17c886f3..4a412ca337 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -48,11 +48,12 @@ const ( ) const ( - connManagerGCInterval = 3 * time.Minute - connManagerGCTimeout = time.Minute - peersReportingInterval = 60 * time.Second - peerIdentitiesReportingInterval = 5 * time.Minute - topicsReportingInterval = 180 * time.Second + connManagerBalancingInterval = 3 * time.Minute + connManagerBalancingTimeout = time.Minute + peersReportingInterval = 60 * time.Second + peerIdentitiesReportingInterval = 5 * time.Minute + topicsReportingInterval = 180 * time.Second + maximumIrrelevantPeersToDisconnect = 3 ) // PeersIndexProvider holds peers index instance @@ -257,7 +258,7 @@ func (n *p2pNetwork) Start(logger *zap.Logger) error { go n.startDiscovery(logger, connector) - async.Interval(n.ctx, connManagerGCInterval, n.peersBalancing(logger)) + async.Interval(n.ctx, connManagerBalancingInterval, n.peersBalancing(logger)) // don't report metrics in tests if n.cfg.Metrics != nil { async.Interval(n.ctx, peersReportingInterval, n.reportAllPeers(logger)) @@ 
-274,19 +275,38 @@ func (n *p2pNetwork) Start(logger *zap.Logger) error { return nil } +// Returns a function that balances the peers. +// Balancing is performed by: +// - Dropping peers with bad Gossip score. +// - Dropping irrelevant peers that don't have any subnet in common. +// - Tagging the best MaxPeers-1 peers (according to subnets intersection) as Protected and, then, removing the worst peer. func (n *p2pNetwork) peersBalancing(logger *zap.Logger) func() { return func() { allPeers := n.host.Network().Peers() + connMgr := peers.NewConnManager(logger, n.libConnManager, n.idx, n.idx) + + // Disconnect from bad peers + connMgr.DisconnectFromBadPeers(logger, n.host.Network(), allPeers) + + // Check if it has the maximum number of connections currentCount := len(allPeers) if currentCount < n.cfg.MaxPeers { _ = n.idx.GetSubnetsStats() // trigger metrics update return } - ctx, cancel := context.WithTimeout(n.ctx, connManagerGCTimeout) + + ctx, cancel := context.WithTimeout(n.ctx, connManagerBalancingTimeout) defer cancel() - connMgr := peers.NewConnManager(logger, n.libConnManager, n.idx) mySubnets := records.Subnets(n.activeSubnets).Clone() + + // Disconnect from irrelevant peers + disconnectedPeers := connMgr.DisconnectFromIrrelevantPeers(logger, maximumIrrelevantPeersToDisconnect, n.host.Network(), allPeers, mySubnets) + if disconnectedPeers > 0 { + return + } + + // Trim peers according to subnet participation (considering the subnet size) connMgr.TagBestPeers(logger, n.cfg.MaxPeers-1, mySubnets, allPeers, n.cfg.TopicMaxPeers) connMgr.TrimPeers(ctx, logger, n.host.Network()) } diff --git a/network/p2p/p2p_setup.go b/network/p2p/p2p_setup.go index a814b63096..65f2d72f71 100644 --- a/network/p2p/p2p_setup.go +++ b/network/p2p/p2p_setup.go @@ -12,6 +12,7 @@ import ( "github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" libp2pdiscbackoff "github.com/libp2p/go-libp2p/p2p/discovery/backoff" basichost "github.com/libp2p/go-libp2p/p2p/host/basic" rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" @@ -108,6 +109,14 @@ func (n *p2pNetwork) initCfg() error { return nil } +// Returns whether a peer is bad +func (n *p2pNetwork) IsBadPeer(logger *zap.Logger, peerID peer.ID) bool { + if n.idx == nil { + return false + } + return n.idx.IsBad(logger, peerID) +} + // SetupHost configures a libp2p host and backoff connector utility func (n *p2pNetwork) SetupHost(logger *zap.Logger) error { opts, err := n.cfg.Libp2pOptions(logger) @@ -121,7 +130,7 @@ func (n *p2pNetwork) SetupHost(logger *zap.Logger) error { if err != nil { return errors.Wrap(err, "could not create resource manager") } - n.connGater = connections.NewConnectionGater(logger, n.cfg.DisableIPRateLimit, n.connectionsAtLimit) + n.connGater = connections.NewConnectionGater(logger, n.cfg.DisableIPRateLimit, n.connectionsAtLimit, n.IsBadPeer) opts = append(opts, libp2p.ResourceManager(rmgr), libp2p.ConnectionGater(n.connGater)) host, err := libp2p.New(opts...)
if err != nil { @@ -180,7 +189,7 @@ func (n *p2pNetwork) setupPeerServices(logger *zap.Logger) error { return libPrivKey } - n.idx = peers.NewPeersIndex(logger, n.host.Network(), self, n.getMaxPeers, getPrivKey, p2pcommons.Subnets(), 10*time.Minute) + n.idx = peers.NewPeersIndex(logger, n.host.Network(), self, n.getMaxPeers, getPrivKey, p2pcommons.Subnets(), 10*time.Minute, peers.NewGossipScoreIndex()) logger.Debug("peers index is ready") var ids identify.IDService @@ -198,11 +207,13 @@ func (n *p2pNetwork) setupPeerServices(logger *zap.Logger) error { return n.activeSubnets } + // Handshake filters filters := func() []connections.HandshakeFilter { newDomain := n.cfg.Network.DomainType() newDomainString := "0x" + hex.EncodeToString(newDomain[:]) return []connections.HandshakeFilter{ connections.NetworkIDFilter(newDomainString), + connections.BadPeerFilter(logger, n.idx), } } @@ -307,7 +318,7 @@ func (n *p2pNetwork) setupPubsub(logger *zap.Logger) error { // run GC every 3 minutes to clear old messages async.RunEvery(n.ctx, time.Minute*3, midHandler.GC) - _, tc, err := topics.NewPubSub(n.ctx, logger, cfg, n.metrics, n.nodeStorage.ValidatorStore()) + _, tc, err := topics.NewPubSub(n.ctx, logger, cfg, n.metrics, n.nodeStorage.ValidatorStore(), n.idx) if err != nil { return errors.Wrap(err, "could not setup pubsub") } diff --git a/network/peers/conn_manager.go b/network/peers/conn_manager.go index 3c04b34da9..38b1886aa3 100644 --- a/network/peers/conn_manager.go +++ b/network/peers/conn_manager.go @@ -12,6 +12,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "go.uber.org/zap" + "github.com/ssvlabs/ssv/logging/fields" "github.com/ssvlabs/ssv/network/records" ) @@ -29,25 +30,37 @@ type ConnManager interface { TagBestPeers(logger *zap.Logger, n int, mySubnets records.Subnets, allPeers []peer.ID, topicMaxPeers int) // TrimPeers will trim unprotected peers. TrimPeers(ctx context.Context, logger *zap.Logger, net libp2pnetwork.Network) + // DisconnectFromBadPeers will disconnect from bad peers according to their Gossip scores. It returns the number of disconnected peers. + DisconnectFromBadPeers(logger *zap.Logger, net libp2pnetwork.Network, allPeers []peer.ID) int + // DisconnectFromIrrelevantPeers will disconnect from at most [disconnectQuota] peers that doesn't share any subnet in common. It returns the number of disconnected peers. + DisconnectFromIrrelevantPeers(logger *zap.Logger, disconnectQuota int, net libp2pnetwork.Network, allPeers []peer.ID, mySubnets records.Subnets) int +} + +// connManager implements ConnManager +type connManager struct { + logger *zap.Logger + connManager connmgrcore.ConnManager + subnetsIdx SubnetsIndex + gossipScoreIndex GossipScoreIndex } // NewConnManager creates a new conn manager. // multiple instances can be created, but concurrency is not supported. 
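+// A typical construction mirrors the call site in p2p.go (the variable names here are illustrative):
+//
+//	cm := peers.NewConnManager(logger, libp2pConnMgr, peersIndex, peersIndex)
+//
+// where the same peers index is passed twice because it implements both SubnetsIndex and GossipScoreIndex.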
-func NewConnManager(logger *zap.Logger, connMgr connmgrcore.ConnManager, subnetsIdx SubnetsIndex) ConnManager { +func NewConnManager(logger *zap.Logger, connMgr connmgrcore.ConnManager, subnetsIdx SubnetsIndex, gossipScoreIndex GossipScoreIndex) ConnManager { return &connManager{ - logger: logger, - connManager: connMgr, - subnetsIdx: subnetsIdx, + logger: logger, + connManager: connMgr, + subnetsIdx: subnetsIdx, + gossipScoreIndex: gossipScoreIndex, } } -// connManager implements ConnManager -type connManager struct { - logger *zap.Logger - connManager connmgrcore.ConnManager - subnetsIdx SubnetsIndex +// Disconnects from a peer +func (c connManager) disconnect(peerID peer.ID, net libp2pnetwork.Network) error { + return net.ClosePeer(peerID) } +// Set the "Protect" tag for the best [n] peers. For the others, set the "Unprotect" tag func (c connManager) TagBestPeers(logger *zap.Logger, n int, mySubnets records.Subnets, allPeers []peer.ID, topicMaxPeers int) { bestPeers := c.getBestPeers(n, mySubnets, allPeers, topicMaxPeers) logger.Debug("tagging best peers", @@ -66,6 +79,7 @@ func (c connManager) TagBestPeers(logger *zap.Logger, n int, mySubnets records.S } } +// Closes the connection to all peers that are not protected func (c connManager) TrimPeers(ctx context.Context, logger *zap.Logger, net libp2pnetwork.Network) { allPeers := net.Peers() before := len(allPeers) @@ -73,22 +87,19 @@ func (c connManager) TrimPeers(ctx context.Context, logger *zap.Logger, net libp // c.connManager.TrimOpenConns(ctx) for _, pid := range allPeers { if !c.connManager.IsProtected(pid, protectedTag) { - err := net.ClosePeer(pid) - logger.Debug("closing peer", zap.String("pid", pid.String()), zap.Error(err)) - // if err != nil { - // logger.Debug("could not close trimmed peer", - // zap.String("pid", pid.String()), zap.Error(err)) - //} + err := c.disconnect(pid, net) + logger.Debug("closing peer", fields.PeerID(pid), zap.Error(err)) } } logger.Debug("trimmed peers", zap.Int("beforeTrim", before), zap.Int("afterTrim", len(net.Peers()))) } -// getBestPeers loop over all the existing peers and returns the best set +// getBestPeers loop over all the existing peers and returns the best set with [n] peers // according to the number of shared subnets, // while considering subnets with low peer count to be more important. 
func (c connManager) getBestPeers(n int, mySubnets records.Subnets, allPeers []peer.ID, topicMaxPeers int) map[peer.ID]PeerScore { + // If we have less than n peers, just return all as the best peers peerScores := make(map[peer.ID]PeerScore) if len(allPeers) < n { for _, p := range allPeers { @@ -96,10 +107,13 @@ func (c connManager) getBestPeers(n int, mySubnets records.Subnets, allPeers []p } return peerScores } + + // Get score for each subnet stats := c.subnetsIdx.GetSubnetsStats() minSubnetPeers := 4 subnetsScores := GetSubnetsDistributionScores(stats, minSubnetPeers, mySubnets, topicMaxPeers) + // Compute the score for each peer according to peer's subnets and subnets' score var peerLogs []peerLog for _, pid := range allPeers { peerSubnets := c.subnetsIdx.GetPeerSubnets(pid) @@ -121,6 +135,7 @@ func (c connManager) getBestPeers(n int, mySubnets records.Subnets, allPeers []p c.logPeerScores(peerLogs, mySubnets, stats.Connected) + // Returns the [n] best peers return GetTopScores(peerScores, n) } @@ -192,3 +207,46 @@ func scorePeer(peerSubnets records.Subnets, subnetsScores []float64) PeerScore { } return PeerScore(score) } + +// DisconnectFromBadPeers will disconnect from bad peers according to their Gossip scores. It returns the number of disconnected peers. +func (c connManager) DisconnectFromBadPeers(logger *zap.Logger, net libp2pnetwork.Network, allPeers []peer.ID) int { + disconnectedPeers := 0 + for _, peerID := range allPeers { + // Disconnect if peer has bad gossip score. + if isBad, gossipScore := c.gossipScoreIndex.HasBadGossipScore(peerID); isBad { + err := c.disconnect(peerID, net) + if err != nil { + logger.Error("failed to disconnect from bad peer", fields.PeerID(peerID), zap.Float64("gossip_score", gossipScore)) + } else { + logger.Debug("disconnecting from bad peer", fields.PeerID(peerID), zap.Float64("gossip_score", gossipScore)) + disconnectedPeers++ + } + } + } + + return disconnectedPeers +} + +// DisconnectFromIrrelevantPeers will disconnect from at most [disconnectQuota] peers that doesn't share any subnet in common. It returns the number of disconnected peers. +func (c connManager) DisconnectFromIrrelevantPeers(logger *zap.Logger, disconnectQuota int, net libp2pnetwork.Network, allPeers []peer.ID, mySubnets records.Subnets) int { + disconnectedPeers := 0 + for _, peerID := range allPeers { + peerSubnets := c.subnetsIdx.GetPeerSubnets(peerID) + sharedSubnets := records.SharedSubnets(mySubnets, peerSubnets, len(mySubnets)) + + // If there's no common subnet, disconnect from peer. 
+ if len(sharedSubnets) == 0 { + err := c.disconnect(peerID, net) + if err != nil { + logger.Error("failed to disconnect from peer with irrelevant subnets", fields.PeerID(peerID)) + } else { + logger.Debug("disconnecting from peer with irrelevant subnets", fields.PeerID(peerID)) + disconnectedPeers++ + if disconnectedPeers >= disconnectQuota { + return disconnectedPeers + } + } + } + } + return disconnectedPeers +} diff --git a/network/peers/conn_manager_test.go b/network/peers/conn_manager_test.go index bc14197587..17cb8ac53a 100644 --- a/network/peers/conn_manager_test.go +++ b/network/peers/conn_manager_test.go @@ -22,7 +22,7 @@ func TestTagBestPeers(t *testing.T) { allSubs, _ := records.Subnets{}.FromString(records.AllSubnets) si := NewSubnetsIndex(len(allSubs)) - cm := NewConnManager(zap.NewNop(), connMgrMock, si).(*connManager) + cm := NewConnManager(zap.NewNop(), connMgrMock, si, nil).(*connManager) pids, err := createPeerIDs(50) require.NoError(t, err) diff --git a/network/peers/connections/conn_gater.go b/network/peers/connections/conn_gater.go index 4751976f2b..ffa000a593 100644 --- a/network/peers/connections/conn_gater.go +++ b/network/peers/connections/conn_gater.go @@ -12,6 +12,7 @@ import ( ma "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" leakybucket "github.com/prysmaticlabs/prysm/v4/container/leaky-bucket" + "github.com/ssvlabs/ssv/logging/fields" "go.uber.org/zap" ) @@ -24,6 +25,8 @@ const ( // ) +type BadPeerF func(logger *zap.Logger, peerID peer.ID) bool + // connGater implements ConnectionGater interface: // https://github.com/libp2p/go-libp2p/core/blob/master/connmgr/gater.go type connGater struct { @@ -31,15 +34,17 @@ type connGater struct { disable bool atLimit func() bool ipLimiter *leakybucket.Collector + isBadPeer BadPeerF } // NewConnectionGater creates a new instance of ConnectionGater -func NewConnectionGater(logger *zap.Logger, disable bool, atLimit func() bool) connmgr.ConnectionGater { +func NewConnectionGater(logger *zap.Logger, disable bool, atLimit func() bool, isBadPeerF BadPeerF) connmgr.ConnectionGater { return &connGater{ logger: logger, disable: disable, atLimit: atLimit, ipLimiter: leakybucket.NewCollector(ipLimitRate, ipLimitBurst, ipLimitPeriod, true), + isBadPeer: isBadPeerF, } } @@ -54,6 +59,10 @@ func (n *connGater) InterceptPeerDial(id peer.ID) bool { // particular address. Blocking connections at this stage is typical for // address filtering. func (n *connGater) InterceptAddrDial(id peer.ID, multiaddr ma.Multiaddr) bool { + if n.isBadPeer(n.logger, id) { + n.logger.Debug("preventing outbound connection due to bad peer", fields.PeerID(id)) + return false + } return true } @@ -79,6 +88,10 @@ func (n *connGater) InterceptAccept(multiaddrs libp2pnetwork.ConnMultiaddrs) boo // InterceptSecured is called for both inbound and outbound connections, // after a security handshake has taken place and we've authenticated the peer. 
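+// Note: together with the isBadPeer check added in InterceptAddrDial above, peers with a bad
+// gossip score are now rejected on both outbound dials and inbound (secured) connections.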
func (n *connGater) InterceptSecured(direction libp2pnetwork.Direction, id peer.ID, multiaddrs libp2pnetwork.ConnMultiaddrs) bool { + if n.isBadPeer(n.logger, id) { + n.logger.Debug("rejecting inbound connection due to bad peer", fields.PeerID(id)) + return false + } return true } diff --git a/network/peers/connections/filters.go b/network/peers/connections/filters.go index 7d05a265d4..d814042ff3 100644 --- a/network/peers/connections/filters.go +++ b/network/peers/connections/filters.go @@ -3,7 +3,9 @@ package connections import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/pkg/errors" + "go.uber.org/zap" + "github.com/ssvlabs/ssv/network/peers" "github.com/ssvlabs/ssv/network/records" ) @@ -18,4 +20,14 @@ func NetworkIDFilter(networkID string) HandshakeFilter { } } +// BadPeerFilter avoids connecting to a bad peer +func BadPeerFilter(logger *zap.Logger, n peers.Index) HandshakeFilter { + return func(senderID peer.ID, sni *records.NodeInfo) error { + if n.IsBad(logger, senderID) { + return errors.New("bad peer") + } + return nil + } +} + // TODO: filter based on domaintype diff --git a/network/peers/gossip_score_index_test.go b/network/peers/gossip_score_index_test.go new file mode 100644 index 0000000000..61e8575dc7 --- /dev/null +++ b/network/peers/gossip_score_index_test.go @@ -0,0 +1,93 @@ +package peers + +import ( + "testing" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" +) + +func TestGetGossipScore(t *testing.T) { + index := NewGossipScoreIndex() + peerID := peer.ID("peer1") + peerID2 := peer.ID("peer2") + + score, exists := index.GetGossipScore(peerID) + require.False(t, exists) + require.Equal(t, 0.0, score) + + score, exists = index.GetGossipScore(peerID2) + require.False(t, exists) + require.Equal(t, 0.0, score) + + index.SetScores(map[peer.ID]float64{ + peerID: 10.0, + }) + score, exists = index.GetGossipScore(peerID) + require.True(t, exists) + require.Equal(t, 10.0, score) + + score, exists = index.GetGossipScore(peerID2) + require.False(t, exists) + require.Equal(t, 0.0, score) +} + +func TestSetScores(t *testing.T) { + index := NewGossipScoreIndex() + peerID := peer.ID("peer1") + peerID2 := peer.ID("peer2") + peerID3 := peer.ID("peer3") + + index.SetScores(map[peer.ID]float64{ + peerID: 10.0, + peerID2: -100.0, + }) + + score, exists := index.GetGossipScore(peerID) + require.True(t, exists) + require.Equal(t, 10.0, score) + + score2, exists2 := index.GetGossipScore(peerID2) + require.True(t, exists2) + require.Equal(t, -100.0, score2) + + score3, exists3 := index.GetGossipScore(peerID3) + require.False(t, exists3) + require.Equal(t, 0.0, score3) +} + +func TestClear(t *testing.T) { + index := NewGossipScoreIndex() + peerID := peer.ID("peer1") + + index.SetScores(map[peer.ID]float64{ + peerID: 10.0, + }) + index.clear() + score, exists := index.GetGossipScore(peerID) + require.False(t, exists) + require.Equal(t, 0.0, score) +} + +func TestHasBadGossipScore(t *testing.T) { + index := NewGossipScoreIndex() + peerID := peer.ID("peer1") + + bad, score := index.HasBadGossipScore(peerID) + require.False(t, bad) + require.Equal(t, 0.0, score) + + index.SetScores(map[peer.ID]float64{ + peerID: index.graylistThreshold - 1, + }) + bad, score = index.HasBadGossipScore(peerID) + require.True(t, bad) + require.Equal(t, index.graylistThreshold-1, score) + + index.SetScores(map[peer.ID]float64{ + peerID: index.graylistThreshold + 1, + }) + bad, score = index.HasBadGossipScore(peerID) + require.False(t, bad) + require.Equal(t, 
index.graylistThreshold+1, score) +} diff --git a/network/peers/gossipsub_score_index.go b/network/peers/gossipsub_score_index.go new file mode 100644 index 0000000000..19aef42a92 --- /dev/null +++ b/network/peers/gossipsub_score_index.go @@ -0,0 +1,59 @@ +package peers + +import ( + "sync" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/ssvlabs/ssv/network/topics/params" +) + +// Implements GossipScoreIndex +type gossipScoreIndex struct { + score map[peer.ID]float64 + mutex sync.RWMutex + + graylistThreshold float64 +} + +func NewGossipScoreIndex() *gossipScoreIndex { + + graylistThreshold := params.PeerScoreThresholds().GraylistThreshold + + return &gossipScoreIndex{ + score: make(map[peer.ID]float64), + graylistThreshold: graylistThreshold, + } +} + +func (g *gossipScoreIndex) GetGossipScore(peerID peer.ID) (float64, bool) { + g.mutex.RLock() + defer g.mutex.RUnlock() + + if score, exists := g.score[peerID]; exists { + return score, true + } + return 0.0, false +} + +func (g *gossipScoreIndex) SetScores(peerScores map[peer.ID]float64) { + g.mutex.Lock() + defer g.mutex.Unlock() + + g.clear() + // Copy the map + for peerID, score := range peerScores { + g.score[peerID] = score + } +} + +func (g *gossipScoreIndex) clear() { + g.score = make(map[peer.ID]float64) +} + +func (g *gossipScoreIndex) HasBadGossipScore(peerID peer.ID) (bool, float64) { + score, exists := g.GetGossipScore(peerID) + if !exists { + return false, 0.0 + } + return (score <= g.graylistThreshold), score +} diff --git a/network/peers/index.go b/network/peers/index.go index f518633a9d..a102aca9fb 100644 --- a/network/peers/index.go +++ b/network/peers/index.go @@ -112,6 +112,16 @@ type SubnetsIndex interface { GetSubnetsStats() *SubnetsStats } +// GossipScoreIndex serves as an interface to get a peer's Gossip score +type GossipScoreIndex interface { + // AddScore adds a score for a peer + SetScores(scores map[peer.ID]float64) + // GetGossipScore returns the peer score and a boolean flag for whether it has such score or not + GetGossipScore(peerID peer.ID) (float64, bool) + // HasBadGossipScore returns true if the peer has a bad Gossip score + HasBadGossipScore(peerID peer.ID) (bool, float64) +} + // Index is a facade interface of this package type Index interface { ConnectionIndex @@ -120,4 +130,5 @@ type Index interface { ScoreIndex SubnetsIndex io.Closer + GossipScoreIndex } diff --git a/network/peers/peers_index.go b/network/peers/peers_index.go index 269271a830..30e2f412d4 100644 --- a/network/peers/peers_index.go +++ b/network/peers/peers_index.go @@ -34,28 +34,37 @@ type peersIndex struct { self *records.NodeInfo maxPeers MaxPeersProvider + + gossipScoreIndex GossipScoreIndex } // NewPeersIndex creates a new Index func NewPeersIndex(logger *zap.Logger, network libp2pnetwork.Network, self *records.NodeInfo, maxPeers MaxPeersProvider, - netKeyProvider NetworkKeyProvider, subnetsCount int, pruneTTL time.Duration) *peersIndex { + netKeyProvider NetworkKeyProvider, subnetsCount int, pruneTTL time.Duration, gossipScoreIndex GossipScoreIndex) *peersIndex { + return &peersIndex{ - network: network, - scoreIdx: newScoreIndex(), - SubnetsIndex: NewSubnetsIndex(subnetsCount), - PeerInfoIndex: NewPeerInfoIndex(), - self: self, - selfLock: &sync.RWMutex{}, - maxPeers: maxPeers, - netKeyProvider: netKeyProvider, + network: network, + scoreIdx: newScoreIndex(), + SubnetsIndex: NewSubnetsIndex(subnetsCount), + PeerInfoIndex: NewPeerInfoIndex(), + self: self, + selfLock: &sync.RWMutex{}, + maxPeers: maxPeers, + 
netKeyProvider: netKeyProvider, + gossipScoreIndex: gossipScoreIndex, } } // IsBad returns whether the given peer is bad. // a peer is considered to be bad if one of the following applies: +// - bad gossip score // - pruned (that was not expired) // - bad score func (pi *peersIndex) IsBad(logger *zap.Logger, id peer.ID) bool { + if isBad, _ := pi.HasBadGossipScore(id); isBad { + return true + } + // TODO: check scores threshold := -10000.0 scores, err := pi.GetScore(id, "") @@ -63,6 +72,7 @@ func (pi *peersIndex) IsBad(logger *zap.Logger, id peer.ID) bool { // logger.Debug("could not read score", zap.Error(err)) return false } + for _, score := range scores { if score.Value < threshold { logger.Debug("bad peer (low score)") @@ -183,3 +193,16 @@ func (pi *peersIndex) Close() error { } return nil } + +// GossipScoreIndex methods +func (pi *peersIndex) SetScores(scores map[peer.ID]float64) { + pi.gossipScoreIndex.SetScores(scores) +} + +func (pi *peersIndex) GetGossipScore(peerID peer.ID) (float64, bool) { + return pi.gossipScoreIndex.GetGossipScore(peerID) +} + +func (pi *peersIndex) HasBadGossipScore(peerID peer.ID) (bool, float64) { + return pi.gossipScoreIndex.HasBadGossipScore(peerID) +} diff --git a/network/peers/subnets.go b/network/peers/subnets.go index 0bfcee2942..5b347d80aa 100644 --- a/network/peers/subnets.go +++ b/network/peers/subnets.go @@ -118,7 +118,8 @@ func (si *subnetsIndex) GetPeerSubnets(id peer.ID) records.Subnets { } // GetSubnetsDistributionScores returns current subnets scores based on peers distribution. -// subnets with low peer count would get higher score, and overloaded subnets gets a lower score. +// subnets with low peer count would get higher score, and overloaded subnets gets a lower score (possibly negative). +// Subnets in which the node doesn't participate receive a score of 0. 
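Stepping back to the IsBad change above: it relies on HasBadGossipScore, which only flags peers whose recorded score is at or below the gossipsub graylist threshold, while peers with no recorded score are never flagged. Below is a minimal self-contained sketch of that check using a made-up threshold constant; the real value comes from params.PeerScoreThresholds().GraylistThreshold, which is outside this patch.

package main

import "fmt"

// Hypothetical threshold for illustration; the patch reads it from
// params.PeerScoreThresholds().GraylistThreshold.
const graylistThreshold = -16000.0

// hasBadGossipScore mirrors gossipScoreIndex.HasBadGossipScore: a peer is bad only if a
// score was recorded for it and that score is at or below the graylist threshold.
func hasBadGossipScore(scores map[string]float64, peerID string) (bool, float64) {
	score, ok := scores[peerID]
	if !ok {
		return false, 0.0
	}
	return score <= graylistThreshold, score
}

func main() {
	scores := map[string]float64{
		"peerA": -20000.0, // below the threshold: would be rejected or disconnected
		"peerB": 5.0,      // healthy score
	}
	for _, id := range []string{"peerA", "peerB", "peerC"} {
		bad, score := hasBadGossipScore(scores, id)
		fmt.Printf("%s bad=%v score=%.1f\n", id, bad, score)
	}
}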
func GetSubnetsDistributionScores(stats *SubnetsStats, minPerSubnet int, mySubnets records.Subnets, topicMaxPeers int) []float64 { const activeSubnetBoost = 0.2 diff --git a/network/topics/controller_test.go b/network/topics/controller_test.go index 2974b9c504..5c4beba80b 100644 --- a/network/topics/controller_test.go +++ b/network/topics/controller_test.go @@ -396,7 +396,7 @@ func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator t.Fatal(err) } - ps, tm, err := NewPubSub(ctx, logger, cfg, metricsreporter.NewNop(), validatorStore) + ps, tm, err := NewPubSub(ctx, logger, cfg, metricsreporter.NewNop(), validatorStore, nil) require.NoError(t, err) p = &P{ diff --git a/network/topics/pubsub.go b/network/topics/pubsub.go index 20143b0e2a..ced81cd7f8 100644 --- a/network/topics/pubsub.go +++ b/network/topics/pubsub.go @@ -117,7 +117,7 @@ type CommitteesProvider interface { } // NewPubSub creates a new pubsub router and the necessary components -func NewPubSub(ctx context.Context, logger *zap.Logger, cfg *PubSubConfig, metrics Metrics, committeesProvider CommitteesProvider) (*pubsub.PubSub, Controller, error) { +func NewPubSub(ctx context.Context, logger *zap.Logger, cfg *PubSubConfig, metrics Metrics, committeesProvider CommitteesProvider, gossipScoreIndex peers.GossipScoreIndex) (*pubsub.PubSub, Controller, error) { if err := cfg.init(); err != nil { return nil, nil, err } @@ -176,7 +176,7 @@ func NewPubSub(ctx context.Context, logger *zap.Logger, cfg *PubSubConfig, metri peerConnected := func(pid peer.ID) bool { return cfg.Host.Network().Connectedness(pid) == libp2pnetwork.Connected } - inspector = scoreInspector(logger, cfg.ScoreIndex, scoreInspectLogFrequency, metrics, peerConnected, peerScoreParams, topicScoreFactory) + inspector = scoreInspector(logger, cfg.ScoreIndex, scoreInspectLogFrequency, metrics, peerConnected, peerScoreParams, topicScoreFactory, gossipScoreIndex) } if inspectInterval == 0 { inspectInterval = defaultScoreInspectInterval diff --git a/network/topics/scoring.go b/network/topics/scoring.go index 7644ab6161..68036b1fa2 100644 --- a/network/topics/scoring.go +++ b/network/topics/scoring.go @@ -32,18 +32,35 @@ type topicScoreSnapshot struct { *pubsub.TopicScoreSnapshot } -// scoreInspector inspects scores and updates the score index accordingly +// scoreInspector inspects and logs scores. +// It also updates the GossipScoreIndex by resetting it and +// adding the peers' scores. // TODO: finalize once validation is in place -func scoreInspector(logger *zap.Logger, scoreIdx peers.ScoreIndex, logFrequency int, metrics Metrics, peerConnected func(pid peer.ID) bool, peerScoreParams *pubsub.PeerScoreParams, topicScoreParamsFactory func(string) *pubsub.TopicScoreParams) pubsub.ExtendedPeerScoreInspectFn { +func scoreInspector(logger *zap.Logger, + scoreIdx peers.ScoreIndex, + logFrequency int, + metrics Metrics, + peerConnected func(pid peer.ID) bool, + peerScoreParams *pubsub.PeerScoreParams, + topicScoreParamsFactory func(string) *pubsub.TopicScoreParams, + gossipScoreIndex peers.GossipScoreIndex, +) pubsub.ExtendedPeerScoreInspectFn { inspections := 0 return func(scores map[peer.ID]*pubsub.PeerScoreSnapshot) { + // Update gossipScoreIndex. + peerScores := make(map[peer.ID]float64) + for pid, ps := range scores { + peerScores[pid] = ps.Score + } + gossipScoreIndex.SetScores(peerScores) + // Skip if it's not time to log yet. if inspections%logFrequency != 0 { - // Don't log yet. 
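A compact sketch of the sampling pattern used in this inspector: scores are pushed into the gossip score index on every invocation, while the heavier logging path only runs on every logFrequency-th call. The names below are stand-ins, not the repository's API.

package main

import "fmt"

func main() {
	const logFrequency = 3
	inspections := 0

	// inspect mimics the shape of the returned inspection closure: the score index is
	// updated unconditionally, and logging is rate-limited by a call counter.
	index := map[string]float64{}
	setScores := func(s map[string]float64) {
		index = s
	}
	inspect := func(scores map[string]float64) {
		setScores(scores) // always refresh the index, even when logging is skipped
		if inspections%logFrequency != 0 {
			inspections++
			return
		}
		inspections++
		fmt.Println("logging peer scores:", scores)
	}

	for i := 0; i < 5; i++ {
		inspect(map[string]float64{"peerA": float64(-i)})
	}
	fmt.Println("index holds the latest scores:", index)
}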
inspections++ return } + inspections++ // Reset metrics before updating them. metrics.ResetPeerScores() @@ -168,8 +185,6 @@ func scoreInspector(logger *zap.Logger, scoreIdx peers.ScoreIndex, logFrequency // zap.Any("scores", scores), zap.Any("topicScores", peerScores.Topics)) //} } - - inspections++ } } From 4ab561d97c008ca33b57070c7f8de661c5038c15 Mon Sep 17 00:00:00 2001 From: Anton Korpusenko Date: Sun, 15 Sep 2024 17:26:46 +0300 Subject: [PATCH 05/35] chore: (validation package replace panic statements with errors (#1726) * removed some panics in msg validation * removed all panics from validation package. fixed test * fixed wrong role type * replaced errors.new and wrap with fmt.Errorf() * fixed missing import fmt --- message/validation/consensus_validation.go | 17 +- .../genesis/consensus_validation.go | 31 ++- message/validation/genesis/message_counts.go | 17 +- .../validation/genesis/partial_validation.go | 30 ++- message/validation/genesis/utils_test.go | 197 ++++++++++++++ message/validation/genesis/validation_test.go | 252 +++++++++++++----- message/validation/message_counts.go | 14 +- message/validation/partial_validation.go | 8 +- message/validation/utils_test.go | 221 +++++++++++++++ message/validation/validation.go | 2 +- 10 files changed, 679 insertions(+), 110 deletions(-) create mode 100644 message/validation/genesis/utils_test.go create mode 100644 message/validation/utils_test.go diff --git a/message/validation/consensus_validation.go b/message/validation/consensus_validation.go index 2ccc4df542..41f43a8cc4 100644 --- a/message/validation/consensus_validation.go +++ b/message/validation/consensus_validation.go @@ -276,7 +276,11 @@ func (mv *messageValidator) validateQBFTMessageByDutyLogic( // Rule: Round cut-offs for roles: // - 12 (committee and aggregation) // - 6 (other types) - if maxRound := mv.maxRound(role); consensusMessage.Round > maxRound { + maxRound, err := mv.maxRound(role) + if err != nil { + return fmt.Errorf("failed to get max round: %w", err) + } + if consensusMessage.Round > maxRound { err := ErrRoundTooHigh err.got = fmt.Sprintf("%v (%v role)", consensusMessage.Round, message.RunnerRoleToString(role)) err.want = fmt.Sprintf("%v (%v role)", maxRound, message.RunnerRoleToString(role)) @@ -326,8 +330,7 @@ func (mv *messageValidator) processSignerState(signedSSVMessage *spectypes.Signe signerState.SeenSigners[encodedOperators] = struct{}{} } - signerState.MessageCounts.RecordConsensusMessage(signedSSVMessage, consensusMessage) - return nil + return signerState.MessageCounts.RecordConsensusMessage(signedSSVMessage, consensusMessage) } func (mv *messageValidator) validateJustifications(message *specqbft.Message) error { @@ -362,14 +365,14 @@ func (mv *messageValidator) validateJustifications(message *specqbft.Message) er return nil } -func (mv *messageValidator) maxRound(role spectypes.RunnerRole) specqbft.Round { +func (mv *messageValidator) maxRound(role spectypes.RunnerRole) (specqbft.Round, error) { switch role { case spectypes.RoleCommittee, spectypes.RoleAggregator: // TODO: check if value for aggregator is correct as there are messages on stage exceeding the limit - return 12 // TODO: consider calculating based on quick timeout and slow timeout + return 12, nil // TODO: consider calculating based on quick timeout and slow timeout case spectypes.RoleProposer, spectypes.RoleSyncCommitteeContribution: - return 6 + return 6, nil default: - panic("unknown role") + return 0, fmt.Errorf("unknown role") } } diff --git 
a/message/validation/genesis/consensus_validation.go b/message/validation/genesis/consensus_validation.go index c5c7dc6643..06a36799ac 100644 --- a/message/validation/genesis/consensus_validation.go +++ b/message/validation/genesis/consensus_validation.go @@ -63,7 +63,11 @@ func (mv *messageValidator) validateConsensusMessage( return consensusDescriptor, msgSlot, err } - if maxRound := mv.maxRound(role); msgRound > maxRound { + maxRound, err := mv.maxRound(role) + if err != nil { + return consensusDescriptor, msgSlot, fmt.Errorf("failed to get max round: %w", err) + } + if msgRound > maxRound { err := ErrRoundTooHigh err.got = fmt.Sprintf("%v (%v role)", msgRound, role) err.want = fmt.Sprintf("%v (%v role)", maxRound, role) @@ -140,7 +144,10 @@ func (mv *messageValidator) validateConsensusMessage( } } - signerState.MessageCounts.RecordConsensusMessage(signedMsg) + err := signerState.MessageCounts.RecordConsensusMessage(signedMsg) + if err != nil { + return consensusDescriptor, msgSlot, fmt.Errorf("can't record consensus message: %w", err) + } } return consensusDescriptor, msgSlot, nil @@ -330,16 +337,16 @@ func (mv *messageValidator) isDecidedMessage(signedMsg *genesisspecqbft.SignedMe return signedMsg.Message.MsgType == genesisspecqbft.CommitMsgType && len(signedMsg.Signers) > 1 } -func (mv *messageValidator) maxRound(role genesisspectypes.BeaconRole) genesisspecqbft.Round { +func (mv *messageValidator) maxRound(role genesisspectypes.BeaconRole) (genesisspecqbft.Round, error) { switch role { case genesisspectypes.BNRoleAttester, genesisspectypes.BNRoleAggregator: // TODO: check if value for aggregator is correct as there are messages on stage exceeding the limit - return 12 // TODO: consider calculating based on quick timeout and slow timeout + return 12, nil // TODO: consider calculating based on quick timeout and slow timeout case genesisspectypes.BNRoleProposer, genesisspectypes.BNRoleSyncCommittee, genesisspectypes.BNRoleSyncCommitteeContribution: - return 6 + return 6, nil case genesisspectypes.BNRoleValidatorRegistration, genesisspectypes.BNRoleVoluntaryExit: - return 0 + return 0, nil default: - panic("unknown role") + return 0, fmt.Errorf("unknown role") } } @@ -353,16 +360,16 @@ func (mv *messageValidator) currentEstimatedRound(sinceSlotStart time.Duration) return estimatedRound } -func (mv *messageValidator) waitAfterSlotStart(role genesisspectypes.BeaconRole) time.Duration { +func (mv *messageValidator) waitAfterSlotStart(role genesisspectypes.BeaconRole) (time.Duration, error) { switch role { case genesisspectypes.BNRoleAttester, genesisspectypes.BNRoleSyncCommittee: - return mv.netCfg.Beacon.SlotDurationSec() / 3 + return mv.netCfg.Beacon.SlotDurationSec() / 3, nil case genesisspectypes.BNRoleAggregator, genesisspectypes.BNRoleSyncCommitteeContribution: - return mv.netCfg.Beacon.SlotDurationSec() / 3 * 2 + return mv.netCfg.Beacon.SlotDurationSec() / 3 * 2, nil case genesisspectypes.BNRoleProposer, genesisspectypes.BNRoleValidatorRegistration, genesisspectypes.BNRoleVoluntaryExit: - return 0 + return 0, nil default: - panic("unknown role") + return 0, fmt.Errorf("unknown role") } } diff --git a/message/validation/genesis/message_counts.go b/message/validation/genesis/message_counts.go index 4c6997533e..8a51796f36 100644 --- a/message/validation/genesis/message_counts.go +++ b/message/validation/genesis/message_counts.go @@ -70,7 +70,7 @@ func (c *MessageCounts) ValidateConsensusMessage(msg *specqbft.SignedMessage, li return err } default: - panic("unexpected signed message 
type") // should be checked before + return fmt.Errorf("unexpected signed message type") // should be checked before } return nil @@ -93,14 +93,14 @@ func (c *MessageCounts) ValidatePartialSignatureMessage(m *spectypes.SignedParti return err } default: - panic("unexpected partial signature message type") // should be checked before + return fmt.Errorf("unexpected partial signature message type") // should be checked before } return nil } // RecordConsensusMessage updates the counts based on the provided consensus message type. -func (c *MessageCounts) RecordConsensusMessage(msg *specqbft.SignedMessage) { +func (c *MessageCounts) RecordConsensusMessage(msg *specqbft.SignedMessage) error { switch msg.Message.MsgType { case specqbft.ProposalMsgType: c.Proposal++ @@ -113,25 +113,28 @@ func (c *MessageCounts) RecordConsensusMessage(msg *specqbft.SignedMessage) { case len(msg.Signers) > 1: c.Decided++ default: - panic("expected signers") // 0 length should be checked before + return fmt.Errorf("expected signers") // 0 length should be checked before } case specqbft.RoundChangeMsgType: c.RoundChange++ default: - panic("unexpected signed message type") // should be checked before + return fmt.Errorf("unexpected signed message type") // should be checked before } + + return nil } // RecordPartialSignatureMessage updates the counts based on the provided partial signature message type. -func (c *MessageCounts) RecordPartialSignatureMessage(msg *spectypes.SignedPartialSignatureMessage) { +func (c *MessageCounts) RecordPartialSignatureMessage(msg *spectypes.SignedPartialSignatureMessage) error { switch msg.Message.Type { case spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig, spectypes.VoluntaryExitPartialSig: c.PreConsensus++ case spectypes.PostConsensusPartialSig: c.PostConsensus++ default: - panic("unexpected partial signature message type") // should be checked before + return fmt.Errorf("unexpected partial signature message type") // should be checked before } + return nil } // maxMessageCounts is the maximum number of acceptable messages from a signer within a slot & round. 
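The counting helpers above now surface unexpected message types as errors rather than panicking, so callers are expected to propagate them (as validateConsensusMessage does with "can't record consensus message: %w"). Here is a minimal stand-alone sketch of that contract, using simplified types rather than the spec packages.

package main

import (
	"errors"
	"fmt"
)

type msgType int

const (
	proposalMsg msgType = iota
	prepareMsg
	commitMsg
)

// messageCounts is a simplified stand-in for the MessageCounts type in this package.
type messageCounts struct {
	proposal, prepare, commit int
}

// recordConsensusMessage mirrors the new behavior: an unknown type yields an error
// instead of a panic, leaving the counts untouched.
func (c *messageCounts) recordConsensusMessage(t msgType) error {
	switch t {
	case proposalMsg:
		c.proposal++
	case prepareMsg:
		c.prepare++
	case commitMsg:
		c.commit++
	default:
		return errors.New("unexpected signed message type")
	}
	return nil
}

func main() {
	var counts messageCounts
	for _, t := range []msgType{proposalMsg, prepareMsg, msgType(12345)} {
		if err := counts.recordConsensusMessage(t); err != nil {
			// Callers wrap and return the error, as the validators above do.
			fmt.Println(fmt.Errorf("can't record consensus message: %w", err))
		}
	}
	fmt.Printf("%+v\n", counts)
}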
diff --git a/message/validation/genesis/partial_validation.go b/message/validation/genesis/partial_validation.go index 4ee83f2172..584658c236 100644 --- a/message/validation/genesis/partial_validation.go +++ b/message/validation/genesis/partial_validation.go @@ -1,11 +1,13 @@ package validation import ( + "fmt" "time" "github.com/attestantio/go-eth2-client/spec/phase0" genesisspecqbft "github.com/ssvlabs/ssv-spec-pre-cc/qbft" genesisspectypes "github.com/ssvlabs/ssv-spec-pre-cc/types" + ssvtypes "github.com/ssvlabs/ssv/protocol/v2/types" ) @@ -27,7 +29,11 @@ func (mv *messageValidator) validatePartialSignatureMessage( } role := msgID.GetRoleType() - if !mv.partialSignatureTypeMatchesRole(signedMsg.Message.Type, role) { + matchesRole, err := mv.partialSignatureTypeMatchesRole(signedMsg.Message.Type, role) + if err != nil { + return msgSlot, err + } + if !matchesRole { return msgSlot, ErrPartialSignatureTypeRoleMismatch } @@ -66,7 +72,9 @@ func (mv *messageValidator) validatePartialSignatureMessage( signerState.ResetSlot(msgSlot, genesisspecqbft.FirstRound, newEpoch) } - signerState.MessageCounts.RecordPartialSignatureMessage(signedMsg) + if err := signerState.MessageCounts.RecordPartialSignatureMessage(signedMsg); err != nil { + return msgSlot, err + } return msgSlot, nil } @@ -85,24 +93,24 @@ func (mv *messageValidator) validPartialSigMsgType(msgType genesisspectypes.Part } } -func (mv *messageValidator) partialSignatureTypeMatchesRole(msgType genesisspectypes.PartialSigMsgType, role genesisspectypes.BeaconRole) bool { +func (mv *messageValidator) partialSignatureTypeMatchesRole(msgType genesisspectypes.PartialSigMsgType, role genesisspectypes.BeaconRole) (bool, error) { switch role { case genesisspectypes.BNRoleAttester: - return msgType == genesisspectypes.PostConsensusPartialSig + return msgType == genesisspectypes.PostConsensusPartialSig, nil case genesisspectypes.BNRoleAggregator: - return msgType == genesisspectypes.PostConsensusPartialSig || msgType == genesisspectypes.SelectionProofPartialSig + return msgType == genesisspectypes.PostConsensusPartialSig || msgType == genesisspectypes.SelectionProofPartialSig, nil case genesisspectypes.BNRoleProposer: - return msgType == genesisspectypes.PostConsensusPartialSig || msgType == genesisspectypes.RandaoPartialSig + return msgType == genesisspectypes.PostConsensusPartialSig || msgType == genesisspectypes.RandaoPartialSig, nil case genesisspectypes.BNRoleSyncCommittee: - return msgType == genesisspectypes.PostConsensusPartialSig + return msgType == genesisspectypes.PostConsensusPartialSig, nil case genesisspectypes.BNRoleSyncCommitteeContribution: - return msgType == genesisspectypes.PostConsensusPartialSig || msgType == genesisspectypes.ContributionProofs + return msgType == genesisspectypes.PostConsensusPartialSig || msgType == genesisspectypes.ContributionProofs, nil case genesisspectypes.BNRoleValidatorRegistration: - return msgType == genesisspectypes.ValidatorRegistrationPartialSig + return msgType == genesisspectypes.ValidatorRegistrationPartialSig, nil case genesisspectypes.BNRoleVoluntaryExit: - return msgType == genesisspectypes.VoluntaryExitPartialSig + return msgType == genesisspectypes.VoluntaryExitPartialSig, nil default: - panic("invalid role") // role validity should be checked before + return false, fmt.Errorf("invalid role") // role validity should be checked before } } diff --git a/message/validation/genesis/utils_test.go b/message/validation/genesis/utils_test.go new file mode 100644 index 0000000000..0931fe762e --- 
/dev/null +++ b/message/validation/genesis/utils_test.go @@ -0,0 +1,197 @@ +package validation + +import ( + "fmt" + "testing" + + specqbft "github.com/ssvlabs/ssv-spec-pre-cc/qbft" + spectypes "github.com/ssvlabs/ssv-spec/types" + "github.com/stretchr/testify/require" +) + +func TestRecordConsensusMessage(t *testing.T) { + tt := []struct { + name string + msg *specqbft.SignedMessage + initialCounts MessageCounts + expectedCounts MessageCounts + expectedError error + }{ + { + name: "ProposalMessage_IncrementsProposalCount", + msg: &specqbft.SignedMessage{ + Message: specqbft.Message{MsgType: specqbft.ProposalMsgType}, + }, + initialCounts: MessageCounts{}, + expectedCounts: MessageCounts{Proposal: 1}, + expectedError: nil, + }, + { + name: "PrepareMessage_IncrementsPrepareCount", + msg: &specqbft.SignedMessage{ + Message: specqbft.Message{MsgType: specqbft.PrepareMsgType}, + }, + initialCounts: MessageCounts{}, + expectedCounts: MessageCounts{Prepare: 1}, + expectedError: nil, + }, + { + name: "CommitMessageWithSingleOperator_IncrementsCommitCount", + msg: &specqbft.SignedMessage{ + Message: specqbft.Message{MsgType: specqbft.CommitMsgType}, + Signers: []spectypes.OperatorID{1}, + }, + initialCounts: MessageCounts{}, + expectedCounts: MessageCounts{Commit: 1}, + expectedError: nil, + }, + { + name: "CommitMessageWithMultipleOperators_IncrementsDecidedCount", + msg: &specqbft.SignedMessage{ + Message: specqbft.Message{MsgType: specqbft.CommitMsgType}, + Signers: []spectypes.OperatorID{1, 2}, + }, + initialCounts: MessageCounts{}, + expectedCounts: MessageCounts{Decided: 1}, + expectedError: nil, + }, + { + name: "RoundChangeMessage_IncrementsRoundChangeCount", + msg: &specqbft.SignedMessage{ + Message: specqbft.Message{MsgType: specqbft.RoundChangeMsgType}, + }, + initialCounts: MessageCounts{}, + expectedCounts: MessageCounts{RoundChange: 1}, + expectedError: nil, + }, + { + name: "UnexpectedMessageType_ReturnsError", + msg: &specqbft.SignedMessage{ + Message: specqbft.Message{MsgType: specqbft.MessageType(12345)}, + }, + initialCounts: MessageCounts{}, + expectedCounts: MessageCounts{}, + expectedError: fmt.Errorf("unexpected signed message type"), + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + counts := tc.initialCounts + err := counts.RecordConsensusMessage(tc.msg) + + if tc.expectedError != nil { + require.EqualError(t, err, tc.expectedError.Error()) + } else { + require.NoError(t, err) + } + + require.Equal(t, tc.expectedCounts, counts) + }) + } +} + +func TestValidateConsensusMessage(t *testing.T) { + type input struct { + signedSSVMessage *specqbft.SignedMessage + counts *MessageCounts + limits MessageCounts + } + + tt := []struct { + name string + input input + expectedError error + }{ + { + name: "ProposalMessage_ExceedsLimit_ReturnsError", + input: input{ + counts: &MessageCounts{Proposal: 2}, + signedSSVMessage: &specqbft.SignedMessage{ + Message: specqbft.Message{MsgType: specqbft.ProposalMsgType}, + }, + limits: MessageCounts{Proposal: 1}, + }, + expectedError: fmt.Errorf("too many messages of same type per round, got proposal, having pre-consensus: 0, proposal: 2, prepare: 0, commit: 0, decided: 0, round change: 0, post-consensus: 0"), + }, + { + name: "PrepareMessage_ExceedsLimit_ReturnsError", + input: input{ + counts: &MessageCounts{Prepare: 2}, + signedSSVMessage: &specqbft.SignedMessage{ + Message: specqbft.Message{MsgType: specqbft.PrepareMsgType}, + }, + limits: MessageCounts{Prepare: 1}, + }, + expectedError: fmt.Errorf("too many messages 
of same type per round, got prepare, having pre-consensus: 0, proposal: 0, prepare: 2, commit: 0, decided: 0, round change: 0, post-consensus: 0"), + }, + { + name: "CommitMessageWithSingleOperator_ExceedsLimit_ReturnsError", + input: input{ + counts: &MessageCounts{Commit: 2}, + signedSSVMessage: &specqbft.SignedMessage{ + Message: specqbft.Message{MsgType: specqbft.CommitMsgType}, + Signers: []spectypes.OperatorID{1}, + }, + limits: MessageCounts{Commit: 0}, + }, + expectedError: fmt.Errorf("too many messages of same type per round, got commit, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 2, decided: 0, round change: 0, post-consensus: 0"), + }, + { + name: "CommitMessageWithManyOperators_ExceedsLimit_ReturnsError", + input: input{ + counts: &MessageCounts{Commit: 2}, + signedSSVMessage: &specqbft.SignedMessage{ + Message: specqbft.Message{MsgType: specqbft.CommitMsgType}, + Signers: []spectypes.OperatorID{1, 2, 3}, + }, + limits: MessageCounts{Commit: 1}, + }, + expectedError: fmt.Errorf("too many messages of same type per round, got decided, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 2, decided: 0, round change: 0, post-consensus: 0"), + }, + { + name: "RoundChangeMessage_ExceedsLimit_ReturnsError", + input: input{ + counts: &MessageCounts{RoundChange: 2}, + signedSSVMessage: &specqbft.SignedMessage{ + Message: specqbft.Message{MsgType: specqbft.RoundChangeMsgType}, + }, + limits: MessageCounts{RoundChange: 1}, + }, + expectedError: fmt.Errorf("too many messages of same type per round, got round change, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 0, decided: 0, round change: 2, post-consensus: 0"), + }, + { + name: "UnexpectedMessageType_ReturnsError", + input: input{ + counts: &MessageCounts{}, + signedSSVMessage: &specqbft.SignedMessage{Message: specqbft.Message{MsgType: specqbft.MessageType(12345)}}, + limits: MessageCounts{}, + }, + expectedError: fmt.Errorf("unexpected signed message type"), + }, + { + name: "ValidProposalMessage_HappyFlow", + input: input{ + counts: &MessageCounts{Proposal: 2}, + signedSSVMessage: &specqbft.SignedMessage{ + Message: specqbft.Message{MsgType: specqbft.ProposalMsgType}, + }, + limits: MessageCounts{Proposal: 100}, + }, + expectedError: nil, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + + err := tc.input.counts.ValidateConsensusMessage(tc.input.signedSSVMessage, tc.input.limits) + + if tc.expectedError != nil { + require.EqualError(t, err, tc.expectedError.Error()) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/message/validation/genesis/validation_test.go b/message/validation/genesis/validation_test.go index 28409d7020..3d4d10d511 100644 --- a/message/validation/genesis/validation_test.go +++ b/message/validation/genesis/validation_test.go @@ -83,7 +83,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.NoError(t, err) }) @@ -115,7 +117,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := 
netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.NoError(t, err) @@ -208,8 +212,10 @@ func Test_ValidateSSVMessage(t *testing.T) { pmsg := &pubsub.Message{} - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err := validator.validateP2PMessage(pmsg, receivedAt) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) + _, _, err = validator.validateP2PMessage(pmsg, receivedAt) require.ErrorIs(t, err, ErrPubSubMessageHasNoData) }) @@ -229,7 +235,9 @@ func Test_ValidateSSVMessage(t *testing.T) { }, } - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateP2PMessage(pmsg, receivedAt) e := ErrPubSubDataTooBig @@ -252,7 +260,9 @@ func Test_ValidateSSVMessage(t *testing.T) { }, } - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateP2PMessage(pmsg, receivedAt) require.ErrorContains(t, err, ErrMalformedPubSubMessage.Error()) @@ -282,7 +292,9 @@ func Test_ValidateSSVMessage(t *testing.T) { }, } - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateP2PMessage(pmsg, receivedAt) require.ErrorContains(t, err, ErrMalformedMessage.Error()) @@ -312,7 +324,9 @@ func Test_ValidateSSVMessage(t *testing.T) { }, } - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateP2PMessage(pmsg, receivedAt) require.ErrorContains(t, err, ErrMalformedMessage.Error()) }) @@ -460,7 +474,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrWrongDomain expectedErr.got = hex.EncodeToString(wrongDomain[:]) @@ -489,7 +505,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := 
netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrInvalidRole) }) @@ -514,7 +532,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorContains(t, err, ErrUnexpectedConsensusMessage.Error()) @@ -605,7 +625,9 @@ func Test_ValidateSSVMessage(t *testing.T) { require.NoError(t, err) slot := netCfg.Beacon.FirstSlotAtEpoch(1) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrValidatorNotAttesting @@ -651,7 +673,9 @@ func Test_ValidateSSVMessage(t *testing.T) { MsgID: spectypes.NewMsgID(spectypes.DomainType(netCfg.DomainType()), nonUpdatedMetadataShare.ValidatorPubKey[:], roleAttester), Data: encodedValidSignedMessage, } - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) @@ -701,7 +725,9 @@ func Test_ValidateSSVMessage(t *testing.T) { MsgID: spectypes.NewMsgID(spectypes.DomainType(netCfg.DomainType()), nonUpdatedMetadataShare.ValidatorPubKey[:], roleAttester), Data: encodedValidSignedMessage, } - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) @@ -744,7 +770,9 @@ func Test_ValidateSSVMessage(t *testing.T) { require.NoError(t, err) slot := netCfg.Beacon.FirstSlotAtEpoch(1) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrNoShareMetadata) @@ -772,7 +800,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)), nil) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait), nil) require.NoError(t, err) validSignedMessage = spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height+4) @@ -788,7 +818,10 @@ func Test_ValidateSSVMessage(t *testing.T) { message2, err := 
genesisqueue.DecodeGenesisSSVMessage(ssvMsg2) require.NoError(t, err) - _, _, err = validator.validateSSVMessage(message2, netCfg.Beacon.GetSlotStartTime(slot+4).Add(validator.waitAfterSlotStart(roleAttester)), nil) + timeToWait, err = validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + + _, _, err = validator.validateSSVMessage(message2, netCfg.Beacon.GetSlotStartTime(slot+4).Add(timeToWait), nil) require.NoError(t, err) validSignedMessage = spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height+8) @@ -804,7 +837,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message3, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg3) require.NoError(t, err) - _, _, err = validator.validateSSVMessage(message3, netCfg.Beacon.GetSlotStartTime(slot+8).Add(validator.waitAfterSlotStart(roleAttester)), nil) + timeToWait, err = validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + _, _, err = validator.validateSSVMessage(message3, netCfg.Beacon.GetSlotStartTime(slot+8).Add(timeToWait), nil) require.ErrorContains(t, err, ErrTooManyDutiesPerEpoch.Error()) }) @@ -831,13 +866,17 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer)), nil) + timeToWait, err := validator.waitAfterSlotStart(spectypes.BNRoleProposer) + require.NoError(t, err) + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait), nil) require.ErrorContains(t, err, ErrNoDuty.Error()) dutyStore = dutystore.New() dutyStore.Proposer.Add(epoch, slot, validatorIndex, ð2apiv1.ProposerDuty{}, true) validator = New(netCfg, WithNodeStorage(ns), WithDutyStore(dutyStore)).(*messageValidator) - _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer)), nil) + timeToWait, err = validator.waitAfterSlotStart(spectypes.BNRoleProposer) + require.NoError(t, err) + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait), nil) require.NoError(t, err) }) @@ -874,7 +913,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrSignerNotInCommittee) }) @@ -899,7 +940,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrZeroSigner) }) @@ -925,7 +968,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := 
netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrUnexpectedSigner expectedErr.got = spectypes.OperatorID(2) @@ -954,7 +999,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrDuplicatedPartialSignatureMessage) }) @@ -980,7 +1027,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrNoPartialMessages) }) @@ -1014,7 +1063,9 @@ func Test_ValidateSSVMessage(t *testing.T) { }, } - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateP2PMessage(pmsg, receivedAt) require.ErrorContains(t, err, ErrMalformedMessage.Error()) }) @@ -1186,7 +1237,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrUnknownQBFTMessageType require.ErrorIs(t, err, expectedErr) @@ -1229,7 +1282,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrZeroSignature) }) @@ -1252,7 +1307,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrZeroSignature) }) @@ -1280,7 +1337,9 @@ func 
Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrNoSigners) }) @@ -1326,7 +1385,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrZeroSigner) }) @@ -1348,7 +1409,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrZeroSigner) }) @@ -1379,7 +1442,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrDuplicatedSigner) }) @@ -1407,7 +1472,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrSignersNotSorted) }) @@ -1435,7 +1502,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrWrongSignersLength @@ -1465,7 +1534,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = 
validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrNonDecidedWithMultipleSigners @@ -1484,12 +1555,23 @@ func Test_ValidateSSVMessage(t *testing.T) { encodedValidSignedMessage, err := validSignedMessage.Encode() require.NoError(t, err) + attesterTimeToWait, err := validator.waitAfterSlotStart(spectypes.BNRoleAttester) + require.NoError(t, err) + aggregatorTimeToWait, err := validator.waitAfterSlotStart(spectypes.BNRoleAggregator) + require.NoError(t, err) + proposerTimeToWait, err := validator.waitAfterSlotStart(spectypes.BNRoleProposer) + require.NoError(t, err) + scTimeToWait, err := validator.waitAfterSlotStart(spectypes.BNRoleSyncCommittee) + require.NoError(t, err) + sccTimeToWait, err := validator.waitAfterSlotStart(spectypes.BNRoleSyncCommitteeContribution) + require.NoError(t, err) + tests := map[spectypes.BeaconRole]time.Time{ - spectypes.BNRoleAttester: netCfg.Beacon.GetSlotStartTime(slot + 35).Add(validator.waitAfterSlotStart(spectypes.BNRoleAttester)), - spectypes.BNRoleAggregator: netCfg.Beacon.GetSlotStartTime(slot + 35).Add(validator.waitAfterSlotStart(spectypes.BNRoleAggregator)), - spectypes.BNRoleProposer: netCfg.Beacon.GetSlotStartTime(slot + 4).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer)), - spectypes.BNRoleSyncCommittee: netCfg.Beacon.GetSlotStartTime(slot + 4).Add(validator.waitAfterSlotStart(spectypes.BNRoleSyncCommittee)), - spectypes.BNRoleSyncCommitteeContribution: netCfg.Beacon.GetSlotStartTime(slot + 4).Add(validator.waitAfterSlotStart(spectypes.BNRoleSyncCommitteeContribution)), + spectypes.BNRoleAttester: netCfg.Beacon.GetSlotStartTime(slot + 35).Add(attesterTimeToWait), + spectypes.BNRoleAggregator: netCfg.Beacon.GetSlotStartTime(slot + 35).Add(aggregatorTimeToWait), + spectypes.BNRoleProposer: netCfg.Beacon.GetSlotStartTime(slot + 4).Add(proposerTimeToWait), + spectypes.BNRoleSyncCommittee: netCfg.Beacon.GetSlotStartTime(slot + 4).Add(scTimeToWait), + spectypes.BNRoleSyncCommitteeContribution: netCfg.Beacon.GetSlotStartTime(slot + 4).Add(sccTimeToWait), } for role, receivedAt := range tests { @@ -1557,7 +1639,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrSignerNotLeader expectedErr.got = spectypes.OperatorID(2) @@ -1587,7 +1671,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorContains(t, err, ErrMalformedPrepareJustifications.Error()) @@ -1619,7 +1705,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + 
receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrUnexpectedPrepareJustifications @@ -1654,7 +1742,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrUnexpectedRoundChangeJustifications @@ -1684,7 +1774,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorContains(t, err, ErrMalformedRoundChangeJustifications.Error()) @@ -1712,7 +1804,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrInvalidHash @@ -1738,7 +1832,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message1, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg1) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message1, receivedAt, nil) require.NoError(t, err) @@ -1783,7 +1879,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message1, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg1) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message1, receivedAt, nil) require.NoError(t, err) @@ -1827,7 +1925,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message1, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg1) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message1, receivedAt, nil) require.NoError(t, err) @@ -1869,7 +1969,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message1, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg1) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) 
+ timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message1, receivedAt, nil) require.NoError(t, err) @@ -1914,7 +2016,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) for i := 0; i < maxDecidedCount(len(share.Committee)); i++ { _, _, err = validator.validateSSVMessage(message, receivedAt, nil) @@ -1957,7 +2061,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(0).Add(validator.waitAfterSlotStart(role)) + timeToWait, err := validator.waitAfterSlotStart(role) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(0).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorContains(t, err, ErrRoundTooHigh.Error()) }) @@ -1984,7 +2090,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.NoError(t, err) @@ -2028,7 +2136,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot+1).Add(validator.waitAfterSlotStart(roleAttester)), nil) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot+1).Add(timeToWait), nil) require.NoError(t, err) signedMessage = spectestingutils.TestingPrepareMessageWithHeight(ks.Shares[1], 1, height) @@ -2044,7 +2154,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message2, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg2) require.NoError(t, err) - _, _, err = validator.validateSSVMessage(message2, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)), nil) + timeToWait, err = validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + _, _, err = validator.validateSSVMessage(message2, netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait), nil) require.ErrorContains(t, err, ErrSlotAlreadyAdvanced.Error()) }) @@ -2070,7 +2182,9 @@ func Test_ValidateSSVMessage(t *testing.T) { decodedMsg, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - _, _, err = validator.validateSSVMessage(decodedMsg, netCfg.Beacon.GetSlotStartTime(slot+1).Add(validator.waitAfterSlotStart(roleAttester)), nil) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + _, _, err = validator.validateSSVMessage(decodedMsg, netCfg.Beacon.GetSlotStartTime(slot+1).Add(timeToWait), nil) require.NoError(t, err) message = 
spectestingutils.PostConsensusAttestationMsg(ks.Shares[2], 2, height) @@ -2091,7 +2205,9 @@ func Test_ValidateSSVMessage(t *testing.T) { decodedMsg2, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg2) require.NoError(t, err) - _, _, err = validator.validateSSVMessage(decodedMsg2, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)), nil) + timeToWait, err = validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + _, _, err = validator.validateSSVMessage(decodedMsg2, netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait), nil) require.ErrorContains(t, err, ErrSlotAlreadyAdvanced.Error()) }) }) @@ -2116,7 +2232,9 @@ func Test_ValidateSSVMessage(t *testing.T) { message, err := genesisqueue.DecodeGenesisSSVMessage(ssvMsg) require.NoError(t, err) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrEventMessage) }) @@ -2151,7 +2269,9 @@ func Test_ValidateSSVMessage(t *testing.T) { } slot := netCfg.Beacon.FirstSlotAtEpoch(epoch) - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateP2PMessage(pMsg, receivedAt) require.ErrorContains(t, err, ErrMalformedPubSubMessage.Error()) }) @@ -2212,7 +2332,9 @@ func Test_ValidateSSVMessage(t *testing.T) { }, } - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateP2PMessage(pMsg, receivedAt) require.NoError(t, err) @@ -2276,7 +2398,9 @@ func Test_ValidateSSVMessage(t *testing.T) { }, } - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateP2PMessage(pMsg, receivedAt) require.ErrorContains(t, err, ErrOperatorNotFound.Error()) @@ -2337,7 +2461,9 @@ func Test_ValidateSSVMessage(t *testing.T) { }, } - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + timeToWait, err := validator.waitAfterSlotStart(roleAttester) + require.NoError(t, err) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(timeToWait) _, _, err = validator.validateP2PMessage(pMsg, receivedAt) require.ErrorContains(t, err, ErrSignatureVerification.Error()) diff --git a/message/validation/message_counts.go b/message/validation/message_counts.go index a1040bcf0e..bb54852510 100644 --- a/message/validation/message_counts.go +++ b/message/validation/message_counts.go @@ -63,7 +63,7 @@ func (c *MessageCounts) ValidateConsensusMessage(signedSSVMessage *spectypes.Sig return err } default: - panic("unexpected signed message type") // should be checked before + return fmt.Errorf("unexpected signed message type") // should be checked before } return nil @@ -86,14 +86,14 @@ func (c *MessageCounts) 
ValidatePartialSignatureMessage(m *spectypes.PartialSign return err } default: - panic("unexpected partial signature message type") // should be checked before + return fmt.Errorf("unexpected partial signature message type") // should be checked before } return nil } // RecordConsensusMessage updates the counts based on the provided consensus message type. -func (c *MessageCounts) RecordConsensusMessage(signedSSVMessage *spectypes.SignedSSVMessage, msg *specqbft.Message) { +func (c *MessageCounts) RecordConsensusMessage(signedSSVMessage *spectypes.SignedSSVMessage, msg *specqbft.Message) error { switch msg.MsgType { case specqbft.ProposalMsgType: c.Proposal++ @@ -106,20 +106,22 @@ func (c *MessageCounts) RecordConsensusMessage(signedSSVMessage *spectypes.Signe case specqbft.RoundChangeMsgType: c.RoundChange++ default: - panic("unexpected signed message type") // should be checked before + return fmt.Errorf("unexpected signed message type") // should be checked before } + return nil } // RecordPartialSignatureMessage updates the counts based on the provided partial signature message type. -func (c *MessageCounts) RecordPartialSignatureMessage(messages *spectypes.PartialSignatureMessages) { +func (c *MessageCounts) RecordPartialSignatureMessage(messages *spectypes.PartialSignatureMessages) error { switch messages.Type { case spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig, spectypes.VoluntaryExitPartialSig: c.PreConsensus++ case spectypes.PostConsensusPartialSig: c.PostConsensus++ default: - panic("unexpected partial signature message type") // should be checked before + return fmt.Errorf("unexpected partial signature message type") // should be checked before } + return nil } // maxMessageCounts is the maximum number of acceptable messages from a signer within a slot & round. 
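
The message_counts.go hunks above turn what used to be panics on unexpected message types into returned errors, and RecordConsensusMessage / RecordPartialSignatureMessage now have an error result that callers are expected to check. The stand-alone sketch below illustrates that calling pattern with a simplified stand-in type; it is not code from this patch, and the type and function names in it are illustrative only.

package main

import "fmt"

// messageCounts is a simplified stand-in for the MessageCounts type changed in
// the hunks above: recording an unknown message type now returns an error
// instead of panicking.
type messageCounts struct {
	Proposal, Prepare, Commit, RoundChange int
}

func (c *messageCounts) recordConsensusMessage(msgType uint64) error {
	switch msgType {
	case 0: // proposal
		c.Proposal++
	case 1: // prepare
		c.Prepare++
	case 2: // commit
		c.Commit++
	case 3: // round change
		c.RoundChange++
	default:
		return fmt.Errorf("unexpected signed message type: %d", msgType)
	}
	return nil
}

func main() {
	var counts messageCounts
	// A caller can now reject a malformed message and keep running instead of
	// bringing down the validator with a panic.
	if err := counts.recordConsensusMessage(12345); err != nil {
		fmt.Println("message rejected:", err)
	}
}

The partial_validation.go hunk that follows applies the same pattern: updatePartialSignatureState now returns the recording error so validatePartialSignatureMessage can propagate it.
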
diff --git a/message/validation/partial_validation.go b/message/validation/partial_validation.go index 0e211a14a2..8ddaca41a6 100644 --- a/message/validation/partial_validation.go +++ b/message/validation/partial_validation.go @@ -55,7 +55,9 @@ func (mv *messageValidator) validatePartialSignatureMessage( return partialSignatureMessages, e } - mv.updatePartialSignatureState(partialSignatureMessages, state, signer) + if err := mv.updatePartialSignatureState(partialSignatureMessages, state, signer); err != nil { + return nil, err + } return partialSignatureMessages, nil } @@ -225,7 +227,7 @@ func (mv *messageValidator) updatePartialSignatureState( partialSignatureMessages *spectypes.PartialSignatureMessages, state *consensusState, signer spectypes.OperatorID, -) { +) error { stateBySlot := state.GetOrCreate(signer) messageSlot := partialSignatureMessages.Slot messageEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(messageSlot) @@ -236,7 +238,7 @@ func (mv *messageValidator) updatePartialSignatureState( stateBySlot.Set(messageSlot, messageEpoch, signerState) } - signerState.MessageCounts.RecordPartialSignatureMessage(partialSignatureMessages) + return signerState.MessageCounts.RecordPartialSignatureMessage(partialSignatureMessages) } func (mv *messageValidator) validPartialSigMsgType(msgType spectypes.PartialSigMsgType) bool { diff --git a/message/validation/utils_test.go b/message/validation/utils_test.go new file mode 100644 index 0000000000..4f5ff62e65 --- /dev/null +++ b/message/validation/utils_test.go @@ -0,0 +1,221 @@ +package validation + +import ( + "fmt" + "testing" + + specqbft "github.com/ssvlabs/ssv-spec/qbft" + spectypes "github.com/ssvlabs/ssv-spec/types" + "github.com/stretchr/testify/require" +) + +func TestMessageValidator_maxRound(t *testing.T) { + tt := []struct { + name string + role spectypes.RunnerRole + want specqbft.Round + err error + }{ + { + name: "Committee role", + role: spectypes.RoleCommittee, + want: 12, + err: nil, + }, + { + name: "Aggregator role", + role: spectypes.RoleAggregator, + want: 12, + err: nil, + }, + { + name: "Proposer role", + role: spectypes.RoleProposer, + want: 6, + err: nil, + }, + { + name: "SyncCommitteeContribution role", + role: spectypes.RoleSyncCommitteeContribution, + want: 6, + err: nil, + }, + { + name: "Unknown role", + role: spectypes.RunnerRole(999), + want: 0, + err: fmt.Errorf("unknown role"), + }, + } + + for _, tc := range tt { + tc := tc + t.Run(tc.name, func(t *testing.T) { + mv := &messageValidator{} + got, err := mv.maxRound(tc.role) + if tc.err != nil { + require.EqualError(t, err, tc.err.Error()) + } else { + require.NoError(t, err) + } + require.Equal(t, tc.want, got) + }) + } +} + +func TestRecordConsensusMessage(t *testing.T) { + tt := []struct { + name string + signedSSVMessage *spectypes.SignedSSVMessage + msg *specqbft.Message + expectedCounts MessageCounts + expectedError error + }{ + { + name: "ProposalMessage_IncrementsProposalCount", + signedSSVMessage: &spectypes.SignedSSVMessage{}, + msg: &specqbft.Message{MsgType: specqbft.ProposalMsgType}, + expectedCounts: MessageCounts{Proposal: 1}, + expectedError: nil, + }, + { + name: "PrepareMessage_IncrementsPrepareCount", + signedSSVMessage: &spectypes.SignedSSVMessage{}, + msg: &specqbft.Message{MsgType: specqbft.PrepareMsgType}, + expectedCounts: MessageCounts{Prepare: 1}, + expectedError: nil, + }, + { + name: "CommitMessageWithSingleOperator_IncrementsCommitCount", + signedSSVMessage: &spectypes.SignedSSVMessage{OperatorIDs: []spectypes.OperatorID{1}}, + msg: 
&specqbft.Message{MsgType: specqbft.CommitMsgType}, + expectedCounts: MessageCounts{Commit: 1}, + expectedError: nil, + }, + { + name: "CommitMessageWithMultipleOperators_DoesNotIncrementCommitCount", + signedSSVMessage: &spectypes.SignedSSVMessage{OperatorIDs: []spectypes.OperatorID{1, 2}}, + msg: &specqbft.Message{MsgType: specqbft.CommitMsgType}, + expectedCounts: MessageCounts{}, + expectedError: nil, + }, + { + name: "RoundChangeMessage_IncrementsRoundChangeCount", + signedSSVMessage: &spectypes.SignedSSVMessage{}, + msg: &specqbft.Message{MsgType: specqbft.RoundChangeMsgType}, + expectedCounts: MessageCounts{RoundChange: 1}, + expectedError: nil, + }, + { + name: "UnexpectedMessageType_ReturnsError", + signedSSVMessage: &spectypes.SignedSSVMessage{}, + msg: &specqbft.Message{MsgType: specqbft.MessageType(12345)}, + expectedCounts: MessageCounts{}, + expectedError: fmt.Errorf("unexpected signed message type"), + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + counts := &MessageCounts{} + err := counts.RecordConsensusMessage(tc.signedSSVMessage, tc.msg) + + if tc.expectedError != nil { + require.EqualError(t, err, tc.expectedError.Error()) + } else { + require.NoError(t, err) + } + + require.Equal(t, tc.expectedCounts, *counts) + }) + } +} + +func TestValidateConsensusMessage(t *testing.T) { + type input struct { + counts *MessageCounts + signedSSVMessage *spectypes.SignedSSVMessage + msg *specqbft.Message + limits MessageCounts + } + + tt := []struct { + name string + input input + expectedError error + }{ + { + name: "ProposalMessage_ExceedsLimit_ReturnsError", + input: input{ + counts: &MessageCounts{Proposal: 2}, + signedSSVMessage: &spectypes.SignedSSVMessage{}, + msg: &specqbft.Message{MsgType: specqbft.ProposalMsgType}, + limits: MessageCounts{Proposal: 0}, + }, + expectedError: fmt.Errorf("message is duplicated, got proposal, having pre-consensus: 0, proposal: 2, prepare: 0, commit: 0, round change: 0, post-consensus: 0"), + }, + { + name: "PrepareMessage_ExceedsLimit_ReturnsError", + input: input{ + counts: &MessageCounts{Prepare: 2}, + signedSSVMessage: &spectypes.SignedSSVMessage{}, + msg: &specqbft.Message{MsgType: specqbft.PrepareMsgType}, + limits: MessageCounts{Prepare: 0}, + }, + expectedError: fmt.Errorf("message is duplicated, got prepare, having pre-consensus: 0, proposal: 0, prepare: 2, commit: 0, round change: 0, post-consensus: 0"), + }, + { + name: "CommitMessageWithSingleOperator_ExceedsLimit_ReturnsError", + input: input{ + counts: &MessageCounts{Commit: 2}, + signedSSVMessage: &spectypes.SignedSSVMessage{OperatorIDs: []spectypes.OperatorID{1}}, + msg: &specqbft.Message{MsgType: specqbft.CommitMsgType}, + limits: MessageCounts{Commit: 0}, + }, + expectedError: fmt.Errorf("message is duplicated, got commit, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 2, round change: 0, post-consensus: 0"), + }, + { + name: "RoundChangeMessage_ExceedsLimit_ReturnsError", + input: input{ + counts: &MessageCounts{RoundChange: 2}, + signedSSVMessage: &spectypes.SignedSSVMessage{}, + msg: &specqbft.Message{MsgType: specqbft.RoundChangeMsgType}, + limits: MessageCounts{RoundChange: 0}, + }, + expectedError: fmt.Errorf("message is duplicated, got round change, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 0, round change: 2, post-consensus: 0"), + }, + { + name: "UnexpectedMessageType_ReturnsError", + input: input{ + counts: &MessageCounts{}, + signedSSVMessage: &spectypes.SignedSSVMessage{}, + msg: &specqbft.Message{MsgType: 
specqbft.MessageType(12345)}, + limits: MessageCounts{}, + }, + expectedError: fmt.Errorf("unexpected signed message type"), + }, + { + name: "ValidProposalMessage_HappyFlow", + input: input{ + counts: &MessageCounts{Proposal: 2}, + signedSSVMessage: &spectypes.SignedSSVMessage{}, + msg: &specqbft.Message{MsgType: specqbft.ProposalMsgType}, + limits: MessageCounts{Proposal: 100}, + }, + expectedError: nil, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + err := tc.input.counts.ValidateConsensusMessage(tc.input.signedSSVMessage, tc.input.msg, tc.input.limits) + + if tc.expectedError != nil { + require.EqualError(t, err, tc.expectedError.Error()) + } else { + require.NoError(t, err) + } + + }) + } +} diff --git a/message/validation/validation.go b/message/validation/validation.go index 6e27443f1d..ca51574e03 100644 --- a/message/validation/validation.go +++ b/message/validation/validation.go @@ -162,7 +162,7 @@ func (mv *messageValidator) handleSignedSSVMessage(signedSSVMessage *spectypes.S } default: - panic("unreachable: message type assertion should have been done") + return decodedMessage, fmt.Errorf("unreachable: message type assertion should have been done") } return decodedMessage, nil From c0ecafbdebcfa7408b80bba896b06a92befff03d Mon Sep 17 00:00:00 2001 From: Anton Korpusenko Date: Sun, 15 Sep 2024 17:42:36 +0300 Subject: [PATCH 06/35] chore: removed all dkg related mentions (#1728) * removed all dkg related mentions * added unknown msg type test handling --- message/validation/errors.go | 1 - message/validation/genesis/errors.go | 2 -- message/validation/genesis/validation.go | 3 --- message/validation/signed_ssv_message.go | 3 --- message/validation/validation_test.go | 9 +++++---- operator/validator/controller_test.go | 2 +- protocol/genesis/message/msg.go | 2 -- protocol/v2/message/msg.go | 2 -- 8 files changed, 6 insertions(+), 18 deletions(-) diff --git a/message/validation/errors.go b/message/validation/errors.go index 9be19b3768..c1a90997f3 100644 --- a/message/validation/errors.go +++ b/message/validation/errors.go @@ -92,7 +92,6 @@ var ( ErrFullDataHash = Error{text: "couldn't hash root", reject: true} ErrUndecodableMessageData = Error{text: "message data could not be decoded", reject: true} ErrEventMessage = Error{text: "unexpected event message", reject: true} - ErrDKGMessage = Error{text: "unexpected DKG message", reject: true} ErrUnknownSSVMessageType = Error{text: "unknown SSV message type", reject: true} ErrUnknownQBFTMessageType = Error{text: "unknown QBFT message type", reject: true} ErrInvalidPartialSignatureType = Error{text: "unknown partial signature message type", reject: true} diff --git a/message/validation/genesis/errors.go b/message/validation/genesis/errors.go index b15deb2262..52a1e5dd7b 100644 --- a/message/validation/genesis/errors.go +++ b/message/validation/genesis/errors.go @@ -88,12 +88,10 @@ var ( ErrWrongSignersLength = Error{text: "decided signers size is not between quorum and committee size", reject: true} ErrDifferentProposalData = Error{text: "different proposal data", reject: true} ErrEventMessage = Error{text: "event messages are not broadcast", reject: true} - ErrDKGMessage = Error{text: "DKG messages are not supported", reject: true} ErrMalformedPrepareJustifications = Error{text: "malformed prepare justifications", reject: true} ErrUnexpectedPrepareJustifications = Error{text: "prepare justifications unexpected for this message type", reject: true} ErrMalformedRoundChangeJustifications = Error{text: 
"malformed round change justifications", reject: true} ErrUnexpectedRoundChangeJustifications = Error{text: "round change justifications unexpected for this message type", reject: true} - ErrInvalidJustifications = Error{text: "invalid justifications", reject: true} ErrTooManyDutiesPerEpoch = Error{text: "too many duties per epoch", reject: true} ErrNoDuty = Error{text: "no duty for this epoch", reject: true} ErrDeserializePublicKey = Error{text: "deserialize public key", reject: true} diff --git a/message/validation/genesis/validation.go b/message/validation/genesis/validation.go index 14f5998d5f..b401f8c898 100644 --- a/message/validation/genesis/validation.go +++ b/message/validation/genesis/validation.go @@ -487,9 +487,6 @@ func (mv *messageValidator) validateSSVMessage(msg *genesisqueue.GenesisSSVMessa case spectypes.MsgType(ssvmessage.SSVEventMsgType): return nil, descriptor, ErrEventMessage - case spectypes.DKGMsgType: - return nil, descriptor, ErrDKGMessage - default: return nil, descriptor, ErrUnknownSSVMessageType } diff --git a/message/validation/signed_ssv_message.go b/message/validation/signed_ssv_message.go index 8b04b91bb0..39d61a61e9 100644 --- a/message/validation/signed_ssv_message.go +++ b/message/validation/signed_ssv_message.go @@ -113,9 +113,6 @@ func (mv *messageValidator) validateSSVMessage(ssvMessage *spectypes.SSVMessage) case ssvmessage.SSVEventMsgType: // Rule: Event message return ErrEventMessage - case spectypes.DKGMsgType: - // Rule: DKG message - return ErrDKGMessage default: // Unknown message type e := ErrUnknownSSVMessageType diff --git a/message/validation/validation_test.go b/message/validation/validation_test.go index 15bab2ac89..7d084d43ce 100644 --- a/message/validation/validation_test.go +++ b/message/validation/validation_test.go @@ -1383,20 +1383,21 @@ func Test_ValidateSSVMessage(t *testing.T) { require.ErrorIs(t, err, ErrEventMessage) }) - // Receive a dkg message from an operator that is not myself should receive an error - t.Run("dkg message", func(t *testing.T) { + // Receive a unknown message type from an operator that is not myself should receive an error + t.Run("unknown type message", func(t *testing.T) { validator := New(netCfg, validatorStore, dutyStore, signatureVerifier).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) signedSSVMessage := generateSignedMessage(ks, committeeIdentifier, slot) - signedSSVMessage.SSVMessage.MsgType = spectypes.DKGMsgType + unknownType := spectypes.MsgType(12345) + signedSSVMessage.SSVMessage.MsgType = unknownType receivedAt := netCfg.Beacon.GetSlotStartTime(slot) topicID := commons.CommitteeTopicID(spectypes.CommitteeID(signedSSVMessage.SSVMessage.GetID().GetDutyExecutorID()[16:]))[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, receivedAt) - require.ErrorIs(t, err, ErrDKGMessage) + require.ErrorContains(t, err, fmt.Sprintf("%s, got %d", ErrUnknownSSVMessageType.Error(), unknownType)) }) // Receive a message with a wrong signature diff --git a/operator/validator/controller_test.go b/operator/validator/controller_test.go index 1d4d21b840..4599d22b7b 100644 --- a/operator/validator/controller_test.go +++ b/operator/validator/controller_test.go @@ -310,7 +310,7 @@ func TestHandleNonCommitteeMessages(t *testing.T) { ctr.messageRouter.Route(context.TODO(), &queue.SSVMessage{ SSVMessage: &spectypes.SSVMessage{ // checks that not process unnecessary message - MsgType: spectypes.DKGMsgType, + MsgType: 123, MsgID: identifier, Data: []byte("data"), }, diff --git 
a/protocol/genesis/message/msg.go b/protocol/genesis/message/msg.go index 08da4be00d..0ff34e9a09 100644 --- a/protocol/genesis/message/msg.go +++ b/protocol/genesis/message/msg.go @@ -21,8 +21,6 @@ func MsgTypeToString(mt genesisspectypes.MsgType) string { return "consensus" case genesisspectypes.SSVPartialSignatureMsgType: return "partial_signature" - case genesisspectypes.DKGMsgType: - return "dkg" case SSVSyncMsgType: return "sync" case SSVEventMsgType: diff --git a/protocol/v2/message/msg.go b/protocol/v2/message/msg.go index abf8f698ce..dd643753c8 100644 --- a/protocol/v2/message/msg.go +++ b/protocol/v2/message/msg.go @@ -21,8 +21,6 @@ func MsgTypeToString(mt spectypes.MsgType) string { return "consensus" case spectypes.SSVPartialSignatureMsgType: return "partial_signature" - case spectypes.DKGMsgType: - return "dkg" case SSVSyncMsgType: return "sync" case SSVEventMsgType: From 903c2ac0d56303228815bebba56695c1175b9b84 Mon Sep 17 00:00:00 2001 From: Anton Korpusenko Date: Sun, 15 Sep 2024 18:06:39 +0300 Subject: [PATCH 07/35] spec alignment to `dev` branch (#1639) * returned back filtering. spec tests fail still. #1638 * aligned to the new spec version. protocol/v2/ssv test mapping fails. * fixed spec tests * minor eth chagnes * returned back t.Parallel usage + removed extra if statement in StartDuty, filtering * changed simulator.go made it full copy of single file backend.go simulated package. updated eth tests to be compatible with it * fixed minor type and debug leftovers + splitted our code and copy from geth in simulator package * simplified return statement * minor fixes * fixed ConstructBaseRunnerWithShareMap * approve spec changes * spec alignment * add validator to differ.config.yaml * align more changes --------- Co-authored-by: moshe-blox --- eth/ethtest/common_test.go | 19 +- eth/ethtest/eth_e2e_test.go | 4 +- eth/ethtest/utils_test.go | 9 +- eth/eventhandler/event_handler_test.go | 15 +- eth/eventsyncer/event_syncer_test.go | 19 +- eth/executionclient/execution_client_test.go | 42 +- eth/simulator/simulator.go | 1027 ++--------------- go.mod | 38 +- go.sum | 227 +--- integration/qbft/tests/scenario_test.go | 3 +- network/discovery/options.go | 8 +- operator/duties/voluntary_exit_test.go | 2 +- operator/validator/controller.go | 47 +- protocol/v2/qbft/controller/controller.go | 15 +- protocol/v2/qbft/controller/decided.go | 8 - protocol/v2/ssv/runner/aggregator.go | 8 +- protocol/v2/ssv/runner/committee.go | 11 +- protocol/v2/ssv/runner/proposer.go | 8 +- protocol/v2/ssv/runner/runner.go | 22 +- .../ssv/runner/sync_committee_aggregator.go | 8 +- .../v2/ssv/runner/validator_registration.go | 8 +- protocol/v2/ssv/runner/voluntary_exit.go | 9 +- .../spectest/committee_msg_processing_type.go | 4 +- .../v2/ssv/spectest/msg_processing_type.go | 11 +- .../ssv/spectest/runner_construction_type.go | 46 + protocol/v2/ssv/spectest/ssv_mapping_test.go | 21 +- protocol/v2/ssv/testing/runner.go | 200 ++-- protocol/v2/ssv/validator/committee.go | 176 ++- protocol/v2/ssv/validator/utils_test.go | 90 -- protocol/v2/ssv/validator/validator.go | 32 +- scripts/spec-alignment/differ.config.yaml | 3 +- 31 files changed, 612 insertions(+), 1528 deletions(-) create mode 100644 protocol/v2/ssv/spectest/runner_construction_type.go delete mode 100644 protocol/v2/ssv/validator/utils_test.go diff --git a/eth/ethtest/common_test.go b/eth/ethtest/common_test.go index ec3c164b14..dacfb7b6a1 100644 --- a/eth/ethtest/common_test.go +++ b/eth/ethtest/common_test.go @@ -12,7 +12,7 @@ import ( 
"github.com/ethereum/go-ethereum/accounts/abi/bind" ethcommon "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/rpc" - gomock "go.uber.org/mock/gomock" + "go.uber.org/mock/gomock" "go.uber.org/zap/zaptest" "github.com/ssvlabs/ssv/eth/eventsyncer" @@ -26,7 +26,7 @@ import ( type CommonTestInput struct { t *testing.T - sim *simulator.SimulatedBackend + sim *simulator.Backend boundContract *simcontract.Simcontract blockNum *uint64 nodeStorage storage.Storage @@ -35,7 +35,7 @@ type CommonTestInput struct { func NewCommonTestInput( t *testing.T, - sim *simulator.SimulatedBackend, + sim *simulator.Backend, boundContract *simcontract.Simcontract, blockNum *uint64, nodeStorage storage.Storage, @@ -56,7 +56,7 @@ type TestEnv struct { validators []*testValidatorData ops []*testOperator nodeStorage storage.Storage - sim *simulator.SimulatedBackend + sim *simulator.Backend boundContract *simcontract.Simcontract auth *bind.TransactOpts shares [][]byte @@ -131,8 +131,7 @@ func (e *TestEnv) setup( // Adding testAddresses to the genesis block mostly to specify some balances for them sim := simTestBackend(testAddresses) - // Create JSON-RPC handler - rpcServer, err := sim.Node.RPCHandler() + rpcServer, err := sim.Node().RPCHandler() e.rpcServer = rpcServer if err != nil { return fmt.Errorf("can't create RPC server: %w", err) @@ -153,7 +152,7 @@ func (e *TestEnv) setup( return err } - contractAddr, _, _, err := bind.DeployContract(auth, parsed, ethcommon.FromHex(simcontract.SimcontractMetaData.Bin), sim) + contractAddr, _, _, err := bind.DeployContract(auth, parsed, ethcommon.FromHex(simcontract.SimcontractMetaData.Bin), sim.Client()) if err != nil { return fmt.Errorf("deploy contract: %w", err) } @@ -161,7 +160,7 @@ func (e *TestEnv) setup( sim.Commit() // Check contract code at the simulated blockchain - contractCode, err := sim.CodeAt(ctx, contractAddr, nil) + contractCode, err := sim.Client().CodeAt(ctx, contractAddr, nil) if err != nil { return fmt.Errorf("get contract code: %w", err) } @@ -186,7 +185,7 @@ func (e *TestEnv) setup( return err } - e.boundContract, err = simcontract.NewSimcontract(contractAddr, sim) + e.boundContract, err = simcontract.NewSimcontract(contractAddr, sim.Client()) if err != nil { return err } @@ -225,7 +224,7 @@ func (e *TestEnv) CloseFollowDistance(blockNum *uint64) { } } -func commitBlock(sim *simulator.SimulatedBackend, blockNum *uint64) { +func commitBlock(sim *simulator.Backend, blockNum *uint64) { sim.Commit() *blockNum++ } diff --git a/eth/ethtest/eth_e2e_test.go b/eth/ethtest/eth_e2e_test.go index 6e1033d87d..c3ea9d3aa3 100644 --- a/eth/ethtest/eth_e2e_test.go +++ b/eth/ethtest/eth_e2e_test.go @@ -165,8 +165,10 @@ func TestEthExecLayer(t *testing.T) { require.NoError(t, err) require.Equal(t, expectedNonce, nonce) + lastBlockNum, err := testEnv.sim.Client().BlockByNumber(ctx, nil) + require.NoError(t, err) // Not sure does this make sense - require.Equal(t, uint64(testEnv.sim.Blockchain.CurrentBlock().Number.Int64()), *common.blockNum) + require.Equal(t, lastBlockNum.Number().Uint64(), *common.blockNum) } // Step 2: Exit validator diff --git a/eth/ethtest/utils_test.go b/eth/ethtest/utils_test.go index 8c61dc407a..f9baedfcc5 100644 --- a/eth/ethtest/utils_test.go +++ b/eth/ethtest/utils_test.go @@ -10,8 +10,9 @@ import ( ethcommon "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient/simulated" 
"github.com/herumi/bls-eth-go-binary/bls" - gomock "go.uber.org/mock/gomock" + "go.uber.org/mock/gomock" "go.uber.org/zap" "github.com/ssvlabs/ssv/ekm" @@ -281,14 +282,14 @@ func setupOperatorStorage( return nodeStorage, operatorData } -func simTestBackend(testAddresses []*ethcommon.Address) *simulator.SimulatedBackend { +func simTestBackend(testAddresses []*ethcommon.Address) *simulator.Backend { genesis := types.GenesisAlloc{} for _, testAddr := range testAddresses { genesis[*testAddr] = types.Account{Balance: big.NewInt(10000000000000000)} } - return simulator.NewSimulatedBackend( - genesis, 50_000_000, + return simulator.NewBackend(genesis, + simulated.WithBlockGasLimit(50_000_000), ) } diff --git a/eth/eventhandler/event_handler_test.go b/eth/eventhandler/event_handler_test.go index 5e0d08d4fe..138e171329 100644 --- a/eth/eventhandler/event_handler_test.go +++ b/eth/eventhandler/event_handler_test.go @@ -18,6 +18,7 @@ import ( ethcommon "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient/simulated" "github.com/herumi/bls-eth-go-binary/bls" "github.com/pkg/errors" "github.com/stretchr/testify/require" @@ -90,7 +91,7 @@ func TestHandleBlockEventsStream(t *testing.T) { sim := simTestBackend(testAddresses) // Create JSON-RPC handler - rpcServer, _ := sim.Node.RPCHandler() + rpcServer, _ := sim.Node().RPCHandler() // Expose handler on a test server with ws open httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() @@ -99,14 +100,14 @@ func TestHandleBlockEventsStream(t *testing.T) { parsed, _ := abi.JSON(strings.NewReader(simcontract.SimcontractMetaData.ABI)) auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) - contractAddr, _, _, err := bind.DeployContract(auth, parsed, ethcommon.FromHex(simcontract.SimcontractMetaData.Bin), sim) + contractAddr, _, _, err := bind.DeployContract(auth, parsed, ethcommon.FromHex(simcontract.SimcontractMetaData.Bin), sim.Client()) if err != nil { t.Errorf("deploying contract: %v", err) } sim.Commit() // Check contract code at the simulated blockchain - contractCode, err := sim.CodeAt(ctx, contractAddr, nil) + contractCode, err := sim.Client().CodeAt(ctx, contractAddr, nil) if err != nil { t.Errorf("getting contract code: %v", err) } @@ -124,7 +125,7 @@ func TestHandleBlockEventsStream(t *testing.T) { logs := client.StreamLogs(ctx, 0) - boundContract, err := simcontract.NewSimcontract(contractAddr, sim) + boundContract, err := simcontract.NewSimcontract(contractAddr, sim.Client()) require.NoError(t, err) // Generate a new validator @@ -1439,15 +1440,15 @@ func unmarshalLog(t *testing.T, rawOperatorAdded string) ethtypes.Log { return vLogOperatorAdded } -func simTestBackend(testAddresses []*ethcommon.Address) *simulator.SimulatedBackend { +func simTestBackend(testAddresses []*ethcommon.Address) *simulator.Backend { genesis := ethtypes.GenesisAlloc{} for _, testAddr := range testAddresses { genesis[*testAddr] = ethtypes.Account{Balance: big.NewInt(10000000000000000)} } - return simulator.NewSimulatedBackend( - genesis, 50_000_000, + return simulator.NewBackend( + genesis, simulated.WithBlockGasLimit(50_000_000), ) } diff --git a/eth/eventsyncer/event_syncer_test.go b/eth/eventsyncer/event_syncer_test.go index 8e6f8e3516..5cdd51a8fb 100644 --- a/eth/eventsyncer/event_syncer_test.go +++ b/eth/eventsyncer/event_syncer_test.go @@ -15,8 +15,9 @@ import ( 
"github.com/ethereum/go-ethereum/core/types" ethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient/simulated" "github.com/stretchr/testify/require" - gomock "go.uber.org/mock/gomock" + "go.uber.org/mock/gomock" "go.uber.org/zap" "go.uber.org/zap/zaptest" @@ -61,27 +62,27 @@ func TestEventSyncer(t *testing.T) { // Create sim instance with a delay between block execution sim := simTestBackend(testAddr) - rpcServer, _ := sim.Node.RPCHandler() + rpcServer, _ := sim.Node().RPCHandler() httpSrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpSrv.Close() parsed, _ := abi.JSON(strings.NewReader(simcontract.SimcontractMetaData.ABI)) auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) - contractAddr, _, _, err := bind.DeployContract(auth, parsed, ethcommon.FromHex(simcontract.SimcontractMetaData.Bin), sim) + contractAddr, _, _, err := bind.DeployContract(auth, parsed, ethcommon.FromHex(simcontract.SimcontractMetaData.Bin), sim.Client()) if err != nil { t.Errorf("deploying contract: %v", err) } sim.Commit() // Check contract code at the simulated blockchain - contractCode, err := sim.CodeAt(ctx, contractAddr, nil) + contractCode, err := sim.Client().CodeAt(ctx, contractAddr, nil) if err != nil { t.Errorf("getting contract code: %v", err) } require.NotEmpty(t, contractCode) - boundContract, err := simcontract.NewSimcontract(contractAddr, sim) + boundContract, err := simcontract.NewSimcontract(contractAddr, sim.Client()) require.NoError(t, err) addr := "ws:" + strings.TrimPrefix(httpSrv.URL, "http:") @@ -107,7 +108,7 @@ func TestEventSyncer(t *testing.T) { tx, err := boundContract.SimcontractTransactor.RegisterOperator(auth, pckd, big.NewInt(100_000_000)) require.NoError(t, err) sim.Commit() - receipt, err := sim.TransactionReceipt(ctx, tx.Hash()) + receipt, err := sim.Client().TransactionReceipt(ctx, tx.Hash()) if err != nil { t.Errorf("get receipt: %v", err) } @@ -194,11 +195,11 @@ func setupEventHandler( return eh } -func simTestBackend(testAddr ethcommon.Address) *simulator.SimulatedBackend { - return simulator.NewSimulatedBackend( +func simTestBackend(testAddr ethcommon.Address) *simulator.Backend { + return simulator.NewBackend( types.GenesisAlloc{ testAddr: {Balance: big.NewInt(10000000000000000)}, - }, 10000000, + }, simulated.WithBlockGasLimit(10000000), ) } diff --git a/eth/executionclient/execution_client_test.go b/eth/executionclient/execution_client_test.go index 10fa1b9a18..0bcc24a583 100644 --- a/eth/executionclient/execution_client_test.go +++ b/eth/executionclient/execution_client_test.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" ethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient/simulated" "github.com/stretchr/testify/require" "go.uber.org/zap" "go.uber.org/zap/zaptest" @@ -31,11 +32,12 @@ var ( testAddr = crypto.PubkeyToAddress(testKey.PublicKey) ) -func simTestBackend(testAddr ethcommon.Address) *simulator.SimulatedBackend { - return simulator.NewSimulatedBackend( +func simTestBackend(testAddr ethcommon.Address) *simulator.Backend { + return simulator.NewBackend( types.GenesisAlloc{ testAddr: {Balance: big.NewInt(10000000000000000)}, - }, 10000000, + }, + simulated.WithBlockGasLimit(10000000), ) } @@ -63,7 +65,7 @@ func TestFetchHistoricalLogs(t *testing.T) { sim := simTestBackend(testAddr) // Create JSON-RPC 
handler - rpcServer, _ := sim.Node.RPCHandler() + rpcServer, _ := sim.Node().RPCHandler() // Expose handler on a test server with ws open httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() @@ -72,7 +74,7 @@ func TestFetchHistoricalLogs(t *testing.T) { parsed, _ := abi.JSON(strings.NewReader(callableAbi)) auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) - contractAddr, _, contract, err := bind.DeployContract(auth, parsed, ethcommon.FromHex(callableBin), sim) + contractAddr, _, contract, err := bind.DeployContract(auth, parsed, ethcommon.FromHex(callableBin), sim.Client()) if err != nil { t.Errorf("deploying contract: %v", err) } @@ -138,7 +140,7 @@ func TestStreamLogs(t *testing.T) { delay := time.Millisecond * 10 sim := simTestBackend(testAddr) - rpcServer, _ := sim.Node.RPCHandler() + rpcServer, _ := sim.Node().RPCHandler() httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpsrv.Close() @@ -147,7 +149,7 @@ func TestStreamLogs(t *testing.T) { // Deploy the contract parsed, _ := abi.JSON(strings.NewReader(callableAbi)) auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) - contractAddr, _, contract, err := bind.DeployContract(auth, parsed, ethcommon.FromHex(callableBin), sim) + contractAddr, _, contract, err := bind.DeployContract(auth, parsed, ethcommon.FromHex(callableBin), sim.Client()) if err != nil { t.Errorf("deploying contract: %v", err) } @@ -222,7 +224,7 @@ func TestFetchLogsInBatches(t *testing.T) { // Create simulator instance sim := simTestBackend(testAddr) - rpcServer, _ := sim.Node.RPCHandler() + rpcServer, _ := sim.Node().RPCHandler() httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpsrv.Close() @@ -231,7 +233,7 @@ func TestFetchLogsInBatches(t *testing.T) { // Deploy the contract parsed, _ := abi.JSON(strings.NewReader(callableAbi)) auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) - contractAddr, _, contract, err := bind.DeployContract(auth, parsed, ethcommon.FromHex(callableBin), sim) + contractAddr, _, contract, err := bind.DeployContract(auth, parsed, ethcommon.FromHex(callableBin), sim.Client()) if err != nil { t.Errorf("deploying contract: %v", err) } @@ -424,7 +426,7 @@ func TestSimSSV(t *testing.T) { sim := simTestBackend(testAddr) // Create JSON-RPC handler - rpcServer, _ := sim.Node.RPCHandler() + rpcServer, _ := sim.Node().RPCHandler() // Expose handler on a test server with ws open httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() @@ -433,14 +435,14 @@ func TestSimSSV(t *testing.T) { parsed, _ := abi.JSON(strings.NewReader(simcontract.SimcontractMetaData.ABI)) auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) - contractAddr, _, _, err := bind.DeployContract(auth, parsed, ethcommon.FromHex(simcontract.SimcontractMetaData.Bin), sim) + contractAddr, _, _, err := bind.DeployContract(auth, parsed, ethcommon.FromHex(simcontract.SimcontractMetaData.Bin), sim.Client()) if err != nil { t.Errorf("deploying contract: %v", err) } sim.Commit() // Check contract code at the simulated blockchain - contractCode, err := sim.CodeAt(ctx, contractAddr, nil) + contractCode, err := sim.Client().CodeAt(ctx, contractAddr, nil) if err != nil { t.Errorf("getting contract code: %v", err) } @@ -455,14 +457,14 @@ func TestSimSSV(t *testing.T) { logs := client.StreamLogs(ctx, 0) - boundContract, err := 
simcontract.NewSimcontract(contractAddr, sim) + boundContract, err := simcontract.NewSimcontract(contractAddr, sim.Client()) require.NoError(t, err) // Emit event OperatorAdded tx, err := boundContract.SimcontractTransactor.RegisterOperator(auth, ethcommon.Hex2Bytes("0xb24454393691331ee6eba4ffa2dbb2600b9859f908c3e648b6c6de9e1dea3e9329866015d08355c8d451427762b913d1"), big.NewInt(100_000_000)) require.NoError(t, err) sim.Commit() - receipt, err := sim.TransactionReceipt(ctx, tx.Hash()) + receipt, err := sim.Client().TransactionReceipt(ctx, tx.Hash()) if err != nil { t.Errorf("get receipt: %v", err) } @@ -475,7 +477,7 @@ func TestSimSSV(t *testing.T) { tx, err = boundContract.SimcontractTransactor.RemoveOperator(auth, 1) require.NoError(t, err) sim.Commit() - receipt, err = sim.TransactionReceipt(ctx, tx.Hash()) + receipt, err = sim.Client().TransactionReceipt(ctx, tx.Hash()) if err != nil { t.Errorf("get receipt: %v", err) } @@ -499,7 +501,7 @@ func TestSimSSV(t *testing.T) { }) require.NoError(t, err) sim.Commit() - receipt, err = sim.TransactionReceipt(ctx, tx.Hash()) + receipt, err = sim.Client().TransactionReceipt(ctx, tx.Hash()) if err != nil { t.Errorf("get receipt: %v", err) } @@ -522,7 +524,7 @@ func TestSimSSV(t *testing.T) { }) require.NoError(t, err) sim.Commit() - receipt, err = sim.TransactionReceipt(ctx, tx.Hash()) + receipt, err = sim.Client().TransactionReceipt(ctx, tx.Hash()) if err != nil { t.Errorf("get receipt: %v", err) } @@ -545,7 +547,7 @@ func TestSimSSV(t *testing.T) { }) require.NoError(t, err) sim.Commit() - receipt, err = sim.TransactionReceipt(ctx, tx.Hash()) + receipt, err = sim.Client().TransactionReceipt(ctx, tx.Hash()) if err != nil { t.Errorf("get receipt: %v", err) } @@ -568,7 +570,7 @@ func TestSimSSV(t *testing.T) { }) require.NoError(t, err) sim.Commit() - receipt, err = sim.TransactionReceipt(ctx, tx.Hash()) + receipt, err = sim.Client().TransactionReceipt(ctx, tx.Hash()) if err != nil { t.Errorf("get receipt: %v", err) } @@ -584,7 +586,7 @@ func TestSimSSV(t *testing.T) { ) require.NoError(t, err) sim.Commit() - receipt, err = sim.TransactionReceipt(ctx, tx.Hash()) + receipt, err = sim.Client().TransactionReceipt(ctx, tx.Hash()) if err != nil { t.Errorf("get receipt: %v", err) } diff --git a/eth/simulator/simulator.go b/eth/simulator/simulator.go index 565d174429..dd65e31783 100644 --- a/eth/simulator/simulator.go +++ b/eth/simulator/simulator.go @@ -1,4 +1,4 @@ -// Copyright 2015 The go-ethereum Authors +// Copyright 2023 The go-ethereum Authors // This file is part of the go-ethereum library. 
// // The go-ethereum library is free software: you can redistribute it and/or modify @@ -17,168 +17,157 @@ package simulator import ( - "context" "errors" - "fmt" - "github.com/holiman/uint256" - "math/big" - "sync" "time" "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/bloombits" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/catalyst" + "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/filters" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/miner" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" ) -// This nil assignment ensures at compile time that SimulatedBackend implements bind.ContractBackend. -var _ bind.ContractBackend = (*SimulatedBackend)(nil) - -var ( - errBlockNumberUnsupported = errors.New("simulatedBackend cannot access blocks other than the latest block") - errBlockDoesNotExist = errors.New("block does not exist in blockchain") - errTransactionDoesNotExist = errors.New("transaction does not exist") -) - -// SimulatedBackend implements bind.ContractBackend, simulating a blockchain in -// the background. Its main purpose is to allow for easy testing of contract bindings. -// Simulated backend implements the following interfaces: -// ChainReader, ChainStateReader, ContractBackend, ContractCaller, ContractFilterer, ContractTransactor, -// DeployBackend, GasEstimator, GasPricer, LogFilterer, PendingContractCaller, TransactionReader, and TransactionSender -type SimulatedBackend struct { - Database ethdb.Database // In memory database to store our testing data - Blockchain *core.BlockChain // Ethereum blockchain to handle the consensus - Node *node.Node - - mu sync.Mutex - pendingBlock *types.Block // Currently pending block that will be imported on request - pendingState *state.StateDB // Currently pending state that will be the active on request - pendingReceipts types.Receipts // Currently receipts for the pending block - - events *filters.EventSystem // for filtering log events live - filterSystem *filters.FilterSystem // for filtering database logs - - config *params.ChainConfig -} - -// NewSimulatedBackendWithDatabase creates a new binding backend based on the given database -// and uses a simulated blockchain for testing purposes. +// Client exposes the methods provided by the Ethereum RPC client. 
+type Client interface { + ethereum.BlockNumberReader + ethereum.ChainReader + ethereum.ChainStateReader + ethereum.ContractCaller + ethereum.GasEstimator + ethereum.GasPricer + ethereum.GasPricer1559 + ethereum.FeeHistoryReader + ethereum.LogFilterer + ethereum.PendingStateReader + ethereum.PendingContractCaller + ethereum.TransactionReader + ethereum.TransactionSender + ethereum.ChainIDReader +} + +// simClient wraps ethclient. This exists to prevent extracting ethclient.Client +// from the Client interface returned by Backend. +type simClient struct { + *ethclient.Client +} + +// Backend is a simulated blockchain. You can use it to test your contracts or +// other code that interacts with the Ethereum chain. +type Backend struct { + node *node.Node + beacon *catalyst.SimulatedBeacon + client simClient +} + +// Node returns the internal *node.Node to be used by tests. +func (b *Backend) Node() *node.Node { + return b.node +} + +// NewBackend creates a new simulated blockchain that can be used as a backend for +// contract bindings in unit tests. +// // A simulated backend always uses chainID 1337. -func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc types.GenesisAlloc, gasLimit uint64) *SimulatedBackend { - genesis := core.Genesis{ - Config: params.AllEthashProtocolChanges, - GasLimit: gasLimit, - Alloc: alloc, - ExtraData: []byte("test genesis"), - Timestamp: 9000, - BaseFee: big.NewInt(params.InitialBaseFee), - } - - n, err := node.New(&node.Config{}) +func NewBackend(alloc types.GenesisAlloc, options ...func(nodeConf *node.Config, ethConf *ethconfig.Config)) *Backend { + // Create the default configurations for the outer node shell and the Ethereum + // service to mutate with the options afterwards + nodeConf := node.DefaultConfig + nodeConf.DataDir = "" + nodeConf.P2P = p2p.Config{NoDiscovery: true} + + ethConf := ethconfig.Defaults + ethConf.Genesis = &core.Genesis{ + Config: params.AllDevChainProtocolChanges, + GasLimit: ethconfig.Defaults.Miner.GasCeil, + Alloc: alloc, + } + ethConf.SyncMode = downloader.FullSync + ethConf.TxPool.NoLocals = true + + for _, option := range options { + option(&nodeConf, ðConf) + } + // Assemble the Ethereum stack to run the chain with + stack, err := node.New(&nodeConf) if err != nil { - panic(err) + panic(err) // this should never happen } - config := ðconfig.Config{Genesis: &genesis, Miner: miner.DefaultConfig} - ethservice, err := eth.New(n, config) + sim, err := newWithNode(stack, ðConf, 0) if err != nil { - panic(err) + panic(err) // this should never happen } + return sim +} - // Add required APIs - filterSystem := filters.NewFilterSystem(ethservice.APIBackend, filters.Config{}) - n.RegisterAPIs([]rpc.API{{ +// newWithNode sets up a simulated backend on an existing node. The provided node +// must not be started and will be started by this method. 
+func newWithNode(stack *node.Node, conf *eth.Config, blockPeriod uint64) (*Backend, error) { + backend, err := eth.New(stack, conf) + if err != nil { + return nil, err + } + // Register the filter system + filterSystem := filters.NewFilterSystem(backend.APIBackend, filters.Config{}) + stack.RegisterAPIs([]rpc.API{{ Namespace: "eth", - Service: filters.NewFilterAPI(filterSystem, false), + Service: filters.NewFilterAPI(filterSystem), }}) - - backend := &SimulatedBackend{ - Database: ethservice.ChainDb(), - Blockchain: ethservice.BlockChain(), - config: genesis.Config, - Node: n, + // Start the node + if err := stack.Start(); err != nil { + return nil, err } - - filterBackend := &filterBackend{ethservice.ChainDb(), ethservice.BlockChain(), backend} - backend.filterSystem = filters.NewFilterSystem(filterBackend, filters.Config{}) - backend.events = filters.NewEventSystem(backend.filterSystem, false) - - header := backend.Blockchain.CurrentBlock() - block := backend.Blockchain.GetBlock(header.Hash(), header.Number.Uint64()) - - backend.rollback(block) - // Start eth1 node - if err := n.Start(); err != nil { - panic(err) + // Set up the simulated beacon + beacon, err := catalyst.NewSimulatedBeacon(blockPeriod, backend) + if err != nil { + return nil, err } - return backend -} - -// NewSimulatedBackend creates a new binding backend using a simulated blockchain -// for testing purposes. -// A simulated backend always uses chainID 1337. -func NewSimulatedBackend(alloc types.GenesisAlloc, gasLimit uint64) *SimulatedBackend { - return NewSimulatedBackendWithDatabase(rawdb.NewMemoryDatabase(), alloc, gasLimit) -} - -// Close terminates the underlying blockchain's update loop. -func (b *SimulatedBackend) Close() error { - b.Blockchain.Stop() - return nil + // Reorg our chain back to genesis + if err := beacon.Fork(backend.BlockChain().GetCanonicalHash(0)); err != nil { + return nil, err + } + return &Backend{ + node: stack, + beacon: beacon, + client: simClient{ethclient.NewClient(stack.Attach())}, + }, nil } -// Commit imports all the pending transactions as a single block and starts a -// fresh new state. -func (b *SimulatedBackend) Commit() common.Hash { - b.mu.Lock() - defer b.mu.Unlock() - - if _, err := b.Blockchain.InsertChain([]*types.Block{b.pendingBlock}); err != nil { - panic(err) // This cannot happen unless the simulator is wrong, fail in that case +// Close shuts down the simBackend. +// The simulated backend can't be used afterwards. +func (n *Backend) Close() error { + if n.client.Client != nil { + n.client.Close() + n.client = simClient{} } - blockHash := b.pendingBlock.Hash() - - // Using the last inserted block here makes it possible to build on a side - // chain after a fork. - b.rollback(b.pendingBlock) - - return blockHash + var err error + if n.beacon != nil { + err = n.beacon.Stop() + n.beacon = nil + } + if n.node != nil { + err = errors.Join(err, n.node.Close()) + n.node = nil + } + return err } -// Rollback aborts all pending transactions, reverting to the last committed state. -func (b *SimulatedBackend) Rollback() { - b.mu.Lock() - defer b.mu.Unlock() - - header := b.Blockchain.CurrentBlock() - block := b.Blockchain.GetBlock(header.Hash(), header.Number.Uint64()) - - b.rollback(block) +// Commit seals a block and moves the chain forward to a new empty block. 
+func (n *Backend) Commit() common.Hash { + return n.beacon.Commit() } -func (b *SimulatedBackend) rollback(parent *types.Block) { - blocks, _ := core.GenerateChain(b.config, parent, ethash.NewFaker(), b.Database, 1, func(int, *core.BlockGen) {}) - - b.pendingBlock = blocks[0] - b.pendingState, _ = state.New(b.pendingBlock.Root(), b.Blockchain.StateCache(), nil) +// Rollback removes all pending transactions, reverting to the last committed state. +func (n *Backend) Rollback() { + n.beacon.Rollback() } // Fork creates a side-chain that can be used to simulate reorgs. @@ -193,777 +182,17 @@ func (b *SimulatedBackend) rollback(parent *types.Block) { // // There is a % chance that the side chain becomes canonical at the same length // to simulate live network behavior. -func (b *SimulatedBackend) Fork(ctx context.Context, parent common.Hash) error { - b.mu.Lock() - defer b.mu.Unlock() - - if len(b.pendingBlock.Transactions()) != 0 { - return errors.New("pending block dirty") - } - block, err := b.blockByHash(ctx, parent) - if err != nil { - return err - } - b.rollback(block) - return nil -} - -// stateByBlockNumber retrieves a state by a given blocknumber. -func (b *SimulatedBackend) stateByBlockNumber(ctx context.Context, blockNumber *big.Int) (*state.StateDB, error) { - if blockNumber == nil || blockNumber.Cmp(b.Blockchain.CurrentBlock().Number) == 0 { - return b.Blockchain.State() - } - block, err := b.blockByNumber(ctx, blockNumber) - if err != nil { - return nil, err - } - return b.Blockchain.StateAt(block.Root()) -} - -// CodeAt returns the code associated with a certain account in the blockchain. -func (b *SimulatedBackend) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { - b.mu.Lock() - defer b.mu.Unlock() - - stateDB, err := b.stateByBlockNumber(ctx, blockNumber) - if err != nil { - return nil, err - } - - return stateDB.GetCode(contract), nil -} - -// BalanceAt returns the wei balance of a certain account in the blockchain. -func (b *SimulatedBackend) BalanceAt(ctx context.Context, contract common.Address, blockNumber *big.Int) (*big.Int, error) { - b.mu.Lock() - defer b.mu.Unlock() - - stateDB, err := b.stateByBlockNumber(ctx, blockNumber) - if err != nil { - return nil, err - } - // GetBalance now returns *uint256.Int, need to convert to *big.Int - uint256Balance := stateDB.GetBalance(contract) - - // Convert *uint256.Int to *big.Int - bigIntBalance := new(big.Int).SetBytes(uint256Balance.Bytes()) - - return bigIntBalance, nil -} - -// NonceAt returns the nonce of a certain account in the blockchain. -func (b *SimulatedBackend) NonceAt(ctx context.Context, contract common.Address, blockNumber *big.Int) (uint64, error) { - b.mu.Lock() - defer b.mu.Unlock() - - stateDB, err := b.stateByBlockNumber(ctx, blockNumber) - if err != nil { - return 0, err - } - - return stateDB.GetNonce(contract), nil -} - -// StorageAt returns the value of key in the storage of an account in the blockchain. -func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Address, key common.Hash, blockNumber *big.Int) ([]byte, error) { - b.mu.Lock() - defer b.mu.Unlock() - - stateDB, err := b.stateByBlockNumber(ctx, blockNumber) - if err != nil { - return nil, err - } - - val := stateDB.GetState(contract, key) - return val[:], nil -} - -// TransactionReceipt returns the receipt of a transaction. 
-func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { - b.mu.Lock() - defer b.mu.Unlock() - - receipt, _, _, _ := rawdb.ReadReceipt(b.Database, txHash, b.config) - if receipt == nil { - return nil, ethereum.NotFound - } - return receipt, nil -} - -// TransactionByHash checks the pool of pending transactions in addition to the -// blockchain. The isPending return value indicates whether the transaction has been -// mined yet. Note that the transaction may not be part of the canonical chain even if -// it's not pending. -func (b *SimulatedBackend) TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, bool, error) { - b.mu.Lock() - defer b.mu.Unlock() - - tx := b.pendingBlock.Transaction(txHash) - if tx != nil { - return tx, true, nil - } - tx, _, _, _ = rawdb.ReadTransaction(b.Database, txHash) - if tx != nil { - return tx, false, nil - } - return nil, false, ethereum.NotFound -} - -// BlockByHash retrieves a block based on the block hash. -func (b *SimulatedBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { - b.mu.Lock() - defer b.mu.Unlock() - - return b.blockByHash(ctx, hash) -} - -// blockByHash retrieves a block based on the block hash without Locking. -func (b *SimulatedBackend) blockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { - if hash == b.pendingBlock.Hash() { - return b.pendingBlock, nil - } - - block := b.Blockchain.GetBlockByHash(hash) - if block != nil { - return block, nil - } - - return nil, errBlockDoesNotExist -} - -// BlockByNumber retrieves a block from the database by number, caching it -// (associated with its hash) if found. -func (b *SimulatedBackend) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { - b.mu.Lock() - defer b.mu.Unlock() - - return b.blockByNumber(ctx, number) -} - -// blockByNumber retrieves a block from the database by number, caching it -// (associated with its hash) if found without Lock. -func (b *SimulatedBackend) blockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { - if number == nil || number.Cmp(b.pendingBlock.Number()) == 0 { - return b.blockByHash(ctx, b.Blockchain.CurrentBlock().Hash()) - } - - block := b.Blockchain.GetBlockByNumber(uint64(number.Int64())) - if block == nil { - return nil, errBlockDoesNotExist - } - - return block, nil -} - -// HeaderByHash returns a block header from the current canonical chain. -func (b *SimulatedBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { - b.mu.Lock() - defer b.mu.Unlock() - - if hash == b.pendingBlock.Hash() { - return b.pendingBlock.Header(), nil - } - - header := b.Blockchain.GetHeaderByHash(hash) - if header == nil { - return nil, errBlockDoesNotExist - } - - return header, nil -} - -// HeaderByNumber returns a block header from the current canonical chain. If number is -// nil, the latest known header is returned. -func (b *SimulatedBackend) HeaderByNumber(ctx context.Context, block *big.Int) (*types.Header, error) { - b.mu.Lock() - defer b.mu.Unlock() - - if block == nil || block.Cmp(b.pendingBlock.Number()) == 0 { - return b.Blockchain.CurrentHeader(), nil - } - - return b.Blockchain.GetHeaderByNumber(uint64(block.Int64())), nil -} - -// TransactionCount returns the number of transactions in a given block. 
-func (b *SimulatedBackend) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) { - b.mu.Lock() - defer b.mu.Unlock() - - if blockHash == b.pendingBlock.Hash() { - return uint(b.pendingBlock.Transactions().Len()), nil - } - - block := b.Blockchain.GetBlockByHash(blockHash) - if block == nil { - return uint(0), errBlockDoesNotExist - } - - return uint(block.Transactions().Len()), nil -} - -// TransactionInBlock returns the transaction for a specific block at a specific index. -func (b *SimulatedBackend) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) { - b.mu.Lock() - defer b.mu.Unlock() - - if blockHash == b.pendingBlock.Hash() { - transactions := b.pendingBlock.Transactions() - if uint(len(transactions)) < index+1 { - return nil, errTransactionDoesNotExist - } - - return transactions[index], nil - } - - block := b.Blockchain.GetBlockByHash(blockHash) - if block == nil { - return nil, errBlockDoesNotExist - } - - transactions := block.Transactions() - if uint(len(transactions)) < index+1 { - return nil, errTransactionDoesNotExist - } - - return transactions[index], nil -} - -// PendingCodeAt returns the code associated with an account in the pending state. -func (b *SimulatedBackend) PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) { - b.mu.Lock() - defer b.mu.Unlock() - - return b.pendingState.GetCode(contract), nil -} - -func newRevertError(result *core.ExecutionResult) *revertError { - reason, errUnpack := abi.UnpackRevert(result.Revert()) - err := errors.New("execution reverted") - if errUnpack == nil { - err = fmt.Errorf("execution reverted: %v", reason) - } - return &revertError{ - error: err, - reason: hexutil.Encode(result.Revert()), - } -} - -// revertError is an API error that encompasses an EVM revert with JSON error -// code and a binary data blob. -type revertError struct { - error - reason string // revert reason hex encoded -} - -// ErrorCode returns the JSON error code for a revert. -// See: https://github.com/ethereum/wiki/wiki/JSON-RPC-Error-Codes-Improvement-Proposal -func (e *revertError) ErrorCode() int { - return 3 -} - -// ErrorData returns the hex encoded revert reason. -func (e *revertError) ErrorData() interface{} { - return e.reason -} - -// CallContract executes a contract call. -func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { - b.mu.Lock() - defer b.mu.Unlock() - - if blockNumber != nil && blockNumber.Cmp(b.Blockchain.CurrentBlock().Number) != 0 { - return nil, errBlockNumberUnsupported - } - stateDB, err := b.Blockchain.State() - if err != nil { - return nil, err - } - res, err := b.callContract(ctx, call, b.Blockchain.CurrentBlock(), stateDB) - if err != nil { - return nil, err - } - // If the result contains a revert reason, try to unpack and return it. - if len(res.Revert()) > 0 { - return nil, newRevertError(res) - } - return res.Return(), res.Err -} - -// PendingCallContract executes a contract call on the pending state. -func (b *SimulatedBackend) PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error) { - b.mu.Lock() - defer b.mu.Unlock() - defer b.pendingState.RevertToSnapshot(b.pendingState.Snapshot()) - - res, err := b.callContract(ctx, call, b.pendingBlock.Header(), b.pendingState) - if err != nil { - return nil, err - } - // If the result contains a revert reason, try to unpack and return it. 
- if len(res.Revert()) > 0 { - return nil, newRevertError(res) - } - return res.Return(), res.Err -} - -// PendingNonceAt implements PendingStateReader.PendingNonceAt, retrieving -// the nonce currently pending for the account. -func (b *SimulatedBackend) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { - b.mu.Lock() - defer b.mu.Unlock() - - return b.pendingState.GetNonce(account), nil -} - -// SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated -// chain doesn't have miners, we just return a gas price of 1 for any call. -func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) { - b.mu.Lock() - defer b.mu.Unlock() - - if b.pendingBlock.Header().BaseFee != nil { - return b.pendingBlock.Header().BaseFee, nil - } - return big.NewInt(1), nil -} - -// SuggestGasTipCap implements ContractTransactor.SuggestGasTipCap. Since the simulated -// chain doesn't have miners, we just return a gas tip of 1 for any call. -func (b *SimulatedBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { - return big.NewInt(1), nil -} - -// EstimateGas executes the requested code against the currently pending block/state and -// returns the used amount of gas. -func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { - b.mu.Lock() - defer b.mu.Unlock() - - // Determine the lowest and highest possible gas limits to binary search in between - var ( - lo uint64 = params.TxGas - 1 - hi uint64 - cap uint64 - ) - if call.Gas >= params.TxGas { - hi = call.Gas - } else { - hi = b.pendingBlock.GasLimit() - } - // Normalize the max fee per gas the call is willing to spend. - var feeCap *big.Int - if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) { - return 0, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") - } else if call.GasPrice != nil { - feeCap = call.GasPrice - } else if call.GasFeeCap != nil { - feeCap = call.GasFeeCap - } else { - feeCap = common.Big0 - } - // Recap the highest gas allowance with account's balance. 
- if feeCap.BitLen() != 0 { - balance := b.pendingState.GetBalance(call.From) // from can't be nil - available := new(big.Int).SetBytes(balance.Bytes()) - if call.Value != nil { - if call.Value.Cmp(available) >= 0 { - return 0, core.ErrInsufficientFundsForTransfer - } - available.Sub(available, call.Value) - } - allowance := new(big.Int).Div(available, feeCap) - if allowance.IsUint64() && hi > allowance.Uint64() { - transfer := call.Value - if transfer == nil { - transfer = new(big.Int) - } - log.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance, - "sent", transfer, "feecap", feeCap, "fundable", allowance) - hi = allowance.Uint64() - } - } - cap = hi - - // Create a helper to check if a gas allowance results in an executable transaction - executable := func(gas uint64) (bool, *core.ExecutionResult, error) { - call.Gas = gas - - snapshot := b.pendingState.Snapshot() - res, err := b.callContract(ctx, call, b.pendingBlock.Header(), b.pendingState) - b.pendingState.RevertToSnapshot(snapshot) - - if err != nil { - if errors.Is(err, core.ErrIntrinsicGas) { - return true, nil, nil // Special case, raise gas limit - } - return true, nil, err // Bail out - } - return res.Failed(), res, nil - } - // Execute the binary search and hone in on an executable gas limit - for lo+1 < hi { - mid := (hi + lo) / 2 - failed, _, err := executable(mid) - - // If the error is not nil(consensus error), it means the provided message - // call or transaction will never be accepted no matter how much gas it is - // assigned. Return the error directly, don't struggle any more - if err != nil { - return 0, err - } - if failed { - lo = mid - } else { - hi = mid - } - } - // Reject the transaction as invalid if it still fails at the highest allowance - if hi == cap { - failed, result, err := executable(hi) - if err != nil { - return 0, err - } - if failed { - if result != nil && !errors.Is(result.Err, vm.ErrOutOfGas) { - if len(result.Revert()) > 0 { - return 0, newRevertError(result) - } - return 0, result.Err - } - // Otherwise, the specified gas cap is too low - return 0, fmt.Errorf("gas required exceeds allowance (%d)", cap) - } - } - return hi, nil -} - -// callContract implements common code between normal and pending contract calls. -// state is modified during execution, make sure to copy it if necessary. 
-func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, header *types.Header, stateDB *state.StateDB) (*core.ExecutionResult, error) { - // Gas prices post 1559 need to be initialized - if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) { - return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") - } - head := b.Blockchain.CurrentHeader() - if !b.Blockchain.Config().IsLondon(head.Number) { - // If there's no basefee, then it must be a non-1559 execution - if call.GasPrice == nil { - call.GasPrice = new(big.Int) - } - call.GasFeeCap, call.GasTipCap = call.GasPrice, call.GasPrice - } else { - // A basefee is provided, necessitating 1559-type execution - if call.GasPrice != nil { - // User specified the legacy gas field, convert to 1559 gas typing - call.GasFeeCap, call.GasTipCap = call.GasPrice, call.GasPrice - } else { - // User specified 1559 gas fields (or none), use those - if call.GasFeeCap == nil { - call.GasFeeCap = new(big.Int) - } - if call.GasTipCap == nil { - call.GasTipCap = new(big.Int) - } - // Backfill the legacy gasPrice for EVM execution, unless we're all zeroes - call.GasPrice = new(big.Int) - if call.GasFeeCap.BitLen() > 0 || call.GasTipCap.BitLen() > 0 { - call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, head.BaseFee), call.GasFeeCap) - } - } - } - // Ensure message is initialized properly. - if call.Gas == 0 { - call.Gas = 50000000 - } - if call.Value == nil { - call.Value = new(big.Int) - } - - // Set infinite balance to the fake caller account. - maxUint2561 := big.NewInt(0).Sub(big.NewInt(0).Exp(big.NewInt(2), big.NewInt(256), nil), big.NewInt(1)) - uint256Max, _ := uint256.FromBig(maxUint2561) - stateDB.SetBalance(call.From, uint256Max) - - // Execute the call. - msg := &core.Message{ - From: call.From, - To: call.To, - Value: call.Value, - GasLimit: call.Gas, - GasPrice: call.GasPrice, - GasFeeCap: call.GasFeeCap, - GasTipCap: call.GasTipCap, - Data: call.Data, - AccessList: call.AccessList, - SkipAccountChecks: true, - } - - // Create a new environment which holds all relevant information - // about the transaction and calling mechanisms. - txContext := core.NewEVMTxContext(msg) - evmContext := core.NewEVMBlockContext(header, b.Blockchain, nil) - vmEnv := vm.NewEVM(evmContext, txContext, stateDB, b.config, vm.Config{NoBaseFee: true}) - gasPool := new(core.GasPool).AddGas(math.MaxUint64) - - return core.ApplyMessage(vmEnv, msg, gasPool) -} - -// SendTransaction updates the pending block to include the given transaction. 
-func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error { - b.mu.Lock() - defer b.mu.Unlock() - - // Get the last block - block, err := b.blockByHash(ctx, b.pendingBlock.ParentHash()) - if err != nil { - return errors.New("could not fetch parent") - } - // Check transaction validity - signer := types.MakeSigner(b.Blockchain.Config(), block.Number(), block.Time()) - sender, err := types.Sender(signer, tx) - if err != nil { - return fmt.Errorf("invalid transaction: %w", err) - } - nonce := b.pendingState.GetNonce(sender) - if tx.Nonce() != nonce { - return fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce) - } - // Include tx in chain - blocks, receipts := core.GenerateChain(b.config, block, ethash.NewFaker(), b.Database, 1, func(number int, block *core.BlockGen) { - for _, tx := range b.pendingBlock.Transactions() { - block.AddTxWithChain(b.Blockchain, tx) - } - block.AddTxWithChain(b.Blockchain, tx) - }) - stateDB, _ := b.Blockchain.State() - - b.pendingBlock = blocks[0] - b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil) - b.pendingReceipts = receipts[0] - return nil +func (n *Backend) Fork(parentHash common.Hash) error { + return n.beacon.Fork(parentHash) } -// FilterLogs executes a log filter operation, blocking during execution and -// returning all the results in one batch. -// -// TODO(karalabe): Deprecate when the subscription one can return past data too. -func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) { - var filter *filters.Filter - if query.BlockHash != nil { - // Block filter requested, construct a single-shot filter - filter = b.filterSystem.NewBlockFilter(*query.BlockHash, query.Addresses, query.Topics) - } else { - // Initialize unset filter boundaries to run from genesis to chain head - from := int64(0) - if query.FromBlock != nil { - from = query.FromBlock.Int64() - } - to := int64(-1) - if query.ToBlock != nil { - to = query.ToBlock.Int64() - } - // Construct the range filter - filter = b.filterSystem.NewRangeFilter(from, to, query.Addresses, query.Topics) - } - // Run the filter and return all the logs - logs, err := filter.Logs(ctx) - if err != nil { - return nil, err - } - res := make([]types.Log, len(logs)) - for i, nLog := range logs { - res[i] = *nLog - } - return res, nil -} - -// SubscribeFilterLogs creates a background log filtering operation, returning a -// subscription immediately, which can be used to stream the found events. -func (b *SimulatedBackend) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { - // Subscribe to contract events - sink := make(chan []*types.Log) - - sub, err := b.events.SubscribeLogs(query, sink) - if err != nil { - return nil, err - } - // Since we're getting logs in batches, we need to flatten them into a plain stream - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case logs := <-sink: - for _, nlog := range logs { - select { - case ch <- *nlog: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// SubscribeNewHead returns an event subscription for a new header. 
-func (b *SimulatedBackend) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) { - // subscribe to a new head - sink := make(chan *types.Header) - sub := b.events.SubscribeNewHeads(sink) - - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case head := <-sink: - select { - case ch <- head: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// AdjustTime adds a time shift to the simulated clock. +// AdjustTime changes the block timestamp and creates a new block. // It can only be called on empty blocks. -func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error { - b.mu.Lock() - defer b.mu.Unlock() - - if len(b.pendingBlock.Transactions()) != 0 { - return errors.New("Could not adjust time on non-empty block") - } - // Get the last block - block := b.Blockchain.GetBlockByHash(b.pendingBlock.ParentHash()) - if block == nil { - return errors.New("could not find parent") - } - - blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.Database, 1, func(number int, block *core.BlockGen) { - block.OffsetTime(int64(adjustment.Seconds())) - }) - stateDB, _ := b.Blockchain.State() - - b.pendingBlock = blocks[0] - b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil) - - return nil -} - -// filterBackend implements filters.Backend to support filtering for logs without -// taking bloom-bits acceleration structures into account. -type filterBackend struct { - db ethdb.Database - bc *core.BlockChain - backend *SimulatedBackend -} - -func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db } - -func (fb *filterBackend) EventMux() *event.Feed { panic("not supported") } - -func (fb *filterBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { - switch number { - case rpc.PendingBlockNumber: - if block := fb.backend.pendingBlock; block != nil { - return block.Header(), nil - } - return nil, nil - case rpc.LatestBlockNumber: - return fb.bc.CurrentHeader(), nil - case rpc.FinalizedBlockNumber: - return fb.bc.CurrentFinalBlock(), nil - case rpc.SafeBlockNumber: - return fb.bc.CurrentSafeBlock(), nil - default: - return fb.bc.GetHeaderByNumber(uint64(number.Int64())), nil - } -} - -func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { - return fb.bc.GetHeaderByHash(hash), nil -} - -func (fb *filterBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) { - if body := fb.bc.GetBody(hash); body != nil { - return body, nil - } - return nil, errors.New("block body not found") -} - -func (fb *filterBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { - return fb.backend.pendingBlock, fb.backend.pendingReceipts -} - -func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { - number := rawdb.ReadHeaderNumber(fb.db, hash) - if number == nil { - return nil, nil - } - header := rawdb.ReadHeader(fb.db, hash, *number) - if header == nil { - return nil, nil - } - return rawdb.ReadReceipts(fb.db, hash, *number, header.Time, fb.bc.Config()), nil -} - -func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) { - logs := rawdb.ReadLogs(fb.db, hash, number) - return logs, nil -} - -func (fb *filterBackend) 
SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { - return nullSubscription() -} - -func (fb *filterBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { - return fb.bc.SubscribeChainEvent(ch) -} - -func (fb *filterBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { - return fb.bc.SubscribeRemovedLogsEvent(ch) -} - -func (fb *filterBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { - return fb.bc.SubscribeLogsEvent(ch) -} - -func (fb *filterBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { - return nullSubscription() -} - -func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 } - -func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) { - panic("not supported") -} - -func (fb *filterBackend) ChainConfig() *params.ChainConfig { - panic("not supported") -} - -func (fb *filterBackend) CurrentHeader() *types.Header { - panic("not supported") +func (n *Backend) AdjustTime(adjustment time.Duration) error { + return n.beacon.AdjustTime(adjustment) } -func nullSubscription() event.Subscription { - return event.NewSubscription(func(quit <-chan struct{}) error { - <-quit - return nil - }) +// Client returns a client that accesses the simulated chain. +func (n *Backend) Client() Client { + return n.client } diff --git a/go.mod b/go.mod index 105e773ccf..4414e3755f 100644 --- a/go.mod +++ b/go.mod @@ -6,11 +6,11 @@ require ( github.com/aquasecurity/table v1.8.0 github.com/attestantio/go-eth2-client v0.21.7 github.com/bloxapp/eth2-key-manager v1.4.1-0.20240829091006-b5848884a7a5 - github.com/btcsuite/btcd/btcec/v2 v2.3.2 + github.com/btcsuite/btcd/btcec/v2 v2.3.4 github.com/cespare/xxhash/v2 v2.3.0 github.com/dgraph-io/badger/v4 v4.2.0 github.com/dgraph-io/ristretto v0.1.1 - github.com/ethereum/go-ethereum v1.13.15 + github.com/ethereum/go-ethereum v1.14.8 github.com/ferranbt/fastssz v0.1.3 github.com/go-chi/chi/v5 v5.0.8 github.com/go-chi/render v1.0.2 @@ -20,10 +20,10 @@ require ( github.com/gorilla/websocket v1.5.3 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/herumi/bls-eth-go-binary v1.29.1 - github.com/holiman/uint256 v1.2.4 + github.com/holiman/uint256 v1.3.1 github.com/ilyakaznacheev/cleanenv v1.4.2 github.com/jellydator/ttlcache/v3 v3.2.0 - github.com/libp2p/go-libp2p v0.36.0 + github.com/libp2p/go-libp2p v0.36.1 github.com/libp2p/go-libp2p-kad-dht v0.25.2 github.com/libp2p/go-libp2p-pubsub v0.11.0 github.com/microsoft/go-crypto-openssl v0.2.9 @@ -36,7 +36,7 @@ require ( github.com/rs/zerolog v1.32.0 github.com/sourcegraph/conc v0.3.0 github.com/spf13/cobra v1.7.0 - github.com/ssvlabs/ssv-spec v0.3.11-0.20240714201559-ccf408d1ecd8 + github.com/ssvlabs/ssv-spec v0.3.11-0.20240820113812-496d839e9614 github.com/ssvlabs/ssv-spec-pre-cc v0.0.0-20240725052506-c48532da6a63 github.com/status-im/keycard-go v0.2.0 github.com/stretchr/testify v1.9.0 @@ -57,41 +57,41 @@ require ( require ( github.com/BurntSushi/toml v1.3.2 // indirect github.com/DataDog/zstd v1.5.2 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/VictoriaMetrics/fastcache v1.12.1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/VictoriaMetrics/fastcache v1.12.2 // indirect github.com/ajg/form v1.5.1 // indirect github.com/aristanetworks/goarista v0.0.0-20200805130819-fd197cf57d96 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect 
github.com/bits-and-blooms/bitset v1.10.0 // indirect - github.com/cockroachdb/errors v1.9.1 // indirect + github.com/cockroachdb/errors v1.11.3 // indirect + github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 // indirect - github.com/cockroachdb/redact v1.1.3 // indirect + github.com/cockroachdb/pebble v1.1.1 // indirect + github.com/cockroachdb/redact v1.1.5 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/gnark-crypto v0.12.1 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect - github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c // indirect + github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect - github.com/deckarep/golang-set/v2 v2.1.0 // indirect + github.com/deckarep/golang-set/v2 v2.6.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/elastic/gosigar v0.14.3 // indirect - github.com/ethereum/c-kzg-4844 v0.4.0 // indirect + github.com/ethereum/c-kzg-4844 v1.0.0 // indirect + github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 // indirect github.com/fatih/color v1.16.0 // indirect - github.com/fjl/memsize v0.0.2 // indirect github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect - github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect - github.com/getsentry/sentry-go v0.18.0 // indirect + github.com/getsentry/sentry-go v0.27.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect @@ -103,7 +103,7 @@ require ( github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/glog v1.0.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/flatbuffers v1.12.1 // indirect github.com/google/gopacket v1.1.19 // indirect @@ -204,7 +204,7 @@ require ( github.com/rs/cors v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect - github.com/sirupsen/logrus v1.9.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/supranational/blst v0.3.11 // indirect diff --git a/go.sum b/go.sum index 425a340284..5c3aea3628 100644 --- a/go.sum +++ b/go.sum @@ -10,23 +10,18 @@ dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBr dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod 
h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= -github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= -github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= +github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= +github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -43,10 +38,8 @@ github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm github.com/aristanetworks/goarista v0.0.0-20200805130819-fd197cf57d96 h1:XJH0YfVFKbq782tlNThzN/Ud5qm/cx6LXOA/P6RkTxc= github.com/aristanetworks/goarista v0.0.0-20200805130819-fd197cf57d96/go.mod h1:QZe5Yh80Hp1b6JxQdpfSEEe8X7hTyTEZSosSrFf/oJE= github.com/aristanetworks/splunk-hec-go v0.3.3/go.mod h1:1VHO9r17b0K7WmOlLb9nTk/2YanvOEnLMUgsFrxBROc= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/attestantio/go-eth2-client v0.21.7 h1:tdTJWiOJUCDmYSDt5C8D8+N5Hxfos0yLp+iVT7tKWMk= github.com/attestantio/go-eth2-client v0.21.7/go.mod h1:d7ZPNrMX8jLfIgML5u7QZxFo2AukLM+5m08iMaLdqb8= -github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/bazelbuild/rules_go v0.23.2 h1:Wxu7JjqnF78cKZbsBsARLSXx/jlGaSLCnUV3mTlyHvM= 
github.com/bazelbuild/rules_go v0.23.2/go.mod h1:MC23Dc/wkXEyk3Wpq6lCqz0ZAYOZDw2DR5y3N1q2i7M= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -63,8 +56,8 @@ github.com/bloxapp/eth2-key-manager v1.4.1-0.20240829091006-b5848884a7a5 h1:TGyT github.com/bloxapp/eth2-key-manager v1.4.1-0.20240829091006-b5848884a7a5/go.mod h1:m2DsvNCyLAAcgkN2JuroXUGpM61OeKSclmWuaRNW4Ss= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/bradfitz/gomemcache v0.0.0-20170208213004-1952afaa557d/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60= -github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= -github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= +github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= @@ -81,22 +74,20 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= -github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= -github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= +github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A= -github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo= -github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= -github.com/cockroachdb/redact v1.1.3/go.mod 
h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/pebble v1.1.1 h1:XnKU22oiCLy2Xn8vp1re67cXg4SAasg/WDt1NtcRFaw= +github.com/cockroachdb/pebble v1.1.1/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= -github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= @@ -104,22 +95,18 @@ github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5U github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ= -github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= -github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA= -github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= +github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c h1:uQYC5Z1mdLRPrZhHjHxufI8+2UG/i25QG92j0Er9p6I= +github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= +github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI= +github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/d4l3k/messagediff v1.2.1 h1:ZcAIMYsUg0EAp9X+tt8/enBE/Q8Yd5kzPynLyKptt9U= github.com/d4l3k/messagediff v1.2.1/go.mod h1:Oozbb1TVXFac9FtSIxHBMnBCq2qeH/2KkEQxENCrlLo= @@ -128,13 +115,12 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= -github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI= -github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= +github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= -github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger/v4 v4.2.0 h1:kJrlajbXXL9DFTNuhhu9yCx7JJa4qpYWxtE8BzuWsEs= github.com/dgraph-io/badger/v4 v4.2.0/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= github.com/dgraph-io/ristretto v0.1.1-0.20211108053508-297c39e6640f h1:NBGp2JpfMtXmanFWt6f3gEdBtnLO5LupRvm3w4TXrvs= @@ -149,29 +135,24 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= -github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= -github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= -github.com/ethereum/go-ethereum v1.13.15 h1:U7sSGYGo4SPjP6iNIifNoyIAiNjrmQkz6EwQG+/EZWo= 
-github.com/ethereum/go-ethereum v1.13.15/go.mod h1:TN8ZiHrdJwSe8Cb6x+p0hs5CxhJZPbqB7hHkaUXcmIU= -github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= +github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA= +github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +github.com/ethereum/go-ethereum v1.14.8 h1:NgOWvXS+lauK+zFukEvi85UmmsS/OkV0N23UZ1VTIig= +github.com/ethereum/go-ethereum v1.14.8/go.mod h1:TJhyuDq0JDppAkFXgqjwpdlQApywnu/m10kFPxh8vvs= +github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 h1:KrE8I4reeVvf7C1tm8elRjj4BdscTYzz/WAbYyf/JI4= +github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0/go.mod h1:D9AJLVXSyZQXJQVk8oh1EwjISE+sJTn2duYIZC0dy3w= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/ferranbt/fastssz v0.0.0-20210120143747-11b9eff30ea9/go.mod h1:DyEu2iuLBnb/T51BlsiO3yLYdJC6UbGMrIkqK1KmQxM= github.com/ferranbt/fastssz v0.1.3 h1:ZI+z3JH05h4kgmFXdHuR1aWYsgrg7o+Fw7/NCzM16Mo= github.com/ferranbt/fastssz v0.1.3/go.mod h1:0Y9TEd/9XuFlh7mskMPfXiI2Dkw4Ddg9EyXt1W7MRvE= -github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= -github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= @@ -189,19 +170,12 @@ github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/garyburd/redigo v1.1.1-0.20170914051019-70e1b1943d4f/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= -github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE= -github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46/go.mod h1:QNpY22eby74jVhqH4WhDLDwxc/vqsern6pW+u2kbkpc= -github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= -github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= -github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= +github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= -github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= 
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0= github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-chi/render v1.0.2 h1:4ER/udB0+fMWB2Jlf15RV3F4A2FDuYi/9f+lFttR/Lg= @@ -218,7 +192,6 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= @@ -234,9 +207,6 @@ github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg78 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/goccy/go-yaml v1.11.3 h1:B3W9IdWbvrUu2OYQGwvU1nZtvMQJPBKgBUuweJjLj6I= github.com/goccy/go-yaml v1.11.3/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -245,15 +215,10 @@ github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= -github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/gddo v0.0.0-20200528160355-8d077c1d8f4c 
h1:HoqgYR60VYu5+0BuG6pjeGp7LKEPZnHt+dUClx9PeIs= @@ -282,14 +247,13 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= @@ -328,7 +292,6 @@ github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE0 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20170920190843-316c5e0ff04e/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -345,13 +308,11 @@ github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v0.0.0-20170914154624-68e816d1c783/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= 
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/herumi/bls-eth-go-binary v0.0.0-20210130185500-57372fb27371/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U= github.com/herumi/bls-eth-go-binary v1.29.1 h1:XcNSHYTyNjEUVfWDCE2gtG5r95biTwd7MJUJF09LtSE= github.com/herumi/bls-eth-go-binary v1.29.1/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U= @@ -359,8 +320,8 @@ github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6w github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= -github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs= +github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= @@ -370,13 +331,10 @@ github.com/huandu/go-clone/generic v1.6.0 h1:Wgmt/fUZ28r16F2Y3APotFD59sHk1p78K0X github.com/huandu/go-clone/generic v1.6.0/go.mod h1:xgd9ZebcMsBWWcBx5mVMCoqMX24gLWr5lQicr+nVXNs= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= -github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ilyakaznacheev/cleanenv v1.4.2 h1:nRqiriLMAC7tz7GzjzUTBHfzdzw6SQ7XvTagkFqe/zU= github.com/ilyakaznacheev/cleanenv v1.4.2/go.mod h1:i0owW+HDxeGKE0/JPREJOdSCPIyOnmh6C0xhWAkF/xA= -github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/log15 v0.0.0-20170622235902-74a0988b5f80/go.mod h1:cOaXtrgN4ScfRrD9Bre7U1thNq5RtJ8ZoP4iXVGRj6o= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= @@ -397,11 +355,6 @@ github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipld/go-ipld-prime v0.20.0 h1:Ud3VwE9ClxpO2LkCYP7vWPc0Fo+dYdYzgxUJZ3uRG4g= github.com/ipld/go-ipld-prime v0.20.0/go.mod h1:PzqZ/ZR981eKbgdr3y2DJYeD/8bgMawdGVlJDE8kK+M= -github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= -github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= -github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk= 
-github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g= -github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= @@ -423,22 +376,13 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8= -github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE= -github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE= -github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro= -github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -450,7 +394,6 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -460,8 +403,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug 
v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y= -github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA= @@ -472,8 +413,8 @@ github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38y github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.36.0 h1:euESeA1bndKdEjHoSbzyJ39PT14CL+/BNCfq7aaneuo= -github.com/libp2p/go-libp2p v0.36.0/go.mod h1:mdtNGqy0AQuiYJuO1bXPdFOyFeyMTMSVZ03OBi/XLS4= +github.com/libp2p/go-libp2p v0.36.1 h1:piAHesy0/8ifBEBUS8HF2m7ywR5vnktUFv00dTsVKcs= +github.com/libp2p/go-libp2p v0.36.1/go.mod h1:vHzel3CpRB+vS11fIjZSJAU4ALvieKV9VZHC9VerHj8= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-kad-dht v0.25.2 h1:FOIk9gHoe4YRWXTu8SY9Z1d0RILol0TrtApsMDPjAVQ= @@ -502,21 +443,13 @@ github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/magiconair/properties v1.7.4-0.20170902060319-8d7837e64d3c/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/mattn/go-colorable v0.0.10-0.20170816031813-ad5389df28cd/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.2/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod 
h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= @@ -525,11 +458,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/microsoft/go-crypto-openssl v0.2.9 h1:pzWgU+PLq61DzuhfZM7L7nyr3DrQoa4Ln75gCwsvvjs= github.com/microsoft/go-crypto-openssl v0.2.9/go.mod h1:xOSmQnWz4xvNB2+KQN2g2UUwMG9vqDHBk9nk/NdmyRw= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= @@ -549,9 +479,7 @@ github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1 github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v0.0.0-20170523030023-d0303fe80992/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -570,7 +498,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= @@ -601,10 +528,6 @@ github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOEL github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= 
-github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -615,7 +538,6 @@ github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= @@ -643,7 +565,6 @@ github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTK github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pelletier/go-toml v1.0.1-0.20170904195809-1d6b12b7cb29/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= @@ -748,8 +669,6 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= @@ -762,9 +681,7 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/schollz/closestmatch 
v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= @@ -794,12 +711,10 @@ github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5k github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= @@ -809,22 +724,16 @@ github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v0.0.0-20170901052352-ee1bd8ee15a1/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.1.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v0.0.0-20170901151539-12bd96e66386/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.1-0.20170901120850-7aff26db30c1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.0.0/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/ssvlabs/ssv-spec v0.3.11-0.20240714201559-ccf408d1ecd8 
h1:hCdPUAxHoNh3q20McmLcPuzKOHP2cjVyylQ8gjRSjuI= -github.com/ssvlabs/ssv-spec v0.3.11-0.20240714201559-ccf408d1ecd8/go.mod h1:woZFNQHwfkY2fVvIRK6uPZGsUm8hCubZ6VWRbXRzCkM= +github.com/ssvlabs/ssv-spec v0.3.11-0.20240820113812-496d839e9614 h1:aBqK0SLjxYhTgcoElBWy2Pv2X66wAwy7ctgV4JVMluY= +github.com/ssvlabs/ssv-spec v0.3.11-0.20240820113812-496d839e9614/go.mod h1:lTqsNeTUIfpacMoztbN7YqvFttDigCLjINjy/8I2Wuc= github.com/ssvlabs/ssv-spec-pre-cc v0.0.0-20240725052506-c48532da6a63 h1:su82+QgRipnMdYyV/ux8+Pd6psdutbN97ltWl1Rr6Xc= github.com/ssvlabs/ssv-spec-pre-cc v0.0.0-20240725052506-c48532da6a63/go.mod h1:uYNLeK2YWjlGidlA1co2uXA0JBp15m+6088ZZmXaVBI= github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= @@ -865,22 +774,12 @@ github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2n github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/umbracle/gohashtree v0.0.2-alpha.0.20230207094856-5b775a815c10 h1:CQh33pStIp/E30b7TxDlXfM0145bn2e8boI30IxAhTg= github.com/umbracle/gohashtree v0.0.2-alpha.0.20230207094856-5b775a815c10/go.mod h1:x/Pa0FF5Te9kdrlZKJK82YmAkvL8+f989USgz6Jiw7M= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= -github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= -github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= @@ -902,18 +801,10 @@ github.com/wlynxg/anet v0.0.3 h1:PvR53psxFXstc12jelG6f1Lv4MWqE0tI76/hHGjh9rg= github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference 
v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/xtaci/kcp-go v5.4.20+incompatible/go.mod h1:bN6vIwHQbfHaHtFpEssmWsN45a+AZwO7eyRCmEIbtvE= github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae/go.mod h1:gXtu8J62kEgmN++bm9BVICuT/e8yiLI2KFobd/TRFsE= -github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= -github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= -github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -953,15 +844,12 @@ go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1 golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -983,7 +871,6 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint 
v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -999,20 +886,15 @@ golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191116160921-f9c825593386/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1024,7 +906,6 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -1062,25 +943,19 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200219091948-cb0a6d8edb6c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1088,16 +963,12 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1112,6 +983,7 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= @@ -1142,18 +1014,15 @@ golang.org/x/time v0.0.0-20170424234030-8be79e1e0910/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -1165,7 +1034,6 @@ golang.org/x/tools v0.0.0-20200221224223-e1da425f72fd/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= @@ -1192,7 +1060,6 @@ google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20170918111702-1e559d0a00ee/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -1201,11 +1068,9 @@ google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200218151345-dad8c97a84f5/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.2.1-0.20170921194603-d4b75ebd4f9f/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1215,9 +1080,8 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.27.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1244,17 +1108,13 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= -gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/redis.v4 v4.2.4/go.mod h1:8KREHdypkCEojGKQcjMqAODMICIVwZAONWq8RowTITA= @@ -1268,7 +1128,6 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/integration/qbft/tests/scenario_test.go b/integration/qbft/tests/scenario_test.go index b670d80d58..c5eac5ad0d 100644 --- a/integration/qbft/tests/scenario_test.go +++ 
b/integration/qbft/tests/scenario_test.go
@@ -220,7 +220,8 @@ func createValidator(t *testing.T, pCtx context.Context, id spectypes.OperatorID
 		Operator: spectestingutils.TestingCommitteeMember(keySet),
 	}
 
-	options.DutyRunners = validator.SetupRunners(ctx, logger, options)
+	options.DutyRunners, err = validator.SetupRunners(ctx, logger, options)
+	require.NoError(t, err)
 	val := protocolvalidator.NewValidator(ctx, cancel, options)
 	node.UseMessageRouter(newMsgRouter(logger, val))
 	started, err := val.Start(logger)
diff --git a/network/discovery/options.go b/network/discovery/options.go
index ff7cea5324..5cb359d231 100644
--- a/network/discovery/options.go
+++ b/network/discovery/options.go
@@ -2,14 +2,16 @@ package discovery
 
 import (
 	"crypto/ecdsa"
+	"net"
+
 	"github.com/ssvlabs/ssv/logging"
 	compatible_logger "github.com/ssvlabs/ssv/network/discovery/logger"
-	"net"
 
 	"github.com/pkg/errors"
-	"github.com/ssvlabs/ssv/network/commons"
 	"go.uber.org/zap"
 
+	"github.com/ssvlabs/ssv/network/commons"
+
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 )
@@ -101,7 +103,7 @@ func (opts *DiscV5Options) DiscV5Cfg(logger *zap.Logger) (*discover.Config, erro
 		zapLogger := logger.Named(logging.NameDiscoveryV5Logger)
 		//TODO: this is a workaround for using slog without upgrade go to 1.21
 		zapHandler := compatible_logger.Option{Logger: zapLogger}.NewZapHandler()
-		newLogger := log.NewLogger(zapHandler)
+		newLogger := log.New(zapHandler)
 		dv5Cfg.Log = newLogger
 	}
 
diff --git a/operator/duties/voluntary_exit_test.go b/operator/duties/voluntary_exit_test.go
index a3c65d5785..1bd0460d2d 100644
--- a/operator/duties/voluntary_exit_test.go
+++ b/operator/duties/voluntary_exit_test.go
@@ -147,7 +147,7 @@ func create1to1BlockSlotMapping(scheduler *Scheduler) *atomic.Uint64 {
 	scheduler.executionClient.(*MockExecutionClient).EXPECT().BlockByNumber(gomock.Any(), gomock.Any()).DoAndReturn(
 		func(ctx context.Context, blockNumber *big.Int) (*ethtypes.Block, error) {
 			blockByNumberCalls.Add(1)
-			expectedBlock := ethtypes.NewBlock(&ethtypes.Header{Time: blockNumber.Uint64()}, nil, nil, nil, trie.NewStackTrie(nil))
+			expectedBlock := ethtypes.NewBlock(&ethtypes.Header{Time: blockNumber.Uint64()}, nil, nil, trie.NewStackTrie(nil))
 			return expectedBlock, nil
 		},
 	).AnyTimes()
diff --git a/operator/validator/controller.go b/operator/validator/controller.go
index d11fb0aa68..a29ba89cde 100644
--- a/operator/validator/controller.go
+++ b/operator/validator/controller.go
@@ -924,7 +924,11 @@ func (c *controller) onShareInit(share *ssvtypes.SSVShare) (*validators.Validato
 	opts := c.validatorOptions
 	opts.SSVShare = share
 	opts.Operator = operator
-	opts.DutyRunners = SetupRunners(validatorCtx, c.logger, opts)
+	opts.DutyRunners, err = SetupRunners(validatorCtx, c.logger, opts)
+	if err != nil {
+		validatorCancel()
+		return nil, nil, fmt.Errorf("could not setup runners: %w", err)
+	}
 	alanValidator := validator.NewValidator(validatorCtx, validatorCancel, opts)
 
 	// TODO: (Alan) share mutations such as metadata changes and fee recipient updates aren't reflected in genesis shares
@@ -976,7 +980,7 @@ func (c *controller) onShareInit(share *ssvtypes.SSVShare) (*validators.Validato
 
 	committeeRunnerFunc := SetupCommitteeRunners(ctx, opts)
 
-	vc = validator.NewCommittee(ctx, cancel, logger, c.beacon.GetBeaconNetwork(), operator, committeeRunnerFunc)
+	vc = validator.NewCommittee(ctx, cancel, logger, c.beacon.GetBeaconNetwork(), operator, committeeRunnerFunc, nil)
 	vc.AddShare(&share.Share)
 	c.validatorsMap.PutCommittee(operator.CommitteeID, vc)
 
@@ -1266,11 +1270,10 @@ func SetupCommitteeRunners(
 	buildController := func(role spectypes.RunnerRole, valueCheckF specqbft.ProposedValueCheckF) *qbftcontroller.Controller {
 		config := &qbft.Config{
 			BeaconSigner: options.Signer,
-			Domain:       options.NetworkConfig.DomainType(),
-			ValueCheckF:  nil, // sets per role type
+			Domain:       options.NetworkConfig.AlanDomainType,
+			ValueCheckF:  valueCheckF,
 			ProposerF: func(state *specqbft.State, round specqbft.Round) spectypes.OperatorID {
 				leader := qbft.RoundRobinProposer(state, round)
-				//logger.Debug("leader", zap.Int("operator_id", int(leader)))
 				return leader
 			},
 			Storage: options.Storage.Get(convert.RunnerRole(role)),
@@ -1278,18 +1281,17 @@ func SetupCommitteeRunners(
 			Timer:       roundtimer.New(ctx, options.NetworkConfig.Beacon, role, nil),
 			CutOffRound: specqbft.Round(specqbft.CutoffRound),
 		}
-		config.ValueCheckF = valueCheckF
 
-		identifier := spectypes.NewMsgID(options.NetworkConfig.DomainType(), options.Operator.CommitteeID[:], role)
+		identifier := spectypes.NewMsgID(options.NetworkConfig.AlanDomainType, options.Operator.CommitteeID[:], role)
 		qbftCtrl := qbftcontroller.NewController(identifier[:], options.Operator, config, options.OperatorSigner, options.FullNode)
 		return qbftCtrl
 	}
 
-	return func(slot phase0.Slot, shares map[phase0.ValidatorIndex]*spectypes.Share, slashableValidators []spectypes.ShareValidatorPK) *runner.CommitteeRunner {
+	return func(slot phase0.Slot, shares map[phase0.ValidatorIndex]*spectypes.Share, attestingValidators []spectypes.ShareValidatorPK) (*runner.CommitteeRunner, error) {
 		// Create a committee runner.
 		epoch := options.NetworkConfig.Beacon.GetBeaconNetwork().EstimatedEpochAtSlot(slot)
-		valCheck := ssv.BeaconVoteValueCheckF(options.Signer, slot, slashableValidators, epoch)
-		crunner := runner.NewCommitteeRunner(
+		valCheck := ssv.BeaconVoteValueCheckF(options.Signer, slot, attestingValidators, epoch)
+		crunner, err := runner.NewCommitteeRunner(
 			options.NetworkConfig,
 			shares,
 			buildController(spectypes.RoleCommittee, valCheck),
@@ -1299,7 +1301,10 @@ func SetupCommitteeRunners(
 			options.OperatorSigner,
 			valCheck,
 		)
-		return crunner.(*runner.CommitteeRunner)
+		if err != nil {
+			return nil, err
+		}
+		return crunner.(*runner.CommitteeRunner), nil
 	}
 }
 
@@ -1308,11 +1313,11 @@ func SetupRunners(
 	ctx context.Context,
 	logger *zap.Logger,
 	options validator.Options,
-) runner.ValidatorDutyRunners {
+) (runner.ValidatorDutyRunners, error) {
 	if options.SSVShare == nil || options.SSVShare.BeaconMetadata == nil {
 		logger.Error("missing validator metadata",
 			zap.String("validator", hex.EncodeToString(options.SSVShare.ValidatorPubKey[:])))
-		return runner.ValidatorDutyRunners{} // TODO need to find better way to fix it
+		return runner.ValidatorDutyRunners{}, nil // TODO need to find better way to fix it
 	}
 
 	runnersType := []spectypes.RunnerRole{
@@ -1353,6 +1358,7 @@ func SetupRunners(
 
 	runners := runner.ValidatorDutyRunners{}
 	alanDomainType := options.NetworkConfig.AlanDomainType
+	var err error
 	for _, role := range runnersType {
 		switch role {
 		//case spectypes.BNRoleAttester:
@@ -1362,11 +1368,11 @@ func SetupRunners(
 		case spectypes.RoleProposer:
 			proposedValueCheck := ssv.ProposerValueCheckF(options.Signer, options.NetworkConfig.Beacon.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey)
 			qbftCtrl := buildController(spectypes.RoleProposer, proposedValueCheck)
-			runners[role] = runner.NewProposerRunner(alanDomainType, options.NetworkConfig.Beacon.GetBeaconNetwork(), shareMap, qbftCtrl, options.Beacon, options.Network, options.Signer, options.OperatorSigner, proposedValueCheck, 0, options.Graffiti)
+			runners[role], err = runner.NewProposerRunner(alanDomainType, options.NetworkConfig.Beacon.GetBeaconNetwork(), shareMap, qbftCtrl, options.Beacon, options.Network, options.Signer, options.OperatorSigner, proposedValueCheck, 0, options.Graffiti)
 		case spectypes.RoleAggregator:
 			aggregatorValueCheckF := ssv.AggregatorValueCheckF(options.Signer, options.NetworkConfig.Beacon.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index)
 			qbftCtrl := buildController(spectypes.RoleAggregator, aggregatorValueCheckF)
-			runners[role] = runner.NewAggregatorRunner(alanDomainType, options.NetworkConfig.Beacon.GetBeaconNetwork(), shareMap, qbftCtrl, options.Beacon, options.Network, options.Signer, options.OperatorSigner, aggregatorValueCheckF, 0)
+			runners[role], err = runner.NewAggregatorRunner(alanDomainType, options.NetworkConfig.Beacon.GetBeaconNetwork(), shareMap, qbftCtrl, options.Beacon, options.Network, options.Signer, options.OperatorSigner, aggregatorValueCheckF, 0)
 		//case spectypes.BNRoleSyncCommittee:
 		//syncCommitteeValueCheckF := specssv.SyncCommitteeValueCheckF(options.Signer, options.NetworkConfig.Beacon.GetBeaconNetwork(), options.SSVShare.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index)
 		//qbftCtrl := buildController(spectypes.BNRoleSyncCommittee, syncCommitteeValueCheckF)
@@ -1374,14 +1380,17 @@ func SetupRunners(
 		case spectypes.RoleSyncCommitteeContribution:
 			syncCommitteeContributionValueCheckF := ssv.SyncCommitteeContributionValueCheckF(options.Signer, options.NetworkConfig.Beacon.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index)
 			qbftCtrl := buildController(spectypes.RoleSyncCommitteeContribution, syncCommitteeContributionValueCheckF)
-			runners[role] = runner.NewSyncCommitteeAggregatorRunner(alanDomainType, options.NetworkConfig.Beacon.GetBeaconNetwork(), shareMap, qbftCtrl, options.Beacon, options.Network, options.Signer, options.OperatorSigner, syncCommitteeContributionValueCheckF, 0)
+			runners[role], err = runner.NewSyncCommitteeAggregatorRunner(alanDomainType, options.NetworkConfig.Beacon.GetBeaconNetwork(), shareMap, qbftCtrl, options.Beacon, options.Network, options.Signer, options.OperatorSigner, syncCommitteeContributionValueCheckF, 0)
 		case spectypes.RoleValidatorRegistration:
-			runners[role] = runner.NewValidatorRegistrationRunner(alanDomainType, options.NetworkConfig.Beacon.GetBeaconNetwork(), shareMap, options.Beacon, options.Network, options.Signer, options.OperatorSigner)
+			runners[role], err = runner.NewValidatorRegistrationRunner(alanDomainType, options.NetworkConfig.Beacon.GetBeaconNetwork(), shareMap, options.Beacon, options.Network, options.Signer, options.OperatorSigner)
 		case spectypes.RoleVoluntaryExit:
-			runners[role] = runner.NewVoluntaryExitRunner(alanDomainType, options.NetworkConfig.Beacon.GetBeaconNetwork(), shareMap, options.Beacon, options.Network, options.Signer, options.OperatorSigner)
+			runners[role], err = runner.NewVoluntaryExitRunner(alanDomainType, options.NetworkConfig.Beacon.GetBeaconNetwork(), shareMap, options.Beacon, options.Network, options.Signer, options.OperatorSigner)
+		}
+		if err != nil {
+			return nil, errors.Wrap(err, "could not create duty runner")
+		}
 	}
-	return runners
+	return runners, nil
 }
 
 func SetupGenesisRunners(ctx context.Context, logger *zap.Logger, options validator.Options) genesisrunner.DutyRunners {
diff --git a/protocol/v2/qbft/controller/controller.go b/protocol/v2/qbft/controller/controller.go
index f2257fabdc..729552b912 100644
--- a/protocol/v2/qbft/controller/controller.go
+++ b/protocol/v2/qbft/controller/controller.go
@@ -130,26 +130,21 @@ func (c *Controller) UponExistingInstanceMsg(logger *zap.Logger, msg *specqbft.P
 	prevDecided, _ := inst.IsDecided()
 
+	// if previously decided, we don't process more messages
+	if prevDecided {
+		return nil, errors.New("not processing consensus message since instance is already decided")
+	}
+
 	decided, _, decidedMsg, err := inst.ProcessMsg(logger, msg)
 	if err != nil {
 		return nil, errors.Wrap(err, "could not process msg")
 	}
 
-	if prevDecided {
-		return nil, err
-	}
-
 	// save the highest Decided
 	if !decided {
 		return nil, nil
 	}
 
-	// ProcessMsg returns a nil decidedMsg when given a non-commit message
-	// while the instance is decided. In this case, we have nothing new to broadcast.
-	if decidedMsg == nil {
-		return nil, nil
-	}
-
 	if err := c.broadcastDecided(decidedMsg); err != nil {
 		// no need to fail processing instance deciding if failed to save/ broadcast
 		logger.Debug("❌ failed to broadcast decided message", zap.Error(err))
diff --git a/protocol/v2/qbft/controller/decided.go b/protocol/v2/qbft/controller/decided.go
index b5cd4a5c2e..964fa242d9 100644
--- a/protocol/v2/qbft/controller/decided.go
+++ b/protocol/v2/qbft/controller/decided.go
@@ -99,18 +99,10 @@ func ValidateDecided(
 		return errors.New("not a decided msg")
 	}
 
-	if err := msg.Validate(); err != nil {
-		return errors.Wrap(err, "invalid decided msg")
-	}
-
 	if err := instance.BaseCommitValidationVerifySignature(config, msg, msg.QBFTMessage.Height, committeeMember.Committee); err != nil {
 		return errors.Wrap(err, "invalid decided msg")
 	}
 
-	if err := msg.Validate(); err != nil {
-		return errors.Wrap(err, "invalid decided")
-	}
-
 	r, err := specqbft.HashDataRoot(msg.SignedMessage.FullData)
 	if err != nil {
 		return errors.Wrap(err, "could not hash input data")
diff --git a/protocol/v2/ssv/runner/aggregator.go b/protocol/v2/ssv/runner/aggregator.go
index 8ddc03d945..5fc8d86d32 100644
--- a/protocol/v2/ssv/runner/aggregator.go
+++ b/protocol/v2/ssv/runner/aggregator.go
@@ -44,7 +44,11 @@ func NewAggregatorRunner(
 	operatorSigner ssvtypes.OperatorSigner,
 	valCheck specqbft.ProposedValueCheckF,
 	highestDecidedSlot phase0.Slot,
-) Runner {
+) (Runner, error) {
+	if len(share) != 1 {
+		return nil, errors.New("must have one share")
+	}
+
 	return &AggregatorRunner{
 		BaseRunner: &BaseRunner{
 			RunnerRoleType:     spectypes.RoleAggregator,
@@ -62,7 +66,7 @@ func NewAggregatorRunner(
 		valCheck: valCheck,
 
 		metrics: metrics.NewConsensusMetrics(spectypes.RoleAggregator),
-	}
+	}, nil
 }
 
 func (r *AggregatorRunner) StartNewDuty(logger *zap.Logger, duty spectypes.Duty, quorum uint64) error {
diff --git a/protocol/v2/ssv/runner/committee.go b/protocol/v2/ssv/runner/committee.go
index ca981760a5..f9a26e8b59 100644
--- a/protocol/v2/ssv/runner/committee.go
+++ b/protocol/v2/ssv/runner/committee.go
@@ -56,7 +56,10 @@ func NewCommitteeRunner(
 	signer spectypes.BeaconSigner,
 	operatorSigner ssvtypes.OperatorSigner,
 	valCheck specqbft.ProposedValueCheckF,
-) Runner {
+) (Runner, error) {
+	if len(share) == 0 {
+		return nil, errors.New("no shares")
+	}
 	return &CommitteeRunner{
 		BaseRunner: &BaseRunner{
 			RunnerRoleType:     spectypes.RoleCommittee,
@@ -73,7 +76,7 @@ func NewCommitteeRunner(
 		stoppedValidators: make(map[spectypes.ValidatorPK]struct{}),
 		submittedDuties:   make(map[spectypes.BeaconRole]map[phase0.ValidatorIndex]struct{}),
 		metrics:           metrics.NewConsensusMetrics(spectypes.RoleCommittee),
-	}
+	}, nil
 }
 
 func (cr *CommitteeRunner) StartNewDuty(logger *zap.Logger, duty spectypes.Duty, quorum uint64) error {
@@ -225,14 +228,14 @@ func (cr *CommitteeRunner) ProcessConsensus(logger *zap.Logger, msg *spectypes.S
 			postConsensusMsg.Messages = append(postConsensusMsg.Messages, partialMsg)
 
 			// TODO: revert log
-			adr, err := attestationData.HashTreeRoot()
+			attDataRoot, err := attestationData.HashTreeRoot()
 			if err != nil {
 				return errors.Wrap(err, "failed to hash attestation data")
 			}
 			logger.Debug("signed attestation data", zap.Int("validator_index", int(duty.ValidatorIndex)),
 				zap.String("pub_key", hex.EncodeToString(duty.PubKey[:])),
 				zap.Any("attestation_data", attestationData),
-				zap.String("attestation_data_root", hex.EncodeToString(adr[:])),
+				zap.String("attestation_data_root", hex.EncodeToString(attDataRoot[:])),
 				zap.String("signing_root", hex.EncodeToString(partialMsg.SigningRoot[:])),
 				zap.String("signature", hex.EncodeToString(partialMsg.PartialSignature[:])),
 			)
diff --git a/protocol/v2/ssv/runner/proposer.go b/protocol/v2/ssv/runner/proposer.go
index dc2d3fc4ae..75427b9609 100644
--- a/protocol/v2/ssv/runner/proposer.go
+++ b/protocol/v2/ssv/runner/proposer.go
@@ -50,7 +50,11 @@ func NewProposerRunner(
 	valCheck specqbft.ProposedValueCheckF,
 	highestDecidedSlot phase0.Slot,
 	graffiti []byte,
-) Runner {
+) (Runner, error) {
+	if len(share) != 1 {
+		return nil, errors.New("must have one share")
+	}
+
 	return &ProposerRunner{
 		BaseRunner: &BaseRunner{
 			RunnerRoleType:     spectypes.RoleProposer,
@@ -68,7 +72,7 @@ func NewProposerRunner(
 		operatorSigner: operatorSigner,
 		graffiti:       graffiti,
 		metrics:        metrics.NewConsensusMetrics(spectypes.RoleProposer),
-	}
+	}, nil
 }
 
 func (r *ProposerRunner) StartNewDuty(logger *zap.Logger, duty spectypes.Duty, quorum uint64) error {
diff --git a/protocol/v2/ssv/runner/runner.go b/protocol/v2/ssv/runner/runner.go
index bacde7888a..b0d59fed4f 100644
--- a/protocol/v2/ssv/runner/runner.go
+++ b/protocol/v2/ssv/runner/runner.go
@@ -166,8 +166,8 @@ func (b *BaseRunner) basePreConsensusMsgProcessing(runner Runner, signedMsg *spe
 		return false, nil, errors.Wrap(err, "invalid pre-consensus message")
 	}
 
-	hasQuorum, roots, err := b.basePartialSigMsgProcessing(signedMsg, b.State.PreConsensusContainer)
-	return hasQuorum, roots, errors.Wrap(err, "could not process pre-consensus partial signature msg")
+	hasQuorum, roots := b.basePartialSigMsgProcessing(signedMsg, b.State.PreConsensusContainer)
+	return hasQuorum, roots, nil
 }
 
 // baseConsensusMsgProcessing is a base func that all runner implementation can call for processing a consensus msg
@@ -176,6 +176,9 @@ func (b *BaseRunner) baseConsensusMsgProcessing(logger *zap.Logger, runner Runne
 	if b.hasRunningDuty() && b.State != nil && b.State.RunningInstance != nil {
 		prevDecided, _ = b.State.RunningInstance.IsDecided()
 	}
+	if prevDecided {
+		return true, nil, errors.New("not processing consensus message since consensus has already finished")
+	}
 
 	decidedMsg, err := b.QBFTController.ProcessMsg(logger, msg)
 	if err != nil {
@@ -215,9 +218,6 @@ func (b *BaseRunner) baseConsensusMsgProcessing(logger *zap.Logger, runner Runne
 		return true, nil, errors.Wrap(err, "failed to parse decided value to ValidatorConsensusData")
 	}
 
-	// update the highest decided slot
-	b.highestDecidedSlot = b.State.StartingDuty.DutySlot()
-
 	if err := b.validateDecidedConsensusData(runner, decidedValue); err != nil {
 		return true, nil, errors.Wrap(err, "decided ValidatorConsensusData invalid")
 	}
@@ -227,6 +227,9 @@ func (b *BaseRunner) baseConsensusMsgProcessing(logger *zap.Logger, runner Runne
 		return true, nil, errors.Wrap(err, "could not encode decided value")
 	}
 
+	// update the highest decided slot
+	b.highestDecidedSlot = b.State.StartingDuty.DutySlot()
+
 	return true, decidedValue, nil
 }
 
@@ -236,15 +239,15 @@ func (b *BaseRunner) basePostConsensusMsgProcessing(logger *zap.Logger, runner R
 		return false, nil, errors.Wrap(err, "invalid post-consensus message")
 	}
 
-	hasQuorum, roots, err := b.basePartialSigMsgProcessing(signedMsg, b.State.PostConsensusContainer)
-	return hasQuorum, roots, errors.Wrap(err, "could not process post-consensus partial signature msg")
+	hasQuorum, roots := b.basePartialSigMsgProcessing(signedMsg, b.State.PostConsensusContainer)
+	return hasQuorum, roots, nil
 }
 
 // basePartialSigMsgProcessing adds a validated (without signature verification) validated partial msg to the container, checks for quorum and returns true (and roots) if quorum exists
 func (b *BaseRunner) basePartialSigMsgProcessing(
 	signedMsg *spectypes.PartialSignatureMessages,
 	container *ssv.PartialSigContainer,
-) (bool, [][32]byte, error) {
+) (bool, [][32]byte) {
 	roots := make([][32]byte, 0)
 	anyQuorum := false
 
@@ -267,7 +270,7 @@ func (b *BaseRunner) basePartialSigMsgProcessing(
 		}
 	}
 
-	return anyQuorum, roots, nil
+	return anyQuorum, roots
 }
 
 // didDecideCorrectly returns true if the expected consensus instance decided correctly
@@ -313,7 +316,6 @@ func (b *BaseRunner) decide(logger *zap.Logger, runner Runner, slot phase0.Slot,
 
 	if err := runner.GetValCheckF()(byts); err != nil {
 		return errors.Wrap(err, "input data invalid")
-
 	}
 	if err := runner.GetBaseRunner().QBFTController.StartNewInstance(logger,
diff --git a/protocol/v2/ssv/runner/sync_committee_aggregator.go b/protocol/v2/ssv/runner/sync_committee_aggregator.go
index 70c4406aa8..886d3fd369 100644
--- a/protocol/v2/ssv/runner/sync_committee_aggregator.go
+++ b/protocol/v2/ssv/runner/sync_committee_aggregator.go
@@ -44,7 +44,11 @@ func NewSyncCommitteeAggregatorRunner(
 	operatorSigner ssvtypes.OperatorSigner,
 	valCheck specqbft.ProposedValueCheckF,
 	highestDecidedSlot phase0.Slot,
-) Runner {
+) (Runner, error) {
+	if len(share) != 1 {
+		return nil, errors.New("must have one share")
+	}
+
 	return &SyncCommitteeAggregatorRunner{
 		BaseRunner: &BaseRunner{
 			RunnerRoleType:     spectypes.RoleSyncCommitteeContribution,
@@ -62,7 +66,7 @@ func NewSyncCommitteeAggregatorRunner(
 		operatorSigner: operatorSigner,
 
 		metrics: metrics.NewConsensusMetrics(spectypes.RoleSyncCommitteeContribution),
-	}
+	}, nil
 }
 
 func (r *SyncCommitteeAggregatorRunner) StartNewDuty(logger *zap.Logger, duty spectypes.Duty, quorum uint64) error {
diff --git a/protocol/v2/ssv/runner/validator_registration.go b/protocol/v2/ssv/runner/validator_registration.go
index c82c35d932..817a65cff5 100644
--- a/protocol/v2/ssv/runner/validator_registration.go
+++ b/protocol/v2/ssv/runner/validator_registration.go
@@ -39,7 +39,11 @@ func NewValidatorRegistrationRunner(
 	network specqbft.Network,
 	signer spectypes.BeaconSigner,
 	operatorSigner ssvtypes.OperatorSigner,
-) Runner {
+) (Runner, error) {
+	if len(share) != 1 {
+		return nil, errors.New("must have one share")
+	}
+
 	return &ValidatorRegistrationRunner{
 		BaseRunner: &BaseRunner{
 			RunnerRoleType:     spectypes.RoleValidatorRegistration,
@@ -54,7 +58,7 @@ func NewValidatorRegistrationRunner(
 
 		operatorSigner: operatorSigner,
 		metrics:        metrics.NewConsensusMetrics(spectypes.RoleValidatorRegistration),
-	}
+	}, nil
 }
 
 func (r *ValidatorRegistrationRunner) StartNewDuty(logger *zap.Logger, duty spectypes.Duty, quorum uint64) error {
diff --git a/protocol/v2/ssv/runner/voluntary_exit.go b/protocol/v2/ssv/runner/voluntary_exit.go
index c2640406f9..5ddd8c9724 100644
--- a/protocol/v2/ssv/runner/voluntary_exit.go
+++ b/protocol/v2/ssv/runner/voluntary_exit.go
@@ -41,7 +41,12 @@ func NewVoluntaryExitRunner(
 	network specqbft.Network,
 	signer spectypes.BeaconSigner,
 	operatorSigner ssvtypes.OperatorSigner,
-) Runner {
+) (Runner, error) {
+
+	if len(share) != 1 {
+		return nil, errors.New("must have one share")
+	}
+
 	return &VoluntaryExitRunner{
 		BaseRunner: &BaseRunner{
 			RunnerRoleType:     spectypes.RoleVoluntaryExit,
@@ -56,7 +61,7 @@ func NewVoluntaryExitRunner(
 
 		operatorSigner: operatorSigner,
 		metrics:        metrics.NewConsensusMetrics(spectypes.RoleVoluntaryExit),
-	}
+	}, nil
 }
 
 func (r *VoluntaryExitRunner) StartNewDuty(logger *zap.Logger, duty spectypes.Duty, quorum uint64) error {
diff --git a/protocol/v2/ssv/spectest/committee_msg_processing_type.go b/protocol/v2/ssv/spectest/committee_msg_processing_type.go
index 9c342ea897..bb65750e84 100644
--- a/protocol/v2/ssv/spectest/committee_msg_processing_type.go
+++ b/protocol/v2/ssv/spectest/committee_msg_processing_type.go
@@ -60,7 +60,7 @@ func (test *CommitteeSpecTest) RunAsPartOfMultiTest(t *testing.T) {
 	}
 
 	// test output message (in asynchronous order)
-	spectestingutils.ComparePartialSignatureOutputMessagesInAsynchronousOrder(t, test.OutputMessages, broadcastedMsgs, test.Committee.Operator.Committee)
+	spectestingutils.ComparePartialSignatureOutputMessagesInAsynchronousOrder(t, test.OutputMessages, broadcastedMsgs, test.Committee.CommitteeMember.Committee)
 
 	// test beacon broadcasted msgs
 	spectestingutils.CompareBroadcastedBeaconMsgs(t, test.BeaconBroadcastedRoots, broadcastedRoots)
@@ -180,7 +180,7 @@ func overrideStateComparisonCommitteeSpecTest(t *testing.T, test *CommitteeSpecT
 	require.NoError(t, err)
 
 	committee.Shares = specCommittee.Share
-	committee.Operator = &specCommittee.CommitteeMember
+	committee.CommitteeMember = &specCommittee.CommitteeMember
 	//for _, r := range committee.Runners {
 	//	r.BaseRunner.BeaconNetwork = spectypes.BeaconTestNetwork
 	//}
diff --git a/protocol/v2/ssv/spectest/msg_processing_type.go b/protocol/v2/ssv/spectest/msg_processing_type.go
index 4468f721a8..08ffe4848c 100644
--- a/protocol/v2/ssv/spectest/msg_processing_type.go
+++ b/protocol/v2/ssv/spectest/msg_processing_type.go
@@ -146,7 +146,7 @@ func (test *MsgProcessingSpecTest) RunAsPartOfMultiTest(t *testing.T, logger *za
 		}
 		network = runnerInstance.GetNetwork().(*spectestingutils.TestingNetwork)
 		beaconNetwork = runnerInstance.GetBeaconNode().(*tests.TestingBeaconNodeWrapped)
-		committee = c.Operator.Committee
+		committee = c.CommitteeMember.Committee
 	default:
 		network = v.Network.(*spectestingutils.TestingNetwork)
 		committee = v.Operator.Committee
@@ -240,8 +240,8 @@ var baseCommitteeWithRunnerSample = func(
 		shareMap[valIdx] = spectestingutils.TestingShare(ks, valIdx)
 	}
 
-	createRunnerF := func(_ phase0.Slot, shareMap map[phase0.ValidatorIndex]*spectypes.Share, _ []spectypes.ShareValidatorPK) *runner.CommitteeRunner {
-		return runner.NewCommitteeRunner(
+	createRunnerF := func(_ phase0.Slot, shareMap map[phase0.ValidatorIndex]*spectypes.Share, _ []spectypes.ShareValidatorPK) (*runner.CommitteeRunner, error) {
+		r, err := runner.NewCommitteeRunner(
 			networkconfig.TestNetwork,
 			shareMap,
 			controller.NewController(
@@ -256,7 +256,8 @@
 			runnerSample.GetSigner(),
 			runnerSample.GetOperatorSigner(),
 			runnerSample.GetValCheckF(),
-		).(*runner.CommitteeRunner)
+		)
+		return r.(*runner.CommitteeRunner), err
 	}
 
 	ctx, cancel := context.WithCancel(ctx)
@@ -267,8 +268,8 @@ var baseCommitteeWithRunnerSample = func(
 		runnerSample.GetBaseRunner().BeaconNetwork,
 		spectestingutils.TestingCommitteeMember(keySetSample),
 		createRunnerF,
+		shareMap,
 	)
-	c.Shares = shareMap
 	return c
 }
diff --git a/protocol/v2/ssv/spectest/runner_construction_type.go b/protocol/v2/ssv/spectest/runner_construction_type.go
new file mode 100644
index 0000000000..c948b2e2cd
--- /dev/null
+++ b/protocol/v2/ssv/spectest/runner_construction_type.go
@@ -0,0 +1,46 @@
+package spectest
+
+import (
+	"testing"
+
+	"github.com/attestantio/go-eth2-client/spec/phase0"
+	"github.com/ssvlabs/ssv-spec/types"
+	"github.com/stretchr/testify/require"
+
+	"github.com/ssvlabs/ssv/logging"
+	runnertesting "github.com/ssvlabs/ssv/protocol/v2/ssv/testing"
+)
+
+type RunnerConstructionSpecTest struct {
+	Name      string
+	Shares    map[phase0.ValidatorIndex]*types.Share
+	RoleError map[types.RunnerRole]string
+}
+
+func (test *RunnerConstructionSpecTest) TestName() string {
+	return "RunnerConstruction " + test.Name
+}
+
+func (test *RunnerConstructionSpecTest) Run(t *testing.T) {
+	logger := logging.TestLogger(t)
+	if len(test.RoleError) == 0 {
+		panic("no roles")
+	}
+
+	for role, expectedError := range test.RoleError {
+		// Construct runner and get construction error
+		_, err := runnertesting.ConstructBaseRunnerWithShareMap(logger, role, test.Shares)
+
+		// Check error
+		if len(expectedError) > 0 {
+			require.Error(t, err)
+			require.Contains(t, err.Error(), expectedError)
+		} else {
+			require.NoError(t, err)
+		}
+	}
+}
+
+func (test *RunnerConstructionSpecTest) GetPostState() (interface{}, error) {
+	return nil, nil
+}
diff --git a/protocol/v2/ssv/spectest/ssv_mapping_test.go b/protocol/v2/ssv/spectest/ssv_mapping_test.go
index 6c4e40e321..d63aa369f7 100644
--- a/protocol/v2/ssv/spectest/ssv_mapping_test.go
+++ b/protocol/v2/ssv/spectest/ssv_mapping_test.go
@@ -14,6 +14,7 @@ import (
 	"github.com/ssvlabs/ssv-spec/ssv/spectest/tests"
 	"github.com/ssvlabs/ssv-spec/ssv/spectest/tests/committee"
 	"github.com/ssvlabs/ssv-spec/ssv/spectest/tests/partialsigcontainer"
+	runnerconstruction "github.com/ssvlabs/ssv-spec/ssv/spectest/tests/runner/construction"
 	"github.com/ssvlabs/ssv-spec/ssv/spectest/tests/runner/duties/newduty"
 	"github.com/ssvlabs/ssv-spec/ssv/spectest/tests/runner/duties/synccommitteeaggregator"
 	"github.com/ssvlabs/ssv-spec/ssv/spectest/tests/valcheck"
@@ -184,6 +185,19 @@ func prepareTest(t *testing.T, logger *zap.Logger, name string, test interface{}
 			Tests: typedTests,
 		}
 
+		return &runnable{
+			name: typedTest.TestName(),
+			test: func(t *testing.T) {
+				typedTest.Run(t)
+			},
+		}
+
+	case reflect.TypeOf(&runnerconstruction.RunnerConstructionSpecTest{}).String():
+		byts, err := json.Marshal(test)
+		require.NoError(t, err)
+		typedTest := &RunnerConstructionSpecTest{}
+		require.NoError(t, json.Unmarshal(byts, &typedTest))
+
 		return &runnable{
 			name: typedTest.TestName(),
 			test: func(t *testing.T) {
@@ -545,9 +559,11 @@ func fixCommitteeForRun(t *testing.T, ctx context.Context, logger *zap.Logger, c
 		logger,
 		tests2.NewTestingBeaconNodeWrapped().GetBeaconNetwork(),
 		&specCommittee.CommitteeMember,
-		func(slot phase0.Slot, shareMap map[phase0.ValidatorIndex]*spectypes.Share, _ []spectypes.ShareValidatorPK) *runner.CommitteeRunner {
-			return ssvtesting.CommitteeRunnerWithShareMap(logger, shareMap).(*runner.CommitteeRunner)
+		func(slot phase0.Slot, shareMap map[phase0.ValidatorIndex]*spectypes.Share, _ []spectypes.ShareValidatorPK) (*runner.CommitteeRunner, error) {
+			r := ssvtesting.CommitteeRunnerWithShareMap(logger, shareMap)
+			return r.(*runner.CommitteeRunner), nil
 		},
+		specCommittee.Share,
 	)
 	tmpSsvCommittee := &validator.Committee{}
 	require.NoError(t, json.Unmarshal(byts, tmpSsvCommittee))
@@ -565,7 +581,6 @@ func fixCommitteeForRun(t *testing.T, ctx context.Context, logger *zap.Logger, c
 		fixedRunner := fixRunnerForRun(t, committeeMap["Runners"].(map[string]interface{})[fmt.Sprintf("%v", slot)].(map[string]interface{}), testingutils.KeySetForShare(shareInstance))
 		c.Runners[slot] = fixedRunner.(*runner.CommitteeRunner)
 	}
-	c.Shares = specCommittee.Share
 
 	return c
 }
diff --git a/protocol/v2/ssv/testing/runner.go b/protocol/v2/ssv/testing/runner.go
index 9c2311a443..38dc5651a0 100644
--- a/protocol/v2/ssv/testing/runner.go
+++ b/protocol/v2/ssv/testing/runner.go
@@ -4,12 +4,15 @@ import (
 	"bytes"
 
 	"github.com/attestantio/go-eth2-client/spec/phase0"
+	"github.com/pkg/errors"
 	specqbft "github.com/ssvlabs/ssv-spec/qbft"
 	spectypes "github.com/ssvlabs/ssv-spec/types"
 	spectestingutils "github.com/ssvlabs/ssv-spec/types/testingutils"
-	"github.com/ssvlabs/ssv/protocol/v2/ssv"
 	"go.uber.org/zap"
 
+	"github.com/ssvlabs/ssv/protocol/v2/qbft/controller"
+	"github.com/ssvlabs/ssv/protocol/v2/ssv"
+
 	"github.com/ssvlabs/ssv/exporter/convert"
 	"github.com/ssvlabs/ssv/integration/qbft/tests"
 	"github.com/ssvlabs/ssv/networkconfig"
@@ -56,11 +59,19 @@ var UnknownDutyTypeRunner = func(logger *zap.Logger, keySet *spectestingutils.Te
 	return baseRunner(logger, spectestingutils.UnknownDutyType, keySet)
 }
 
-var baseRunner = func(
+var baseRunner = func(logger *zap.Logger, role spectypes.RunnerRole, keySet *spectestingutils.TestKeySet) runner.Runner {
+	runner, err := ConstructBaseRunner(logger, role, keySet)
+	if err != nil {
+		panic(err)
+	}
+	return runner
+}
+
+var ConstructBaseRunner = func(
 	logger *zap.Logger,
 	role spectypes.RunnerRole,
 	keySet *spectestingutils.TestKeySet,
-) runner.Runner {
+) (runner.Runner, error) {
 	share := spectestingutils.TestingShare(keySet, spectestingutils.TestingValidatorIndex)
 	identifier := spectypes.NewMsgID(spectypes.JatoTestnet, spectestingutils.TestingValidatorPubKey[:], role)
 	net := spectestingutils.NewTestingNetwork(1, keySet.OperatorKeys[1])
@@ -106,9 +117,12 @@ var baseRunner = func(
 	shareMap := make(map[phase0.ValidatorIndex]*spectypes.Share)
 	shareMap[share.ValidatorIndex] = share
 
+	var r runner.Runner
+	var err error
+
 	switch role {
 	case spectypes.RoleCommittee:
-		return runner.NewCommitteeRunner(
+		r, err = runner.NewCommitteeRunner(
 			networkconfig.TestNetwork,
 			shareMap,
 			contr,
@@ -119,7 +133,7 @@ var baseRunner = func(
 			valCheck,
 		)
 	case spectypes.RoleAggregator:
-		return runner.NewAggregatorRunner(
+		r, err = runner.NewAggregatorRunner(
 			networkconfig.TestNetwork.AlanDomainType,
 			spectypes.BeaconTestNetwork,
 			shareMap,
@@ -132,7 +146,7 @@ var baseRunner = func(
 			TestingHighestDecidedSlot,
 		)
 	case spectypes.RoleProposer:
-		return runner.NewProposerRunner(
+		r, err = runner.NewProposerRunner(
 			networkconfig.TestNetwork.AlanDomainType,
 			spectypes.BeaconTestNetwork,
 			shareMap,
@@ -146,7 +160,7 @@ var baseRunner = func(
 			[]byte("graffiti"),
 		)
 	case spectypes.RoleSyncCommitteeContribution:
-		return runner.NewSyncCommitteeAggregatorRunner(
+		r, err = runner.NewSyncCommitteeAggregatorRunner(
 			networkconfig.TestNetwork.AlanDomainType,
 			spectypes.BeaconTestNetwork,
 			shareMap,
@@ 
-159,7 +173,7 @@ var baseRunner = func( TestingHighestDecidedSlot, ) case spectypes.RoleValidatorRegistration: - return runner.NewValidatorRegistrationRunner( + r, err = runner.NewValidatorRegistrationRunner( networkconfig.TestNetwork.AlanDomainType, spectypes.BeaconTestNetwork, shareMap, @@ -169,7 +183,7 @@ var baseRunner = func( opSigner, ) case spectypes.RoleVoluntaryExit: - return runner.NewVoluntaryExitRunner( + r, err = runner.NewVoluntaryExitRunner( networkconfig.TestNetwork.AlanDomainType, spectypes.BeaconTestNetwork, shareMap, @@ -179,7 +193,7 @@ var baseRunner = func( opSigner, ) case spectestingutils.UnknownDutyType: - ret := runner.NewCommitteeRunner( + r, err = runner.NewCommitteeRunner( networkconfig.TestNetwork, shareMap, contr, @@ -189,11 +203,11 @@ var baseRunner = func( opSigner, valCheck, ) - ret.(*runner.CommitteeRunner).BaseRunner.RunnerRoleType = spectestingutils.UnknownDutyType - return ret + r.(*runner.CommitteeRunner).BaseRunner.RunnerRoleType = spectestingutils.UnknownDutyType default: - panic("unknown role type") + return nil, errors.New("unknown role type") } + return r, err } // @@ -262,81 +276,101 @@ var baseRunner = func( // return msgs //} -var baseRunnerWithShareMap = func( +var baseRunnerWithShareMap = func(logger *zap.Logger, role spectypes.RunnerRole, shareMap map[phase0.ValidatorIndex]*spectypes.Share) runner.Runner { + runner, err := ConstructBaseRunnerWithShareMap(logger, role, shareMap) + if err != nil { + panic(err) + } + return runner +} + +var ConstructBaseRunnerWithShareMap = func( logger *zap.Logger, role spectypes.RunnerRole, shareMap map[phase0.ValidatorIndex]*spectypes.Share, -) runner.Runner { +) (runner.Runner, error) { - var keySetInstance *spectestingutils.TestKeySet - var shareInstance *spectypes.Share - for _, share := range shareMap { - keySetInstance = spectestingutils.KeySetForShare(share) - break - } + var identifier spectypes.MessageID + var net *spectestingutils.TestingNetwork + var opSigner *spectypes.OperatorSigner + var valCheck specqbft.ProposedValueCheckF + var contr *controller.Controller - sharePubKeys := make([]spectypes.ShareValidatorPK, 0) - for _, share := range shareMap { - sharePubKeys = append(sharePubKeys, share.SharePubKey) - } + km := spectestingutils.NewTestingKeyManager() - // Identifier - var ownerID []byte - if role == spectypes.RoleCommittee { - committee := make([]uint64, 0) - for _, op := range keySetInstance.Committee() { - committee = append(committee, op.Signer) + if len(shareMap) > 0 { + var keySetInstance *spectestingutils.TestKeySet + var shareInstance *spectypes.Share + for _, share := range shareMap { + keySetInstance = spectestingutils.KeySetForShare(share) + shareInstance = spectestingutils.TestingShare(keySetInstance, share.ValidatorIndex) + break } - committeeID := spectypes.GetCommitteeID(committee) - ownerID = bytes.Clone(committeeID[:]) - } else { - ownerID = spectestingutils.TestingValidatorPubKey[:] - } - identifier := spectypes.NewMsgID(spectestingutils.TestingSSVDomainType, ownerID, role) - net := spectestingutils.NewTestingNetwork(1, keySetInstance.OperatorKeys[1]) + sharePubKeys := make([]spectypes.ShareValidatorPK, 0) + for _, share := range shareMap { + sharePubKeys = append(sharePubKeys, share.SharePubKey) + } - km := spectestingutils.NewTestingKeyManager() - committeeMember := spectestingutils.TestingCommitteeMember(keySetInstance) - opSigner := spectestingutils.NewOperatorSigner(keySetInstance, committeeMember.OperatorID) + // Identifier + var ownerID []byte + if role == 
spectypes.RoleCommittee { + committee := make([]uint64, 0) + for _, op := range keySetInstance.Committee() { + committee = append(committee, op.Signer) + } + committeeID := spectypes.GetCommitteeID(committee) + ownerID = bytes.Clone(committeeID[:]) + } else { + ownerID = spectestingutils.TestingValidatorPubKey[:] + } + identifier = spectypes.NewMsgID(spectestingutils.TestingSSVDomainType, ownerID, role) - var valCheck specqbft.ProposedValueCheckF - switch role { - case spectypes.RoleCommittee: - valCheck = ssv.BeaconVoteValueCheckF(km, spectestingutils.TestingDutySlot, - sharePubKeys, spectestingutils.TestingDutyEpoch) - case spectypes.RoleProposer: - valCheck = ssv.ProposerValueCheckF(km, spectypes.BeaconTestNetwork, - shareInstance.ValidatorPubKey, shareInstance.ValidatorIndex, shareInstance.SharePubKey) - case spectypes.RoleAggregator: - valCheck = ssv.AggregatorValueCheckF(km, spectypes.BeaconTestNetwork, - shareInstance.ValidatorPubKey, shareInstance.ValidatorIndex) - case spectypes.RoleSyncCommitteeContribution: - valCheck = ssv.SyncCommitteeContributionValueCheckF(km, spectypes.BeaconTestNetwork, - shareInstance.ValidatorPubKey, shareInstance.ValidatorIndex) - default: - valCheck = nil - } + net = spectestingutils.NewTestingNetwork(1, keySetInstance.OperatorKeys[1]) - config := testing.TestingConfig(logger, keySetInstance, convert.RunnerRole(identifier.GetRoleType())) - config.ValueCheckF = valCheck - config.ProposerF = func(state *specqbft.State, round specqbft.Round) spectypes.OperatorID { - return 1 - } - config.Network = net - config.Storage = testing.TestingStores(logger).Get(convert.RunnerRole(role)) + km = spectestingutils.NewTestingKeyManager() + committeeMember := spectestingutils.TestingCommitteeMember(keySetInstance) + opSigner = spectestingutils.NewOperatorSigner(keySetInstance, committeeMember.OperatorID) - contr := testing.NewTestingQBFTController( - spectestingutils.Testing4SharesSet(), - identifier[:], - committeeMember, - config, - false, - ) + switch role { + case spectypes.RoleCommittee: + valCheck = ssv.BeaconVoteValueCheckF(km, spectestingutils.TestingDutySlot, + sharePubKeys, spectestingutils.TestingDutyEpoch) + case spectypes.RoleProposer: + valCheck = ssv.ProposerValueCheckF(km, spectypes.BeaconTestNetwork, + shareInstance.ValidatorPubKey, shareInstance.ValidatorIndex, shareInstance.SharePubKey) + case spectypes.RoleAggregator: + valCheck = ssv.AggregatorValueCheckF(km, spectypes.BeaconTestNetwork, + shareInstance.ValidatorPubKey, shareInstance.ValidatorIndex) + case spectypes.RoleSyncCommitteeContribution: + valCheck = ssv.SyncCommitteeContributionValueCheckF(km, spectypes.BeaconTestNetwork, + shareInstance.ValidatorPubKey, shareInstance.ValidatorIndex) + default: + valCheck = nil + } + config := testing.TestingConfig(logger, keySetInstance, convert.RunnerRole(identifier.GetRoleType())) + config.ValueCheckF = valCheck + config.ProposerF = func(state *specqbft.State, round specqbft.Round) spectypes.OperatorID { + return 1 + } + config.Network = net + config.Storage = testing.TestingStores(logger).Get(convert.RunnerRole(role)) + + contr = testing.NewTestingQBFTController( + spectestingutils.Testing4SharesSet(), + identifier[:], + committeeMember, + config, + false, + ) + } + + var r runner.Runner + var err error switch role { case spectypes.RoleCommittee: - return runner.NewCommitteeRunner( + r, err = runner.NewCommitteeRunner( networkconfig.TestNetwork, shareMap, contr, @@ -347,7 +381,7 @@ var baseRunnerWithShareMap = func( valCheck, ) case 
spectypes.RoleAggregator: - return runner.NewAggregatorRunner( + r, err = runner.NewAggregatorRunner( networkconfig.TestNetwork.AlanDomainType, spectypes.BeaconTestNetwork, shareMap, @@ -360,7 +394,7 @@ var baseRunnerWithShareMap = func( TestingHighestDecidedSlot, ) case spectypes.RoleProposer: - return runner.NewProposerRunner( + r, err = runner.NewProposerRunner( networkconfig.TestNetwork.AlanDomainType, spectypes.BeaconTestNetwork, shareMap, @@ -374,7 +408,7 @@ var baseRunnerWithShareMap = func( []byte("graffiti"), ) case spectypes.RoleSyncCommitteeContribution: - return runner.NewSyncCommitteeAggregatorRunner( + r, err = runner.NewSyncCommitteeAggregatorRunner( networkconfig.TestNetwork.AlanDomainType, spectypes.BeaconTestNetwork, shareMap, @@ -387,7 +421,7 @@ var baseRunnerWithShareMap = func( TestingHighestDecidedSlot, ) case spectypes.RoleValidatorRegistration: - return runner.NewValidatorRegistrationRunner( + r, err = runner.NewValidatorRegistrationRunner( networkconfig.TestNetwork.AlanDomainType, spectypes.BeaconTestNetwork, shareMap, @@ -397,7 +431,7 @@ var baseRunnerWithShareMap = func( opSigner, ) case spectypes.RoleVoluntaryExit: - return runner.NewVoluntaryExitRunner( + r, err = runner.NewVoluntaryExitRunner( networkconfig.TestNetwork.AlanDomainType, spectypes.BeaconTestNetwork, shareMap, @@ -407,7 +441,7 @@ var baseRunnerWithShareMap = func( opSigner, ) case spectestingutils.UnknownDutyType: - ret := runner.NewCommitteeRunner( + r, err = runner.NewCommitteeRunner( networkconfig.TestNetwork, shareMap, contr, @@ -417,9 +451,11 @@ var baseRunnerWithShareMap = func( opSigner, valCheck, ) - ret.(*runner.CommitteeRunner).BaseRunner.RunnerRoleType = spectestingutils.UnknownDutyType - return ret + if r != nil { + r.(*runner.CommitteeRunner).BaseRunner.RunnerRoleType = spectestingutils.UnknownDutyType + } default: - panic("unknown role type") + return nil, errors.New("unknown role type") } + return r, err } diff --git a/protocol/v2/ssv/validator/committee.go b/protocol/v2/ssv/validator/committee.go index dfa14cab92..a1512b5717 100644 --- a/protocol/v2/ssv/validator/committee.go +++ b/protocol/v2/ssv/validator/committee.go @@ -27,7 +27,7 @@ var ( runnerExpirySlots = phase0.Slot(34) ) -type CommitteeRunnerFunc func(slot phase0.Slot, shares map[phase0.ValidatorIndex]*spectypes.Share, slashableValidators []spectypes.ShareValidatorPK) *runner.CommitteeRunner +type CommitteeRunnerFunc func(slot phase0.Slot, shares map[phase0.ValidatorIndex]*spectypes.Share, attestingValidators []spectypes.ShareValidatorPK) (*runner.CommitteeRunner, error) type Committee struct { logger *zap.Logger @@ -38,15 +38,11 @@ type Committee struct { BeaconNetwork spectypes.BeaconNetwork Storage *storage.QBFTStores - Queues map[phase0.Slot]queueContainer - - //runnersMtx sync.RWMutex + Queues map[phase0.Slot]queueContainer Runners map[phase0.Slot]*runner.CommitteeRunner + Shares map[phase0.ValidatorIndex]*spectypes.Share - //sharesMtx sync.RWMutex - Shares map[phase0.ValidatorIndex]*spectypes.Share - - Operator *spectypes.CommitteeMember + CommitteeMember *spectypes.CommitteeMember CreateRunnerFn CommitteeRunnerFunc HighestAttestingSlotMap map[spectypes.ValidatorPK]phase0.Slot @@ -58,21 +54,23 @@ func NewCommittee( cancel context.CancelFunc, logger *zap.Logger, beaconNetwork spectypes.BeaconNetwork, - operator *spectypes.CommitteeMember, + committeeMember *spectypes.CommitteeMember, createRunnerFn CommitteeRunnerFunc, - // share map[phase0.ValidatorIndex]*spectypes.Share, // TODO Shouldn't we pass the shares map 
here the same way we do in spec? + shares map[phase0.ValidatorIndex]*spectypes.Share, ) *Committee { + if shares == nil { + shares = make(map[phase0.ValidatorIndex]*spectypes.Share) + } return &Committee{ - logger: logger, - BeaconNetwork: beaconNetwork, - ctx: ctx, - cancel: cancel, - Queues: make(map[phase0.Slot]queueContainer), - Runners: make(map[phase0.Slot]*runner.CommitteeRunner), - Shares: make(map[phase0.ValidatorIndex]*spectypes.Share), - //Shares: share, + logger: logger, + BeaconNetwork: beaconNetwork, + ctx: ctx, + cancel: cancel, + Queues: make(map[phase0.Slot]queueContainer), + Runners: make(map[phase0.Slot]*runner.CommitteeRunner), + Shares: shares, HighestAttestingSlotMap: make(map[spectypes.ValidatorPK]phase0.Slot), - Operator: operator, + CommitteeMember: committeeMember, CreateRunnerFn: createRunnerFn, } } @@ -131,86 +129,67 @@ func (c *Committee) StartDuty(logger *zap.Logger, duty *spectypes.CommitteeDuty) return errors.New(fmt.Sprintf("CommitteeRunner for slot %d already exists", duty.Slot)) } - slashableValidators := make([]spectypes.ShareValidatorPK, 0, len(duty.ValidatorDuties)) - //validatorShares := make(map[phase0.ValidatorIndex]*spectypes.Share, len(duty.ValidatorDuties)) - //toRemove := make([]int, 0) - // Remove beacon duties that don't have a share - //for i, bd := range duty.ValidatorDuties { - // share, ok := c.Shares[bd.ValidatorIndex] - // if !ok { - // toRemove = append(toRemove, i) - // continue - // } - // if bd.Type == spectypes.BNRoleAttester { - // slashableValidators = append(slashableValidators, share.SharePubKey) - // } - // validatorShares[bd.ValidatorIndex] = share - //} - - // TODO bring this back when https://github.com/ssvlabs/ssv-spec/pull/467 is merged and spec is aligned - //// Remove beacon duties that don't have a share - //if len(toRemove) > 0 { - // newDuties, err := removeIndices(duty.BeaconDuties, toRemove) - // if err != nil { - // logger.Warn("could not remove beacon duties", zap.Error(err), zap.Ints("indices", toRemove)) - // } else { - // duty.ValidatorDuties = newDuties - // } - //} - // - //if len(duty.BeaconDuties) == 0 { - // return errors.New("CommitteeDuty has no valid beacon duties") - //} - - // TODO REMOVE this after https://github.com/ssvlabs/ssv-spec/pull/467 is merged and we are aligned to the spec - // and pas validatorShares instead of sharesCopy the runner - // --> - for _, bd := range duty.ValidatorDuties { - share, ok := c.Shares[bd.ValidatorIndex] - if !ok { + // Filter out Beacon duties for which we don't have a share. 
+ filteredDuty := &spectypes.CommitteeDuty{ + Slot: duty.Slot, + ValidatorDuties: make([]*spectypes.ValidatorDuty, 0, len(duty.ValidatorDuties)), + } + shares := make(map[phase0.ValidatorIndex]*spectypes.Share, len(duty.ValidatorDuties)) + attesters := make([]spectypes.ShareValidatorPK, 0, len(duty.ValidatorDuties)) + for _, beaconDuty := range duty.ValidatorDuties { + share, exists := c.Shares[beaconDuty.ValidatorIndex] + if !exists { + logger.Debug("no share for validator duty", + fields.BeaconRole(beaconDuty.Type), + zap.Uint64("validator_index", uint64(beaconDuty.ValidatorIndex))) continue } - if bd.Type == spectypes.BNRoleAttester { - slashableValidators = append(slashableValidators, share.SharePubKey) + shares[beaconDuty.ValidatorIndex] = share + filteredDuty.ValidatorDuties = append(filteredDuty.ValidatorDuties, beaconDuty) + + if beaconDuty.Type == spectypes.BNRoleAttester { + attesters = append(attesters, share.SharePubKey) } } - var sharesCopy = make(map[phase0.ValidatorIndex]*spectypes.Share, len(c.Shares)) - for k, v := range c.Shares { - sharesCopy[k] = v + if len(shares) == 0 { + return errors.New("no shares for duty's validators") } - // <-- - r := c.CreateRunnerFn(duty.Slot, sharesCopy, slashableValidators) + duty = filteredDuty + + runner, err := c.CreateRunnerFn(duty.Slot, shares, attesters) + if err != nil { + return errors.Wrap(err, "could not create CommitteeRunner") + } + // Set timeout function. - r.GetBaseRunner().TimeoutF = c.onTimeout - c.Runners[duty.Slot] = r - if _, ok := c.Queues[duty.Slot]; !ok { + runner.GetBaseRunner().TimeoutF = c.onTimeout + c.Runners[duty.Slot] = runner + _, queueExists := c.Queues[duty.Slot] + if !queueExists { c.Queues[duty.Slot] = queueContainer{ Q: queue.WithMetrics(queue.New(1000), nil), // TODO alan: get queue opts from options queueState: &queue.State{ HasRunningInstance: false, Height: qbft.Height(duty.Slot), Slot: duty.Slot, - //Quorum: options.SSVShare.Share,// TODO + Quorum: c.CommitteeMember.GetQuorum(), }, } - } - pruneLogger := c.logger.With(zap.Uint64("current_slot", uint64(duty.Slot))) - // Prunes all expired committee runners, when new runner is created + pruneLogger := c.logger.With(zap.Uint64("current_slot", uint64(duty.Slot))) if err := c.unsafePruneExpiredRunners(pruneLogger, duty.Slot); err != nil { pruneLogger.Error("couldn't prune expired committee runners", zap.Error(err)) } logger.Info("ℹ️ starting duty processing") - return c.Runners[duty.Slot].StartNewDuty(logger, duty, c.Operator.GetQuorum()) + return runner.StartNewDuty(logger, duty, c.CommitteeMember.GetQuorum()) } -// NOT threadsafe func (c *Committee) stopValidator(logger *zap.Logger, validator spectypes.ValidatorPK) { for slot, runner := range c.Runners { - opIds := types.OperatorIDsFromOperators(c.Operator.Committee) + opIds := types.OperatorIDsFromOperators(c.CommitteeMember.Committee) epoch := c.BeaconNetwork.EstimatedEpochAtSlot(slot) committeeDutyID := fields.FormatCommitteeDutyID(opIds, epoch, slot) @@ -218,40 +197,23 @@ func (c *Committee) stopValidator(logger *zap.Logger, validator spectypes.Valida fields.DutyID(committeeDutyID), fields.Slot(slot), fields.Validator(validator[:]), ) + // TODO: after StopDuty is implemented, if it's not a super fast operation, + // then we maybe shouldn't do it under a lock. 
runner.StopDuty(validator) } } func (c *Committee) PushToQueue(slot phase0.Slot, dec *queue.SSVMessage) { c.mtx.RLock() - defer c.mtx.RUnlock() - if pushed := c.Queues[slot].Q.TryPush(dec); !pushed { - c.logger.Warn("dropping ExecuteDuty message because the queue is full") - } -} - -func removeIndices(s []*spectypes.ValidatorDuty, indicesToRemove []int) ([]*spectypes.ValidatorDuty, error) { - // Create a set to check for duplicate and invalid indices - uniqueIndices := make(map[int]struct{}, len(indicesToRemove)) - for _, id := range indicesToRemove { - if id < 0 || id >= len(s) { - return s, fmt.Errorf("index %d out of range of slice with length %d", id, len(s)) - } - if _, exists := uniqueIndices[id]; exists { - return s, fmt.Errorf("duplicate index %d in %v", id, indicesToRemove) - } - uniqueIndices[id] = struct{}{} + queue, exists := c.Queues[slot] + c.mtx.RUnlock() + if !exists { + c.logger.Warn("cannot push to non-existing queue", zap.Uint64("slot", uint64(slot))) + return } - - // Create a result slice excluding marked elements - result := make([]*spectypes.ValidatorDuty, 0, len(s)-len(indicesToRemove)) - for i, item := range s { - if _, found := uniqueIndices[i]; !found { - result = append(result, item) - } + if pushed := queue.Q.TryPush(dec); !pushed { + c.logger.Warn("dropping ExecuteDuty message because the queue is full") } - - return result, nil } // ProcessMessage processes Network Message of all types @@ -263,7 +225,7 @@ func (c *Committee) ProcessMessage(logger *zap.Logger, msg *queue.SSVMessage) er } // Verify SignedSSVMessage's signature - if err := spectypes.Verify(msg.SignedSSVMessage, c.Operator.Committee); err != nil { + if err := spectypes.Verify(msg.SignedSSVMessage, c.CommitteeMember.Committee); err != nil { return errors.Wrap(err, "SignedSSVMessage has an invalid signature") } @@ -329,7 +291,7 @@ func (c *Committee) unsafePruneExpiredRunners(logger *zap.Logger, currentSlot ph for slot := range c.Runners { if slot <= minValidSlot { - opIds := types.OperatorIDsFromOperators(c.Operator.Committee) + opIds := types.OperatorIDsFromOperators(c.CommitteeMember.Committee) epoch := c.BeaconNetwork.EstimatedEpochAtSlot(slot) committeeDutyID := fields.FormatCommitteeDutyID(opIds, epoch, slot) logger = logger.With(fields.DutyID(committeeDutyID)) @@ -374,7 +336,7 @@ func (c *Committee) MarshalJSON() ([]byte, error) { // Create object and marshal alias := &CommitteeAlias{ Runners: c.Runners, - CommitteeMember: c.Operator, + CommitteeMember: c.CommitteeMember, Share: c.Shares, } @@ -385,9 +347,9 @@ func (c *Committee) MarshalJSON() ([]byte, error) { func (c *Committee) UnmarshalJSON(data []byte) error { type CommitteeAlias struct { - Runners map[phase0.Slot]*runner.CommitteeRunner - Operator *spectypes.CommitteeMember - Shares map[phase0.ValidatorIndex]*spectypes.Share + Runners map[phase0.Slot]*runner.CommitteeRunner + CommitteeMember *spectypes.CommitteeMember + Shares map[phase0.ValidatorIndex]*spectypes.Share } // Unmarshal the JSON data into the auxiliary struct @@ -398,14 +360,14 @@ func (c *Committee) UnmarshalJSON(data []byte) error { // Assign fields c.Runners = aux.Runners - c.Operator = aux.Operator + c.CommitteeMember = aux.CommitteeMember c.Shares = aux.Shares return nil } func (c *Committee) validateMessage(msg *spectypes.SSVMessage) error { - if !(c.Operator.CommitteeID.MessageIDBelongs(msg.GetID())) { + if !(c.CommitteeMember.CommitteeID.MessageIDBelongs(msg.GetID())) { return errors.New("msg ID doesn't match committee ID") } diff --git 
a/protocol/v2/ssv/validator/utils_test.go b/protocol/v2/ssv/validator/utils_test.go deleted file mode 100644 index b5e65bbf9d..0000000000 --- a/protocol/v2/ssv/validator/utils_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package validator - -import ( - "testing" - - "github.com/ssvlabs/ssv-spec/types" - "github.com/stretchr/testify/require" -) - -func TestRemoveIndices(t *testing.T) { - type TestInputType struct { - duties []*types.ValidatorDuty - indicesToRemove []int - } - type TestCase struct { - input TestInputType - output []int - expectedErrorText string - } - - testCases := []TestCase{ - { - input: TestInputType{ - duties: []*types.ValidatorDuty{ - {Slot: 0}, {Slot: 1}, {Slot: 2}, {Slot: 3}, {Slot: 4}, - }, - indicesToRemove: []int{0, 3, 1}, - }, - output: []int{2, 4}, - expectedErrorText: "", - }, - { - input: TestInputType{ - duties: []*types.ValidatorDuty{ - {Slot: 1}, - }, - indicesToRemove: []int{0}, - }, - output: []int{}, - expectedErrorText: "", - }, - { - input: TestInputType{ - duties: []*types.ValidatorDuty{ - {Slot: 0}, {Slot: 1}, {Slot: 2}, {Slot: 3}, - }, - indicesToRemove: []int{0, 3}, - }, - output: []int{1, 2}, - expectedErrorText: "", - }, - { - input: TestInputType{ - duties: []*types.ValidatorDuty{ - {Slot: 0}, {Slot: 1}, {Slot: 2}, {Slot: 3}, - }, - indicesToRemove: []int{0, 3, 3, 3}, - }, - output: []int{}, - expectedErrorText: "duplicate index 3 in [0 3 3 3]", - }, - { - input: TestInputType{ - duties: []*types.ValidatorDuty{ - {Slot: 0}, {Slot: 1}, {Slot: 2}, {Slot: 3}, - }, - indicesToRemove: []int{0, 23, 42}, - }, - output: []int{}, - expectedErrorText: "index 23 out of range of slice with length 4", - }, - } - - for _, tc := range testCases { - filteredDuties, err := removeIndices(tc.input.duties, tc.input.indicesToRemove) - - if tc.expectedErrorText != "" { - require.Equal(t, tc.expectedErrorText, err.Error()) - continue - } - - slotsLeft := make([]int, 0) - for _, v := range filteredDuties { - slotsLeft = append(slotsLeft, int(v.Slot)) - } - - require.Len(t, slotsLeft, len(tc.output)) - require.ElementsMatch(t, tc.output, slotsLeft) - } -} diff --git a/protocol/v2/ssv/validator/validator.go b/protocol/v2/ssv/validator/validator.go index c39c3c30f6..3189d9adc6 100644 --- a/protocol/v2/ssv/validator/validator.go +++ b/protocol/v2/ssv/validator/validator.go @@ -99,19 +99,21 @@ func NewValidator(pctx context.Context, cancel func(), options Options) *Validat } // StartDuty starts a duty for the validator -func (v *Validator) StartDuty(logger *zap.Logger, iduty spectypes.Duty) error { - - duty := iduty.(*spectypes.ValidatorDuty) // TODO: err handling +func (v *Validator) StartDuty(logger *zap.Logger, duty spectypes.Duty) error { + vDuty, ok := duty.(*spectypes.ValidatorDuty) + if !ok { + return fmt.Errorf("expected ValidatorDuty, got %T", duty) + } - dutyRunner := v.DutyRunners[spectypes.MapDutyToRunnerRole(duty.Type)] + dutyRunner := v.DutyRunners[spectypes.MapDutyToRunnerRole(vDuty.Type)] if dutyRunner == nil { - return errors.Errorf("no runner for duty type %s", duty.Type.String()) + return errors.Errorf("no runner for duty type %s", vDuty.Type.String()) } // Log with duty ID. 
baseRunner := dutyRunner.GetBaseRunner() - v.dutyIDs.Set(spectypes.MapDutyToRunnerRole(duty.Type), fields.FormatDutyID(baseRunner.BeaconNetwork.EstimatedEpochAtSlot(duty.Slot), duty.Slot, duty.Type.String(), duty.ValidatorIndex)) - logger = trySetDutyID(logger, v.dutyIDs, spectypes.MapDutyToRunnerRole(duty.Type)) + v.dutyIDs.Set(spectypes.MapDutyToRunnerRole(vDuty.Type), fields.FormatDutyID(baseRunner.BeaconNetwork.EstimatedEpochAtSlot(vDuty.Slot), vDuty.Slot, vDuty.Type.String(), vDuty.ValidatorIndex)) + logger = trySetDutyID(logger, v.dutyIDs, spectypes.MapDutyToRunnerRole(vDuty.Type)) // Log with height. if baseRunner.QBFTController != nil { @@ -120,7 +122,7 @@ func (v *Validator) StartDuty(logger *zap.Logger, iduty spectypes.Duty) error { logger.Info("ℹ️ starting duty processing") - return dutyRunner.StartNewDuty(logger, duty, v.Operator.GetQuorum()) + return dutyRunner.StartNewDuty(logger, vDuty, v.Operator.GetQuorum()) } // ProcessMessage processes Network Message of all types @@ -157,15 +159,11 @@ func (v *Validator) ProcessMessage(logger *zap.Logger, msg *queue.SSVMessage) er if !ok { return errors.New("could not decode consensus message from network message") } - logger = v.loggerForDuty(logger, spectypes.BeaconRole(messageID.GetRoleType()), phase0.Slot(qbftMsg.Height)) - - // Check signer consistency - if !msg.SignedSSVMessage.CommonSigners([]spectypes.OperatorID{msg.SignedSSVMessage.OperatorIDs[0]}) { // todo: array check - return errors.New("SignedSSVMessage's signer not consistent with SignedMessage's signers") + if err := qbftMsg.Validate(); err != nil { + return errors.Wrap(err, "invalid qbft Message") } - + logger = v.loggerForDuty(logger, spectypes.BeaconRole(messageID.GetRoleType()), phase0.Slot(qbftMsg.Height)) logger = logger.With(fields.Height(qbftMsg.Height)) - // Process return dutyRunner.ProcessConsensus(logger, msg.SignedSSVMessage) case spectypes.SSVPartialSignatureMsgType: logger = trySetDutyID(logger, v.dutyIDs, messageID.GetRoleType()) @@ -183,10 +181,6 @@ func (v *Validator) ProcessMessage(logger *zap.Logger, msg *queue.SSVMessage) er if err := signedMsg.ValidateForSigner(msg.SignedSSVMessage.OperatorIDs[0]); err != nil { return errors.Wrap(err, "invalid PartialSignatureMessages") } - // Check signer consistency - if signedMsg.Messages[0].Signer != msg.SignedSSVMessage.OperatorIDs[0] { - return errors.New("SignedSSVMessage's signer not consistent with SignedPartialSignatureMessage's signer") - } if signedMsg.Type == spectypes.PostConsensusPartialSig { return dutyRunner.ProcessPostConsensus(logger, signedMsg) diff --git a/scripts/spec-alignment/differ.config.yaml b/scripts/spec-alignment/differ.config.yaml index fc535738c4..0cb8f86fc7 100644 --- a/scripts/spec-alignment/differ.config.yaml +++ b/scripts/spec-alignment/differ.config.yaml @@ -1,4 +1,4 @@ -ApprovedChanges: ["50e5bb7eda99594e", 
"870a3a66aeccd737","4e22a08543b079b","56ceb03cd44ff702","188adfe8914e04c1","2438f9c5b82b69a3","1a716ee3bdb3170","90b166f78390af18","68219b82a1d9d829","c4c4caa5d0938b85","dfe99ce1d27b6cb1","35f5dab1f128d193","9a3973b64d7e8932","f33f07301a770d03","3e9e0dddfad3b302","d4fef6512374c1f5","b49f54cb45787e4b","59b2375130aef5df","f094cd0460432170","8e51881e527dd603","a7d6d58d9fa06379","1d124224ca4d0fe3","39ea06bfd1477d2d","7e2550bab51f22b2","87ebd29bd49fc52f","ef39dd5223e0d080","fe14e7f0503ea188","6146023d4d5708a2","aebb8e4348b6d667","973a2e6704dbf3","fb4cac598a68c592","257c7eb81d6eb245","2a8e94fe037e13fd","5e7eb878de54eec6","960a9c64cd4ec93c","57dfd255520bd849","ec333ff8a708db69","1cc1ff39ad91ee69","5714652b88e2d44f","7a53b3b037c56325","8c02ef1964464c30","19a268910a20da3d","af6e01ed565029f3","318b5169ac4dabb6","372c6b332e8ba699","c0d8a364c0db855a","4287381be4fb1841","b1614afc1da7794f","c214975412f3fd7","8bbf7eba3fa0cf7e","8e4ec8debe331b36","7a671d8fcefc3793","e2b0e9c6454c1c08","6707ecfefa5fec21","d5a7389d730464f1","8dfae3b3223d2de0","a81c092c985de728","968df5082c727ed6","9e53c73ee60b1cc2","9d265e99dd31d4f5","a34619e078d2e42f","17e8cec4f0625d53","e913f373aa88f333","cfc1e05c372d88dc","e5de6901d78b8833","57c1885b43dd8d19","e8a49856a5edd893","22ea21d10a2f861c","954e4fce01631c4e","108b9575f7c1d4bc","1f8d076449068f64","5a7ad98296703f6","159536003eeddac8","8ca8f82e67ddd3dd","16ebe47404323cc1","48bfe5cf1e578b47","dd83182b693a7216","308d21d9830f7047","6dde03147e012b1a","730c3e5e59393b7d","5b44a4b425ecc397","df5debc50ec8babc","92a41554b2910bb8","c36c680554dde59f","447feaa5cdc1a010","fda90c61f44cb149","cdbb4930eced584c","274336ec1127e6c0","2a496f5b3ad542d2","6b395912dde33b0e","cac56ec14994216b","8850900b5d9bcc65","15e7706486c6359e","cc22f28953b787ea","3bad6ae11596a574","8f84422a240d889c","5b265432dfbbaac7","43794bf5953db193","7975821460ebe1e7","173c505e12aabb8f","47ee0d148148a56f","8cc38593ebe049b6","bda3aec7157b095a","248712911696a851","f4d9c910f1dbaef7","1a2146fcad37acb8","b0b146f9bdab64b6","edfd442b4d725fbb","122f053573538a32","d720d714a20833e1", "f9c984e71b685f9b","8c6b4fee5a4c13ce","c0a8d2019a2c30d5", "717bef26105c733f","2f70630c27062353","2f70337ba7566a69","dd607a44e1341e6b","5210501625ac3de5","f786bf475b5085aa","18a66ed6e613d9c1","e8943e7741f6843d","276a489bd5a00032","ba3bba59f10bf6b","3c50ce0c8089d871","89ee72f6c610ab84","c92b95a85da2cb11","927ea6aed3f98f20","9338904026a0ce37","9683cfa19dc544a3","4d3fa2b8dfcb5f5b", "f19e9a2b295bcfb3", "b10199b2de6f03b8", "1afc17e358f9ca79","4b58762c0b433442","d293ec1bc61bb707","3e88c3b49d093605","4890ff80c88cc41d","5227ff3a225dd20d","81a60407a3a0ba80"] +ApprovedChanges: ["50e5bb7eda99594e", 
"870a3a66aeccd737","4e22a08543b079b","56ceb03cd44ff702","188adfe8914e04c1","2438f9c5b82b69a3","1a716ee3bdb3170","90b166f78390af18","68219b82a1d9d829","c4c4caa5d0938b85","dfe99ce1d27b6cb1","35f5dab1f128d193","9a3973b64d7e8932","f33f07301a770d03","3e9e0dddfad3b302","d4fef6512374c1f5","b49f54cb45787e4b","59b2375130aef5df","f094cd0460432170","8e51881e527dd603","a7d6d58d9fa06379","1d124224ca4d0fe3","39ea06bfd1477d2d","7e2550bab51f22b2","87ebd29bd49fc52f","ef39dd5223e0d080","fe14e7f0503ea188","6146023d4d5708a2","aebb8e4348b6d667","973a2e6704dbf3","fb4cac598a68c592","257c7eb81d6eb245","2a8e94fe037e13fd","5e7eb878de54eec6","960a9c64cd4ec93c","57dfd255520bd849","ec333ff8a708db69","1cc1ff39ad91ee69","5714652b88e2d44f","7a53b3b037c56325","8c02ef1964464c30","19a268910a20da3d","af6e01ed565029f3","318b5169ac4dabb6","372c6b332e8ba699","c0d8a364c0db855a","4287381be4fb1841","b1614afc1da7794f","c214975412f3fd7","8bbf7eba3fa0cf7e","8e4ec8debe331b36","7a671d8fcefc3793","e2b0e9c6454c1c08","6707ecfefa5fec21","d5a7389d730464f1","8dfae3b3223d2de0","a81c092c985de728","968df5082c727ed6","9e53c73ee60b1cc2","9d265e99dd31d4f5","a34619e078d2e42f","17e8cec4f0625d53","e913f373aa88f333","cfc1e05c372d88dc","e5de6901d78b8833","57c1885b43dd8d19","e8a49856a5edd893","22ea21d10a2f861c","954e4fce01631c4e","108b9575f7c1d4bc","1f8d076449068f64","5a7ad98296703f6","159536003eeddac8","8ca8f82e67ddd3dd","16ebe47404323cc1","48bfe5cf1e578b47","dd83182b693a7216","308d21d9830f7047","6dde03147e012b1a","730c3e5e59393b7d","5b44a4b425ecc397","df5debc50ec8babc","92a41554b2910bb8","c36c680554dde59f","447feaa5cdc1a010","fda90c61f44cb149","cdbb4930eced584c","274336ec1127e6c0","2a496f5b3ad542d2","6b395912dde33b0e","cac56ec14994216b","8850900b5d9bcc65","15e7706486c6359e","cc22f28953b787ea","3bad6ae11596a574","8f84422a240d889c","5b265432dfbbaac7","43794bf5953db193","7975821460ebe1e7","173c505e12aabb8f","47ee0d148148a56f","8cc38593ebe049b6","bda3aec7157b095a","248712911696a851","f4d9c910f1dbaef7","1a2146fcad37acb8","b0b146f9bdab64b6","edfd442b4d725fbb","122f053573538a32","d720d714a20833e1", "f9c984e71b685f9b","8c6b4fee5a4c13ce","c0a8d2019a2c30d5", "717bef26105c733f","2f70630c27062353","2f70337ba7566a69","dd607a44e1341e6b","5210501625ac3de5","f786bf475b5085aa","18a66ed6e613d9c1","e8943e7741f6843d","276a489bd5a00032","ba3bba59f10bf6b","3c50ce0c8089d871","89ee72f6c610ab84","c92b95a85da2cb11","927ea6aed3f98f20","9338904026a0ce37","9683cfa19dc544a3","4d3fa2b8dfcb5f5b", "f19e9a2b295bcfb3", "b10199b2de6f03b8", "1afc17e358f9ca79","4b58762c0b433442","d293ec1bc61bb707","3e88c3b49d093605","4890ff80c88cc41d","5227ff3a225dd20d","81a60407a3a0ba80","db2ad807eb66254a","d308bd7c553ccdcf","bdaf172971637cbe","6ade9202843071fe","2fe8e14083997744","19c9a5362d1e1d3a","5956f803d239f178","92c55a4548a8b760","9a95524213bccfff","2f51a7338b86c229","e96966a281d74505","3ee479b9cbbc3a1d","82b392ba39c6c594","b9d2404e5c570019","24f528d85fb021f2","fe9609a785305d81","b0934079dcd986cc","a9c520a19b26049","d19a9403fd732d94","74a928f5dcb2fdd9","cbbfdb5e68cdac80","10e39d2ceda91f34","f99a004cf6697875","8fa5e8ebf7d223ec","6c80c145ba705243","fbabbc90d0b4178a"] IgnoredIdentifiers: - logger @@ -15,6 +15,7 @@ Comparisons: - Packages: Left: - ./protocol/v2/ssv + - ./protocol/v2/ssv/validator - ./protocol/v2/ssv/runner - ./protocol/v2/types Right: From 15dd0f89e2e24d3c3d5e78fe616adaf3fea61846 Mon Sep 17 00:00:00 2001 From: Anton Korpusenko Date: Mon, 16 Sep 2024 13:13:08 +0300 Subject: [PATCH 08/35] fix: Erroneous Threshold Logic in Key Splitting Mechanism (#1737) * fixed thereshold computation to 
use 3f+1 formula * use quorum instead of f for threshold --------- Co-authored-by: y0sher --- cli/export_keys_from_mnemonic.go | 2 +- cli/threshold.go | 17 ++++++++++++----- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/cli/export_keys_from_mnemonic.go b/cli/export_keys_from_mnemonic.go index 0c49b61ede..36811f00b9 100644 --- a/cli/export_keys_from_mnemonic.go +++ b/cli/export_keys_from_mnemonic.go @@ -17,7 +17,7 @@ import ( // exportKeysCmd is the command to export private/public keys based on given mnemonic var exportKeysCmd = &cobra.Command{ Use: "export-keys", - Short: "exports private/public keys based on given mnemonic", + Short: "exports private/public keys based on given mnemonic. For testing usage only", Run: func(cmd *cobra.Command, args []string) { if err := logging.SetGlobalLogger("dpanic", "capital", "console", nil); err != nil { log.Fatal(err) diff --git a/cli/threshold.go b/cli/threshold.go index c5b37f9aae..254898bd5b 100644 --- a/cli/threshold.go +++ b/cli/threshold.go @@ -6,17 +6,19 @@ import ( "github.com/herumi/bls-eth-go-binary/bls" "github.com/spf13/cobra" - "github.com/ssvlabs/ssv/logging" "go.uber.org/zap" - "github.com/ssvlabs/ssv/cli/flags" + "github.com/ssvlabs/ssv/logging" + ssvtypes "github.com/ssvlabs/ssv/protocol/v2/types" "github.com/ssvlabs/ssv/utils/threshold" + + "github.com/ssvlabs/ssv/cli/flags" ) // createThreshold is the command to create threshold based on the given private key var createThresholdCmd = &cobra.Command{ Use: "create-threshold", - Short: "Turns a private key into a threshold key", + Short: "Turns a private key into a threshold key. For testing usage only", Run: func(cmd *cobra.Command, args []string) { if err := logging.SetGlobalLogger("debug", "capital", "console", nil); err != nil { log.Fatal(err) @@ -33,15 +35,20 @@ var createThresholdCmd = &cobra.Command{ logger.Fatal("failed to get keys count flag value", zap.Error(err)) } + if !ssvtypes.ValidCommitteeSize(int(keysCount)) { + logger.Fatal("invalid keys count", zap.Int("keysCount", int(keysCount))) + } + baseKey := &bls.SecretKey{} if err := baseKey.SetHexString(privKey); err != nil { logger.Fatal("failed to set hex private key", zap.Error(err)) } // https://github.com/ethereum/eth2-ssv/issues/22 - // currently support 4 nodes threshold is keysCount-1(3). need to align based open the issue to + // currently support 4, 7, 10, 13 nodes threshold 3f+1. 
need to align based open the issue to // support k(2f+1) and n (3f+1) and allow to pass it as flag - privKeys, err := threshold.Create(baseKey.Serialize(), keysCount-1, keysCount) + quorum, _ := ssvtypes.ComputeQuorumAndPartialQuorum(int(keysCount)) + privKeys, err := threshold.Create(baseKey.Serialize(), quorum, keysCount) if err != nil { logger.Fatal("failed to turn a private key into a threshold key", zap.Error(err)) } From 0ef720b58ae8b848199c2a86213ef09e825f8745 Mon Sep 17 00:00:00 2001 From: Anton Korpusenko Date: Mon, 16 Sep 2024 14:27:34 +0300 Subject: [PATCH 09/35] added ownValidator field passing to ExitValidator (#1738) --- eth/ethtest/eth_e2e_test.go | 2 +- eth/eventhandler/event_handler.go | 10 ++++++++-- eth/eventhandler/task.go | 14 +++++++++++--- operator/validator/controller.go | 2 +- operator/validator/mocks/controller.go | 12 ++++++------ operator/validator/task_executor.go | 3 ++- 6 files changed, 29 insertions(+), 14 deletions(-) diff --git a/eth/ethtest/eth_e2e_test.go b/eth/ethtest/eth_e2e_test.go index c3ea9d3aa3..a25fc65386 100644 --- a/eth/ethtest/eth_e2e_test.go +++ b/eth/ethtest/eth_e2e_test.go @@ -173,7 +173,7 @@ func TestEthExecLayer(t *testing.T) { // Step 2: Exit validator { - validatorCtrl.EXPECT().ExitValidator(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + validatorCtrl.EXPECT().ExitValidator(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() shares := nodeStorage.Shares().List(nil) require.Equal(t, 7, len(shares)) diff --git a/eth/eventhandler/event_handler.go b/eth/eventhandler/event_handler.go index 1e9764ed82..f2f62da848 100644 --- a/eth/eventhandler/event_handler.go +++ b/eth/eventhandler/event_handler.go @@ -54,7 +54,7 @@ type taskExecutor interface { LiquidateCluster(owner ethcommon.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error ReactivateCluster(owner ethcommon.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error UpdateFeeRecipient(owner, recipient ethcommon.Address) error - ExitValidator(pubKey phase0.BLSPubKey, blockNumber uint64, validatorIndex phase0.ValidatorIndex) error + ExitValidator(pubKey phase0.BLSPubKey, blockNumber uint64, validatorIndex phase0.ValidatorIndex, ownValidator bool) error } type EventHandler struct { @@ -418,7 +418,13 @@ func (eh *EventHandler) processEvent(txn basedb.Txn, event ethtypes.Log) (Task, return nil, nil } - task := NewExitValidatorTask(eh.taskExecutor, exitDescriptor.PubKey, exitDescriptor.BlockNumber, exitDescriptor.ValidatorIndex) + task := NewExitValidatorTask( + eh.taskExecutor, + exitDescriptor.PubKey, + exitDescriptor.BlockNumber, + exitDescriptor.ValidatorIndex, + exitDescriptor.OwnValidator, + ) return task, nil default: diff --git a/eth/eventhandler/task.go b/eth/eventhandler/task.go index ce581e6d08..61d11d4517 100644 --- a/eth/eventhandler/task.go +++ b/eth/eventhandler/task.go @@ -133,7 +133,7 @@ func (t UpdateFeeRecipientTask) Execute() error { } type exitValidatorExecutor interface { - ExitValidator(pubKey phase0.BLSPubKey, blockNumber uint64, validatorIndex phase0.ValidatorIndex) error + ExitValidator(pubKey phase0.BLSPubKey, blockNumber uint64, validatorIndex phase0.ValidatorIndex, ownValidator bool) error } type ExitValidatorTask struct { @@ -141,17 +141,25 @@ type ExitValidatorTask struct { pubKey phase0.BLSPubKey blockNumber uint64 validatorIndex phase0.ValidatorIndex + ownValidator bool } -func NewExitValidatorTask(executor exitValidatorExecutor, pubKey phase0.BLSPubKey, blockNumber uint64, validatorIndex 
phase0.ValidatorIndex) *ExitValidatorTask { +func NewExitValidatorTask( + executor exitValidatorExecutor, + pubKey phase0.BLSPubKey, + blockNumber uint64, + validatorIndex phase0.ValidatorIndex, + ownValidator bool, +) *ExitValidatorTask { return &ExitValidatorTask{ executor: executor, pubKey: pubKey, blockNumber: blockNumber, validatorIndex: validatorIndex, + ownValidator: ownValidator, } } func (t ExitValidatorTask) Execute() error { - return t.executor.ExitValidator(t.pubKey, t.blockNumber, t.validatorIndex) + return t.executor.ExitValidator(t.pubKey, t.blockNumber, t.validatorIndex, t.ownValidator) } diff --git a/operator/validator/controller.go b/operator/validator/controller.go index a29ba89cde..2c31a1fe7e 100644 --- a/operator/validator/controller.go +++ b/operator/validator/controller.go @@ -142,7 +142,7 @@ type Controller interface { LiquidateCluster(owner common.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error ReactivateCluster(owner common.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error UpdateFeeRecipient(owner, recipient common.Address) error - ExitValidator(pubKey phase0.BLSPubKey, blockNumber uint64, validatorIndex phase0.ValidatorIndex) error + ExitValidator(pubKey phase0.BLSPubKey, blockNumber uint64, validatorIndex phase0.ValidatorIndex, ownValidator bool) error duties.DutyExecutor } diff --git a/operator/validator/mocks/controller.go b/operator/validator/mocks/controller.go index 178a59ebb7..bf9596075e 100644 --- a/operator/validator/mocks/controller.go +++ b/operator/validator/mocks/controller.go @@ -1,9 +1,9 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: ./controller.go +// Source: ./operator/validator/controller.go // // Generated by this command: // -// mockgen -package=mocks -destination=./mocks/controller.go -source=./controller.go +// mockgen -package=mocks -destination=./operator/validator/mocks/controller.go -source=./operator/validator/controller.go // // Package mocks is a generated GoMock package. @@ -101,17 +101,17 @@ func (mr *MockControllerMockRecorder) ExecuteGenesisDuty(logger, duty any) *gomo } // ExitValidator mocks base method. -func (m *MockController) ExitValidator(pubKey phase0.BLSPubKey, blockNumber uint64, validatorIndex phase0.ValidatorIndex) error { +func (m *MockController) ExitValidator(pubKey phase0.BLSPubKey, blockNumber uint64, validatorIndex phase0.ValidatorIndex, ownValidator bool) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExitValidator", pubKey, blockNumber, validatorIndex) + ret := m.ctrl.Call(m, "ExitValidator", pubKey, blockNumber, validatorIndex, ownValidator) ret0, _ := ret[0].(error) return ret0 } // ExitValidator indicates an expected call of ExitValidator. -func (mr *MockControllerMockRecorder) ExitValidator(pubKey, blockNumber, validatorIndex any) *gomock.Call { +func (mr *MockControllerMockRecorder) ExitValidator(pubKey, blockNumber, validatorIndex, ownValidator any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExitValidator", reflect.TypeOf((*MockController)(nil).ExitValidator), pubKey, blockNumber, validatorIndex) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExitValidator", reflect.TypeOf((*MockController)(nil).ExitValidator), pubKey, blockNumber, validatorIndex, ownValidator) } // ForkListener mocks base method. 
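As a minimal sketch (not part of the patches in this series; the external test package, literal values, and wiring are assumptions based on the files shown above), the widened four-argument ExitValidator can be pinned down in a unit test by stubbing the regenerated mock and executing the task directly:

package eventhandler_test

import (
	"testing"

	"github.com/attestantio/go-eth2-client/spec/phase0"
	"github.com/stretchr/testify/require"
	"go.uber.org/mock/gomock"

	"github.com/ssvlabs/ssv/eth/eventhandler"
	"github.com/ssvlabs/ssv/operator/validator/mocks"
)

func TestExitValidatorTask_PassesOwnValidator(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// The regenerated mock records all four arguments, so the test can assert
	// that ownValidator reaches the controller unchanged.
	validatorCtrl := mocks.NewMockController(ctrl)
	validatorCtrl.EXPECT().
		ExitValidator(gomock.Any(), gomock.Any(), gomock.Any(), true).
		Return(nil).
		Times(1)

	var pubKey phase0.BLSPubKey // zero value is enough for this sketch
	task := eventhandler.NewExitValidatorTask(validatorCtrl, pubKey, 100, phase0.ValidatorIndex(1), true)
	require.NoError(t, task.Execute())
}

The mock can be handed straight to NewExitValidatorTask because it structurally satisfies the unexported exitValidatorExecutor interface, whose single method now carries the ownValidator flag.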
diff --git a/operator/validator/task_executor.go b/operator/validator/task_executor.go index a168703bc6..7406204438 100644 --- a/operator/validator/task_executor.go +++ b/operator/validator/task_executor.go @@ -108,7 +108,7 @@ func (c *controller) UpdateFeeRecipient(owner, recipient common.Address) error { return nil } -func (c *controller) ExitValidator(pubKey phase0.BLSPubKey, blockNumber uint64, validatorIndex phase0.ValidatorIndex) error { +func (c *controller) ExitValidator(pubKey phase0.BLSPubKey, blockNumber uint64, validatorIndex phase0.ValidatorIndex, ownValidator bool) error { logger := c.taskLogger("ExitValidator", fields.PubKey(pubKey[:]), fields.BlockNumber(blockNumber), @@ -116,6 +116,7 @@ func (c *controller) ExitValidator(pubKey phase0.BLSPubKey, blockNumber uint64, ) exitDesc := duties.ExitDescriptor{ + OwnValidator: ownValidator, PubKey: pubKey, ValidatorIndex: validatorIndex, BlockNumber: blockNumber, From 487f5c31ff958d584dbe48bbb9ca2990ac24a5a3 Mon Sep 17 00:00:00 2001 From: Anton Korpusenko Date: Mon, 16 Sep 2024 14:42:22 +0300 Subject: [PATCH 10/35] fix: (eventhandler) pubkey & id uniqueness check in OperatorAdded (#1729) * added registering operator public key uniqueness check in OperatorAdded * fixed the test * Added one more test with same ids, but diff pubkeys * minor changes * fixed tests and logic * fixed error messages * minor renames * fixed unchanged error messages we expect in eth tests. simplified the logic in operatorAdded handler --------- Co-authored-by: moshe-blox --- eth/eventhandler/event_handler_test.go | 53 ++++++++++++++++++++++++++ eth/eventhandler/handlers.go | 32 +++++++++++----- 2 files changed, 76 insertions(+), 9 deletions(-) diff --git a/eth/eventhandler/event_handler_test.go b/eth/eventhandler/event_handler_test.go index 138e171329..d4b0d2a447 100644 --- a/eth/eventhandler/event_handler_test.go +++ b/eth/eventhandler/event_handler_test.go @@ -203,6 +203,59 @@ func TestHandleBlockEventsStream(t *testing.T) { } }) + t.Run("test OperatorAdded event fails for malformed event data", func(t *testing.T) { + t.Run("test OperatorAdded event handle with the same pubkey, but with a different id", func(t *testing.T) { + op := &testOperator{} + op.privateKey = ops[2].privateKey + op.id = 8 + + encodedPubKey, err := op.privateKey.Public().Base64() + require.NoError(t, err) + + operators, err := eh.nodeStorage.ListOperators(nil, 0, 0) + require.NoError(t, err) + require.Equal(t, len(ops), len(operators)) + + err = eh.handleOperatorAdded(nil, &contract.ContractOperatorAdded{ + OperatorId: op.id, + Owner: testAddr, + PublicKey: encodedPubKey, + }) + require.ErrorContains(t, err, "operator public key already exists") + + // check no operators were added + operators, err = eh.nodeStorage.ListOperators(nil, 0, 0) + require.NoError(t, err) + require.Equal(t, len(ops), len(operators)) + }) + t.Run("test OperatorAdded event handle with existing id and new pubkey", func(t *testing.T) { + privateKey, err := keys.GeneratePrivateKey() + require.NoError(t, err) + + op := &testOperator{} + op.id = ops[2].id + op.privateKey = privateKey + + encodedPubKey, err := op.privateKey.Public().Base64() + require.NoError(t, err) + + operators, err := eh.nodeStorage.ListOperators(nil, 0, 0) + require.NoError(t, err) + require.Equal(t, len(ops), len(operators)) + + err = eh.handleOperatorAdded(nil, &contract.ContractOperatorAdded{ + OperatorId: op.id, + Owner: testAddr, + PublicKey: encodedPubKey, + }) + require.ErrorContains(t, err, "operator ID already exists") + + // check no 
operators were added + operators, err = eh.nodeStorage.ListOperators(nil, 0, 0) + require.NoError(t, err) + require.Equal(t, len(ops), len(operators)) + }) + }) t.Run("test OperatorRemoved event handle", func(t *testing.T) { // Should return MalformedEventError and no changes to the state diff --git a/eth/eventhandler/handlers.go b/eth/eventhandler/handlers.go index 0fd1162d1d..be06fc9db5 100644 --- a/eth/eventhandler/handlers.go +++ b/eth/eventhandler/handlers.go @@ -25,7 +25,8 @@ import ( const encryptedKeyLength = 256 var ( - ErrAlreadyRegistered = fmt.Errorf("operator registered with the same operator public key") + ErrOperatorPubkeyAlreadyExists = fmt.Errorf("operator public key already exists") + ErrOperatorIDAlreadyExists = fmt.Errorf("operator ID already exists") ErrOperatorDataNotFound = fmt.Errorf("operator data not found") ErrIncorrectSharesLength = fmt.Errorf("shares length is not correct") ErrSignatureVerification = fmt.Errorf("signature verification failed") @@ -52,15 +53,28 @@ func (eh *EventHandler) handleOperatorAdded(txn basedb.Txn, event *contract.Cont ID: event.OperatorId, } - // throw an error if there is an existing operator with the same public key and different operator id - operatorData := eh.operatorDataStore.GetOperatorData() - if operatorData.ID != 0 && bytes.Equal(operatorData.PublicKey, event.PublicKey) && operatorData.ID != event.OperatorId { - logger.Warn("malformed event: operator registered with the same operator public key", - zap.Uint64("expected_operator_id", operatorData.ID)) - return &MalformedEventError{Err: ErrAlreadyRegistered} + // throw an error if operator with the same operator id already exists + existsById, err := eh.nodeStorage.OperatorsExist(txn, []spectypes.OperatorID{event.OperatorId}) + if err != nil { + return fmt.Errorf("could not check if operator exists: %w", err) + } + if existsById { + logger.Warn("malformed event: operator ID already exists", + fields.OperatorID(event.OperatorId)) + return &MalformedEventError{Err: ErrOperatorIDAlreadyExists} + } + + // throw an error if there is an existing operator with the same public key + operatorData, pubkeyExists, err := eh.nodeStorage.GetOperatorDataByPubKey(txn, event.PublicKey) + if err != nil { + return fmt.Errorf("could not get operator data by public key: %w", err) + } + if pubkeyExists { + logger.Warn("malformed event: operator public key already exists", + fields.OperatorPubKey(operatorData.PublicKey)) + return &MalformedEventError{Err: ErrOperatorPubkeyAlreadyExists} } - // TODO: consider saving other operators as well exists, err := eh.nodeStorage.SaveOperatorData(txn, od) if err != nil { return fmt.Errorf("save operator data: %w", err) @@ -70,7 +84,7 @@ func (eh *EventHandler) handleOperatorAdded(txn basedb.Txn, event *contract.Cont return nil } - if bytes.Equal(event.PublicKey, operatorData.PublicKey) { + if bytes.Equal(event.PublicKey, eh.operatorDataStore.GetOperatorData().PublicKey) { eh.operatorDataStore.SetOperatorData(od) logger = logger.With(zap.Bool("own_operator", true)) } From cce0af9580036f82071c62d5e80cbcd3a001d521 Mon Sep 17 00:00:00 2001 From: Anton Korpusenko Date: Mon, 16 Sep 2024 16:56:14 +0300 Subject: [PATCH 11/35] chore: removed startCommittee & startValidator methods (#1733) * removed startCommittee & startValidator methods * fixed tests * returned back StartValidator() call * returned startValidatorFunc as option back --- eth/ethtest/eth_e2e_test.go | 2 -- eth/eventhandler/event_handler.go | 5 +-- eth/eventhandler/task.go | 20 ------------ 
eth/eventhandler/task_executor_test.go | 42 +----------------------- operator/validator/controller.go | 44 +++----------------------- operator/validator/mocks/controller.go | 39 ++++++++--------------- operator/validator/task_executor.go | 10 ------ 7 files changed, 19 insertions(+), 143 deletions(-) diff --git a/eth/ethtest/eth_e2e_test.go b/eth/ethtest/eth_e2e_test.go index a25fc65386..9b95c88cb6 100644 --- a/eth/ethtest/eth_e2e_test.go +++ b/eth/ethtest/eth_e2e_test.go @@ -146,8 +146,6 @@ func TestEthExecLayer(t *testing.T) { // Step 1: Add more validators { - validatorCtrl.EXPECT().StartValidator(gomock.Any()).AnyTimes() - // Check current nonce before start nonce, err := nodeStorage.GetNextNonce(nil, testAddrAlice) require.NoError(t, err) diff --git a/eth/eventhandler/event_handler.go b/eth/eventhandler/event_handler.go index f2f62da848..547e3e04cf 100644 --- a/eth/eventhandler/event_handler.go +++ b/eth/eventhandler/event_handler.go @@ -49,7 +49,6 @@ var ( ) type taskExecutor interface { - StartValidator(share *ssvtypes.SSVShare) error StopValidator(pubKey spectypes.ValidatorPK) error LiquidateCluster(owner ethcommon.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error ReactivateCluster(owner ethcommon.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error @@ -266,9 +265,7 @@ func (eh *EventHandler) processEvent(txn basedb.Txn, event ethtypes.Log) (Task, return nil, nil } - task := NewStartValidatorTask(eh.taskExecutor, share) - - return task, nil + return nil, nil case ValidatorRemoved: validatorRemovedEvent, err := eh.eventParser.ParseValidatorRemoved(event) diff --git a/eth/eventhandler/task.go b/eth/eventhandler/task.go index 61d11d4517..67c3d90ad0 100644 --- a/eth/eventhandler/task.go +++ b/eth/eventhandler/task.go @@ -12,26 +12,6 @@ type Task interface { Execute() error } -type startValidatorExecutor interface { - StartValidator(share *types.SSVShare) error -} - -type StartValidatorTask struct { - executor startValidatorExecutor - share *types.SSVShare -} - -func NewStartValidatorTask(executor startValidatorExecutor, share *types.SSVShare) *StartValidatorTask { - return &StartValidatorTask{ - executor: executor, - share: share, - } -} - -func (t StartValidatorTask) Execute() error { - return t.executor.StartValidator(t.share) -} - type stopValidatorExecutor interface { StopValidator(pubKey spectypes.ValidatorPK) error } diff --git a/eth/eventhandler/task_executor_test.go b/eth/eventhandler/task_executor_test.go index 79950d57c2..a2883df7d8 100644 --- a/eth/eventhandler/task_executor_test.go +++ b/eth/eventhandler/task_executor_test.go @@ -5,7 +5,7 @@ import ( "encoding/binary" "testing" - gomock "go.uber.org/mock/gomock" + "go.uber.org/mock/gomock" ethcommon "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" @@ -15,7 +15,6 @@ import ( "go.uber.org/zap/zaptest/observer" "github.com/ssvlabs/ssv/eth/executionclient" - beaconprotocol "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon" ssvtypes "github.com/ssvlabs/ssv/protocol/v2/types" "github.com/ssvlabs/ssv/registry/storage" ) @@ -55,45 +54,6 @@ func TestExecuteTask(t *testing.T) { eh, validatorCtrl, err := setupEventHandler(t, ctx, logger, nil, ops[0], true) require.NoError(t, err) - t.Run("test AddValidator task execution - not started", func(t *testing.T) { - logValidatorAdded := unmarshalLog(t, rawValidatorAdded) - validatorAddedEvent, err := eh.eventParser.ParseValidatorAdded(logValidatorAdded) - if err != nil { - t.Fatal("parse 
ValidatorAdded", err) - } - share := &ssvtypes.SSVShare{ - Share: spectypes.Share{ - ValidatorPubKey: spectypes.ValidatorPK(validatorAddedEvent.PublicKey), - }, - } - validatorCtrl.EXPECT().StartValidator(gomock.Any()).Return(nil).AnyTimes() - - task := NewStartValidatorTask(eh.taskExecutor, share) - require.NoError(t, task.Execute()) - }) - - // Currently Start Validator is a no-op in Controller, but we need to check this anyway - t.Run("test AddValidator task execution - started", func(t *testing.T) { - logValidatorAdded := unmarshalLog(t, rawValidatorAdded) - validatorAddedEvent, err := eh.eventParser.ParseValidatorAdded(logValidatorAdded) - if err != nil { - t.Fatal("parse ValidatorAdded", err) - } - share := &ssvtypes.SSVShare{ - Share: spectypes.Share{ - ValidatorPubKey: spectypes.ValidatorPK(validatorAddedEvent.PublicKey), - }, - Metadata: ssvtypes.Metadata{ - BeaconMetadata: &beaconprotocol.ValidatorMetadata{ - Index: 1, - }, - }, - } - - validatorCtrl.EXPECT().StartValidators().AnyTimes() - task := NewStartValidatorTask(eh.taskExecutor, share) - require.NoError(t, task.Execute()) - }) valPk := "b24454393691331ee6eba4ffa2dbb2600b9859f908c3e648b6c6de9e1dea3e9329866015d08355c8d451427762b913d1" t.Run("test StopValidator task execution", func(t *testing.T) { diff --git a/operator/validator/controller.go b/operator/validator/controller.go index 2c31a1fe7e..9ba9a0fc73 100644 --- a/operator/validator/controller.go +++ b/operator/validator/controller.go @@ -137,7 +137,6 @@ type Controller interface { IndicesChangeChan() chan struct{} ValidatorExitChan() <-chan duties.ExitDescriptor - StartValidator(share *ssvtypes.SSVShare) error StopValidator(pubKey spectypes.ValidatorPK) error LiquidateCluster(owner common.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error ReactivateCluster(owner common.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error @@ -618,17 +617,7 @@ func (c *controller) startValidators(validators []*validators.ValidatorContainer } } - for _, vc := range committees { - s, err := c.startCommittee(vc) - if err != nil { - c.logger.Error("could not start committee", zap.Error(err)) - errs = append(errs, err) - continue - } - if s { - started++ - } - } + started += len(committees) c.logger.Info("setup validators done", zap.Int("map size", c.validatorsMap.SizeValidators()), zap.Int("failures", len(errs)), @@ -690,10 +679,6 @@ func (c *controller) UpdateValidatorsMetadata(data map[spectypes.ValidatorPK]*be vc, found := c.validatorsMap.GetCommittee(v.Share().CommitteeID()) if found { vc.AddShare(&v.Share().Share) - _, err := c.startCommittee(vc) - if err != nil { - c.logger.Warn("could not start committee", zap.Error(err)) - } } } else { c.logger.Info("starting new validator", fields.PubKey(share.ValidatorPubKey[:])) @@ -1034,7 +1019,7 @@ func (c *controller) committeeMemberFromShare(share *ssvtypes.SSVShare) (*specty } func (c *controller) onShareStart(share *ssvtypes.SSVShare) (bool, error) { - v, vc, err := c.onShareInit(share) + v, _, err := c.onShareInit(share) if err != nil || v == nil { return false, err } @@ -1043,11 +1028,8 @@ func (c *controller) onShareStart(share *ssvtypes.SSVShare) (bool, error) { if err != nil { return false, err } - vcstarted, err := c.startCommittee(vc) - if err != nil { - return false, err - } - return started && vcstarted, nil + + return started, nil } func (c *controller) printShare(s *ssvtypes.SSVShare, msg string) { @@ -1091,8 +1073,6 @@ func (c *controller) validatorStart(validator 
*validators.ValidatorContainer) (b return c.validatorStartFunc(validator) } -//func (c *controller) startValidatorAndCommittee(v *val) - // startValidator will start the given validator if applicable func (c *controller) startValidator(v *validators.ValidatorContainer) (bool, error) { c.reportValidatorStatus(v.Share().ValidatorPubKey[:], v.Share().BeaconMetadata) @@ -1111,22 +1091,6 @@ func (c *controller) startValidator(v *validators.ValidatorContainer) (bool, err return true, nil } -func (c *controller) startCommittee(vc *validator.Committee) (bool, error) { - //TODO alan: currently nothing to start in committee? - // c.logger.Debug("committee started ", zap.String("committee_id", hex.EncodeToString(vc.Operator.ClusterID[:]))) - //cstarted, err := vc.Start() // TODO alan : make it testable - //if err != nil { - // // todo alan: metrics - // //c.metrics.ValidatorError(vc.Share.ValidatorPubKey[:]) - // return false, errors.Wrap(err, "could not start committee") - //} - //if cstarted { - // c.recentlyStartedCommittees++ - //} - - return true, nil -} - func (c *controller) ForkListener(logger *zap.Logger) { if c.networkConfig.PastAlanFork() { return diff --git a/operator/validator/mocks/controller.go b/operator/validator/mocks/controller.go index bf9596075e..2ec55ecd50 100644 --- a/operator/validator/mocks/controller.go +++ b/operator/validator/mocks/controller.go @@ -10,21 +10,22 @@ package mocks import ( - reflect "reflect" + "reflect" - phase0 "github.com/attestantio/go-eth2-client/spec/phase0" - common "github.com/ethereum/go-ethereum/common" - types "github.com/ssvlabs/ssv-spec-pre-cc/types" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethereum/go-ethereum/common" + "github.com/ssvlabs/ssv-spec-pre-cc/types" types0 "github.com/ssvlabs/ssv-spec/types" - network "github.com/ssvlabs/ssv/network" - duties "github.com/ssvlabs/ssv/operator/duties" - validators "github.com/ssvlabs/ssv/operator/validators" - beacon "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon" + "go.uber.org/mock/gomock" + "go.uber.org/zap" + + "github.com/ssvlabs/ssv/network" + "github.com/ssvlabs/ssv/operator/duties" + "github.com/ssvlabs/ssv/operator/validators" + "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon" types1 "github.com/ssvlabs/ssv/protocol/v2/types" - storage "github.com/ssvlabs/ssv/registry/storage" - basedb "github.com/ssvlabs/ssv/storage/basedb" - gomock "go.uber.org/mock/gomock" - zap "go.uber.org/zap" + "github.com/ssvlabs/ssv/registry/storage" + "github.com/ssvlabs/ssv/storage/basedb" ) // MockController is a mock of Controller interface. @@ -226,20 +227,6 @@ func (mr *MockControllerMockRecorder) StartNetworkHandlers() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartNetworkHandlers", reflect.TypeOf((*MockController)(nil).StartNetworkHandlers)) } -// StartValidator mocks base method. -func (m *MockController) StartValidator(share *types1.SSVShare) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StartValidator", share) - ret0, _ := ret[0].(error) - return ret0 -} - -// StartValidator indicates an expected call of StartValidator. -func (mr *MockControllerMockRecorder) StartValidator(share any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartValidator", reflect.TypeOf((*MockController)(nil).StartValidator), share) -} - // StartValidators mocks base method. 
func (m *MockController) StartValidators() { m.ctrl.T.Helper() diff --git a/operator/validator/task_executor.go b/operator/validator/task_executor.go index 7406204438..dd2a5db437 100644 --- a/operator/validator/task_executor.go +++ b/operator/validator/task_executor.go @@ -22,16 +22,6 @@ func (c *controller) taskLogger(taskName string, fields ...zap.Field) *zap.Logge With(fields...) } -func (c *controller) StartValidator(share *types.SSVShare) error { - // logger := c.taskLogger("StartValidator", fields.PubKey(share.ValidatorPubKey)) - - // Since we don't yet have the Beacon metadata for this validator, - // we can't yet start it. Starting happens in `UpdateValidatorMetaDataLoop`, - // so this task is currently a no-op. - - return nil -} - func (c *controller) StopValidator(pubKey spectypes.ValidatorPK) error { logger := c.taskLogger("StopValidator", fields.PubKey(pubKey[:])) From 011c98833c35ef240acd102aabe436b9b1869226 Mon Sep 17 00:00:00 2001 From: Anton Korpusenko Date: Tue, 17 Sep 2024 16:18:52 +0300 Subject: [PATCH 12/35] chore: update dependencies (#1742) * updated libp2p, go-eth2-client versions * returned go-eth2-client back to 0.21.7 --- go.mod | 8 ++++---- go.sum | 17 ++++++++--------- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/go.mod b/go.mod index 4414e3755f..036e060698 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/holiman/uint256 v1.3.1 github.com/ilyakaznacheev/cleanenv v1.4.2 github.com/jellydator/ttlcache/v3 v3.2.0 - github.com/libp2p/go-libp2p v0.36.1 + github.com/libp2p/go-libp2p v0.36.3 github.com/libp2p/go-libp2p-kad-dht v0.25.2 github.com/libp2p/go-libp2p-pubsub v0.11.0 github.com/microsoft/go-crypto-openssl v0.2.9 @@ -174,7 +174,7 @@ require ( github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pion/datachannel v1.5.8 // indirect github.com/pion/dtls/v2 v2.2.12 // indirect - github.com/pion/ice/v2 v2.3.32 // indirect + github.com/pion/ice/v2 v2.3.34 // indirect github.com/pion/interceptor v0.1.29 // indirect github.com/pion/logging v0.2.2 // indirect github.com/pion/mdns v0.0.12 // indirect @@ -185,9 +185,9 @@ require ( github.com/pion/sdp/v3 v3.0.9 // indirect github.com/pion/srtp/v2 v2.0.20 // indirect github.com/pion/stun v0.6.1 // indirect - github.com/pion/transport/v2 v2.2.9 // indirect + github.com/pion/transport/v2 v2.2.10 // indirect github.com/pion/turn/v2 v2.1.6 // indirect - github.com/pion/webrtc/v3 v3.2.50 // indirect + github.com/pion/webrtc/v3 v3.3.0 // indirect github.com/pk910/dynamic-ssz v0.0.3 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect diff --git a/go.sum b/go.sum index 5c3aea3628..923e396861 100644 --- a/go.sum +++ b/go.sum @@ -413,8 +413,8 @@ github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38y github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.36.1 h1:piAHesy0/8ifBEBUS8HF2m7ywR5vnktUFv00dTsVKcs= -github.com/libp2p/go-libp2p v0.36.1/go.mod h1:vHzel3CpRB+vS11fIjZSJAU4ALvieKV9VZHC9VerHj8= +github.com/libp2p/go-libp2p v0.36.3 h1:NHz30+G7D8Y8YmznrVZZla0ofVANrvBl2c+oARfMeDQ= +github.com/libp2p/go-libp2p v0.36.3/go.mod h1:4Y5vFyCUiJuluEPmpnKYf6WFx5ViKPUYs/ixe9ANFZ8= github.com/libp2p/go-libp2p-asn-util v0.4.1 
h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-kad-dht v0.25.2 h1:FOIk9gHoe4YRWXTu8SY9Z1d0RILol0TrtApsMDPjAVQ= @@ -573,8 +573,8 @@ github.com/pion/datachannel v1.5.8/go.mod h1:PgmdpoaNBLX9HNzNClmdki4DYW5JtI7Yibu github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 v2.3.32 h1:VwE/uEeqiMm0zUWpdt1DJtnqEkj3UjEbhX92/CurtWI= -github.com/pion/ice/v2 v2.3.32/go.mod h1:8fac0+qftclGy1tYd/nfwfHC729BLaxtVqMdMVCAVPU= +github.com/pion/ice/v2 v2.3.34 h1:Ic1ppYCj4tUOcPAp76U6F3fVrlSw8A9JtRXLqw6BbUM= +github.com/pion/ice/v2 v2.3.34/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= @@ -600,17 +600,16 @@ github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/ github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= -github.com/pion/transport/v2 v2.2.8/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= -github.com/pion/transport/v2 v2.2.9 h1:WEDygVovkJlV2CCunM9KS2kds+kcl7zdIefQA5y/nkE= -github.com/pion/transport/v2 v2.2.9/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= +github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= +github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= github.com/pion/transport/v3 v3.0.6 h1:k1mQU06bmmX143qSWgXFqSH1KUJceQvIUuVH/K5ELWw= github.com/pion/transport/v3 v3.0.6/go.mod h1:HvJr2N/JwNJAfipsRleqwFoR3t/pWyHeZUs89v3+t5s= github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= -github.com/pion/webrtc/v3 v3.2.50 h1:C/rwL2mBfCxHv6tlLzDAO3krJpQXfVx8A8WHnGJ2j34= -github.com/pion/webrtc/v3 v3.2.50/go.mod h1:dytYYoSBy7ZUWhJMbndx9UckgYvzNAfL7xgVnrIKxqo= +github.com/pion/webrtc/v3 v3.3.0 h1:Rf4u6n6U5t5sUxhYPQk/samzU/oDv7jk6BA5hyO2F9I= +github.com/pion/webrtc/v3 v3.3.0/go.mod h1:hVmrDJvwhEertRWObeb1xzulzHGeVUoPlWvxdGzcfU0= github.com/pk910/dynamic-ssz v0.0.3 h1:fCWzFowq9P6SYCc7NtJMkZcIHk+r5hSVD+32zVi6Aio= github.com/pk910/dynamic-ssz v0.0.3/go.mod h1:b6CrLaB2X7pYA+OSEEbkgXDEcRnjLOZIxZTsMuO/Y9c= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= From ce465c879eb6f7bee833d0b66b2e461533de775e Mon Sep 17 00:00:00 2001 From: moshe-blox <89339422+moshe-blox@users.noreply.github.com> Date: Tue, 17 Sep 2024 16:53:20 +0300 Subject: [PATCH 13/35] fix: (DutyScheduler) duties reset race condition (#1741) * fix: (DutyScheduler) duties reset race condition * refactors * fix missing lock * fix test * optimization --- 
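Note: the race being fixed came from repopulating the duty store in two steps, a Reset/ResetEpoch followed by one Add call per duty, which left a window where concurrent readers (message validation, duty execution) could observe an emptied or half-filled epoch. The change below drops the incremental Add API in favor of an atomic Set: the new map is built outside the critical section and swapped in under a single lock. A minimal sketch of that pattern follows, with simplified names (epochStore and Get here are illustrative, not the actual dutystore API):

package dutystoresketch

import (
	"sync"

	"github.com/attestantio/go-eth2-client/spec/phase0"
)

// epochStore is a simplified stand-in for dutystore.Duties.
type epochStore[D any] struct {
	mu sync.RWMutex
	m  map[phase0.Epoch]map[phase0.ValidatorIndex]*D
}

// Set replaces an epoch's duties in one critical section: readers see either
// the old map or the fully built new one, never a "reset but not yet
// repopulated" state.
func (s *epochStore[D]) Set(epoch phase0.Epoch, duties map[phase0.ValidatorIndex]*D) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.m == nil {
		s.m = make(map[phase0.Epoch]map[phase0.ValidatorIndex]*D)
	}
	s.m[epoch] = duties
}

// Get is the reader side (e.g. message validation looking up a duty).
func (s *epochStore[D]) Get(epoch phase0.Epoch, index phase0.ValidatorIndex) *D {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.m[epoch][index]
}

The real dutystore keys attester and proposer duties by epoch and slot, and sync-committee duties by period, but the locking pattern is the same.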
message/validation/genesis/validation_test.go | 8 +++- message/validation/validation_test.go | 48 ++++++++++++++----- operator/duties/attester.go | 14 +++--- operator/duties/dutystore/duties.go | 41 ++++++++-------- operator/duties/dutystore/sync_committee.go | 32 +++++++------ operator/duties/proposer.go | 9 +++- operator/duties/sync_committee.go | 9 +++- 7 files changed, 103 insertions(+), 58 deletions(-) diff --git a/message/validation/genesis/validation_test.go b/message/validation/genesis/validation_test.go index 3d4d10d511..82585fbe02 100644 --- a/message/validation/genesis/validation_test.go +++ b/message/validation/genesis/validation_test.go @@ -850,7 +850,9 @@ func Test_ValidateSSVMessage(t *testing.T) { height := specqbft.Height(slot) dutyStore := dutystore.New() - dutyStore.Proposer.Add(epoch, slot, validatorIndex+1, ð2apiv1.ProposerDuty{}, true) + dutyStore.Proposer.Set(epoch, []dutystore.StoreDuty[eth2apiv1.ProposerDuty]{ + {Slot: slot, ValidatorIndex: validatorIndex + 1, Duty: ð2apiv1.ProposerDuty{}, InCommittee: true}, + }) validator := New(netCfg, WithNodeStorage(ns), WithDutyStore(dutyStore)).(*messageValidator) validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) @@ -872,7 +874,9 @@ func Test_ValidateSSVMessage(t *testing.T) { require.ErrorContains(t, err, ErrNoDuty.Error()) dutyStore = dutystore.New() - dutyStore.Proposer.Add(epoch, slot, validatorIndex, ð2apiv1.ProposerDuty{}, true) + dutyStore.Proposer.Set(epoch, []dutystore.StoreDuty[eth2apiv1.ProposerDuty]{ + {Slot: slot, ValidatorIndex: validatorIndex, Duty: ð2apiv1.ProposerDuty{}, InCommittee: true}, + }) validator = New(netCfg, WithNodeStorage(ns), WithDutyStore(dutyStore)).(*messageValidator) timeToWait, err = validator.waitAfterSlotStart(spectypes.BNRoleProposer) require.NoError(t, err) diff --git a/message/validation/validation_test.go b/message/validation/validation_test.go index 7d084d43ce..9935bf3a4a 100644 --- a/message/validation/validation_test.go +++ b/message/validation/validation_test.go @@ -562,9 +562,11 @@ func Test_ValidateSSVMessage(t *testing.T) { epoch := phase0.Epoch(1) slot := netCfg.Beacon.FirstSlotAtEpoch(epoch) - dutyStore.Proposer.Add(epoch, slot, shares.active.ValidatorIndex, ð2apiv1.ProposerDuty{}, true) - dutyStore.Proposer.Add(epoch, slot+4, shares.active.ValidatorIndex, ð2apiv1.ProposerDuty{}, true) - dutyStore.Proposer.Add(epoch, slot+8, shares.active.ValidatorIndex, ð2apiv1.ProposerDuty{}, true) + dutyStore.Proposer.Set(epoch, []dutystore.StoreDuty[eth2apiv1.ProposerDuty]{ + {Slot: slot, ValidatorIndex: shares.active.ValidatorIndex, Duty: ð2apiv1.ProposerDuty{}, InCommittee: true}, + {Slot: slot + 4, ValidatorIndex: shares.active.ValidatorIndex, Duty: ð2apiv1.ProposerDuty{}, InCommittee: true}, + {Slot: slot + 8, ValidatorIndex: shares.active.ValidatorIndex, Duty: ð2apiv1.ProposerDuty{}, InCommittee: true}, + }) role := spectypes.RoleAggregator identifier := spectypes.NewMsgID(netCfg.DomainType(), ks.ValidatorPK.Serialize(), role) @@ -589,7 +591,9 @@ func Test_ValidateSSVMessage(t *testing.T) { slot := netCfg.Beacon.FirstSlotAtEpoch(epoch) ds := dutystore.New() - ds.Proposer.Add(epoch, slot, shares.active.ValidatorIndex+1, ð2apiv1.ProposerDuty{}, true) + ds.Proposer.Set(epoch, []dutystore.StoreDuty[eth2apiv1.ProposerDuty]{ + {Slot: slot, ValidatorIndex: shares.active.ValidatorIndex + 1, Duty: ð2apiv1.ProposerDuty{}, InCommittee: true}, + }) validator := New(netCfg, validatorStore, ds, signatureVerifier).(*messageValidator) identifier := 
spectypes.NewMsgID(netCfg.DomainType(), ks.ValidatorPK.Serialize(), spectypes.RoleProposer) @@ -600,7 +604,9 @@ func Test_ValidateSSVMessage(t *testing.T) { require.ErrorContains(t, err, ErrNoDuty.Error()) ds = dutystore.New() - ds.Proposer.Add(epoch, slot, shares.active.ValidatorIndex, ð2apiv1.ProposerDuty{}, true) + ds.Proposer.Set(epoch, []dutystore.StoreDuty[eth2apiv1.ProposerDuty]{ + {Slot: slot, ValidatorIndex: shares.active.ValidatorIndex, Duty: ð2apiv1.ProposerDuty{}, InCommittee: true}, + }) validator = New(netCfg, validatorStore, ds, signatureVerifier).(*messageValidator) _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, netCfg.Beacon.GetSlotStartTime(slot)) require.NoError(t, err) @@ -728,8 +734,12 @@ func Test_ValidateSSVMessage(t *testing.T) { subtestName := fmt.Sprintf("%v/%v", message.RunnerRoleToString(role), message.PartialMsgTypeToString(msgType)) t.Run(subtestName, func(t *testing.T) { ds := dutystore.New() - ds.Proposer.Add(spectestingutils.TestingDutyEpoch, spectestingutils.TestingDutySlot, shares.active.ValidatorIndex, ð2apiv1.ProposerDuty{}, true) - ds.SyncCommittee.Add(0, shares.active.ValidatorIndex, ð2apiv1.SyncCommitteeDuty{}, true) + ds.Proposer.Set(spectestingutils.TestingDutyEpoch, []dutystore.StoreDuty[eth2apiv1.ProposerDuty]{ + {Slot: spectestingutils.TestingDutySlot, ValidatorIndex: shares.active.ValidatorIndex, Duty: ð2apiv1.ProposerDuty{}, InCommittee: true}, + }) + ds.SyncCommittee.Set(0, []dutystore.StoreSyncCommitteeDuty{ + {ValidatorIndex: shares.active.ValidatorIndex, Duty: ð2apiv1.SyncCommitteeDuty{}, InCommittee: true}, + }) ds.VoluntaryExit.AddDuty(spectestingutils.TestingDutySlot, phase0.BLSPubKey(shares.active.ValidatorPubKey)) validator := New(netCfg, validatorStore, ds, signatureVerifier).(*messageValidator) @@ -805,8 +815,12 @@ func Test_ValidateSSVMessage(t *testing.T) { subtestName := fmt.Sprintf("%v/%v", message.RunnerRoleToString(role), message.PartialMsgTypeToString(msgType)) t.Run(subtestName, func(t *testing.T) { ds := dutystore.New() - ds.Proposer.Add(spectestingutils.TestingDutyEpoch, spectestingutils.TestingDutySlot, shares.active.ValidatorIndex, ð2apiv1.ProposerDuty{}, true) - ds.SyncCommittee.Add(0, shares.active.ValidatorIndex, ð2apiv1.SyncCommitteeDuty{}, true) + ds.Proposer.Set(spectestingutils.TestingDutyEpoch, []dutystore.StoreDuty[eth2apiv1.ProposerDuty]{ + {Slot: spectestingutils.TestingDutySlot, ValidatorIndex: shares.active.ValidatorIndex, Duty: ð2apiv1.ProposerDuty{}, InCommittee: true}, + }) + ds.SyncCommittee.Set(0, []dutystore.StoreSyncCommitteeDuty{ + {ValidatorIndex: shares.active.ValidatorIndex, Duty: ð2apiv1.SyncCommitteeDuty{}, InCommittee: true}, + }) validator := New(netCfg, validatorStore, ds, signatureVerifier).(*messageValidator) @@ -1001,8 +1015,12 @@ func Test_ValidateSSVMessage(t *testing.T) { slot := netCfg.Beacon.FirstSlotAtEpoch(epoch) ds := dutystore.New() - ds.Proposer.Add(epoch, slot, shares.active.ValidatorIndex, ð2apiv1.ProposerDuty{}, true) - ds.SyncCommittee.Add(0, shares.active.ValidatorIndex, ð2apiv1.SyncCommitteeDuty{}, true) + ds.Proposer.Set(epoch, []dutystore.StoreDuty[eth2apiv1.ProposerDuty]{ + {Slot: slot, ValidatorIndex: shares.active.ValidatorIndex, Duty: ð2apiv1.ProposerDuty{}, InCommittee: true}, + }) + ds.SyncCommittee.Set(0, []dutystore.StoreSyncCommitteeDuty{ + {ValidatorIndex: shares.active.ValidatorIndex, Duty: ð2apiv1.SyncCommitteeDuty{}, InCommittee: true}, + }) validator := New(netCfg, validatorStore, ds, signatureVerifier).(*messageValidator) @@ -1322,8 
+1340,12 @@ func Test_ValidateSSVMessage(t *testing.T) { slot := netCfg.Beacon.FirstSlotAtEpoch(epoch) ds := dutystore.New() - ds.Proposer.Add(epoch, slot, shares.active.ValidatorIndex, ð2apiv1.ProposerDuty{}, true) - ds.SyncCommittee.Add(0, shares.active.ValidatorIndex, ð2apiv1.SyncCommitteeDuty{}, true) + ds.Proposer.Set(epoch, []dutystore.StoreDuty[eth2apiv1.ProposerDuty]{ + {Slot: slot, ValidatorIndex: shares.active.ValidatorIndex, Duty: ð2apiv1.ProposerDuty{}, InCommittee: true}, + }) + ds.SyncCommittee.Set(0, []dutystore.StoreSyncCommitteeDuty{ + {ValidatorIndex: shares.active.ValidatorIndex, Duty: ð2apiv1.SyncCommitteeDuty{}, InCommittee: true}, + }) validator := New(netCfg, validatorStore, ds, signatureVerifier).(*messageValidator) diff --git a/operator/duties/attester.go b/operator/duties/attester.go index cc3971f9ea..396ba70cd2 100644 --- a/operator/duties/attester.go +++ b/operator/duties/attester.go @@ -80,10 +80,6 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { h.logger.Debug("🛠 ticker event", zap.String("epoch_slot_pos", buildStr)) h.processExecution(currentEpoch, slot) - if h.indicesChanged { - h.duties.ResetEpoch(currentEpoch) - h.indicesChanged = false - } h.processFetching(ctx, currentEpoch, slot) slotsPerEpoch := h.network.Beacon.SlotsPerEpoch() @@ -129,7 +125,6 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { buildStr := fmt.Sprintf("e%v-s%v-#%v", currentEpoch, slot, slot%32+1) h.logger.Info("🔁 indices change received", zap.String("epoch_slot_pos", buildStr)) - h.indicesChanged = true h.fetchCurrentEpoch = true // reset next epoch duties if in appropriate slot range @@ -215,10 +210,17 @@ func (h *AttesterHandler) fetchAndProcessDuties(ctx context.Context, epoch phase } specDuties := make([]*spectypes.ValidatorDuty, 0, len(duties)) + storeDuties := make([]dutystore.StoreDuty[eth2apiv1.AttesterDuty], 0, len(duties)) for _, d := range duties { - h.duties.Add(epoch, d.Slot, d.ValidatorIndex, d, true) + storeDuties = append(storeDuties, dutystore.StoreDuty[eth2apiv1.AttesterDuty]{ + Slot: d.Slot, + ValidatorIndex: d.ValidatorIndex, + Duty: d, + InCommittee: true, + }) specDuties = append(specDuties, h.toSpecDuty(d, spectypes.BNRoleAttester)) } + h.duties.Set(epoch, storeDuties) h.logger.Debug("🗂 got duties", fields.Count(len(duties)), diff --git a/operator/duties/dutystore/duties.go b/operator/duties/dutystore/duties.go index 50fd0d7e22..175d230ca0 100644 --- a/operator/duties/dutystore/duties.go +++ b/operator/duties/dutystore/duties.go @@ -8,22 +8,24 @@ import ( ) type Duty interface { - eth2apiv1.AttesterDuty | eth2apiv1.ProposerDuty | eth2apiv1.SyncCommitteeDuty + eth2apiv1.AttesterDuty | eth2apiv1.ProposerDuty } -type dutyDescriptor[D Duty] struct { - duty *D - inCommittee bool +type StoreDuty[D Duty] struct { + Slot phase0.Slot + ValidatorIndex phase0.ValidatorIndex + Duty *D + InCommittee bool } type Duties[D Duty] struct { mu sync.RWMutex - m map[phase0.Epoch]map[phase0.Slot]map[phase0.ValidatorIndex]dutyDescriptor[D] + m map[phase0.Epoch]map[phase0.Slot]map[phase0.ValidatorIndex]StoreDuty[D] } func NewDuties[D Duty]() *Duties[D] { return &Duties[D]{ - m: make(map[phase0.Epoch]map[phase0.Slot]map[phase0.ValidatorIndex]dutyDescriptor[D]), + m: make(map[phase0.Epoch]map[phase0.Slot]map[phase0.ValidatorIndex]StoreDuty[D]), } } @@ -43,8 +45,8 @@ func (d *Duties[D]) CommitteeSlotDuties(epoch phase0.Epoch, slot phase0.Slot) [] var duties []*D for _, descriptor := range descriptorMap { - if descriptor.inCommittee { - duties = append(duties, 
descriptor.duty) + if descriptor.InCommittee { + duties = append(duties, descriptor.Duty) } } @@ -70,23 +72,22 @@ func (d *Duties[D]) ValidatorDuty(epoch phase0.Epoch, slot phase0.Slot, validato return nil } - return descriptor.duty + return descriptor.Duty } -func (d *Duties[D]) Add(epoch phase0.Epoch, slot phase0.Slot, validatorIndex phase0.ValidatorIndex, duty *D, inCommittee bool) { +func (d *Duties[D]) Set(epoch phase0.Epoch, duties []StoreDuty[D]) { + mapped := make(map[phase0.Slot]map[phase0.ValidatorIndex]StoreDuty[D]) + for _, duty := range duties { + if _, ok := mapped[duty.Slot]; !ok { + mapped[duty.Slot] = make(map[phase0.ValidatorIndex]StoreDuty[D]) + } + mapped[duty.Slot][duty.ValidatorIndex] = duty + } + d.mu.Lock() defer d.mu.Unlock() - if _, ok := d.m[epoch]; !ok { - d.m[epoch] = make(map[phase0.Slot]map[phase0.ValidatorIndex]dutyDescriptor[D]) - } - if _, ok := d.m[epoch][slot]; !ok { - d.m[epoch][slot] = make(map[phase0.ValidatorIndex]dutyDescriptor[D]) - } - d.m[epoch][slot][validatorIndex] = dutyDescriptor[D]{ - duty: duty, - inCommittee: inCommittee, - } + d.m[epoch] = mapped } func (d *Duties[D]) ResetEpoch(epoch phase0.Epoch) { diff --git a/operator/duties/dutystore/sync_committee.go b/operator/duties/dutystore/sync_committee.go index 0ae13041c7..c6a28c999e 100644 --- a/operator/duties/dutystore/sync_committee.go +++ b/operator/duties/dutystore/sync_committee.go @@ -7,14 +7,20 @@ import ( "github.com/attestantio/go-eth2-client/spec/phase0" ) +type StoreSyncCommitteeDuty struct { + ValidatorIndex phase0.ValidatorIndex + Duty *eth2apiv1.SyncCommitteeDuty + InCommittee bool +} + type SyncCommitteeDuties struct { mu sync.RWMutex - m map[uint64]map[phase0.ValidatorIndex]dutyDescriptor[eth2apiv1.SyncCommitteeDuty] + m map[uint64]map[phase0.ValidatorIndex]StoreSyncCommitteeDuty } func NewSyncCommitteeDuties() *SyncCommitteeDuties { return &SyncCommitteeDuties{ - m: make(map[uint64]map[phase0.ValidatorIndex]dutyDescriptor[eth2apiv1.SyncCommitteeDuty]), + m: make(map[uint64]map[phase0.ValidatorIndex]StoreSyncCommitteeDuty), } } @@ -29,8 +35,8 @@ func (d *SyncCommitteeDuties) CommitteePeriodDuties(period uint64) []*eth2apiv1. 
var duties []*eth2apiv1.SyncCommitteeDuty for _, descriptor := range descriptorMap { - if descriptor.inCommittee { - duties = append(duties, descriptor.duty) + if descriptor.InCommittee { + duties = append(duties, descriptor.Duty) } } @@ -51,21 +57,19 @@ func (d *SyncCommitteeDuties) Duty(period uint64, validatorIndex phase0.Validato return nil } - return descriptor.duty + return descriptor.Duty } -func (d *SyncCommitteeDuties) Add(period uint64, validatorIndex phase0.ValidatorIndex, duty *eth2apiv1.SyncCommitteeDuty, inCommittee bool) { +func (d *SyncCommitteeDuties) Set(period uint64, duties []StoreSyncCommitteeDuty) { + mapped := make(map[phase0.ValidatorIndex]StoreSyncCommitteeDuty) + for _, duty := range duties { + mapped[duty.ValidatorIndex] = duty + } + d.mu.Lock() defer d.mu.Unlock() - if _, ok := d.m[period]; !ok { - d.m[period] = make(map[phase0.ValidatorIndex]dutyDescriptor[eth2apiv1.SyncCommitteeDuty]) - } - - d.m[period][validatorIndex] = dutyDescriptor[eth2apiv1.SyncCommitteeDuty]{ - duty: duty, - inCommittee: inCommittee, - } + d.m[period] = mapped } func (d *SyncCommitteeDuties) Reset(period uint64) { diff --git a/operator/duties/proposer.go b/operator/duties/proposer.go index 8056737a3c..dfeb2cfea4 100644 --- a/operator/duties/proposer.go +++ b/operator/duties/proposer.go @@ -181,11 +181,18 @@ func (h *ProposerHandler) fetchAndProcessDuties(ctx context.Context, epoch phase h.duties.ResetEpoch(epoch) specDuties := make([]*spectypes.ValidatorDuty, 0, len(duties)) + storeDuties := make([]dutystore.StoreDuty[eth2apiv1.ProposerDuty], 0, len(duties)) for _, d := range duties { _, inCommitteeDuty := selfIndicesSet[d.ValidatorIndex] - h.duties.Add(epoch, d.Slot, d.ValidatorIndex, d, inCommitteeDuty) + storeDuties = append(storeDuties, dutystore.StoreDuty[eth2apiv1.ProposerDuty]{ + Slot: d.Slot, + ValidatorIndex: d.ValidatorIndex, + Duty: d, + InCommittee: inCommitteeDuty, + }) specDuties = append(specDuties, h.toSpecDuty(d, spectypes.BNRoleProposer)) } + h.duties.Set(epoch, storeDuties) h.logger.Debug("📚 got duties", fields.Count(len(duties)), diff --git a/operator/duties/sync_committee.go b/operator/duties/sync_committee.go index a596106fb6..13f2776e16 100644 --- a/operator/duties/sync_committee.go +++ b/operator/duties/sync_committee.go @@ -218,11 +218,16 @@ func (h *SyncCommitteeHandler) fetchAndProcessDuties(ctx context.Context, period return fmt.Errorf("failed to fetch sync committee duties: %w", err) } - h.duties.Reset(period) + storeDuties := make([]dutystore.StoreSyncCommitteeDuty, 0, len(duties)) for _, d := range duties { _, inCommitteeDuty := inCommitteeIndicesSet[d.ValidatorIndex] - h.duties.Add(period, d.ValidatorIndex, d, inCommitteeDuty) + storeDuties = append(storeDuties, dutystore.StoreSyncCommitteeDuty{ + ValidatorIndex: d.ValidatorIndex, + Duty: d, + InCommittee: inCommitteeDuty, + }) } + h.duties.Set(period, storeDuties) h.prepareDutiesResultLog(period, duties, start) From d81128e00dcc19c2eb10f2909610b071675736ba Mon Sep 17 00:00:00 2001 From: Anton Korpusenko Date: Tue, 17 Sep 2024 17:08:29 +0300 Subject: [PATCH 14/35] fix: minor refactor ECDSAPrivFromInterface to remove ScalarBaseMult (#1734) * refactor ECDSAPrivFromInterface * removed usage ecdh * btcec impl usage for ECDSAPrivFromInterface --- network/commons/keys.go | 14 ++++---------- network/commons/keys_test.go | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 10 deletions(-) create mode 100644 network/commons/keys_test.go diff --git a/network/commons/keys.go 
b/network/commons/keys.go index 61536c90d6..59c9e23ade 100644 --- a/network/commons/keys.go +++ b/network/commons/keys.go @@ -5,11 +5,8 @@ import ( crand "crypto/rand" "crypto/rsa" "crypto/x509" - "math/big" "github.com/btcsuite/btcd/btcec/v2" - gcrypto "github.com/ethereum/go-ethereum/crypto" - "github.com/libp2p/go-libp2p/core/crypto" "github.com/pkg/errors" ) @@ -19,15 +16,12 @@ func ECDSAPrivFromInterface(privkey crypto.PrivKey) (*ecdsa.PrivateKey, error) { secpKey := privkey.(*crypto.Secp256k1PrivateKey) rawKey, err := secpKey.Raw() if err != nil { - return nil, errors.Wrap(err, "could mot convert ecdsa.PrivateKey") + return nil, errors.Wrap(err, "could not convert ecdsa.PrivateKey") } - privKey := new(ecdsa.PrivateKey) - k := new(big.Int).SetBytes(rawKey) - privKey.D = k - privKey.Curve = gcrypto.S256() // Temporary hack, so libp2p Secp256k1 is recognized as geth Secp256k1 in disc v5.1. - privKey.X, privKey.Y = gcrypto.S256().ScalarBaseMult(rawKey) - return privKey, nil + privKey, _ := btcec.PrivKeyFromBytes(rawKey) + + return privKey.ToECDSA(), nil } // ECDSAPrivToInterface converts ecdsa.PrivateKey to crypto.PrivKey diff --git a/network/commons/keys_test.go b/network/commons/keys_test.go new file mode 100644 index 0000000000..56ad6017a0 --- /dev/null +++ b/network/commons/keys_test.go @@ -0,0 +1,32 @@ +package commons + +import ( + "crypto/ecdsa" + "encoding/hex" + "testing" + + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/stretchr/testify/require" +) + +func TestECDSAPrivFromInterface(t *testing.T) { + hexKey := "0f042adb4a9b3401e0cebad1ff1865fcb3e849b9f2a4880d1b1c9844ba50c816" + + rawKey, err := hex.DecodeString(hexKey) + require.NoError(t, err) + + privKey, err := crypto.UnmarshalSecp256k1PrivateKey(rawKey) + require.NoError(t, err) + + ecdsaPrivKey, err := ECDSAPrivFromInterface(privKey) + require.NoError(t, err) + require.NotNil(t, ecdsaPrivKey) + + require.IsType(t, &ecdsa.PrivateKey{}, ecdsaPrivKey) + require.NotNil(t, ecdsaPrivKey.D) + require.NotNil(t, ecdsaPrivKey.X) + require.NotNil(t, ecdsaPrivKey.Y) + require.NotNil(t, ecdsaPrivKey.Curve) + + require.Equal(t, ecdsaPrivKey.D.String(), "6792055902439951130224479433662882604105028919500185693322687975860017874966") +} From 806b7ec73fa46d49e3920f26afc2a39f8d091a6e Mon Sep 17 00:00:00 2001 From: Anton Korpusenko Date: Wed, 18 Sep 2024 12:26:11 +0300 Subject: [PATCH 15/35] Added validator roles to p2p test SubscribeBroadcast (#1717) * added validator roles to p2p test SubscribeBroadcast * removed commented line --- network/p2p/p2p_test.go | 70 ++++++++++++++++++++++++++++++++++------- 1 file changed, 58 insertions(+), 12 deletions(-) diff --git a/network/p2p/p2p_test.go b/network/p2p/p2p_test.go index df72ede3ac..0ab9bc1154 100644 --- a/network/p2p/p2p_test.go +++ b/network/p2p/p2p_test.go @@ -78,13 +78,23 @@ func TestP2pNetwork_SubscribeBroadcast(t *testing.T) { go func() { defer wg.Done() - msg1 := generateMsg(spectestingutils.Testing4SharesSet(), 1) - msg3 := generateMsg(spectestingutils.Testing4SharesSet(), 3) - require.NoError(t, node1.Broadcast(msg1.SSVMessage.GetID(), msg1)) + msgCommittee1 := generateCommitteeMsg(spectestingutils.Testing4SharesSet(), 1) + msgCommittee3 := generateCommitteeMsg(spectestingutils.Testing4SharesSet(), 3) + msgProposer := generateValidatorMsg(spectestingutils.Testing4SharesSet(), 4, spectypes.RoleProposer) + msgSyncCommitteeContribution := generateValidatorMsg(spectestingutils.Testing4SharesSet(), 5, spectypes.RoleSyncCommitteeContribution) + msgRoleVoluntaryExit := 
generateValidatorMsg(spectestingutils.Testing4SharesSet(), 6, spectypes.RoleVoluntaryExit) + + require.NoError(t, node1.Broadcast(msgCommittee1.SSVMessage.GetID(), msgCommittee1)) + <-time.After(time.Millisecond * 20) + require.NoError(t, node2.Broadcast(msgCommittee3.SSVMessage.GetID(), msgCommittee3)) + <-time.After(time.Millisecond * 20) + require.NoError(t, node2.Broadcast(msgCommittee1.SSVMessage.GetID(), msgCommittee1)) + <-time.After(time.Millisecond * 20) + require.NoError(t, node2.Broadcast(msgProposer.SSVMessage.GetID(), msgProposer)) <-time.After(time.Millisecond * 20) - require.NoError(t, node2.Broadcast(msg3.SSVMessage.GetID(), msg3)) + require.NoError(t, node2.Broadcast(msgSyncCommitteeContribution.SSVMessage.GetID(), msgSyncCommitteeContribution)) <-time.After(time.Millisecond * 20) - require.NoError(t, node2.Broadcast(msg1.SSVMessage.GetID(), msg1)) + require.NoError(t, node1.Broadcast(msgRoleVoluntaryExit.SSVMessage.GetID(), msgRoleVoluntaryExit)) }() wg.Add(1) @@ -92,17 +102,24 @@ func TestP2pNetwork_SubscribeBroadcast(t *testing.T) { go func() { defer wg.Done() - msg1 := generateMsg(spectestingutils.Testing4SharesSet(), 1) - msg2 := generateMsg(spectestingutils.Testing4SharesSet(), 2) - msg3 := generateMsg(spectestingutils.Testing4SharesSet(), 3) + msgCommittee1 := generateCommitteeMsg(spectestingutils.Testing4SharesSet(), 1) + msgCommittee2 := generateCommitteeMsg(spectestingutils.Testing4SharesSet(), 2) + msgCommittee3 := generateCommitteeMsg(spectestingutils.Testing4SharesSet(), 3) + msgProposer := generateValidatorMsg(spectestingutils.Testing4SharesSet(), 4, spectypes.RoleProposer) + msgSyncCommitteeContribution := generateValidatorMsg(spectestingutils.Testing4SharesSet(), 5, spectypes.RoleSyncCommitteeContribution) + msgRoleVoluntaryExit := generateValidatorMsg(spectestingutils.Testing4SharesSet(), 6, spectypes.RoleVoluntaryExit) + require.NoError(t, err) time.Sleep(time.Millisecond * 20) - require.NoError(t, node1.Broadcast(msg2.SSVMessage.GetID(), msg2)) + require.NoError(t, node1.Broadcast(msgCommittee2.SSVMessage.GetID(), msgCommittee2)) time.Sleep(time.Millisecond * 20) - require.NoError(t, node2.Broadcast(msg1.SSVMessage.GetID(), msg1)) - require.NoError(t, node1.Broadcast(msg3.SSVMessage.GetID(), msg3)) + require.NoError(t, node2.Broadcast(msgCommittee1.SSVMessage.GetID(), msgCommittee1)) + require.NoError(t, node1.Broadcast(msgCommittee3.SSVMessage.GetID(), msgCommittee3)) + require.NoError(t, node1.Broadcast(msgProposer.SSVMessage.GetID(), msgProposer)) + require.NoError(t, node1.Broadcast(msgSyncCommitteeContribution.SSVMessage.GetID(), msgSyncCommitteeContribution)) + require.NoError(t, node2.Broadcast(msgRoleVoluntaryExit.SSVMessage.GetID(), msgRoleVoluntaryExit)) }() wg.Wait() @@ -126,7 +143,36 @@ func TestP2pNetwork_SubscribeBroadcast(t *testing.T) { } } -func generateMsg(ks *spectestingutils.TestKeySet, round specqbft.Round) *spectypes.SignedSSVMessage { +func generateValidatorMsg(ks *spectestingutils.TestKeySet, round specqbft.Round, nonCommitteeRole spectypes.RunnerRole) *spectypes.SignedSSVMessage { + if nonCommitteeRole == spectypes.RoleCommittee { + panic("committee role shouldn't be used here") + } + netCfg := networkconfig.TestNetwork + height := specqbft.Height(netCfg.Beacon.EstimatedCurrentSlot()) + + fullData := spectestingutils.TestingQBFTFullData + + nonCommitteeIdentifier := spectypes.NewMsgID(netCfg.DomainType(), ks.ValidatorPK.Serialize(), nonCommitteeRole) + + qbftMessage := &specqbft.Message{ + MsgType: specqbft.ProposalMsgType, + 
Height: height, + Round: round, + Identifier: nonCommitteeIdentifier[:], + Root: sha256.Sum256(fullData), + + RoundChangeJustification: [][]byte{}, + PrepareJustification: [][]byte{}, + } + + leader := roundLeader(ks, height, round) + signedSSVMessage := spectestingutils.SignQBFTMsg(ks.OperatorKeys[leader], leader, qbftMessage) + signedSSVMessage.FullData = fullData + + return signedSSVMessage +} + +func generateCommitteeMsg(ks *spectestingutils.TestKeySet, round specqbft.Round) *spectypes.SignedSSVMessage { netCfg := networkconfig.TestNetwork height := specqbft.Height(netCfg.Beacon.EstimatedCurrentSlot()) From f3af51f9d7fcc4932347c4ff46f753e733f9b413 Mon Sep 17 00:00:00 2001 From: moshe-blox <89339422+moshe-blox@users.noreply.github.com> Date: Wed, 18 Sep 2024 14:04:31 +0300 Subject: [PATCH 16/35] feat: duty stopping (#1743) * draft * draft * refactor * RLock * renames * pass spectest * added test * improve test * improve test * refactor * post-fork * fix * comments * finish runner when no valid duties * finish runner ProcessPostConsensus and stop queue on ErrNoValidDuties * revert deployment * comment * more detailed log * approve spec diffs * upgrade log to WARN --- operator/validator/controller.go | 3 +- protocol/v2/ssv/runner/committee.go | 92 ++++++++----- .../v2/ssv/spectest/msg_processing_type.go | 29 ++++- protocol/v2/ssv/spectest/ssv_mapping_test.go | 2 +- protocol/v2/ssv/testing/runner.go | 5 + protocol/v2/ssv/validator/committee.go | 50 +++---- protocol/v2/ssv/validator/committee_guard.go | 70 ++++++++++ .../v2/ssv/validator/committee_guard_test.go | 123 ++++++++++++++++++ protocol/v2/ssv/validator/committee_queue.go | 11 +- scripts/spec-alignment/differ.config.yaml | 2 +- 10 files changed, 314 insertions(+), 73 deletions(-) create mode 100644 protocol/v2/ssv/validator/committee_guard.go create mode 100644 protocol/v2/ssv/validator/committee_guard_test.go diff --git a/operator/validator/controller.go b/operator/validator/controller.go index 9ba9a0fc73..b5168d82d2 100644 --- a/operator/validator/controller.go +++ b/operator/validator/controller.go @@ -1251,7 +1251,7 @@ func SetupCommitteeRunners( return qbftCtrl } - return func(slot phase0.Slot, shares map[phase0.ValidatorIndex]*spectypes.Share, attestingValidators []spectypes.ShareValidatorPK) (*runner.CommitteeRunner, error) { + return func(slot phase0.Slot, shares map[phase0.ValidatorIndex]*spectypes.Share, attestingValidators []spectypes.ShareValidatorPK, dutyGuard runner.CommitteeDutyGuard) (*runner.CommitteeRunner, error) { // Create a committee runner. 
epoch := options.NetworkConfig.Beacon.GetBeaconNetwork().EstimatedEpochAtSlot(slot) valCheck := ssv.BeaconVoteValueCheckF(options.Signer, slot, attestingValidators, epoch) @@ -1264,6 +1264,7 @@ func SetupCommitteeRunners( options.Signer, options.OperatorSigner, valCheck, + dutyGuard, ) if err != nil { return nil, err diff --git a/protocol/v2/ssv/runner/committee.go b/protocol/v2/ssv/runner/committee.go index f9a26e8b59..d9b2aa65ef 100644 --- a/protocol/v2/ssv/runner/committee.go +++ b/protocol/v2/ssv/runner/committee.go @@ -4,6 +4,7 @@ import ( "crypto/sha256" "encoding/hex" "encoding/json" + "fmt" "time" "github.com/attestantio/go-eth2-client/spec/altair" @@ -23,14 +24,14 @@ import ( ssvtypes "github.com/ssvlabs/ssv/protocol/v2/types" ) -//type Broadcaster interface { -// Broadcast(msg *spectypes.SignedSSVMessage) error -//} -// -//type BeaconNode interface { -// DomainData(epoch phase0.Epoch, domain phase0.DomainType) (phase0.Domain, error) -// SubmitAttestation(attestation *phase0.Attestation) error -//} +var ( + ErrNoValidDuties = errors.New("no valid duties") +) + +type CommitteeDutyGuard interface { + StartDuty(role spectypes.BeaconRole, validator spectypes.ValidatorPK, slot phase0.Slot) error + ValidDuty(role spectypes.BeaconRole, validator spectypes.ValidatorPK, slot phase0.Slot) error +} type CommitteeRunner struct { BaseRunner *BaseRunner @@ -39,9 +40,9 @@ type CommitteeRunner struct { signer spectypes.BeaconSigner operatorSigner ssvtypes.OperatorSigner valCheck specqbft.ProposedValueCheckF + DutyGuard CommitteeDutyGuard - stoppedValidators map[spectypes.ValidatorPK]struct{} - submittedDuties map[spectypes.BeaconRole]map[phase0.ValidatorIndex]struct{} + submittedDuties map[spectypes.BeaconRole]map[phase0.ValidatorIndex]struct{} started time.Time metrics metrics.ConsensusMetrics @@ -56,6 +57,7 @@ func NewCommitteeRunner( signer spectypes.BeaconSigner, operatorSigner ssvtypes.OperatorSigner, valCheck specqbft.ProposedValueCheckF, + dutyGuard CommitteeDutyGuard, ) (Runner, error) { if len(share) == 0 { return nil, errors.New("no shares") @@ -68,18 +70,29 @@ func NewCommitteeRunner( Share: share, QBFTController: qbftController, }, - beacon: beacon, - network: network, - signer: signer, - operatorSigner: operatorSigner, - valCheck: valCheck, - stoppedValidators: make(map[spectypes.ValidatorPK]struct{}), - submittedDuties: make(map[spectypes.BeaconRole]map[phase0.ValidatorIndex]struct{}), - metrics: metrics.NewConsensusMetrics(spectypes.RoleCommittee), + beacon: beacon, + network: network, + signer: signer, + operatorSigner: operatorSigner, + valCheck: valCheck, + submittedDuties: make(map[spectypes.BeaconRole]map[phase0.ValidatorIndex]struct{}), + metrics: metrics.NewConsensusMetrics(spectypes.RoleCommittee), + DutyGuard: dutyGuard, }, nil } func (cr *CommitteeRunner) StartNewDuty(logger *zap.Logger, duty spectypes.Duty, quorum uint64) error { + d, ok := duty.(*spectypes.CommitteeDuty) + if !ok { + return errors.New("duty is not a CommitteeDuty") + } + for _, validatorDuty := range d.ValidatorDuties { + err := cr.DutyGuard.StartDuty(validatorDuty.Type, spectypes.ValidatorPK(validatorDuty.PubKey), d.DutySlot()) + if err != nil { + return fmt.Errorf("could not start %s duty at slot %d for validator %x: %w", + validatorDuty.Type, d.DutySlot(), validatorDuty.PubKey, err) + } + } err := cr.BaseRunner.baseStartNewDuty(logger, cr, duty, quorum) if err != nil { return err @@ -93,11 +106,6 @@ func (cr *CommitteeRunner) Encode() ([]byte, error) { return json.Marshal(cr) } -// StopDuty stops the 
duty for the given validator -func (cr *CommitteeRunner) StopDuty(validator spectypes.ValidatorPK) { - cr.stoppedValidators[validator] = struct{}{} -} - func (cr *CommitteeRunner) Decode(data []byte) error { return json.Unmarshal(data, &cr) } @@ -144,8 +152,6 @@ func (cr *CommitteeRunner) UnmarshalJSON(data []byte) error { signer spectypes.BeaconSigner operatorSigner ssvtypes.OperatorSigner valCheck specqbft.ProposedValueCheckF - // - //stoppedValidators map[spectypes.ValidatorPK]struct{} } // Unmarshal the JSON data into the auxiliary struct @@ -161,7 +167,6 @@ func (cr *CommitteeRunner) UnmarshalJSON(data []byte) error { cr.signer = aux.signer cr.operatorSigner = aux.operatorSigner cr.valCheck = aux.valCheck - //cr.stoppedValidators = aux.stoppedValidators return nil } @@ -216,9 +221,15 @@ func (cr *CommitteeRunner) ProcessConsensus(logger *zap.Logger, msg *spectypes.S } beaconVote := decidedValue.(*spectypes.BeaconVote) + validDuties := 0 for _, duty := range duty.(*spectypes.CommitteeDuty).ValidatorDuties { + if err := cr.DutyGuard.ValidDuty(duty.Type, spectypes.ValidatorPK(duty.PubKey), duty.DutySlot()); err != nil { + logger.Warn("duty is no longer valid", fields.Validator(duty.PubKey[:]), fields.BeaconRole(duty.Type), zap.Error(err)) + continue + } switch duty.Type { case spectypes.BNRoleAttester: + validDuties++ attestationData := constructAttestationData(beaconVote, duty) partialMsg, err := cr.BaseRunner.signBeaconObject(cr, duty, attestationData, duty.DutySlot(), spectypes.DomainAttester) @@ -240,6 +251,7 @@ func (cr *CommitteeRunner) ProcessConsensus(logger *zap.Logger, msg *spectypes.S zap.String("signature", hex.EncodeToString(partialMsg.PartialSignature[:])), ) case spectypes.BNRoleSyncCommittee: + validDuties++ blockRoot := beaconVote.BlockRoot partialMsg, err := cr.BaseRunner.signBeaconObject(cr, duty, spectypes.SSZBytes(blockRoot[:]), duty.DutySlot(), spectypes.DomainSyncCommittee) @@ -247,8 +259,14 @@ func (cr *CommitteeRunner) ProcessConsensus(logger *zap.Logger, msg *spectypes.S return errors.Wrap(err, "failed signing sync committee message") } postConsensusMsg.Messages = append(postConsensusMsg.Messages, partialMsg) + default: + return fmt.Errorf("invalid duty type: %s", duty.Type) } } + if validDuties == 0 { + cr.BaseRunner.State.Finished = true + return ErrNoValidDuties + } ssvMsg := &spectypes.SSVMessage{ MsgType: spectypes.SSVPartialSignatureMsgType, @@ -310,10 +328,14 @@ func (cr *CommitteeRunner) ProcessPostConsensus(logger *zap.Logger, signedMsg *s } // Get validator-root maps for attestations and sync committees, and the root-beacon object map - attestationMap, committeeMap, beaconObjects, err := cr.expectedPostConsensusRootsAndBeaconObjects() + attestationMap, committeeMap, beaconObjects, err := cr.expectedPostConsensusRootsAndBeaconObjects(logger) if err != nil { return errors.Wrap(err, "could not get expected post consensus roots and beacon objects") } + if len(beaconObjects) == 0 { + cr.BaseRunner.State.Finished = true + return ErrNoValidDuties + } var anyErr error attestationsToSubmit := make(map[phase0.ValidatorIndex]*phase0.Attestation) @@ -344,7 +366,6 @@ func (cr *CommitteeRunner) ProcessPostConsensus(logger *zap.Logger, signedMsg *s ) for _, validator := range validators { - // Skip if no quorum - We know that a root has quorum but not necessarily for the validator if !cr.BaseRunner.State.PostConsensusContainer.HasQuorum(validator, root) { continue @@ -354,7 +375,6 @@ func (cr *CommitteeRunner) ProcessPostConsensus(logger *zap.Logger, signedMsg *s 
continue } - //validator := validator // Reconstruct signature share := cr.BaseRunner.Share[validator] pubKey := share.ValidatorPubKey @@ -553,7 +573,7 @@ func (cr CommitteeRunner) expectedPostConsensusRootsAndDomain() ([]ssz.HashRoot, return nil, spectypes.DomainError, errors.New("expected post consensus roots function is unused") } -func (cr *CommitteeRunner) expectedPostConsensusRootsAndBeaconObjects() ( +func (cr *CommitteeRunner) expectedPostConsensusRootsAndBeaconObjects(logger *zap.Logger) ( attestationMap map[phase0.ValidatorIndex][32]byte, syncCommitteeMap map[phase0.ValidatorIndex][32]byte, beaconObjects map[phase0.ValidatorIndex]map[[32]byte]ssz.HashRoot, error error, @@ -573,15 +593,15 @@ func (cr *CommitteeRunner) expectedPostConsensusRootsAndBeaconObjects() ( if validatorDuty == nil { continue } - _, stopped := cr.stoppedValidators[spectypes.ValidatorPK(validatorDuty.PubKey)] - if stopped { + if err := cr.DutyGuard.ValidDuty(validatorDuty.Type, spectypes.ValidatorPK(validatorDuty.PubKey), validatorDuty.DutySlot()); err != nil { + logger.Warn("duty is no longer valid", fields.Validator(validatorDuty.PubKey[:]), fields.BeaconRole(validatorDuty.Type), zap.Error(err)) continue } + logger := logger.With(fields.Validator(validatorDuty.PubKey[:])) slot := validatorDuty.DutySlot() epoch := cr.GetBaseRunner().BeaconNetwork.EstimatedEpochAtSlot(slot) switch validatorDuty.Type { case spectypes.BNRoleAttester: - // Attestation object attestationData := constructAttestationData(beaconVote, validatorDuty) aggregationBitfield := bitfield.NewBitlist(validatorDuty.CommitteeLength) @@ -594,10 +614,12 @@ func (cr *CommitteeRunner) expectedPostConsensusRootsAndBeaconObjects() ( // Root domain, err := cr.GetBeaconNode().DomainData(epoch, spectypes.DomainAttester) if err != nil { + logger.Debug("failed to get attester domain", zap.Error(err)) continue } root, err := spectypes.ComputeETHSigningRoot(attestationData, domain) if err != nil { + logger.Debug("failed to compute attester root", zap.Error(err)) continue } @@ -618,12 +640,14 @@ func (cr *CommitteeRunner) expectedPostConsensusRootsAndBeaconObjects() ( // Root domain, err := cr.GetBeaconNode().DomainData(epoch, spectypes.DomainSyncCommittee) if err != nil { + logger.Debug("failed to get sync committee domain", zap.Error(err)) continue } // Eth root blockRoot := spectypes.SSZBytes(beaconVote.BlockRoot[:]) root, err := spectypes.ComputeETHSigningRoot(blockRoot, domain) if err != nil { + logger.Debug("failed to compute sync committee root", zap.Error(err)) continue } @@ -633,6 +657,8 @@ func (cr *CommitteeRunner) expectedPostConsensusRootsAndBeaconObjects() ( beaconObjects[validatorDuty.ValidatorIndex] = make(map[[32]byte]ssz.HashRoot) } beaconObjects[validatorDuty.ValidatorIndex][root] = syncMsg + default: + return nil, nil, nil, fmt.Errorf("invalid duty type: %s", validatorDuty.Type) } } return attestationMap, syncCommitteeMap, beaconObjects, nil diff --git a/protocol/v2/ssv/spectest/msg_processing_type.go b/protocol/v2/ssv/spectest/msg_processing_type.go index 08ffe4848c..a26a732f31 100644 --- a/protocol/v2/ssv/spectest/msg_processing_type.go +++ b/protocol/v2/ssv/spectest/msg_processing_type.go @@ -74,10 +74,31 @@ func (test *MsgProcessingSpecTest) runPreTesting(ctx context.Context, logger *za var lastErr error switch test.Runner.(type) { case *runner.CommitteeRunner: - c = baseCommitteeWithRunnerSample(ctx, logger, ketSetMap, test.Runner.(*runner.CommitteeRunner)) + guard := validator.NewCommitteeDutyGuard() + c = 
baseCommitteeWithRunnerSample(ctx, logger, ketSetMap, test.Runner.(*runner.CommitteeRunner), guard) if test.DontStartDuty { - c.Runners[test.Duty.DutySlot()] = test.Runner.(*runner.CommitteeRunner) + r := test.Runner.(*runner.CommitteeRunner) + r.DutyGuard = guard + c.Runners[test.Duty.DutySlot()] = r + + // Inform the duty guard of the running duty, if any, so that it won't reject it. + if r.BaseRunner.State != nil && r.BaseRunner.State.StartingDuty != nil { + duty, ok := r.BaseRunner.State.StartingDuty.(*spectypes.CommitteeDuty) + if !ok { + panic("starting duty not found") + } + for _, validatorDuty := range duty.ValidatorDuties { + err := guard.StartDuty(validatorDuty.Type, spectypes.ValidatorPK(validatorDuty.PubKey), validatorDuty.Slot) + if err != nil { + panic(err) + } + err = guard.ValidDuty(validatorDuty.Type, spectypes.ValidatorPK(validatorDuty.PubKey), validatorDuty.Slot) + if err != nil { + panic(err) + } + } + } } else { lastErr = c.StartDuty(logger, test.Duty.(*spectypes.CommitteeDuty)) } @@ -227,6 +248,7 @@ var baseCommitteeWithRunnerSample = func( logger *zap.Logger, keySetMap map[phase0.ValidatorIndex]*spectestingutils.TestKeySet, runnerSample *runner.CommitteeRunner, + committeeDutyGuard runner.CommitteeDutyGuard, ) *validator.Committee { var keySetSample *spectestingutils.TestKeySet @@ -240,7 +262,7 @@ var baseCommitteeWithRunnerSample = func( shareMap[valIdx] = spectestingutils.TestingShare(ks, valIdx) } - createRunnerF := func(_ phase0.Slot, shareMap map[phase0.ValidatorIndex]*spectypes.Share, _ []spectypes.ShareValidatorPK) (*runner.CommitteeRunner, error) { + createRunnerF := func(_ phase0.Slot, shareMap map[phase0.ValidatorIndex]*spectypes.Share, _ []spectypes.ShareValidatorPK, _ runner.CommitteeDutyGuard) (*runner.CommitteeRunner, error) { r, err := runner.NewCommitteeRunner( networkconfig.TestNetwork, shareMap, @@ -256,6 +278,7 @@ var baseCommitteeWithRunnerSample = func( runnerSample.GetSigner(), runnerSample.GetOperatorSigner(), runnerSample.GetValCheckF(), + committeeDutyGuard, ) return r.(*runner.CommitteeRunner), err } diff --git a/protocol/v2/ssv/spectest/ssv_mapping_test.go b/protocol/v2/ssv/spectest/ssv_mapping_test.go index d63aa369f7..5e530ebef2 100644 --- a/protocol/v2/ssv/spectest/ssv_mapping_test.go +++ b/protocol/v2/ssv/spectest/ssv_mapping_test.go @@ -559,7 +559,7 @@ func fixCommitteeForRun(t *testing.T, ctx context.Context, logger *zap.Logger, c logger, tests2.NewTestingBeaconNodeWrapped().GetBeaconNetwork(), &specCommittee.CommitteeMember, - func(slot phase0.Slot, shareMap map[phase0.ValidatorIndex]*spectypes.Share, _ []spectypes.ShareValidatorPK) (*runner.CommitteeRunner, error) { + func(slot phase0.Slot, shareMap map[phase0.ValidatorIndex]*spectypes.Share, _ []spectypes.ShareValidatorPK, _ runner.CommitteeDutyGuard) (*runner.CommitteeRunner, error) { r := ssvtesting.CommitteeRunnerWithShareMap(logger, shareMap) return r.(*runner.CommitteeRunner), nil }, diff --git a/protocol/v2/ssv/testing/runner.go b/protocol/v2/ssv/testing/runner.go index 38dc5651a0..35af231928 100644 --- a/protocol/v2/ssv/testing/runner.go +++ b/protocol/v2/ssv/testing/runner.go @@ -18,6 +18,7 @@ import ( "github.com/ssvlabs/ssv/networkconfig" "github.com/ssvlabs/ssv/protocol/v2/qbft/testing" "github.com/ssvlabs/ssv/protocol/v2/ssv/runner" + "github.com/ssvlabs/ssv/protocol/v2/ssv/validator" ) var TestingHighestDecidedSlot = phase0.Slot(0) @@ -131,6 +132,7 @@ var ConstructBaseRunner = func( km, opSigner, valCheck, + validator.NewCommitteeDutyGuard(), ) case 
spectypes.RoleAggregator: r, err = runner.NewAggregatorRunner( @@ -202,6 +204,7 @@ var ConstructBaseRunner = func( km, opSigner, valCheck, + validator.NewCommitteeDutyGuard(), ) r.(*runner.CommitteeRunner).BaseRunner.RunnerRoleType = spectestingutils.UnknownDutyType default: @@ -379,6 +382,7 @@ var ConstructBaseRunnerWithShareMap = func( km, opSigner, valCheck, + validator.NewCommitteeDutyGuard(), ) case spectypes.RoleAggregator: r, err = runner.NewAggregatorRunner( @@ -450,6 +454,7 @@ var ConstructBaseRunnerWithShareMap = func( km, opSigner, valCheck, + validator.NewCommitteeDutyGuard(), ) if r != nil { r.(*runner.CommitteeRunner).BaseRunner.RunnerRoleType = spectestingutils.UnknownDutyType diff --git a/protocol/v2/ssv/validator/committee.go b/protocol/v2/ssv/validator/committee.go index a1512b5717..c953ed8370 100644 --- a/protocol/v2/ssv/validator/committee.go +++ b/protocol/v2/ssv/validator/committee.go @@ -27,7 +27,7 @@ var ( runnerExpirySlots = phase0.Slot(34) ) -type CommitteeRunnerFunc func(slot phase0.Slot, shares map[phase0.ValidatorIndex]*spectypes.Share, attestingValidators []spectypes.ShareValidatorPK) (*runner.CommitteeRunner, error) +type CommitteeRunnerFunc func(slot phase0.Slot, shares map[phase0.ValidatorIndex]*spectypes.Share, attestingValidators []spectypes.ShareValidatorPK, dutyGuard runner.CommitteeDutyGuard) (*runner.CommitteeRunner, error) type Committee struct { logger *zap.Logger @@ -44,8 +44,8 @@ type Committee struct { CommitteeMember *spectypes.CommitteeMember - CreateRunnerFn CommitteeRunnerFunc - HighestAttestingSlotMap map[spectypes.ValidatorPK]phase0.Slot + dutyGuard *CommitteeDutyGuard + CreateRunnerFn CommitteeRunnerFunc } // NewCommittee creates a new cluster @@ -62,16 +62,16 @@ func NewCommittee( shares = make(map[phase0.ValidatorIndex]*spectypes.Share) } return &Committee{ - logger: logger, - BeaconNetwork: beaconNetwork, - ctx: ctx, - cancel: cancel, - Queues: make(map[phase0.Slot]queueContainer), - Runners: make(map[phase0.Slot]*runner.CommitteeRunner), - Shares: shares, - HighestAttestingSlotMap: make(map[spectypes.ValidatorPK]phase0.Slot), - CommitteeMember: committeeMember, - CreateRunnerFn: createRunnerFn, + logger: logger, + BeaconNetwork: beaconNetwork, + ctx: ctx, + cancel: cancel, + Queues: make(map[phase0.Slot]queueContainer), + Runners: make(map[phase0.Slot]*runner.CommitteeRunner), + Shares: shares, + CommitteeMember: committeeMember, + CreateRunnerFn: createRunnerFn, + dutyGuard: NewCommitteeDutyGuard(), } } @@ -85,7 +85,7 @@ func (c *Committee) RemoveShare(validatorIndex phase0.ValidatorIndex) { c.mtx.Lock() defer c.mtx.Unlock() if share, exist := c.Shares[validatorIndex]; exist { - c.stopValidator(c.logger, share.ValidatorPubKey) + c.dutyGuard.StopValidator(share.ValidatorPubKey) delete(c.Shares, validatorIndex) } } @@ -156,7 +156,7 @@ func (c *Committee) StartDuty(logger *zap.Logger, duty *spectypes.CommitteeDuty) } duty = filteredDuty - runner, err := c.CreateRunnerFn(duty.Slot, shares, attesters) + runner, err := c.CreateRunnerFn(duty.Slot, shares, attesters, c.dutyGuard) if err != nil { return errors.Wrap(err, "could not create CommitteeRunner") } @@ -184,23 +184,11 @@ func (c *Committee) StartDuty(logger *zap.Logger, duty *spectypes.CommitteeDuty) } logger.Info("ℹ️ starting duty processing") - return runner.StartNewDuty(logger, duty, c.CommitteeMember.GetQuorum()) -} - -func (c *Committee) stopValidator(logger *zap.Logger, validator spectypes.ValidatorPK) { - for slot, runner := range c.Runners { - opIds := 
types.OperatorIDsFromOperators(c.CommitteeMember.Committee) - epoch := c.BeaconNetwork.EstimatedEpochAtSlot(slot) - committeeDutyID := fields.FormatCommitteeDutyID(opIds, epoch, slot) - - logger.Debug("trying to stop duty for validator", - fields.DutyID(committeeDutyID), - fields.Slot(slot), fields.Validator(validator[:]), - ) - // TODO: after StopDuty is implemented, if it's not a super fast operation, - // then we maybe shouldn't do it under a lock. - runner.StopDuty(validator) + err = runner.StartNewDuty(logger, duty, c.CommitteeMember.GetQuorum()) + if err != nil { + return errors.Wrap(err, "runner failed to start duty") } + return nil } func (c *Committee) PushToQueue(slot phase0.Slot, dec *queue.SSVMessage) { diff --git a/protocol/v2/ssv/validator/committee_guard.go b/protocol/v2/ssv/validator/committee_guard.go new file mode 100644 index 0000000000..8bd52c08a5 --- /dev/null +++ b/protocol/v2/ssv/validator/committee_guard.go @@ -0,0 +1,70 @@ +package validator + +import ( + "fmt" + "sync" + + "github.com/attestantio/go-eth2-client/spec/phase0" + spectypes "github.com/ssvlabs/ssv-spec/types" +) + +// CommitteeDutyGuard helps guarantee exclusive execution of one duty per validator +// and non-execution of stopped validators. +type CommitteeDutyGuard struct { + duties map[spectypes.BeaconRole]map[spectypes.ValidatorPK]phase0.Slot + mu sync.RWMutex +} + +func NewCommitteeDutyGuard() *CommitteeDutyGuard { + return &CommitteeDutyGuard{ + duties: map[spectypes.BeaconRole]map[spectypes.ValidatorPK]phase0.Slot{ + spectypes.BNRoleAttester: {}, + spectypes.BNRoleSyncCommittee: {}, + }, + } +} + +// StartDuty records a started duty. If a duty is already running at the same or higher slot, it returns an error. +func (a *CommitteeDutyGuard) StartDuty(role spectypes.BeaconRole, validator spectypes.ValidatorPK, slot phase0.Slot) error { + a.mu.Lock() + defer a.mu.Unlock() + + duties, ok := a.duties[role] + if !ok { + return fmt.Errorf("unsupported role %d", role) + } + runningSlot, exists := duties[validator] + if exists && runningSlot >= slot { + return fmt.Errorf("duty already running at slot %d", runningSlot) + } + duties[validator] = slot + return nil +} + +// StopValidator removes any running duties for a validator. +func (a *CommitteeDutyGuard) StopValidator(validator spectypes.ValidatorPK) { + a.mu.Lock() + defer a.mu.Unlock() + + delete(a.duties[spectypes.BNRoleAttester], validator) + delete(a.duties[spectypes.BNRoleSyncCommittee], validator) +} + +// ValidDuty checks if a duty is still valid for execution. 
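Taken together, the guard is meant to be driven in a start / re-check / stop cycle: StartDuty records the newest started slot per role and validator, ValidDuty (defined next) re-checks that record, and StopValidator clears it when a validator is removed. A minimal sketch of that cycle, assuming only the exported API above; submitAttestation is a hypothetical stand-in, not the runner's real submission path:

package main

import (
	"fmt"

	"github.com/attestantio/go-eth2-client/spec/phase0"
	spectypes "github.com/ssvlabs/ssv-spec/types"

	"github.com/ssvlabs/ssv/protocol/v2/ssv/validator"
)

// submitAttestation is a hypothetical placeholder for the runner's real submission path.
func submitAttestation(pk spectypes.ValidatorPK, slot phase0.Slot) {
	fmt.Printf("submitting attestation for %x at slot %d\n", pk, slot)
}

func runAttesterDuty(guard *validator.CommitteeDutyGuard, pk spectypes.ValidatorPK, slot phase0.Slot) error {
	// Record the duty up front; an equal or older slot for the same validator is rejected.
	if err := guard.StartDuty(spectypes.BNRoleAttester, pk, slot); err != nil {
		return fmt.Errorf("refusing duty: %w", err)
	}

	// ... consensus runs here; the validator may be stopped or superseded in the meantime ...

	// Re-check just before submitting: StopValidator or a newer StartDuty invalidates this slot.
	if err := guard.ValidDuty(spectypes.BNRoleAttester, pk, slot); err != nil {
		return fmt.Errorf("skipping submission: %w", err)
	}
	submitAttestation(pk, slot)
	return nil
}

func main() {
	guard := validator.NewCommitteeDutyGuard()
	if err := runAttesterDuty(guard, spectypes.ValidatorPK{1}, 10); err != nil {
		fmt.Println(err)
	}
}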
+func (a *CommitteeDutyGuard) ValidDuty(role spectypes.BeaconRole, validator spectypes.ValidatorPK, slot phase0.Slot) error { + a.mu.RLock() + defer a.mu.RUnlock() + + duties, ok := a.duties[role] + if !ok { + return fmt.Errorf("unsupported role %d", role) + } + runningSlot, exists := duties[validator] + if !exists { + return fmt.Errorf("duty not found") + } + if runningSlot != slot { + return fmt.Errorf("slot mismatch: duty is running at slot %d", runningSlot) + } + return nil +} diff --git a/protocol/v2/ssv/validator/committee_guard_test.go b/protocol/v2/ssv/validator/committee_guard_test.go new file mode 100644 index 0000000000..fc85b39152 --- /dev/null +++ b/protocol/v2/ssv/validator/committee_guard_test.go @@ -0,0 +1,123 @@ +package validator + +import ( + "testing" + + spectypes "github.com/ssvlabs/ssv-spec/types" + "github.com/stretchr/testify/require" +) + +func TestCommitteeDutyGuard(t *testing.T) { + var ( + pk1 = spectypes.ValidatorPK{1} + pk2 = spectypes.ValidatorPK{2} + pk3 = spectypes.ValidatorPK{3} + + attester = spectypes.BNRoleAttester + sync = spectypes.BNRoleSyncCommittee + ) + + guard := NewCommitteeDutyGuard() + + // Unsupported role: + err := guard.StartDuty(spectypes.BNRoleProposer, pk1, 1) + require.EqualError(t, err, "unsupported role 2") + err = guard.ValidDuty(spectypes.BNRoleProposer, pk1, 1) + require.EqualError(t, err, "unsupported role 2") + + // Comprehensive test for both roles: + for _, role := range []spectypes.BeaconRole{attester, sync} { + err := guard.ValidDuty(role, pk1, 1) + require.EqualError(t, err, "duty not found") + + // Start duty at slot 2: + { + err = guard.StartDuty(role, pk1, 2) + require.NoError(t, err) + + err = guard.ValidDuty(role, pk1, 2) + require.NoError(t, err) + + err = guard.ValidDuty(role, pk1, 3) + require.EqualError(t, err, "slot mismatch: duty is running at slot 2") + + err = guard.ValidDuty(role, pk1, 1) + require.EqualError(t, err, "slot mismatch: duty is running at slot 2") + + err = guard.StartDuty(role, pk1, 2) + require.EqualError(t, err, "duty already running at slot 2") + } + + // Start duty at slot 3: + { + err = guard.StartDuty(role, pk1, 3) + require.NoError(t, err) + + err = guard.ValidDuty(role, pk1, 1) + require.EqualError(t, err, "slot mismatch: duty is running at slot 3") + + err = guard.ValidDuty(role, pk1, 2) + require.EqualError(t, err, "slot mismatch: duty is running at slot 3") + + err = guard.ValidDuty(role, pk1, 3) + require.NoError(t, err) + } + + // Try new validator 0x2: + { + err = guard.ValidDuty(role, pk2, 4) + require.EqualError(t, err, "duty not found") + + err = guard.StartDuty(role, pk2, 4) + require.NoError(t, err) + + err = guard.ValidDuty(role, pk2, 4) + require.NoError(t, err) + + // Check validator 0x1 is unchanged: + err = guard.ValidDuty(role, pk1, 2) + require.EqualError(t, err, "slot mismatch: duty is running at slot 3") + + err = guard.ValidDuty(role, pk1, 3) + require.NoError(t, err) + } + + // Stop validator 0x1: + { + guard.StopValidator(pk1) + + err = guard.ValidDuty(role, pk1, 3) + require.EqualError(t, err, "duty not found") + + // Check validator 0x2 is unchanged: + err = guard.ValidDuty(role, pk2, 4) + require.NoError(t, err) + + err = guard.ValidDuty(role, pk2, 3) + require.ErrorContains(t, err, "slot mismatch: duty is running at slot 4") + } + } + + // Stop non-existing validator: + { + guard.StopValidator(pk3) + + // Pre-check that validator 0x2 is unchanged: + err := guard.ValidDuty(attester, pk2, 4) + require.NoError(t, err) + + err = guard.ValidDuty(sync, pk2, 3) + 
require.EqualError(t, err, "slot mismatch: duty is running at slot 4") + } + + // Stop validator 0x2 to verify both duties are stopped: + { + guard.StopValidator(pk2) + + err = guard.ValidDuty(attester, pk2, 4) + require.EqualError(t, err, "duty not found") + + err = guard.ValidDuty(sync, pk2, 4) + require.EqualError(t, err, "duty not found") + } +} diff --git a/protocol/v2/ssv/validator/committee_queue.go b/protocol/v2/ssv/validator/committee_queue.go index 925e64acf5..b8c2563e7d 100644 --- a/protocol/v2/ssv/validator/committee_queue.go +++ b/protocol/v2/ssv/validator/committee_queue.go @@ -2,6 +2,7 @@ package validator import ( "context" + "errors" "github.com/attestantio/go-eth2-client/spec/phase0" specqbft "github.com/ssvlabs/ssv-spec/qbft" @@ -79,7 +80,7 @@ func (c *Committee) ConsumeQueue( logger *zap.Logger, slot phase0.Slot, handler MessageHandler, - runner *runner.CommitteeRunner, + rnr *runner.CommitteeRunner, ) error { state := *q.queueState @@ -89,8 +90,8 @@ func (c *Committee) ConsumeQueue( for ctx.Err() == nil { // Construct a representation of the current state. var runningInstance *instance.Instance - if runner.HasRunningDuty() { - runningInstance = runner.GetBaseRunner().State.RunningInstance + if rnr.HasRunningDuty() { + runningInstance = rnr.GetBaseRunner().State.RunningInstance if runningInstance != nil { decided, _ := runningInstance.IsDecided() state.HasRunningInstance = !decided @@ -143,6 +144,10 @@ func (c *Committee) ConsumeQueue( c.logMsg(logger, msg, "❗ could not handle message", fields.MessageType(msg.SSVMessage.MsgType), zap.Error(err)) + if errors.Is(err, runner.ErrNoValidDuties) { + // Stop the queue consumer if the runner no longer has any valid duties. + break + } } } diff --git a/scripts/spec-alignment/differ.config.yaml b/scripts/spec-alignment/differ.config.yaml index 0cb8f86fc7..d4c4f3cb63 100644 --- a/scripts/spec-alignment/differ.config.yaml +++ b/scripts/spec-alignment/differ.config.yaml @@ -1,4 +1,4 @@ -ApprovedChanges: ["50e5bb7eda99594e", 
"870a3a66aeccd737","4e22a08543b079b","56ceb03cd44ff702","188adfe8914e04c1","2438f9c5b82b69a3","1a716ee3bdb3170","90b166f78390af18","68219b82a1d9d829","c4c4caa5d0938b85","dfe99ce1d27b6cb1","35f5dab1f128d193","9a3973b64d7e8932","f33f07301a770d03","3e9e0dddfad3b302","d4fef6512374c1f5","b49f54cb45787e4b","59b2375130aef5df","f094cd0460432170","8e51881e527dd603","a7d6d58d9fa06379","1d124224ca4d0fe3","39ea06bfd1477d2d","7e2550bab51f22b2","87ebd29bd49fc52f","ef39dd5223e0d080","fe14e7f0503ea188","6146023d4d5708a2","aebb8e4348b6d667","973a2e6704dbf3","fb4cac598a68c592","257c7eb81d6eb245","2a8e94fe037e13fd","5e7eb878de54eec6","960a9c64cd4ec93c","57dfd255520bd849","ec333ff8a708db69","1cc1ff39ad91ee69","5714652b88e2d44f","7a53b3b037c56325","8c02ef1964464c30","19a268910a20da3d","af6e01ed565029f3","318b5169ac4dabb6","372c6b332e8ba699","c0d8a364c0db855a","4287381be4fb1841","b1614afc1da7794f","c214975412f3fd7","8bbf7eba3fa0cf7e","8e4ec8debe331b36","7a671d8fcefc3793","e2b0e9c6454c1c08","6707ecfefa5fec21","d5a7389d730464f1","8dfae3b3223d2de0","a81c092c985de728","968df5082c727ed6","9e53c73ee60b1cc2","9d265e99dd31d4f5","a34619e078d2e42f","17e8cec4f0625d53","e913f373aa88f333","cfc1e05c372d88dc","e5de6901d78b8833","57c1885b43dd8d19","e8a49856a5edd893","22ea21d10a2f861c","954e4fce01631c4e","108b9575f7c1d4bc","1f8d076449068f64","5a7ad98296703f6","159536003eeddac8","8ca8f82e67ddd3dd","16ebe47404323cc1","48bfe5cf1e578b47","dd83182b693a7216","308d21d9830f7047","6dde03147e012b1a","730c3e5e59393b7d","5b44a4b425ecc397","df5debc50ec8babc","92a41554b2910bb8","c36c680554dde59f","447feaa5cdc1a010","fda90c61f44cb149","cdbb4930eced584c","274336ec1127e6c0","2a496f5b3ad542d2","6b395912dde33b0e","cac56ec14994216b","8850900b5d9bcc65","15e7706486c6359e","cc22f28953b787ea","3bad6ae11596a574","8f84422a240d889c","5b265432dfbbaac7","43794bf5953db193","7975821460ebe1e7","173c505e12aabb8f","47ee0d148148a56f","8cc38593ebe049b6","bda3aec7157b095a","248712911696a851","f4d9c910f1dbaef7","1a2146fcad37acb8","b0b146f9bdab64b6","edfd442b4d725fbb","122f053573538a32","d720d714a20833e1", "f9c984e71b685f9b","8c6b4fee5a4c13ce","c0a8d2019a2c30d5", "717bef26105c733f","2f70630c27062353","2f70337ba7566a69","dd607a44e1341e6b","5210501625ac3de5","f786bf475b5085aa","18a66ed6e613d9c1","e8943e7741f6843d","276a489bd5a00032","ba3bba59f10bf6b","3c50ce0c8089d871","89ee72f6c610ab84","c92b95a85da2cb11","927ea6aed3f98f20","9338904026a0ce37","9683cfa19dc544a3","4d3fa2b8dfcb5f5b", "f19e9a2b295bcfb3", "b10199b2de6f03b8", "1afc17e358f9ca79","4b58762c0b433442","d293ec1bc61bb707","3e88c3b49d093605","4890ff80c88cc41d","5227ff3a225dd20d","81a60407a3a0ba80","db2ad807eb66254a","d308bd7c553ccdcf","bdaf172971637cbe","6ade9202843071fe","2fe8e14083997744","19c9a5362d1e1d3a","5956f803d239f178","92c55a4548a8b760","9a95524213bccfff","2f51a7338b86c229","e96966a281d74505","3ee479b9cbbc3a1d","82b392ba39c6c594","b9d2404e5c570019","24f528d85fb021f2","fe9609a785305d81","b0934079dcd986cc","a9c520a19b26049","d19a9403fd732d94","74a928f5dcb2fdd9","cbbfdb5e68cdac80","10e39d2ceda91f34","f99a004cf6697875","8fa5e8ebf7d223ec","6c80c145ba705243","fbabbc90d0b4178a"] +ApprovedChanges: ["50e5bb7eda99594e", 
"870a3a66aeccd737","4e22a08543b079b","56ceb03cd44ff702","188adfe8914e04c1","2438f9c5b82b69a3","1a716ee3bdb3170","90b166f78390af18","68219b82a1d9d829","c4c4caa5d0938b85","dfe99ce1d27b6cb1","35f5dab1f128d193","9a3973b64d7e8932","f33f07301a770d03","3e9e0dddfad3b302","d4fef6512374c1f5","b49f54cb45787e4b","59b2375130aef5df","f094cd0460432170","8e51881e527dd603","a7d6d58d9fa06379","1d124224ca4d0fe3","39ea06bfd1477d2d","7e2550bab51f22b2","87ebd29bd49fc52f","ef39dd5223e0d080","fe14e7f0503ea188","6146023d4d5708a2","aebb8e4348b6d667","973a2e6704dbf3","fb4cac598a68c592","257c7eb81d6eb245","2a8e94fe037e13fd","5e7eb878de54eec6","960a9c64cd4ec93c","57dfd255520bd849","ec333ff8a708db69","1cc1ff39ad91ee69","5714652b88e2d44f","7a53b3b037c56325","8c02ef1964464c30","19a268910a20da3d","af6e01ed565029f3","318b5169ac4dabb6","372c6b332e8ba699","c0d8a364c0db855a","4287381be4fb1841","b1614afc1da7794f","c214975412f3fd7","8bbf7eba3fa0cf7e","8e4ec8debe331b36","7a671d8fcefc3793","e2b0e9c6454c1c08","6707ecfefa5fec21","d5a7389d730464f1","8dfae3b3223d2de0","a81c092c985de728","968df5082c727ed6","9e53c73ee60b1cc2","9d265e99dd31d4f5","a34619e078d2e42f","17e8cec4f0625d53","e913f373aa88f333","cfc1e05c372d88dc","e5de6901d78b8833","57c1885b43dd8d19","e8a49856a5edd893","22ea21d10a2f861c","954e4fce01631c4e","108b9575f7c1d4bc","1f8d076449068f64","5a7ad98296703f6","159536003eeddac8","8ca8f82e67ddd3dd","16ebe47404323cc1","48bfe5cf1e578b47","dd83182b693a7216","308d21d9830f7047","6dde03147e012b1a","730c3e5e59393b7d","5b44a4b425ecc397","df5debc50ec8babc","92a41554b2910bb8","c36c680554dde59f","447feaa5cdc1a010","fda90c61f44cb149","cdbb4930eced584c","274336ec1127e6c0","2a496f5b3ad542d2","6b395912dde33b0e","cac56ec14994216b","8850900b5d9bcc65","15e7706486c6359e","cc22f28953b787ea","3bad6ae11596a574","8f84422a240d889c","5b265432dfbbaac7","43794bf5953db193","7975821460ebe1e7","173c505e12aabb8f","47ee0d148148a56f","8cc38593ebe049b6","bda3aec7157b095a","248712911696a851","f4d9c910f1dbaef7","1a2146fcad37acb8","b0b146f9bdab64b6","edfd442b4d725fbb","122f053573538a32","d720d714a20833e1", "f9c984e71b685f9b","8c6b4fee5a4c13ce","c0a8d2019a2c30d5", "717bef26105c733f","2f70630c27062353","2f70337ba7566a69","dd607a44e1341e6b","5210501625ac3de5","f786bf475b5085aa","18a66ed6e613d9c1","e8943e7741f6843d","276a489bd5a00032","ba3bba59f10bf6b","3c50ce0c8089d871","89ee72f6c610ab84","c92b95a85da2cb11","927ea6aed3f98f20","9338904026a0ce37","9683cfa19dc544a3","4d3fa2b8dfcb5f5b", "f19e9a2b295bcfb3", "b10199b2de6f03b8", "1afc17e358f9ca79","4b58762c0b433442","d293ec1bc61bb707","3e88c3b49d093605","4890ff80c88cc41d","5227ff3a225dd20d","81a60407a3a0ba80","db2ad807eb66254a","d308bd7c553ccdcf","bdaf172971637cbe","6ade9202843071fe","2fe8e14083997744","19c9a5362d1e1d3a","5956f803d239f178","92c55a4548a8b760","9a95524213bccfff","2f51a7338b86c229","e96966a281d74505","3ee479b9cbbc3a1d","82b392ba39c6c594","b9d2404e5c570019","24f528d85fb021f2","fe9609a785305d81","b0934079dcd986cc","a9c520a19b26049","d19a9403fd732d94","74a928f5dcb2fdd9","cbbfdb5e68cdac80","10e39d2ceda91f34","f99a004cf6697875","8fa5e8ebf7d223ec","6c80c145ba705243","fbabbc90d0b4178a","b110cba51df9f8d2","c4ff2ed3d20dc419","9295a5bb10efcec7","ab56ea44a75f898a","ff51ef26ab53ba58","df3771e2589008f9","106e5689655bcfc6","f90e0fb6883bff93","667656095cec39ee"] IgnoredIdentifiers: - logger From a8349889ffdbe7b4833634a3fa384c5daf21616f Mon Sep 17 00:00:00 2001 From: moshe-blox <89339422+moshe-blox@users.noreply.github.com> Date: Wed, 18 Sep 2024 14:23:38 +0300 Subject: [PATCH 17/35] fix: (discovery) publish latest ENR despite 
ongoing publish (#1740) * fix: (discovery) publish latest ENR despite ongoing publish * simplify * make channel * log some metrics * comment * comment * comment --- network/discovery/dv5_service.go | 66 +++++++++++++++++++++----------- 1 file changed, 43 insertions(+), 23 deletions(-) diff --git a/network/discovery/dv5_service.go b/network/discovery/dv5_service.go index 93e7e3bae6..b7cd72754b 100644 --- a/network/discovery/dv5_service.go +++ b/network/discovery/dv5_service.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "net" - "sync/atomic" "time" "github.com/ethereum/go-ethereum/p2p/discover" @@ -25,9 +24,6 @@ import ( var ( defaultDiscoveryInterval = time.Millisecond * 100 publishENRTimeout = time.Minute - - publishStateReady = int32(0) - publishStatePending = int32(1) ) // NodeProvider is an interface for managing ENRs @@ -53,23 +49,24 @@ type DiscV5Service struct { conns peers.ConnectionIndex subnetsIdx peers.SubnetsIndex - publishState int32 - conn *net.UDPConn + conn *net.UDPConn domainType networkconfig.DomainTypeProvider subnets []byte + + publishLock chan struct{} } func newDiscV5Service(pctx context.Context, logger *zap.Logger, discOpts *Options) (Service, error) { ctx, cancel := context.WithCancel(pctx) dvs := DiscV5Service{ - ctx: ctx, - cancel: cancel, - publishState: publishStateReady, - conns: discOpts.ConnIndex, - subnetsIdx: discOpts.SubnetsIdx, - domainType: discOpts.DomainType, - subnets: discOpts.DiscV5Opts.Subnets, + ctx: ctx, + cancel: cancel, + conns: discOpts.ConnIndex, + subnetsIdx: discOpts.SubnetsIdx, + domainType: discOpts.DomainType, + subnets: discOpts.DiscV5Opts.Subnets, + publishLock: make(chan struct{}, 1), } logger.Debug("configuring discv5 discovery", zap.Any("discOpts", discOpts)) @@ -300,14 +297,7 @@ func (dvs *DiscV5Service) DeregisterSubnets(logger *zap.Logger, subnets ...int) // PublishENR publishes the ENR with the current domain type across the network func (dvs *DiscV5Service) PublishENR(logger *zap.Logger) { - ctx, done := context.WithTimeout(dvs.ctx, publishENRTimeout) - defer done() - if !atomic.CompareAndSwapInt32(&dvs.publishState, publishStateReady, publishStatePending) { - // pending - logger.Debug("pending publish ENR") - return - } - + // Update own node record. err := records.SetDomainTypeEntry(dvs.dv5Listener.LocalNode(), records.KeyDomainType, dvs.domainType.DomainType()) if err != nil { logger.Error("could not set domain type", zap.Error(err)) @@ -319,11 +309,33 @@ func (dvs *DiscV5Service) PublishENR(logger *zap.Logger) { return } - defer atomic.StoreInt32(&dvs.publishState, publishStateReady) + // Acquire publish lock to prevent parallel publishing. + // If there's an ongoing goroutine, it would now start publishing the record updated above, + // and if it's done before the new deadline, this goroutine would pick up where it left off. + ctx, done := context.WithTimeout(dvs.ctx, publishENRTimeout) + defer done() + + select { + case <-ctx.Done(): + return + case dvs.publishLock <- struct{}{}: + } + defer func() { + // Release lock. + <-dvs.publishLock + }() + + // Collect some metrics. + start := time.Now() + pings, errs := 0, 0 + peerIDs := map[peer.ID]struct{}{} + + // Publish ENR. 
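publishLock here is a one-slot buffered channel used as a try-lock with a deadline: sending acquires it, the deferred receive releases it, and the select against ctx.Done() gives up once publishENRTimeout passes, while the node record is updated before the lock is taken so an in-flight publisher picks up the latest record. A minimal self-contained sketch of the same pattern; the names below are illustrative, not the discovery service's API:

package main

import (
	"context"
	"fmt"
	"time"
)

// publisher is an illustrative stand-in for the service holding the lock.
type publisher struct {
	publishLock chan struct{} // capacity 1: a buffered value means "someone is publishing"
}

func (p *publisher) publish(ctx context.Context, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	// Acquire the lock, but stop waiting if the deadline expires first.
	select {
	case <-ctx.Done():
		return fmt.Errorf("gave up waiting for ongoing publish: %w", ctx.Err())
	case p.publishLock <- struct{}{}:
	}
	defer func() { <-p.publishLock }() // release

	// ... do the actual publishing work under the same ctx ...
	fmt.Println("publishing")
	return nil
}

func main() {
	p := &publisher{publishLock: make(chan struct{}, 1)}
	_ = p.publish(context.Background(), time.Minute)
}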
dvs.discover(ctx, func(e PeerEvent) { metricPublishEnrPings.Inc() err := dvs.dv5Listener.Ping(e.Node) if err != nil { + errs++ if err.Error() == "RPC timeout" { // ignore return @@ -332,8 +344,16 @@ func (dvs *DiscV5Service) PublishENR(logger *zap.Logger) { return } metricPublishEnrPongs.Inc() - // logger.Debug("ping success", logging.TargetNodeEnr(e.Node)) + pings++ + peerIDs[e.AddrInfo.ID] = struct{}{} }, time.Millisecond*100, dvs.ssvNodeFilter(logger), dvs.badNodeFilter(logger)) + + // Log metrics. + logger.Debug("done publishing ENR", + fields.Duration(start), + zap.Int("unique_peers", len(peerIDs)), + zap.Int("pings", pings), + zap.Int("errors", errs)) } func (dvs *DiscV5Service) createLocalNode(logger *zap.Logger, discOpts *Options, ipAddr net.IP) (*enode.LocalNode, error) { From e3fe765d09df21095b69f717c729ba3e2aa484d5 Mon Sep 17 00:00:00 2001 From: oleg-ssvlabs Date: Wed, 18 Sep 2024 17:50:45 +0200 Subject: [PATCH 18/35] chore: fix typo in the log message field (#1744) --- protocol/genesis/qbft/instance/prepare.go | 2 +- protocol/v2/qbft/instance/prepare.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/protocol/genesis/qbft/instance/prepare.go b/protocol/genesis/qbft/instance/prepare.go index 50c1d48a86..54329a63f9 100644 --- a/protocol/genesis/qbft/instance/prepare.go +++ b/protocol/genesis/qbft/instance/prepare.go @@ -59,7 +59,7 @@ func (i *Instance) uponPrepare(logger *zap.Logger, signedPrepare *genesisspecqbf logger.Debug("📢 broadcasting commit message", fields.Round(specqbft.Round(i.State.Round)), - zap.Any("commit_singers", commitMsg.Signers), + zap.Any("commit_signers", commitMsg.Signers), fields.Root(commitMsg.Message.Root)) if err := i.Broadcast(logger, commitMsg); err != nil { diff --git a/protocol/v2/qbft/instance/prepare.go b/protocol/v2/qbft/instance/prepare.go index e2590a7a36..2919c1616a 100644 --- a/protocol/v2/qbft/instance/prepare.go +++ b/protocol/v2/qbft/instance/prepare.go @@ -56,7 +56,7 @@ func (i *Instance) uponPrepare(logger *zap.Logger, msg *specqbft.ProcessingMessa logger.Debug("📢 broadcasting commit message", fields.Round(i.State.Round), - zap.Any("commit_singers", commitMsg.OperatorIDs), + zap.Any("commit_signers", commitMsg.OperatorIDs), fields.Root(proposedRoot)) if err := i.Broadcast(logger, commitMsg); err != nil { From 9dcb1d52e2d4bd0fa6db347a653dd41db11c55ba Mon Sep 17 00:00:00 2001 From: olegshmuelov <45327364+olegshmuelov@users.noreply.github.com> Date: Fri, 20 Sep 2024 12:14:03 +0300 Subject: [PATCH 19/35] fix: fee recipient update (validator registration duty) (#1747) * improve log for test * make the fields public * add log * more logs * add feerecipient getter to runner * revert to 10 epochs * adjust test runner * use genesis options instead tmp func * remove unused type * remove the tmp logic from testing runner * approve spec diff --------- Co-authored-by: moshe-blox --- operator/duties/validatorregistration.go | 15 ++++++++++++--- operator/validator/controller.go | 6 +++--- .../genesis/ssv/runner/validator_registration.go | 10 +++++++--- protocol/genesis/ssv/validator/validator.go | 4 ++++ scripts/spec-alignment/genesis_differ.config.yaml | 2 +- 5 files changed, 27 insertions(+), 10 deletions(-) diff --git a/operator/duties/validatorregistration.go b/operator/duties/validatorregistration.go index 67974ca3e5..6926baf3ed 100644 --- a/operator/duties/validatorregistration.go +++ b/operator/duties/validatorregistration.go @@ -2,6 +2,7 @@ package duties import ( "context" + "encoding/hex" genesisspectypes 
"github.com/ssvlabs/ssv-spec-pre-cc/types" @@ -16,6 +17,11 @@ type ValidatorRegistrationHandler struct { baseHandler } +type ValidatorRegistration struct { + ValidatorIndex phase0.ValidatorIndex + FeeRecipient string +} + func NewValidatorRegistrationHandler() *ValidatorRegistrationHandler { return &ValidatorRegistrationHandler{} } @@ -43,7 +49,7 @@ func (h *ValidatorRegistrationHandler) HandleDuties(ctx context.Context) { epoch := h.network.Beacon.EstimatedEpochAtSlot(slot) shares := h.validatorProvider.SelfParticipatingValidators(epoch + phase0.Epoch(validatorRegistrationEpochInterval)) - var validators []phase0.ValidatorIndex + var vrs []ValidatorRegistration for _, share := range shares { if uint64(share.BeaconMetadata.Index)%registrationSlotInterval != uint64(slot)%registrationSlotInterval { continue @@ -69,11 +75,14 @@ func (h *ValidatorRegistrationHandler) HandleDuties(ctx context.Context) { }}) } - validators = append(validators, share.BeaconMetadata.Index) + vrs = append(vrs, ValidatorRegistration{ + ValidatorIndex: share.BeaconMetadata.Index, + FeeRecipient: hex.EncodeToString(share.FeeRecipientAddress[:]), + }) } h.logger.Debug("validator registration duties sent", zap.Uint64("slot", uint64(slot)), - zap.Any("validators", validators)) + zap.Any("validator_registrations", vrs)) case <-h.indicesChange: continue diff --git a/operator/validator/controller.go b/operator/validator/controller.go index b5168d82d2..6069235912 100644 --- a/operator/validator/controller.go +++ b/operator/validator/controller.go @@ -926,7 +926,7 @@ func (c *controller) onShareInit(share *ssvtypes.SSVShare) (*validators.Validato genesisOpts := c.genesisValidatorOptions genesisOpts.SSVShare = genesisssvtypes.ConvertToGenesisSSVShare(share, operator) - genesisOpts.DutyRunners = SetupGenesisRunners(genesisValidatorCtx, c.logger, opts) + genesisOpts.DutyRunners = SetupGenesisRunners(genesisValidatorCtx, c.logger, opts, genesisOpts) genesisValidator = genesisvalidator.NewValidator(genesisValidatorCtx, validatorCancel, genesisOpts) } @@ -1358,7 +1358,7 @@ func SetupRunners( return runners, nil } -func SetupGenesisRunners(ctx context.Context, logger *zap.Logger, options validator.Options) genesisrunner.DutyRunners { +func SetupGenesisRunners(ctx context.Context, logger *zap.Logger, options validator.Options, genesisOptions genesisvalidator.Options) genesisrunner.DutyRunners { if options.SSVShare == nil || options.SSVShare.BeaconMetadata == nil { logger.Error("missing validator metadata", zap.String("validator", hex.EncodeToString(options.SSVShare.ValidatorPubKey[:]))) return genesisrunner.DutyRunners{} // TODO need to find better way to fix it @@ -1374,7 +1374,7 @@ func SetupGenesisRunners(ctx context.Context, logger *zap.Logger, options valida genesisspectypes.BNRoleVoluntaryExit, } - share := genesisssvtypes.ConvertToGenesisShare(&options.SSVShare.Share, options.Operator) + share := &genesisOptions.SSVShare.Share buildController := func(role genesisspectypes.BeaconRole, valueCheckF genesisspecqbft.ProposedValueCheckF) *genesisqbftcontroller.Controller { config := &genesisqbft.Config{ diff --git a/protocol/genesis/ssv/runner/validator_registration.go b/protocol/genesis/ssv/runner/validator_registration.go index acb2ca19ec..f26d71cc1b 100644 --- a/protocol/genesis/ssv/runner/validator_registration.go +++ b/protocol/genesis/ssv/runner/validator_registration.go @@ -4,6 +4,7 @@ import ( "crypto/sha256" "encoding/hex" "encoding/json" + spectypes "github.com/ssvlabs/ssv-spec/types" v1 
"github.com/attestantio/go-eth2-client/api/v1" @@ -84,12 +85,13 @@ func (r *ValidatorRegistrationRunner) ProcessPreConsensus(logger *zap.Logger, si specSig := phase0.BLSSignature{} copy(specSig[:], fullSig) - if err := r.beacon.SubmitValidatorRegistration(r.BaseRunner.Share.ValidatorPubKey, r.BaseRunner.Share.FeeRecipientAddress, specSig); err != nil { + feeRecipient := r.BaseRunner.Share.FeeRecipientAddress + if err := r.beacon.SubmitValidatorRegistration(r.GetShare().ValidatorPubKey, feeRecipient, specSig); err != nil { return errors.Wrap(err, "could not submit validator registration") } logger.Debug("validator registration submitted successfully", - fields.FeeRecipient(r.BaseRunner.Share.FeeRecipientAddress[:]), + fields.FeeRecipient(feeRecipient[:]), zap.String("signature", hex.EncodeToString(specSig[:]))) r.GetState().Finished = true @@ -118,6 +120,8 @@ func (r *ValidatorRegistrationRunner) expectedPostConsensusRootsAndDomain() ([]s } func (r *ValidatorRegistrationRunner) executeDuty(logger *zap.Logger, duty *genesisspectypes.Duty) error { + logger.Debug("executing validator registration duty", + zap.String("state_fee_recipient", hex.EncodeToString(r.BaseRunner.Share.FeeRecipientAddress[:]))) vr, err := r.calculateValidatorRegistration() if err != nil { return errors.Wrap(err, "could not calculate validator registration") @@ -163,7 +167,7 @@ func (r *ValidatorRegistrationRunner) executeDuty(logger *zap.Logger, duty *gene func (r *ValidatorRegistrationRunner) calculateValidatorRegistration() (*v1.ValidatorRegistration, error) { pk := phase0.BLSPubKey{} - copy(pk[:], r.BaseRunner.Share.ValidatorPubKey) + copy(pk[:], r.GetShare().ValidatorPubKey) epoch := r.BaseRunner.BeaconNetwork.EstimatedEpochAtSlot(r.BaseRunner.State.StartingDuty.Slot) diff --git a/protocol/genesis/ssv/validator/validator.go b/protocol/genesis/ssv/validator/validator.go index 52d356984d..650ddcffb4 100644 --- a/protocol/genesis/ssv/validator/validator.go +++ b/protocol/genesis/ssv/validator/validator.go @@ -107,6 +107,10 @@ func (v *Validator) StartDuty(logger *zap.Logger, duty *genesisspectypes.Duty) e logger = logger.With(fields.Height(specqbft.Height(baseRunner.QBFTController.Height))) } + if duty.Type == genesisspectypes.BNRoleValidatorRegistration { + logger = logger.With(fields.FeeRecipient(v.Share.FeeRecipientAddress[:])) + } + logger.Info("ℹ️ starting duty processing") return dutyRunner.StartNewDuty(logger, duty) diff --git a/scripts/spec-alignment/genesis_differ.config.yaml b/scripts/spec-alignment/genesis_differ.config.yaml index 7758ed636e..2e890c45b6 100644 --- a/scripts/spec-alignment/genesis_differ.config.yaml +++ b/scripts/spec-alignment/genesis_differ.config.yaml @@ -1,4 +1,4 @@ -ApprovedChanges: 
["52b93267ba812308","9f2881f9e89b4c3","f8718ef9598a2d28","ea4da0c78bc1e930","11481543a56b03e7","4bc55d173976f499","5a326429bd7d816a","57938492d36e5b72","ea83b3555f29e44e","39a395cc56c381d8","2092a46a009de5e9","f9e12bb821abda59","74490095cad1f871","fef6a577794897e9","e243efb1fef8baca","b612f4f4bee5726c","b4072ece06d92c84","487d349a6296651e","1329fd2f0f7101e5","6ea163caa000821c","cb2a3fac03c9f70d","c155c7005d298b8a","5462556ab33327ae","66591f5d3e9c299d","ef530512222fa3a3","54f7ee00c5223d56","136792991a713119","519dec1f394a29bb","c16537938b23bb1c","930f8003cc73658a","b4d4b7c288d15580","8e871e3dc302502c","264f6c3cb6c044e","73b442121276436f","a5d665260b9545e7","e76da25dcc1b8b7b","3021b027e65343dc","37abca362396de40","10331350bdd5cea5","ff66465e82a0bce","7008ba0e5fb3bc50","17d86fc521251084","60ee89aed3dca274","cfb5a31338393004","774c67a1117bb600","bbbac3fd539c9da","4120ef6224cd7653","c13c14ac8b912a99","e34eb83c1de17e7b","d60440779e512dda","8b474f07634c3b5b","ac42b9ed129f763c","67809ff9e1f78fba","436d37b16e59e851","d201c716184904d6","422221ab59ac4658","30ed9a822232b7e1","c08c6d84582b86c1","c07315929c5bfdae","751997d95ea9340","7715acc5b4c5aa2","14d6cdfdf92136fc","c9db895746d32d2","a0a0164bd2ecb327","c0cb3984d0a20d8","1c8beb7d60ffa18a","b44005e951042d3","45749213deaece88","6afb57c28a55854c","5619c6b724070584","81385e7b399b3235","856eb69df47300bc","68ab7316969c38b","c8f63fe574c9cd3","a1dd0a169df78d67","4bb11f08323af914","466839f492add207","6c3507bea504fcc","560bb093d1aea569","a13eb5088003f50b","9f5f0eff2dca5e9", "397220931cab52bc","453245a906210497","1eb92714f465d122", "2015d5566f6182e1","9f0799ecd4426e43","20a0cfb49029370f","b5ae2491e369931a","94800037492ba19a","57f89a48ccc5bdb0","a825ac16288e518f","1a8754cea558330e","837449174a662384", "eb4770deec3d69ae"] +ApprovedChanges: ["52b93267ba812308","9f2881f9e89b4c3","f8718ef9598a2d28","ea4da0c78bc1e930","11481543a56b03e7","4bc55d173976f499","5a326429bd7d816a","57938492d36e5b72","ea83b3555f29e44e","39a395cc56c381d8","2092a46a009de5e9","f9e12bb821abda59","74490095cad1f871","fef6a577794897e9","e243efb1fef8baca","b612f4f4bee5726c","b4072ece06d92c84","487d349a6296651e","1329fd2f0f7101e5","6ea163caa000821c","cb2a3fac03c9f70d","c155c7005d298b8a","5462556ab33327ae","66591f5d3e9c299d","ef530512222fa3a3","54f7ee00c5223d56","136792991a713119","519dec1f394a29bb","c16537938b23bb1c","930f8003cc73658a","b4d4b7c288d15580","8e871e3dc302502c","264f6c3cb6c044e","73b442121276436f","a5d665260b9545e7","e76da25dcc1b8b7b","3021b027e65343dc","37abca362396de40","10331350bdd5cea5","ff66465e82a0bce","7008ba0e5fb3bc50","17d86fc521251084","60ee89aed3dca274","cfb5a31338393004","774c67a1117bb600","bbbac3fd539c9da","4120ef6224cd7653","c13c14ac8b912a99","e34eb83c1de17e7b","d60440779e512dda","8b474f07634c3b5b","ac42b9ed129f763c","67809ff9e1f78fba","436d37b16e59e851","d201c716184904d6","422221ab59ac4658","30ed9a822232b7e1","c08c6d84582b86c1","c07315929c5bfdae","751997d95ea9340","7715acc5b4c5aa2","14d6cdfdf92136fc","c9db895746d32d2","a0a0164bd2ecb327","c0cb3984d0a20d8","1c8beb7d60ffa18a","b44005e951042d3","45749213deaece88","6afb57c28a55854c","5619c6b724070584","81385e7b399b3235","856eb69df47300bc","68ab7316969c38b","c8f63fe574c9cd3","a1dd0a169df78d67","4bb11f08323af914","466839f492add207","6c3507bea504fcc","560bb093d1aea569","a13eb5088003f50b","9f5f0eff2dca5e9", "397220931cab52bc","453245a906210497","1eb92714f465d122", "2015d5566f6182e1","9f0799ecd4426e43","20a0cfb49029370f","b5ae2491e369931a","94800037492ba19a","57f89a48ccc5bdb0","a825ac16288e518f","1a8754cea558330e","837449174a662384", 
"eb4770deec3d69ae","88c54a52b5c156ec","7b39050170e98fbe"] IgnoredIdentifiers: - logger From 074624fc7fca1b034a09f774955aa43d23f8cbbd Mon Sep 17 00:00:00 2001 From: rehs0y Date: Sun, 22 Sep 2024 14:29:40 +0300 Subject: [PATCH 20/35] alan testnet: disable protocol ID & set fork epochs (#1750) * remove protocol ID from discovery. * set fork epoch for holesky to 84600, // Oct-08-2024 12:00:00 PM UTC * set mainnet fork epoch to far future. --- network/discovery/options.go | 5 +---- networkconfig/holesky.go | 1 + networkconfig/mainnet.go | 1 + utils/boot_node/node.go | 5 +---- 4 files changed, 4 insertions(+), 8 deletions(-) diff --git a/network/discovery/options.go b/network/discovery/options.go index 5cb359d231..d1bf67ac92 100644 --- a/network/discovery/options.go +++ b/network/discovery/options.go @@ -16,8 +16,6 @@ import ( "github.com/ethereum/go-ethereum/p2p/discover" ) -var SSVProtocolID = [6]byte{'s', 's', 'v', 'd', 'v', '5'} - // DiscV5Options for creating a new discv5 listener type DiscV5Options struct { // StoragePath is the path used to store the DB (DHT) @@ -88,8 +86,7 @@ func (opts *DiscV5Options) IPs() (net.IP, net.IP, string) { // DiscV5Cfg creates discv5 config from the options func (opts *DiscV5Options) DiscV5Cfg(logger *zap.Logger) (*discover.Config, error) { dv5Cfg := discover.Config{ - PrivateKey: opts.NetworkKey, - V5ProtocolID: &SSVProtocolID, + PrivateKey: opts.NetworkKey, } if len(opts.Bootnodes) > 0 { bootnodes, err := ParseENR(nil, false, opts.Bootnodes...) diff --git a/networkconfig/holesky.go b/networkconfig/holesky.go index 0b8ca1c891..ff68e4f5fc 100644 --- a/networkconfig/holesky.go +++ b/networkconfig/holesky.go @@ -14,6 +14,7 @@ var Holesky = NetworkConfig{ GenesisDomainType: spectypes.DomainType{0x0, 0x0, 0x5, 0x1}, AlanDomainType: spectypes.DomainType{0x0, 0x0, 0x5, 0x2}, GenesisEpoch: 1, + AlanForkEpoch: 84600, // Oct-08-2024 12:00:00 PM UTC RegistrySyncOffset: new(big.Int).SetInt64(181612), RegistryContractAddr: "0x38A4794cCEd47d3baf7370CcC43B560D3a1beEFA", Bootnodes: []string{ diff --git a/networkconfig/mainnet.go b/networkconfig/mainnet.go index 05fa2e92be..43627e2be2 100644 --- a/networkconfig/mainnet.go +++ b/networkconfig/mainnet.go @@ -14,6 +14,7 @@ var Mainnet = NetworkConfig{ GenesisDomainType: spectypes.GenesisMainnet, AlanDomainType: spectypes.AlanMainnet, GenesisEpoch: 218450, + AlanForkEpoch: 9999999999, RegistrySyncOffset: new(big.Int).SetInt64(17507487), RegistryContractAddr: "0xDD9BC35aE942eF0cFa76930954a156B3fF30a4E1", Bootnodes: []string{ diff --git a/utils/boot_node/node.go b/utils/boot_node/node.go index c1735c3eb2..71c87497e9 100644 --- a/utils/boot_node/node.go +++ b/utils/boot_node/node.go @@ -24,8 +24,6 @@ import ( "github.com/ssvlabs/ssv/utils" ) -var SSVProtocolID = [6]byte{'s', 's', 'v', 'd', 'v', '5'} - // Options contains options to create the node type Options struct { PrivateKey string `yaml:"PrivateKey" env:"BOOT_NODE_PRIVATE_KEY" env-description:"boot node private key (default will generate new)"` @@ -103,8 +101,7 @@ func (n *bootNode) Start(ctx context.Context, logger *zap.Logger) error { log.Fatal("Failed to get p2p privateKey", zap.Error(err)) } cfg := discover.Config{ - PrivateKey: privKey, - V5ProtocolID: &SSVProtocolID, + PrivateKey: privKey, } ipAddr, err := network.ExternalIP() // ipAddr = "127.0.0.1" From 0dd67f29e0b94dc128bef841ac897b775cc5e81a Mon Sep 17 00:00:00 2001 From: moshe-blox Date: Sun, 22 Sep 2024 15:37:41 +0300 Subject: [PATCH 21/35] Revert "alan testnet: disable protocol ID & set fork epochs (#1750)" This 
reverts commit 074624fc7fca1b034a09f774955aa43d23f8cbbd. --- network/discovery/options.go | 5 ++++- networkconfig/holesky.go | 1 - networkconfig/mainnet.go | 1 - utils/boot_node/node.go | 5 ++++- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/network/discovery/options.go b/network/discovery/options.go index d1bf67ac92..5cb359d231 100644 --- a/network/discovery/options.go +++ b/network/discovery/options.go @@ -16,6 +16,8 @@ import ( "github.com/ethereum/go-ethereum/p2p/discover" ) +var SSVProtocolID = [6]byte{'s', 's', 'v', 'd', 'v', '5'} + // DiscV5Options for creating a new discv5 listener type DiscV5Options struct { // StoragePath is the path used to store the DB (DHT) @@ -86,7 +88,8 @@ func (opts *DiscV5Options) IPs() (net.IP, net.IP, string) { // DiscV5Cfg creates discv5 config from the options func (opts *DiscV5Options) DiscV5Cfg(logger *zap.Logger) (*discover.Config, error) { dv5Cfg := discover.Config{ - PrivateKey: opts.NetworkKey, + PrivateKey: opts.NetworkKey, + V5ProtocolID: &SSVProtocolID, } if len(opts.Bootnodes) > 0 { bootnodes, err := ParseENR(nil, false, opts.Bootnodes...) diff --git a/networkconfig/holesky.go b/networkconfig/holesky.go index ff68e4f5fc..0b8ca1c891 100644 --- a/networkconfig/holesky.go +++ b/networkconfig/holesky.go @@ -14,7 +14,6 @@ var Holesky = NetworkConfig{ GenesisDomainType: spectypes.DomainType{0x0, 0x0, 0x5, 0x1}, AlanDomainType: spectypes.DomainType{0x0, 0x0, 0x5, 0x2}, GenesisEpoch: 1, - AlanForkEpoch: 84600, // Oct-08-2024 12:00:00 PM UTC RegistrySyncOffset: new(big.Int).SetInt64(181612), RegistryContractAddr: "0x38A4794cCEd47d3baf7370CcC43B560D3a1beEFA", Bootnodes: []string{ diff --git a/networkconfig/mainnet.go b/networkconfig/mainnet.go index 43627e2be2..05fa2e92be 100644 --- a/networkconfig/mainnet.go +++ b/networkconfig/mainnet.go @@ -14,7 +14,6 @@ var Mainnet = NetworkConfig{ GenesisDomainType: spectypes.GenesisMainnet, AlanDomainType: spectypes.AlanMainnet, GenesisEpoch: 218450, - AlanForkEpoch: 9999999999, RegistrySyncOffset: new(big.Int).SetInt64(17507487), RegistryContractAddr: "0xDD9BC35aE942eF0cFa76930954a156B3fF30a4E1", Bootnodes: []string{ diff --git a/utils/boot_node/node.go b/utils/boot_node/node.go index 71c87497e9..c1735c3eb2 100644 --- a/utils/boot_node/node.go +++ b/utils/boot_node/node.go @@ -24,6 +24,8 @@ import ( "github.com/ssvlabs/ssv/utils" ) +var SSVProtocolID = [6]byte{'s', 's', 'v', 'd', 'v', '5'} + // Options contains options to create the node type Options struct { PrivateKey string `yaml:"PrivateKey" env:"BOOT_NODE_PRIVATE_KEY" env-description:"boot node private key (default will generate new)"` @@ -101,7 +103,8 @@ func (n *bootNode) Start(ctx context.Context, logger *zap.Logger) error { log.Fatal("Failed to get p2p privateKey", zap.Error(err)) } cfg := discover.Config{ - PrivateKey: privKey, + PrivateKey: privKey, + V5ProtocolID: &SSVProtocolID, } ipAddr, err := network.ExternalIP() // ipAddr = "127.0.0.1" From e4180469685d17873d1c45acbf360826dc087044 Mon Sep 17 00:00:00 2001 From: rehs0y Date: Mon, 23 Sep 2024 15:32:50 +0300 Subject: [PATCH 22/35] add dutyid after we know slot (#1754) --- message/validation/logger_fields.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/message/validation/logger_fields.go b/message/validation/logger_fields.go index e747a0503d..a94306d813 100644 --- a/message/validation/logger_fields.go +++ b/message/validation/logger_fields.go @@ -69,10 +69,6 @@ func (mv *messageValidator) buildLoggerFields(decodedMessage *queue.SSVMessage) descriptor.Role 
= decodedMessage.SSVMessage.GetID().GetRoleType() descriptor.SSVMessageType = decodedMessage.SSVMessage.GetType() - if mv.logger.Level() == zap.DebugLevel { - mv.addDutyIDField(descriptor) - } - switch m := decodedMessage.Body.(type) { case *specqbft.Message: if m != nil { @@ -86,6 +82,10 @@ func (mv *messageValidator) buildLoggerFields(decodedMessage *queue.SSVMessage) } } + if mv.logger.Level() == zap.DebugLevel { + mv.addDutyIDField(descriptor) + } + return descriptor } From 0d7cead015b2466e06935c1e4e7267c051a2f27c Mon Sep 17 00:00:00 2001 From: rehs0y Date: Mon, 23 Sep 2024 15:49:37 +0300 Subject: [PATCH 23/35] fix: (bug) geth won't recognize ecdsa curve (#1755) * chore: fix typo in the log message field (#1744) * set curve to S256 to enable geth discovery to recognize it * fix test --------- Co-authored-by: oleg-ssvlabs Co-authored-by: moshe-blox --- network/commons/keys.go | 7 +++++-- network/commons/keys_test.go | 15 ++++++++------- protocol/genesis/qbft/instance/prepare.go | 2 +- protocol/v2/qbft/instance/prepare.go | 2 +- 4 files changed, 15 insertions(+), 11 deletions(-) diff --git a/network/commons/keys.go b/network/commons/keys.go index 59c9e23ade..db1bca55f7 100644 --- a/network/commons/keys.go +++ b/network/commons/keys.go @@ -6,6 +6,8 @@ import ( "crypto/rsa" "crypto/x509" + gcrypto "github.com/ethereum/go-ethereum/crypto" + "github.com/btcsuite/btcd/btcec/v2" "github.com/libp2p/go-libp2p/core/crypto" "github.com/pkg/errors" @@ -20,8 +22,9 @@ func ECDSAPrivFromInterface(privkey crypto.PrivKey) (*ecdsa.PrivateKey, error) { } privKey, _ := btcec.PrivKeyFromBytes(rawKey) - - return privKey.ToECDSA(), nil + ecdsaKey := privKey.ToECDSA() + ecdsaKey.Curve = gcrypto.S256() // temporary hack, so libp2p Secp256k1 is recognized as geth Secp256k1 in disc v5.1 + return ecdsaKey, nil } // ECDSAPrivToInterface converts ecdsa.PrivateKey to crypto.PrivKey diff --git a/network/commons/keys_test.go b/network/commons/keys_test.go index 56ad6017a0..7238343ce6 100644 --- a/network/commons/keys_test.go +++ b/network/commons/keys_test.go @@ -1,10 +1,10 @@ package commons import ( - "crypto/ecdsa" "encoding/hex" "testing" + gcrypto "github.com/ethereum/go-ethereum/crypto" "github.com/libp2p/go-libp2p/core/crypto" "github.com/stretchr/testify/require" ) @@ -22,11 +22,12 @@ func TestECDSAPrivFromInterface(t *testing.T) { require.NoError(t, err) require.NotNil(t, ecdsaPrivKey) - require.IsType(t, &ecdsa.PrivateKey{}, ecdsaPrivKey) - require.NotNil(t, ecdsaPrivKey.D) - require.NotNil(t, ecdsaPrivKey.X) - require.NotNil(t, ecdsaPrivKey.Y) - require.NotNil(t, ecdsaPrivKey.Curve) - require.Equal(t, ecdsaPrivKey.D.String(), "6792055902439951130224479433662882604105028919500185693322687975860017874966") + require.Equal(t, ecdsaPrivKey.X.String(), "22653320514410971312249902166871933285664081749262857866749567141267477006697") + require.Equal(t, ecdsaPrivKey.Y.String(), "103853204202400939811590846319591563498962634102053730872842929232997685705657") + require.Equal(t, ecdsaPrivKey.Curve, gcrypto.S256()) + + require.Equal(t, ecdsaPrivKey.PublicKey.X.String(), "22653320514410971312249902166871933285664081749262857866749567141267477006697") + require.Equal(t, ecdsaPrivKey.PublicKey.Y.String(), "103853204202400939811590846319591563498962634102053730872842929232997685705657") + require.Equal(t, ecdsaPrivKey.PublicKey.Curve, gcrypto.S256()) } diff --git a/protocol/genesis/qbft/instance/prepare.go b/protocol/genesis/qbft/instance/prepare.go index 50c1d48a86..54329a63f9 100644 --- 
a/protocol/genesis/qbft/instance/prepare.go +++ b/protocol/genesis/qbft/instance/prepare.go @@ -59,7 +59,7 @@ func (i *Instance) uponPrepare(logger *zap.Logger, signedPrepare *genesisspecqbf logger.Debug("📢 broadcasting commit message", fields.Round(specqbft.Round(i.State.Round)), - zap.Any("commit_singers", commitMsg.Signers), + zap.Any("commit_signers", commitMsg.Signers), fields.Root(commitMsg.Message.Root)) if err := i.Broadcast(logger, commitMsg); err != nil { diff --git a/protocol/v2/qbft/instance/prepare.go b/protocol/v2/qbft/instance/prepare.go index e2590a7a36..2919c1616a 100644 --- a/protocol/v2/qbft/instance/prepare.go +++ b/protocol/v2/qbft/instance/prepare.go @@ -56,7 +56,7 @@ func (i *Instance) uponPrepare(logger *zap.Logger, msg *specqbft.ProcessingMessa logger.Debug("📢 broadcasting commit message", fields.Round(i.State.Round), - zap.Any("commit_singers", commitMsg.OperatorIDs), + zap.Any("commit_signers", commitMsg.OperatorIDs), fields.Root(proposedRoot)) if err := i.Broadcast(logger, commitMsg); err != nil { From a952130abc68db7d49e08ea92c7535ce6d41807e Mon Sep 17 00:00:00 2001 From: olegshmuelov <45327364+olegshmuelov@users.noreply.github.com> Date: Mon, 23 Sep 2024 16:27:25 +0300 Subject: [PATCH 24/35] fix: committee role duty limit (#1756) * chore: fix typo in the log message field (#1744) * implementation * revert duty count limit check to inclusive comparison (>=) * add comment --------- Co-authored-by: oleg-ssvlabs --- message/validation/common_checks.go | 28 ++++++++++++++++++---- message/validation/consensus_validation.go | 2 +- message/validation/partial_validation.go | 4 ++-- 3 files changed, 27 insertions(+), 7 deletions(-) diff --git a/message/validation/common_checks.go b/message/validation/common_checks.go index 76e611d2a1..4a3d93e4eb 100644 --- a/message/validation/common_checks.go +++ b/message/validation/common_checks.go @@ -54,16 +54,19 @@ func (mv *messageValidator) messageLateness(slot phase0.Slot, role spectypes.Run func (mv *messageValidator) validateDutyCount( msgID spectypes.MessageID, msgSlot phase0.Slot, - validatorIndexCount int, + validatorIndices []phase0.ValidatorIndex, signerStateBySlot *OperatorState, ) error { dutyCount := signerStateBySlot.DutyCount(mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot)) - dutyLimit, exists := mv.dutyLimit(msgID, msgSlot, validatorIndexCount) + dutyLimit, exists := mv.dutyLimit(msgID, msgSlot, validatorIndices) if !exists { return nil } + // Check if the duty count exceeds or equals the duty limit. + // This validation occurs before the state is updated, which is why + // the first count starts at 0 and we use an inclusive comparison (>=). 
if dutyCount >= dutyLimit { err := ErrTooManyDutiesPerEpoch err.got = fmt.Sprintf("%v (role %v)", dutyCount, msgID.GetRoleType()) @@ -74,7 +77,7 @@ func (mv *messageValidator) validateDutyCount( return nil } -func (mv *messageValidator) dutyLimit(msgID spectypes.MessageID, slot phase0.Slot, validatorIndexCount int) (int, bool) { +func (mv *messageValidator) dutyLimit(msgID spectypes.MessageID, slot phase0.Slot, validatorIndices []phase0.ValidatorIndex) (int, bool) { switch msgID.GetRoleType() { case spectypes.RoleVoluntaryExit: pk := phase0.BLSPubKey{} @@ -86,7 +89,24 @@ func (mv *messageValidator) dutyLimit(msgID spectypes.MessageID, slot phase0.Slo return 2, true case spectypes.RoleCommittee: - return 2 * validatorIndexCount, true + validatorIndexCount := len(validatorIndices) + slotsPerEpoch := int(mv.netCfg.Beacon.SlotsPerEpoch()) + + // Skip duty search if validators * 2 exceeds slots per epoch, + // as the maximum duties per epoch is capped at the number of slots. + // This avoids unnecessary checks. + if validatorIndexCount < slotsPerEpoch/2 { + // Check if there is at least one validator in the sync committee. + // If so, the duty limit is equal to the number of slots per epoch. + period := mv.netCfg.Beacon.EstimatedSyncCommitteePeriodAtEpoch(mv.netCfg.Beacon.EstimatedEpochAtSlot(slot)) + for _, i := range validatorIndices { + if mv.dutyStore.SyncCommittee.Duty(period, i) != nil { + return slotsPerEpoch, true + } + } + } + + return min(slotsPerEpoch, 2*validatorIndexCount), true default: return 0, false diff --git a/message/validation/consensus_validation.go b/message/validation/consensus_validation.go index 41f43a8cc4..ce34d2c89b 100644 --- a/message/validation/consensus_validation.go +++ b/message/validation/consensus_validation.go @@ -268,7 +268,7 @@ func (mv *messageValidator) validateQBFTMessageByDutyLogic( // - else, accept for _, signer := range signedSSVMessage.OperatorIDs { signerStateBySlot := state.GetOrCreate(signer) - if err := mv.validateDutyCount(signedSSVMessage.SSVMessage.GetID(), msgSlot, len(validatorIndices), signerStateBySlot); err != nil { + if err := mv.validateDutyCount(signedSSVMessage.SSVMessage.GetID(), msgSlot, validatorIndices, signerStateBySlot); err != nil { return err } } diff --git a/message/validation/partial_validation.go b/message/validation/partial_validation.go index 8ddaca41a6..5a6006696a 100644 --- a/message/validation/partial_validation.go +++ b/message/validation/partial_validation.go @@ -180,15 +180,15 @@ func (mv *messageValidator) validatePartialSigMessagesByDutyLogic( return err } - clusterValidatorCount := len(committeeInfo.indices) // Rule: valid number of duties per epoch: // - 2 for aggregation, voluntary exit and validator registration // - 2*V for Committee duty (where V is the number of validators in the cluster) (if no validator is doing sync committee in this epoch) // - else, accept - if err := mv.validateDutyCount(signedSSVMessage.SSVMessage.GetID(), messageSlot, clusterValidatorCount, signerStateBySlot); err != nil { + if err := mv.validateDutyCount(signedSSVMessage.SSVMessage.GetID(), messageSlot, committeeInfo.indices, signerStateBySlot); err != nil { return err } + clusterValidatorCount := len(committeeInfo.indices) partialSignatureMessageCount := len(partialSignatureMessages.Messages) if signedSSVMessage.SSVMessage.MsgID.GetRoleType() == spectypes.RoleCommittee { From c22c06ad32d0c41f0b88635e78ec769b5acb49fc Mon Sep 17 00:00:00 2001 From: moshe-blox <89339422+moshe-blox@users.noreply.github.com> Date: Mon, 23 Sep 2024 
20:22:36 +0300 Subject: [PATCH 25/35] fix: (validation) duty count check (#1758) * fix: (validation) duty count check * comments --- message/validation/common_checks.go | 2 +- message/validation/validation_test.go | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/message/validation/common_checks.go b/message/validation/common_checks.go index 4a3d93e4eb..7846bf2118 100644 --- a/message/validation/common_checks.go +++ b/message/validation/common_checks.go @@ -67,7 +67,7 @@ func (mv *messageValidator) validateDutyCount( // Check if the duty count exceeds or equals the duty limit. // This validation occurs before the state is updated, which is why // the first count starts at 0 and we use an inclusive comparison (>=). - if dutyCount >= dutyLimit { + if dutyCount > dutyLimit { err := ErrTooManyDutiesPerEpoch err.got = fmt.Sprintf("%v (role %v)", dutyCount, msgID.GetRoleType()) err.want = fmt.Sprintf("less than %v", dutyLimit) diff --git a/message/validation/validation_test.go b/message/validation/validation_test.go index 9935bf3a4a..c8a2548cf2 100644 --- a/message/validation/validation_test.go +++ b/message/validation/validation_test.go @@ -572,16 +572,34 @@ func Test_ValidateSSVMessage(t *testing.T) { identifier := spectypes.NewMsgID(netCfg.DomainType(), ks.ValidatorPK.Serialize(), role) signedSSVMessage := generateSignedMessage(ks, identifier, slot) + // First duty. topicID := commons.CommitteeTopicID(committeeID)[0] _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, netCfg.Beacon.GetSlotStartTime(slot)) require.NoError(t, err) + // Second duty. signedSSVMessage = generateSignedMessage(ks, identifier, slot+4) _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, netCfg.Beacon.GetSlotStartTime(slot+4)) require.NoError(t, err) + // Second duty (another message). + signedSSVMessage = generateSignedMessage(ks, identifier, slot+4, func(qbftMessage *specqbft.Message) { + qbftMessage.MsgType = specqbft.RoundChangeMsgType + }) + _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, netCfg.Beacon.GetSlotStartTime(slot+4)) + require.NoError(t, err) + + // Third duty. + // TODO: this should fail, see https://github.com/ssvlabs/ssv/pull/1758 signedSSVMessage = generateSignedMessage(ks, identifier, slot+8) _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, netCfg.Beacon.GetSlotStartTime(slot+8)) + require.NoError(t, err) + + // Third duty (another message). 
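The added test cases trace the boundary this change moves: the count is read before the current duty is recorded, so with the previous ">=" comparison a follow-up message for the last allowed duty already hit the limit, while ">" lets it through at the cost of admitting one extra duty (the TODO above). A small self-contained sketch of that counting, assuming the per-epoch count tracks distinct duty slots and the limit is 2:

package main

import "fmt"

// checkAndRecord mimics the assumed flow: the duty count is read before the
// current duty is recorded, then compared against the limit.
func checkAndRecord(recorded map[uint64]struct{}, slot uint64, limit int, inclusive bool) error {
	count := len(recorded)
	if (inclusive && count >= limit) || (!inclusive && count > limit) {
		return fmt.Errorf("too many duties: count=%d limit=%d", count, limit)
	}
	recorded[slot] = struct{}{} // the duty is only counted once the check passes
	return nil
}

func main() {
	for _, inclusive := range []bool{true, false} {
		recorded := map[uint64]struct{}{}
		fmt.Println("inclusive comparison:", inclusive)
		// The 2nd and 3rd duties each send two messages, like the test above.
		for _, slot := range []uint64{32, 36, 36, 40, 40} {
			err := checkAndRecord(recorded, slot, 2, inclusive)
			fmt.Printf("  slot %d: err=%v\n", slot, err)
		}
	}
}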
+ signedSSVMessage = generateSignedMessage(ks, identifier, slot+8, func(qbftMessage *specqbft.Message) { + qbftMessage.MsgType = specqbft.RoundChangeMsgType + }) + _, err = validator.handleSignedSSVMessage(signedSSVMessage, topicID, netCfg.Beacon.GetSlotStartTime(slot+8)) require.ErrorContains(t, err, ErrTooManyDutiesPerEpoch.Error()) }) From d94f219bb21b4c0c1986db7d8cc6de5a5ec44209 Mon Sep 17 00:00:00 2001 From: olegshmuelov <45327364+olegshmuelov@users.noreply.github.com> Date: Thu, 26 Sep 2024 11:00:52 +0300 Subject: [PATCH 26/35] feat: prevent incompatible alan version downgrade (#1762) * draft * get the alan network name from network config only * clean * lint * fix testNetworkName * if config is not found, mark the migration as completed * make alanforkname const --- cli/operator/node.go | 18 ++++---- cli/operator/node_test.go | 35 +++++++-------- ...onfiglock_add_alan_fork_to_network_name.go | 44 +++++++++++++++++++ migrations/migrations.go | 1 + networkconfig/config.go | 7 +++ operator/storage/config_lock.go | 9 ++-- operator/storage/config_lock_test.go | 8 ++-- 7 files changed, 85 insertions(+), 37 deletions(-) create mode 100644 migrations/migration_4_configlock_add_alan_fork_to_network_name.go diff --git a/cli/operator/node.go b/cli/operator/node.go index bf9676c200..98bd58e065 100644 --- a/cli/operator/node.go +++ b/cli/operator/node.go @@ -168,7 +168,9 @@ var StartNodeCmd = &cobra.Command{ usingLocalEvents := len(cfg.LocalEventsPath) != 0 - verifyConfig(logger, nodeStorage, networkConfig.Name, usingLocalEvents) + if err := validateConfig(nodeStorage, networkConfig.AlanForkNetworkName(), usingLocalEvents); err != nil { + logger.Fatal("failed to validate config", zap.Error(err)) + } ekmHashedKey, err := operatorPrivKey.EKMHash() if err != nil { @@ -420,10 +422,10 @@ var StartNodeCmd = &cobra.Command{ }, } -func verifyConfig(logger *zap.Logger, nodeStorage operatorstorage.Storage, networkName string, usingLocalEvents bool) { +func validateConfig(nodeStorage operatorstorage.Storage, networkName string, usingLocalEvents bool) error { storedConfig, foundConfig, err := nodeStorage.GetConfig(nil) if err != nil { - logger.Fatal("could not check saved local events config", zap.Error(err)) + return fmt.Errorf("failed to get stored config: %w", err) } currentConfig := &operatorstorage.ConfigLock{ @@ -432,16 +434,16 @@ func verifyConfig(logger *zap.Logger, nodeStorage operatorstorage.Storage, netwo } if foundConfig { - if err := storedConfig.EnsureSameWith(currentConfig); err != nil { - err = fmt.Errorf("incompatible config change: %w", err) - logger.Fatal(err.Error()) + if err := storedConfig.ValidateCompatibility(currentConfig); err != nil { + return fmt.Errorf("incompatible config change: %w", err) } } else { if err := nodeStorage.SaveConfig(nil, currentConfig); err != nil { - err = fmt.Errorf("failed to store config: %w", err) - logger.Fatal(err.Error()) + return fmt.Errorf("failed to store config: %w", err) } } + + return nil } func init() { diff --git a/cli/operator/node_test.go b/cli/operator/node_test.go index 7151ffc287..297a6f61fd 100644 --- a/cli/operator/node_test.go +++ b/cli/operator/node_test.go @@ -22,14 +22,14 @@ func Test_verifyConfig(t *testing.T) { nodeStorage, err := operatorstorage.NewNodeStorage(logger, db) require.NoError(t, err) - testNetworkName := networkconfig.TestNetwork.Name + testNetworkName := networkconfig.TestNetwork.AlanForkNetworkName() t.Run("no config in DB", func(t *testing.T) { c := &operatorstorage.ConfigLock{ NetworkName: testNetworkName, 
UsingLocalEvents: true, } - verifyConfig(logger, nodeStorage, c.NetworkName, c.UsingLocalEvents) + require.NoError(t, validateConfig(nodeStorage, c.NetworkName, c.UsingLocalEvents)) storedConfig, found, err := nodeStorage.GetConfig(nil) require.NoError(t, err) @@ -45,8 +45,7 @@ func Test_verifyConfig(t *testing.T) { UsingLocalEvents: true, } require.NoError(t, nodeStorage.SaveConfig(nil, c)) - - verifyConfig(logger, nodeStorage, testNetworkName, true) + require.NoError(t, validateConfig(nodeStorage, c.NetworkName, c.UsingLocalEvents)) storedConfig, found, err := nodeStorage.GetConfig(nil) require.NoError(t, err) @@ -62,10 +61,9 @@ func Test_verifyConfig(t *testing.T) { UsingLocalEvents: false, } require.NoError(t, nodeStorage.SaveConfig(nil, c)) - - require.PanicsWithValue(t, - "incompatible config change: can't change network from \"testnet1\" to \"testnet\" in an existing database, it must be removed first", - func() { verifyConfig(logger, nodeStorage, testNetworkName, true) }, + require.ErrorContains(t, + validateConfig(nodeStorage, testNetworkName, true), + "incompatible config change: network mismatch. Stored network testnet:alan1 does not match current network testnet:alan. The database must be removed or reinitialized", ) storedConfig, found, err := nodeStorage.GetConfig(nil) @@ -82,10 +80,9 @@ func Test_verifyConfig(t *testing.T) { UsingLocalEvents: true, } require.NoError(t, nodeStorage.SaveConfig(nil, c)) - - require.PanicsWithValue(t, - "incompatible config change: can't change network from \"testnet1\" to \"testnet\" in an existing database, it must be removed first", - func() { verifyConfig(logger, nodeStorage, testNetworkName, true) }, + require.ErrorContains(t, + validateConfig(nodeStorage, testNetworkName, c.UsingLocalEvents), + "incompatible config change: network mismatch. Stored network testnet:alan1 does not match current network testnet:alan. The database must be removed or reinitialized", ) storedConfig, found, err := nodeStorage.GetConfig(nil) @@ -102,10 +99,9 @@ func Test_verifyConfig(t *testing.T) { UsingLocalEvents: false, } require.NoError(t, nodeStorage.SaveConfig(nil, c)) - - require.PanicsWithValue(t, - "incompatible config change: can't switch on localevents, database must be removed first", - func() { verifyConfig(logger, nodeStorage, testNetworkName, true) }, + require.ErrorContains(t, + validateConfig(nodeStorage, c.NetworkName, true), + "incompatible config change: enabling local events is not allowed. The database must be removed or reinitialized", ) storedConfig, found, err := nodeStorage.GetConfig(nil) @@ -122,10 +118,9 @@ func Test_verifyConfig(t *testing.T) { UsingLocalEvents: true, } require.NoError(t, nodeStorage.SaveConfig(nil, c)) - - require.PanicsWithValue(t, - "incompatible config change: can't switch off localevents, database must be removed first", - func() { verifyConfig(logger, nodeStorage, testNetworkName, false) }, + require.ErrorContains(t, + validateConfig(nodeStorage, c.NetworkName, false), + "incompatible config change: disabling local events is not allowed. 
The database must be removed or reinitialized", ) storedConfig, found, err := nodeStorage.GetConfig(nil) diff --git a/migrations/migration_4_configlock_add_alan_fork_to_network_name.go b/migrations/migration_4_configlock_add_alan_fork_to_network_name.go new file mode 100644 index 0000000000..844f91a05d --- /dev/null +++ b/migrations/migration_4_configlock_add_alan_fork_to_network_name.go @@ -0,0 +1,44 @@ +package migrations + +import ( + "context" + "fmt" + + "go.uber.org/zap" + + "github.com/ssvlabs/ssv/networkconfig" + "github.com/ssvlabs/ssv/storage/basedb" +) + +// This migration adds the Alan fork name to the network name +var migration_4_configlock_add_alan_fork_to_network_name = Migration{ + Name: "migration_4_configlock_add_alan_fork_to_network_name", + Run: func(ctx context.Context, logger *zap.Logger, opt Options, key []byte, completed CompletedFunc) error { + return opt.Db.Update(func(txn basedb.Txn) error { + nodeStorage, err := opt.nodeStorage(logger) + if err != nil { + return fmt.Errorf("failed to get node storage: %w", err) + } + + config, found, err := nodeStorage.GetConfig(txn) + if err != nil { + return fmt.Errorf("failed to get config: %w", err) + } + + // If config is not found, it means the node is not initialized yet + if found { + networkConfig, err := networkconfig.GetNetworkConfigByName(config.NetworkName) + if err != nil { + return fmt.Errorf("failed to get network config by name: %w", err) + } + + config.NetworkName = networkConfig.AlanForkNetworkName() + if err := nodeStorage.SaveConfig(txn, config); err != nil { + return fmt.Errorf("failed to save config: %w", err) + } + } + + return completed(txn) + }) + }, +} diff --git a/migrations/migrations.go b/migrations/migrations.go index 6372956560..521baa73e7 100644 --- a/migrations/migrations.go +++ b/migrations/migrations.go @@ -26,6 +26,7 @@ var ( migration_1_example, migration_2_encrypt_shares, migration_3_drop_registry_data, + migration_4_configlock_add_alan_fork_to_network_name, } ) diff --git a/networkconfig/config.go b/networkconfig/config.go index 6db21466fa..15f9d5adad 100644 --- a/networkconfig/config.go +++ b/networkconfig/config.go @@ -9,6 +9,7 @@ import ( "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/ssvlabs/ssv-spec/types" + "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon" ) @@ -20,6 +21,8 @@ var SupportedConfigs = map[string]NetworkConfig{ HoleskyE2E.Name: HoleskyE2E, } +const alanForkName = "alan" + func GetNetworkConfigByName(name string) (NetworkConfig, error) { if network, ok := SupportedConfigs[name]; ok { return network, nil @@ -57,6 +60,10 @@ func (n NetworkConfig) String() string { return string(b) } +func (n NetworkConfig) AlanForkNetworkName() string { + return fmt.Sprintf("%s:%s", n.Name, alanForkName) +} + func (n NetworkConfig) PastAlanFork() bool { return n.Beacon.EstimatedCurrentEpoch() >= n.AlanForkEpoch } diff --git a/operator/storage/config_lock.go b/operator/storage/config_lock.go index 5d43c74aad..95719089e1 100644 --- a/operator/storage/config_lock.go +++ b/operator/storage/config_lock.go @@ -9,18 +9,17 @@ type ConfigLock struct { UsingLocalEvents bool `json:"using_local_events"` } -func (stored *ConfigLock) EnsureSameWith(current *ConfigLock) error { +func (stored *ConfigLock) ValidateCompatibility(current *ConfigLock) error { if stored.NetworkName != current.NetworkName { - return fmt.Errorf("can't change network from %q to %q in an existing database, it must be removed first", - stored.NetworkName, current.NetworkName) + return 
fmt.Errorf("network mismatch. Stored network %s does not match current network %s. The database must be removed or reinitialized", stored.NetworkName, current.NetworkName) } if stored.UsingLocalEvents && !current.UsingLocalEvents { - return fmt.Errorf("can't switch off localevents, database must be removed first") + return fmt.Errorf("disabling local events is not allowed. The database must be removed or reinitialized") } if !stored.UsingLocalEvents && current.UsingLocalEvents { - return fmt.Errorf("can't switch on localevents, database must be removed first") + return fmt.Errorf("enabling local events is not allowed. The database must be removed or reinitialized") } return nil diff --git a/operator/storage/config_lock_test.go b/operator/storage/config_lock_test.go index 435751c605..1b08dbed27 100644 --- a/operator/storage/config_lock_test.go +++ b/operator/storage/config_lock_test.go @@ -18,7 +18,7 @@ func TestConfigLock(t *testing.T) { UsingLocalEvents: true, } - require.NoError(t, c1.EnsureSameWith(c2)) + require.NoError(t, c1.ValidateCompatibility(c2)) }) t.Run("all fields are different", func(t *testing.T) { @@ -32,7 +32,7 @@ func TestConfigLock(t *testing.T) { UsingLocalEvents: false, } - require.Error(t, c1.EnsureSameWith(c2)) + require.Error(t, c1.ValidateCompatibility(c2)) }) t.Run("only network name is different", func(t *testing.T) { @@ -46,7 +46,7 @@ func TestConfigLock(t *testing.T) { UsingLocalEvents: true, } - require.Error(t, c1.EnsureSameWith(c2)) + require.Error(t, c1.ValidateCompatibility(c2)) }) t.Run("only local events usage is different", func(t *testing.T) { @@ -60,6 +60,6 @@ func TestConfigLock(t *testing.T) { UsingLocalEvents: false, } - require.Error(t, c1.EnsureSameWith(c2)) + require.Error(t, c1.ValidateCompatibility(c2)) }) } From 8c14d391e8e21eeb1bf35c92343549f02771281d Mon Sep 17 00:00:00 2001 From: Anton Korpusenko Date: Wed, 2 Oct 2024 12:48:34 +0300 Subject: [PATCH 27/35] fix: Potential Vulnerabilities Identified Through Static Code Analysis (#1736) * removed rand.Seed usage * removed few type conflicts + returned back linter rule G115 * few more gosec G115 fixes * few fixes and comments added * minor changes * added //nolint:gosec in time conversions which can't be resolved * added nolint:gosec reason * updated to golangcli-lint 1.61.0 * super minor refactoring uint64 for cfg.GetValidatorStats() * refactoring * added conversion wrapper to devide all existing G115 issues * refactored many G115 issues * minor fix * fixed expected value returned from mock * removed unused code * renames * cr: role to int32 * cr: remove len conversion, add duration conversion check, create var for cutoffround * fix * rename package from conversion to casts * readability * remove unnecessary loop gymnastics * remove more loop gymnastics * add nosec for trusted subnet input * approve spec diffs * approve spec diff * approve genesis spec diff * fix: duty count uint64 * fix test * remove extra debug log --------- Co-authored-by: moshe-blox Co-authored-by: y0sher --- .golangci.yaml | 4 -- api/handling.go | 12 +++-- beacon/goclient/goclient.go | 5 ++- cli/operator/node.go | 1 + cli/threshold.go | 6 +-- eth/eventhandler/validation.go | 2 +- exporter/api/query_handlers.go | 3 +- exporter/convert/message.go | 18 ++++++-- integration/qbft/tests/test_duty.go | 1 + message/validation/common_checks.go | 6 +-- message/validation/consensus_state.go | 10 ++--- message/validation/consensus_state_test.go | 14 +++--- message/validation/consensus_validation.go | 33 +++++++++----- 
.../validation/consensus_validation_test.go | 3 +- .../genesis/consensus_validation.go | 35 ++++++++++----- .../genesis/consensus_validation_test.go | 3 +- message/validation/validation_test.go | 7 ++- migrations/migrations.go | 10 ----- network/commons/common.go | 22 ++++++---- network/discovery/dv5_bootnode.go | 2 +- network/discovery/dv5_routing.go | 2 +- network/discovery/dv5_service.go | 10 ++--- network/discovery/enode.go | 6 +-- network/discovery/local_service.go | 4 +- network/discovery/options.go | 4 +- network/discovery/service.go | 4 +- network/discovery/subnets.go | 6 +-- network/discovery/subnets_test.go | 2 +- network/p2p/config.go | 4 +- network/p2p/p2p.go | 16 +++---- network/p2p/p2p_genesis.go | 4 +- network/p2p/p2p_pubsub.go | 11 ++--- network/p2p/p2p_setup.go | 2 - network/p2p/p2p_validation_test.go | 4 +- network/p2p/test_utils.go | 14 +++--- network/records/entries.go | 2 +- network/records/subnets.go | 10 +++-- network/records/test_utils.go | 4 +- network/testing/keys.go | 4 +- network/testing/local.go | 7 ++- network/testing/net.go | 15 ++++--- network/topics/params/message_rate_test.go | 9 ++-- network/topics/params/scores_test.go | 11 ++--- network/topics/params/topic_score.go | 9 ++-- network/topics/scoring.go | 4 +- networkconfig/config.go | 2 +- operator/duties/dutystore/voluntary_exit.go | 8 ++-- operator/duties/scheduler.go | 5 ++- operator/duties/scheduler_test.go | 2 +- operator/duties/voluntary_exit.go | 5 ++- operator/slotticker/slotticker.go | 5 ++- operator/validator/controller.go | 9 ++-- protocol/genesis/qbft/instance/instance.go | 2 +- protocol/genesis/qbft/instance/timeout.go | 3 +- protocol/genesis/qbft/roundtimer/timer.go | 11 ++--- .../genesis/qbft/spectest/controller_type.go | 5 ++- protocol/genesis/ssv/genesisqueue/messages.go | 2 +- .../ssv/spectest/msg_processing_type.go | 1 + .../ssv/validator/msgqueue_consumer.go | 12 ++--- protocol/genesis/testing/test_utils.go | 4 +- protocol/genesis/types/share.go | 2 +- protocol/genesis/types/ssvshare.go | 6 +-- .../v2/blockchain/beacon/mocks/network.go | 8 ++-- protocol/v2/blockchain/beacon/network.go | 14 +++--- protocol/v2/qbft/config.go | 2 + protocol/v2/qbft/round_robin_proposer.go | 6 +-- protocol/v2/qbft/roundtimer/timer.go | 15 ++++--- protocol/v2/qbft/spectest/controller_type.go | 8 ++-- .../v2/qbft/spectest/msg_processing_type.go | 2 +- protocol/v2/ssv/queue/messages.go | 2 +- protocol/v2/ssv/runner/committee.go | 12 ++--- protocol/v2/ssv/validator/committee_queue.go | 8 ++-- .../v2/ssv/validator/msgqueue_consumer.go | 8 ++-- .../ssv/validator/non_committee_validator.go | 3 +- protocol/v2/ssv/validator/validator.go | 7 +-- protocol/v2/testing/test_utils.go | 2 +- protocol/v2/types/ssvshare.go | 18 ++++---- protocol/v2/types/ssvshare_test.go | 6 +-- registry/storage/shares.go | 4 +- registry/storage/shares_test.go | 2 +- scripts/differ/parser.go | 4 +- scripts/spec-alignment/differ.config.yaml | 2 +- .../spec-alignment/genesis_differ.config.yaml | 2 +- utils/boot_node/node.go | 16 +++---- utils/casts/casts.go | 44 +++++++++++++++++++ 85 files changed, 381 insertions(+), 263 deletions(-) create mode 100644 utils/casts/casts.go diff --git a/.golangci.yaml b/.golangci.yaml index 3ac2ff0b6f..bb40568a9a 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -82,10 +82,6 @@ output: # all available settings of specific linters linters-settings: - gosec: - # TODO: fix all issues with int overflow and return this rule back - excludes: - - G115 dogsled: # checks assignments with too many blank identifiers; 
default is 2 max-blank-identifiers: 2 diff --git a/api/handling.go b/api/handling.go index 218527f723..3344986028 100644 --- a/api/handling.go +++ b/api/handling.go @@ -18,12 +18,18 @@ type HandlerFunc func(http.ResponseWriter, *http.Request) error func Handler(h HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { if err := h(w, r); err != nil { - //nolint:all + //nolint:errorlint + // errors.As would be incorrect here since a renderer.Renderer + // wrapped inside another error should error, not render. switch e := err.(type) { case render.Renderer: - render.Render(w, r, e) + if err := render.Render(w, r, e); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } default: - render.Render(w, r, Error(err)) + if err := render.Render(w, r, Error(err)); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } } } } diff --git a/beacon/goclient/goclient.go b/beacon/goclient/goclient.go index 6d3fd3c617..b631a09e36 100644 --- a/beacon/goclient/goclient.go +++ b/beacon/goclient/goclient.go @@ -23,6 +23,7 @@ import ( operatordatastore "github.com/ssvlabs/ssv/operator/datastore" "github.com/ssvlabs/ssv/operator/slotticker" beaconprotocol "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon" + "github.com/ssvlabs/ssv/utils/casts" ) const ( @@ -263,8 +264,8 @@ func (gc *GoClient) GetBeaconNetwork() spectypes.BeaconNetwork { // SlotStartTime returns the start time in terms of its unix epoch // value. func (gc *GoClient) slotStartTime(slot phase0.Slot) time.Time { - duration := time.Second * time.Duration(uint64(slot)*uint64(gc.network.SlotDurationSec().Seconds())) - startTime := time.Unix(int64(gc.network.MinGenesisTime()), 0).Add(duration) + duration := time.Second * casts.DurationFromUint64(uint64(slot)*uint64(gc.network.SlotDurationSec().Seconds())) + startTime := time.Unix(gc.network.MinGenesisTime(), 0).Add(duration) return startTime } diff --git a/cli/operator/node.go b/cli/operator/node.go index 98bd58e065..fa94d113b3 100644 --- a/cli/operator/node.go +++ b/cli/operator/node.go @@ -438,6 +438,7 @@ func validateConfig(nodeStorage operatorstorage.Storage, networkName string, usi return fmt.Errorf("incompatible config change: %w", err) } } else { + if err := nodeStorage.SaveConfig(nil, currentConfig); err != nil { return fmt.Errorf("failed to store config: %w", err) } diff --git a/cli/threshold.go b/cli/threshold.go index 254898bd5b..a690a3f601 100644 --- a/cli/threshold.go +++ b/cli/threshold.go @@ -35,8 +35,8 @@ var createThresholdCmd = &cobra.Command{ logger.Fatal("failed to get keys count flag value", zap.Error(err)) } - if !ssvtypes.ValidCommitteeSize(int(keysCount)) { - logger.Fatal("invalid keys count", zap.Int("keysCount", int(keysCount))) + if !ssvtypes.ValidCommitteeSize(keysCount) { + logger.Fatal("invalid keys count", zap.Uint64("keysCount", keysCount)) } baseKey := &bls.SecretKey{} @@ -47,7 +47,7 @@ var createThresholdCmd = &cobra.Command{ // https://github.com/ethereum/eth2-ssv/issues/22 // currently support 4, 7, 10, 13 nodes threshold 3f+1. 
need to align based open the issue to // support k(2f+1) and n (3f+1) and allow to pass it as flag - quorum, _ := ssvtypes.ComputeQuorumAndPartialQuorum(int(keysCount)) + quorum, _ := ssvtypes.ComputeQuorumAndPartialQuorum(keysCount) privKeys, err := threshold.Create(baseKey.Serialize(), quorum, keysCount) if err != nil { logger.Fatal("failed to turn a private key into a threshold key", zap.Error(err)) diff --git a/eth/eventhandler/validation.go b/eth/eventhandler/validation.go index 303ec9a926..bdfbdb0823 100644 --- a/eth/eventhandler/validation.go +++ b/eth/eventhandler/validation.go @@ -25,7 +25,7 @@ func (eh *EventHandler) validateOperators(txn basedb.Txn, operators []uint64) er return fmt.Errorf("no operators") } - if !ssvtypes.ValidCommitteeSize(len(operators)) { + if !ssvtypes.ValidCommitteeSize(uint64(len(operators))) { return fmt.Errorf("given operator count (%d) cannot build a 3f+1 quorum", operatorCount) } diff --git a/exporter/api/query_handlers.go b/exporter/api/query_handlers.go index 8c91f5c731..b08d4bd9c6 100644 --- a/exporter/api/query_handlers.go +++ b/exporter/api/query_handlers.go @@ -12,6 +12,7 @@ import ( "github.com/ssvlabs/ssv/ibft/storage" "github.com/ssvlabs/ssv/logging/fields" "github.com/ssvlabs/ssv/protocol/v2/message" + "github.com/ssvlabs/ssv/utils/casts" ) const ( @@ -72,7 +73,7 @@ func HandleParticipantsQuery(logger *zap.Logger, qbftStorage *storage.QBFTStores nm.Msg = res return } - runnerRole := convert.RunnerRole(beaconRole) + runnerRole := casts.BeaconRoleToConvertRole(beaconRole) roleStorage := qbftStorage.Get(runnerRole) if roleStorage == nil { logger.Warn("role storage doesn't exist", fields.ExporterRole(runnerRole)) diff --git a/exporter/convert/message.go b/exporter/convert/message.go index c6fc712e7b..38a4724e31 100644 --- a/exporter/convert/message.go +++ b/exporter/convert/message.go @@ -3,6 +3,7 @@ package convert import ( "encoding/binary" "encoding/hex" + "math" spectypes "github.com/ssvlabs/ssv-spec/types" ) @@ -29,13 +30,24 @@ func (msg MessageID) GetDutyExecutorID() []byte { func (msg MessageID) GetRoleType() RunnerRole { roleByts := msg[roleTypeStartPos : roleTypeStartPos+roleTypeSize] - return RunnerRole(binary.LittleEndian.Uint32(roleByts)) + roleValue := binary.LittleEndian.Uint32(roleByts) + + // Sanitize RoleValue + if roleValue > math.MaxInt32 { + return spectypes.RoleUnknown + } + + return RunnerRole(roleValue) } func NewMsgID(domain spectypes.DomainType, dutyExecutorID []byte, role RunnerRole) MessageID { + // Sanitize role. 
If bad role, return an empty MessageID + roleValue := int32(role) + if roleValue < 0 { + return MessageID{} + } roleByts := make([]byte, 4) - binary.LittleEndian.PutUint32(roleByts, uint32(role)) - + binary.LittleEndian.PutUint32(roleByts, uint32(roleValue)) return newMessageID(domain[:], roleByts, dutyExecutorID) } diff --git a/integration/qbft/tests/test_duty.go b/integration/qbft/tests/test_duty.go index 8d40901366..cbc4458921 100644 --- a/integration/qbft/tests/test_duty.go +++ b/integration/qbft/tests/test_duty.go @@ -28,6 +28,7 @@ func createDuty(pk []byte, slot phase0.Slot, idx phase0.ValidatorIndex, role spe var beaconRole spectypes.BeaconRole switch role { case spectypes.RoleCommittee: + // #nosec G115 return spectestingutils.TestingCommitteeAttesterDuty(slot, []int{int(idx)}) case spectypes.RoleAggregator: testingDuty = spectestingutils.TestingAggregatorDuty diff --git a/message/validation/common_checks.go b/message/validation/common_checks.go index 7846bf2118..70e6918616 100644 --- a/message/validation/common_checks.go +++ b/message/validation/common_checks.go @@ -77,7 +77,7 @@ func (mv *messageValidator) validateDutyCount( return nil } -func (mv *messageValidator) dutyLimit(msgID spectypes.MessageID, slot phase0.Slot, validatorIndices []phase0.ValidatorIndex) (int, bool) { +func (mv *messageValidator) dutyLimit(msgID spectypes.MessageID, slot phase0.Slot, validatorIndices []phase0.ValidatorIndex) (uint64, bool) { switch msgID.GetRoleType() { case spectypes.RoleVoluntaryExit: pk := phase0.BLSPubKey{} @@ -89,8 +89,8 @@ func (mv *messageValidator) dutyLimit(msgID spectypes.MessageID, slot phase0.Slo return 2, true case spectypes.RoleCommittee: - validatorIndexCount := len(validatorIndices) - slotsPerEpoch := int(mv.netCfg.Beacon.SlotsPerEpoch()) + validatorIndexCount := uint64(len(validatorIndices)) + slotsPerEpoch := mv.netCfg.Beacon.SlotsPerEpoch() // Skip duty search if validators * 2 exceeds slots per epoch, // as the maximum duties per epoch is capped at the number of slots. 
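The utils/casts helpers referenced throughout this patch (casts.DurationFromUint64, casts.DurationToUint64) live in the newly created utils/casts/casts.go, which appears later in the patch rather than in this excerpt. A minimal sketch of the two duration conversions, inferred only from their call sites in the surrounding hunks and assumed rather than copied from the actual file:

package casts

import (
	"fmt"
	"math"
	"time"
)

// DurationFromUint64 converts a uint64 into a time.Duration, guarding against
// overflow of the signed int64 underlying time.Duration.
func DurationFromUint64(u uint64) time.Duration {
	if u > math.MaxInt64 {
		return time.Duration(math.MaxInt64)
	}
	return time.Duration(u) // #nosec G115 -- bounded above
}

// DurationToUint64 converts a time.Duration into a uint64, rejecting negative
// durations that would otherwise wrap around.
func DurationToUint64(d time.Duration) (uint64, error) {
	if d < 0 {
		return 0, fmt.Errorf("negative duration: %v", d)
	}
	return uint64(d), nil
}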
diff --git a/message/validation/consensus_state.go b/message/validation/consensus_state.go index 4c9ba16f3d..f5d4b2c121 100644 --- a/message/validation/consensus_state.go +++ b/message/validation/consensus_state.go @@ -36,8 +36,8 @@ type OperatorState struct { state []*SignerState // the slice index is slot % storedSlotCount maxSlot phase0.Slot maxEpoch phase0.Epoch - lastEpochDuties int - prevEpochDuties int + lastEpochDuties uint64 + prevEpochDuties uint64 } func newOperatorState(size phase0.Slot) *OperatorState { @@ -50,7 +50,7 @@ func (os *OperatorState) Get(slot phase0.Slot) *SignerState { os.mu.RLock() defer os.mu.RUnlock() - s := os.state[int(slot)%len(os.state)] + s := os.state[(uint64(slot) % uint64(len(os.state)))] if s == nil || s.Slot != slot { return nil } @@ -62,7 +62,7 @@ func (os *OperatorState) Set(slot phase0.Slot, epoch phase0.Epoch, state *Signer os.mu.Lock() defer os.mu.Unlock() - os.state[int(slot)%len(os.state)] = state + os.state[uint64(slot)%uint64(len(os.state))] = state if slot > os.maxSlot { os.maxSlot = slot } @@ -82,7 +82,7 @@ func (os *OperatorState) MaxSlot() phase0.Slot { return os.maxSlot } -func (os *OperatorState) DutyCount(epoch phase0.Epoch) int { +func (os *OperatorState) DutyCount(epoch phase0.Epoch) uint64 { os.mu.RLock() defer os.mu.RUnlock() diff --git a/message/validation/consensus_state_test.go b/message/validation/consensus_state_test.go index e244588901..e8008fd804 100644 --- a/message/validation/consensus_state_test.go +++ b/message/validation/consensus_state_test.go @@ -62,8 +62,8 @@ func TestOperatorState(t *testing.T) { os.Set(slot, epoch, signerState) - require.Equal(t, os.DutyCount(epoch), 1) - require.Equal(t, os.DutyCount(epoch-1), 0) + require.Equal(t, os.DutyCount(epoch), uint64(1)) + require.Equal(t, os.DutyCount(epoch-1), uint64(0)) slot2 := phase0.Slot(6) epoch2 := phase0.Epoch(2) @@ -71,9 +71,9 @@ func TestOperatorState(t *testing.T) { os.Set(slot2, epoch2, signerState2) - require.Equal(t, os.DutyCount(epoch2), 1) - require.Equal(t, os.DutyCount(epoch), 1) - require.Equal(t, os.DutyCount(epoch-1), 0) + require.Equal(t, os.DutyCount(epoch2), uint64(1)) + require.Equal(t, os.DutyCount(epoch), uint64(1)) + require.Equal(t, os.DutyCount(epoch-1), uint64(0)) }) t.Run("TestIncrementLastEpochDuties", func(t *testing.T) { @@ -85,12 +85,12 @@ func TestOperatorState(t *testing.T) { signerState := &SignerState{Slot: slot} os.Set(slot, epoch, signerState) - require.Equal(t, os.DutyCount(epoch), 1) + require.Equal(t, os.DutyCount(epoch), uint64(1)) slot2 := phase0.Slot(6) signerState2 := &SignerState{Slot: slot2} os.Set(slot2, epoch, signerState2) - require.Equal(t, os.DutyCount(epoch), 2) + require.Equal(t, os.DutyCount(epoch), uint64(2)) }) } diff --git a/message/validation/consensus_validation.go b/message/validation/consensus_validation.go index ce34d2c89b..f3e9db31f2 100644 --- a/message/validation/consensus_validation.go +++ b/message/validation/consensus_validation.go @@ -18,6 +18,7 @@ import ( "github.com/ssvlabs/ssv/protocol/v2/message" "github.com/ssvlabs/ssv/protocol/v2/qbft/roundtimer" ssvtypes "github.com/ssvlabs/ssv/protocol/v2/types" + "github.com/ssvlabs/ssv/utils/casts" ) func (mv *messageValidator) validateConsensusMessage( @@ -81,7 +82,7 @@ func (mv *messageValidator) validateConsensusMessageSemantics( committee []spectypes.OperatorID, ) error { signers := signedSSVMessage.OperatorIDs - quorumSize, _ := ssvtypes.ComputeQuorumAndPartialQuorum(len(committee)) + quorumSize, _ := 
ssvtypes.ComputeQuorumAndPartialQuorum(uint64(len(committee))) msgType := consensusMessage.MsgType if len(signers) > 1 { @@ -376,14 +377,22 @@ func (mv *messageValidator) maxRound(role spectypes.RunnerRole) (specqbft.Round, } } -func (mv *messageValidator) currentEstimatedRound(sinceSlotStart time.Duration) specqbft.Round { - if currentQuickRound := specqbft.FirstRound + specqbft.Round(sinceSlotStart/roundtimer.QuickTimeout); currentQuickRound <= roundtimer.QuickTimeoutThreshold { - return currentQuickRound +func (mv *messageValidator) currentEstimatedRound(sinceSlotStart time.Duration) (specqbft.Round, error) { + delta, err := casts.DurationToUint64(sinceSlotStart / roundtimer.QuickTimeout) + if err != nil { + return 0, fmt.Errorf("failed to convert time duration to uint64: %w", err) + } + if currentQuickRound := specqbft.FirstRound + specqbft.Round(delta); currentQuickRound <= roundtimer.QuickTimeoutThreshold { + return currentQuickRound, nil } sinceFirstSlowRound := sinceSlotStart - (time.Duration(roundtimer.QuickTimeoutThreshold) * roundtimer.QuickTimeout) - estimatedRound := roundtimer.QuickTimeoutThreshold + specqbft.FirstRound + specqbft.Round(sinceFirstSlowRound/roundtimer.SlowTimeout) - return estimatedRound + delta, err = casts.DurationToUint64(sinceFirstSlowRound / roundtimer.SlowTimeout) + if err != nil { + return 0, fmt.Errorf("failed to convert time duration to uint64: %w", err) + } + estimatedRound := roundtimer.QuickTimeoutThreshold + specqbft.FirstRound + specqbft.Round(delta) + return estimatedRound, nil } func (mv *messageValidator) validConsensusMsgType(msgType specqbft.MessageType) bool { @@ -407,7 +416,11 @@ func (mv *messageValidator) roundBelongsToAllowedSpread( estimatedRound := specqbft.FirstRound if receivedAt.After(slotStartTime) { sinceSlotStart = receivedAt.Sub(slotStartTime) - estimatedRound = mv.currentEstimatedRound(sinceSlotStart) + currentEstimatedRound, err := mv.currentEstimatedRound(sinceSlotStart) + if err != nil { + return err + } + estimatedRound = currentEstimatedRound } // TODO: lowestAllowed is not supported yet because first round is non-deterministic now @@ -427,12 +440,12 @@ func (mv *messageValidator) roundBelongsToAllowedSpread( } func (mv *messageValidator) roundRobinProposer(height specqbft.Height, round specqbft.Round, committee []spectypes.OperatorID) types.OperatorID { - firstRoundIndex := 0 + firstRoundIndex := uint64(0) if height != specqbft.FirstHeight { - firstRoundIndex += int(height) % len(committee) + firstRoundIndex += uint64(height) % uint64(len(committee)) } - index := (firstRoundIndex + int(round) - int(specqbft.FirstRound)) % len(committee) + index := (firstRoundIndex + uint64(round) - uint64(specqbft.FirstRound)) % uint64(len(committee)) return committee[index] } diff --git a/message/validation/consensus_validation_test.go b/message/validation/consensus_validation_test.go index 7207a53f74..b1edbb8a3d 100644 --- a/message/validation/consensus_validation_test.go +++ b/message/validation/consensus_validation_test.go @@ -98,7 +98,8 @@ func TestMessageValidator_currentEstimatedRound(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { mv := &messageValidator{} - got := mv.currentEstimatedRound(tc.sinceSlotStart) + got, err := mv.currentEstimatedRound(tc.sinceSlotStart) + require.NoError(t, err) require.Equal(t, tc.want, got) }) } diff --git a/message/validation/genesis/consensus_validation.go b/message/validation/genesis/consensus_validation.go index 06a36799ac..ca0a0623f4 100644 --- 
a/message/validation/genesis/consensus_validation.go +++ b/message/validation/genesis/consensus_validation.go @@ -15,6 +15,7 @@ import ( "github.com/ssvlabs/ssv/protocol/v2/qbft/roundtimer" ssvtypes "github.com/ssvlabs/ssv/protocol/v2/types" + "github.com/ssvlabs/ssv/utils/casts" ) func (mv *messageValidator) validateConsensusMessage( @@ -81,7 +82,10 @@ func (mv *messageValidator) validateConsensusMessage( estimatedRound := genesisspecqbft.FirstRound if receivedAt.After(slotStartTime) { sinceSlotStart = receivedAt.Sub(slotStartTime) - estimatedRound = mv.currentEstimatedRound(sinceSlotStart) + estimatedRound, err = mv.currentEstimatedRound(sinceSlotStart) + if err != nil { + return consensusDescriptor, msgSlot, err + } } // TODO: lowestAllowed is not supported yet because first round is non-deterministic now @@ -350,14 +354,25 @@ func (mv *messageValidator) maxRound(role genesisspectypes.BeaconRole) (genesiss } } -func (mv *messageValidator) currentEstimatedRound(sinceSlotStart time.Duration) genesisspecqbft.Round { - if currentQuickRound := genesisspecqbft.FirstRound + genesisspecqbft.Round(sinceSlotStart/roundtimer.QuickTimeout); currentQuickRound <= genesisspecqbft.Round(roundtimer.QuickTimeoutThreshold) { - return currentQuickRound +func (mv *messageValidator) currentEstimatedRound(sinceSlotStart time.Duration) (genesisspecqbft.Round, error) { + // Quick rounds (<= QuickTimeoutThreshold) + quickRounds, err := casts.DurationToUint64(sinceSlotStart / roundtimer.QuickTimeout) + if err != nil { + return 0, fmt.Errorf("failed to convert time duration to uint64: %w", err) + } + currentQuickRound := genesisspecqbft.FirstRound + genesisspecqbft.Round(quickRounds) + if currentQuickRound <= genesisspecqbft.Round(roundtimer.QuickTimeoutThreshold) { + return currentQuickRound, nil } + // Slow rounds (> QuickTimeoutThreshold) sinceFirstSlowRound := sinceSlotStart - (time.Duration(genesisspecqbft.Round(roundtimer.QuickTimeoutThreshold)) * roundtimer.QuickTimeout) - estimatedRound := genesisspecqbft.Round(roundtimer.QuickTimeoutThreshold) + genesisspecqbft.FirstRound + genesisspecqbft.Round(sinceFirstSlowRound/roundtimer.SlowTimeout) - return estimatedRound + slowRounds, err := casts.DurationToUint64(sinceFirstSlowRound / roundtimer.SlowTimeout) + if err != nil { + return 0, fmt.Errorf("failed to convert time duration to uint64: %w", err) + } + currentSlowRound := genesisspecqbft.Round(roundtimer.QuickTimeoutThreshold) + genesisspecqbft.FirstRound + genesisspecqbft.Round(slowRounds) + return currentSlowRound, nil } func (mv *messageValidator) waitAfterSlotStart(role genesisspectypes.BeaconRole) (time.Duration, error) { @@ -416,7 +431,7 @@ func (mv *messageValidator) validConsensusSigners(share *ssvtypes.SSVShare, m *g e.got = len(m.Signers) return e - case !share.HasQuorum(len(m.Signers)) || len(m.Signers) > len(share.Committee): + case !share.HasQuorum(uint64(len(m.Signers))) || len(m.Signers) > len(share.Committee): e := ErrWrongSignersLength e.want = fmt.Sprintf("between %v and %v", share.Quorum(), len(share.Committee)) e.got = len(m.Signers) @@ -441,11 +456,11 @@ func (mv *messageValidator) validConsensusSigners(share *ssvtypes.SSVShare, m *g } func (mv *messageValidator) roundRobinProposer(height genesisspecqbft.Height, round genesisspecqbft.Round, share *ssvtypes.SSVShare) genesisspectypes.OperatorID { - firstRoundIndex := 0 + firstRoundIndex := uint64(0) if height != genesisspecqbft.FirstHeight { - firstRoundIndex += int(height) % len(share.Committee) + firstRoundIndex += uint64(height) % 
uint64(len(share.Committee)) } - index := (firstRoundIndex + int(round) - int(genesisspecqbft.FirstRound)) % len(share.Committee) + index := (firstRoundIndex + uint64(round) - uint64(genesisspecqbft.FirstRound)) % uint64(len(share.Committee)) return share.Committee[index].Signer } diff --git a/message/validation/genesis/consensus_validation_test.go b/message/validation/genesis/consensus_validation_test.go index 21293c6a0d..c88fc8d794 100644 --- a/message/validation/genesis/consensus_validation_test.go +++ b/message/validation/genesis/consensus_validation_test.go @@ -98,7 +98,8 @@ func TestMessageValidator_currentEstimatedRound(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { mv := &messageValidator{} - got := mv.currentEstimatedRound(tc.sinceSlotStart) + got, err := mv.currentEstimatedRound(tc.sinceSlotStart) + require.NoError(t, err) require.Equal(t, tc.want, got) }) } diff --git a/message/validation/validation_test.go b/message/validation/validation_test.go index c8a2548cf2..8376cd8b25 100644 --- a/message/validation/validation_test.go +++ b/message/validation/validation_test.go @@ -1392,7 +1392,12 @@ func Test_ValidateSSVMessage(t *testing.T) { topicID := commons.CommitteeTopicID(committeeID)[0] sinceSlotStart := time.Duration(0) - for validator.currentEstimatedRound(sinceSlotStart) != round { + for { + currentRound, err := validator.currentEstimatedRound(sinceSlotStart) + require.NoError(t, err) + if currentRound == round { + break + } sinceSlotStart += roundtimer.QuickTimeout } diff --git a/migrations/migrations.go b/migrations/migrations.go index 521baa73e7..e9a34bc066 100644 --- a/migrations/migrations.go +++ b/migrations/migrations.go @@ -13,7 +13,6 @@ import ( "github.com/ssvlabs/ssv/ekm" operatorstorage "github.com/ssvlabs/ssv/operator/storage" "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon" - "github.com/ssvlabs/ssv/protocol/v2/blockchain/eth1" "github.com/ssvlabs/ssv/storage/basedb" ) @@ -59,15 +58,6 @@ type Options struct { Network beacon.Network } -// nolint -func (o Options) getRegistryStores(logger *zap.Logger) ([]eth1.RegistryStore, error) { - nodeStorage, err := o.nodeStorage(logger) - if err != nil { - return nil, err - } - return []eth1.RegistryStore{nodeStorage, o.signerStorage(logger)}, nil -} - // nolint func (o Options) nodeStorage(logger *zap.Logger) (operatorstorage.Storage, error) { return operatorstorage.NewNodeStorage(logger, o.Db) diff --git a/network/commons/common.go b/network/commons/common.go index f496c12e84..2a9036e9bf 100644 --- a/network/commons/common.go +++ b/network/commons/common.go @@ -4,6 +4,7 @@ import ( "encoding/binary" "encoding/hex" "fmt" + "math" "math/big" "strconv" "strings" @@ -27,6 +28,8 @@ const ( // SubnetsCount returns the subnet count for genesis SubnetsCount uint64 = 128 + UnknownSubnetId = math.MaxUint64 + // UnknownSubnet is used when a validator public key is invalid UnknownSubnet = "unknown" @@ -65,8 +68,8 @@ func DecodeGenesisSignedSSVMessage(encoded []byte) ([]byte, genesisspectypes.Ope } // SubnetTopicID returns the topic to use for the given subnet -func SubnetTopicID(subnet int) string { - if subnet < 0 { +func SubnetTopicID(subnet uint64) string { + if subnet == UnknownSubnetId { return UnknownSubnet } return fmt.Sprintf("%d", subnet) @@ -80,7 +83,7 @@ func ValidatorTopicID(pkByts []byte) []string { } func CommitteeTopicID(cid spectypes.CommitteeID) []string { - return []string{strconv.Itoa(CommitteeSubnet(cid))} + return []string{fmt.Sprintf("%d", CommitteeSubnet(cid))} } // GetTopicFullName returns 
the topic full name, including prefix @@ -94,18 +97,19 @@ func GetTopicBaseName(topicName string) string { } // ValidatorSubnet returns the subnet for the given validator -func ValidatorSubnet(validatorPKHex string) int { +func ValidatorSubnet(validatorPKHex string) uint64 { if len(validatorPKHex) < 10 { - return -1 + return UnknownSubnetId } val := hexToUint64(validatorPKHex[:10]) - return int(val % SubnetsCount) + + return val % SubnetsCount } // CommitteeSubnet returns the subnet for the given committee -func CommitteeSubnet(cid spectypes.CommitteeID) int { +func CommitteeSubnet(cid spectypes.CommitteeID) uint64 { subnet := new(big.Int).Mod(new(big.Int).SetBytes(cid[:]), new(big.Int).SetUint64(SubnetsCount)) - return int(subnet.Int64()) + return subnet.Uint64() } // MsgIDFunc is the function that maps a message to a msg_id @@ -132,7 +136,7 @@ func Subnets() int { // Topics returns the available topics for this fork. func Topics() []string { topics := make([]string, Subnets()) - for i := 0; i < Subnets(); i++ { + for i := uint64(0); i < SubnetsCount; i++ { topics[i] = GetTopicFullName(SubnetTopicID(i)) } return topics diff --git a/network/discovery/dv5_bootnode.go b/network/discovery/dv5_bootnode.go index c9f6766bb0..d4e619770f 100644 --- a/network/discovery/dv5_bootnode.go +++ b/network/discovery/dv5_bootnode.go @@ -14,7 +14,7 @@ import ( type BootnodeOptions struct { PrivateKey string `yaml:"PrivateKey" env:"BOOTNODE_NETWORK_KEY" env-description:"Bootnode private key (default will generate new)"` ExternalIP string `yaml:"ExternalIP" env:"BOOTNODE_EXTERNAL_IP" env-description:"Override boot node's IP' "` - Port int `yaml:"Port" env:"BOOTNODE_PORT" env-description:"Override boot node's port' "` + Port uint16 `yaml:"Port" env:"BOOTNODE_PORT" env-description:"Override boot node's port' "` } // Bootnode represents a bootnode used for tests diff --git a/network/discovery/dv5_routing.go b/network/discovery/dv5_routing.go index 2ffa08c0f1..0f69fe5d60 100644 --- a/network/discovery/dv5_routing.go +++ b/network/discovery/dv5_routing.go @@ -56,7 +56,7 @@ func (dvs *DiscV5Service) FindPeers(ctx context.Context, ns string, opt ...disco dvs.discover(ctx, func(e PeerEvent) { cn <- e.AddrInfo - }, time.Millisecond, dvs.ssvNodeFilter(logger), dvs.badNodeFilter(logger), dvs.subnetFilter(uint64(subnet))) + }, time.Millisecond, dvs.ssvNodeFilter(logger), dvs.badNodeFilter(logger), dvs.subnetFilter(subnet)) return cn, nil } diff --git a/network/discovery/dv5_service.go b/network/discovery/dv5_service.go index b7cd72754b..055ffbc0d0 100644 --- a/network/discovery/dv5_service.go +++ b/network/discovery/dv5_service.go @@ -209,7 +209,7 @@ func (dvs *DiscV5Service) initDiscV5Listener(logger *zap.Logger, discOpts *Optio dvs.bootnodes = dv5Cfg.Bootnodes logger.Debug("started discv5 listener (UDP)", fields.BindIP(bindIP), - zap.Int("UdpPort", opts.Port), fields.ENRLocalNode(localNode), fields.Domain(discOpts.DomainType.DomainType())) + zap.Uint16("UdpPort", opts.Port), fields.ENRLocalNode(localNode), fields.Domain(discOpts.DomainType.DomainType())) return nil } @@ -260,7 +260,7 @@ func (dvs *DiscV5Service) discover(ctx context.Context, handler HandleNewPeer, i } // RegisterSubnets adds the given subnets and publish the updated node record -func (dvs *DiscV5Service) RegisterSubnets(logger *zap.Logger, subnets ...int) (updated bool, err error) { +func (dvs *DiscV5Service) RegisterSubnets(logger *zap.Logger, subnets ...uint64) (updated bool, err error) { if len(subnets) == 0 { return false, nil } @@ -277,7 +277,7 @@ 
func (dvs *DiscV5Service) RegisterSubnets(logger *zap.Logger, subnets ...int) (u } // DeregisterSubnets removes the given subnets and publish the updated node record -func (dvs *DiscV5Service) DeregisterSubnets(logger *zap.Logger, subnets ...int) (updated bool, err error) { +func (dvs *DiscV5Service) DeregisterSubnets(logger *zap.Logger, subnets ...uint64) (updated bool, err error) { logger = logger.Named(logging.NameDiscoveryService) if len(subnets) == 0 { @@ -384,10 +384,10 @@ func (dvs *DiscV5Service) createLocalNode(logger *zap.Logger, discOpts *Options, } // newUDPListener creates a udp server -func newUDPListener(bindIP net.IP, port int, network string) (*net.UDPConn, error) { +func newUDPListener(bindIP net.IP, port uint16, network string) (*net.UDPConn, error) { udpAddr := &net.UDPAddr{ IP: bindIP, - Port: port, + Port: int(port), } conn, err := net.ListenUDP(network, udpAddr) if err != nil { diff --git a/network/discovery/enode.go b/network/discovery/enode.go index 811d125096..f31320a8d5 100644 --- a/network/discovery/enode.go +++ b/network/discovery/enode.go @@ -16,7 +16,7 @@ import ( ) // createLocalNode create a new enode.LocalNode instance -func createLocalNode(privKey *ecdsa.PrivateKey, storagePath string, ipAddr net.IP, udpPort, tcpPort int) (*enode.LocalNode, error) { +func createLocalNode(privKey *ecdsa.PrivateKey, storagePath string, ipAddr net.IP, udpPort, tcpPort uint16) (*enode.LocalNode, error) { db, err := enode.OpenDB(storagePath) if err != nil { return nil, errors.Wrap(err, "could not open node's peer database") @@ -27,7 +27,7 @@ func createLocalNode(privKey *ecdsa.PrivateKey, storagePath string, ipAddr net.I localNode.Set(enr.UDP(udpPort)) localNode.Set(enr.TCP(tcpPort)) localNode.SetFallbackIP(ipAddr) - localNode.SetFallbackUDP(udpPort) + localNode.SetFallbackUDP(int(udpPort)) localNode.Set(enr.WithEntry("ssv", true)) return localNode, nil @@ -92,7 +92,7 @@ func ToMultiAddr(node *enode.Node) (ma.Multiaddr, error) { if ip.To4() == nil && ip.To16() == nil { return nil, errors.Errorf("invalid ip address: %s", ipAddr) } - port := uint(node.TCP()) + port := node.TCP() var s string if ip.To4() != nil { s = fmt.Sprintf("/ip4/%s/%s/%d/p2p/%s", ipAddr, "tcp", port, id.String()) diff --git a/network/discovery/local_service.go b/network/discovery/local_service.go index 5ad0a94196..5127036828 100644 --- a/network/discovery/local_service.go +++ b/network/discovery/local_service.go @@ -94,13 +94,13 @@ func (md *localDiscovery) FindPeers(ctx context.Context, ns string, opt ...disco } // RegisterSubnets implements Service -func (md *localDiscovery) RegisterSubnets(logger *zap.Logger, subnets ...int) (updated bool, err error) { +func (md *localDiscovery) RegisterSubnets(logger *zap.Logger, subnets ...uint64) (updated bool, err error) { // TODO return false, nil } // DeregisterSubnets implements Service -func (md *localDiscovery) DeregisterSubnets(logger *zap.Logger, subnets ...int) (updated bool, err error) { +func (md *localDiscovery) DeregisterSubnets(logger *zap.Logger, subnets ...uint64) (updated bool, err error) { // TODO return false, nil } diff --git a/network/discovery/options.go b/network/discovery/options.go index 5cb359d231..da54032003 100644 --- a/network/discovery/options.go +++ b/network/discovery/options.go @@ -28,9 +28,9 @@ type DiscV5Options struct { // BindIP is the IP to bind to the UDP listener BindIP string // Port is the UDP port used by discv5 - Port int + Port uint16 // TCPPort is the TCP port exposed in the ENR - TCPPort int + TCPPort uint16 // NetworkKey 
is the private key used to create the peer.ID if the node NetworkKey *ecdsa.PrivateKey // Bootnodes is a list of bootstrapper nodes diff --git a/network/discovery/service.go b/network/discovery/service.go index 51d131f329..15c66fb449 100644 --- a/network/discovery/service.go +++ b/network/discovery/service.go @@ -49,8 +49,8 @@ type Options struct { type Service interface { discovery.Discovery io.Closer - RegisterSubnets(logger *zap.Logger, subnets ...int) (updated bool, err error) - DeregisterSubnets(logger *zap.Logger, subnets ...int) (updated bool, err error) + RegisterSubnets(logger *zap.Logger, subnets ...uint64) (updated bool, err error) + DeregisterSubnets(logger *zap.Logger, subnets ...uint64) (updated bool, err error) Bootstrap(logger *zap.Logger, handler HandleNewPeer) error PublishENR(logger *zap.Logger) } diff --git a/network/discovery/subnets.go b/network/discovery/subnets.go index 175120d598..dd8c26c555 100644 --- a/network/discovery/subnets.go +++ b/network/discovery/subnets.go @@ -15,7 +15,7 @@ var ( ) // nsToSubnet converts the given topic to subnet -func (dvs *DiscV5Service) nsToSubnet(ns string) (int, error) { +func (dvs *DiscV5Service) nsToSubnet(ns string) (uint64, error) { r, done := regPool.Get() defer done() @@ -29,11 +29,11 @@ func (dvs *DiscV5Service) nsToSubnet(ns string) (int, error) { return 0, err } - if val >= uint64(commons.Subnets()) { + if val >= commons.SubnetsCount { return 0, errValueOutOfRange } - return int(val), nil + return val, nil } // isSubnet checks if the given string is a subnet string diff --git a/network/discovery/subnets_test.go b/network/discovery/subnets_test.go index 69e79e8eea..cea742d908 100644 --- a/network/discovery/subnets_test.go +++ b/network/discovery/subnets_test.go @@ -10,7 +10,7 @@ func TestNsToSubnet(t *testing.T) { tests := []struct { name string ns string - expected int + expected uint64 expectedErr string isSubnet bool }{ diff --git a/network/p2p/config.go b/network/p2p/config.go index b6328ba364..c659b610ea 100644 --- a/network/p2p/config.go +++ b/network/p2p/config.go @@ -39,8 +39,8 @@ type Config struct { Discovery string `yaml:"Discovery" env:"P2P_DISCOVERY" env-description:"Discovery system to use" env-default:"discv5"` TrustedPeers []string `yaml:"TrustedPeers" env:"TRUSTED_PEERS" env-default:"" env-description:"List of peers to connect to."` - TCPPort int `yaml:"TcpPort" env:"TCP_PORT" env-default:"13001" env-description:"TCP port for p2p transport"` - UDPPort int `yaml:"UdpPort" env:"UDP_PORT" env-default:"12001" env-description:"UDP port for discovery"` + TCPPort uint16 `yaml:"TcpPort" env:"TCP_PORT" env-default:"13001" env-description:"TCP port for p2p transport"` + UDPPort uint16 `yaml:"UdpPort" env:"UDP_PORT" env-default:"12001" env-description:"UDP port for discovery"` HostAddress string `yaml:"HostAddress" env:"HOST_ADDRESS" env-description:"External ip node is exposed for discovery"` HostDNS string `yaml:"HostDNS" env:"HOST_DNS" env-description:"External DNS node is exposed for discovery"` diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index 4a412ca337..674ab65c94 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -377,18 +377,18 @@ func (n *p2pNetwork) UpdateSubnets(logger *zap.Logger) { n.activeSubnets = updatedSubnets // Compute the not yet registered subnets. 
- addedSubnets := make([]int, 0) + addedSubnets := make([]uint64, 0) for subnet, active := range updatedSubnets { if active == byte(1) && registeredSubnets[subnet] == byte(0) { - addedSubnets = append(addedSubnets, subnet) + addedSubnets = append(addedSubnets, uint64(subnet)) // #nosec G115 -- subnets has a constant max len of 128 } } // Compute the not anymore registered subnets. - removedSubnets := make([]int, 0) + removedSubnets := make([]uint64, 0) for subnet, active := range registeredSubnets { if active == byte(1) && updatedSubnets[subnet] == byte(0) { - removedSubnets = append(removedSubnets, subnet) + removedSubnets = append(removedSubnets, uint64(subnet)) // #nosec G115 -- subnets has a constant max len of 128 } } @@ -423,12 +423,12 @@ func (n *p2pNetwork) UpdateSubnets(logger *zap.Logger) { } // Unsubscribe from the removed subnets. - for _, subnet := range removedSubnets { - if err := n.unsubscribeSubnet(logger, uint(subnet)); err != nil { - logger.Debug("could not unsubscribe from subnet", zap.Int("subnet", subnet), zap.Error(err)) + for _, removedSubnet := range removedSubnets { + if err := n.unsubscribeSubnet(logger, removedSubnet); err != nil { + logger.Debug("could not unsubscribe from subnet", zap.Uint64("subnet", removedSubnet), zap.Error(err)) errs = errors.Join(errs, err) } else { - logger.Debug("unsubscribed from subnet", zap.Int("subnet", subnet)) + logger.Debug("unsubscribed from subnet", zap.Uint64("subnet", removedSubnet)) } } } diff --git a/network/p2p/p2p_genesis.go b/network/p2p/p2p_genesis.go index 7d6539e33a..eb042fd388 100644 --- a/network/p2p/p2p_genesis.go +++ b/network/p2p/p2p_genesis.go @@ -20,7 +20,7 @@ type GenesisP2P struct { func (p *GenesisP2P) Broadcast(message *genesisspectypes.SSVMessage) error { - zap.L().Debug("broadcasting genesis msg", fields.PubKey(message.MsgID.GetPubKey()), zap.Int("msg_type", int(message.MsgType))) + zap.L().Debug("broadcasting genesis msg", fields.PubKey(message.MsgID.GetPubKey()), zap.Uint64("msg_type", uint64(message.MsgType))) if !p.Network.isReady() { return p2pprotocol.ErrNetworkIsNotReady @@ -50,7 +50,7 @@ func (p *GenesisP2P) Broadcast(message *genesisspectypes.SSVMessage) error { for _, topic := range topics { p.Network.interfaceLogger.Debug("broadcasting msg", fields.PubKey(message.MsgID.GetPubKey()), - zap.Int("msg_type", int(message.MsgType)), + zap.Uint64("msg_type", uint64(message.MsgType)), fields.Topic(topic)) if err := p.Network.topicsCtrl.Broadcast(topic, encodedMsg, p.Network.cfg.RequestTimeout); err != nil { p.Network.interfaceLogger.Debug("could not broadcast msg", fields.PubKey(message.MsgID.GetPubKey()), zap.Error(err)) diff --git a/network/p2p/p2p_pubsub.go b/network/p2p/p2p_pubsub.go index 07a18544a4..68baf5126c 100644 --- a/network/p2p/p2p_pubsub.go +++ b/network/p2p/p2p_pubsub.go @@ -77,7 +77,7 @@ func (n *p2pNetwork) SubscribeAll(logger *zap.Logger) error { return p2pprotocol.ErrNetworkIsNotReady } n.fixedSubnets, _ = records.Subnets{}.FromString(records.AllSubnets) - for subnet := 0; subnet < commons.Subnets(); subnet++ { + for subnet := uint64(0); subnet < commons.SubnetsCount; subnet++ { err := n.topicsCtrl.Subscribe(logger, commons.SubnetTopicID(subnet)) if err != nil { return err @@ -100,7 +100,8 @@ func (n *p2pNetwork) SubscribeRandoms(logger *zap.Logger, numSubnets int) error randomSubnets := rand.New(rand.NewSource(time.Now().UnixNano())).Perm(commons.Subnets()) randomSubnets = randomSubnets[:numSubnets] for _, subnet := range randomSubnets { - err := n.topicsCtrl.Subscribe(logger, 
commons.SubnetTopicID(subnet)) + // #nosec G115 + err := n.topicsCtrl.Subscribe(logger, commons.SubnetTopicID(uint64(subnet))) // Perm slice is [0, n) if err != nil { return fmt.Errorf("could not subscribe to subnet %d: %w", subnet, err) } @@ -172,14 +173,14 @@ func (n *p2pNetwork) subscribeValidator(pk spectypes.ValidatorPK) error { return nil } -func (n *p2pNetwork) unsubscribeSubnet(logger *zap.Logger, subnet uint) error { +func (n *p2pNetwork) unsubscribeSubnet(logger *zap.Logger, subnet uint64) error { if !n.isReady() { return p2pprotocol.ErrNetworkIsNotReady } - if subnet >= uint(commons.Subnets()) { + if subnet >= commons.SubnetsCount { return fmt.Errorf("invalid subnet %d", subnet) } - if err := n.topicsCtrl.Unsubscribe(logger, commons.SubnetTopicID(int(subnet)), false); err != nil { + if err := n.topicsCtrl.Unsubscribe(logger, commons.SubnetTopicID(subnet), false); err != nil { return fmt.Errorf("could not unsubscribe from subnet %d: %w", subnet, err) } return nil diff --git a/network/p2p/p2p_setup.go b/network/p2p/p2p_setup.go index 65f2d72f71..0a4893a80c 100644 --- a/network/p2p/p2p_setup.go +++ b/network/p2p/p2p_setup.go @@ -57,8 +57,6 @@ func (n *p2pNetwork) Setup(logger *zap.Logger) error { return errors.New("could not setup network: in ready state") } - // set a seed for rand values - rand.Seed(time.Now().UnixNano()) // nolint: staticcheck logger.Info("configuring") if err := n.initCfg(); err != nil { diff --git a/network/p2p/p2p_validation_test.go b/network/p2p/p2p_validation_test.go index dd70113b24..69bd60175e 100644 --- a/network/p2p/p2p_validation_test.go +++ b/network/p2p/p2p_validation_test.go @@ -138,7 +138,7 @@ func TestP2pNetwork_MessageValidation(t *testing.T) { } // Create a VirtualNet with 4 nodes. - vNet = CreateVirtualNet(t, ctx, 4, shares, func(nodeIndex int) validation.MessageValidator { + vNet = CreateVirtualNet(t, ctx, 4, shares, func(nodeIndex uint64) validation.MessageValidator { return messageValidators[nodeIndex] }) @@ -338,7 +338,7 @@ func CreateVirtualNet( ctx context.Context, nodes int, shares []*ssvtypes.SSVShare, - messageValidatorProvider func(int) validation.MessageValidator, + messageValidatorProvider func(uint64) validation.MessageValidator, ) *VirtualNet { var doneSetup atomic.Bool vn := &VirtualNet{} diff --git a/network/p2p/test_utils.go b/network/p2p/test_utils.go index 9762911873..04e5bd0d59 100644 --- a/network/p2p/test_utils.go +++ b/network/p2p/test_utils.go @@ -131,7 +131,7 @@ func (mockSignatureVerifier) VerifySignature(operatorID spectypes.OperatorID, me } // NewTestP2pNetwork creates a new network.P2PNetwork instance -func (ln *LocalNet) NewTestP2pNetwork(ctx context.Context, nodeIndex int, keys testing.NodeKeys, logger *zap.Logger, options LocalNetOptions) (network.P2PNetwork, error) { +func (ln *LocalNet) NewTestP2pNetwork(ctx context.Context, nodeIndex uint64, keys testing.NodeKeys, logger *zap.Logger, options LocalNetOptions) (network.P2PNetwork, error) { operatorPubkey, err := keys.OperatorKey.Public().Base64() if err != nil { return nil, err @@ -190,7 +190,7 @@ func (ln *LocalNet) NewTestP2pNetwork(ctx context.Context, nodeIndex int, keys t cfg.Network = networkconfig.TestNetwork if options.TotalValidators > 0 { cfg.GetValidatorStats = func() (uint64, uint64, uint64, error) { - return uint64(options.TotalValidators), uint64(options.ActiveValidators), uint64(options.MyValidators), nil + return options.TotalValidators, options.ActiveValidators, options.MyValidators, nil } } @@ -222,7 +222,7 @@ func (ln *LocalNet) 
NewTestP2pNetwork(ctx context.Context, nodeIndex int, keys t cfg.PeerScoreInspectorInterval = options.PeerScoreInspectorInterval } - cfg.OperatorDataStore = operatordatastore.New(®istrystorage.OperatorData{ID: spectypes.OperatorID(nodeIndex + 1)}) + cfg.OperatorDataStore = operatordatastore.New(®istrystorage.OperatorData{ID: nodeIndex + 1}) mr := metricsreporter.New() p, err := New(logger, cfg, mr) @@ -237,11 +237,11 @@ func (ln *LocalNet) NewTestP2pNetwork(ctx context.Context, nodeIndex int, keys t } type LocalNetOptions struct { - MessageValidatorProvider func(int) validation.MessageValidator + MessageValidatorProvider func(uint64) validation.MessageValidator Nodes int MinConnected int UseDiscv5 bool - TotalValidators, ActiveValidators, MyValidators int + TotalValidators, ActiveValidators, MyValidators uint64 PeerScoreInspector func(selfPeer peer.ID, peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) PeerScoreInspectorInterval time.Duration Shares []*ssvtypes.SSVShare @@ -256,7 +256,7 @@ func NewLocalNet(ctx context.Context, logger *zap.Logger, options LocalNetOption return nil, err } } - nodes, keys, err := testing.NewLocalTestnet(ctx, options.Nodes, func(pctx context.Context, nodeIndex int, keys testing.NodeKeys) network.P2PNetwork { + nodes, keys, err := testing.NewLocalTestnet(ctx, options.Nodes, func(pctx context.Context, nodeIndex uint64, keys testing.NodeKeys) network.P2PNetwork { logger := logger.Named(fmt.Sprintf("node-%d", nodeIndex)) p, err := ln.NewTestP2pNetwork(pctx, nodeIndex, keys, logger, options) if err != nil { @@ -274,7 +274,7 @@ func NewLocalNet(ctx context.Context, logger *zap.Logger, options LocalNetOption } // NewNetConfig creates a new config for tests -func NewNetConfig(keys testing.NodeKeys, operatorPubKeyHash string, bn *discovery.Bootnode, tcpPort, udpPort, maxPeers int) *Config { +func NewNetConfig(keys testing.NodeKeys, operatorPubKeyHash string, bn *discovery.Bootnode, tcpPort, udpPort uint16, maxPeers int) *Config { bns := "" discT := "discv5" if bn != nil { diff --git a/network/records/entries.go b/network/records/entries.go index cda23dd372..846e7d938d 100644 --- a/network/records/entries.go +++ b/network/records/entries.go @@ -71,7 +71,7 @@ func GetDomainTypeEntry(record *enr.Record, key ENRKey) (spectypes.DomainType, e // SetSubnetsEntry adds subnets entry to our enode.LocalNode func SetSubnetsEntry(node *enode.LocalNode, subnets []byte) error { subnetsVec := bitfield.NewBitvector128() - for i, subnet := range subnets { + for i, subnet := range subnets { // #nosec G115 -- subnets has a constant len of 128 subnetsVec.SetBitAt(uint64(i), subnet > 0) } node.Set(enr.WithEntry("subnets", &subnetsVec)) diff --git a/network/records/subnets.go b/network/records/subnets.go index 58dabb5c7d..de3f2e3faa 100644 --- a/network/records/subnets.go +++ b/network/records/subnets.go @@ -21,7 +21,7 @@ const ( // UpdateSubnets updates subnets entry according to the given changes. 
// count is the amount of subnets, in case that the entry doesn't exist as we want to initialize it -func UpdateSubnets(node *enode.LocalNode, count int, added []int, removed []int) ([]byte, error) { +func UpdateSubnets(node *enode.LocalNode, count int, added []uint64, removed []uint64) ([]byte, error) { subnets, err := GetSubnetsEntry(node.Node().Record()) if err != nil && !errors.Is(err, ErrEntryNotFound) { return nil, errors.Wrap(err, "could not read subnets entry") @@ -59,8 +59,10 @@ func (s Subnets) Clone() Subnets { func (s Subnets) String() string { subnetsVec := bitfield.NewBitvector128() - for subnet, val := range s { - subnetsVec.SetBitAt(uint64(subnet), val > uint8(0)) + subnet := uint64(0) + for _, val := range s { + subnetsVec.SetBitAt(subnet, val > uint8(0)) + subnet++ } return hex.EncodeToString(subnetsVec.Bytes()) } @@ -144,7 +146,7 @@ func getCharMask(str string) ([]byte, error) { if err != nil { return nil, err } - maskData = append(maskData, uint8(val)) + maskData = append(maskData, uint8(val)) // nolint:gosec } return maskData, nil } diff --git a/network/records/test_utils.go b/network/records/test_utils.go index 83370744ac..d795b42d72 100644 --- a/network/records/test_utils.go +++ b/network/records/test_utils.go @@ -10,7 +10,7 @@ import ( ) // CreateLocalNode create a new enode.LocalNode instance -func CreateLocalNode(privKey *ecdsa.PrivateKey, storagePath string, ipAddr net.IP, udpPort, tcpPort int) (*enode.LocalNode, error) { +func CreateLocalNode(privKey *ecdsa.PrivateKey, storagePath string, ipAddr net.IP, udpPort, tcpPort uint16) (*enode.LocalNode, error) { db, err := enode.OpenDB(storagePath) if err != nil { return nil, errors.Wrap(err, "could not open node's peer database") @@ -21,7 +21,7 @@ func CreateLocalNode(privKey *ecdsa.PrivateKey, storagePath string, ipAddr net.I localNode.Set(enr.UDP(udpPort)) localNode.Set(enr.TCP(tcpPort)) localNode.SetFallbackIP(ipAddr) - localNode.SetFallbackUDP(udpPort) + localNode.SetFallbackUDP(int(udpPort)) return localNode, nil } diff --git a/network/testing/keys.go b/network/testing/keys.go index bed84ae678..49dee5fe6a 100644 --- a/network/testing/keys.go +++ b/network/testing/keys.go @@ -7,6 +7,7 @@ import ( "github.com/herumi/bls-eth-go-binary/bls" spectestingutils "github.com/ssvlabs/ssv-spec/types/testingutils" + "github.com/ssvlabs/ssv/network" "github.com/ssvlabs/ssv/network/commons" "github.com/ssvlabs/ssv/operator/keys" @@ -83,7 +84,8 @@ func NewLocalTestnetFromKeySet(ctx context.Context, factory NetworkFactory, ks * return nil, nil, err } - for i, k := range keys { + i := uint64(0) + for _, k := range keys { nodes[i] = factory(ctx, i, k) } diff --git a/network/testing/local.go b/network/testing/local.go index 97c8ee7d46..ae1ed9acf9 100644 --- a/network/testing/local.go +++ b/network/testing/local.go @@ -2,11 +2,12 @@ package testing import ( "context" + "github.com/ssvlabs/ssv/network" ) // NetworkFactory is a generic factory for network instances -type NetworkFactory func(pctx context.Context, nodeIndex int, keys NodeKeys) network.P2PNetwork +type NetworkFactory func(pctx context.Context, nodeIndex uint64, keys NodeKeys) network.P2PNetwork // NewLocalTestnet creates a new local network func NewLocalTestnet(ctx context.Context, n int, factory NetworkFactory) ([]network.P2PNetwork, []NodeKeys, error) { @@ -16,8 +17,10 @@ func NewLocalTestnet(ctx context.Context, n int, factory NetworkFactory) ([]netw return nil, nil, err } - for i, k := range keys { + i := uint64(0) + for _, k := range keys { nodes[i] = factory(ctx, i, 
k) + i++ } return nodes, keys, nil diff --git a/network/testing/net.go b/network/testing/net.go index e5205efdc9..048332ed3e 100644 --- a/network/testing/net.go +++ b/network/testing/net.go @@ -8,7 +8,7 @@ import ( ) // RandomTCPPort returns a new random tcp port -func RandomTCPPort(from, to int) int { +func RandomTCPPort(from, to uint16) uint16 { for { port := random(from, to) if checkTCPPort(port) == nil { @@ -20,7 +20,7 @@ func RandomTCPPort(from, to int) int { } // checkTCPPort checks that the given port is not taken -func checkTCPPort(port int) error { +func checkTCPPort(port uint16) error { conn, err := net.DialTimeout("tcp", fmt.Sprintf(":%d", port), 3*time.Second) if err != nil { return err @@ -33,12 +33,12 @@ func checkTCPPort(port int) error { type UDPPortsRandomizer map[int]bool // Next generates a new random port that is available -func (up UDPPortsRandomizer) Next(from, to int) int { +func (up UDPPortsRandomizer) Next(from, to uint16) uint16 { udpPort := random(from, to) udpPortLoop: for { - if !up[udpPort] { - up[udpPort] = true + if !up[int(udpPort)] { + up[int(udpPort)] = true break udpPortLoop } udpPort = random(from, to) @@ -46,7 +46,8 @@ udpPortLoop: return udpPort } -func random(from, to int) int { +func random(from, to uint16) uint16 { // #nosec G404 - return rand.Intn(to-from) + from + // #nosec G115 + return uint16(rand.Intn(int(to-from)) + int(from)) } diff --git a/network/topics/params/message_rate_test.go b/network/topics/params/message_rate_test.go index eef3a0b0e8..75db598f7a 100644 --- a/network/topics/params/message_rate_test.go +++ b/network/topics/params/message_rate_test.go @@ -5,9 +5,10 @@ import ( "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/ssvlabs/ssv-spec/types" + "github.com/stretchr/testify/require" + "github.com/ssvlabs/ssv/protocol/v2/types" "github.com/ssvlabs/ssv/registry/storage" - "github.com/stretchr/testify/require" ) func createTestingValidators(n int) []*types.SSVShare { @@ -22,10 +23,10 @@ func createTestingValidators(n int) []*types.SSVShare { return ret } -func createTestingSingleCommittees(n int) []*storage.Committee { +func createTestingSingleCommittees(n uint64) []*storage.Committee { ret := make([]*storage.Committee, 0) - for i := 0; i <= n; i++ { - opRef := uint64(i*4 + 1) + for i := uint64(0); i <= n; i++ { + opRef := i*4 + 1 ret = append(ret, &storage.Committee{ Operators: []uint64{opRef, opRef + 1, opRef + 2, opRef + 3}, Validators: createTestingValidators(1), diff --git a/network/topics/params/scores_test.go b/network/topics/params/scores_test.go index ccb172bf59..acf9dc8f12 100644 --- a/network/topics/params/scores_test.go +++ b/network/topics/params/scores_test.go @@ -7,8 +7,9 @@ import ( "time" pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/ssvlabs/ssv/registry/storage" "github.com/stretchr/testify/require" + + "github.com/ssvlabs/ssv/registry/storage" ) func TestTopicScoreParams(t *testing.T) { @@ -20,7 +21,7 @@ func TestTopicScoreParams(t *testing.T) { { "subnet topic 0 validators", func() *Options { - validators := 0 + validators := uint64(0) opts := NewSubnetTopicOpts(validators, 128, []*storage.Committee{}) return opts }, @@ -29,7 +30,7 @@ func TestTopicScoreParams(t *testing.T) { { "subnet topic 1k validators", func() *Options { - validators := 1000 + validators := uint64(1000) opts := NewSubnetTopicOpts(validators, 128, createTestingSingleCommittees(validators)) return opts }, @@ -38,7 +39,7 @@ func TestTopicScoreParams(t *testing.T) { { "subnet topic 10k validators", func() 
*Options { - validators := 10_000 + validators := uint64(10_000) opts := NewSubnetTopicOpts(validators, 128, createTestingSingleCommittees(validators)) return opts }, @@ -47,7 +48,7 @@ func TestTopicScoreParams(t *testing.T) { { "subnet topic 51k validators", func() *Options { - validators := 51_000 + validators := uint64(51_000) opts := NewSubnetTopicOpts(validators, 128, createTestingSingleCommittees(validators)) return opts }, diff --git a/network/topics/params/topic_score.go b/network/topics/params/topic_score.go index ddbee3967a..54613631f6 100644 --- a/network/topics/params/topic_score.go +++ b/network/topics/params/topic_score.go @@ -6,6 +6,7 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/pkg/errors" + "github.com/ssvlabs/ssv/registry/storage" ) @@ -46,7 +47,7 @@ var ( // NetworkOpts is the config struct for network configurations type NetworkOpts struct { // ActiveValidators is the amount of validators in the network - ActiveValidators int + ActiveValidators uint64 // Subnets is the number of subnets in the network Subnets int // OneEpochDuration is used as a time-frame length to control scoring in a dynamic way @@ -149,7 +150,7 @@ func (o *Options) maxScore() float64 { } // NewOpts creates new TopicOpts instance -func NewOpts(activeValidators, subnets int) *Options { +func NewOpts(activeValidators uint64, subnets int) *Options { return &Options{ Network: NetworkOpts{ ActiveValidators: activeValidators, @@ -160,7 +161,7 @@ func NewOpts(activeValidators, subnets int) *Options { } // NewSubnetTopicOpts creates new TopicOpts for a subnet topic -func NewSubnetTopicOpts(activeValidators, subnets int, committees []*storage.Committee) *Options { +func NewSubnetTopicOpts(activeValidators uint64, subnets int, committees []*storage.Committee) *Options { // Create options with default values opts := NewOpts(activeValidators, subnets) opts.defaults() @@ -175,7 +176,7 @@ func NewSubnetTopicOpts(activeValidators, subnets int, committees []*storage.Com } // NewSubnetTopicOpts creates new TopicOpts for a subnet topic -func NewSubnetTopicOptsValidators(activeValidators, subnets int) *Options { +func NewSubnetTopicOptsValidators(activeValidators uint64, subnets int) *Options { // Create options with default values opts := NewOpts(activeValidators, subnets) opts.defaults() diff --git a/network/topics/scoring.go b/network/topics/scoring.go index 68036b1fa2..5bda80e447 100644 --- a/network/topics/scoring.go +++ b/network/topics/scoring.go @@ -216,7 +216,7 @@ func topicScoreParams(logger *zap.Logger, cfg *PubSubConfig, committeesProvider logger.Debug("got filtered committees for score params") // Create topic options - opts := params.NewSubnetTopicOpts(int(totalValidators), commons.Subnets(), topicCommittees) + opts := params.NewSubnetTopicOpts(totalValidators, commons.Subnets(), topicCommittees) // Generate topic parameters tp, err := params.TopicParams(opts) @@ -239,7 +239,7 @@ func validatorTopicScoreParams(logger *zap.Logger, cfg *PubSubConfig) func(strin logger := logger.With(zap.String("topic", t), zap.Uint64("totalValidators", totalValidators), zap.Uint64("activeValidators", activeValidators), zap.Uint64("myValidators", myValidators)) logger.Debug("got validator stats for score params") - opts := params.NewSubnetTopicOptsValidators(int(totalValidators), commons.Subnets()) + opts := params.NewSubnetTopicOptsValidators(totalValidators, commons.Subnets()) tp, err := params.TopicParams(opts) if err != nil { logger.Debug("ignoring topic score params", zap.Error(err)) diff --git 
a/networkconfig/config.go b/networkconfig/config.go index 15f9d5adad..fcfb001996 100644 --- a/networkconfig/config.go +++ b/networkconfig/config.go @@ -89,7 +89,7 @@ func (n NetworkConfig) SlotsPerEpoch() uint64 { // GetGenesisTime returns the genesis time in unix time. func (n NetworkConfig) GetGenesisTime() time.Time { - return time.Unix(int64(n.Beacon.MinGenesisTime()), 0) + return time.Unix(n.Beacon.MinGenesisTime(), 0) } // DomainType returns current domain type based on the current fork. diff --git a/operator/duties/dutystore/voluntary_exit.go b/operator/duties/dutystore/voluntary_exit.go index d742753645..b664523c2e 100644 --- a/operator/duties/dutystore/voluntary_exit.go +++ b/operator/duties/dutystore/voluntary_exit.go @@ -8,16 +8,16 @@ import ( type VoluntaryExitDuties struct { mu sync.RWMutex - m map[phase0.Slot]map[phase0.BLSPubKey]int + m map[phase0.Slot]map[phase0.BLSPubKey]uint64 } func NewVoluntaryExit() *VoluntaryExitDuties { return &VoluntaryExitDuties{ - m: make(map[phase0.Slot]map[phase0.BLSPubKey]int), + m: make(map[phase0.Slot]map[phase0.BLSPubKey]uint64), } } -func (d *VoluntaryExitDuties) GetDutyCount(slot phase0.Slot, pk phase0.BLSPubKey) int { +func (d *VoluntaryExitDuties) GetDutyCount(slot phase0.Slot, pk phase0.BLSPubKey) uint64 { d.mu.RLock() defer d.mu.RUnlock() @@ -35,7 +35,7 @@ func (d *VoluntaryExitDuties) AddDuty(slot phase0.Slot, pk phase0.BLSPubKey) { v, ok := d.m[slot] if !ok { - d.m[slot] = map[phase0.BLSPubKey]int{ + d.m[slot] = map[phase0.BLSPubKey]uint64{ pk: 1, } } else { diff --git a/operator/duties/scheduler.go b/operator/duties/scheduler.go index 0cdc2db1ae..2c42b4c268 100644 --- a/operator/duties/scheduler.go +++ b/operator/duties/scheduler.go @@ -29,6 +29,7 @@ import ( "github.com/ssvlabs/ssv/operator/duties/dutystore" "github.com/ssvlabs/ssv/operator/slotticker" "github.com/ssvlabs/ssv/protocol/v2/types" + "github.com/ssvlabs/ssv/utils/casts" ) //go:generate mockgen -package=duties -destination=./scheduler_mock.go -source=./scheduler.go @@ -272,7 +273,7 @@ func (s *Scheduler) SlotTicker(ctx context.Context) { case <-s.ticker.Next(): slot := s.ticker.Slot() - delay := s.network.SlotDurationSec() / time.Duration(goclient.IntervalsPerSlot) /* a third of the slot duration */ + delay := s.network.SlotDurationSec() / casts.DurationFromUint64(goclient.IntervalsPerSlot) /* a third of the slot duration */ finalTime := s.network.Beacon.GetSlotStartTime(slot).Add(delay) waitDuration := time.Until(finalTime) @@ -357,7 +358,7 @@ func (s *Scheduler) HandleHeadEvent(logger *zap.Logger) func(event *eth2apiv1.Ev s.currentDutyDependentRoot = data.CurrentDutyDependentRoot currentTime := time.Now() - delay := s.network.SlotDurationSec() / time.Duration(goclient.IntervalsPerSlot) /* a third of the slot duration */ + delay := s.network.SlotDurationSec() / casts.DurationFromUint64(goclient.IntervalsPerSlot) /* a third of the slot duration */ slotStartTimeWithDelay := s.network.Beacon.GetSlotStartTime(data.Slot).Add(delay) if currentTime.Before(slotStartTimeWithDelay) { logger.Debug("🏁 Head event: Block arrived before 1/3 slot", zap.Duration("time_saved", slotStartTimeWithDelay.Sub(currentTime))) diff --git a/operator/duties/scheduler_test.go b/operator/duties/scheduler_test.go index 29034820d9..e963e782ed 100644 --- a/operator/duties/scheduler_test.go +++ b/operator/duties/scheduler_test.go @@ -122,7 +122,7 @@ func setupSchedulerAndMocks(t *testing.T, handlers []dutyHandler, currentSlot *S mockBeaconNode.EXPECT().Events(gomock.Any(), gomock.Any(), 
gomock.Any()).Return(nil) - mockNetworkConfig.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().MinGenesisTime().Return(uint64(0)).AnyTimes() + mockNetworkConfig.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().MinGenesisTime().Return(int64(0)).AnyTimes() mockNetworkConfig.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().SlotDurationSec().Return(150 * time.Millisecond).AnyTimes() mockNetworkConfig.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().SlotsPerEpoch().Return(uint64(32)).AnyTimes() mockNetworkConfig.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().GetSlotStartTime(gomock.Any()).DoAndReturn( diff --git a/operator/duties/voluntary_exit.go b/operator/duties/voluntary_exit.go index f4a03e8fb5..b89c2d7fc1 100644 --- a/operator/duties/voluntary_exit.go +++ b/operator/duties/voluntary_exit.go @@ -2,9 +2,10 @@ package duties import ( "context" - genesisspectypes "github.com/ssvlabs/ssv-spec-pre-cc/types" "math/big" + genesisspectypes "github.com/ssvlabs/ssv-spec-pre-cc/types" + "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/ssvlabs/ssv-spec/types" "go.uber.org/zap" @@ -162,7 +163,7 @@ func (h *VoluntaryExitHandler) blockSlot(ctx context.Context, blockNumber uint64 return 0, err } - blockSlot = h.network.Beacon.EstimatedSlotAtTime(int64(block.Time())) + blockSlot = h.network.Beacon.EstimatedSlotAtTime(int64(block.Time())) // #nosec G115 h.blockSlots[blockNumber] = blockSlot for k, v := range h.blockSlots { diff --git a/operator/slotticker/slotticker.go b/operator/slotticker/slotticker.go index 17a2da8458..098a75b2bf 100644 --- a/operator/slotticker/slotticker.go +++ b/operator/slotticker/slotticker.go @@ -4,6 +4,7 @@ import ( "time" "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ssvlabs/ssv/utils/casts" "go.uber.org/zap" ) @@ -72,13 +73,13 @@ func (s *slotTicker) Next() <-chan time.Time { default: } } - nextSlot := phase0.Slot(timeSinceGenesis/s.slotDuration) + 1 + nextSlot := phase0.Slot(timeSinceGenesis/s.slotDuration) + 1 // #nosec G115 if nextSlot <= s.slot { // We've already ticked for this slot, so we need to wait for the next one. 
nextSlot = s.slot + 1 s.logger.Debug("double tick", zap.Uint64("slot", uint64(s.slot))) } - nextSlotStartTime := s.genesisTime.Add(time.Duration(nextSlot) * s.slotDuration) + nextSlotStartTime := s.genesisTime.Add(casts.DurationFromUint64(uint64(nextSlot)) * s.slotDuration) s.timer.Reset(time.Until(nextSlotStartTime)) s.slot = nextSlot return s.timer.C() diff --git a/operator/validator/controller.go b/operator/validator/controller.go index 6069235912..de3e9dbd23 100644 --- a/operator/validator/controller.go +++ b/operator/validator/controller.go @@ -22,7 +22,6 @@ import ( "go.uber.org/zap" "github.com/ssvlabs/ssv/exporter/convert" - "github.com/ssvlabs/ssv/ibft/genesisstorage" "github.com/ssvlabs/ssv/ibft/storage" "github.com/ssvlabs/ssv/logging" @@ -429,7 +428,7 @@ func (c *controller) handleRouterMessages() { } } -var nonCommitteeValidatorTTLs = map[spectypes.RunnerRole]phase0.Slot{ +var nonCommitteeValidatorTTLs = map[spectypes.RunnerRole]int{ spectypes.RoleCommittee: 64, spectypes.RoleProposer: 4, spectypes.RoleAggregator: 4, @@ -1002,7 +1001,7 @@ func (c *controller) committeeMemberFromShare(share *ssvtypes.SSVShare) (*specty } } - f := ssvtypes.ComputeF(len(share.Committee)) + f := ssvtypes.ComputeF(uint64(len(share.Committee))) operatorPEM, err := base64.StdEncoding.DecodeString(string(c.operatorDataStore.GetOperatorData().PublicKey)) if err != nil { @@ -1243,7 +1242,7 @@ func SetupCommitteeRunners( Storage: options.Storage.Get(convert.RunnerRole(role)), Network: options.Network, Timer: roundtimer.New(ctx, options.NetworkConfig.Beacon, role, nil), - CutOffRound: specqbft.Round(specqbft.CutoffRound), + CutOffRound: roundtimer.CutOffRound, } identifier := spectypes.NewMsgID(options.NetworkConfig.AlanDomainType, options.Operator.CommitteeID[:], role) @@ -1307,7 +1306,7 @@ func SetupRunners( Storage: options.Storage.Get(convert.RunnerRole(role)), Network: options.Network, Timer: roundtimer.New(ctx, options.NetworkConfig.Beacon, role, nil), - CutOffRound: specqbft.Round(specqbft.CutoffRound), + CutOffRound: roundtimer.CutOffRound, } config.ValueCheckF = valueCheckF diff --git a/protocol/genesis/qbft/instance/instance.go b/protocol/genesis/qbft/instance/instance.go index c61f1007a4..18c0f5917f 100644 --- a/protocol/genesis/qbft/instance/instance.go +++ b/protocol/genesis/qbft/instance/instance.go @@ -255,5 +255,5 @@ func (i *Instance) bumpToRound(round genesisspecqbft.Round) { // CanProcessMessages will return true if instance can process messages func (i *Instance) CanProcessMessages() bool { - return !i.forceStop && int(i.State.Round) < CutoffRound + return !i.forceStop && uint64(i.State.Round) < CutoffRound } diff --git a/protocol/genesis/qbft/instance/timeout.go b/protocol/genesis/qbft/instance/timeout.go index f424bead0c..7232c627b4 100644 --- a/protocol/genesis/qbft/instance/timeout.go +++ b/protocol/genesis/qbft/instance/timeout.go @@ -5,10 +5,11 @@ import ( "go.uber.org/zap" specqbft "github.com/ssvlabs/ssv-spec/qbft" + "github.com/ssvlabs/ssv/logging/fields" ) -var CutoffRound = 15 // stop processing instances after 8*2+120*6 = 14.2 min (~ 2 epochs) +var CutoffRound = uint64(15) // stop processing instances after 8*2+120*6 = 14.2 min (~ 2 epochs) func (i *Instance) UponRoundTimeout(logger *zap.Logger) error { if !i.CanProcessMessages() { diff --git a/protocol/genesis/qbft/roundtimer/timer.go b/protocol/genesis/qbft/roundtimer/timer.go index 35a4709fa8..43854dd3ae 100644 --- a/protocol/genesis/qbft/roundtimer/timer.go +++ b/protocol/genesis/qbft/roundtimer/timer.go @@ -9,6 +9,7 @@ 
import ( "github.com/attestantio/go-eth2-client/spec/phase0" genesisspecqbft "github.com/ssvlabs/ssv-spec-pre-cc/qbft" genesisspectypes "github.com/ssvlabs/ssv-spec-pre-cc/types" + "github.com/ssvlabs/ssv/utils/casts" ) //go:generate mockgen -package=mocks -destination=./mocks/timer.go -source=./timer.go @@ -120,10 +121,10 @@ func (t *RoundTimer) RoundTimeout(height genesisspecqbft.Height, round genesissp // Calculate additional timeout based on round var additionalTimeout time.Duration if round <= t.timeoutOptions.quickThreshold { - additionalTimeout = time.Duration(int(round)) * t.timeoutOptions.quick + additionalTimeout = casts.DurationFromUint64(uint64(round)) * t.timeoutOptions.quick } else { - quickPortion := time.Duration(t.timeoutOptions.quickThreshold) * t.timeoutOptions.quick - slowPortion := time.Duration(int(round-t.timeoutOptions.quickThreshold)) * t.timeoutOptions.slow + quickPortion := casts.DurationFromUint64(uint64(t.timeoutOptions.quickThreshold)) * t.timeoutOptions.quick + slowPortion := casts.DurationFromUint64(uint64(round-t.timeoutOptions.quickThreshold)) * t.timeoutOptions.slow additionalTimeout = quickPortion + slowPortion } @@ -147,12 +148,12 @@ func (t *RoundTimer) OnTimeout(done OnRoundTimeoutF) { // Round returns a round. func (t *RoundTimer) Round() genesisspecqbft.Round { - return genesisspecqbft.Round(atomic.LoadInt64(&t.round)) + return genesisspecqbft.Round(atomic.LoadInt64(&t.round)) // #nosec G115 } // TimeoutForRound times out for a given round. func (t *RoundTimer) TimeoutForRound(height genesisspecqbft.Height, round genesisspecqbft.Round) { - atomic.StoreInt64(&t.round, int64(round)) + atomic.StoreInt64(&t.round, int64(round)) // #nosec G115 timeout := t.RoundTimeout(height, round) // preparing the underlying timer diff --git a/protocol/genesis/qbft/spectest/controller_type.go b/protocol/genesis/qbft/spectest/controller_type.go index cbb51cdb83..1b4c84f0cf 100644 --- a/protocol/genesis/qbft/spectest/controller_type.go +++ b/protocol/genesis/qbft/spectest/controller_type.go @@ -38,14 +38,15 @@ func RunControllerSpecTest(t *testing.T, test *spectests.ControllerSpecTest) { } var lastErr error - for i, runData := range test.RunInstanceData { - height := genesisspecqbft.Height(i) + height := genesisspecqbft.Height(0) + for _, runData := range test.RunInstanceData { if runData.Height != nil { height = *runData.Height } if err := runInstanceWithData(t, logger, height, contr, runData); err != nil { lastErr = err } + height++ } if len(test.ExpectedError) != 0 { diff --git a/protocol/genesis/ssv/genesisqueue/messages.go b/protocol/genesis/ssv/genesisqueue/messages.go index d2cc3f9824..fededfafc0 100644 --- a/protocol/genesis/ssv/genesisqueue/messages.go +++ b/protocol/genesis/ssv/genesisqueue/messages.go @@ -247,5 +247,5 @@ func isDecidedMesssage(s *State, sm *genesisspecqbft.SignedMessage) bool { return false } return sm.Message.MsgType == genesisspecqbft.CommitMsgType && - len(sm.Signers) > int(s.Quorum) + uint64(len(sm.Signers)) > s.Quorum } diff --git a/protocol/genesis/ssv/spectest/msg_processing_type.go b/protocol/genesis/ssv/spectest/msg_processing_type.go index b865e12a8e..ae8132faf4 100644 --- a/protocol/genesis/ssv/spectest/msg_processing_type.go +++ b/protocol/genesis/ssv/spectest/msg_processing_type.go @@ -15,6 +15,7 @@ import ( "go.uber.org/zap" typescomparable "github.com/ssvlabs/ssv-spec-pre-cc/types/testingutils/comparable" + "github.com/ssvlabs/ssv/logging" "github.com/ssvlabs/ssv/protocol/genesis/ssv/genesisqueue" 
"github.com/ssvlabs/ssv/protocol/genesis/ssv/runner" diff --git a/protocol/genesis/ssv/validator/msgqueue_consumer.go b/protocol/genesis/ssv/validator/msgqueue_consumer.go index e5bf69286f..269003de99 100644 --- a/protocol/genesis/ssv/validator/msgqueue_consumer.go +++ b/protocol/genesis/ssv/validator/msgqueue_consumer.go @@ -14,7 +14,7 @@ import ( "github.com/ssvlabs/ssv/logging/fields" "github.com/ssvlabs/ssv/protocol/genesis/message" "github.com/ssvlabs/ssv/protocol/genesis/qbft/instance" - genesisqueue "github.com/ssvlabs/ssv/protocol/genesis/ssv/genesisqueue" + "github.com/ssvlabs/ssv/protocol/genesis/ssv/genesisqueue" "github.com/ssvlabs/ssv/protocol/genesis/types" ) @@ -45,7 +45,7 @@ func (v *Validator) HandleMessage(logger *zap.Logger, msg *genesisqueue.GenesisS zap.String("msg_type", message.MsgTypeToString(msg.MsgType)), zap.String("msg_id", msgID)) } - logger.Debug("📬 queue: pushed message", fields.MessageID(spectypes.MessageID(msg.MsgID)), fields.MessageType(spectypes.MsgType(msg.MsgType))) + //logger.Debug("📬 queue: pushed message", fields.MessageID(spectypes.MessageID(msg.MsgID)), fields.MessageType(spectypes.MsgType(msg.MsgType))) } else { logger.Error("❌ missing queue for role type", fields.BeaconRole(spectypes.BeaconRole(msg.MsgID.GetRoleType()))) } @@ -168,15 +168,15 @@ func (v *Validator) logMsg(logger *zap.Logger, msg *genesisqueue.GenesisSSVMessa case genesisspectypes.SSVConsensusMsgType: sm := msg.Body.(*genesisspecqbft.SignedMessage) baseFields = []zap.Field{ - zap.Int64("msg_height", int64(sm.Message.Height)), - zap.Int64("msg_round", int64(sm.Message.Round)), - zap.Int64("consensus_msg_type", int64(sm.Message.MsgType)), + zap.Uint64("msg_height", uint64(sm.Message.Height)), + zap.Uint64("msg_round", uint64(sm.Message.Round)), + zap.Uint64("consensus_msg_type", uint64(sm.Message.MsgType)), zap.Any("signers", sm.Signers), } case genesisspectypes.SSVPartialSignatureMsgType: psm := msg.Body.(*genesisspectypes.SignedPartialSignatureMessage) baseFields = []zap.Field{ - zap.Int64("signer", int64(psm.Signer)), + zap.Uint64("signer", psm.Signer), fields.Slot(psm.Message.Slot), } } diff --git a/protocol/genesis/testing/test_utils.go b/protocol/genesis/testing/test_utils.go index f9e1ed5b4a..764feb7fbf 100644 --- a/protocol/genesis/testing/test_utils.go +++ b/protocol/genesis/testing/test_utils.go @@ -34,12 +34,12 @@ func GenerateBLSKeys(oids ...genesisspectypes.OperatorID) (map[genesisspectypes. 
nodes := make([]*genesisspectypes.Operator, 0) sks := make(map[genesisspectypes.OperatorID]*bls.SecretKey) - for i, oid := range oids { + for _, oid := range oids { sk := &bls.SecretKey{} sk.SetByCSPRNG() nodes = append(nodes, &genesisspectypes.Operator{ - OperatorID: genesisspectypes.OperatorID(i), + OperatorID: oid, PubKey: sk.GetPublicKey().Serialize(), }) sks[oid] = sk diff --git a/protocol/genesis/types/share.go b/protocol/genesis/types/share.go index ba208bd077..9766490159 100644 --- a/protocol/genesis/types/share.go +++ b/protocol/genesis/types/share.go @@ -8,7 +8,7 @@ import ( ) func ConvertToGenesisShare(share *spectypes.Share, operator *spectypes.CommitteeMember) *genesisspectypes.Share { - q, pc := ComputeQuorumAndPartialQuorum(len(share.Committee)) + q, pc := ComputeQuorumAndPartialQuorum(uint64(len((share.Committee)))) key := make([]byte, len(share.ValidatorPubKey)) copy(key, share.ValidatorPubKey[:]) diff --git a/protocol/genesis/types/ssvshare.go b/protocol/genesis/types/ssvshare.go index 5beaba666b..d2659ee60d 100644 --- a/protocol/genesis/types/ssvshare.go +++ b/protocol/genesis/types/ssvshare.go @@ -50,7 +50,7 @@ func (s *SSVShare) Decode(data []byte) error { if err := d.Decode(s); err != nil { return fmt.Errorf("decode SSVShare: %w", err) } - s.Quorum, s.PartialQuorum = ComputeQuorumAndPartialQuorum(len(s.Committee)) + s.Quorum, s.PartialQuorum = ComputeQuorumAndPartialQuorum(uint64(len(s.Committee))) return nil } @@ -93,9 +93,9 @@ func ComputeClusterIDHash(address common.Address, operatorIds []uint64) []byte { return hash } -func ComputeQuorumAndPartialQuorum(committeeSize int) (quorum uint64, partialQuorum uint64) { +func ComputeQuorumAndPartialQuorum(committeeSize uint64) (quorum uint64, partialQuorum uint64) { f := (committeeSize - 1) / 3 - return uint64(f*2 + 1), uint64(f + 1) + return f*2 + 1, f + 1 } func ValidCommitteeSize(committeeSize int) bool { diff --git a/protocol/v2/blockchain/beacon/mocks/network.go b/protocol/v2/blockchain/beacon/mocks/network.go index a8bf74f046..5e1b378809 100644 --- a/protocol/v2/blockchain/beacon/mocks/network.go +++ b/protocol/v2/blockchain/beacon/mocks/network.go @@ -1,9 +1,9 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: ./network.go +// Source: ./protocol/v2/blockchain/beacon/network.go // // Generated by this command: // -// mockgen -package=mocks -destination=./mocks/network.go -source=./network.go +// mockgen -package=mocks -destination=./protocol/v2/blockchain/beacon/mocks/network.go -source=./protocol/v2/blockchain/beacon/network.go // // Package mocks is a generated GoMock package. @@ -295,10 +295,10 @@ func (mr *MockBeaconNetworkMockRecorder) LastSlotOfSyncPeriod(period any) *gomoc } // MinGenesisTime mocks base method. 
-func (m *MockBeaconNetwork) MinGenesisTime() uint64 { +func (m *MockBeaconNetwork) MinGenesisTime() int64 { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MinGenesisTime") - ret0, _ := ret[0].(uint64) + ret0, _ := ret[0].(int64) return ret0 } diff --git a/protocol/v2/blockchain/beacon/network.go b/protocol/v2/blockchain/beacon/network.go index e4876b009b..c47fa700ed 100644 --- a/protocol/v2/blockchain/beacon/network.go +++ b/protocol/v2/blockchain/beacon/network.go @@ -17,7 +17,7 @@ type Network struct { type BeaconNetwork interface { ForkVersion() [4]byte - MinGenesisTime() uint64 + MinGenesisTime() int64 SlotDurationSec() time.Duration SlotsPerEpoch() uint64 EstimatedCurrentSlot() phase0.Slot @@ -59,11 +59,11 @@ func NewLocalTestNetwork(network spectypes.BeaconNetwork) Network { } // MinGenesisTime returns min genesis time value -func (n Network) MinGenesisTime() uint64 { +func (n Network) MinGenesisTime() int64 { if n.LocalTestNet { return 1689072978 } - return n.BeaconNetwork.MinGenesisTime() + return int64(n.BeaconNetwork.MinGenesisTime()) // #nosec G115 } // GetNetwork returns the network @@ -78,8 +78,8 @@ func (n Network) GetBeaconNetwork() spectypes.BeaconNetwork { // GetSlotStartTime returns the start time for the given slot func (n Network) GetSlotStartTime(slot phase0.Slot) time.Time { - timeSinceGenesisStart := uint64(slot) * uint64(n.SlotDurationSec().Seconds()) - start := time.Unix(int64(n.MinGenesisTime()+timeSinceGenesisStart), 0) + timeSinceGenesisStart := int64(uint64(slot) * uint64(n.SlotDurationSec().Seconds())) // #nosec G115 + start := time.Unix(n.MinGenesisTime()+timeSinceGenesisStart, 0) return start } @@ -95,11 +95,11 @@ func (n Network) EstimatedCurrentSlot() phase0.Slot { // EstimatedSlotAtTime estimates slot at the given time func (n Network) EstimatedSlotAtTime(time int64) phase0.Slot { - genesis := int64(n.MinGenesisTime()) + genesis := n.MinGenesisTime() if time < genesis { return 0 } - return phase0.Slot(uint64(time-genesis) / uint64(n.SlotDurationSec().Seconds())) + return phase0.Slot(uint64(time-genesis) / uint64(n.SlotDurationSec().Seconds())) //#nosec G115 } // EstimatedCurrentEpoch estimates the current epoch diff --git a/protocol/v2/qbft/config.go b/protocol/v2/qbft/config.go index 4042d9a4f0..1d356fd582 100644 --- a/protocol/v2/qbft/config.go +++ b/protocol/v2/qbft/config.go @@ -8,6 +8,8 @@ import ( qbftstorage "github.com/ssvlabs/ssv/protocol/v2/qbft/storage" ) +var CutOffRound specqbft.Round = specqbft.Round(specqbft.CutoffRound) + type signing interface { // GetShareSigner returns a BeaconSigner instance GetShareSigner() spectypes.BeaconSigner diff --git a/protocol/v2/qbft/round_robin_proposer.go b/protocol/v2/qbft/round_robin_proposer.go index cf02051705..c4117770ca 100644 --- a/protocol/v2/qbft/round_robin_proposer.go +++ b/protocol/v2/qbft/round_robin_proposer.go @@ -10,11 +10,11 @@ import ( // Each new height has a different first round proposer which is +1 from the previous height. 
// First height starts with index 0 func RoundRobinProposer(state *qbft.State, round qbft.Round) types.OperatorID { - firstRoundIndex := 0 + firstRoundIndex := uint64(0) if state.Height != qbft.FirstHeight { - firstRoundIndex += int(state.Height) % len(state.CommitteeMember.Committee) + firstRoundIndex += uint64(state.Height) % uint64(len(state.CommitteeMember.Committee)) } - index := (firstRoundIndex + int(round) - int(qbft.FirstRound)) % len(state.CommitteeMember.Committee) + index := (firstRoundIndex + uint64(round) - uint64(qbft.FirstRound)) % uint64(len(state.CommitteeMember.Committee)) return state.CommitteeMember.Committee[index].OperatorID } diff --git a/protocol/v2/qbft/roundtimer/timer.go b/protocol/v2/qbft/roundtimer/timer.go index 820483e5d7..a8bbec5ee2 100644 --- a/protocol/v2/qbft/roundtimer/timer.go +++ b/protocol/v2/qbft/roundtimer/timer.go @@ -9,6 +9,7 @@ import ( "github.com/attestantio/go-eth2-client/spec/phase0" specqbft "github.com/ssvlabs/ssv-spec/qbft" spectypes "github.com/ssvlabs/ssv-spec/types" + "github.com/ssvlabs/ssv/utils/casts" ) //go:generate mockgen -package=mocks -destination=./mocks/timer.go -source=./timer.go @@ -21,6 +22,8 @@ const ( SlowTimeout = 2 * time.Minute ) +var CutOffRound specqbft.Round = specqbft.Round(specqbft.CutoffRound) + // Timer is an interface for a round timer, calling the UponRoundTimeout when times out type Timer interface { // TimeoutForRound will reset running timer if exists and will start a new timer for a specific round @@ -49,7 +52,7 @@ type RoundTimer struct { // result holds the result of the timer done OnRoundTimeoutF // round is the current round of the timer - round int64 + round uint64 // timeoutOptions holds the timeoutOptions for the timer timeoutOptions TimeoutOptions // role is the role of the instance @@ -120,10 +123,10 @@ func (t *RoundTimer) RoundTimeout(height specqbft.Height, round specqbft.Round) // Calculate additional timeout based on round var additionalTimeout time.Duration if round <= t.timeoutOptions.quickThreshold { - additionalTimeout = time.Duration(int(round)) * t.timeoutOptions.quick + additionalTimeout = casts.DurationFromUint64(uint64(round)) * t.timeoutOptions.quick } else { - quickPortion := time.Duration(t.timeoutOptions.quickThreshold) * t.timeoutOptions.quick - slowPortion := time.Duration(int(round-t.timeoutOptions.quickThreshold)) * t.timeoutOptions.slow + quickPortion := casts.DurationFromUint64(uint64(t.timeoutOptions.quickThreshold)) * t.timeoutOptions.quick + slowPortion := casts.DurationFromUint64(uint64(round-t.timeoutOptions.quickThreshold)) * t.timeoutOptions.slow additionalTimeout = quickPortion + slowPortion } @@ -147,12 +150,12 @@ func (t *RoundTimer) OnTimeout(done OnRoundTimeoutF) { // Round returns a round. func (t *RoundTimer) Round() specqbft.Round { - return specqbft.Round(atomic.LoadInt64(&t.round)) + return specqbft.Round(atomic.LoadUint64(&t.round)) // #nosec G115 } // TimeoutForRound times out for a given round. 
func (t *RoundTimer) TimeoutForRound(height specqbft.Height, round specqbft.Round) { - atomic.StoreInt64(&t.round, int64(round)) + atomic.StoreUint64(&t.round, uint64(round)) timeout := t.RoundTimeout(height, round) // preparing the underlying timer diff --git a/protocol/v2/qbft/spectest/controller_type.go b/protocol/v2/qbft/spectest/controller_type.go index 37368f96d9..5cf150b850 100644 --- a/protocol/v2/qbft/spectest/controller_type.go +++ b/protocol/v2/qbft/spectest/controller_type.go @@ -5,12 +5,13 @@ import ( "encoding/hex" "encoding/json" "fmt" - "github.com/ssvlabs/ssv/exporter/convert" "os" "path/filepath" "reflect" "testing" + "github.com/ssvlabs/ssv/exporter/convert" + specqbft "github.com/ssvlabs/ssv-spec/qbft" spectests "github.com/ssvlabs/ssv-spec/qbft/spectest/tests" spectypes "github.com/ssvlabs/ssv-spec/types" @@ -39,14 +40,15 @@ func RunControllerSpecTest(t *testing.T, test *spectests.ControllerSpecTest) { } var lastErr error - for i, runData := range test.RunInstanceData { - height := specqbft.Height(i) + height := specqbft.Height(0) + for _, runData := range test.RunInstanceData { if runData.Height != nil { height = *runData.Height } if err := runInstanceWithData(t, logger, height, contr, runData); err != nil { lastErr = err } + height++ } if len(test.ExpectedError) != 0 { diff --git a/protocol/v2/qbft/spectest/msg_processing_type.go b/protocol/v2/qbft/spectest/msg_processing_type.go index 2fb808818b..c11d8ada11 100644 --- a/protocol/v2/qbft/spectest/msg_processing_type.go +++ b/protocol/v2/qbft/spectest/msg_processing_type.go @@ -34,7 +34,7 @@ func RunMsgProcessing(t *testing.T, test *spectests.MsgProcessingSpecTest) { ks := spectestingutils.KeySetForCommitteeMember(test.Pre.State.CommitteeMember) signer := spectestingutils.NewOperatorSigner(ks, 1) pre := instance.NewInstance( - qbfttesting.TestingConfig(logger, ks, convert.RunnerRole(msgId.GetRoleType())), + qbfttesting.TestingConfig(logger, ks, convert.RunnerRole(msgId.GetRoleType())), // #nosec G104 test.Pre.State.CommitteeMember, test.Pre.State.ID, test.Pre.State.Height, diff --git a/protocol/v2/ssv/queue/messages.go b/protocol/v2/ssv/queue/messages.go index e41c8222fe..240046378b 100644 --- a/protocol/v2/ssv/queue/messages.go +++ b/protocol/v2/ssv/queue/messages.go @@ -237,7 +237,7 @@ func isDecidedMessage(s *State, m *SSVMessage) bool { return false } return consensusMessage.MsgType == specqbft.CommitMsgType && - len(m.SignedSSVMessage.OperatorIDs) > int(s.Quorum) + uint64(len(m.SignedSSVMessage.OperatorIDs)) > s.Quorum } // scoreCommitteeMessageSubtype returns an integer score for the message's type. 
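For reference, the quorum-comparison pattern these hunks apply can be sketched in isolation: the signer count (a len(), which is never negative) is widened to uint64 rather than narrowing the uint64 quorum down to int, which is what gosec G115 flags. The sketch below is illustrative only; quorumsketch, hasQuorum, and isDecided are not identifiers from the repository.

package quorumsketch

// hasQuorum reports whether the collected signers reach the quorum.
// Widening len() to uint64 is always safe because len() is non-negative,
// whereas the old int(quorum) cast could truncate or wrap where int is 32-bit.
func hasQuorum(signers []uint64, quorum uint64) bool {
	return uint64(len(signers)) >= quorum
}

// isDecided mirrors the stricter "more than quorum signers" comparison the
// patch uses for decided messages.
func isDecided(signers []uint64, quorum uint64) bool {
	return uint64(len(signers)) > quorum
}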
diff --git a/protocol/v2/ssv/runner/committee.go b/protocol/v2/ssv/runner/committee.go index d9b2aa65ef..31fa8095b3 100644 --- a/protocol/v2/ssv/runner/committee.go +++ b/protocol/v2/ssv/runner/committee.go @@ -243,7 +243,7 @@ func (cr *CommitteeRunner) ProcessConsensus(logger *zap.Logger, msg *spectypes.S if err != nil { return errors.Wrap(err, "failed to hash attestation data") } - logger.Debug("signed attestation data", zap.Int("validator_index", int(duty.ValidatorIndex)), + logger.Debug("signed attestation data", zap.Uint64("validator_index", uint64(duty.ValidatorIndex)), zap.String("pub_key", hex.EncodeToString(duty.PubKey[:])), zap.Any("attestation_data", attestationData), zap.String("attestation_data_root", hex.EncodeToString(attDataRoot[:])), @@ -308,20 +308,20 @@ func (cr *CommitteeRunner) ProcessPostConsensus(logger *zap.Logger, signedMsg *s logger = logger.With(fields.Slot(signedMsg.Slot)) // TODO: (Alan) revert? - indices := make([]int, len(signedMsg.Messages)) + indices := make([]uint64, len(signedMsg.Messages)) signers := make([]uint64, len(signedMsg.Messages)) for i, msg := range signedMsg.Messages { signers[i] = msg.Signer - indices[i] = int(msg.ValidatorIndex) + indices[i] = uint64(msg.ValidatorIndex) } logger = logger.With(fields.ConsensusTime(cr.metrics.GetConsensusTime())) logger.Debug("🧩 got partial signatures", zap.Bool("quorum", quorum), fields.Slot(cr.BaseRunner.State.StartingDuty.DutySlot()), - zap.Int("signer", int(signedMsg.Messages[0].Signer)), + zap.Uint64("signer", signedMsg.Messages[0].Signer), zap.Int("sigs", len(roots)), - zap.Ints("validators", indices)) + zap.Uint64s("validators", indices)) if !quorum { return nil @@ -378,7 +378,7 @@ func (cr *CommitteeRunner) ProcessPostConsensus(logger *zap.Logger, signedMsg *s // Reconstruct signature share := cr.BaseRunner.Share[validator] pubKey := share.ValidatorPubKey - vlogger := logger.With(zap.Int("validator_index", int(validator)), zap.String("pubkey", hex.EncodeToString(pubKey[:]))) + vlogger := logger.With(zap.Uint64("validator_index", uint64(validator)), zap.String("pubkey", hex.EncodeToString(pubKey[:]))) sig, err := cr.BaseRunner.State.ReconstructBeaconSig(cr.BaseRunner.State.PostConsensusContainer, root, pubKey[:], validator) diff --git a/protocol/v2/ssv/validator/committee_queue.go b/protocol/v2/ssv/validator/committee_queue.go index b8c2563e7d..bfe8776df7 100644 --- a/protocol/v2/ssv/validator/committee_queue.go +++ b/protocol/v2/ssv/validator/committee_queue.go @@ -161,15 +161,15 @@ func (c *Committee) logMsg(logger *zap.Logger, msg *queue.SSVMessage, logMsg str case spectypes.SSVConsensusMsgType: sm := msg.Body.(*specqbft.Message) baseFields = []zap.Field{ - zap.Int64("msg_height", int64(sm.Height)), - zap.Int64("msg_round", int64(sm.Round)), - zap.Int64("consensus_msg_type", int64(sm.MsgType)), + zap.Uint64("msg_height", uint64(sm.Height)), + zap.Uint64("msg_round", uint64(sm.Round)), + zap.Uint64("consensus_msg_type", uint64(sm.MsgType)), zap.Any("signers", msg.SignedSSVMessage.OperatorIDs), } case spectypes.SSVPartialSignatureMsgType: psm := msg.Body.(*spectypes.PartialSignatureMessages) baseFields = []zap.Field{ - zap.Int64("signer", int64(psm.Messages[0].Signer)), + zap.Uint64("signer", psm.Messages[0].Signer), fields.Slot(psm.Slot), } } diff --git a/protocol/v2/ssv/validator/msgqueue_consumer.go b/protocol/v2/ssv/validator/msgqueue_consumer.go index 9330d51986..502f0ca503 100644 --- a/protocol/v2/ssv/validator/msgqueue_consumer.go +++ b/protocol/v2/ssv/validator/msgqueue_consumer.go @@ -168,15 
+168,15 @@ func (v *Validator) logMsg(logger *zap.Logger, msg *queue.SSVMessage, logMsg str qbftMsg := msg.Body.(*specqbft.Message) baseFields = []zap.Field{ - zap.Int64("msg_height", int64(qbftMsg.Height)), - zap.Int64("msg_round", int64(qbftMsg.Round)), - zap.Int64("consensus_msg_type", int64(qbftMsg.MsgType)), + zap.Uint64("msg_height", uint64(qbftMsg.Height)), + zap.Uint64("msg_round", uint64(qbftMsg.Round)), + zap.Uint64("consensus_msg_type", uint64(qbftMsg.MsgType)), zap.Any("signers", msg.SignedSSVMessage.OperatorIDs), } case spectypes.SSVPartialSignatureMsgType: psm := msg.Body.(*spectypes.PartialSignatureMessages) baseFields = []zap.Field{ - zap.Int64("signer", int64(psm.Messages[0].Signer)), // TODO: only one signer? + zap.Uint64("signer", psm.Messages[0].Signer), // TODO: only one signer? fields.Slot(psm.Slot), } } diff --git a/protocol/v2/ssv/validator/non_committee_validator.go b/protocol/v2/ssv/validator/non_committee_validator.go index bb4af784c4..aa8879b0ec 100644 --- a/protocol/v2/ssv/validator/non_committee_validator.go +++ b/protocol/v2/ssv/validator/non_committee_validator.go @@ -2,6 +2,7 @@ package validator import ( "fmt" + "github.com/ssvlabs/ssv/protocol/v2/qbft/roundtimer" "slices" "strconv" "strings" @@ -54,7 +55,7 @@ func NewCommitteeObserver(identifier convert.MessageID, opts CommitteeObserverOp Domain: opts.NetworkConfig.DomainType(), Storage: opts.Storage.Get(identifier.GetRoleType()), Network: opts.Network, - CutOffRound: specqbft.Round(specqbft.CutoffRound), + CutOffRound: roundtimer.CutOffRound, } // TODO: does the specific operator matters? diff --git a/protocol/v2/ssv/validator/validator.go b/protocol/v2/ssv/validator/validator.go index 3189d9adc6..3f20b3ddf5 100644 --- a/protocol/v2/ssv/validator/validator.go +++ b/protocol/v2/ssv/validator/validator.go @@ -11,6 +11,7 @@ import ( spectypes "github.com/ssvlabs/ssv-spec/types" "go.uber.org/zap" + "github.com/ssvlabs/ssv/utils/casts" "github.com/ssvlabs/ssv/utils/hashmap" "github.com/ssvlabs/ssv/ibft/storage" @@ -162,7 +163,7 @@ func (v *Validator) ProcessMessage(logger *zap.Logger, msg *queue.SSVMessage) er if err := qbftMsg.Validate(); err != nil { return errors.Wrap(err, "invalid qbft Message") } - logger = v.loggerForDuty(logger, spectypes.BeaconRole(messageID.GetRoleType()), phase0.Slot(qbftMsg.Height)) + logger = v.loggerForDuty(logger, casts.RunnerRoleToBeaconRole(messageID.GetRoleType()), phase0.Slot(qbftMsg.Height)) logger = logger.With(fields.Height(qbftMsg.Height)) return dutyRunner.ProcessConsensus(logger, msg.SignedSSVMessage) case spectypes.SSVPartialSignatureMsgType: @@ -172,7 +173,7 @@ func (v *Validator) ProcessMessage(logger *zap.Logger, msg *queue.SSVMessage) er if !ok { return errors.New("could not decode post consensus message from network message") } - logger = v.loggerForDuty(logger, spectypes.BeaconRole(messageID.GetRoleType()), signedMsg.Slot) + logger = v.loggerForDuty(logger, casts.RunnerRoleToBeaconRole(messageID.GetRoleType()), signedMsg.Slot) if len(msg.SignedSSVMessage.OperatorIDs) != 1 { return errors.New("PartialSignatureMessage has more than 1 signer") @@ -195,7 +196,7 @@ func (v *Validator) ProcessMessage(logger *zap.Logger, msg *queue.SSVMessage) er func (v *Validator) loggerForDuty(logger *zap.Logger, role spectypes.BeaconRole, slot phase0.Slot) *zap.Logger { logger = logger.With(fields.Slot(slot)) - if dutyID, ok := v.dutyIDs.Get(spectypes.RunnerRole(role)); ok { + if dutyID, ok := v.dutyIDs.Get(casts.BeaconRoleToRunnerRole(role)); ok { return 
logger.With(fields.DutyID(dutyID)) } return logger diff --git a/protocol/v2/testing/test_utils.go b/protocol/v2/testing/test_utils.go index 99130e03d9..2ae1c05dbb 100644 --- a/protocol/v2/testing/test_utils.go +++ b/protocol/v2/testing/test_utils.go @@ -48,7 +48,7 @@ func GenerateOperatorSigner(oids ...spectypes.OperatorID) ([]*rsa.PrivateKey, [] } nodes = append(nodes, &spectypes.Operator{ - OperatorID: spectypes.OperatorID(i), + OperatorID: oids[i], SSVOperatorPubKey: pubKey, }) diff --git a/protocol/v2/types/ssvshare.go b/protocol/v2/types/ssvshare.go index e5a931dab8..2b796b76bc 100644 --- a/protocol/v2/types/ssvshare.go +++ b/protocol/v2/types/ssvshare.go @@ -87,12 +87,12 @@ func (s *SSVShare) OperatorIDs() []spectypes.OperatorID { return ids } -func (s *SSVShare) HasQuorum(cnt int) bool { - return uint64(cnt) >= s.Quorum() +func (s *SSVShare) HasQuorum(cnt uint64) bool { + return cnt >= s.Quorum() } func (s *SSVShare) Quorum() uint64 { - q, _ := ComputeQuorumAndPartialQuorum(len(s.Committee)) + q, _ := ComputeQuorumAndPartialQuorum(uint64(len((s.Committee)))) return q } @@ -116,16 +116,16 @@ func ComputeClusterIDHash(address common.Address, operatorIds []uint64) []byte { return hash } -func ComputeQuorumAndPartialQuorum(committeeSize int) (quorum uint64, partialQuorum uint64) { +func ComputeQuorumAndPartialQuorum(committeeSize uint64) (quorum uint64, partialQuorum uint64) { f := ComputeF(committeeSize) return f*2 + 1, f + 1 } -func ComputeF(committeeSize int) uint64 { - return uint64(committeeSize-1) / 3 +func ComputeF(committeeSize uint64) uint64 { + return (committeeSize - 1) / 3 } -func ValidCommitteeSize(committeeSize int) bool { +func ValidCommitteeSize(committeeSize uint64) bool { f := ComputeF(committeeSize) return (committeeSize-1)%3 == 0 && f >= 1 && f <= 4 } @@ -157,8 +157,8 @@ func ComputeCommitteeID(committee []spectypes.OperatorID) spectypes.CommitteeID // Convert to bytes bytes := make([]byte, len(committee)*4) for i, v := range committee { - binary.LittleEndian.PutUint32(bytes[i*4:], uint32(v)) + binary.LittleEndian.PutUint32(bytes[i*4:], uint32(v)) // nolint:gosec } // Hash - return spectypes.CommitteeID(sha256.Sum256(bytes)) + return sha256.Sum256(bytes) } diff --git a/protocol/v2/types/ssvshare_test.go b/protocol/v2/types/ssvshare_test.go index 877989b70c..56c46397fd 100644 --- a/protocol/v2/types/ssvshare_test.go +++ b/protocol/v2/types/ssvshare_test.go @@ -84,17 +84,17 @@ func TestValidCommitteeSize(t *testing.T) { tt := []struct { name string valid bool - sizes []int + sizes []uint64 }{ { name: "valid", valid: true, - sizes: []int{4, 7, 10, 13}, + sizes: []uint64{4, 7, 10, 13}, }, { name: "invalid", valid: false, - sizes: []int{0, 1, 2, 3, 5, 6, 8, 9, 11, 12, 14, 15, 16, 17, 18, 19, -1, -4, -7}, + sizes: []uint64{0, 1, 2, 3, 5, 6, 8, 9, 11, 12, 14, 15, 16, 17, 18, 19}, }, } diff --git a/registry/storage/shares.go b/registry/storage/shares.go index 331aa4538c..46cb1c852f 100644 --- a/registry/storage/shares.go +++ b/registry/storage/shares.go @@ -105,7 +105,7 @@ func (s *storageShare) Decode(data []byte) error { if err := d.Decode(s); err != nil { return fmt.Errorf("decode storageShare: %w", err) } - s.Quorum, s.PartialQuorum = types.ComputeQuorumAndPartialQuorum(len(s.Committee)) + s.Quorum, s.PartialQuorum = types.ComputeQuorumAndPartialQuorum(uint64(len(s.Committee))) return nil } @@ -262,7 +262,7 @@ func specShareToStorageShare(share *types.SSVShare) *storageShare { PubKey: c.SharePubKey, } } - quorum, partialQuorum := 
types.ComputeQuorumAndPartialQuorum(len(committee)) + quorum, partialQuorum := types.ComputeQuorumAndPartialQuorum(uint64(len(committee))) stShare := &storageShare{ Share: Share{ ValidatorPubKey: share.ValidatorPubKey[:], diff --git a/registry/storage/shares_test.go b/registry/storage/shares_test.go index e2e26398bc..513cc76cf4 100644 --- a/registry/storage/shares_test.go +++ b/registry/storage/shares_test.go @@ -207,7 +207,7 @@ func generateRandomValidatorStorageShare(splitKeys map[uint64]*bls.SecretKey) (* return ibftCommittee[i].OperatorID < ibftCommittee[j].OperatorID }) - quorum, partialQuorum := ssvtypes.ComputeQuorumAndPartialQuorum(len(splitKeys)) + quorum, partialQuorum := ssvtypes.ComputeQuorumAndPartialQuorum(uint64(len(splitKeys))) return &storageShare{ Share: Share{ diff --git a/scripts/differ/parser.go b/scripts/differ/parser.go index 9c1d9ed379..841ce8f0f0 100644 --- a/scripts/differ/parser.go +++ b/scripts/differ/parser.go @@ -85,7 +85,9 @@ func (p *Parser) addElement(n ast.Node) bool { // Get the code. var buf bytes.Buffer - printer.Fprint(&buf, p.fset, n) + if err := printer.Fprint(&buf, p.fset, n); err != nil { + log.Fatal(err) + } code := regexpRemains.ReplaceAllString(buf.String(), "") // Get the file. diff --git a/scripts/spec-alignment/differ.config.yaml b/scripts/spec-alignment/differ.config.yaml index d4c4f3cb63..3c30ab9a4d 100644 --- a/scripts/spec-alignment/differ.config.yaml +++ b/scripts/spec-alignment/differ.config.yaml @@ -1,4 +1,4 @@ -ApprovedChanges: ["50e5bb7eda99594e", "870a3a66aeccd737","4e22a08543b079b","56ceb03cd44ff702","188adfe8914e04c1","2438f9c5b82b69a3","1a716ee3bdb3170","90b166f78390af18","68219b82a1d9d829","c4c4caa5d0938b85","dfe99ce1d27b6cb1","35f5dab1f128d193","9a3973b64d7e8932","f33f07301a770d03","3e9e0dddfad3b302","d4fef6512374c1f5","b49f54cb45787e4b","59b2375130aef5df","f094cd0460432170","8e51881e527dd603","a7d6d58d9fa06379","1d124224ca4d0fe3","39ea06bfd1477d2d","7e2550bab51f22b2","87ebd29bd49fc52f","ef39dd5223e0d080","fe14e7f0503ea188","6146023d4d5708a2","aebb8e4348b6d667","973a2e6704dbf3","fb4cac598a68c592","257c7eb81d6eb245","2a8e94fe037e13fd","5e7eb878de54eec6","960a9c64cd4ec93c","57dfd255520bd849","ec333ff8a708db69","1cc1ff39ad91ee69","5714652b88e2d44f","7a53b3b037c56325","8c02ef1964464c30","19a268910a20da3d","af6e01ed565029f3","318b5169ac4dabb6","372c6b332e8ba699","c0d8a364c0db855a","4287381be4fb1841","b1614afc1da7794f","c214975412f3fd7","8bbf7eba3fa0cf7e","8e4ec8debe331b36","7a671d8fcefc3793","e2b0e9c6454c1c08","6707ecfefa5fec21","d5a7389d730464f1","8dfae3b3223d2de0","a81c092c985de728","968df5082c727ed6","9e53c73ee60b1cc2","9d265e99dd31d4f5","a34619e078d2e42f","17e8cec4f0625d53","e913f373aa88f333","cfc1e05c372d88dc","e5de6901d78b8833","57c1885b43dd8d19","e8a49856a5edd893","22ea21d10a2f861c","954e4fce01631c4e","108b9575f7c1d4bc","1f8d076449068f64","5a7ad98296703f6","159536003eeddac8","8ca8f82e67ddd3dd","16ebe47404323cc1","48bfe5cf1e578b47","dd83182b693a7216","308d21d9830f7047","6dde03147e012b1a","730c3e5e59393b7d","5b44a4b425ecc397","df5debc50ec8babc","92a41554b2910bb8","c36c680554dde59f","447feaa5cdc1a010","fda90c61f44cb149","cdbb4930eced584c","274336ec1127e6c0","2a496f5b3ad542d2","6b395912dde33b0e","cac56ec14994216b","8850900b5d9bcc65","15e7706486c6359e","cc22f28953b787ea","3bad6ae11596a574","8f84422a240d889c","5b265432dfbbaac7","43794bf5953db193","7975821460ebe1e7","173c505e12aabb8f","47ee0d148148a56f","8cc38593ebe049b6","bda3aec7157b095a","248712911696a851","f4d9c910f1dbaef7","1a2146fcad37acb8","b0b146f9bdab64b6","edfd442b4d725fbb"
,"122f053573538a32","d720d714a20833e1", "f9c984e71b685f9b","8c6b4fee5a4c13ce","c0a8d2019a2c30d5", "717bef26105c733f","2f70630c27062353","2f70337ba7566a69","dd607a44e1341e6b","5210501625ac3de5","f786bf475b5085aa","18a66ed6e613d9c1","e8943e7741f6843d","276a489bd5a00032","ba3bba59f10bf6b","3c50ce0c8089d871","89ee72f6c610ab84","c92b95a85da2cb11","927ea6aed3f98f20","9338904026a0ce37","9683cfa19dc544a3","4d3fa2b8dfcb5f5b", "f19e9a2b295bcfb3", "b10199b2de6f03b8", "1afc17e358f9ca79","4b58762c0b433442","d293ec1bc61bb707","3e88c3b49d093605","4890ff80c88cc41d","5227ff3a225dd20d","81a60407a3a0ba80","db2ad807eb66254a","d308bd7c553ccdcf","bdaf172971637cbe","6ade9202843071fe","2fe8e14083997744","19c9a5362d1e1d3a","5956f803d239f178","92c55a4548a8b760","9a95524213bccfff","2f51a7338b86c229","e96966a281d74505","3ee479b9cbbc3a1d","82b392ba39c6c594","b9d2404e5c570019","24f528d85fb021f2","fe9609a785305d81","b0934079dcd986cc","a9c520a19b26049","d19a9403fd732d94","74a928f5dcb2fdd9","cbbfdb5e68cdac80","10e39d2ceda91f34","f99a004cf6697875","8fa5e8ebf7d223ec","6c80c145ba705243","fbabbc90d0b4178a","b110cba51df9f8d2","c4ff2ed3d20dc419","9295a5bb10efcec7","ab56ea44a75f898a","ff51ef26ab53ba58","df3771e2589008f9","106e5689655bcfc6","f90e0fb6883bff93","667656095cec39ee"] +ApprovedChanges: ["50e5bb7eda99594e", "870a3a66aeccd737","4e22a08543b079b","56ceb03cd44ff702","188adfe8914e04c1","2438f9c5b82b69a3","1a716ee3bdb3170","90b166f78390af18","68219b82a1d9d829","c4c4caa5d0938b85","dfe99ce1d27b6cb1","35f5dab1f128d193","9a3973b64d7e8932","f33f07301a770d03","3e9e0dddfad3b302","d4fef6512374c1f5","b49f54cb45787e4b","59b2375130aef5df","f094cd0460432170","8e51881e527dd603","a7d6d58d9fa06379","1d124224ca4d0fe3","39ea06bfd1477d2d","7e2550bab51f22b2","87ebd29bd49fc52f","ef39dd5223e0d080","fe14e7f0503ea188","6146023d4d5708a2","aebb8e4348b6d667","973a2e6704dbf3","fb4cac598a68c592","257c7eb81d6eb245","2a8e94fe037e13fd","5e7eb878de54eec6","960a9c64cd4ec93c","57dfd255520bd849","ec333ff8a708db69","1cc1ff39ad91ee69","5714652b88e2d44f","7a53b3b037c56325","8c02ef1964464c30","19a268910a20da3d","af6e01ed565029f3","318b5169ac4dabb6","372c6b332e8ba699","c0d8a364c0db855a","4287381be4fb1841","b1614afc1da7794f","c214975412f3fd7","8bbf7eba3fa0cf7e","8e4ec8debe331b36","7a671d8fcefc3793","e2b0e9c6454c1c08","6707ecfefa5fec21","d5a7389d730464f1","8dfae3b3223d2de0","a81c092c985de728","968df5082c727ed6","9e53c73ee60b1cc2","9d265e99dd31d4f5","a34619e078d2e42f","17e8cec4f0625d53","e913f373aa88f333","cfc1e05c372d88dc","e5de6901d78b8833","57c1885b43dd8d19","e8a49856a5edd893","22ea21d10a2f861c","954e4fce01631c4e","108b9575f7c1d4bc","1f8d076449068f64","5a7ad98296703f6","159536003eeddac8","8ca8f82e67ddd3dd","16ebe47404323cc1","48bfe5cf1e578b47","dd83182b693a7216","308d21d9830f7047","6dde03147e012b1a","730c3e5e59393b7d","5b44a4b425ecc397","df5debc50ec8babc","92a41554b2910bb8","c36c680554dde59f","447feaa5cdc1a010","fda90c61f44cb149","cdbb4930eced584c","274336ec1127e6c0","2a496f5b3ad542d2","6b395912dde33b0e","cac56ec14994216b","8850900b5d9bcc65","15e7706486c6359e","cc22f28953b787ea","3bad6ae11596a574","8f84422a240d889c","5b265432dfbbaac7","43794bf5953db193","7975821460ebe1e7","173c505e12aabb8f","47ee0d148148a56f","8cc38593ebe049b6","bda3aec7157b095a","248712911696a851","f4d9c910f1dbaef7","1a2146fcad37acb8","b0b146f9bdab64b6","edfd442b4d725fbb","122f053573538a32","d720d714a20833e1", "f9c984e71b685f9b","8c6b4fee5a4c13ce","c0a8d2019a2c30d5", 
"717bef26105c733f","2f70630c27062353","2f70337ba7566a69","dd607a44e1341e6b","5210501625ac3de5","f786bf475b5085aa","18a66ed6e613d9c1","e8943e7741f6843d","276a489bd5a00032","ba3bba59f10bf6b","3c50ce0c8089d871","89ee72f6c610ab84","c92b95a85da2cb11","927ea6aed3f98f20","9338904026a0ce37","9683cfa19dc544a3","4d3fa2b8dfcb5f5b", "f19e9a2b295bcfb3", "b10199b2de6f03b8", "1afc17e358f9ca79","4b58762c0b433442","d293ec1bc61bb707","3e88c3b49d093605","4890ff80c88cc41d","5227ff3a225dd20d","81a60407a3a0ba80","db2ad807eb66254a","d308bd7c553ccdcf","bdaf172971637cbe","6ade9202843071fe","2fe8e14083997744","19c9a5362d1e1d3a","5956f803d239f178","92c55a4548a8b760","9a95524213bccfff","2f51a7338b86c229","e96966a281d74505","3ee479b9cbbc3a1d","82b392ba39c6c594","b9d2404e5c570019","24f528d85fb021f2","fe9609a785305d81","b0934079dcd986cc","a9c520a19b26049","d19a9403fd732d94","74a928f5dcb2fdd9","cbbfdb5e68cdac80","10e39d2ceda91f34","f99a004cf6697875","8fa5e8ebf7d223ec","6c80c145ba705243","fbabbc90d0b4178a","ab0c7f24e551ca6","af38a11cb8682c75","b110cba51df9f8d2","c4ff2ed3d20dc419","9295a5bb10efcec7","ab56ea44a75f898a","ff51ef26ab53ba58","df3771e2589008f9","106e5689655bcfc6","f90e0fb6883bff93","667656095cec39ee","9a5597af260c748a"] IgnoredIdentifiers: - logger diff --git a/scripts/spec-alignment/genesis_differ.config.yaml b/scripts/spec-alignment/genesis_differ.config.yaml index 2e890c45b6..8c46ba8ac9 100644 --- a/scripts/spec-alignment/genesis_differ.config.yaml +++ b/scripts/spec-alignment/genesis_differ.config.yaml @@ -1,4 +1,4 @@ -ApprovedChanges: ["52b93267ba812308","9f2881f9e89b4c3","f8718ef9598a2d28","ea4da0c78bc1e930","11481543a56b03e7","4bc55d173976f499","5a326429bd7d816a","57938492d36e5b72","ea83b3555f29e44e","39a395cc56c381d8","2092a46a009de5e9","f9e12bb821abda59","74490095cad1f871","fef6a577794897e9","e243efb1fef8baca","b612f4f4bee5726c","b4072ece06d92c84","487d349a6296651e","1329fd2f0f7101e5","6ea163caa000821c","cb2a3fac03c9f70d","c155c7005d298b8a","5462556ab33327ae","66591f5d3e9c299d","ef530512222fa3a3","54f7ee00c5223d56","136792991a713119","519dec1f394a29bb","c16537938b23bb1c","930f8003cc73658a","b4d4b7c288d15580","8e871e3dc302502c","264f6c3cb6c044e","73b442121276436f","a5d665260b9545e7","e76da25dcc1b8b7b","3021b027e65343dc","37abca362396de40","10331350bdd5cea5","ff66465e82a0bce","7008ba0e5fb3bc50","17d86fc521251084","60ee89aed3dca274","cfb5a31338393004","774c67a1117bb600","bbbac3fd539c9da","4120ef6224cd7653","c13c14ac8b912a99","e34eb83c1de17e7b","d60440779e512dda","8b474f07634c3b5b","ac42b9ed129f763c","67809ff9e1f78fba","436d37b16e59e851","d201c716184904d6","422221ab59ac4658","30ed9a822232b7e1","c08c6d84582b86c1","c07315929c5bfdae","751997d95ea9340","7715acc5b4c5aa2","14d6cdfdf92136fc","c9db895746d32d2","a0a0164bd2ecb327","c0cb3984d0a20d8","1c8beb7d60ffa18a","b44005e951042d3","45749213deaece88","6afb57c28a55854c","5619c6b724070584","81385e7b399b3235","856eb69df47300bc","68ab7316969c38b","c8f63fe574c9cd3","a1dd0a169df78d67","4bb11f08323af914","466839f492add207","6c3507bea504fcc","560bb093d1aea569","a13eb5088003f50b","9f5f0eff2dca5e9", "397220931cab52bc","453245a906210497","1eb92714f465d122", "2015d5566f6182e1","9f0799ecd4426e43","20a0cfb49029370f","b5ae2491e369931a","94800037492ba19a","57f89a48ccc5bdb0","a825ac16288e518f","1a8754cea558330e","837449174a662384", "eb4770deec3d69ae","88c54a52b5c156ec","7b39050170e98fbe"] +ApprovedChanges: 
["52b93267ba812308","9f2881f9e89b4c3","f8718ef9598a2d28","ea4da0c78bc1e930","11481543a56b03e7","4bc55d173976f499","5a326429bd7d816a","57938492d36e5b72","ea83b3555f29e44e","39a395cc56c381d8","2092a46a009de5e9","f9e12bb821abda59","74490095cad1f871","fef6a577794897e9","e243efb1fef8baca","b612f4f4bee5726c","b4072ece06d92c84","487d349a6296651e","1329fd2f0f7101e5","6ea163caa000821c","cb2a3fac03c9f70d","c155c7005d298b8a","5462556ab33327ae","66591f5d3e9c299d","ef530512222fa3a3","54f7ee00c5223d56","136792991a713119","519dec1f394a29bb","c16537938b23bb1c","930f8003cc73658a","b4d4b7c288d15580","8e871e3dc302502c","264f6c3cb6c044e","73b442121276436f","a5d665260b9545e7","e76da25dcc1b8b7b","3021b027e65343dc","37abca362396de40","10331350bdd5cea5","ff66465e82a0bce","7008ba0e5fb3bc50","17d86fc521251084","60ee89aed3dca274","cfb5a31338393004","774c67a1117bb600","bbbac3fd539c9da","4120ef6224cd7653","c13c14ac8b912a99","e34eb83c1de17e7b","d60440779e512dda","8b474f07634c3b5b","ac42b9ed129f763c","67809ff9e1f78fba","436d37b16e59e851","d201c716184904d6","422221ab59ac4658","30ed9a822232b7e1","c08c6d84582b86c1","c07315929c5bfdae","751997d95ea9340","7715acc5b4c5aa2","14d6cdfdf92136fc","c9db895746d32d2","a0a0164bd2ecb327","c0cb3984d0a20d8","1c8beb7d60ffa18a","b44005e951042d3","45749213deaece88","6afb57c28a55854c","5619c6b724070584","81385e7b399b3235","856eb69df47300bc","68ab7316969c38b","c8f63fe574c9cd3","a1dd0a169df78d67","4bb11f08323af914","466839f492add207","6c3507bea504fcc","560bb093d1aea569","a13eb5088003f50b","9f5f0eff2dca5e9", "397220931cab52bc","453245a906210497","1eb92714f465d122", "2015d5566f6182e1","9f0799ecd4426e43","20a0cfb49029370f","b5ae2491e369931a","94800037492ba19a","57f89a48ccc5bdb0","a825ac16288e518f","1a8754cea558330e","837449174a662384", "eb4770deec3d69ae","88c54a52b5c156ec","7b39050170e98fbe","a94f7bc050a7c5e0"] IgnoredIdentifiers: - logger diff --git a/utils/boot_node/node.go b/utils/boot_node/node.go index c1735c3eb2..dabf348e9d 100644 --- a/utils/boot_node/node.go +++ b/utils/boot_node/node.go @@ -30,8 +30,8 @@ var SSVProtocolID = [6]byte{'s', 's', 'v', 'd', 'v', '5'} type Options struct { PrivateKey string `yaml:"PrivateKey" env:"BOOT_NODE_PRIVATE_KEY" env-description:"boot node private key (default will generate new)"` ExternalIP string `yaml:"ExternalIP" env:"BOOT_NODE_EXTERNAL_IP" env-description:"Override boot node's external IP"` - TCPPort int `yaml:"TcpPort" env:"TCP_PORT" env-default:"5000" env-description:"TCP port for p2p transport"` - UDPPort int `yaml:"UdpPort" env:"UDP_PORT" env-default:"4000" env-description:"UDP port for discovery"` + TCPPort uint16 `yaml:"TcpPort" env:"TCP_PORT" env-default:"5000" env-description:"TCP port for p2p transport"` + UDPPort uint16 `yaml:"UdpPort" env:"UDP_PORT" env-default:"4000" env-description:"UDP port for discovery"` DbPath string `yaml:"DbPath" env:"BOOT_NODE_DB_PATH" env-default:"/data/bootnode" env-description:"Path to the boot node's database"` Network string `yaml:"Network" env:"NETWORK" env-default:"mainnet"` } @@ -45,10 +45,10 @@ type Node interface { // bootNode implements Node interface type bootNode struct { privateKey string - discv5port int + discv5port uint16 forkVersion []byte externalIP string - tcpPort int + tcpPort uint16 dbPath string network networkconfig.NetworkConfig } @@ -138,7 +138,7 @@ func (n *bootNode) Start(ctx context.Context, logger *zap.Logger) error { return nil } -func (n *bootNode) createListener(logger *zap.Logger, ipAddr string, port int, cfg discover.Config) *discover.UDPv5 { +func (n *bootNode) 
createListener(logger *zap.Logger, ipAddr string, port uint16, cfg discover.Config) *discover.UDPv5 { ip := net.ParseIP(ipAddr) if ip.To4() == nil { logger.Fatal("IPV4 address not provided", fields.Address(ipAddr)) @@ -157,7 +157,7 @@ func (n *bootNode) createListener(logger *zap.Logger, ipAddr string, port int, c } udpAddr := &net.UDPAddr{ IP: bindIP, - Port: port, + Port: int(port), } conn, err := net.ListenUDP(networkVersion, udpAddr) if err != nil { @@ -175,7 +175,7 @@ func (n *bootNode) createListener(logger *zap.Logger, ipAddr string, port int, c return network } -func (n *bootNode) createLocalNode(logger *zap.Logger, privKey *ecdsa.PrivateKey, ipAddr net.IP, port int) (*enode.LocalNode, error) { +func (n *bootNode) createLocalNode(logger *zap.Logger, privKey *ecdsa.PrivateKey, ipAddr net.IP, port uint16) (*enode.LocalNode, error) { db, err := enode.OpenDB(filepath.Join(n.dbPath, "enode")) if err != nil { return nil, errors.Wrap(err, "Could not open node's peer database") @@ -211,7 +211,7 @@ func (n *bootNode) createLocalNode(logger *zap.Logger, privKey *ecdsa.PrivateKey localNode := enode.NewLocalNode(db, privKey) localNode.Set(enr.WithEntry("ssv", true)) localNode.SetFallbackIP(external) - localNode.SetFallbackUDP(port) + localNode.SetFallbackUDP(int(port)) ipEntry := enr.IP(external) udpEntry := enr.UDP(port) diff --git a/utils/casts/casts.go b/utils/casts/casts.go new file mode 100644 index 0000000000..38aa186519 --- /dev/null +++ b/utils/casts/casts.go @@ -0,0 +1,44 @@ +package casts + +import ( + "fmt" + "math" + "time" + + spectypes "github.com/ssvlabs/ssv-spec/types" + + "github.com/ssvlabs/ssv/exporter/convert" +) + +var ( + ErrNegativeTime = fmt.Errorf("time can't be negative") + ErrMaxDurationOverflow = fmt.Errorf("duration can't exceed max int64") +) + +// DurationFromUint64 converts uint64 to time.Duration +func DurationFromUint64(t uint64) time.Duration { + if t > math.MaxInt64 { + return time.Duration(math.MaxInt64) // todo: error handling refactor + } + return time.Duration(t) // #nosec G115 +} + +func BeaconRoleToConvertRole(beaconRole spectypes.BeaconRole) convert.RunnerRole { + return convert.RunnerRole(beaconRole) // #nosec G115 +} + +func BeaconRoleToRunnerRole(beaconRole spectypes.BeaconRole) spectypes.RunnerRole { + return spectypes.RunnerRole(beaconRole) // #nosec G115 +} + +func RunnerRoleToBeaconRole(role spectypes.RunnerRole) spectypes.BeaconRole { + return spectypes.BeaconRole(role) // #nosec G115 +} + +// DurationToUint64 returns error if duration is negative and converts time.Duration to uint64 safe otherwise +func DurationToUint64(t time.Duration) (uint64, error) { + if t < 0 { + return 0, ErrNegativeTime + } + return uint64(t), nil // #nosec G115 +} From dc0c830e8f0718cc17d61d20fc802887df252569 Mon Sep 17 00:00:00 2001 From: Nikita Kryuchkov Date: Wed, 2 Oct 2024 07:25:50 -0300 Subject: [PATCH 28/35] discovery: fork with protocol ID (#1709) * discovery: fork with protocol ID * simplify empty protocol ID check * Use both services even after alan fork * Make forkListener to stop the PreForkListener after the fork * On restart, use only the post fork discovery * Remove duplicated SSVProtocolID definition * Add testing utils for discovery * Add tests for main service function * Refactor filter logic in checkPeer * Upon Close, close channel from sharedConn. 
Use only one bootnode list * Test pre and post fork listeners * Use listener in ForkListener instead of discover.UDPv5 * Add fork listener tests * Use t.Run for test cases * Update sharedUDPConn due to update of go-ethereum library * Align PublishENR test according to changes in stage * Refactor testing utils functions * Update network/discovery/fork_listener.go Co-authored-by: Nikita Kryuchkov * Update network/discovery/fork_listener.go Co-authored-by: Nikita Kryuchkov * Update network/discovery/fork_listener.go Co-authored-by: Nikita Kryuchkov * FIx lint issue * Revert change on limitNodeFilter return value * Rename forkListener -> forkingDV5Listener; Close postForkIterator once iterator changes to preFork * Adjust tests due to renaming * Add test to check that the postFork iterator is closed after the switch * fork at 81891 * fix to not drop postForkIterator when Next() returns false by calling both iterators with different probabilities * add metric * fix * debug * prefork * remove log * add log * log * fix protocol id * fix * log * try fair mix * metrics * deploy to holesky prod 1 & 2 * deploy to holesky prod 1 & 2 * fix Dockerfile and deploy holesky 1 & 2 * deploy holesky 1 & 2 * deploy holesky 1 & 2 * fix tests * deploy post fork * logs * pre-fork * test only pre discovery * boot node * bootnode fix * fix * fix * try only pre again * logs * deploy ssvdv5 for postfork * refactor * changes * reorder * set testports to uint16 * fix test order * fix test data race --------- Co-authored-by: MatheusFranco99 <48058141+MatheusFranco99@users.noreply.github.com> Co-authored-by: moshe-blox Co-authored-by: y0sher --- .gitlab-ci.yml | 114 ++++- Dockerfile | 2 +- cli/bootnode/boot_node.go | 7 +- logging/fields/fields.go | 5 + network/discovery/dv5_bootnode.go | 8 +- network/discovery/dv5_filters.go | 5 +- network/discovery/dv5_service.go | 133 ++++-- network/discovery/dv5_service_test.go | 32 +- network/discovery/forking_dv5_listener.go | 131 ++++++ .../discovery/forking_dv5_listener_test.go | 305 +++++++++++++ network/discovery/iterator_test.go | 123 ++++++ network/discovery/metrics.go | 7 + network/discovery/options.go | 33 +- network/discovery/service.go | 16 +- network/discovery/service_test.go | 330 +++++++++++++++ network/discovery/shared_conn.go | 36 ++ network/discovery/util_test.go | 399 ++++++++++++++++++ network/p2p/p2p_setup.go | 14 +- network/p2p/test_utils.go | 2 +- networkconfig/config.go | 2 +- networkconfig/holesky-stage.go | 3 +- networkconfig/holesky.go | 2 + networkconfig/test-network.go | 2 +- utils/boot_node/node.go | 49 ++- 24 files changed, 1654 insertions(+), 106 deletions(-) create mode 100644 network/discovery/forking_dv5_listener.go create mode 100644 network/discovery/forking_dv5_listener_test.go create mode 100644 network/discovery/iterator_test.go create mode 100644 network/discovery/service_test.go create mode 100644 network/discovery/shared_conn.go create mode 100644 network/discovery/util_test.go diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 08e654369a..84a2367630 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -7,14 +7,101 @@ variables: IMAGE_NAME: ssv-node DOCKER_BUILDKIT: 1 + #STAGE + ACCOUNT_ID_INFRA_STAGE: 121827225315 + AWS_REGION_INFRA_STAGE: "us-west-2" + DOCKER_REPO_INFRA_STAGE: $ACCOUNT_ID_INFRA_STAGE.dkr.ecr.$AWS_REGION_INFRA_STAGE.amazonaws.com/$IMAGE_NAME + APP_REPLICAS_INFRA_STAGE: "1" + ECRLOGIN_INFRA_STAGE: "aws ecr get-login --registry-ids $ACCOUNT_ID_INFRA_STAGE --region $AWS_REGION_INFRA_STAGE --no-include-email" + 
STAGE_HEALTH_CHECK_IMAGE: 121827225315.dkr.ecr.us-west-2.amazonaws.com/infra-stage-repo:ubuntu20 + #PRODUCTION ACCOUNT_ID_INFRA_PROD: 764289642555 AWS_REGION_INFRA_PROD: "us-west-2" - DOCKER_REPO_INFRA_PROD: $ACCOUNT_ID_INFRA_PROD.dkr.ecr.$AWS_REGION_INFRA_STAGE.amazonaws.com/$IMAGE_NAME + DOCKER_REPO_INFRA_PROD: $ACCOUNT_ID_INFRA_PROD.dkr.ecr.$AWS_REGION_INFRA_PROD.amazonaws.com/$IMAGE_NAME APP_REPLICAS_INFRA_PROD: "1" ECRLOGIN_INFRA_PROD: "aws ecr get-login --registry-ids $ACCOUNT_ID_INFRA_PROD --region $AWS_REGION_INFRA_PROD --no-include-email" PROD_HEALTH_CHECK_IMAGE: 764289642555.dkr.ecr.us-west-2.amazonaws.com/infra-prod-repo:ubuntu20 +# +-------+ +# | STAGE | +# +-------+ +Build stage Docker image: + image: docker:20.10.23 + stage: build + tags: + - blox-infra-stage + script: + - apk add --no-cache py-pip + - pip install pyyaml==5.3.1 + - pip install awscli + - docker build -t $IMAGE_NAME:$CI_COMMIT_SHA -f Dockerfile . + - DOCKER_LOGIN_TO_INFRA_STAGE_REPO=`$ECRLOGIN_INFRA_STAGE` + - docker tag $IMAGE_NAME:$CI_COMMIT_SHA $DOCKER_REPO_INFRA_STAGE:$CI_COMMIT_SHA + - $DOCKER_LOGIN_TO_INFRA_STAGE_REPO && docker push $DOCKER_REPO_INFRA_STAGE:$CI_COMMIT_SHA + only: + - stage + +# +---------------------+ +# | STAGE HETZNER NODES | +# +---------------------+ + + +Deploy nodes to hetzner stage: + stage: deploy + tags: + - hetzner-k8s-stage + image: bitnami/kubectl:1.27.5 + script: + - export K8S_API_VERSION=$INFRA_STAGE_K8_API_VERSION + - export SSV_NODES_CPU_LIMIT=$HETZNER_STAGE_SSV_NODES_CPU_LIMIT + - export SSV_NODES_MEM_LIMIT=$HETZNER_STAGE_SSV_NODES_MEM_LIMIT + - echo $HETZNER_KUBECONFIG | base64 -d > kubeconfig + - mv kubeconfig ~/.kube/ + - export KUBECONFIG=~/.kube/kubeconfig + - kubectl config get-contexts + # + # +--------------------+ + # | Deploy SSV nodes | + # +--------------------+ + - .k8/hetzner-stage/scripts/deploy-cluster-1--4.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-5--8.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-9--12.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-13--16.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-17--20.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-21--24.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-25--28.sh 
$DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-29--32.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-33--36.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-37--40.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-41--44.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-45--48.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-49--52.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-53--56.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-57--60.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-61--64.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-65--68.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-69--72.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + only: + - stage + +Deploy exporter to hetzner stage: + stage: deploy + tags: + - hetzner-k8s-stage + image: bitnami/kubectl:1.27.5 + script: + - export K8S_API_VERSION=$INFRA_STAGE_K8_API_VERSION + - export SSV_EXPORTER_CPU_LIMIT=$STAGE_SSV_EXPORTER_CPU_LIMIT + - export 
SSV_EXPORTER_MEM_LIMIT=$STAGE_SSV_EXPORTER_MEM_LIMIT + - echo $HETZNER_KUBECONFIG | base64 -d > kubeconfig + - mv kubeconfig ~/.kube/ + - export KUBECONFIG=~/.kube/kubeconfig + - kubectl config get-contexts + - .k8/hetzner-stage/scripts/deploy-holesky-exporters.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT + only: + - stage + # +---------------+ # | Prod | # +---------------+ @@ -33,14 +120,14 @@ Build prod Docker image: - $DOCKER_LOGIN_TO_INFRA_PROD_REPO && docker push $DOCKER_REPO_INFRA_PROD:$CI_COMMIT_SHA only: - - main + - unstable Deploy nodes to prod: stage: deploy tags: - blox-infra-prod script: - - apk add bash + - apk add bash curl - export K8S_API_VERSION=$INFRA_PROD_K8_API_VERSION - export SSV_EXPORTER_CPU_LIMIT=$PROD_SSV_EXPORTER_CPU_LIMIT - export SSV_EXPORTER_MEM_LIMIT=$PROD_SSV_EXPORTER_MEM_LIMIT @@ -49,6 +136,11 @@ Deploy nodes to prod: - curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl - chmod +x ./kubectl - mv ./kubectl /usr/bin/kubectl + - mkdir ~/.kube/ + - echo $PROD_KUBECONFIG | base64 -d > kubeconfig + - mv kubeconfig ~/.kube/ + - export KUBECONFIG=~/.kube/kubeconfig + - kubectl config get-contexts # +-------------------------------+ # | 🟠 Deploy SSV Holesky nodes | # +-------------------------------+ @@ -63,7 +155,7 @@ Deploy nodes to prod: # +----------------------------+ # | 🔴 Deploy SSV Mainnet nodes | # +----------------------------+ - # - .k8/production/mainnet/scripts/deploy-cluster-1-4.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 +# - .k8/production/mainnet/scripts/deploy-cluster-1-4.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 # # +------------------------------+ # │ 🔴 Deploy Mainnet Bootnode | @@ -72,7 +164,7 @@ Deploy nodes to prod: # - .k8/production/mainnet/scripts/deploy-boot-nodes.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 only: - - main + - unstable Deploy exporter to prod: stage: deploy @@ -85,6 +177,8 @@ Deploy exporter to prod: - export SSV_EXPORTER_MEM_LIMIT=$PROD_SSV_EXPORTER_MEM_LIMIT - export SSV_EXPORTER_MAINNET_CPU_LIMIT=$PROD_SSV_EXPORTER_MAINNET_CPU_LIMIT - export SSV_EXPORTER_MAINNET_MEM_LIMIT=$PROD_SSV_EXPORTER_MAINNET_MEM_LIMIT + - export SSV_EXPORTER_2_MAINNET_CPU_LIMIT=$PROD_SSV_EXPORTER_2_MAINNET_CPU_LIMIT + - export SSV_EXPORTER_2_MAINNET_MEM_LIMIT=$PROD_SSV_EXPORTER_2_MAINNET_MEM_LIMIT - export SSV_NODES_CPU_LIMIT_V3=$PROD_SSV_NODES_CPU_LIMIT_V3 - export SSV_NODES_MEM_LIMIT_V3=$PROD_SSV_NODES_MEM_LIMIT_V3 - curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl @@ -100,6 +194,14 @@ Deploy exporter to prod: # │ 🔴 Deploy Mainnet exporter | # +------------------------------+ # - .k8/production/mainnet/scripts/deploy-exporters.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network 
$K8S_API_VERSION $SSV_EXPORTER_MAINNET_CPU_LIMIT $SSV_EXPORTER_MAINNET_MEM_LIMIT - + # + # +------------------------------+ + # │ 🔴 Deploy Mainnet exporter 2| + # +------------------------------+ + - .k8/production/mainnet/scripts/deploy-exporters-2.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $SSV_EXPORTER_2_MAINNET_CPU_LIMIT $SSV_EXPORTER_2_MAINNET_MEM_LIMIT + # +------------------------------+ + # │ 🔴 Deploy Mainnet exporter 3| + # +------------------------------+ + - .k8/production/mainnet/scripts/deploy-exporters-3.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION only: - main diff --git a/Dockerfile b/Dockerfile index 1634eaa646..a78b97b28c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,7 +6,7 @@ FROM golang:1.22 AS preparer RUN apt-get update && \ DEBIAN_FRONTEND=noninteractive apt-get install -yq --no-install-recommends \ curl=7.88.1-10+deb12u7 \ - git=1:2.39.2-1.1 \ + git=1:2.39.5-0+deb12u1 \ zip=3.0-13 \ unzip=6.0-28 \ g++=4:12.2.0-3 \ diff --git a/cli/bootnode/boot_node.go b/cli/bootnode/boot_node.go index 2ba2644833..5bcc72dbb5 100644 --- a/cli/bootnode/boot_node.go +++ b/cli/bootnode/boot_node.go @@ -4,6 +4,7 @@ import ( "fmt" "log" + "github.com/ssvlabs/ssv/networkconfig" "github.com/ssvlabs/ssv/utils/commons" "github.com/ssvlabs/ssv/logging" @@ -55,7 +56,11 @@ var StartBootNodeCmd = &cobra.Command{ logger.Info(fmt.Sprintf("starting %v", commons.GetBuildData())) - bootNode, err := bootnode.New(cfg.Options) + networkConfig, err := networkconfig.GetNetworkConfigByName(cfg.Options.Network) + if err != nil { + logger.Fatal("failed to get network config", zap.Error(err)) + } + bootNode, err := bootnode.New(networkConfig, cfg.Options) if err != nil { logger.Fatal("failed to set up boot node", zap.Error(err)) } diff --git a/logging/fields/fields.go b/logging/fields/fields.go index 3592e565b0..61d53747b3 100644 --- a/logging/fields/fields.go +++ b/logging/fields/fields.go @@ -76,6 +76,7 @@ const ( FieldPeerID = "peer_id" FieldPeerScore = "peer_score" FieldPrivKey = "privkey" + FieldProtocolID = "protocol_id" FieldPubKey = "pubkey" FieldQuorumTime = "quorum_time" FieldRole = "role" @@ -327,6 +328,10 @@ func Domain(val spectypes.DomainType) zap.Field { return zap.Stringer(FieldDomain, format.DomainType(val)) } +func ProtocolID(val [6]byte) zap.Field { + return zap.String(FieldProtocolID, hex.EncodeToString(val[:])) +} + func Network(val string) zap.Field { return zap.String(FieldNetwork, val) } diff --git a/network/discovery/dv5_bootnode.go b/network/discovery/dv5_bootnode.go index d4e619770f..7379eb04fc 100644 --- a/network/discovery/dv5_bootnode.go +++ b/network/discovery/dv5_bootnode.go @@ -7,6 +7,7 @@ import ( "github.com/ssvlabs/ssv/logging" "github.com/ssvlabs/ssv/logging/fields" + "github.com/ssvlabs/ssv/networkconfig" "github.com/ssvlabs/ssv/utils" ) @@ -27,9 +28,9 @@ type Bootnode struct { } // NewBootnode creates a new bootnode -func NewBootnode(pctx context.Context, logger *zap.Logger, opts *BootnodeOptions) (*Bootnode, error) { +func NewBootnode(pctx context.Context, logger *zap.Logger, networkCfg networkconfig.NetworkConfig, opts *BootnodeOptions) (*Bootnode, error) { ctx, cancel := context.WithCancel(pctx) - disc, err := createBootnodeDiscovery(ctx, logger, opts) + disc, err := createBootnodeDiscovery(ctx, logger, networkCfg, opts) if err != nil { cancel() return nil, err 
@@ -51,12 +52,13 @@ func (b *Bootnode) Close() error { return nil } -func createBootnodeDiscovery(ctx context.Context, logger *zap.Logger, opts *BootnodeOptions) (Service, error) { +func createBootnodeDiscovery(ctx context.Context, logger *zap.Logger, networkCfg networkconfig.NetworkConfig, opts *BootnodeOptions) (Service, error) { privKey, err := utils.ECDSAPrivateKey(logger.Named(logging.NameBootNode), opts.PrivateKey) if err != nil { return nil, err } discOpts := &Options{ + NetworkConfig: networkCfg, DiscV5Opts: &DiscV5Options{ IP: opts.ExternalIP, BindIP: "", // net.IPv4zero.String() diff --git a/network/discovery/dv5_filters.go b/network/discovery/dv5_filters.go index 10111e6ea5..e32bddbbf7 100644 --- a/network/discovery/dv5_filters.go +++ b/network/discovery/dv5_filters.go @@ -8,7 +8,7 @@ import ( "go.uber.org/zap" ) -// limitNodeFilter checks if limit exceeded +// limitNodeFilter returns true if the limit is exceeded func (dvs *DiscV5Service) limitNodeFilter(node *enode.Node) bool { return !dvs.conns.AtLimit(libp2pnetwork.DirOutbound) } @@ -64,7 +64,8 @@ func (dvs *DiscV5Service) subnetFilter(subnets ...uint64) func(node *enode.Node) } } -// sharedSubnetsFilter checks if the node has an interest in the given subnet +// sharedSubnetsFilter returns a function that +// returns true if the peer has at least [n] subnets in common func (dvs *DiscV5Service) sharedSubnetsFilter(n int) func(node *enode.Node) bool { return func(node *enode.Node) bool { if n == 0 { diff --git a/network/discovery/dv5_service.go b/network/discovery/dv5_service.go index 055ffbc0d0..67184ae7d1 100644 --- a/network/discovery/dv5_service.go +++ b/network/discovery/dv5_service.go @@ -35,6 +35,15 @@ type NodeProvider interface { // NodeFilter can be used for nodes filtering during discovery type NodeFilter func(*enode.Node) bool +type Listener interface { + Lookup(enode.ID) []*enode.Node + RandomNodes() enode.Iterator + AllNodes() []*enode.Node + Ping(*enode.Node) error + LocalNode() *enode.LocalNode + Close() +} + // DiscV5Service wraps discover.UDPv5 with additional functionality // it implements go-libp2p/core/discovery.Discovery // currently using ENR entry (subnets) to facilitate subnets discovery @@ -43,16 +52,17 @@ type DiscV5Service struct { ctx context.Context cancel context.CancelFunc - dv5Listener *discover.UDPv5 + dv5Listener Listener bootnodes []*enode.Node conns peers.ConnectionIndex subnetsIdx peers.SubnetsIndex - conn *net.UDPConn + conn *net.UDPConn + sharedConn *SharedUDPConn - domainType networkconfig.DomainTypeProvider - subnets []byte + networkConfig networkconfig.NetworkConfig + subnets []byte publishLock chan struct{} } @@ -60,13 +70,13 @@ type DiscV5Service struct { func newDiscV5Service(pctx context.Context, logger *zap.Logger, discOpts *Options) (Service, error) { ctx, cancel := context.WithCancel(pctx) dvs := DiscV5Service{ - ctx: ctx, - cancel: cancel, - conns: discOpts.ConnIndex, - subnetsIdx: discOpts.SubnetsIdx, - domainType: discOpts.DomainType, - subnets: discOpts.DiscV5Opts.Subnets, - publishLock: make(chan struct{}, 1), + ctx: ctx, + cancel: cancel, + conns: discOpts.ConnIndex, + subnetsIdx: discOpts.SubnetsIdx, + networkConfig: discOpts.NetworkConfig, + subnets: discOpts.DiscV5Opts.Subnets, + publishLock: make(chan struct{}, 1), } logger.Debug("configuring discv5 discovery", zap.Any("discOpts", discOpts)) @@ -86,6 +96,9 @@ func (dvs *DiscV5Service) Close() error { return err } } + if dvs.sharedConn != nil { + close(dvs.sharedConn.Unhandled) + } if dvs.dv5Listener != nil { 
dvs.dv5Listener.Close() } @@ -153,10 +166,10 @@ func (dvs *DiscV5Service) checkPeer(logger *zap.Logger, e PeerEvent) error { if err != nil && !errors.Is(err, records.ErrEntryNotFound) { return errors.Wrap(err, "could not read domain type") } - if dvs.domainType.DomainType() != nodeDomainType && - dvs.domainType.DomainType() != nodeNextDomainType { + if dvs.networkConfig.DomainType() != nodeDomainType && + dvs.networkConfig.DomainType() != nodeNextDomainType { return fmt.Errorf("mismatched domain type: neither %x nor %x match %x", - nodeDomainType, nodeNextDomainType, dvs.domainType.DomainType()) + nodeDomainType, nodeNextDomainType, dvs.networkConfig.DomainType()) } // Get the peer's subnets, skipping if it has none. @@ -169,10 +182,17 @@ func (dvs *DiscV5Service) checkPeer(logger *zap.Logger, e PeerEvent) error { } dvs.subnetsIdx.UpdatePeerSubnets(e.AddrInfo.ID, nodeSubnets) - if !dvs.limitNodeFilter(e.Node) && !dvs.sharedSubnetsFilter(1)(e.Node) { + + // Filters + if !dvs.limitNodeFilter(e.Node) { + metricRejectedNodes.Inc() + return errors.New("reached limit") + } + if !dvs.sharedSubnetsFilter(1)(e.Node) { metricRejectedNodes.Inc() return errors.New("no shared subnets") } + metricFoundNodes.Inc() return nil } @@ -197,19 +217,80 @@ func (dvs *DiscV5Service) initDiscV5Listener(logger *zap.Logger, discOpts *Optio return errors.Wrap(err, "could not create local node") } - dv5Cfg, err := opts.DiscV5Cfg(logger) + // Get the protocol ID, or set to default if not provided + protocolID := dvs.networkConfig.DiscoveryProtocolID + emptyProtocolID := [6]byte{} + if protocolID == emptyProtocolID { + protocolID = DefaultSSVProtocolID + } + + // After the Alan fork, on a restart, we only use the discovery with the ProtocolID restriction + if dvs.networkConfig.PastAlanFork() { + dv5Cfg, err := opts.DiscV5Cfg(logger, WithProtocolID(protocolID)) + if err != nil { + return err + } + dv5Listener, err := discover.ListenV5(udpConn, localNode, *dv5Cfg) + if err != nil { + return errors.Wrap(err, "could not create discV5 listener") + } + dvs.dv5Listener = dv5Listener + dvs.bootnodes = dv5Cfg.Bootnodes + + logger.Debug("started discv5 listener (UDP)", + fields.BindIP(bindIP), + zap.Uint16("UdpPort", opts.Port), + fields.ENRLocalNode(localNode), + fields.Domain(discOpts.NetworkConfig.DomainType()), + fields.ProtocolID(discOpts.NetworkConfig.DiscoveryProtocolID), + ) + + return nil + } + + // New discovery, with ProtocolID restriction, to be kept post-fork + unhandled := make(chan discover.ReadPacket, 100) // size taken from https://github.com/ethereum/go-ethereum/blob/v1.13.5/p2p/server.go#L551 + sharedConn := &SharedUDPConn{udpConn, unhandled} + dvs.sharedConn = sharedConn + + dv5PostForkCfg, err := opts.DiscV5Cfg(logger, WithProtocolID(protocolID), WithUnhandled(unhandled)) if err != nil { return err } - dv5Listener, err := discover.ListenV5(udpConn, localNode, *dv5Cfg) + + dv5PostForkListener, err := discover.ListenV5(udpConn, localNode, *dv5PostForkCfg) if err != nil { return errors.Wrap(err, "could not create discV5 listener") } - dvs.dv5Listener = dv5Listener - dvs.bootnodes = dv5Cfg.Bootnodes - logger.Debug("started discv5 listener (UDP)", fields.BindIP(bindIP), - zap.Uint16("UdpPort", opts.Port), fields.ENRLocalNode(localNode), fields.Domain(discOpts.DomainType.DomainType())) + logger.Debug("started discv5 post-fork listener (UDP)", + fields.BindIP(bindIP), + zap.Uint16("UdpPort", opts.Port), + fields.ENRLocalNode(localNode), + fields.Domain(discOpts.NetworkConfig.NextDomainType()), + 
fields.ProtocolID(protocolID), + ) + + // Previous discovery, without ProtocolID restriction, to be discontinued after the fork + dv5PreForkCfg, err := opts.DiscV5Cfg(logger) + if err != nil { + return err + } + + dv5PreForkListener, err := discover.ListenV5(sharedConn, localNode, *dv5PreForkCfg) + if err != nil { + return errors.Wrap(err, "could not create discV5 pre-fork listener") + } + + logger.Debug("started discv5 pre-fork listener (UDP)", + fields.BindIP(bindIP), + zap.Uint16("UdpPort", opts.Port), + fields.ENRLocalNode(localNode), + fields.Domain(discOpts.NetworkConfig.DomainType()), + ) + + dvs.dv5Listener = NewForkingDV5Listener(logger, dv5PreForkListener, dv5PostForkListener, 5*time.Second, dvs.networkConfig) + dvs.bootnodes = dv5PreForkCfg.Bootnodes // Just take bootnodes from one of the config since they're equal return nil } @@ -298,12 +379,12 @@ func (dvs *DiscV5Service) DeregisterSubnets(logger *zap.Logger, subnets ...uint6 // PublishENR publishes the ENR with the current domain type across the network func (dvs *DiscV5Service) PublishENR(logger *zap.Logger) { // Update own node record. - err := records.SetDomainTypeEntry(dvs.dv5Listener.LocalNode(), records.KeyDomainType, dvs.domainType.DomainType()) + err := records.SetDomainTypeEntry(dvs.dv5Listener.LocalNode(), records.KeyDomainType, dvs.networkConfig.DomainType()) if err != nil { logger.Error("could not set domain type", zap.Error(err)) return } - err = records.SetDomainTypeEntry(dvs.dv5Listener.LocalNode(), records.KeyNextDomainType, dvs.domainType.NextDomainType()) + err = records.SetDomainTypeEntry(dvs.dv5Listener.LocalNode(), records.KeyNextDomainType, dvs.networkConfig.NextDomainType()) if err != nil { logger.Error("could not set next domain type", zap.Error(err)) return @@ -370,15 +451,15 @@ func (dvs *DiscV5Service) createLocalNode(logger *zap.Logger, discOpts *Options, localNode, // Satisfy decorations of forks supported by this node. 
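To see the dual-listener wiring above at a glance, a condensed sketch in package discovery (the function name listenBothForks is hypothetical; the calls mirror initDiscV5Listener in this patch, with logging omitted):

package discovery

import (
	"net"
	"time"

	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"go.uber.org/zap"

	"github.com/ssvlabs/ssv/networkconfig"
)

func listenBothForks(logger *zap.Logger, udpConn *net.UDPConn, localNode *enode.LocalNode,
	opts *DiscV5Options, netCfg networkconfig.NetworkConfig, protocolID [6]byte) (Listener, *SharedUDPConn, error) {

	// The post-fork listener owns the real socket; packets it cannot handle
	// are forwarded on the unhandled channel instead of being dropped.
	unhandled := make(chan discover.ReadPacket, 100)
	shared := &SharedUDPConn{udpConn, unhandled}

	postCfg, err := opts.DiscV5Cfg(logger, WithProtocolID(protocolID), WithUnhandled(unhandled))
	if err != nil {
		return nil, nil, err
	}
	postFork, err := discover.ListenV5(udpConn, localNode, *postCfg)
	if err != nil {
		return nil, nil, err
	}

	// The pre-fork listener reads only the leftovers via the shared connection.
	preCfg, err := opts.DiscV5Cfg(logger)
	if err != nil {
		return nil, nil, err
	}
	preFork, err := discover.ListenV5(shared, localNode, *preCfg)
	if err != nil {
		return nil, nil, err
	}

	// The forking listener uses both services until the Alan fork, then keeps only the post-fork one.
	return NewForkingDV5Listener(logger, preFork, postFork, 5*time.Second, netCfg), shared, nil
}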
- DecorateWithDomainType(records.KeyDomainType, dvs.domainType.DomainType()), - DecorateWithDomainType(records.KeyNextDomainType, dvs.domainType.NextDomainType()), + DecorateWithDomainType(records.KeyDomainType, dvs.networkConfig.DomainType()), + DecorateWithDomainType(records.KeyNextDomainType, dvs.networkConfig.NextDomainType()), DecorateWithSubnets(opts.Subnets), ) if err != nil { return nil, errors.Wrap(err, "could not decorate local node") } - logger.Debug("node record is ready", fields.ENRLocalNode(localNode), fields.Domain(dvs.domainType.DomainType()), fields.Subnets(opts.Subnets)) + logger.Debug("node record is ready", fields.ENRLocalNode(localNode), fields.Domain(dvs.networkConfig.DomainType()), fields.Subnets(opts.Subnets)) return localNode, nil } diff --git a/network/discovery/dv5_service_test.go b/network/discovery/dv5_service_test.go index 1677991111..e02a8991d6 100644 --- a/network/discovery/dv5_service_test.go +++ b/network/discovery/dv5_service_test.go @@ -2,11 +2,11 @@ package discovery import ( "context" + "math" "net" "os" "testing" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/pkg/errors" spectypes "github.com/ssvlabs/ssv-spec/types" @@ -17,22 +17,16 @@ import ( "github.com/ssvlabs/ssv/network/peers" "github.com/ssvlabs/ssv/network/peers/connections/mock" "github.com/ssvlabs/ssv/network/records" + "github.com/ssvlabs/ssv/networkconfig" + "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon" "github.com/ssvlabs/ssv/utils" ) -type TestDomainTypeProvider struct { -} - -func (td *TestDomainTypeProvider) DomainType() spectypes.DomainType { - return spectypes.DomainType{0x1, 0x2, 0x3, 0x4} -} - -func (td *TestDomainTypeProvider) NextDomainType() spectypes.DomainType { - return spectypes.DomainType{0x1, 0x2, 0x3, 0x5} -} - -func (td *TestDomainTypeProvider) DomainTypeAtEpoch(epoch phase0.Epoch) spectypes.DomainType { - return spectypes.DomainType{0x1, 0x2, 0x3, 0x4} +var TestNetwork = networkconfig.NetworkConfig{ + Beacon: beacon.NewNetwork(spectypes.BeaconTestNetwork), + GenesisDomainType: spectypes.DomainType{0x1, 0x2, 0x3, 0x4}, + AlanDomainType: spectypes.DomainType{0x1, 0x2, 0x3, 0x5}, + AlanForkEpoch: math.MaxUint64, } func TestCheckPeer(t *testing.T) { @@ -156,11 +150,11 @@ func TestCheckPeer(t *testing.T) { // Run the tests. subnetIndex := peers.NewSubnetsIndex(commons.Subnets()) dvs := &DiscV5Service{ - ctx: ctx, - conns: &mock.MockConnectionIndex{LimitValue: true}, - subnetsIdx: subnetIndex, - domainType: &TestDomainTypeProvider{}, - subnets: mySubnets, + ctx: ctx, + conns: &mock.MockConnectionIndex{LimitValue: false}, + subnetsIdx: subnetIndex, + networkConfig: TestNetwork, + subnets: mySubnets, } for _, test := range tests { diff --git a/network/discovery/forking_dv5_listener.go b/network/discovery/forking_dv5_listener.go new file mode 100644 index 0000000000..8903a73db0 --- /dev/null +++ b/network/discovery/forking_dv5_listener.go @@ -0,0 +1,131 @@ +package discovery + +import ( + "sync" + "time" + + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ssvlabs/ssv/networkconfig" + "go.uber.org/zap" +) + +const ( + defaultIteratorTimeout = 5 * time.Second +) + +// forkingDV5Listener wraps a pre-fork and a post-fork listener. +// Before the fork, it performs operations on both services. +// Aftet the fork, it performs operations only on the post-fork service. 
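Before the implementation that follows, a consumer-side sketch of how such a Listener is typically drained (collectPeers is a hypothetical helper, not part of the patch):

package discovery

import "github.com/ethereum/go-ethereum/p2p/enode"

// collectPeers drains up to max nodes from the listener's random-node iterator.
// Before the Alan fork the forking listener feeds this from both discovery
// services (mixed via enode.NewFairMix); afterwards only post-fork nodes arrive.
func collectPeers(l Listener, max int) []*enode.Node {
	iter := l.RandomNodes()
	defer iter.Close()

	nodes := make([]*enode.Node, 0, max)
	for len(nodes) < max && iter.Next() {
		nodes = append(nodes, iter.Node())
	}
	return nodes
}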
+type forkingDV5Listener struct { + logger *zap.Logger + preForkListener Listener + postForkListener Listener + iteratorTimeout time.Duration + closeOnce sync.Once + netCfg networkconfig.NetworkConfig +} + +func NewForkingDV5Listener(logger *zap.Logger, preFork, postFork Listener, iteratorTimeout time.Duration, netConfig networkconfig.NetworkConfig) *forkingDV5Listener { + if iteratorTimeout == 0 { + iteratorTimeout = defaultIteratorTimeout + } + return &forkingDV5Listener{ + logger: logger, + preForkListener: preFork, + postForkListener: postFork, + iteratorTimeout: iteratorTimeout, + netCfg: netConfig, + } +} + +// Before the fork, returns the result of a Lookup in both pre and post-fork services. +// After the fork, returns only the result from the post-fork service. +func (l *forkingDV5Listener) Lookup(id enode.ID) []*enode.Node { + if l.netCfg.PastAlanFork() { + l.closePreForkListener() + return l.postForkListener.Lookup(id) + } + + nodes := l.postForkListener.Lookup(id) + nodes = append(nodes, l.preForkListener.Lookup(id)...) + return nodes +} + +// Before the fork, returns an iterator for both pre and post-fork services. +// After the fork, returns only the iterator from the post-fork service. +func (l *forkingDV5Listener) RandomNodes() enode.Iterator { + if l.netCfg.PastAlanFork() { + l.closePreForkListener() + return l.postForkListener.RandomNodes() + } + + fairMix := enode.NewFairMix(l.iteratorTimeout) + fairMix.AddSource(&annotatedIterator{l.postForkListener.RandomNodes(), "post"}) + fairMix.AddSource(&annotatedIterator{l.preForkListener.RandomNodes(), "pre"}) + return fairMix +} + +// Before the fork, returns all nodes from the pre and post-fork listeners. +// After the fork, returns only the result from the post-fork service. +func (l *forkingDV5Listener) AllNodes() []*enode.Node { + if l.netCfg.PastAlanFork() { + l.closePreForkListener() + return l.postForkListener.AllNodes() + } + + enodes := l.postForkListener.AllNodes() + enodes = append(enodes, l.preForkListener.AllNodes()...) + return enodes +} + +// Sends a ping in the post-fork service. +// Before the fork, it also tries to ping with the pre-fork service in case of error. +func (l *forkingDV5Listener) Ping(node *enode.Node) error { + if l.netCfg.PastAlanFork() { + l.closePreForkListener() + return l.postForkListener.Ping(node) + } + + err := l.postForkListener.Ping(node) + if err != nil { + return l.preForkListener.Ping(node) + } + return nil +} + +// Returns the LocalNode using the post-fork listener. +// Both pre and post-fork listeners should have the same LocalNode. +func (l *forkingDV5Listener) LocalNode() *enode.LocalNode { + if l.netCfg.PastAlanFork() { + l.closePreForkListener() + return l.postForkListener.LocalNode() + } + return l.postForkListener.LocalNode() +} + +// Closes both listeners +func (l *forkingDV5Listener) Close() { + l.closePreForkListener() + l.postForkListener.Close() +} + +// closePreForkListener ensures preForkListener is closed once +func (l *forkingDV5Listener) closePreForkListener() { + l.closeOnce.Do(func() { + l.preForkListener.Close() + }) +} + +// annotatedIterator wraps an enode.Iterator with metrics collection. 
+type annotatedIterator struct { + enode.Iterator + fork string +} + +func (i *annotatedIterator) Next() bool { + if !i.Iterator.Next() { + return false + } + metricIterations.WithLabelValues(i.fork).Inc() + return true +} diff --git a/network/discovery/forking_dv5_listener_test.go b/network/discovery/forking_dv5_listener_test.go new file mode 100644 index 0000000000..11aabdf3f4 --- /dev/null +++ b/network/discovery/forking_dv5_listener_test.go @@ -0,0 +1,305 @@ +package discovery + +import ( + "testing" + "time" + + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +const iteratorTimeout = 5 * time.Millisecond + +func TestForkListener_Create(t *testing.T) { + localNode := NewLocalNode(t) + + preForkListener := NewMockListener(localNode, []*enode.Node{}) + postForkListener := NewMockListener(localNode, []*enode.Node{}) + + t.Run("Pre-Fork", func(t *testing.T) { + netCfg := PreForkNetworkConfig() + _ = NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) + + assert.False(t, preForkListener.closed) + assert.False(t, postForkListener.closed) + }) + + t.Run("Post-Fork", func(t *testing.T) { + netCfg := PostForkNetworkConfig() + _ = NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) + + assert.False(t, preForkListener.closed) + assert.False(t, postForkListener.closed) + }) +} + +func TestForkListener_Lookup(t *testing.T) { + nodeFromPreForkListener := NewTestingNode(t) // pre-fork node + nodeFromPostForkListener := NewTestingNode(t) // post-fork node + localNode := NewLocalNode(t) + + preForkListener := NewMockListener(localNode, []*enode.Node{nodeFromPreForkListener}) + postForkListener := NewMockListener(localNode, []*enode.Node{nodeFromPostForkListener}) + + t.Run("Pre-Fork", func(t *testing.T) { + netCfg := PreForkNetworkConfig() + forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) + + nodes := forkListener.Lookup(enode.ID{}) + assert.Len(t, nodes, 2) + // post-fork nodes are set first + assert.Equal(t, nodes[0], nodeFromPostForkListener) + assert.Equal(t, nodes[1], nodeFromPreForkListener) + }) + + t.Run("Post-Fork", func(t *testing.T) { + netCfg := PostForkNetworkConfig() + forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) + + nodes := forkListener.Lookup(enode.ID{}) + // only post-fork nodes + assert.Len(t, nodes, 1) + assert.Equal(t, nodes[0], nodeFromPostForkListener) + }) +} + +func TestForkListener_RandomNodes(t *testing.T) { + nodeFromPreForkListener := NewTestingNode(t) // pre-fork node + nodeFromPostForkListener := NewTestingNode(t) // post-fork node + localNode := NewLocalNode(t) + + t.Run("Pre-Fork", func(t *testing.T) { + preForkListener := NewMockListener(localNode, []*enode.Node{nodeFromPreForkListener}) + postForkListener := NewMockListener(localNode, []*enode.Node{nodeFromPostForkListener}) + + netCfg := PreForkNetworkConfig() + forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) + + iter := forkListener.RandomNodes() + defer iter.Close() + var nodes []*enode.Node + for i := 0; i < 2; i++ { + require.True(t, iter.Next()) + nodes = append(nodes, iter.Node()) + } + iter.Close() + + assert.Len(t, nodes, 2) + // post-fork nodes are set first + assert.Equal(t, nodes[0], nodeFromPreForkListener) + assert.Equal(t, 
nodes[1], nodeFromPostForkListener) + + // No more next + requireNextTimeout(t, false, iter, 10*time.Millisecond) + }) + + t.Run("Post-Fork", func(t *testing.T) { + preForkListener := NewMockListener(localNode, []*enode.Node{nodeFromPreForkListener}) + postForkListener := NewMockListener(localNode, []*enode.Node{nodeFromPostForkListener}) + + netCfg := PostForkNetworkConfig() + forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) + + iter := forkListener.RandomNodes() + defer iter.Close() + var nodes []*enode.Node + for iter.Next() { + nodes = append(nodes, iter.Node()) + } + iter.Close() + + // only post-fork nodes + assert.Len(t, nodes, 1) + assert.Equal(t, nodes[0], nodeFromPostForkListener) + }) +} + +func TestForkListener_AllNodes(t *testing.T) { + nodeFromPreForkListener := NewTestingNode(t) // pre-fork node + nodeFromPostForkListener := NewTestingNode(t) // post-fork node + localNode := NewLocalNode(t) + + preForkListener := NewMockListener(localNode, []*enode.Node{nodeFromPreForkListener}) + postForkListener := NewMockListener(localNode, []*enode.Node{nodeFromPostForkListener}) + + t.Run("Pre-Fork", func(t *testing.T) { + netCfg := PreForkNetworkConfig() + forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) + + nodes := forkListener.AllNodes() + assert.Len(t, nodes, 2) + // post-fork nodes are set first + assert.Equal(t, nodes[0], nodeFromPostForkListener) + assert.Equal(t, nodes[1], nodeFromPreForkListener) + }) + + t.Run("Post-Fork", func(t *testing.T) { + netCfg := PostForkNetworkConfig() + forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) + + nodes := forkListener.AllNodes() + // only post-fork nodes + assert.Len(t, nodes, 1) + assert.Equal(t, nodes[0], nodeFromPostForkListener) + }) +} + +func TestForkListener_PingPreFork(t *testing.T) { + pingPeer := NewTestingNode(t) // any peer to ping + localNode := NewLocalNode(t) + + preForkListener := NewMockListener(localNode, []*enode.Node{}) + postForkListener := NewMockListener(localNode, []*enode.Node{}) + + netCfg := PreForkNetworkConfig() + forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) + + t.Run("Post-Fork succeeds", func(t *testing.T) { + postForkListener.SetNodesForPingError([]*enode.Node{}) + preForkListener.SetNodesForPingError([]*enode.Node{pingPeer}) + err := forkListener.Ping(pingPeer) + assert.NoError(t, err) + }) + + t.Run("Post-Fork fails and Pre-Fork succeeds", func(t *testing.T) { + postForkListener.SetNodesForPingError([]*enode.Node{pingPeer}) + preForkListener.SetNodesForPingError([]*enode.Node{}) + err := forkListener.Ping(pingPeer) + assert.NoError(t, err) + }) + + t.Run("Post-Fork and Pre-Fork fails", func(t *testing.T) { + postForkListener.SetNodesForPingError([]*enode.Node{pingPeer}) + preForkListener.SetNodesForPingError([]*enode.Node{pingPeer}) + err := forkListener.Ping(pingPeer) + assert.ErrorContains(t, err, "failed ping") + }) +} + +func TestForkListener_PingPostFork(t *testing.T) { + pingPeer := NewTestingNode(t) // any peer to ping + localNode := NewLocalNode(t) + + preForkListener := NewMockListener(localNode, []*enode.Node{}) + postForkListener := NewMockListener(localNode, []*enode.Node{}) + + netCfg := PostForkNetworkConfig() + forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) + + 
t.Run("Post-Fork succeeds", func(t *testing.T) { + postForkListener.SetNodesForPingError([]*enode.Node{}) + preForkListener.SetNodesForPingError([]*enode.Node{pingPeer}) + err := forkListener.Ping(pingPeer) + assert.NoError(t, err) + }) + + t.Run("Post-Fork fails and Pre-Fork succeeds", func(t *testing.T) { + postForkListener.SetNodesForPingError([]*enode.Node{pingPeer}) + preForkListener.SetNodesForPingError([]*enode.Node{}) + err := forkListener.Ping(pingPeer) + + // Pre-Fork would succeed but it's not called since we're on post-fork + assert.ErrorContains(t, err, "failed ping") + }) +} + +func TestForkListener_LocalNode(t *testing.T) { + localNode := NewLocalNode(t) + + preForkListener := NewMockListener(localNode, []*enode.Node{}) + postForkListener := NewMockListener(localNode, []*enode.Node{}) + + t.Run("Pre-Fork", func(t *testing.T) { + netCfg := PreForkNetworkConfig() + forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) + + assert.Equal(t, localNode, forkListener.LocalNode()) + }) + + t.Run("Post-Fork", func(t *testing.T) { + netCfg := PostForkNetworkConfig() + forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) + + assert.Equal(t, localNode, forkListener.LocalNode()) + }) +} + +func TestForkListener_Close(t *testing.T) { + + t.Run("Pre-Fork", func(t *testing.T) { + preForkListener := NewMockListener(&enode.LocalNode{}, []*enode.Node{}) + postForkListener := NewMockListener(&enode.LocalNode{}, []*enode.Node{}) + + netCfg := PreForkNetworkConfig() + forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) + + // Call any method so that it will check whether to close the pre-fork listener + _ = forkListener.AllNodes() + + assert.False(t, preForkListener.closed) + assert.False(t, postForkListener.closed) + + // Close + forkListener.Close() + + assert.True(t, preForkListener.closed) + assert.True(t, postForkListener.closed) + }) + + t.Run("Post-Fork", func(t *testing.T) { + preForkListener := NewMockListener(&enode.LocalNode{}, []*enode.Node{}) + postForkListener := NewMockListener(&enode.LocalNode{}, []*enode.Node{}) + + netCfg := PostForkNetworkConfig() + forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) + + // Call any method so that it will check whether to close the pre-fork listener + _ = forkListener.AllNodes() + + assert.True(t, preForkListener.closed) // pre-fork listener is closed + assert.False(t, postForkListener.closed) + + // Close + forkListener.Close() + + assert.True(t, preForkListener.closed) + assert.True(t, postForkListener.closed) + }) +} + +func requireNextTimeout(t *testing.T, expected bool, iter enode.Iterator, timeout time.Duration) { + const maxTries = 10 + var deadline = time.After(timeout) + next := make(chan bool) + go func() { + defer close(next) + for { + ok := iter.Next() + select { + case next <- ok: + case <-deadline: + return + } + if ok { + return + } + time.Sleep(timeout / maxTries) + } + }() + for { + select { + case ok := <-next: + require.Equal(t, expected, ok, "expected next to be %v", expected) + if ok { + return + } + case <-deadline: + if expected { + require.Fail(t, "expected next to be %v", expected) + } + return + } + } +} diff --git a/network/discovery/iterator_test.go b/network/discovery/iterator_test.go new file mode 100644 index 0000000000..e89dc1ddcd --- /dev/null +++ 
b/network/discovery/iterator_test.go @@ -0,0 +1,123 @@ +package discovery + +import ( + "testing" + "time" + + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/stretchr/testify/require" +) + +func TestFairMixIterator_Next(t *testing.T) { + // Mock iterators + preforkNodes := NewTestingNodes(t, 2) + postForkNodes := NewTestingNodes(t, 2) + + iterator := enode.NewFairMix(5 * time.Millisecond) + defer iterator.Close() + iterator.AddSource(NewMockIterator(preforkNodes)) + iterator.AddSource(NewMockIterator(postForkNodes)) + + expectedNodes := append(preforkNodes, postForkNodes...) + actualNodes := make([]*enode.Node, 0, len(expectedNodes)) + for i := 0; i < 4; i++ { + require.True(t, iterator.Next()) + actualNodes = append(actualNodes, iterator.Node()) + } + require.ElementsMatch(t, expectedNodes, actualNodes) + + // No more elements + requireNextTimeout(t, false, iterator, 15*time.Millisecond) +} + +func TestFairMixIterator_Next_False(t *testing.T) { + // Mock iterators + // Nil means return false on Next() + preforkNodes := []*enode.Node{NewTestingNode(t), nil, NewTestingNode(t)} + postForkNodes := []*enode.Node{NewTestingNode(t), NewTestingNode(t), nil} + preFork := NewMockIterator(preforkNodes) + postFork := NewMockIterator(postForkNodes) + + iterator := enode.NewFairMix(5 * time.Millisecond) + defer iterator.Close() + iterator.AddSource(preFork) + iterator.AddSource(postFork) + + var expectedNodes []*enode.Node + for _, node := range preforkNodes { + if node == nil { + break + } + expectedNodes = append(expectedNodes, node) + } + for _, node := range postForkNodes { + if node == nil { + break + } + expectedNodes = append(expectedNodes, node) + } + + var actualNodes []*enode.Node + for i := 0; i < len(expectedNodes); i++ { + requireNextTimeout(t, true, iterator, 10*time.Millisecond) + actualNodes = append(actualNodes, iterator.Node()) + } + require.ElementsMatch(t, expectedNodes, actualNodes) + + // No more elements + requireNextTimeout(t, false, iterator, 15*time.Millisecond) +} + +func TestFairMixIterator_PostForkEmpty(t *testing.T) { + // Mock nodes + node1 := NewTestingNode(t) + node2 := NewTestingNode(t) + + // Mock iterators + preFork := NewMockIterator([]*enode.Node{node2, node1}) // preFork has 2 nodes + postFork := NewMockIterator([]*enode.Node{}) // postFork has no node + + iterator := enode.NewFairMix(5 * time.Millisecond) + defer iterator.Close() + iterator.AddSource(preFork) + iterator.AddSource(postFork) + + require.False(t, postFork.closed) // postFork iterator must start openened + + // First check: preFork first node after switch + requireNextTimeout(t, true, iterator, 10*time.Millisecond) + require.Equal(t, node2, iterator.Node()) + + // Second check: preFork second node + requireNextTimeout(t, true, iterator, 10*time.Millisecond) + require.Equal(t, node1, iterator.Node()) + + // No more elements + requireNextTimeout(t, false, iterator, 15*time.Millisecond) +} + +func TestFairMixIterator_PreForkEmpty(t *testing.T) { + // Mock nodes + node1 := NewTestingNode(t) + node2 := NewTestingNode(t) + + // Mock iterators + preFork := NewMockIterator([]*enode.Node{}) // preFork has no node + postFork := NewMockIterator([]*enode.Node{node1, node2}) // postFork has 2 nodes + + iterator := enode.NewFairMix(5 * time.Millisecond) + defer iterator.Close() + iterator.AddSource(preFork) + iterator.AddSource(postFork) + + // First check: postFork first node + requireNextTimeout(t, true, iterator, 10*time.Millisecond) + require.Equal(t, node1, iterator.Node()) + + // Second check: 
postFork second node + requireNextTimeout(t, true, iterator, 10*time.Millisecond) + require.Equal(t, node2, iterator.Node()) + + // No more elements even after switch + requireNextTimeout(t, false, iterator, 15*time.Millisecond) +} diff --git a/network/discovery/metrics.go b/network/discovery/metrics.go index 6ac842c5ef..a5fa52cff4 100644 --- a/network/discovery/metrics.go +++ b/network/discovery/metrics.go @@ -23,6 +23,10 @@ var ( Name: "ssv:network:discovery:enr_pong", Help: "Counts the number of pong responses we got as part of ENR publishing", }) + metricIterations = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv:network:discovery:iterations", + Help: "Counts the number of times a node was iterated using the mixed iterator", + }, []string{"fork"}) ) func init() { @@ -39,4 +43,7 @@ func init() { if err := prometheus.Register(metricPublishEnrPongs); err != nil { logger.Debug("could not register prometheus collector") } + if err := prometheus.Register(metricIterations); err != nil { + logger.Debug("could not register prometheus collector") + } } diff --git a/network/discovery/options.go b/network/discovery/options.go index da54032003..15e1bcb7e8 100644 --- a/network/discovery/options.go +++ b/network/discovery/options.go @@ -7,16 +7,15 @@ import ( "github.com/ssvlabs/ssv/logging" compatible_logger "github.com/ssvlabs/ssv/network/discovery/logger" - "github.com/pkg/errors" - "go.uber.org/zap" - "github.com/ssvlabs/ssv/network/commons" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/pkg/errors" + "go.uber.org/zap" ) -var SSVProtocolID = [6]byte{'s', 's', 'v', 'd', 'v', '5'} +var DefaultSSVProtocolID = [6]byte{'s', 's', 'v', 'd', 'v', '5'} // DiscV5Options for creating a new discv5 listener type DiscV5Options struct { @@ -85,12 +84,28 @@ func (opts *DiscV5Options) IPs() (net.IP, net.IP, string) { return ipAddr, bindIP, n } +func WithProtocolID(protocolID [6]byte) func(config *discover.Config) { + return func(config *discover.Config) { + config.V5ProtocolID = &protocolID + } +} + +func WithUnhandled(unhandled chan<- discover.ReadPacket) func(config *discover.Config) { + return func(config *discover.Config) { + config.Unhandled = unhandled + } +} + // DiscV5Cfg creates discv5 config from the options -func (opts *DiscV5Options) DiscV5Cfg(logger *zap.Logger) (*discover.Config, error) { - dv5Cfg := discover.Config{ - PrivateKey: opts.NetworkKey, - V5ProtocolID: &SSVProtocolID, +func (opts *DiscV5Options) DiscV5Cfg(logger *zap.Logger, funcOpts ...func(config *discover.Config)) (*discover.Config, error) { + dv5Cfg := &discover.Config{ + PrivateKey: opts.NetworkKey, } + + for _, fn := range funcOpts { + fn(dv5Cfg) + } + if len(opts.Bootnodes) > 0 { bootnodes, err := ParseENR(nil, false, opts.Bootnodes...) 
if err != nil { @@ -107,5 +122,5 @@ func (opts *DiscV5Options) DiscV5Cfg(logger *zap.Logger) (*discover.Config, erro dv5Cfg.Log = newLogger } - return &dv5Cfg, nil + return dv5Cfg, nil } diff --git a/network/discovery/service.go b/network/discovery/service.go index 15c66fb449..aa9bed5498 100644 --- a/network/discovery/service.go +++ b/network/discovery/service.go @@ -34,15 +34,13 @@ type HandleNewPeer func(e PeerEvent) // Options represents the options passed to create a service type Options struct { - Host host.Host - DiscV5Opts *DiscV5Options - ConnIndex peers.ConnectionIndex - SubnetsIdx peers.SubnetsIndex - HostAddress string - HostDNS string - - // DomainType is the SSV network domain of the node - DomainType networkconfig.DomainTypeProvider + Host host.Host + DiscV5Opts *DiscV5Options + ConnIndex peers.ConnectionIndex + SubnetsIdx peers.SubnetsIndex + HostAddress string + HostDNS string + NetworkConfig networkconfig.NetworkConfig } // Service is the interface for discovery diff --git a/network/discovery/service_test.go b/network/discovery/service_test.go new file mode 100644 index 0000000000..8baa399c13 --- /dev/null +++ b/network/discovery/service_test.go @@ -0,0 +1,330 @@ +package discovery + +import ( + "context" + "testing" + "time" + + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + spectypes "github.com/ssvlabs/ssv-spec/types" + "github.com/ssvlabs/ssv/network/records" + "github.com/ssvlabs/ssv/networkconfig" +) + +func CheckBootnodes(t *testing.T, dvs *DiscV5Service, netConfig networkconfig.NetworkConfig) { + + require.Len(t, dvs.bootnodes, len(netConfig.Bootnodes)) + + for _, bootnode := range netConfig.Bootnodes { + nodes, err := ParseENR(nil, false, bootnode) + require.NoError(t, err) + require.Contains(t, dvs.bootnodes, nodes[0]) + } +} + +func TestNewDiscV5Service(t *testing.T) { + dvs := testingDiscovery(t) + + assert.NotNil(t, dvs.dv5Listener) + assert.NotNil(t, dvs.conns) + assert.NotNil(t, dvs.subnetsIdx) + assert.NotNil(t, dvs.networkConfig) + + // Check bootnodes + CheckBootnodes(t, dvs, testNetConfig) + + // Close + err := dvs.Close() + require.NoError(t, err) +} + +func TestDiscV5Service_Close(t *testing.T) { + dvs := testingDiscovery(t) + + err := dvs.Close() + assert.NoError(t, err) +} + +func TestDiscV5Service_RegisterSubnets(t *testing.T) { + dvs := testingDiscovery(t) + + // Register subnets 1, 3, and 5 + updated, err := dvs.RegisterSubnets(testLogger, 1, 3, 5) + assert.NoError(t, err) + assert.True(t, updated) + + require.Equal(t, byte(1), dvs.subnets[1]) + require.Equal(t, byte(1), dvs.subnets[3]) + require.Equal(t, byte(1), dvs.subnets[5]) + require.Equal(t, byte(0), dvs.subnets[2]) + + // Register the same subnets. 
Should not update the state + updated, err = dvs.RegisterSubnets(testLogger, 1, 3, 5) + assert.NoError(t, err) + assert.False(t, updated) + + require.Equal(t, byte(1), dvs.subnets[1]) + require.Equal(t, byte(1), dvs.subnets[3]) + require.Equal(t, byte(1), dvs.subnets[5]) + require.Equal(t, byte(0), dvs.subnets[2]) + + // Register different subnets + updated, err = dvs.RegisterSubnets(testLogger, 2, 4) + assert.NoError(t, err) + assert.True(t, updated) + require.Equal(t, byte(1), dvs.subnets[1]) + require.Equal(t, byte(1), dvs.subnets[2]) + require.Equal(t, byte(1), dvs.subnets[3]) + require.Equal(t, byte(1), dvs.subnets[4]) + require.Equal(t, byte(1), dvs.subnets[5]) + require.Equal(t, byte(0), dvs.subnets[6]) + + // Close + err = dvs.Close() + require.NoError(t, err) +} + +func TestDiscV5Service_DeregisterSubnets(t *testing.T) { + dvs := testingDiscovery(t) + + // Register subnets first + _, err := dvs.RegisterSubnets(testLogger, 1, 2, 3) + require.NoError(t, err) + + require.Equal(t, byte(1), dvs.subnets[1]) + require.Equal(t, byte(1), dvs.subnets[2]) + require.Equal(t, byte(1), dvs.subnets[3]) + + // Deregister from 2 and 3 + updated, err := dvs.DeregisterSubnets(testLogger, 2, 3) + assert.NoError(t, err) + assert.True(t, updated) + + require.Equal(t, byte(1), dvs.subnets[1]) + require.Equal(t, byte(0), dvs.subnets[2]) + require.Equal(t, byte(0), dvs.subnets[3]) + + // Deregistering non-existent subnets should not update + updated, err = dvs.DeregisterSubnets(testLogger, 4, 5) + assert.NoError(t, err) + assert.False(t, updated) + + // Close + err = dvs.Close() + require.NoError(t, err) +} + +func checkLocalNodeDomainTypeAlignment(t *testing.T, localNode *enode.LocalNode, netConfig networkconfig.NetworkConfig) { + // Check domain entry + domainEntry := records.DomainTypeEntry{ + Key: records.KeyDomainType, + DomainType: spectypes.DomainType{}, + } + err := localNode.Node().Record().Load(&domainEntry) + require.NoError(t, err) + require.Equal(t, netConfig.DomainType(), domainEntry.DomainType) + + // Check next domain entry + nextDomainEntry := records.DomainTypeEntry{ + Key: records.KeyNextDomainType, + DomainType: spectypes.DomainType{}, + } + err = localNode.Node().Record().Load(&nextDomainEntry) + require.NoError(t, err) + require.Equal(t, netConfig.NextDomainType(), nextDomainEntry.DomainType) +} + +func TestDiscV5Service_PublishENR(t *testing.T) { + logger := zap.NewNop() + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + opts := testingDiscoveryOptions(t, testNetConfig) + service, err := newDiscV5Service(ctx, testLogger, opts) + require.NoError(t, err) + dvs := service.(*DiscV5Service) + + // Replace listener + localNode := dvs.Self() + err = dvs.Close() + require.NoError(t, err) + dvs.dv5Listener = NewMockListener(localNode, []*enode.Node{NewTestingNode(t)}) + + // Check LocalNode has the correct domain and next domain entries + checkLocalNodeDomainTypeAlignment(t, localNode, testNetConfig) + + // Change network config + dvs.networkConfig = networkconfig.HoleskyStage + // Test PublishENR method + dvs.PublishENR(logger) + + // Check LocalNode has been updated + checkLocalNodeDomainTypeAlignment(t, localNode, networkconfig.HoleskyStage) +} + +func TestDiscV5Service_Bootstrap(t *testing.T) { + logger := zap.NewNop() + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + opts := testingDiscoveryOptions(t, testNetConfig) + + service, err := newDiscV5Service(testCtx, testLogger, opts) + require.NoError(t, 
err) + + dvs := service.(*DiscV5Service) + + // Replace listener + err = dvs.conn.Close() + require.NoError(t, err) + testingNode := NewTestingNode(t) + dvs.dv5Listener = NewMockListener(dvs.Self(), []*enode.Node{testingNode}) + + // testing handler. It's called whenever a new peer is found + handlerCalled := make(chan struct{}) + handler := func(e PeerEvent) { + require.Equal(t, testingNode, e.Node) + close(handlerCalled) + } + + // Run bootstrap + go func() { + err := dvs.Bootstrap(logger, handler) + assert.NoError(t, err) + }() + + // Wait for testing peer to be found + select { + case <-handlerCalled: + // Test passed + case <-ctx.Done(): + t.Fatal("Bootstrap timed out") + } +} + +func TestDiscV5Service_Node(t *testing.T) { + dvs := testingDiscovery(t) + + // Replace listener + err := dvs.conn.Close() + require.NoError(t, err) + testingNode := NewTestingNode(t) + dvs.dv5Listener = NewMockListener(dvs.Self(), []*enode.Node{testingNode}) + + // Create a mock peer.AddrInfo + unknownPeer := NewTestingNode(t) + unknownPeerAddrInfo, err := ToPeer(unknownPeer) + assert.NoError(t, err) + + // Test looking for an unknown peer + node, err := dvs.Node(testLogger, *unknownPeerAddrInfo) + assert.NoError(t, err) + assert.Nil(t, node) + + // Test looking for a known peer + addrInfo, err := ToPeer(testingNode) + assert.NoError(t, err) + node, err = dvs.Node(testLogger, *addrInfo) + assert.NoError(t, err) + assert.Equal(t, testingNode, node) +} + +func TestDiscV5Service_checkPeer(t *testing.T) { + dvs := testingDiscovery(t) + + defer func() { + err := dvs.conn.Close() + require.NoError(t, err) + }() + + // Valid peer + err := dvs.checkPeer(testLogger, ToPeerEvent(NewTestingNode(t))) + require.NoError(t, err) + + // No domain + err = dvs.checkPeer(testLogger, ToPeerEvent(NodeWithoutDomain(t))) + require.ErrorContains(t, err, "could not read domain type: not found") + + // No next domain. 
No error since it's not enforced + err = dvs.checkPeer(testLogger, ToPeerEvent(NodeWithoutNextDomain(t))) + require.NoError(t, err) + + // Matching main domain + err = dvs.checkPeer(testLogger, ToPeerEvent(NodeWithCustomDomains(t, testNetConfig.DomainType(), spectypes.DomainType{}))) + require.NoError(t, err) + + // Matching next domain + err = dvs.checkPeer(testLogger, ToPeerEvent(NodeWithCustomDomains(t, spectypes.DomainType{}, testNetConfig.DomainType()))) + require.NoError(t, err) + + // Mismatching domains + err = dvs.checkPeer(testLogger, ToPeerEvent(NodeWithCustomDomains(t, spectypes.DomainType{}, spectypes.DomainType{}))) + require.ErrorContains(t, err, "mismatched domain type: neither 00000000 nor 00000000 match 00000302") + + // No subnets + err = dvs.checkPeer(testLogger, ToPeerEvent(NodeWithoutSubnets(t))) + require.ErrorContains(t, err, "could not read subnets: not found") + + // Zero subnets + err = dvs.checkPeer(testLogger, ToPeerEvent(NodeWithZeroSubnets(t))) + require.ErrorContains(t, err, "zero subnets") + + // Valid peer but reached limit + dvs.conns.(*MockConnection).SetAtLimit(true) + err = dvs.checkPeer(testLogger, ToPeerEvent(NewTestingNode(t))) + require.ErrorContains(t, err, "reached limit") + dvs.conns.(*MockConnection).SetAtLimit(false) + + // Valid peer but no common subnet + subnets := make([]byte, len(records.ZeroSubnets)) + subnets[10] = 1 + err = dvs.checkPeer(testLogger, ToPeerEvent(NodeWithCustomSubnets(t, subnets))) + require.ErrorContains(t, err, "no shared subnets") +} + +func TestDiscV5ServiceListenerType(t *testing.T) { + + t.Run("Post-Fork", func(t *testing.T) { + netConfig := PostForkNetworkConfig() + dvs := testingDiscoveryWithNetworkConfig(t, netConfig) + + // Check listener type + _, ok := dvs.dv5Listener.(*forkingDV5Listener) + require.False(t, ok) + + _, ok = dvs.dv5Listener.(*discover.UDPv5) + require.True(t, ok) + + // Check bootnodes + CheckBootnodes(t, dvs, netConfig) + + // Close + err := dvs.Close() + require.NoError(t, err) + }) + + t.Run("Pre-Fork", func(t *testing.T) { + + netConfig := PreForkNetworkConfig() + dvs := testingDiscoveryWithNetworkConfig(t, netConfig) + + // Check listener type + _, ok := dvs.dv5Listener.(*discover.UDPv5) + require.False(t, ok) + + _, ok = dvs.dv5Listener.(*forkingDV5Listener) + require.True(t, ok) + + // Check bootnodes + CheckBootnodes(t, dvs, netConfig) + + // Close + err := dvs.Close() + require.NoError(t, err) + }) +} diff --git a/network/discovery/shared_conn.go b/network/discovery/shared_conn.go new file mode 100644 index 0000000000..0d537b514d --- /dev/null +++ b/network/discovery/shared_conn.go @@ -0,0 +1,36 @@ +package discovery + +import ( + "net" + "net/netip" + + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/pkg/errors" +) + +// SharedUDPConn implements a shared connection. Write sends messages to the underlying connection while read returns +// messages that were found unprocessable and sent to the unhandled channel by the primary listener. 
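+//
+// For illustration, a minimal sketch of the intended wiring, mirroring the
+// bootnode's createListener later in this patch series (conn, localNode,
+// privKey and protocolID are placeholder names, error handling omitted):
+// the post-fork listener owns the socket and forwards packets it cannot
+// decode to the pre-fork listener via SharedUDPConn.
+//
+//	unhandled := make(chan discover.ReadPacket, 100)
+//	sharedConn := &SharedUDPConn{UDPConn: conn, Unhandled: unhandled}
+//	postFork, _ := discover.ListenV5(conn, localNode, discover.Config{
+//		PrivateKey:   privKey,
+//		Unhandled:    unhandled,
+//		V5ProtocolID: &protocolID,
+//	})
+//	preFork, _ := discover.ListenV5(sharedConn, localNode, discover.Config{PrivateKey: privKey})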
+// It's copied from https://github.com/ethereum/go-ethereum/blob/v1.14.8/p2p/server.go#L435 +type SharedUDPConn struct { + *net.UDPConn + Unhandled chan discover.ReadPacket +} + +// ReadFromUDPAddrPort implements discover.UDPConn +func (s *SharedUDPConn) ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPort, err error) { + packet, ok := <-s.Unhandled + if !ok { + return 0, netip.AddrPort{}, errors.New("connection was closed") + } + l := len(packet.Data) + if l > len(b) { + l = len(b) + } + copy(b[:l], packet.Data[:l]) + return l, packet.Addr, nil +} + +// Close implements discover.UDPConn +func (s *SharedUDPConn) Close() error { + return nil +} diff --git a/network/discovery/util_test.go b/network/discovery/util_test.go new file mode 100644 index 0000000000..183d222f5a --- /dev/null +++ b/network/discovery/util_test.go @@ -0,0 +1,399 @@ +package discovery + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "encoding/hex" + "net" + "sync" + "testing" + + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/enr" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/pkg/errors" + "github.com/prysmaticlabs/go-bitfield" + spectypes "github.com/ssvlabs/ssv-spec/types" + "github.com/ssvlabs/ssv/network/peers" + "github.com/ssvlabs/ssv/network/records" + "github.com/ssvlabs/ssv/networkconfig" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +var ( + testLogger = zap.NewNop() + testCtx = context.Background() + testNetConfig = networkconfig.TestNetwork + + testIP = "127.0.0.1" + testBindIP = "127.0.0.1" + testPort uint16 = 12001 + testTCPPort uint16 = 13001 +) + +// Options for the discovery service +func testingDiscoveryOptions(t *testing.T, networkConfig networkconfig.NetworkConfig) *Options { + // Generate key + privKey, err := crypto.GenerateKey() + require.NoError(t, err) + + // Discv5 options + discV5Opts := &DiscV5Options{ + StoragePath: t.TempDir(), + IP: testIP, + BindIP: testBindIP, + + Port: testPort, + TCPPort: testTCPPort, + NetworkKey: privKey, + Bootnodes: networkConfig.Bootnodes, + Subnets: mockSubnets(1), + EnableLogging: false, + } + + // Discovery options + allSubs, _ := records.Subnets{}.FromString(records.AllSubnets) + subnetsIndex := peers.NewSubnetsIndex(len(allSubs)) + connectionIndex := NewMockConnection() + + return &Options{ + DiscV5Opts: discV5Opts, + ConnIndex: connectionIndex, + SubnetsIdx: subnetsIndex, + NetworkConfig: networkConfig, + } +} + +// Testing discovery with a given NetworkConfig +func testingDiscoveryWithNetworkConfig(t *testing.T, netConfig networkconfig.NetworkConfig) *DiscV5Service { + opts := testingDiscoveryOptions(t, netConfig) + service, err := newDiscV5Service(testCtx, testLogger, opts) + require.NoError(t, err) + require.NotNil(t, service) + + dvs, ok := service.(*DiscV5Service) + require.True(t, ok) + + return dvs +} + +// Testing discovery service +func testingDiscovery(t *testing.T) *DiscV5Service { + return testingDiscoveryWithNetworkConfig(t, testNetConfig) +} + +// NetworkConfig with fork epoch +func testingNetConfigWithForkEpoch(forkEpoch phase0.Epoch) networkconfig.NetworkConfig { + n := networkconfig.HoleskyStage + return networkconfig.NetworkConfig{ + Name: n.Name, + Beacon: n.Beacon, + GenesisDomainType: n.GenesisDomainType, + AlanDomainType: n.AlanDomainType, + GenesisEpoch: n.GenesisEpoch, + RegistrySyncOffset: 
n.RegistrySyncOffset, + RegistryContractAddr: n.RegistryContractAddr, + Bootnodes: n.Bootnodes, + // Fork epoch + AlanForkEpoch: forkEpoch, + } +} + +// NetworkConfig for staying in pre-fork +func PreForkNetworkConfig() networkconfig.NetworkConfig { + forkEpoch := networkconfig.HoleskyStage.Beacon.EstimatedCurrentEpoch() + 1000 + return testingNetConfigWithForkEpoch(forkEpoch) +} + +// NetworkConfig for staying in post-fork +func PostForkNetworkConfig() networkconfig.NetworkConfig { + forkEpoch := networkconfig.HoleskyStage.Beacon.EstimatedCurrentEpoch() - 1000 + return testingNetConfigWithForkEpoch(forkEpoch) +} + +// Testing LocalNode +func NewLocalNode(t *testing.T) *enode.LocalNode { + // Generate key + nodeKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + + // Create local node + localNode, err := records.CreateLocalNode(nodeKey, t.TempDir(), net.IP(testIP), testPort, testTCPPort) + require.NoError(t, err) + + // Set entries + err = records.SetDomainTypeEntry(localNode, records.KeyDomainType, testNetConfig.DomainType()) + require.NoError(t, err) + err = records.SetDomainTypeEntry(localNode, records.KeyNextDomainType, testNetConfig.NextDomainType()) + require.NoError(t, err) + err = records.SetSubnetsEntry(localNode, mockSubnets(1)) + require.NoError(t, err) + + return localNode +} + +// Testing node +func NewTestingNode(t *testing.T) *enode.Node { + return CustomNode(t, true, testNetConfig.DomainType(), true, testNetConfig.NextDomainType(), true, mockSubnets(1)) +} + +func NewTestingNodes(t *testing.T, count int) []*enode.Node { + nodes := make([]*enode.Node, count) + for i := 0; i < count; i++ { + nodes[i] = NewTestingNode(t) + } + return nodes +} + +func NodeWithoutDomain(t *testing.T) *enode.Node { + return CustomNode(t, false, spectypes.DomainType{}, true, testNetConfig.NextDomainType(), true, mockSubnets(1)) +} + +func NodeWithoutNextDomain(t *testing.T) *enode.Node { + return CustomNode(t, true, testNetConfig.DomainType(), false, spectypes.DomainType{}, true, mockSubnets(1)) +} + +func NodeWithoutSubnets(t *testing.T) *enode.Node { + return CustomNode(t, true, testNetConfig.DomainType(), true, testNetConfig.NextDomainType(), false, nil) +} + +func NodeWithCustomDomains(t *testing.T, domainType spectypes.DomainType, nextDomainType spectypes.DomainType) *enode.Node { + return CustomNode(t, true, domainType, true, nextDomainType, true, mockSubnets(1)) +} + +func NodeWithZeroSubnets(t *testing.T) *enode.Node { + return CustomNode(t, true, testNetConfig.DomainType(), true, testNetConfig.NextDomainType(), true, zeroSubnets) +} + +func NodeWithCustomSubnets(t *testing.T, subnets []byte) *enode.Node { + return CustomNode(t, true, testNetConfig.DomainType(), true, testNetConfig.NextDomainType(), true, subnets) +} + +func CustomNode(t *testing.T, + setDomainType bool, domainType spectypes.DomainType, + setNextDomainType bool, nextDomainType spectypes.DomainType, + setSubnets bool, subnets []byte) *enode.Node { + + // Generate key + nodeKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + + // Encoding and decoding (hack so that SignV4 works) + hexPrivKey := hex.EncodeToString(crypto.FromECDSA(nodeKey)) + sk, err := crypto.HexToECDSA(hexPrivKey) + require.NoError(t, err) + + // Create record + record := enr.Record{} + + // Set entries + record.Set(enr.IP(net.IPv4(127, 0, 0, 1))) + record.Set(enr.UDP(12000)) + record.Set(enr.TCP(13000)) + if setDomainType { + record.Set(records.DomainTypeEntry{ + Key: 
records.KeyDomainType, + DomainType: domainType, + }) + } + if setNextDomainType { + record.Set(records.DomainTypeEntry{ + Key: records.KeyNextDomainType, + DomainType: nextDomainType, + }) + } + if setSubnets { + subnetsVec := bitfield.NewBitvector128() + for i, subnet := range subnets { + subnetsVec.SetBitAt(uint64(i), subnet > 0) + } + record.Set(enr.WithEntry("subnets", &subnetsVec)) + } + + // Sign + err = enode.SignV4(&record, sk) + require.NoError(t, err) + + // Create node + node, err := enode.New(enode.V4ID{}, &record) + require.NoError(t, err) + + return node +} + +// Transform node into PeerEvent +func ToPeerEvent(node *enode.Node) PeerEvent { + addrInfo, err := ToPeer(node) + if err != nil { + panic(err) + } + return PeerEvent{ + AddrInfo: *addrInfo, + Node: node, + } +} + +// Mock enode.Iterator +type MockIterator struct { + nodes []*enode.Node + position int + closed bool + mtx sync.Mutex +} + +func NewMockIterator(nodes []*enode.Node) *MockIterator { + return &MockIterator{ + nodes: nodes, + position: -1, + } +} + +func (m *MockIterator) Next() bool { + m.mtx.Lock() + defer m.mtx.Unlock() + if m.closed || m.position >= len(m.nodes)-1 { + return false + } + m.position++ + return m.nodes[m.position] != nil +} + +func (m *MockIterator) Node() *enode.Node { + m.mtx.Lock() + defer m.mtx.Unlock() + if m.closed || m.position == -1 || m.position >= len(m.nodes) { + return nil + } + return m.nodes[m.position] +} + +func (m *MockIterator) Close() { + m.mtx.Lock() + defer m.mtx.Unlock() + m.closed = true +} + +// Mock peers.ConnectionIndex +type MockConnection struct { + connectedness map[peer.ID]network.Connectedness + canConnect map[peer.ID]bool + atLimit bool + isBad map[peer.ID]bool + mu sync.RWMutex +} + +func NewMockConnection() *MockConnection { + return &MockConnection{ + connectedness: make(map[peer.ID]network.Connectedness), + canConnect: make(map[peer.ID]bool), + isBad: make(map[peer.ID]bool), + atLimit: false, + } +} + +func (mc *MockConnection) Connectedness(id peer.ID) network.Connectedness { + mc.mu.RLock() + defer mc.mu.RUnlock() + if conn, ok := mc.connectedness[id]; ok { + return conn + } + return network.NotConnected +} + +func (mc *MockConnection) CanConnect(id peer.ID) bool { + mc.mu.RLock() + defer mc.mu.RUnlock() + if can, ok := mc.canConnect[id]; ok { + return can + } + return false +} + +func (mc *MockConnection) AtLimit(dir network.Direction) bool { + mc.mu.RLock() + defer mc.mu.RUnlock() + return mc.atLimit +} + +func (mc *MockConnection) IsBad(logger *zap.Logger, id peer.ID) bool { + mc.mu.RLock() + defer mc.mu.RUnlock() + if bad, ok := mc.isBad[id]; ok { + return bad + } + return false +} + +func (mc *MockConnection) SetConnectedness(id peer.ID, conn network.Connectedness) { + mc.mu.Lock() + defer mc.mu.Unlock() + mc.connectedness[id] = conn +} + +func (mc *MockConnection) SetCanConnect(id peer.ID, canConnect bool) { + mc.mu.Lock() + defer mc.mu.Unlock() + mc.canConnect[id] = canConnect +} + +func (mc *MockConnection) SetAtLimit(atLimit bool) { + mc.mu.Lock() + defer mc.mu.Unlock() + mc.atLimit = atLimit +} + +func (mc *MockConnection) SetIsBad(id peer.ID, isBad bool) { + mc.mu.Lock() + defer mc.mu.Unlock() + mc.isBad[id] = isBad +} + +// Mock listener +type MockListener struct { + localNode *enode.LocalNode + nodes []*enode.Node + closed bool + nodesForPingError []*enode.Node +} + +func NewMockListener(localNode *enode.LocalNode, nodes []*enode.Node) *MockListener { + return &MockListener{ + localNode: localNode, + nodes: nodes, + nodesForPingError: 
make([]*enode.Node, 0), + } +} + +func (l *MockListener) Lookup(enode.ID) []*enode.Node { + return l.nodes +} +func (l *MockListener) RandomNodes() enode.Iterator { + return NewMockIterator(l.nodes) +} +func (l *MockListener) AllNodes() []*enode.Node { + return l.nodes +} +func (l *MockListener) Ping(node *enode.Node) error { + nodeStr := node.String() + for _, storedNode := range l.nodesForPingError { + if storedNode.String() == nodeStr { + return errors.New("failed ping") + } + } + return nil +} +func (l *MockListener) LocalNode() *enode.LocalNode { + return l.localNode +} +func (l *MockListener) Close() { + l.closed = true +} +func (l *MockListener) SetNodesForPingError(nodes []*enode.Node) { + l.nodesForPingError = nodes +} diff --git a/network/p2p/p2p_setup.go b/network/p2p/p2p_setup.go index 0a4893a80c..6225b0f0e3 100644 --- a/network/p2p/p2p_setup.go +++ b/network/p2p/p2p_setup.go @@ -264,13 +264,13 @@ func (n *p2pNetwork) setupDiscovery(logger *zap.Logger) error { logger.Info("discovery: using mdns (local)") } discOpts := discovery.Options{ - Host: n.host, - DiscV5Opts: discV5Opts, - ConnIndex: n.idx, - SubnetsIdx: n.idx, - HostAddress: n.cfg.HostAddress, - HostDNS: n.cfg.HostDNS, - DomainType: n.cfg.Network, + Host: n.host, + DiscV5Opts: discV5Opts, + ConnIndex: n.idx, + SubnetsIdx: n.idx, + HostAddress: n.cfg.HostAddress, + HostDNS: n.cfg.HostDNS, + NetworkConfig: n.cfg.Network, } disc, err := discovery.NewService(n.ctx, logger, discOpts) if err != nil { diff --git a/network/p2p/test_utils.go b/network/p2p/test_utils.go index 04e5bd0d59..8b4b79d6e4 100644 --- a/network/p2p/test_utils.go +++ b/network/p2p/test_utils.go @@ -56,7 +56,7 @@ func (ln *LocalNet) WithBootnode(ctx context.Context, logger *zap.Logger) error if err != nil { return err } - bn, err := discovery.NewBootnode(ctx, logger, &discovery.BootnodeOptions{ + bn, err := discovery.NewBootnode(ctx, logger, networkconfig.TestNetwork, &discovery.BootnodeOptions{ PrivateKey: hex.EncodeToString(b), ExternalIP: "127.0.0.1", Port: ln.udpRand.Next(13001, 13999), diff --git a/networkconfig/config.go b/networkconfig/config.go index fcfb001996..2438fd1eba 100644 --- a/networkconfig/config.go +++ b/networkconfig/config.go @@ -7,7 +7,6 @@ import ( "time" "github.com/attestantio/go-eth2-client/spec/phase0" - spectypes "github.com/ssvlabs/ssv-spec/types" "github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon" @@ -47,6 +46,7 @@ type NetworkConfig struct { RegistrySyncOffset *big.Int RegistryContractAddr string // TODO: ethcommon.Address Bootnodes []string + DiscoveryProtocolID [6]byte AlanForkEpoch phase0.Epoch } diff --git a/networkconfig/holesky-stage.go b/networkconfig/holesky-stage.go index 07d8747794..a92d45d001 100644 --- a/networkconfig/holesky-stage.go +++ b/networkconfig/holesky-stage.go @@ -16,7 +16,8 @@ var HoleskyStage = NetworkConfig{ GenesisEpoch: 1, RegistrySyncOffset: new(big.Int).SetInt64(84599), RegistryContractAddr: "0x0d33801785340072C452b994496B19f196b7eE15", - AlanForkEpoch: 99999999, + AlanForkEpoch: 999999999, + DiscoveryProtocolID: [6]byte{'s', 's', 'v', 'd', 'v', '5'}, Bootnodes: []string{ // Public bootnode: // "enr:-Ja4QDYHVgUs9NvlMqq93ot6VNqbmrIlMrwKnq4X3DPRgyUNB4ospDp8ubMvsf-KsgqY8rzpZKy4GbE1DLphabpRBc-GAY_diLjngmlkgnY0gmlwhDQrLYqJc2VjcDI1NmsxoQKnAiuSlgSR8asjCH0aYoVKM8uPbi4noFuFHZHaAHqknYNzc3YBg3RjcIITiYN1ZHCCD6E", diff --git a/networkconfig/holesky.go b/networkconfig/holesky.go index 0b8ca1c891..2d7d52a517 100644 --- a/networkconfig/holesky.go +++ b/networkconfig/holesky.go @@ -16,6 +16,8 @@ var Holesky = 
NetworkConfig{ GenesisEpoch: 1, RegistrySyncOffset: new(big.Int).SetInt64(181612), RegistryContractAddr: "0x38A4794cCEd47d3baf7370CcC43B560D3a1beEFA", + AlanForkEpoch: 999999999, + DiscoveryProtocolID: [6]byte{'s', 's', 'v', 'd', 'v', '5'}, Bootnodes: []string{ "enr:-Li4QFIQzamdvTxGJhvcXG_DFmCeyggSffDnllY5DiU47pd_K_1MRnSaJimWtfKJ-MD46jUX9TwgW5Jqe0t4pH41RYWGAYuFnlyth2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhCLdu_SJc2VjcDI1NmsxoQN4v-N9zFYwEqzGPBBX37q24QPFvAVUtokIo1fblIsmTIN0Y3CCE4uDdWRwgg-j", }, diff --git a/networkconfig/test-network.go b/networkconfig/test-network.go index 976a2c5c8c..02002fd5c7 100644 --- a/networkconfig/test-network.go +++ b/networkconfig/test-network.go @@ -17,6 +17,6 @@ var TestNetwork = NetworkConfig{ RegistrySyncOffset: new(big.Int).SetInt64(9015219), RegistryContractAddr: "0x4B133c68A084B8A88f72eDCd7944B69c8D545f03", Bootnodes: []string{ - "enr:-Li4QO86ZMZr_INMW_WQBsP2jS56yjrHnZXxAUOKJz4_qFPKD1Cr3rghQD2FtXPk2_VPnJUi8BBiMngOGVXC0wTYpJGGAYgqnGSNh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDkvpOTAAAQIP__________gmlkgnY0gmlwhArqAsGJc2VjcDI1NmsxoQKNW0Mf-xTXcevRSkZOvoN0Q0T9OkTjGZQyQeOl3bYU3YN0Y3CCE4iDdWRwgg-g;enr:-Li4QBoH15fXLV78y1_nmD5sODveptALORh568iWLS_eju3SUvF2ZfGE2j-nERKU1zb2g5KlS8L70SRLdRUJ-pHH-fmGAYgvh9oGh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDkvpOTAAAQIP__________gmlkgnY0gmlwhArqAsGJc2VjcDI1NmsxoQO_tV3JP75ZUZPjhOgc2VqEu_FQEMeHc4AyOz6Lz33M2IN0Y3CCE4mDdWRwgg-h", + "enr:-Li4QFIQzamdvTxGJhvcXG_DFmCeyggSffDnllY5DiU47pd_K_1MRnSaJimWtfKJ-MD46jUX9TwgW5Jqe0t4pH41RYWGAYuFnlyth2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhCLdu_SJc2VjcDI1NmsxoQN4v-N9zFYwEqzGPBBX37q24QPFvAVUtokIo1fblIsmTIN0Y3CCE4uDdWRwgg-j", }, } diff --git a/utils/boot_node/node.go b/utils/boot_node/node.go index dabf348e9d..265a3476a3 100644 --- a/utils/boot_node/node.go +++ b/utils/boot_node/node.go @@ -20,12 +20,11 @@ import ( "github.com/ssvlabs/ssv/logging" "github.com/ssvlabs/ssv/logging/fields" + "github.com/ssvlabs/ssv/network/discovery" "github.com/ssvlabs/ssv/networkconfig" "github.com/ssvlabs/ssv/utils" ) -var SSVProtocolID = [6]byte{'s', 's', 'v', 'd', 'v', '5'} - // Options contains options to create the node type Options struct { PrivateKey string `yaml:"PrivateKey" env:"BOOT_NODE_PRIVATE_KEY" env-description:"boot node private key (default will generate new)"` @@ -54,11 +53,7 @@ type bootNode struct { } // New is the constructor of ssvNode -func New(opts Options) (Node, error) { - networkConfig, err := networkconfig.GetNetworkConfigByName(opts.Network) - if err != nil { - return nil, err - } +func New(networkConfig networkconfig.NetworkConfig, opts Options) (Node, error) { return &bootNode{ privateKey: opts.PrivateKey, discv5port: opts.UDPPort, @@ -71,7 +66,7 @@ func New(opts Options) (Node, error) { } type handler struct { - listener *discover.UDPv5 + listener discovery.Listener } func (h *handler) httpHandler(logger *zap.Logger) func(w http.ResponseWriter, _ *http.Request) { @@ -102,19 +97,19 @@ func (n *bootNode) Start(ctx context.Context, logger *zap.Logger) error { if err != nil { log.Fatal("Failed to get p2p privateKey", zap.Error(err)) } - cfg := discover.Config{ - PrivateKey: privKey, - V5ProtocolID: &SSVProtocolID, - } ipAddr, err := network.ExternalIP() // ipAddr = "127.0.0.1" log.Print("TEST Ip addr----", ipAddr) if err != nil { logger.Fatal("Failed to get ExternalIP", zap.Error(err)) } - listener := n.createListener(logger, ipAddr, n.discv5port, cfg) - node := listener.Self() - logger.Info("Running", zap.String("node", node.String())) + listener := 
n.createListener(logger, ipAddr, n.discv5port, privKey) + node := listener.LocalNode().Node() + logger.Info("Running", + zap.String("node", node.String()), + zap.String("network", n.network.Name), + fields.ProtocolID(n.network.DiscoveryProtocolID), + ) handler := &handler{ listener: listener, @@ -138,7 +133,8 @@ func (n *bootNode) Start(ctx context.Context, logger *zap.Logger) error { return nil } -func (n *bootNode) createListener(logger *zap.Logger, ipAddr string, port uint16, cfg discover.Config) *discover.UDPv5 { +func (n *bootNode) createListener(logger *zap.Logger, ipAddr string, port uint16, privateKey *ecdsa.PrivateKey) discovery.Listener { + // Create the UDP listener and the LocalNode record. ip := net.ParseIP(ipAddr) if ip.To4() == nil { logger.Fatal("IPV4 address not provided", fields.Address(ipAddr)) @@ -163,16 +159,31 @@ func (n *bootNode) createListener(logger *zap.Logger, ipAddr string, port uint16 if err != nil { log.Fatal(err) } - localNode, err := n.createLocalNode(logger, cfg.PrivateKey, ip, port) + localNode, err := n.createLocalNode(logger, privateKey, ip, port) + if err != nil { + log.Fatal(err) + } + + // Allocate a fake connection to forward postFork packets to the preFork listener. + unhandled := make(chan discover.ReadPacket, 100) // size taken from https://github.com/ethereum/go-ethereum/blob/v1.13.5/p2p/server.go#L551 + sharedConn := &discovery.SharedUDPConn{UDPConn: conn, Unhandled: unhandled} + + postForkListener, err := discover.ListenV5(conn, localNode, discover.Config{ + PrivateKey: privateKey, + Unhandled: unhandled, + V5ProtocolID: &n.network.DiscoveryProtocolID, + }) if err != nil { log.Fatal(err) } - network, err := discover.ListenV5(conn, localNode, cfg) + preForkListener, err := discover.ListenV5(sharedConn, localNode, discover.Config{ + PrivateKey: privateKey, + }) if err != nil { log.Fatal(err) } - return network + return discovery.NewForkingDV5Listener(logger, preForkListener, postForkListener, 5*time.Second, n.network) } func (n *bootNode) createLocalNode(logger *zap.Logger, privKey *ecdsa.PrivateKey, ipAddr net.IP, port uint16) (*enode.LocalNode, error) { From b626db6a064a0198d5d05712b69cd10437ad6097 Mon Sep 17 00:00:00 2001 From: rehs0y Date: Mon, 7 Oct 2024 13:05:43 +0300 Subject: [PATCH 29/35] refactor: use a ticker instead of a timer, and call it first at the loop. this makes us wait the remaining time after `Next()` and not full interval each time. (#1775) --- network/discovery/dv5_service.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/network/discovery/dv5_service.go b/network/discovery/dv5_service.go index 67184ae7d1..f2116b69de 100644 --- a/network/discovery/dv5_service.go +++ b/network/discovery/dv5_service.go @@ -310,15 +310,15 @@ func (dvs *DiscV5Service) discover(ctx context.Context, handler HandleNewPeer, i // selfID is used to exclude current node selfID := dvs.dv5Listener.LocalNode().Node().ID().TerminalString() - t := time.NewTimer(interval) - defer t.Stop() - wait := func() { - t.Reset(interval) - <-t.C - } + ticker := time.NewTicker(interval) + defer ticker.Stop() for ctx.Err() == nil { - wait() + select { + case <-ticker.C: + case <-ctx.Done(): + return + } exists := iterator.Next() if !exists { continue From 31ef069f7f57fb791ecce0e3445cb988b3a12499 Mon Sep 17 00:00:00 2001 From: rehs0y Date: Mon, 7 Oct 2024 13:31:20 +0300 Subject: [PATCH 30/35] fix: (alan) dont stop prefork discovery (#1774) * don't close prefork listenr on fork. 
* remove once in closing the forkingDV5Listener * fix tests --------- Co-authored-by: moshe-blox --- network/discovery/forking_dv5_listener.go | 30 +--- .../discovery/forking_dv5_listener_test.go | 158 +++++++----------- 2 files changed, 61 insertions(+), 127 deletions(-) diff --git a/network/discovery/forking_dv5_listener.go b/network/discovery/forking_dv5_listener.go index 8903a73db0..5f0826178d 100644 --- a/network/discovery/forking_dv5_listener.go +++ b/network/discovery/forking_dv5_listener.go @@ -1,7 +1,6 @@ package discovery import ( - "sync" "time" "github.com/ethereum/go-ethereum/p2p/enode" @@ -21,7 +20,6 @@ type forkingDV5Listener struct { preForkListener Listener postForkListener Listener iteratorTimeout time.Duration - closeOnce sync.Once netCfg networkconfig.NetworkConfig } @@ -41,11 +39,6 @@ func NewForkingDV5Listener(logger *zap.Logger, preFork, postFork Listener, itera // Before the fork, returns the result of a Lookup in both pre and post-fork services. // After the fork, returns only the result from the post-fork service. func (l *forkingDV5Listener) Lookup(id enode.ID) []*enode.Node { - if l.netCfg.PastAlanFork() { - l.closePreForkListener() - return l.postForkListener.Lookup(id) - } - nodes := l.postForkListener.Lookup(id) nodes = append(nodes, l.preForkListener.Lookup(id)...) return nodes @@ -54,11 +47,6 @@ func (l *forkingDV5Listener) Lookup(id enode.ID) []*enode.Node { // Before the fork, returns an iterator for both pre and post-fork services. // After the fork, returns only the iterator from the post-fork service. func (l *forkingDV5Listener) RandomNodes() enode.Iterator { - if l.netCfg.PastAlanFork() { - l.closePreForkListener() - return l.postForkListener.RandomNodes() - } - fairMix := enode.NewFairMix(l.iteratorTimeout) fairMix.AddSource(&annotatedIterator{l.postForkListener.RandomNodes(), "post"}) fairMix.AddSource(&annotatedIterator{l.preForkListener.RandomNodes(), "pre"}) @@ -68,11 +56,6 @@ func (l *forkingDV5Listener) RandomNodes() enode.Iterator { // Before the fork, returns all nodes from the pre and post-fork listeners. // After the fork, returns only the result from the post-fork service. func (l *forkingDV5Listener) AllNodes() []*enode.Node { - if l.netCfg.PastAlanFork() { - l.closePreForkListener() - return l.postForkListener.AllNodes() - } - enodes := l.postForkListener.AllNodes() enodes = append(enodes, l.preForkListener.AllNodes()...) return enodes @@ -81,11 +64,6 @@ func (l *forkingDV5Listener) AllNodes() []*enode.Node { // Sends a ping in the post-fork service. // Before the fork, it also tries to ping with the pre-fork service in case of error. func (l *forkingDV5Listener) Ping(node *enode.Node) error { - if l.netCfg.PastAlanFork() { - l.closePreForkListener() - return l.postForkListener.Ping(node) - } - err := l.postForkListener.Ping(node) if err != nil { return l.preForkListener.Ping(node) @@ -96,10 +74,6 @@ func (l *forkingDV5Listener) Ping(node *enode.Node) error { // Returns the LocalNode using the post-fork listener. // Both pre and post-fork listeners should have the same LocalNode. 
func (l *forkingDV5Listener) LocalNode() *enode.LocalNode { - if l.netCfg.PastAlanFork() { - l.closePreForkListener() - return l.postForkListener.LocalNode() - } return l.postForkListener.LocalNode() } @@ -111,9 +85,7 @@ func (l *forkingDV5Listener) Close() { // closePreForkListener ensures preForkListener is closed once func (l *forkingDV5Listener) closePreForkListener() { - l.closeOnce.Do(func() { - l.preForkListener.Close() - }) + l.preForkListener.Close() } // annotatedIterator wraps an enode.Iterator with metrics collection. diff --git a/network/discovery/forking_dv5_listener_test.go b/network/discovery/forking_dv5_listener_test.go index 11aabdf3f4..c5db74b8cc 100644 --- a/network/discovery/forking_dv5_listener_test.go +++ b/network/discovery/forking_dv5_listener_test.go @@ -5,6 +5,7 @@ import ( "time" "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ssvlabs/ssv/networkconfig" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap" @@ -59,9 +60,10 @@ func TestForkListener_Lookup(t *testing.T) { forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) nodes := forkListener.Lookup(enode.ID{}) - // only post-fork nodes - assert.Len(t, nodes, 1) + assert.Len(t, nodes, 2) + // post-fork nodes are set first assert.Equal(t, nodes[0], nodeFromPostForkListener) + assert.Equal(t, nodes[1], nodeFromPreForkListener) }) } @@ -84,7 +86,6 @@ func TestForkListener_RandomNodes(t *testing.T) { require.True(t, iter.Next()) nodes = append(nodes, iter.Node()) } - iter.Close() assert.Len(t, nodes, 2) // post-fork nodes are set first @@ -105,14 +106,17 @@ func TestForkListener_RandomNodes(t *testing.T) { iter := forkListener.RandomNodes() defer iter.Close() var nodes []*enode.Node - for iter.Next() { + for i := 0; i < 2; i++ { + require.True(t, iter.Next()) nodes = append(nodes, iter.Node()) } - iter.Close() - // only post-fork nodes - assert.Len(t, nodes, 1) - assert.Equal(t, nodes[0], nodeFromPostForkListener) + // there should be no difference between pre-fork and post-fork + assert.Equal(t, nodes[0], nodeFromPreForkListener) + assert.Equal(t, nodes[1], nodeFromPostForkListener) + + // No more next + requireNextTimeout(t, false, iter, 10*time.Millisecond) }) } @@ -140,69 +144,44 @@ func TestForkListener_AllNodes(t *testing.T) { forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) nodes := forkListener.AllNodes() - // only post-fork nodes - assert.Len(t, nodes, 1) + assert.Len(t, nodes, 2) + // there should be no difference between pre-fork and post-fork assert.Equal(t, nodes[0], nodeFromPostForkListener) + assert.Equal(t, nodes[1], nodeFromPreForkListener) }) } func TestForkListener_PingPreFork(t *testing.T) { - pingPeer := NewTestingNode(t) // any peer to ping - localNode := NewLocalNode(t) - - preForkListener := NewMockListener(localNode, []*enode.Node{}) - postForkListener := NewMockListener(localNode, []*enode.Node{}) - - netCfg := PreForkNetworkConfig() - forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) - - t.Run("Post-Fork succeeds", func(t *testing.T) { - postForkListener.SetNodesForPingError([]*enode.Node{}) - preForkListener.SetNodesForPingError([]*enode.Node{pingPeer}) - err := forkListener.Ping(pingPeer) - assert.NoError(t, err) - }) - - t.Run("Post-Fork fails and Pre-Fork succeeds", func(t *testing.T) { - postForkListener.SetNodesForPingError([]*enode.Node{pingPeer}) - 
preForkListener.SetNodesForPingError([]*enode.Node{}) - err := forkListener.Ping(pingPeer) - assert.NoError(t, err) - }) - - t.Run("Post-Fork and Pre-Fork fails", func(t *testing.T) { - postForkListener.SetNodesForPingError([]*enode.Node{pingPeer}) - preForkListener.SetNodesForPingError([]*enode.Node{pingPeer}) - err := forkListener.Ping(pingPeer) - assert.ErrorContains(t, err, "failed ping") - }) -} - -func TestForkListener_PingPostFork(t *testing.T) { - pingPeer := NewTestingNode(t) // any peer to ping - localNode := NewLocalNode(t) + for _, netCfg := range []networkconfig.NetworkConfig{PreForkNetworkConfig(), PostForkNetworkConfig()} { + pingPeer := NewTestingNode(t) // any peer to ping + localNode := NewLocalNode(t) - preForkListener := NewMockListener(localNode, []*enode.Node{}) - postForkListener := NewMockListener(localNode, []*enode.Node{}) + preForkListener := NewMockListener(localNode, []*enode.Node{}) + postForkListener := NewMockListener(localNode, []*enode.Node{}) - netCfg := PostForkNetworkConfig() - forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) - - t.Run("Post-Fork succeeds", func(t *testing.T) { - postForkListener.SetNodesForPingError([]*enode.Node{}) - preForkListener.SetNodesForPingError([]*enode.Node{pingPeer}) - err := forkListener.Ping(pingPeer) - assert.NoError(t, err) - }) - - t.Run("Post-Fork fails and Pre-Fork succeeds", func(t *testing.T) { - postForkListener.SetNodesForPingError([]*enode.Node{pingPeer}) - preForkListener.SetNodesForPingError([]*enode.Node{}) - err := forkListener.Ping(pingPeer) + forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) - // Pre-Fork would succeed but it's not called since we're on post-fork - assert.ErrorContains(t, err, "failed ping") - }) + t.Run("Post-Fork succeeds", func(t *testing.T) { + postForkListener.SetNodesForPingError([]*enode.Node{}) + preForkListener.SetNodesForPingError([]*enode.Node{pingPeer}) + err := forkListener.Ping(pingPeer) + assert.NoError(t, err) + }) + + t.Run("Post-Fork fails and Pre-Fork succeeds", func(t *testing.T) { + postForkListener.SetNodesForPingError([]*enode.Node{pingPeer}) + preForkListener.SetNodesForPingError([]*enode.Node{}) + err := forkListener.Ping(pingPeer) + assert.NoError(t, err) + }) + + t.Run("Post-Fork and Pre-Fork fails", func(t *testing.T) { + postForkListener.SetNodesForPingError([]*enode.Node{pingPeer}) + preForkListener.SetNodesForPingError([]*enode.Node{pingPeer}) + err := forkListener.Ping(pingPeer) + assert.ErrorContains(t, err, "failed ping") + }) + } } func TestForkListener_LocalNode(t *testing.T) { @@ -227,46 +206,29 @@ func TestForkListener_LocalNode(t *testing.T) { } func TestForkListener_Close(t *testing.T) { + for name, netCfg := range map[string]networkconfig.NetworkConfig{ + "Pre-Fork": PreForkNetworkConfig(), + "Post-Fork": PostForkNetworkConfig(), + } { + t.Run(name, func(t *testing.T) { + preForkListener := NewMockListener(&enode.LocalNode{}, []*enode.Node{}) + postForkListener := NewMockListener(&enode.LocalNode{}, []*enode.Node{}) - t.Run("Pre-Fork", func(t *testing.T) { - preForkListener := NewMockListener(&enode.LocalNode{}, []*enode.Node{}) - postForkListener := NewMockListener(&enode.LocalNode{}, []*enode.Node{}) + forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) - netCfg := PreForkNetworkConfig() - forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, 
postForkListener, iteratorTimeout, netCfg) + // Call any method so that it will check whether to close the pre-fork listener + _ = forkListener.AllNodes() - // Call any method so that it will check whether to close the pre-fork listener - _ = forkListener.AllNodes() - - assert.False(t, preForkListener.closed) - assert.False(t, postForkListener.closed) + assert.False(t, preForkListener.closed) + assert.False(t, postForkListener.closed) - // Close - forkListener.Close() + // Close + forkListener.Close() - assert.True(t, preForkListener.closed) - assert.True(t, postForkListener.closed) - }) - - t.Run("Post-Fork", func(t *testing.T) { - preForkListener := NewMockListener(&enode.LocalNode{}, []*enode.Node{}) - postForkListener := NewMockListener(&enode.LocalNode{}, []*enode.Node{}) - - netCfg := PostForkNetworkConfig() - forkListener := NewForkingDV5Listener(zap.NewNop(), preForkListener, postForkListener, iteratorTimeout, netCfg) - - // Call any method so that it will check whether to close the pre-fork listener - _ = forkListener.AllNodes() - - assert.True(t, preForkListener.closed) // pre-fork listener is closed - assert.False(t, postForkListener.closed) - - // Close - forkListener.Close() - - assert.True(t, preForkListener.closed) - assert.True(t, postForkListener.closed) - }) + assert.True(t, preForkListener.closed) + assert.True(t, postForkListener.closed) + }) + } } func requireNextTimeout(t *testing.T, expected bool, iter enode.Iterator, timeout time.Duration) { From ec5e0adfa64faf639b41c1116ab8c9f6c10a914a Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 10 Oct 2024 14:15:28 +0200 Subject: [PATCH 31/35] fix: gosec warnings (#1785) * resolve gosec issues * add comment to clarify handleOperatorRemoved implementation * fix review comments --- e2e/cmd/ssv-e2e/logs_catcher.go | 3 ++- e2e/cmd/ssv-e2e/share_update.go | 3 ++- ekm/signer_storage.go | 1 + eth/eventhandler/handlers.go | 13 +------------ protocol/v2/types/ssvshare.go | 2 +- 5 files changed, 7 insertions(+), 15 deletions(-) diff --git a/e2e/cmd/ssv-e2e/logs_catcher.go b/e2e/cmd/ssv-e2e/logs_catcher.go index 59aaffdaba..2c76680272 100644 --- a/e2e/cmd/ssv-e2e/logs_catcher.go +++ b/e2e/cmd/ssv-e2e/logs_catcher.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "os" + "path/filepath" "go.uber.org/zap" @@ -73,7 +74,7 @@ func (cmd *LogsCatcherCmd) Run(logger *zap.Logger, globals Globals) error { // UnmarshalBlsVerificationJSON reads the JSON file and unmarshals it into []*CorruptedShare. 
func UnmarshalBlsVerificationJSON(filePath string) ([]*logs_catcher.CorruptedShare, error) { - contents, err := os.ReadFile(filePath) + contents, err := os.ReadFile(filepath.Clean(filePath)) if err != nil { return nil, fmt.Errorf("error reading json file for BLS verification: %s, %w", filePath, err) } diff --git a/e2e/cmd/ssv-e2e/share_update.go b/e2e/cmd/ssv-e2e/share_update.go index bef16302dc..0f2bf0ac1b 100644 --- a/e2e/cmd/ssv-e2e/share_update.go +++ b/e2e/cmd/ssv-e2e/share_update.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "fmt" "os" + "path/filepath" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ssvlabs/ssv-spec/types" @@ -164,7 +165,7 @@ func openDB(logger *zap.Logger, dbPath string) (*kv.BadgerDB, error) { func readOperatorPrivateKeyFromFile(filePath string) (string, error) { var config ShareUpdateCmd - data, err := os.ReadFile(filePath) + data, err := os.ReadFile(filepath.Clean(filePath)) if err != nil { return "", fmt.Errorf("failed to read file: %s, error: %w", filePath, err) } diff --git a/ekm/signer_storage.go b/ekm/signer_storage.go index 008e0eb257..70a3d98106 100644 --- a/ekm/signer_storage.go +++ b/ekm/signer_storage.go @@ -406,6 +406,7 @@ func (s *storage) decrypt(data []byte) ([]byte, error) { } nonce, ciphertext := data[:nonceSize], data[nonceSize:] + // #nosec G407 false positive: https://github.com/securego/gosec/issues/1211 return gcm.Open(nil, nonce, ciphertext, nil) } diff --git a/eth/eventhandler/handlers.go b/eth/eventhandler/handlers.go index be06fc9db5..2b4ce2782c 100644 --- a/eth/eventhandler/handlers.go +++ b/eth/eventhandler/handlers.go @@ -117,18 +117,7 @@ func (eh *EventHandler) handleOperatorRemoved(txn basedb.Txn, event *contract.Co fields.Owner(od.OwnerAddress), ) - // TODO: In original handler we didn't delete operator data, so this behavior was preserved. However we likely need to. - // TODO: Delete operator from all the shares. - // var shares []Share - // for _, s := range nodeStorage.Shares().List() { - // // if operator in committee, delete him from it: - // // shares = append(shares, s) - // } - // nodeStorage.Shares().Save(shares) - // err = eh.nodeStorage.DeleteOperatorData(txn, od.ID) - // if err != nil { - // return err - // } + // This function is currently no-op and it will do nothing. Operator removed event is not used in the current implementation. 
logger.Debug("processed event") return nil diff --git a/protocol/v2/types/ssvshare.go b/protocol/v2/types/ssvshare.go index 2b796b76bc..6cddbd7c18 100644 --- a/protocol/v2/types/ssvshare.go +++ b/protocol/v2/types/ssvshare.go @@ -157,7 +157,7 @@ func ComputeCommitteeID(committee []spectypes.OperatorID) spectypes.CommitteeID // Convert to bytes bytes := make([]byte, len(committee)*4) for i, v := range committee { - binary.LittleEndian.PutUint32(bytes[i*4:], uint32(v)) // nolint:gosec + binary.LittleEndian.PutUint32(bytes[i*4:], uint32(v)) // #nosec G115 } // Hash return sha256.Sum256(bytes) From d26f68533b541ff7bfa054c3c9a7bfde62fedd26 Mon Sep 17 00:00:00 2001 From: iurii-ssv <183610124+iurii-ssv@users.noreply.github.com> Date: Sun, 13 Oct 2024 12:30:09 +0300 Subject: [PATCH 32/35] gitignore: .idea folder (#1773) * gitignore: idea folder * remove .idea files --- .gitignore | 22 +--------------------- .idea/modules.xml | 8 -------- .idea/runConfigurations/node_1.xml | 5 ----- .idea/runConfigurations/node_2.xml | 5 ----- .idea/runConfigurations/node_3.xml | 5 ----- .idea/runConfigurations/node_4.xml | 5 ----- .idea/ssv.iml | 18 ------------------ .idea/vcs.xml | 6 ------ 8 files changed, 1 insertion(+), 73 deletions(-) delete mode 100644 .idea/modules.xml delete mode 100644 .idea/runConfigurations/node_1.xml delete mode 100644 .idea/runConfigurations/node_2.xml delete mode 100644 .idea/runConfigurations/node_3.xml delete mode 100644 .idea/runConfigurations/node_4.xml delete mode 100644 .idea/ssv.iml delete mode 100644 .idea/vcs.xml diff --git a/.gitignore b/.gitignore index 4a05c4caa1..fddffc3963 100644 --- a/.gitignore +++ b/.gitignore @@ -24,27 +24,7 @@ vendor/ /config/config.yaml /config/config*/ -# User-specific stuff: -.idea/**/workspace.xml -.idea/**/tasks.xml -.idea/watcherTasks.xml -.idea/misc.xml -.idea/dictionaries -.idea/codeStyles -.idea/shelf - -# Sensitive or high-churn files: -.idea/**/dataSources/ -.idea/**/dataSources.ids -.idea/**/dataSources.xml -.idea/**/dataSources.local.xml -.idea/**/sqlDataSources.xml -.idea/**/dynamic.xml -.idea/**/uiDesigner.xml - -# Gradle: -.idea/**/gradle.xml -.idea/**/libraries +.idea/ docker-compose-local.yaml diff --git a/.idea/modules.xml b/.idea/modules.xml deleted file mode 100644 index 629e30ffe0..0000000000 --- a/.idea/modules.xml +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - \ No newline at end of file diff --git a/.idea/runConfigurations/node_1.xml b/.idea/runConfigurations/node_1.xml deleted file mode 100644 index 3d97bad68e..0000000000 --- a/.idea/runConfigurations/node_1.xml +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/.idea/runConfigurations/node_2.xml b/.idea/runConfigurations/node_2.xml deleted file mode 100644 index 34bab7fff9..0000000000 --- a/.idea/runConfigurations/node_2.xml +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/.idea/runConfigurations/node_3.xml b/.idea/runConfigurations/node_3.xml deleted file mode 100644 index 6ed0150f71..0000000000 --- a/.idea/runConfigurations/node_3.xml +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/.idea/runConfigurations/node_4.xml b/.idea/runConfigurations/node_4.xml deleted file mode 100644 index f45de47f22..0000000000 --- a/.idea/runConfigurations/node_4.xml +++ /dev/null @@ -1,5 +0,0 @@ - - - - - diff --git a/.idea/ssv.iml b/.idea/ssv.iml deleted file mode 100644 index 3aed5f3bf6..0000000000 --- a/.idea/ssv.iml +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - - - - - - - - - \ No newline at end 
of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml deleted file mode 100644 index 94a25f7f4c..0000000000 --- a/.idea/vcs.xml +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file From d45ae2129300d3b636131300dd973935877952a8 Mon Sep 17 00:00:00 2001 From: rehs0y Date: Tue, 15 Oct 2024 13:23:38 +0300 Subject: [PATCH 33/35] set discovery interval to 1ms (#1792) --- network/discovery/dv5_service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/discovery/dv5_service.go b/network/discovery/dv5_service.go index f2116b69de..bf233091df 100644 --- a/network/discovery/dv5_service.go +++ b/network/discovery/dv5_service.go @@ -22,7 +22,7 @@ import ( ) var ( - defaultDiscoveryInterval = time.Millisecond * 100 + defaultDiscoveryInterval = time.Millisecond * 1 publishENRTimeout = time.Minute ) From 8297d92cd9655301a51ed802e1d2fee0b275e7ca Mon Sep 17 00:00:00 2001 From: rehs0y Date: Tue, 15 Oct 2024 13:24:35 +0300 Subject: [PATCH 34/35] chore: align latest unstable changes into stage (#1797) * update .gitlab-ci.yml from main * deploy to holesky prod 1 & 2 * apk add curl * fix .gitlab-ci.yml * Updated github ci * deploy to mainnet-1 * gitlab * Revert "Revert "alan testnet: disable protocol ID & set fork epochs (#1750)"" This reverts commit 0dd67f29e0b94dc128bef841ac897b775cc5e81a. * deploy to testnet * replace docker hub bloxstaking to ssvlabs (#1655) Co-authored-by: guy muroch * fix * dummy commit to deploy * deploy to mainnet 1 * deploy holesky bootnode with both discoveries (pre-fork and post-fork) * deploy holesky 1 & 2 with latest discovery fixes * deploy holesky 3 & 4 * fake post-fork protocol ID * revert fake post-fork protocol ID and instead start at post-fork * redeploy holesky bootnode but with correct network * fork on 84388 * Changed ports names * Added label * don't stop pre-fork discovery after the fork. * set test fork epoch * fix fork epoch * remove once in closing the forkingDV5Listener * fix tests * fix tests for discovery startup at post-fork * don't stop discovery on fork. 
* set fork epochs - holesky 84600, // Oct-08-2024 12:00:00 PM UTC mainnet 327375, // Nov-25-2024 12:00:23 PM UTC * add Eridian's bootnode --------- Co-authored-by: moshe-blox Co-authored-by: stoyan.peev Co-authored-by: guym-blox <83158283+guym-blox@users.noreply.github.com> Co-authored-by: guy muroch Co-authored-by: systemblox <40427708+systemblox@users.noreply.github.com> --- .gitlab-ci.yml | 5 ++-- .../holesky/boot-node-holesky-deployment.yml | 2 ++ Makefile | 2 +- docs/OPERATOR_GETTING_STARTED.md | 10 ++++---- docs/bootnode.md | 2 +- network/discovery/dv5_service.go | 24 ------------------- network/discovery/service_test.go | 4 ++-- networkconfig/holesky.go | 2 +- networkconfig/mainnet.go | 5 ++-- scripts/generate_local_config.sh | 2 +- 10 files changed, 19 insertions(+), 39 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 84a2367630..148f8fd3ac 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -150,12 +150,12 @@ Deploy nodes to prod: # │ 🟠 Deploy Holesky Bootnode | # +-------------------------------+ # █▓▒░ Keep commented unless you're testing the bootnode ░▒▓█ - #- .k8/production/holesky/scripts/deploy-boot-nodes.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 + # - .k8/production/holesky/scripts/deploy-boot-nodes.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 # # +----------------------------+ # | 🔴 Deploy SSV Mainnet nodes | # +----------------------------+ -# - .k8/production/mainnet/scripts/deploy-cluster-1-4.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 + # - .k8/production/mainnet/scripts/deploy-cluster-1-4.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 # # +------------------------------+ # │ 🔴 Deploy Mainnet Bootnode | @@ -205,3 +205,4 @@ Deploy exporter to prod: - .k8/production/mainnet/scripts/deploy-exporters-3.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION only: - main + diff --git a/.k8/production/holesky/boot-node-holesky-deployment.yml b/.k8/production/holesky/boot-node-holesky-deployment.yml index daa89e7c9b..8416a8e45c 100644 --- a/.k8/production/holesky/boot-node-holesky-deployment.yml +++ b/.k8/production/holesky/boot-node-holesky-deployment.yml @@ -107,6 +107,8 @@ spec: value: "5003" - name: UDP_PORT value: "4003" + - name: NETWORK + value: "holesky" volumeMounts: - mountPath: /data/bootnode name: boot-node-holesky diff --git a/Makefile b/Makefile index 57a1d00b44..6696eb355f 100644 --- a/Makefile +++ b/Makefile @@ -132,7 +132,7 @@ docker: .PHONY: docker-image docker-image: @echo "node ${NODES_ID}" - @sudo docker rm -f ssv_node && docker run -d --env-file .env --restart unless-stopped --name=ssv_node -p 13000:13000 -p 12000:12000/udp 'bloxstaking/ssv-node:latest' make BUILD_PATH=/go/bin/ssvnode start-node + @sudo docker rm -f ssv_node && docker run -d --env-file 
.env --restart unless-stopped --name=ssv_node -p 13000:13000 -p 12000:12000/udp 'ssvlabs/ssv-node:latest' make BUILD_PATH=/go/bin/ssvnode start-node NODES=ssv-node-1 ssv-node-2 ssv-node-3 ssv-node-4 .PHONY: docker-all diff --git a/docs/OPERATOR_GETTING_STARTED.md b/docs/OPERATOR_GETTING_STARTED.md index aaf3b9fb4c..1cedbb66ab 100644 --- a/docs/OPERATOR_GETTING_STARTED.md +++ b/docs/OPERATOR_GETTING_STARTED.md @@ -81,7 +81,7 @@ $ ./install.sh The following command will generate your operator's public and private keys (appear as "pk" and "sk" in the output). ``` -$ docker run --rm -it 'bloxstaking/ssv-node:latest' /go/bin/ssvnode generate-operator-keys +$ docker run --rm -it 'ssvlabs/ssv-node:latest' /go/bin/ssvnode generate-operator-keys ``` ### 5. Create a Configuration File @@ -153,7 +153,7 @@ Before start, make sure the clock is synced with NTP servers. Then, run the docker image in the same folder you created the `config.yaml`: ```shell -$ docker run -d --restart unless-stopped --name=ssv_node -e CONFIG_PATH=./config.yaml -p 13001:13001 -p 12001:12001/udp -v $(pwd)/config.yaml:/config.yaml -v $(pwd):/data --log-opt max-size=500m --log-opt max-file=10 -it 'bloxstaking/ssv-node:latest' make BUILD_PATH=/go/bin/ssvnode start-node \ +$ docker run -d --restart unless-stopped --name=ssv_node -e CONFIG_PATH=./config.yaml -p 13001:13001 -p 12001:12001/udp -v $(pwd)/config.yaml:/config.yaml -v $(pwd):/data --log-opt max-size=500m --log-opt max-file=10 -it 'ssvlabs/ssv-node:latest' make BUILD_PATH=/go/bin/ssvnode start-node \ && docker logs ssv_node --follow ``` @@ -162,13 +162,13 @@ $ docker run -d --restart unless-stopped --name=ssv_node -e CONFIG_PATH=./config The current version is available through logs or a cmd: ```shell -$ docker run --rm -it 'bloxstaking/ssv-node:latest' /go/bin/ssvnode version +$ docker run --rm -it 'ssvlabs/ssv-node:latest' /go/bin/ssvnode version ``` -In order to update, kill running container and pull the latest image or a specific version (`bloxstaking/ssv-node:`) +In order to update, kill running container and pull the latest image or a specific version (`ssvlabs/ssv-node:`) ```shell -$ docker rm -f ssv_node && docker pull bloxstaking/ssv-node:latest +$ docker rm -f ssv_node && docker pull ssvlabs/ssv-node:latest ``` Now run the container again as specified above in step 6. diff --git a/docs/bootnode.md b/docs/bootnode.md index 799352869a..99d94150bb 100644 --- a/docs/bootnode.md +++ b/docs/bootnode.md @@ -54,7 +54,7 @@ _Note: This is an example. 
Replace the placeholders as explained below._ docker rm -f ssv_bootnode && docker run -d --restart unless-stopped --name=ssv_bootnode \ -e CONFIG_PATH=/config.yaml -p 5000:5000 -p 4000:4000/udp \ -v $(pwd)/config.yaml:/config.yaml -v $(pwd):/data -it \ - 'bloxstaking/ssv-node-unstable:latest' make BUILD_PATH=/go/bin/ssvnode start-boot-node + 'ssvlabs/ssv-node-unstable:latest' make BUILD_PATH=/go/bin/ssvnode start-boot-node ``` _Note: `/data` must be a persistent volume to preserve the ENR across restarts!_ diff --git a/network/discovery/dv5_service.go b/network/discovery/dv5_service.go index bf233091df..8733662c82 100644 --- a/network/discovery/dv5_service.go +++ b/network/discovery/dv5_service.go @@ -224,30 +224,6 @@ func (dvs *DiscV5Service) initDiscV5Listener(logger *zap.Logger, discOpts *Optio protocolID = DefaultSSVProtocolID } - // After the Alan fork, on a restart, we only use the discovery with the ProtocolID restriction - if dvs.networkConfig.PastAlanFork() { - dv5Cfg, err := opts.DiscV5Cfg(logger, WithProtocolID(protocolID)) - if err != nil { - return err - } - dv5Listener, err := discover.ListenV5(udpConn, localNode, *dv5Cfg) - if err != nil { - return errors.Wrap(err, "could not create discV5 listener") - } - dvs.dv5Listener = dv5Listener - dvs.bootnodes = dv5Cfg.Bootnodes - - logger.Debug("started discv5 listener (UDP)", - fields.BindIP(bindIP), - zap.Uint16("UdpPort", opts.Port), - fields.ENRLocalNode(localNode), - fields.Domain(discOpts.NetworkConfig.DomainType()), - fields.ProtocolID(discOpts.NetworkConfig.DiscoveryProtocolID), - ) - - return nil - } - // New discovery, with ProtocolID restriction, to be kept post-fork unhandled := make(chan discover.ReadPacket, 100) // size taken from https://github.com/ethereum/go-ethereum/blob/v1.13.5/p2p/server.go#L551 sharedConn := &SharedUDPConn{udpConn, unhandled} diff --git a/network/discovery/service_test.go b/network/discovery/service_test.go index 8baa399c13..54c503ac32 100644 --- a/network/discovery/service_test.go +++ b/network/discovery/service_test.go @@ -295,10 +295,10 @@ func TestDiscV5ServiceListenerType(t *testing.T) { // Check listener type _, ok := dvs.dv5Listener.(*forkingDV5Listener) - require.False(t, ok) + require.True(t, ok) _, ok = dvs.dv5Listener.(*discover.UDPv5) - require.True(t, ok) + require.False(t, ok) // Check bootnodes CheckBootnodes(t, dvs, netConfig) diff --git a/networkconfig/holesky.go b/networkconfig/holesky.go index 2d7d52a517..32eec0ec32 100644 --- a/networkconfig/holesky.go +++ b/networkconfig/holesky.go @@ -14,9 +14,9 @@ var Holesky = NetworkConfig{ GenesisDomainType: spectypes.DomainType{0x0, 0x0, 0x5, 0x1}, AlanDomainType: spectypes.DomainType{0x0, 0x0, 0x5, 0x2}, GenesisEpoch: 1, + AlanForkEpoch: 84600, // Oct-08-2024 12:00:00 PM UTC RegistrySyncOffset: new(big.Int).SetInt64(181612), RegistryContractAddr: "0x38A4794cCEd47d3baf7370CcC43B560D3a1beEFA", - AlanForkEpoch: 999999999, DiscoveryProtocolID: [6]byte{'s', 's', 'v', 'd', 'v', '5'}, Bootnodes: []string{ "enr:-Li4QFIQzamdvTxGJhvcXG_DFmCeyggSffDnllY5DiU47pd_K_1MRnSaJimWtfKJ-MD46jUX9TwgW5Jqe0t4pH41RYWGAYuFnlyth2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhCLdu_SJc2VjcDI1NmsxoQN4v-N9zFYwEqzGPBBX37q24QPFvAVUtokIo1fblIsmTIN0Y3CCE4uDdWRwgg-j", diff --git a/networkconfig/mainnet.go b/networkconfig/mainnet.go index 05fa2e92be..dd3042b3b7 100644 --- a/networkconfig/mainnet.go +++ b/networkconfig/mainnet.go @@ -14,6 +14,7 @@ var Mainnet = NetworkConfig{ GenesisDomainType: spectypes.GenesisMainnet, AlanDomainType: 
spectypes.AlanMainnet, GenesisEpoch: 218450, + AlanForkEpoch: 327375, // Nov-25-2024 12:00:23 PM UTC RegistrySyncOffset: new(big.Int).SetInt64(17507487), RegistryContractAddr: "0xDD9BC35aE942eF0cFa76930954a156B3fF30a4E1", Bootnodes: []string{ @@ -23,8 +24,8 @@ var Mainnet = NetworkConfig{ // 0NEinfra bootnode "enr:-Li4QDwrOuhEq5gBJBzFUPkezoYiy56SXZUwkSD7bxYo8RAhPnHyS0de0nOQrzl-cL47RY9Jg8k6Y_MgaUd9a5baYXeGAYnfZE76h2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhDaTS0mJc2VjcDI1NmsxoQMZzUHaN3eClRgF9NAqRNc-ilGpJDDJxdenfo4j-zWKKYN0Y3CCE4iDdWRwgg-g", - // Taiga - "enr:-Li4QOg_lfX8uhSKGfm0RDbARe9j1ujim6JiQ-h8E1QB175DWIaGAvzXLxa-OsLjrX24zYstxMQkDHkQTdm-Qq406wuGAYj8K5H3h2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhJbmcQeJc2VjcDI1NmsxoQIYVg92mRyqn519Og6VA6fdgqeFxKgQO87IX64zJcmqhoN0Y3CCE4mDdWRwgg-h", + // Eridian (eridianalpha.com) + "enr:-Li4QIzHQ2H82twhvsu8EePZ6CA1gl0_B0WWsKaT07245TkHUqXay-MXEgObJB7BxMFl8TylFxfnKNxQyGTXh-2nAlOGAYuraxUEh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBKCzUSJc2VjcDI1NmsxoQNKskkQ6-mBdBWr_ORJfyHai5uD0vL6Fuw90X0sPwmRsoN0Y3CCE4iDdWRwgg-g", // CryptoManufaktur "enr:-Li4QH7FwJcL8gJj0zHAITXqghMkG-A5bfWh2-3Q7vosy9D1BS8HZk-1ITuhK_rfzG3v_UtBDI6uNJZWpdcWfrQFCxKGAYnQ1DRCh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLb3g2Jc2VjcDI1NmsxoQKeSDcZWSaY9FC723E9yYX1Li18bswhLNlxBZdLfgOKp4N0Y3CCE4mDdWRwgg-h", diff --git a/scripts/generate_local_config.sh b/scripts/generate_local_config.sh index f40d9d4f89..d6725af366 100755 --- a/scripts/generate_local_config.sh +++ b/scripts/generate_local_config.sh @@ -19,7 +19,7 @@ function create_operators() { mkdir -p config for ((i=1;i<=OP_SIZE;i++)); do - docker run --rm -it 'bloxstaking/ssv-node:latest' /go/bin/ssvnode generate-operator-keys > tmp.log + docker run --rm -it 'ssvlabs/ssv-node:latest' /go/bin/ssvnode generate-operator-keys > tmp.log PUB="$(extract_pubkey "tmp.log")" val="$PUB" yq e '.publicKeys += [env(val)]' -i "./operators.yaml" PRIV="$(extract_privkey "tmp.log")" From 3de919b6a06efbd54a754ae656ade35b4e32233d Mon Sep 17 00:00:00 2001 From: Nikita Kryuchkov Date: Thu, 17 Oct 2024 16:40:23 +0400 Subject: [PATCH 35/35] exporter: add pubkey to FullData on API response (#1782) --- exporter/api/msg.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/exporter/api/msg.go b/exporter/api/msg.go index de35f7371e..dc3ef2234a 100644 --- a/exporter/api/msg.go +++ b/exporter/api/msg.go @@ -63,11 +63,15 @@ func ParticipantsAPIData(msgs ...qbftstorage.ParticipantsRangeEntry) (interface{ apiMsgs := make([]*ParticipantsAPI, 0) for _, msg := range msgs { + dutyExecutorID := msg.Identifier.GetDutyExecutorID() + blsPubKey := phase0.BLSPubKey{} + copy(blsPubKey[:], dutyExecutorID) + apiMsg := &ParticipantsAPI{ Signers: msg.Signers, Slot: msg.Slot, Identifier: msg.Identifier[:], - ValidatorPK: hex.EncodeToString(msg.Identifier.GetDutyExecutorID()), + ValidatorPK: hex.EncodeToString(dutyExecutorID), Role: msg.Identifier.GetRoleType().String(), Message: specqbft.Message{ MsgType: specqbft.CommitMsgType, @@ -75,7 +79,12 @@ func ParticipantsAPIData(msgs ...qbftstorage.ParticipantsRangeEntry) (interface{ Identifier: msg.Identifier[:], Round: specqbft.FirstRound, }, - FullData: &spectypes.ValidatorConsensusData{Duty: spectypes.ValidatorDuty{Slot: msg.Slot}}, + FullData: &spectypes.ValidatorConsensusData{ + Duty: spectypes.ValidatorDuty{ + PubKey: blsPubKey, + Slot: msg.Slot, + }, + }, } apiMsgs = append(apiMsgs, apiMsg)