diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 000000000..8e3029333 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,84 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: [ "dev", "master" ] + pull_request: + branches: [ "dev", "master" ] + schedule: + - cron: '22 22 * * 4' + +jobs: + analyze: + name: Analyze + # Runner size impacts CodeQL analysis time. To learn more, please see: + # - https://gh.io/recommended-hardware-resources-for-running-codeql + # - https://gh.io/supported-runners-and-hardware-resources + # - https://gh.io/using-larger-runners + # Consider using larger runners for possible analysis time improvements. + runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} + timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }} + permissions: + # required for all workflows + security-events: write + + # only required for workflows in private repositories + actions: read + contents: read + + strategy: + fail-fast: false + matrix: + language: [ 'go' ] + # CodeQL supports [ 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' ] + # Use only 'java-kotlin' to analyze code written in Java, Kotlin or both + # Use only 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both + # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + + # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality + + + # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v3 + + # ℹī¸ Command-line programs to run using the OS shell. + # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + + # If the Autobuild fails above, remove it and uncomment the following three lines. + # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. 
+ + # - run: | + # echo "Run, Build Application using script" + # ./location_of_script_within_repo/buildscript.sh + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 8e4c21faf..bb56bbdb2 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -47,10 +47,20 @@ jobs: uses: n8maninger/action-golang-test@v1 with: args: "-race;-short" + - name: Test Stores - MySQL + if: matrix.os == 'ubuntu-latest' + uses: n8maninger/action-golang-test@v1 + env: + RENTERD_DB_URI: 127.0.0.1:3800 + RENTERD_DB_USER: root + RENTERD_DB_PASSWORD: test + with: + package: "./stores" + args: "-race;-short" - name: Test Integration uses: n8maninger/action-golang-test@v1 with: - package: "./internal/testing/..." + package: "./internal/test/e2e/..." args: "-failfast;-race;-tags=testing;-timeout=30m" - name: Test Integration - MySQL if: matrix.os == 'ubuntu-latest' @@ -60,7 +70,7 @@ jobs: RENTERD_DB_USER: root RENTERD_DB_PASSWORD: test with: - package: "./internal/testing/..." + package: "./internal/test/e2e/..." args: "-failfast;-race;-tags=testing;-timeout=30m" - name: Build run: go build -o bin/ ./cmd/renterd diff --git a/README.md b/README.md index bcaca045a..a4ccc8681 100644 --- a/README.md +++ b/README.md @@ -214,14 +214,14 @@ setting does not have a default value, it can be updated using the settings API: In most cases the default set should match the set from your autopilot configuration in order for migrations to work properly. The contract set can be -overriden by passing it as a query string parameter to the worker's upload and +overridden by passing it as a query string parameter to the worker's upload and migrate endpoints. - `PUT /api/worker/objects/foo?contractset=foo` ### Redundancy -The default redundancy on mainnet is 30-10, on testnet it is 6-2. The redunancy +The default redundancy on mainnet is 30-10, on testnet it is 6-2. The redundancy can be updated using the settings API: - `GET /api/bus/setting/redundancy` diff --git a/alerts/alerts.go b/alerts/alerts.go index 4d6463fa2..6b009360d 100644 --- a/alerts/alerts.go +++ b/alerts/alerts.go @@ -35,6 +35,7 @@ const ( type ( Alerter interface { + Alerts(_ context.Context, opts AlertsOpts) (resp AlertsResponse, err error) RegisterAlert(_ context.Context, a Alert) error DismissAlerts(_ context.Context, ids ...types.Hash256) error } @@ -63,8 +64,29 @@ type ( alerts map[types.Hash256]Alert webhookBroadcaster webhooks.Broadcaster } + + AlertsOpts struct { + Offset int + Limit int + Severity Severity + } + + AlertsResponse struct { + Alerts []Alert `json:"alerts"` + HasMore bool `json:"hasMore"` + Totals struct { + Info int `json:"info"` + Warning int `json:"warning"` + Error int `json:"error"` + Critical int `json:"critical"` + } `json:"totals"` + } ) +func (ar AlertsResponse) Total() int { + return ar.Totals.Info + ar.Totals.Warning + ar.Totals.Error + ar.Totals.Critical +} + // String implements the fmt.Stringer interface. func (s Severity) String() string { switch s { @@ -81,15 +103,8 @@ func (s Severity) String() string { } } -// MarshalJSON implements the json.Marshaler interface. -func (s Severity) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`%q`, s.String())), nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (s *Severity) UnmarshalJSON(b []byte) error { - status := strings.Trim(string(b), `"`) - switch status { +func (s *Severity) LoadString(str string) error { + switch str { case severityInfoStr: *s = SeverityInfo case severityWarningStr: @@ -99,11 +114,21 @@ func (s *Severity) UnmarshalJSON(b []byte) error { case severityCriticalStr: *s = SeverityCritical default: - return fmt.Errorf("unrecognized severity: %v", status) + return fmt.Errorf("unrecognized severity: %v", str) } return nil } +// MarshalJSON implements the json.Marshaler interface. +func (s Severity) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`%q`, s.String())), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (s *Severity) UnmarshalJSON(b []byte) error { + return s.LoadString(strings.Trim(string(b), `"`)) +} + // RegisterAlert implements the Alerter interface. func (m *Manager) RegisterAlert(ctx context.Context, alert Alert) error { if alert.ID == (types.Hash256{}) { @@ -158,19 +183,46 @@ func (m *Manager) DismissAlerts(ctx context.Context, ids ...types.Hash256) error }) } -// Active returns the host's active alerts. -func (m *Manager) Active() []Alert { +// Alerts returns the host's active alerts. +func (m *Manager) Alerts(_ context.Context, opts AlertsOpts) (AlertsResponse, error) { m.mu.Lock() defer m.mu.Unlock() + offset, limit := opts.Offset, opts.Limit + resp := AlertsResponse{} + + if offset >= len(m.alerts) { + return resp, nil + } else if limit == -1 { + limit = len(m.alerts) + } + alerts := make([]Alert, 0, len(m.alerts)) for _, a := range m.alerts { + if a.Severity == SeverityInfo { + resp.Totals.Info++ + } else if a.Severity == SeverityWarning { + resp.Totals.Warning++ + } else if a.Severity == SeverityError { + resp.Totals.Error++ + } else if a.Severity == SeverityCritical { + resp.Totals.Critical++ + } + if opts.Severity != 0 && a.Severity != opts.Severity { + continue // filter by severity + } alerts = append(alerts, a) } sort.Slice(alerts, func(i, j int) bool { return alerts[i].Timestamp.After(alerts[j].Timestamp) }) - return alerts + alerts = alerts[offset:] + if limit < len(alerts) { + alerts = alerts[:limit] + resp.HasMore = true + } + resp.Alerts = alerts + return resp, nil } func (m *Manager) RegisterWebhookBroadcaster(b webhooks.Broadcaster) { @@ -204,6 +256,11 @@ func WithOrigin(alerter Alerter, origin string) Alerter { } } +// Alerts implements the Alerter interface. +func (a *originAlerter) Alerts(ctx context.Context, opts AlertsOpts) (resp AlertsResponse, err error) { + return a.alerter.Alerts(ctx, opts) +} + // RegisterAlert implements the Alerter interface. 
func (a *originAlerter) RegisterAlert(ctx context.Context, alert Alert) error { if alert.Data == nil { diff --git a/api/autopilot.go b/api/autopilot.go index 6283f64f3..fdd6c4942 100644 --- a/api/autopilot.go +++ b/api/autopilot.go @@ -91,9 +91,37 @@ type ( StartTime TimeRFC3339 `json:"startTime"` BuildState } -) -type ( + ConfigEvaluationRequest struct { + AutopilotConfig AutopilotConfig `json:"autopilotConfig"` + GougingSettings GougingSettings `json:"gougingSettings"` + RedundancySettings RedundancySettings `json:"redundancySettings"` + } + + ConfigRecommendation struct { + GougingSettings GougingSettings `json:"gougingSettings"` + } + + // ConfigEvaluationResponse is the response type for /evaluate + ConfigEvaluationResponse struct { + Hosts uint64 `json:"hosts"` + Usable uint64 `json:"usable"` + Unusable struct { + Blocked uint64 `json:"blocked"` + Gouging struct { + Contract uint64 `json:"contract"` + Download uint64 `json:"download"` + Gouging uint64 `json:"gouging"` + Pruning uint64 `json:"pruning"` + Upload uint64 `json:"upload"` + } `json:"gouging"` + NotAcceptingContracts uint64 `json:"notAcceptingContracts"` + NotScanned uint64 `json:"notScanned"` + Unknown uint64 `json:"unknown"` + } + Recommendation *ConfigRecommendation `json:"recommendation,omitempty"` + } + // HostHandlerResponse is the response type for the /host/:hostkey endpoint. HostHandlerResponse struct { Host hostdb.Host `json:"host"` diff --git a/api/host.go b/api/host.go index 0ad52e8ef..aea80a9fe 100644 --- a/api/host.go +++ b/api/host.go @@ -112,6 +112,6 @@ func (opts HostsForScanningOptions) Apply(values url.Values) { values.Set("limit", fmt.Sprint(opts.Limit)) } if !opts.MaxLastScan.IsZero() { - values.Set("maxLastScan", fmt.Sprint(TimeRFC3339(opts.MaxLastScan))) + values.Set("lastScan", fmt.Sprint(TimeRFC3339(opts.MaxLastScan))) } } diff --git a/api/multipart.go b/api/multipart.go index 955b78849..a191b2b13 100644 --- a/api/multipart.go +++ b/api/multipart.go @@ -46,9 +46,10 @@ type ( } CreateMultipartOptions struct { - Key object.EncryptionKey - MimeType string - Metadata ObjectUserMetadata + GenerateKey bool + Key *object.EncryptionKey + MimeType string + Metadata ObjectUserMetadata } ) @@ -81,11 +82,15 @@ type ( } MultipartCreateRequest struct { - Bucket string `json:"bucket"` - Path string `json:"path"` - Key object.EncryptionKey `json:"key"` - MimeType string `json:"mimeType"` - Metadata ObjectUserMetadata `json:"metadata"` + Bucket string `json:"bucket"` + Path string `json:"path"` + Key *object.EncryptionKey `json:"key"` + MimeType string `json:"mimeType"` + Metadata ObjectUserMetadata `json:"metadata"` + + // TODO: The next major version change should invert this to create a + // key by default + GenerateKey bool `json:"generateKey"` } MultipartCreateResponse struct { diff --git a/api/object.go b/api/object.go index 73bb9c45c..4b1993341 100644 --- a/api/object.go +++ b/api/object.go @@ -54,7 +54,7 @@ type ( Object struct { Metadata ObjectUserMetadata `json:"metadata,omitempty"` ObjectMetadata - object.Object + *object.Object } // ObjectMetadata contains various metadata about an object. @@ -83,9 +83,14 @@ type ( Object *Object `json:"object,omitempty"` } - // GetObjectResponse is the response type for the /worker/object endpoint. + // GetObjectResponse is the response type for the GET /worker/object endpoint. 
GetObjectResponse struct { - Content io.ReadCloser `json:"content"` + Content io.ReadCloser `json:"content"` + HeadObjectResponse + } + + // HeadObjectResponse is the response type for the HEAD /worker/object endpoint. + HeadObjectResponse struct { ContentType string `json:"contentType"` LastModified string `json:"lastModified"` Range *DownloadRange `json:"range,omitempty"` @@ -119,6 +124,10 @@ type ( Mode string `json:"mode"` } + ObjectsStatsOpts struct { + Bucket string + } + // ObjectsStatsResponse is the response type for the /bus/stats/objects endpoint. ObjectsStatsResponse struct { NumObjects uint64 `json:"numObjects"` // number of objects @@ -202,19 +211,24 @@ type ( Batch bool } + HeadObjectOptions struct { + Range DownloadRange + } + DownloadObjectOptions struct { GetObjectOptions Range DownloadRange } GetObjectOptions struct { - Prefix string - Offset int - Limit int - IgnoreDelim bool - Marker string - SortBy string - SortDir string + Prefix string + Offset int + Limit int + IgnoreDelim bool + Marker string + OnlyMetadata bool + SortBy string + SortDir string } ListObjectOptions struct { @@ -231,20 +245,18 @@ type ( // UploadObjectOptions is the options type for the worker client. UploadObjectOptions struct { - Offset int - MinShards int - TotalShards int - ContractSet string - DisablePreshardingEncryption bool - ContentLength int64 - MimeType string - Metadata ObjectUserMetadata + Offset int + MinShards int + TotalShards int + ContractSet string + ContentLength int64 + MimeType string + Metadata ObjectUserMetadata } UploadMultipartUploadPartOptions struct { - DisablePreshardingEncryption bool - EncryptionOffset int - ContentLength int64 + EncryptionOffset *int + ContentLength int64 } ) @@ -264,9 +276,6 @@ func (opts UploadObjectOptions) ApplyValues(values url.Values) { if opts.MimeType != "" { values.Set("mimetype", opts.MimeType) } - if opts.DisablePreshardingEncryption { - values.Set("disablepreshardingencryption", "true") - } } func (opts UploadObjectOptions) ApplyHeaders(h http.Header) { @@ -276,11 +285,8 @@ func (opts UploadObjectOptions) ApplyHeaders(h http.Header) { } func (opts UploadMultipartUploadPartOptions) Apply(values url.Values) { - if opts.DisablePreshardingEncryption { - values.Set("disablepreshardingencryption", "true") - } - if !opts.DisablePreshardingEncryption || opts.EncryptionOffset != 0 { - values.Set("offset", fmt.Sprint(opts.EncryptionOffset)) + if opts.EncryptionOffset != nil { + values.Set("offset", fmt.Sprint(*opts.EncryptionOffset)) } } @@ -304,6 +310,16 @@ func (opts DeleteObjectOptions) Apply(values url.Values) { } } +func (opts HeadObjectOptions) ApplyHeaders(h http.Header) { + if opts.Range != (DownloadRange{}) { + if opts.Range.Length == -1 { + h.Set("Range", fmt.Sprintf("bytes=%v-", opts.Range.Offset)) + } else { + h.Set("Range", fmt.Sprintf("bytes=%v-%v", opts.Range.Offset, opts.Range.Offset+opts.Range.Length-1)) + } + } +} + func (opts GetObjectOptions) Apply(values url.Values) { if opts.Prefix != "" { values.Set("prefix", opts.Prefix) @@ -320,6 +336,9 @@ func (opts GetObjectOptions) Apply(values url.Values) { if opts.Marker != "" { values.Set("marker", opts.Marker) } + if opts.OnlyMetadata { + values.Set("onlymetadata", "true") + } if opts.SortBy != "" { values.Set("sortBy", opts.SortBy) } diff --git a/api/setting.go b/api/setting.go index d11089010..47785c9aa 100644 --- a/api/setting.go +++ b/api/setting.go @@ -112,6 +112,11 @@ func (gs GougingSettings) Validate() error { if gs.MinPriceTableValidity < 10*time.Second { return 
errors.New("MinPriceTableValidity must be at least 10 seconds") } + _, overflow := gs.MaxDownloadPrice.Mul64WithOverflow(gs.MigrationSurchargeMultiplier) + if overflow { + maxMultiplier := types.MaxCurrency.Div(gs.MaxDownloadPrice).Big().Uint64() + return fmt.Errorf("MigrationSurchargeMultiplier must be less than %v, otherwise applying it to MaxDownloadPrice overflows the currency type", maxMultiplier) + } return nil } @@ -121,7 +126,12 @@ func (rs RedundancySettings) Redundancy() float64 { return float64(rs.TotalShards) / float64(rs.MinShards) } -// SlabSizeNoRedundancy returns the size of a slab without added redundancy. +// SlabSize returns the size of a slab. +func (rs RedundancySettings) SlabSize() uint64 { + return uint64(rs.TotalShards) * rhpv2.SectorSize +} + +// SlabSizeNoRedundancy returns the size of a slab without redundancy. func (rs RedundancySettings) SlabSizeNoRedundancy() uint64 { return uint64(rs.MinShards) * rhpv2.SectorSize } diff --git a/api/worker.go b/api/worker.go index 7ee2800f4..39f075718 100644 --- a/api/worker.go +++ b/api/worker.go @@ -19,6 +19,10 @@ var ( // ErrContractSetNotSpecified is returned by the worker API by endpoints that // need a contract set to be able to upload data. ErrContractSetNotSpecified = errors.New("contract set is not specified") + + // ErrHostOnPrivateNetwork is returned by the worker API when a host can't + // be scanned since it is on a private network. + ErrHostOnPrivateNetwork = errors.New("host is on a private network") ) type ( diff --git a/autopilot/alerts.go b/autopilot/alerts.go index 7b42991e1..f4762c4d4 100644 --- a/autopilot/alerts.go +++ b/autopilot/alerts.go @@ -14,12 +14,13 @@ import ( ) var ( - alertAccountRefillID = frand.Entropy256() // constant until restarted - alertLostSectorsID = frand.Entropy256() // constant until restarted - alertLowBalanceID = frand.Entropy256() // constant until restarted - alertMigrationID = frand.Entropy256() // constant until restarted - alertPruningID = frand.Entropy256() // constant until restarted - alertRenewalFailedID = frand.Entropy256() // constant until restarted + alertAccountRefillID = randomAlertID() // constant until restarted + alertChurnID = randomAlertID() // constant until restarted + alertLostSectorsID = randomAlertID() // constant until restarted + alertLowBalanceID = randomAlertID() // constant until restarted + alertMigrationID = randomAlertID() // constant until restarted + alertPruningID = randomAlertID() // constant until restarted + alertRenewalFailedID = randomAlertID() // constant until restarted ) func alertIDForAccount(alertID [32]byte, id rhpv3.Account) types.Hash256 { @@ -48,12 +49,26 @@ func (ap *Autopilot) RegisterAlert(ctx context.Context, a alerts.Alert) { } } -func (ap *Autopilot) DismissAlert(ctx context.Context, id types.Hash256) { - if err := ap.alerts.DismissAlerts(ctx, id); err != nil { +func (ap *Autopilot) DismissAlert(ctx context.Context, ids ...types.Hash256) { + if err := ap.alerts.DismissAlerts(ctx, ids...); err != nil { ap.logger.Errorf("failed to dismiss alert: %v", err) } } +func (ap *Autopilot) HasAlert(ctx context.Context, id types.Hash256) bool { + ar, err := ap.alerts.Alerts(ctx, alerts.AlertsOpts{Offset: 0, Limit: -1}) + if err != nil { + ap.logger.Errorf("failed to fetch alerts: %v", err) + return false + } + for _, alert := range ar.Alerts { + if alert.ID == id { + return true + } + } + return false +} + func newAccountLowBalanceAlert(address types.Address, balance, allowance types.Currency, bh, renewWindow, endHeight uint64) 
alerts.Alert { severity := alerts.SeverityInfo if bh+renewWindow/2 >= endHeight { @@ -137,27 +152,6 @@ func newContractPruningFailedAlert(hk types.PublicKey, version string, fcid type } } -func newContractSetChangeAlert(name string, added, removed int, removedReasons map[string]string) alerts.Alert { - var hint string - if removed > 0 { - hint = "A high churn rate can lead to a lot of unnecessary migrations, it might be necessary to tweak your configuration depending on the reason hosts are being discarded from the set." - } - - return alerts.Alert{ - ID: randomAlertID(), - Severity: alerts.SeverityInfo, - Message: "Contract set changed", - Data: map[string]any{ - "name": name, - "added": added, - "removed": removed, - "removals": removedReasons, - "hint": hint, - }, - Timestamp: time.Now(), - } -} - func newLostSectorsAlert(hk types.PublicKey, lostSectors uint64) alerts.Alert { return alerts.Alert{ ID: alertIDForHost(alertLostSectorsID, hk), diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index e5ddd8411..7367003e0 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -21,6 +21,7 @@ import ( "go.sia.tech/renterd/object" "go.sia.tech/renterd/wallet" "go.sia.tech/renterd/webhooks" + "go.sia.tech/renterd/worker" "go.uber.org/zap" ) @@ -166,6 +167,7 @@ func (ap *Autopilot) Handler() http.Handler { return jape.Mux(map[string]jape.Handler{ "GET /config": ap.configHandlerGET, "PUT /config": ap.configHandlerPUT, + "POST /config": ap.configHandlerPOST, "POST /hosts": ap.hostsHandlerPOST, "GET /host/:hostKey": ap.hostHandlerGET, "GET /state": ap.stateHandlerGET, @@ -173,6 +175,35 @@ func (ap *Autopilot) Handler() http.Handler { }) } +func (ap *Autopilot) configHandlerPOST(jc jape.Context) { + ctx := jc.Request.Context() + + // decode request + var req api.ConfigEvaluationRequest + if jc.Decode(&req) != nil { + return + } + + // fetch necessary information + cfg := req.AutopilotConfig + gs := req.GougingSettings + rs := req.RedundancySettings + cs, err := ap.bus.ConsensusState(ctx) + if jc.Check("failed to get consensus state", err) != nil { + return + } + state := ap.State() + + // fetch hosts + hosts, err := ap.bus.Hosts(ctx, api.GetHostsOptions{}) + if jc.Check("failed to get hosts", err) != nil { + return + } + + // evaluate the config + jc.Encode(evaluateConfig(cfg, cs, state.fee, state.period, rs, gs, hosts)) +} + func (ap *Autopilot) Run() error { ap.startStopMu.Lock() if ap.isRunning() { @@ -195,13 +226,15 @@ func (ap *Autopilot) Run() error { // schedule a trigger when the wallet receives its first deposit if err := ap.tryScheduleTriggerWhenFunded(); err != nil { - ap.logger.Error(err) + if !errors.Is(err, context.Canceled) { + ap.logger.Error(err) + } return nil } var forceScan bool var launchAccountRefillsOnce sync.Once - for { + for !ap.isStopped() { ap.logger.Info("autopilot iteration starting") tickerFired := make(chan struct{}) ap.workers.withWorker(func(w Worker) { @@ -220,7 +253,7 @@ func (ap *Autopilot) Run() error { close(tickerFired) return } - ap.logger.Error("autopilot stopped before consensus was synced") + ap.logger.Info("autopilot stopped before consensus was synced") return } else if blocked { if scanning, _ := ap.s.Status(); !scanning { @@ -234,7 +267,7 @@ func (ap *Autopilot) Run() error { close(tickerFired) return } - ap.logger.Error("autopilot stopped before it was able to confirm it was configured in the bus") + ap.logger.Info("autopilot stopped before it was able to confirm it was configured in the bus") return } @@ -308,6 +341,7 @@ func (ap 
*Autopilot) Run() error { case <-tickerFired: } } + return nil } // Shutdown shuts down the autopilot. @@ -463,11 +497,12 @@ func (ap *Autopilot) blockUntilSynced(interrupt <-chan time.Time) (synced, block } func (ap *Autopilot) tryScheduleTriggerWhenFunded() error { - ctx, cancel := context.WithTimeout(ap.shutdownCtx, 30*time.Second) - wallet, err := ap.bus.Wallet(ctx) - cancel() + // apply sane timeout + ctx, cancel := context.WithTimeout(ap.shutdownCtx, time.Minute) + defer cancel() // no need to schedule a trigger if the wallet is already funded + wallet, err := ap.bus.Wallet(ctx) if err != nil { return err } else if !wallet.Confirmed.Add(wallet.Unconfirmed).IsZero() { @@ -698,3 +733,172 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { } jc.Encode(hosts) } + +func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (usables uint64) { + gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) + for _, host := range hosts { + usable, _ := isUsableHost(cfg, rs, gc, host, smallestValidScore, 0) + if usable { + usables++ + } + } + return +} + +// evaluateConfig evaluates the given configuration and if the gouging settings +// are too strict for the number of contracts required by 'cfg', it will provide +// a recommendation on how to loosen it. +func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (resp api.ConfigEvaluationResponse) { + gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) + + resp.Hosts = uint64(len(hosts)) + for _, host := range hosts { + usable, usableBreakdown := isUsableHost(cfg, rs, gc, host, 0, 0) + if usable { + resp.Usable++ + continue + } + if usableBreakdown.blocked > 0 { + resp.Unusable.Blocked++ + } + if usableBreakdown.notacceptingcontracts > 0 { + resp.Unusable.NotAcceptingContracts++ + } + if usableBreakdown.notcompletingscan > 0 { + resp.Unusable.NotScanned++ + } + if usableBreakdown.unknown > 0 { + resp.Unusable.Unknown++ + } + if usableBreakdown.gougingBreakdown.ContractErr != "" { + resp.Unusable.Gouging.Contract++ + } + if usableBreakdown.gougingBreakdown.DownloadErr != "" { + resp.Unusable.Gouging.Download++ + } + if usableBreakdown.gougingBreakdown.GougingErr != "" { + resp.Unusable.Gouging.Gouging++ + } + if usableBreakdown.gougingBreakdown.PruneErr != "" { + resp.Unusable.Gouging.Pruning++ + } + if usableBreakdown.gougingBreakdown.UploadErr != "" { + resp.Unusable.Gouging.Upload++ + } + } + + if resp.Usable >= cfg.Contracts.Amount { + return // no recommendation needed + } + + // optimise gouging settings + maxGS := func() api.GougingSettings { + return api.GougingSettings{ + // these are the fields we optimise one-by-one + MaxRPCPrice: types.MaxCurrency, + MaxContractPrice: types.MaxCurrency, + MaxDownloadPrice: types.MaxCurrency, + MaxUploadPrice: types.MaxCurrency, + MaxStoragePrice: types.MaxCurrency, + + // these are not optimised, so we keep the same values as the user + // provided + MinMaxCollateral: gs.MinMaxCollateral, + HostBlockHeightLeeway: gs.HostBlockHeightLeeway, + MinPriceTableValidity: gs.MinPriceTableValidity, + MinAccountExpiry: gs.MinAccountExpiry, + MinMaxEphemeralAccountBalance: gs.MinMaxEphemeralAccountBalance, + MigrationSurchargeMultiplier: gs.MigrationSurchargeMultiplier, + } + } + + // use the input 
gouging settings as the starting point and try to optimise + // each field independent of the other fields we want to optimise + optimisedGS := gs + success := false + + // MaxRPCPrice + tmpGS := maxGS() + tmpGS.MaxRPCPrice = gs.MaxRPCPrice + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxRPCPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxRPCPrice = tmpGS.MaxRPCPrice + success = true + } + // MaxContractPrice + tmpGS = maxGS() + tmpGS.MaxContractPrice = gs.MaxContractPrice + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxContractPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxContractPrice = tmpGS.MaxContractPrice + success = true + } + // MaxDownloadPrice + tmpGS = maxGS() + tmpGS.MaxDownloadPrice = gs.MaxDownloadPrice + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxDownloadPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxDownloadPrice = tmpGS.MaxDownloadPrice + success = true + } + // MaxUploadPrice + tmpGS = maxGS() + tmpGS.MaxUploadPrice = gs.MaxUploadPrice + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxUploadPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxUploadPrice = tmpGS.MaxUploadPrice + success = true + } + // MaxStoragePrice + tmpGS = maxGS() + tmpGS.MaxStoragePrice = gs.MaxStoragePrice + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxStoragePrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxStoragePrice = tmpGS.MaxStoragePrice + success = true + } + // If one of the optimisations was successful, we return the optimised + // gouging settings + if success { + resp.Recommendation = &api.ConfigRecommendation{ + GougingSettings: optimisedGS, + } + } + return +} + +// optimiseGougingSetting tries to optimise one field of the gouging settings to +// try and hit the target number of contracts. 
+func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []hostdb.Host) bool { + if cfg.Contracts.Amount == 0 { + return true // nothing to do + } + stepSize := []uint64{200, 150, 125, 110, 105} + maxSteps := 12 + + stepIdx := 0 + nSteps := 0 + prevVal := *field // to keep accurate value + for { + nUsable := countUsableHosts(cfg, cs, fee, currentPeriod, rs, *gs, hosts) + targetHit := nUsable >= cfg.Contracts.Amount + + if targetHit && nSteps == 0 { + return true // target already hit without optimising + } else if targetHit && stepIdx == len(stepSize)-1 { + return true // target hit after optimising + } else if targetHit { + // move one step back and decrease step size + stepIdx++ + nSteps-- + *field = prevVal + } else if nSteps >= maxSteps { + return false // ran out of steps + } + + // apply next step + prevVal = *field + newValue, overflow := prevVal.Mul64WithOverflow(stepSize[stepIdx]) + if overflow { + return false + } + newValue = newValue.Div64(100) + *field = newValue + nSteps++ + } +} diff --git a/autopilot/autopilot_test.go b/autopilot/autopilot_test.go new file mode 100644 index 000000000..f818c312b --- /dev/null +++ b/autopilot/autopilot_test.go @@ -0,0 +1,115 @@ +package autopilot + +import ( + "math" + "testing" + "time" + + rhpv2 "go.sia.tech/core/rhp/v2" + rhpv3 "go.sia.tech/core/rhp/v3" + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/hostdb" +) + +func TestOptimiseGougingSetting(t *testing.T) { + // create 10 hosts that should all be usable + var hosts []hostdb.Host + for i := 0; i < 10; i++ { + hosts = append(hosts, hostdb.Host{ + KnownSince: time.Unix(0, 0), + PriceTable: hostdb.HostPriceTable{ + HostPriceTable: rhpv3.HostPriceTable{ + CollateralCost: types.Siacoins(1), + MaxCollateral: types.Siacoins(1000), + }, + }, + Settings: rhpv2.HostSettings{ + AcceptingContracts: true, + Collateral: types.Siacoins(1), + MaxCollateral: types.Siacoins(1000), + Version: "1.6.0", + }, + Interactions: hostdb.Interactions{ + Uptime: time.Hour * 1000, + LastScan: time.Now(), + LastScanSuccess: true, + SecondToLastScanSuccess: true, + TotalScans: 100, + }, + LastAnnouncement: time.Unix(0, 0), + Scanned: true, + }) + } + + // prepare settings that result in all hosts being usable + cfg := api.AutopilotConfig{ + Contracts: api.ContractsConfig{ + Allowance: types.Siacoins(100000), + Amount: 10, + }, + Hosts: api.HostsConfig{}, + } + cs := api.ConsensusState{ + BlockHeight: 100, + LastBlockTime: api.TimeNow(), + Synced: true, + } + fee := types.ZeroCurrency + rs := api.RedundancySettings{MinShards: 10, TotalShards: 30} + gs := api.GougingSettings{ + MaxRPCPrice: types.Siacoins(1), + MaxContractPrice: types.Siacoins(1), + MaxDownloadPrice: types.Siacoins(1), + MaxUploadPrice: types.Siacoins(1), + MaxStoragePrice: types.Siacoins(1), + HostBlockHeightLeeway: math.MaxInt32, + } + + // confirm all hosts are usable + assertUsable := func(n int) { + t.Helper() + nUsable := countUsableHosts(cfg, cs, fee, 0, rs, gs, hosts) + if nUsable != uint64(n) { + t.Fatalf("expected %v usable hosts, got %v", len(hosts), nUsable) + } + } + assertUsable(len(hosts)) + + // Case1: test optimising a field which gets us back to a full set of hosts + for i := range hosts { + hosts[i].Settings.StoragePrice = types.Siacoins(uint32(i + 1)) + } + assertUsable(1) + if !optimiseGougingSetting(&gs, &gs.MaxStoragePrice, cfg, cs, fee, 0, rs, hosts) { + 
t.Fatal("optimising failed") + } + assertUsable(len(hosts)) + if gs.MaxStoragePrice.ExactString() != "10164000000000000000000000" { // 10.164 SC + t.Fatal("unexpected storage price", gs.MaxStoragePrice.ExactString()) + } + + // Case2: test optimising a field where we can't get back to a full set of + // hosts + hosts[0].Settings.StoragePrice = types.Siacoins(100000) + assertUsable(9) + if optimiseGougingSetting(&gs, &gs.MaxStoragePrice, cfg, cs, fee, 0, rs, hosts) { + t.Fatal("optimising succeeded") + } + if gs.MaxStoragePrice.ExactString() != "41631744000000000000000000000" { // ~41.63 KS + t.Fatal("unexpected storage price", gs.MaxStoragePrice.ExactString()) + } + + // Case3: force overflow + for i := range hosts { + hosts[i].Settings.StoragePrice = types.MaxCurrency + } + gs.MaxStoragePrice = types.MaxCurrency.Sub(types.Siacoins(1)) + assertUsable(0) + if optimiseGougingSetting(&gs, &gs.MaxStoragePrice, cfg, cs, fee, 0, rs, hosts) { + t.Fatal("optimising succeeded") + } + if gs.MaxStoragePrice.ExactString() != "340282366920937463463374607431768211455" { // ~340.3 TS + t.Fatal("unexpected storage price", gs.MaxStoragePrice.ExactString()) + } +} diff --git a/autopilot/churn.go b/autopilot/churn.go new file mode 100644 index 000000000..31a1073cf --- /dev/null +++ b/autopilot/churn.go @@ -0,0 +1,68 @@ +package autopilot + +import ( + "time" + + "go.sia.tech/core/types" + "go.sia.tech/renterd/alerts" +) + +type ( + accumulatedChurn struct { + additions map[types.FileContractID]contractSetAdditions + removals map[types.FileContractID]contractSetRemovals + } +) + +func newAccumulatedChurn() *accumulatedChurn { + return &accumulatedChurn{ + additions: make(map[types.FileContractID]contractSetAdditions), + removals: make(map[types.FileContractID]contractSetRemovals), + } +} + +func (c *accumulatedChurn) Alert(name string) alerts.Alert { + var hint string + if len(c.removals) > 0 { + hint = "A high churn rate can lead to a lot of unnecessary migrations, it might be necessary to tweak your configuration depending on the reason hosts are being discarded from the set." + } + + return alerts.Alert{ + ID: alertChurnID, + Severity: alerts.SeverityInfo, + Message: "Contract set changed", + Data: map[string]any{ + "name": name, + "setAdditions": c.additions, + "setRemovals": c.removals, + "hint": hint, + }, + Timestamp: time.Now(), + } +} + +func (c *accumulatedChurn) Apply(additions map[types.FileContractID]contractSetAdditions, removals map[types.FileContractID]contractSetRemovals) { + for fcid, a := range additions { + if _, exists := c.additions[fcid]; !exists { + c.additions[fcid] = a + } else { + additions := c.additions[fcid] + additions.Additions = append(additions.Additions, a.Additions...) + c.additions[fcid] = additions + } + } + for fcid, r := range removals { + if _, exists := c.removals[fcid]; !exists { + c.removals[fcid] = r + } else { + removals := c.removals[fcid] + removals.Removals = append(removals.Removals, r.Removals...) 
+ c.removals[fcid] = removals + } + } +} + +func (c *accumulatedChurn) Reset() { + c.additions = make(map[types.FileContractID]contractSetAdditions) + c.removals = make(map[types.FileContractID]contractSetRemovals) +} diff --git a/autopilot/client.go b/autopilot/client.go index 35e3981aa..ba16754a5 100644 --- a/autopilot/client.go +++ b/autopilot/client.go @@ -64,3 +64,14 @@ func (c *Client) Trigger(forceScan bool) (_ bool, err error) { err = c.c.POST("/trigger", api.AutopilotTriggerRequest{ForceScan: forceScan}, &resp) return resp.Triggered, err } + +// EvalutateConfig evaluates an autopilot config using the given gouging and +// redundancy settings. +func (c *Client) EvaluateConfig(ctx context.Context, cfg api.AutopilotConfig, gs api.GougingSettings, rs api.RedundancySettings) (resp api.ConfigEvaluationResponse, err error) { + err = c.c.WithContext(ctx).POST("/config", api.ConfigEvaluationRequest{ + AutopilotConfig: cfg, + GougingSettings: gs, + RedundancySettings: rs, + }, &resp) + return +} diff --git a/autopilot/contractor.go b/autopilot/contractor.go index adad5d1b7..4e5e8c842 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -85,6 +85,7 @@ const ( type ( contractor struct { ap *Autopilot + churn *accumulatedChurn resolver *ipResolver logger *zap.SugaredLogger @@ -122,9 +123,30 @@ type ( recoverable bool } + contractSetAdditions struct { + HostKey types.PublicKey `json:"hostKey"` + Additions []contractSetAddition `json:"additions"` + } + + contractSetAddition struct { + Size uint64 `json:"size"` + Time api.TimeRFC3339 `json:"time"` + } + + contractSetRemovals struct { + HostKey types.PublicKey `json:"hostKey"` + Removals []contractSetRemoval `json:"removals"` + } + + contractSetRemoval struct { + Size uint64 `json:"size"` + Reason string `json:"reasons"` + Time api.TimeRFC3339 `json:"time"` + } + renewal struct { - from types.FileContractID - to types.FileContractID + from api.ContractMetadata + to api.ContractMetadata ci contractInfo } ) @@ -132,6 +154,7 @@ type ( func newContractor(ap *Autopilot, revisionSubmissionBuffer uint64, revisionBroadcastInterval time.Duration) *contractor { return &contractor{ ap: ap, + churn: newAccumulatedChurn(), logger: ap.logger.Named("contractor"), revisionBroadcastInterval: revisionBroadcastInterval, @@ -231,14 +254,20 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // check if any used hosts have lost data to warn the user + var toDismiss []types.Hash256 for _, h := range hosts { if h.Interactions.LostSectors > 0 { c.ap.RegisterAlert(ctx, newLostSectorsAlert(h.PublicKey, h.Interactions.LostSectors)) + } else { + toDismiss = append(toDismiss, alertIDForHost(alertLostSectorsID, h.PublicKey)) } } + if len(toDismiss) > 0 { + c.ap.DismissAlert(ctx, toDismiss...) 
+ } // fetch candidate hosts - candidates, unusableHosts, err := c.candidateHosts(ctx, hosts, usedHosts, hostData, math.SmallestNonzeroFloat64) // avoid 0 score hosts + candidates, unusableHosts, err := c.candidateHosts(ctx, hosts, usedHosts, hostData, smallestValidScore) // avoid 0 score hosts if err != nil { return false, err } @@ -325,17 +354,15 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( // set afterwards var renewed []renewal if limit > 0 { - var toKeep []contractInfo + var toKeep []api.ContractMetadata renewed, toKeep = c.runContractRenewals(ctx, w, toRenew, &remaining, limit) for _, ri := range renewed { if ri.ci.usable || ri.ci.recoverable { updatedSet = append(updatedSet, ri.to) } - contractData[ri.to] = contractData[ri.from] - } - for _, ci := range toKeep { - updatedSet = append(updatedSet, ci.contract.ID) + contractData[ri.to.ID] = contractData[ri.from.ID] } + updatedSet = append(updatedSet, toKeep...) } // run contract refreshes @@ -347,7 +374,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( if ri.ci.usable || ri.ci.recoverable { updatedSet = append(updatedSet, ri.to) } - contractData[ri.to] = contractData[ri.from] + contractData[ri.to.ID] = contractData[ri.from.ID] } } @@ -360,7 +387,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // check if we need to form contracts and add them to the contract set - var formed []types.FileContractID + var formed []api.ContractMetadata if uint64(len(updatedSet)) < threshold { // no need to try and form contracts if wallet is completely empty wallet, err := c.ap.bus.Wallet(ctx) @@ -376,34 +403,40 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } else { for _, fc := range formed { updatedSet = append(updatedSet, fc) - contractData[fc] = 0 + contractData[fc.ID] = 0 } } } } // cap the amount of contracts we want to keep to the configured amount - for _, fcid := range updatedSet { - if _, exists := contractData[fcid]; !exists { - c.logger.Errorf("contract %v not found in contractData", fcid) + for _, contract := range updatedSet { + if _, exists := contractData[contract.ID]; !exists { + c.logger.Errorf("contract %v not found in contractData", contract.ID) } } if len(updatedSet) > int(state.cfg.Contracts.Amount) { // sort by contract size sort.Slice(updatedSet, func(i, j int) bool { - return contractData[updatedSet[i]] > contractData[updatedSet[j]] + return contractData[updatedSet[i].ID] > contractData[updatedSet[j].ID] }) - for _, c := range updatedSet[state.cfg.Contracts.Amount:] { - toStopUsing[c] = "truncated" + for _, contract := range updatedSet[state.cfg.Contracts.Amount:] { + toStopUsing[contract.ID] = "truncated" } updatedSet = updatedSet[:state.cfg.Contracts.Amount] } + // convert to set of file contract ids + var newSet []types.FileContractID + for _, contract := range updatedSet { + newSet = append(newSet, contract.ID) + } + // update contract set if c.ap.isStopped() { return false, errors.New("autopilot stopped before maintenance could be completed") } - err = c.ap.bus.SetContractSet(ctx, state.cfg.Contracts.Set, updatedSet) + err = c.ap.bus.SetContractSet(ctx, state.cfg.Contracts.Set, newSet) if err != nil { return false, err } @@ -412,54 +445,77 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( return c.computeContractSetChanged(ctx, state.cfg.Contracts.Set, currentSet, updatedSet, formed, refreshed, renewed, toStopUsing, contractData), nil } -func 
(c *contractor) computeContractSetChanged(ctx context.Context, name string, oldSet []api.ContractMetadata, newSet, formed []types.FileContractID, refreshed, renewed []renewal, toStopUsing map[types.FileContractID]string, contractData map[types.FileContractID]uint64) bool { - // build some maps for easier lookups - previous := make(map[types.FileContractID]struct{}) +func (c *contractor) computeContractSetChanged(ctx context.Context, name string, oldSet, newSet []api.ContractMetadata, formed []api.ContractMetadata, refreshed, renewed []renewal, toStopUsing map[types.FileContractID]string, contractData map[types.FileContractID]uint64) bool { + // build set lookups + inOldSet := make(map[types.FileContractID]struct{}) for _, c := range oldSet { - previous[c.ID] = struct{}{} + inOldSet[c.ID] = struct{}{} } - updated := make(map[types.FileContractID]struct{}) + inNewSet := make(map[types.FileContractID]struct{}) for _, c := range newSet { - updated[c] = struct{}{} + inNewSet[c.ID] = struct{}{} } + + // build renewal lookups renewalsFromTo := make(map[types.FileContractID]types.FileContractID) renewalsToFrom := make(map[types.FileContractID]types.FileContractID) for _, c := range append(refreshed, renewed...) { - renewalsFromTo[c.from] = c.to - renewalsToFrom[c.to] = c.from + renewalsFromTo[c.from.ID] = c.to.ID + renewalsToFrom[c.to.ID] = c.from.ID } // log added and removed contracts - var added []types.FileContractID - var removed []types.FileContractID - removedReasons := make(map[string]string) + setAdditions := make(map[types.FileContractID]contractSetAdditions) + setRemovals := make(map[types.FileContractID]contractSetRemovals) + now := api.TimeNow() for _, contract := range oldSet { - _, exists := updated[contract.ID] - _, renewed := updated[renewalsFromTo[contract.ID]] + _, exists := inNewSet[contract.ID] + _, renewed := inNewSet[renewalsFromTo[contract.ID]] if !exists && !renewed { - removed = append(removed, contract.ID) reason, ok := toStopUsing[contract.ID] if !ok { reason = "unknown" } - removedReasons[contract.ID.String()] = reason + + if _, exists := setRemovals[contract.ID]; !exists { + setRemovals[contract.ID] = contractSetRemovals{ + HostKey: contract.HostKey, + } + } + removals := setRemovals[contract.ID] + removals.Removals = append(removals.Removals, contractSetRemoval{ + Size: contractData[contract.ID], + Reason: reason, + Time: now, + }) + setRemovals[contract.ID] = removals c.logger.Debugf("contract %v was removed from the contract set, size: %v, reason: %v", contract.ID, contractData[contract.ID], reason) } } - for _, fcid := range newSet { - _, existed := previous[fcid] - _, renewed := renewalsToFrom[fcid] + for _, contract := range newSet { + _, existed := inOldSet[contract.ID] + _, renewed := renewalsToFrom[contract.ID] if !existed && !renewed { - added = append(added, fcid) - c.logger.Debugf("contract %v was added to the contract set, size: %v", fcid, contractData[fcid]) + if _, exists := setAdditions[contract.ID]; !exists { + setAdditions[contract.ID] = contractSetAdditions{ + HostKey: contract.HostKey, + } + } + additions := setAdditions[contract.ID] + additions.Additions = append(additions.Additions, contractSetAddition{ + Size: contractData[contract.ID], + Time: now, + }) + setAdditions[contract.ID] = additions + c.logger.Debugf("contract %v was added to the contract set, size: %v", contract.ID, contractData[contract.ID]) } } // log renewed contracts that did not make it into the contract set for _, fcid := range renewed { - _, exists := updated[fcid.to] + _, 
exists := inNewSet[fcid.to.ID] if !exists { - c.logger.Debugf("contract %v was renewed but did not make it into the contract set, size: %v", fcid, contractData[fcid.to]) + c.logger.Debugf("contract %v was renewed but did not make it into the contract set, size: %v", fcid, contractData[fcid.to.ID]) } } @@ -470,9 +526,8 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string, } // record churn metrics - now := api.TimeNow() var metrics []api.ContractSetChurnMetric - for _, fcid := range added { + for fcid := range setAdditions { metrics = append(metrics, api.ContractSetChurnMetric{ Name: c.ap.state.cfg.Contracts.Set, ContractID: fcid, @@ -480,12 +535,12 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string, Timestamp: now, }) } - for _, fcid := range removed { + for fcid, removal := range setRemovals { metrics = append(metrics, api.ContractSetChurnMetric{ Name: c.ap.state.cfg.Contracts.Set, ContractID: fcid, Direction: api.ChurnDirRemoved, - Reason: removedReasons[fcid.String()], + Reason: removal.Removals[0].Reason, Timestamp: now, }) } @@ -502,12 +557,16 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string, "renewed", len(renewed), "refreshed", len(refreshed), "contracts", len(newSet), - "added", len(added), - "removed", len(removed), + "added", len(setAdditions), + "removed", len(setRemovals), ) - hasChanged := len(added)+len(removed) > 0 + hasChanged := len(setAdditions)+len(setRemovals) > 0 if hasChanged { - c.ap.RegisterAlert(ctx, newContractSetChangeAlert(name, len(added), len(removed), removedReasons)) + if !c.ap.HasAlert(ctx, alertChurnID) { + c.churn.Reset() + } + c.churn.Apply(setAdditions, setRemovals) + c.ap.RegisterAlert(ctx, c.churn.Alert(name)) } return hasChanged } @@ -602,7 +661,7 @@ func (c *contractor) performWalletMaintenance(ctx context.Context) error { return nil } -func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts []api.Contract, inCurrentSet map[types.FileContractID]struct{}, minScore float64) (toKeep []types.FileContractID, toArchive, toStopUsing map[types.FileContractID]string, toRefresh, toRenew []contractInfo, _ error) { +func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts []api.Contract, inCurrentSet map[types.FileContractID]struct{}, minScore float64) (toKeep []api.ContractMetadata, toArchive, toStopUsing map[types.FileContractID]string, toRefresh, toRenew []contractInfo, _ error) { if c.ap.isStopped() { return } @@ -693,11 +752,14 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts continue } - // if the host doesn't have a valid pricetable, update it - var invalidPT bool - if err := refreshPriceTable(ctx, w, &host.Host); err != nil { - c.logger.Errorf("could not fetch price table for host %v: %v", host.PublicKey, err) - invalidPT = true + // if the host doesn't have a valid pricetable, update it if we were + // able to obtain a revision + invalidPT := contract.Revision == nil + if contract.Revision != nil { + if err := refreshPriceTable(ctx, w, &host.Host); err != nil { + c.logger.Errorf("could not fetch price table for host %v: %v", host.PublicKey, err) + invalidPT = true + } } // refresh the consensus state @@ -734,7 +796,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts } else if !state.cfg.Hosts.AllowRedundantIPs && ipFilter.IsRedundantIP(contract.HostIP, contract.HostKey) { toStopUsing[fcid] = fmt.Sprintf("%v; %v", errHostRedundantIP, 
errContractNoRevision) } else { - toKeep = append(toKeep, fcid) + toKeep = append(toKeep, contract.ContractMetadata) remainingKeepLeeway-- // we let it slide } continue // can't perform contract checks without revision @@ -777,18 +839,17 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts } else if refresh { toRefresh = append(toRefresh, ci) } else if usable { - toKeep = append(toKeep, ci.contract.ID) + toKeep = append(toKeep, ci.contract.ContractMetadata) } } return toKeep, toArchive, toStopUsing, toRefresh, toRenew, nil } -func (c *contractor) runContractFormations(ctx context.Context, w Worker, candidates scoredHosts, usedHosts map[types.PublicKey]struct{}, unusableHosts unusableHostResult, missing uint64, budget *types.Currency) ([]types.FileContractID, error) { +func (c *contractor) runContractFormations(ctx context.Context, w Worker, candidates scoredHosts, usedHosts map[types.PublicKey]struct{}, unusableHosts unusableHostResult, missing uint64, budget *types.Currency) (formed []api.ContractMetadata, _ error) { if c.ap.isStopped() { return nil, nil } - var formed []types.FileContractID // convenience variables state := c.ap.State() @@ -890,7 +951,7 @@ func (c *contractor) runContractFormations(ctx context.Context, w Worker, candid formedContract, proceed, err := c.formContract(ctx, w, host, minInitialContractFunds, maxInitialContractFunds, budget) if err == nil { // add contract to contract set - formed = append(formed, formedContract.ID) + formed = append(formed, formedContract) missing-- } if !proceed { @@ -970,7 +1031,7 @@ func (c *contractor) runRevisionBroadcast(ctx context.Context, w Worker, allCont } } -func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew []contractInfo, budget *types.Currency, limit int) (renewals []renewal, toKeep []contractInfo) { +func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew []contractInfo, budget *types.Currency, limit int) (renewals []renewal, toKeep []api.ContractMetadata) { c.logger.Debugw( "run contracts renewals", "torenew", len(toRenew), @@ -1004,11 +1065,11 @@ func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew if err != nil { c.ap.RegisterAlert(ctx, newContractRenewalFailedAlert(contract, !proceed, err)) if toRenew[i].usable { - toKeep = append(toKeep, toRenew[i]) + toKeep = append(toKeep, toRenew[i].contract.ContractMetadata) } } else { c.ap.DismissAlert(ctx, alertIDForContract(alertRenewalFailedID, contract.ID)) - renewals = append(renewals, renewal{from: contract.ID, to: renewed.ID, ci: toRenew[i]}) + renewals = append(renewals, renewal{from: contract, to: renewed, ci: toRenew[i]}) } // break if we don't want to proceed @@ -1021,7 +1082,7 @@ func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew // they're usable and we have 'limit' left for j := i; j < len(toRenew); j++ { if len(renewals)+len(toKeep) < limit && toRenew[j].usable { - toKeep = append(toKeep, toRenew[j]) + toKeep = append(toKeep, toRenew[j].contract.ContractMetadata) } } @@ -1051,7 +1112,7 @@ func (c *contractor) runContractRefreshes(ctx context.Context, w Worker, toRefre // refresh and add if it succeeds renewed, proceed, err := c.refreshContract(ctx, w, ci, budget) if err == nil { - refreshed = append(refreshed, renewal{from: ci.contract.ID, to: renewed.ID, ci: ci}) + refreshed = append(refreshed, renewal{from: ci.contract.ContractMetadata, to: renewed, ci: ci}) } // break if we don't want to proceed @@ -1191,7 +1252,7 @@ func (c 
*contractor) calculateMinScore(ctx context.Context, candidates []scoredH // return early if there's no hosts if len(candidates) == 0 { c.logger.Warn("min host score is set to the smallest non-zero float because there are no candidate hosts") - return math.SmallestNonzeroFloat64 + return smallestValidScore } // determine the number of random hosts we fetch per iteration when @@ -1225,7 +1286,7 @@ func (c *contractor) calculateMinScore(ctx context.Context, candidates []scoredH return candidates[i].score > candidates[j].score }) if len(candidates) < int(numContracts) { - return math.SmallestNonzeroFloat64 + return smallestValidScore } else if cutoff := candidates[numContracts-1].score; minScore > cutoff { minScore = cutoff } diff --git a/autopilot/hostfilter.go b/autopilot/hostfilter.go index 2ebc81f38..574862a97 100644 --- a/autopilot/hostfilter.go +++ b/autopilot/hostfilter.go @@ -204,7 +204,7 @@ func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker. gougingBreakdown = gc.Check(&h.Settings, &h.PriceTable.HostPriceTable) if gougingBreakdown.Gouging() { errs = append(errs, fmt.Errorf("%w: %v", errHostPriceGouging, gougingBreakdown)) - } else { + } else if minScore > 0 { // perform scoring checks // // NOTE: only perform these scoring checks if we know the host is diff --git a/autopilot/hostscore.go b/autopilot/hostscore.go index f0f103c6c..b15857d19 100644 --- a/autopilot/hostscore.go +++ b/autopilot/hostscore.go @@ -13,6 +13,8 @@ import ( "go.sia.tech/siad/build" ) +const smallestValidScore = math.SmallestNonzeroFloat64 + func hostScore(cfg api.AutopilotConfig, h hostdb.Host, storedData uint64, expectedRedundancy float64) api.HostScoreBreakdown { // idealDataPerHost is the amount of data that we would have to put on each // host assuming that our storage requirements were spread evenly across diff --git a/autopilot/ipfilter.go b/autopilot/ipfilter.go index 1844955f6..6aa244047 100644 --- a/autopilot/ipfilter.go +++ b/autopilot/ipfilter.go @@ -23,7 +23,7 @@ const ( ipCacheEntryValidity = 24 * time.Hour // resolverLookupTimeout is the timeout we apply when resolving a host's IP address - resolverLookupTimeout = 5 * time.Second + resolverLookupTimeout = 10 * time.Second ) var ( diff --git a/build/network.go b/build/network.go index 4183a62bc..a0a452189 100644 --- a/build/network.go +++ b/build/network.go @@ -3,9 +3,9 @@ package build //go:generate go run gen.go import ( - "go.sia.tech/core/chain" "go.sia.tech/core/consensus" "go.sia.tech/core/types" + "go.sia.tech/coreutils/chain" ) // Network returns the Sia network consts and genesis block for the current build. 
diff --git a/bus/bus.go b/bus/bus.go index d11550595..045b8e82a 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -126,7 +126,7 @@ type ( ContractSizes(ctx context.Context) (map[types.FileContractID]api.ContractSize, error) ContractSize(ctx context.Context, id types.FileContractID) (api.ContractSize, error) - DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) error + DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) (int, error) Bucket(_ context.Context, bucketName string) (api.Bucket, error) CreateBucket(_ context.Context, bucketName string, policy api.BucketPolicy) error @@ -137,9 +137,10 @@ type ( CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath, dstPath, mimeType string, metadata api.ObjectUserMetadata) (api.ObjectMetadata, error) ListObjects(ctx context.Context, bucketName, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) Object(ctx context.Context, bucketName, path string) (api.Object, error) + ObjectMetadata(ctx context.Context, bucketName, path string) (api.Object, error) ObjectEntries(ctx context.Context, bucketName, path, prefix, sortBy, sortDir, marker string, offset, limit int) ([]api.ObjectMetadata, bool, error) ObjectsBySlabKey(ctx context.Context, bucketName string, slabKey object.EncryptionKey) ([]api.ObjectMetadata, error) - ObjectsStats(ctx context.Context) (api.ObjectsStatsResponse, error) + ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) RemoveObject(ctx context.Context, bucketName, path string) error RemoveObjects(ctx context.Context, bucketName, prefix string) error RenameObject(ctx context.Context, bucketName, from, to string, force bool) error @@ -605,6 +606,11 @@ func (b *bus) walletRedistributeHandler(jc jape.Context) { } var ids []types.TransactionID + if len(txns) == 0 { + jc.Encode(ids) + return + } + for i := 0; i < len(txns); i++ { err = b.w.SignTransaction(cs, &txns[i], toSign, types.CoveredFields{WholeTransaction: true}) if jc.Check("couldn't sign the transaction", err) != nil { @@ -1191,13 +1197,22 @@ func (b *bus) objectsHandlerGET(jc jape.Context) { if jc.DecodeForm("bucket", &bucket) != nil { return } + var onlymetadata bool + if jc.DecodeForm("onlymetadata", &onlymetadata) != nil { + return + } - o, err := b.ms.Object(jc.Request.Context(), bucket, path) + var o api.Object + var err error + if onlymetadata { + o, err = b.ms.ObjectMetadata(jc.Request.Context(), bucket, path) + } else { + o, err = b.ms.Object(jc.Request.Context(), bucket, path) + } if errors.Is(err, api.ErrObjectNotFound) { jc.Error(err, http.StatusNotFound) return - } - if jc.Check("couldn't load object", err) != nil { + } else if jc.Check("couldn't load object", err) != nil { return } jc.Encode(api.ObjectsResponse{Object: &o}) @@ -1348,7 +1363,11 @@ func (b *bus) slabbuffersHandlerGET(jc jape.Context) { } func (b *bus) objectsStatshandlerGET(jc jape.Context) { - info, err := b.ms.ObjectsStats(jc.Request.Context()) + opts := api.ObjectsStatsOpts{} + if jc.DecodeForm("bucket", &opts.Bucket) != nil { + return + } + info, err := b.ms.ObjectsStats(jc.Request.Context(), opts) if jc.Check("couldn't get objects stats", err) != nil { return } @@ -1395,9 +1414,11 @@ func (b *bus) sectorsHostRootHandlerDELETE(jc jape.Context) { } else if jc.DecodeParam("root", &root) != nil { return } - err := b.ms.DeleteHostSector(jc.Request.Context(), hk, root) + n, err := b.ms.DeleteHostSector(jc.Request.Context(), hk, root) if jc.Check("failed to mark sector as lost", 
err) != nil {
 		return
+	} else if n > 0 {
+		b.logger.Infow("successfully marked sector as lost", "hk", hk, "root", root)
 	}
 }
 
@@ -1711,8 +1732,40 @@ func (b *bus) gougingParams(ctx context.Context) (api.GougingParams, error) {
 	}, nil
 }
 
-func (b *bus) handleGETAlerts(c jape.Context) {
-	c.Encode(b.alertMgr.Active())
+func (b *bus) handleGETAlertsDeprecated(jc jape.Context) {
+	ar, err := b.alertMgr.Alerts(jc.Request.Context(), alerts.AlertsOpts{Offset: 0, Limit: -1})
+	if jc.Check("failed to fetch alerts", err) != nil {
+		return
+	}
+	jc.Encode(ar.Alerts)
+}
+
+func (b *bus) handleGETAlerts(jc jape.Context) {
+	if jc.Request.FormValue("offset") == "" && jc.Request.FormValue("limit") == "" {
+		b.handleGETAlertsDeprecated(jc)
+		return
+	}
+	offset, limit := 0, -1
+	var severity alerts.Severity
+	if jc.DecodeForm("offset", &offset) != nil {
+		return
+	} else if jc.DecodeForm("limit", &limit) != nil {
+		return
+	} else if offset < 0 {
+		jc.Error(errors.New("offset must be non-negative"), http.StatusBadRequest)
+		return
+	} else if jc.DecodeForm("severity", &severity) != nil {
+		return
+	}
+	ar, err := b.alertMgr.Alerts(jc.Request.Context(), alerts.AlertsOpts{
+		Offset:   offset,
+		Limit:    limit,
+		Severity: severity,
+	})
+	if jc.Check("failed to fetch alerts", err) != nil {
+		return
+	}
+	jc.Encode(ar)
 }
 
 func (b *bus) handlePOSTAlertsDismiss(jc jape.Context) {
@@ -2159,9 +2212,13 @@ func (b *bus) multipartHandlerCreatePOST(jc jape.Context) {
 		return
 	}
 
-	key := req.Key
-	if key == (object.EncryptionKey{}) {
+	var key object.EncryptionKey
+	if req.GenerateKey {
+		key = object.GenerateEncryptionKey()
+	} else if req.Key == nil {
 		key = object.NoOpKey
+	} else {
+		key = *req.Key
 	}
 
 	resp, err := b.ms.CreateMultipartUpload(jc.Request.Context(), req.Bucket, req.Path, key, req.MimeType, req.Metadata)
diff --git a/bus/client/alerts.go b/bus/client/alerts.go
index 6af68c78d..28c3b9a84 100644
--- a/bus/client/alerts.go
+++ b/bus/client/alerts.go
@@ -2,20 +2,38 @@ package client
 
 import (
 	"context"
+	"fmt"
+	"net/url"
 
 	"go.sia.tech/core/types"
 	"go.sia.tech/renterd/alerts"
 )
 
 // Alerts fetches the active alerts from the bus.
-func (c *Client) Alerts() (alerts []alerts.Alert, err error) {
-	err = c.c.GET("/alerts", &alerts)
+func (c *Client) Alerts(ctx context.Context, opts alerts.AlertsOpts) (resp alerts.AlertsResponse, err error) {
+	values := url.Values{}
+	values.Set("offset", fmt.Sprint(opts.Offset))
+	if opts.Limit != 0 {
+		values.Set("limit", fmt.Sprint(opts.Limit))
+	}
+	if opts.Severity != 0 {
+		values.Set("severity", opts.Severity.String())
+	}
+	err = c.c.WithContext(ctx).GET("/alerts?"+values.Encode(), &resp)
 	return
 }
 
 // DismissAlerts dismisses the alerts with the given IDs.
 func (c *Client) DismissAlerts(ctx context.Context, ids ...types.Hash256) error {
-	return c.c.WithContext(ctx).POST("/alerts/dismiss", ids, nil)
+	return c.dismissAlerts(ctx, false, ids...)
+}
+
+func (c *Client) dismissAlerts(ctx context.Context, all bool, ids ...types.Hash256) error {
+	values := url.Values{}
+	if all {
+		values.Set("all", fmt.Sprint(true))
+	}
+	return c.c.WithContext(ctx).POST("/alerts/dismiss?"+values.Encode(), ids, nil)
 }
 
 // RegisterAlert registers the given alert.
diff --git a/bus/client/multipart-upload.go b/bus/client/multipart-upload.go
index ffa4d8dc8..281019487 100644
--- a/bus/client/multipart-upload.go
+++ b/bus/client/multipart-upload.go
@@ -46,11 +46,12 @@ func (c *Client) CompleteMultipartUpload(ctx context.Context, bucket, path, uplo
 
 // CreateMultipartUpload creates a new multipart upload.
func (c *Client) CreateMultipartUpload(ctx context.Context, bucket, path string, opts api.CreateMultipartOptions) (resp api.MultipartCreateResponse, err error) { err = c.c.WithContext(ctx).POST("/multipart/create", api.MultipartCreateRequest{ - Bucket: bucket, - Path: path, - Key: opts.Key, - MimeType: opts.MimeType, - Metadata: opts.Metadata, + Bucket: bucket, + GenerateKey: opts.GenerateKey, + Path: path, + Key: opts.Key, + MimeType: opts.MimeType, + Metadata: opts.Metadata, }, &resp) return } diff --git a/bus/client/objects.go b/bus/client/objects.go index 38a7b14cd..23011a9ba 100644 --- a/bus/client/objects.go +++ b/bus/client/objects.go @@ -82,8 +82,12 @@ func (c *Client) ObjectsBySlabKey(ctx context.Context, bucket string, key object } // ObjectsStats returns information about the number of objects and their size. -func (c *Client) ObjectsStats() (osr api.ObjectsStatsResponse, err error) { - err = c.c.GET("/stats/objects", &osr) +func (c *Client) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (osr api.ObjectsStatsResponse, err error) { + values := url.Values{} + if opts.Bucket != "" { + values.Set("bucket", opts.Bucket) + } + err = c.c.WithContext(ctx).GET("/stats/objects?"+values.Encode(), &osr) return } diff --git a/cmd/renterd/config.go b/cmd/renterd/config.go index 391d77ea3..47668ff94 100644 --- a/cmd/renterd/config.go +++ b/cmd/renterd/config.go @@ -11,7 +11,7 @@ import ( "strings" "go.sia.tech/core/types" - "go.sia.tech/core/wallet" + "go.sia.tech/coreutils/wallet" "golang.org/x/term" "gopkg.in/yaml.v3" "lukechampine.com/frand" diff --git a/cmd/renterd/main.go b/cmd/renterd/main.go index 79d1e31b4..98e075d92 100644 --- a/cmd/renterd/main.go +++ b/cmd/renterd/main.go @@ -16,6 +16,7 @@ import ( "time" "go.sia.tech/core/types" + "go.sia.tech/coreutils/wallet" "go.sia.tech/jape" "go.sia.tech/renterd/api" "go.sia.tech/renterd/autopilot" @@ -25,7 +26,6 @@ import ( "go.sia.tech/renterd/internal/node" "go.sia.tech/renterd/s3" "go.sia.tech/renterd/stores" - "go.sia.tech/renterd/wallet" "go.sia.tech/renterd/worker" "go.sia.tech/web/renterd" "go.uber.org/zap" @@ -160,11 +160,11 @@ func getSeed() types.PrivateKey { fmt.Println() phrase = string(pw) } - key, err := wallet.KeyFromPhrase(phrase) - if err != nil { - log.Fatal(err) + var rawSeed [32]byte + if err := wallet.SeedFromPhrase(&rawSeed, phrase); err != nil { + panic(err) } - seed = key + seed = wallet.KeyFromSeed(&rawSeed, 0) } return seed } @@ -315,8 +315,15 @@ func main() { log.Println("Build Date:", build.BuildTime()) return } else if flag.Arg(0) == "seed" { - log.Println("Seed phrase:") - fmt.Println(wallet.NewSeedPhrase()) + var seed [32]byte + phrase := wallet.NewSeedPhrase() + if err := wallet.SeedFromPhrase(&seed, phrase); err != nil { + println(err.Error()) + os.Exit(1) + } + key := wallet.KeyFromSeed(&seed, 0) + fmt.Println("Recovery Phrase:", phrase) + fmt.Println("Address", types.StandardUnlockHash(key.PublicKey())) return } else if flag.Arg(0) == "config" { cmdBuildConfig() diff --git a/go.mod b/go.mod index bfa1a7511..43208cfc1 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,6 @@ module go.sia.tech/renterd -go 1.21 - -toolchain go1.21.6 +go 1.21.6 require ( github.com/gabriel-vasile/mimetype v1.4.3 @@ -10,36 +8,37 @@ require ( github.com/google/go-cmp v0.6.0 github.com/gotd/contrib v0.19.0 github.com/klauspost/reedsolomon v1.12.1 - github.com/minio/minio-go/v7 v7.0.66 + github.com/minio/minio-go/v7 v7.0.68 github.com/montanaflynn/stats v0.7.1 gitlab.com/NebulousLabs/encoding v0.0.0-20200604091946-456c3dc907fe 
- go.sia.tech/core v0.1.12-0.20231211182757-77190f04f90b + go.sia.tech/core v0.2.1 + go.sia.tech/coreutils v0.0.3 go.sia.tech/gofakes3 v0.0.0-20231109151325-e0d47c10dce2 - go.sia.tech/hostd v0.3.0-beta.1 - go.sia.tech/jape v0.11.1 + go.sia.tech/hostd v1.0.2 + go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 go.sia.tech/mux v1.2.0 go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca - go.sia.tech/web/renterd v0.44.0 - go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.18.0 - golang.org/x/term v0.16.0 + go.sia.tech/web/renterd v0.49.0 + go.uber.org/zap v1.27.0 + golang.org/x/crypto v0.20.0 + golang.org/x/term v0.17.0 gopkg.in/yaml.v3 v3.0.1 - gorm.io/driver/mysql v1.5.2 - gorm.io/driver/sqlite v1.5.4 + gorm.io/driver/mysql v1.5.4 + gorm.io/driver/sqlite v1.5.5 gorm.io/gorm v1.25.7 lukechampine.com/frand v1.4.2 ) require ( github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da // indirect - github.com/aws/aws-sdk-go v1.49.1 // indirect - github.com/cloudflare/cloudflare-go v0.75.0 // indirect + github.com/aws/aws-sdk-go v1.50.1 // indirect + github.com/cloudflare/cloudflare-go v0.86.0 // indirect github.com/dchest/threefish v0.0.0-20120919164726-3ecf4c494abf // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-sql-driver/mysql v1.7.1 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/uuid v1.5.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.1 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.5 // indirect @@ -49,7 +48,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect - github.com/klauspost/compress v1.17.4 // indirect + github.com/klauspost/compress v1.17.6 // indirect github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/mattn/go-sqlite3 v1.14.18 // indirect github.com/minio/md5-simd v1.1.2 // indirect @@ -60,7 +59,6 @@ require ( github.com/rs/xid v1.5.0 // indirect github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df // indirect - github.com/sirupsen/logrus v1.9.3 // indirect gitlab.com/NebulousLabs/bolt v1.4.4 // indirect gitlab.com/NebulousLabs/demotemutex v0.0.0-20151003192217-235395f71c40 // indirect gitlab.com/NebulousLabs/entropy-mnemonics v0.0.0-20181018051301-7532f67e3500 // indirect @@ -76,8 +74,8 @@ require ( gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213 // indirect go.sia.tech/web v0.0.0-20231213145933-3f175a86abff // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.19.0 // indirect - golang.org/x/sys v0.16.0 // indirect + golang.org/x/net v0.21.0 // indirect + golang.org/x/sys v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.16.1 // indirect diff --git a/go.sum b/go.sum index 54719eabd..9b7f15042 100644 --- a/go.sum +++ b/go.sum @@ -9,14 +9,14 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/aws/aws-sdk-go v1.44.256/go.mod 
h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go v1.49.1 h1:Dsamcd8d/nNb3A+bZ0ucfGl0vGZsW5wlRW0vhoYGoeQ= -github.com/aws/aws-sdk-go v1.49.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.50.1 h1:AwnLUM7TcH9vMZqA4TcDKmGfLmDW5VXwT5tPH6kXylo= +github.com/aws/aws-sdk-go v1.50.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/cloudflare-go v0.75.0 h1:03a4EkwwsDo0yAHjQ/l+D36K9wTkvr0afDiI/uHQ0Xw= -github.com/cloudflare/cloudflare-go v0.75.0/go.mod h1:5ocQT9qQ99QsT1Ii2751490Z5J+W/nv6jOj+lSAe4ug= +github.com/cloudflare/cloudflare-go v0.86.0 h1:jEKN5VHNYNYtfDL2lUFLTRo+nOVNPFxpXTstVx0rqHI= +github.com/cloudflare/cloudflare-go v0.86.0/go.mod h1:wYW/5UP02TUfBToa/yKbQHV+r6h1NnJ1Je7XjuGM4Jw= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -64,8 +64,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= @@ -106,8 +106,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= +github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid v1.2.2/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= @@ -126,8 +126,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI= @@ -135,8 +135,8 @@ github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.66 h1:bnTOXOHjOqv/gcMuiVbN9o2ngRItvqE774dG9nq0Dzw= -github.com/minio/minio-go/v7 v7.0.66/go.mod h1:DHAgmyQEGdW3Cif0UooKOyrT3Vxs82zNdV6tkKhRtbs= +github.com/minio/minio-go/v7 v7.0.68 h1:hTqSIfLlpXaKuNy4baAp4Jjy2sqZEN9hRxD0M4aOfrQ= +github.com/minio/minio-go/v7 v7.0.68/go.mod h1:XAvOPJQ5Xlzk5o3o/ArO2NMbhSGkimC+bpW/ngRKDmQ= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -178,8 +178,6 @@ github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIG github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df/go.mod h1:dcuzJZ83w/SqN9k4eQqwKYMgmKWzg/KzJAURBhRL1tc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= @@ -193,7 +191,6 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -239,31 +236,33 @@ 
gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213/go.mod h1 gitlab.com/NebulousLabs/writeaheadlog v0.0.0-20200618142844-c59a90f49130/go.mod h1:SxigdS5Q1ui+OMgGAXt1E/Fg3RB6PvKXMov2O3gvIzs= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.sia.tech/core v0.1.12-0.20231211182757-77190f04f90b h1:xJSxYN2kZD3NAijHIwjXhG5+7GoPyjDNIJPEoD3b72g= -go.sia.tech/core v0.1.12-0.20231211182757-77190f04f90b/go.mod h1:3EoY+rR78w1/uGoXXVqcYdwSjSJKuEMI5bL7WROA27Q= +go.sia.tech/core v0.2.1 h1:CqmMd+T5rAhC+Py3NxfvGtvsj/GgwIqQHHVrdts/LqY= +go.sia.tech/core v0.2.1/go.mod h1:3EoY+rR78w1/uGoXXVqcYdwSjSJKuEMI5bL7WROA27Q= +go.sia.tech/coreutils v0.0.3 h1:ZxuzovRpQMvfy/pCOV4om1cPF6sE15GyJyK36kIrF1Y= +go.sia.tech/coreutils v0.0.3/go.mod h1:UBFc77wXiE//eyilO5HLOncIEj7F69j0Nv2OkFujtP0= go.sia.tech/gofakes3 v0.0.0-20231109151325-e0d47c10dce2 h1:ulzfJNjxN5DjXHClkW2pTiDk+eJ+0NQhX87lFDZ03t0= go.sia.tech/gofakes3 v0.0.0-20231109151325-e0d47c10dce2/go.mod h1:PlsiVCn6+wssrR7bsOIlZm0DahsVrDydrlbjY4F14sg= -go.sia.tech/hostd v0.3.0-beta.1 h1:A2RL4wkW18eb28+fJtdyK9OYNiiwpCDO8FO3cyT9r7A= -go.sia.tech/hostd v0.3.0-beta.1/go.mod h1:gVtU631RkbtOEHJKb8qghudhWcYIL8w3phjvV2/bz0A= -go.sia.tech/jape v0.11.1 h1:M7IP+byXL7xOqzxcHUQuXW+q3sYMkYzmMlMw+q8ZZw0= -go.sia.tech/jape v0.11.1/go.mod h1:4QqmBB+t3W7cNplXPj++ZqpoUb2PeiS66RLpXmEGap4= +go.sia.tech/hostd v1.0.2 h1:GjzNIAlwg3/dViF6258Xn5DI3+otQLRqmkoPDugP+9Y= +go.sia.tech/hostd v1.0.2/go.mod h1:zGw+AGVmazAp4ydvo7bZLNKTy1J51RI6Mp/oxRtYT6c= +go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 h1:mSaJ622P7T/M97dAK8iPV+IRIC9M5vV28NHeceoWO3M= +go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640/go.mod h1:4QqmBB+t3W7cNplXPj++ZqpoUb2PeiS66RLpXmEGap4= go.sia.tech/mux v1.2.0 h1:ofa1Us9mdymBbGMY2XH/lSpY8itFsKIo/Aq8zwe+GHU= go.sia.tech/mux v1.2.0/go.mod h1:Yyo6wZelOYTyvrHmJZ6aQfRoer3o4xyKQ4NmQLJrBSo= go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca h1:aZMg2AKevn7jKx+wlusWQfwSM5pNU9aGtRZme29q3O4= go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca/go.mod h1:h/1afFwpxzff6/gG5i1XdAgPK7dEY6FaibhK7N5F86Y= go.sia.tech/web v0.0.0-20231213145933-3f175a86abff h1:/nE7nhewDRxzEdtSKT4SkiUwtjPSiy7Xz7CHEW3MaGQ= go.sia.tech/web v0.0.0-20231213145933-3f175a86abff/go.mod h1:RKODSdOmR3VtObPAcGwQqm4qnqntDVFylbvOBbWYYBU= -go.sia.tech/web/renterd v0.44.0 h1:yKu1Kq/6ssV9Vbv4oa+sn2Pc2TNyfcrv/mRPNOuYuB0= -go.sia.tech/web/renterd v0.44.0/go.mod h1:FgXrdmAnu591a3h96RB/15pMZ74xO9457g902uE06BM= +go.sia.tech/web/renterd v0.49.0 h1:z9iDr3gIJ60zqiydDZ2MUbhANm6GwdvRf4k67+Zrj14= +go.sia.tech/web/renterd v0.49.0/go.mod h1:FgXrdmAnu591a3h96RB/15pMZ74xO9457g902uE06BM= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod 
h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -275,8 +274,8 @@ golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg= +golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= @@ -301,8 +300,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -325,21 +324,20 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys 
v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210421210424-b80969c67360/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -386,14 +384,13 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/mysql v1.5.2 h1:QC2HRskSE75wBuOxe0+iCkyJZ+RqpudsQtqkp+IMuXs= -gorm.io/driver/mysql v1.5.2/go.mod h1:pQLhh1Ut/WUAySdTHwBpBv6+JKcj+ua4ZFx1QQTBzb8= -gorm.io/driver/sqlite v1.5.4 h1:IqXwXi8M/ZlPzH/947tn5uik3aYQslP9BVveoax0nV0= -gorm.io/driver/sqlite v1.5.4/go.mod h1:qxAuCol+2r6PannQDpOP1FP6ag3mKi4esLnB/jHed+4= -gorm.io/gorm v1.25.2-0.20230530020048-26663ab9bf55/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= +gorm.io/driver/mysql v1.5.4 h1:igQmHfKcbaTVyAIHNhhB888vvxh8EdQ2uSUT0LPcBso= +gorm.io/driver/mysql v1.5.4/go.mod h1:9rYxJph/u9SWkWc9yY4XJ1F/+xO0S/ChOmbk3+Z5Tvs= +gorm.io/driver/sqlite v1.5.5 h1:7MDMtUZhV065SilG62E0MquljeArQZNfJnjd9i9gx3E= +gorm.io/driver/sqlite v1.5.5/go.mod h1:6NgQ7sQWAIFsPrJJl1lSNSu2TABh0ZZ/zm5fosATavE= +gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gorm.io/gorm v1.25.7 h1:VsD6acwRjz2zFxGO50gPO6AkNs7KKnvfzUjHQhZDz/A= gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/internal/node/node.go b/internal/node/node.go index 6ffe29bf5..e94cfbb4d 100644 --- a/internal/node/node.go +++ b/internal/node/node.go @@ -194,7 +194,7 @@ func NewBus(cfg BusConfig, dir string, seed types.PrivateKey, l *zap.Logger) (ht func NewWorker(cfg config.Worker, b worker.Bus, seed types.PrivateKey, l *zap.Logger) (http.Handler, ShutdownFn, error) { workerKey := blake2b.Sum256(append([]byte("worker"), seed...)) - w, err := worker.New(workerKey, cfg.ID, b, cfg.ContractLockTimeout, 
cfg.BusFlushInterval, cfg.DownloadOverdriveTimeout, cfg.UploadOverdriveTimeout, cfg.DownloadMaxOverdrive, cfg.DownloadMaxMemory, cfg.UploadMaxMemory, cfg.UploadMaxOverdrive, cfg.AllowPrivateIPs, l) + w, err := worker.New(workerKey, cfg.ID, b, cfg.ContractLockTimeout, cfg.BusFlushInterval, cfg.DownloadOverdriveTimeout, cfg.UploadOverdriveTimeout, cfg.DownloadMaxOverdrive, cfg.UploadMaxOverdrive, cfg.DownloadMaxMemory, cfg.UploadMaxMemory, cfg.AllowPrivateIPs, l) if err != nil { return nil, nil, err } diff --git a/internal/node/transactionpool.go b/internal/node/transactionpool.go index b2226bfb5..c5582a757 100644 --- a/internal/node/transactionpool.go +++ b/internal/node/transactionpool.go @@ -15,7 +15,7 @@ type txpool struct { func (tp txpool) RecommendedFee() (fee types.Currency) { _, max := tp.tp.FeeEstimation() - convertToCore(&max, &fee) + convertToCore(&max, (*types.V1Currency)(&fee)) return } diff --git a/internal/test/config.go b/internal/test/config.go new file mode 100644 index 000000000..7553fa16d --- /dev/null +++ b/internal/test/config.go @@ -0,0 +1,64 @@ +package test + +import ( + "time" + + "github.com/minio/minio-go/v7/pkg/credentials" + rhpv2 "go.sia.tech/core/rhp/v2" + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" +) + +var ( + // AutopilotConfig is the autopilot used for testing unless a different + // one is explicitly set. + AutopilotConfig = api.AutopilotConfig{ + Contracts: api.ContractsConfig{ + Allowance: types.Siacoins(1).Mul64(1e3), + Amount: 3, + Period: 144, + RenewWindow: 72, + + Download: rhpv2.SectorSize * 500, + Upload: rhpv2.SectorSize * 500, + Storage: rhpv2.SectorSize * 5e3, + + Set: ContractSet, + Prune: false, + }, + Hosts: api.HostsConfig{ + MaxDowntimeHours: 10, + MinRecentScanFailures: 10, + AllowRedundantIPs: true, // allow for integration tests by default + }, + } + + ContractSet = "testset" + ContractSetSettings = api.ContractSetSetting{ + Default: ContractSet, + } + + GougingSettings = api.GougingSettings{ + MinMaxCollateral: types.Siacoins(10), // at least up to 10 SC per contract + MaxRPCPrice: types.Siacoins(1).Div64(1000), // 1mS per RPC + MaxContractPrice: types.Siacoins(10), // 10 SC per contract + MaxDownloadPrice: types.Siacoins(1).Mul64(1000), // 1000 SC per 1 TiB + MaxUploadPrice: types.Siacoins(1).Mul64(1000), // 1000 SC per 1 TiB + MaxStoragePrice: types.Siacoins(1000).Div64(144 * 30), // 1000 SC per month + + HostBlockHeightLeeway: 240, // amount of leeway given to host block height + + MinPriceTableValidity: 10 * time.Second, // minimum value for price table validity + MinAccountExpiry: time.Hour, // minimum value for account expiry + MinMaxEphemeralAccountBalance: types.Siacoins(1), // 1SC + } + + RedundancySettings = api.RedundancySettings{ + MinShards: 2, + TotalShards: 3, + } + + S3AccessKeyID = "TESTINGYNHUWCPKOPSYQ" + S3SecretAccessKey = "Rh30BNyj+qNI4ftYRteoZbHJ3X4Ln71QtZkRXzJ9" + S3Credentials = credentials.NewStaticV4(S3AccessKeyID, S3SecretAccessKey, "") +) diff --git a/internal/testing/blocklist_test.go b/internal/test/e2e/blocklist_test.go similarity index 94% rename from internal/testing/blocklist_test.go rename to internal/test/e2e/blocklist_test.go index 9d9a12605..64acc2fba 100644 --- a/internal/testing/blocklist_test.go +++ b/internal/test/e2e/blocklist_test.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "context" @@ -8,6 +8,7 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" ) func TestBlocklist(t *testing.T) { @@ -26,7 +27,7 @@ func 
TestBlocklist(t *testing.T) { tt := cluster.tt // fetch contracts - contracts, err := b.Contracts(ctx, api.ContractsOpts{ContractSet: testAutopilotConfig.Contracts.Set}) + contracts, err := b.Contracts(ctx, api.ContractsOpts{ContractSet: test.AutopilotConfig.Contracts.Set}) tt.OK(err) if len(contracts) != 3 { t.Fatalf("unexpected number of contracts, %v != 3", len(contracts)) @@ -40,7 +41,7 @@ func TestBlocklist(t *testing.T) { // assert h3 is no longer in the contract set tt.Retry(5, time.Second, func() error { - contracts, err := b.Contracts(ctx, api.ContractsOpts{ContractSet: testAutopilotConfig.Contracts.Set}) + contracts, err := b.Contracts(ctx, api.ContractsOpts{ContractSet: test.AutopilotConfig.Contracts.Set}) tt.OK(err) if len(contracts) != 2 { return fmt.Errorf("unexpected number of contracts, %v != 2", len(contracts)) @@ -60,7 +61,7 @@ func TestBlocklist(t *testing.T) { // assert h1 is no longer in the contract set tt.Retry(5, time.Second, func() error { - contracts, err := b.Contracts(ctx, api.ContractsOpts{ContractSet: testAutopilotConfig.Contracts.Set}) + contracts, err := b.Contracts(ctx, api.ContractsOpts{ContractSet: test.AutopilotConfig.Contracts.Set}) tt.OK(err) if len(contracts) != 1 { return fmt.Errorf("unexpected number of contracts, %v != 1", len(contracts)) @@ -77,7 +78,7 @@ func TestBlocklist(t *testing.T) { tt.OK(b.UpdateHostAllowlist(ctx, nil, []types.PublicKey{hk1, hk2}, false)) tt.OK(b.UpdateHostBlocklist(ctx, nil, []string{h1.NetAddress}, false)) tt.Retry(5, time.Second, func() error { - contracts, err := b.Contracts(ctx, api.ContractsOpts{ContractSet: testAutopilotConfig.Contracts.Set}) + contracts, err := b.Contracts(ctx, api.ContractsOpts{ContractSet: test.AutopilotConfig.Contracts.Set}) tt.OK(err) if len(contracts) != 3 { return fmt.Errorf("unexpected number of contracts, %v != 3", len(contracts)) diff --git a/internal/testing/cluster.go b/internal/test/e2e/cluster.go similarity index 84% rename from internal/testing/cluster.go rename to internal/test/e2e/cluster.go index d55539cd7..16b3acbfd 100644 --- a/internal/testing/cluster.go +++ b/internal/test/e2e/cluster.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "context" @@ -9,15 +9,12 @@ import ( "net/http" "os" "path/filepath" - "strings" "sync" "testing" "time" "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" "go.sia.tech/core/consensus" - rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/jape" "go.sia.tech/renterd/api" @@ -25,6 +22,7 @@ import ( "go.sia.tech/renterd/bus" "go.sia.tech/renterd/config" "go.sia.tech/renterd/internal/node" + "go.sia.tech/renterd/internal/test" "go.sia.tech/renterd/s3" "go.sia.tech/renterd/stores" "go.uber.org/zap" @@ -36,109 +34,16 @@ import ( ) const ( - testBusFlushInterval = 100 * time.Millisecond - testContractSet = "testset" - testPersistInterval = 2 * time.Second - latestHardforkHeight = 50 // foundation hardfork height in testing + testBusFlushInterval = 100 * time.Millisecond + testBusPersistInterval = 2 * time.Second + latestHardforkHeight = 50 // foundation hardfork height in testing ) var ( clusterOptsDefault = testClusterOptions{} clusterOptNoFunding = false - - // testAutopilotConfig is the autopilot used for testing unless a different - // one is explicitly set. 
- testAutopilotConfig = api.AutopilotConfig{ - Contracts: api.ContractsConfig{ - Allowance: types.Siacoins(1).Mul64(1e3), - Amount: 3, - Period: 144, - RenewWindow: 72, - - Download: rhpv2.SectorSize * 500, - Upload: rhpv2.SectorSize * 500, - Storage: rhpv2.SectorSize * 5e3, - - Set: testContractSet, - Prune: false, - }, - Hosts: api.HostsConfig{ - MaxDowntimeHours: 10, - MinRecentScanFailures: 10, - AllowRedundantIPs: true, // allow for integration tests by default - }, - } - - testContractSetSettings = api.ContractSetSetting{ - Default: testContractSet, - } - - testGougingSettings = api.GougingSettings{ - MinMaxCollateral: types.Siacoins(10), // at least up to 10 SC per contract - MaxRPCPrice: types.Siacoins(1).Div64(1000), // 1mS per RPC - MaxContractPrice: types.Siacoins(10), // 10 SC per contract - MaxDownloadPrice: types.Siacoins(1).Mul64(1000), // 1000 SC per 1 TiB - MaxUploadPrice: types.Siacoins(1).Mul64(1000), // 1000 SC per 1 TiB - MaxStoragePrice: types.Siacoins(1000).Div64(144 * 30), // 1000 SC per month - - HostBlockHeightLeeway: 240, // amount of leeway given to host block height - - MinPriceTableValidity: 10 * time.Second, // minimum value for price table validity - MinAccountExpiry: time.Hour, // minimum value for account expiry - MinMaxEphemeralAccountBalance: types.Siacoins(1), // 1SC - } - - testRedundancySettings = api.RedundancySettings{ - MinShards: 2, - TotalShards: 3, - } - - testS3AccessKeyID = "TESTINGYNHUWCPKOPSYQ" - testS3SecretAccessKey = "Rh30BNyj+qNI4ftYRteoZbHJ3X4Ln71QtZkRXzJ9" - testS3Credentials = credentials.NewStaticV4(testS3AccessKeyID, testS3SecretAccessKey, "") ) -type TT struct { - *testing.T -} - -func (t TT) AssertContains(err error, target string) { - t.Helper() - if err == nil || !strings.Contains(err.Error(), target) { - t.Fatalf("err: %v != target: %v", err, target) - } -} - -func (t TT) AssertIs(err, target error) { - t.Helper() - t.AssertContains(err, target.Error()) -} - -func (t TT) OK(err error) { - t.Helper() - if err != nil { - t.Fatal(err) - } -} - -func (t TT) OKAll(vs ...interface{}) { - t.Helper() - for _, v := range vs { - if err, ok := v.(error); ok && err != nil { - t.Fatal(err) - } - } -} - -func (t TT) FailAll(vs ...interface{}) { - t.Helper() - for _, v := range vs { - if err, ok := v.(error); ok && err == nil { - t.Fatal("should've failed") - } - } -} - // TestCluster is a helper type that allows for easily creating a number of // nodes connected to each other and ready for testing. type TestCluster struct { @@ -161,7 +66,7 @@ type TestCluster struct { dbName string dir string logger *zap.Logger - tt *TT + tt test.TT wk types.PrivateKey wg sync.WaitGroup } @@ -203,33 +108,17 @@ func randomPassword() string { return hex.EncodeToString(frand.Bytes(32)) } -// Retry will call 'fn' 'tries' times, waiting 'durationBetweenAttempts' -// between each attempt, returning 'nil' the first time that 'fn' returns nil. -// If 'nil' is never returned, then the final error returned by 'fn' is -// returned. -func (tt *TT) Retry(tries int, durationBetweenAttempts time.Duration, fn func() error) { - tt.Helper() - for i := 1; i < tries; i++ { - err := fn() - if err == nil { - return - } - time.Sleep(durationBetweenAttempts) - } - tt.OK(fn()) -} - // Reboot simulates a reboot of the cluster by calling Shutdown and creating a // new cluster using the same settings as the previous one. // NOTE: Simulating a reboot means that the hosts stay active and are not // restarted. 
-func (c *TestCluster) Reboot(ctx context.Context) *TestCluster { +func (c *TestCluster) Reboot(t *testing.T) *TestCluster { c.tt.Helper() hosts := c.hosts c.hosts = nil c.Shutdown() - newCluster := newTestCluster(c.tt.T, testClusterOptions{ + newCluster := newTestCluster(t, testClusterOptions{ dir: c.dir, dbName: c.dbName, logger: c.logger, @@ -265,6 +154,7 @@ type testClusterOptions struct { logger *zap.Logger uploadPacking bool skipSettingAutopilot bool + skipRunningAutopilot bool walletKey *types.PrivateKey autopilotCfg *node.AutopilotConfig @@ -302,7 +192,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { if testing.Short() { t.SkipNow() } - tt := &TT{t} + tt := test.NewTT(t) // Ensure we don't hang ctx, cancel := context.WithTimeout(context.Background(), time.Minute) @@ -344,7 +234,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { if opts.uploadPacking { enableUploadPacking = opts.uploadPacking } - apSettings := testAutopilotConfig + apSettings := test.AutopilotConfig if opts.autopilotSettings != nil { apSettings = *opts.autopilotSettings } @@ -397,14 +287,14 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { busClient := bus.NewClient(busAddr, busPassword) workerClient := worker.NewClient(workerAddr, workerPassword) s3Client, err := minio.New(s3Addr, &minio.Options{ - Creds: testS3Credentials, + Creds: test.S3Credentials, Secure: false, }) tt.OK(err) url := s3Client.EndpointURL() s3Core, err := minio.NewCore(url.Host+url.Path, &minio.Options{ - Creds: testS3Credentials, + Creds: test.S3Credentials, }) tt.OK(err) @@ -504,15 +394,17 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { _ = autopilotServer.Serve(autopilotListener) cluster.wg.Done() }() - cluster.wg.Add(1) - go func() { - _ = aStartFn() - cluster.wg.Done() - }() + if !opts.skipRunningAutopilot { + cluster.wg.Add(1) + go func() { + _ = aStartFn() + cluster.wg.Done() + }() + } // Set the test contract set to make sure we can add objects at the // beginning of a test right away. - tt.OK(busClient.SetContractSet(ctx, testContractSet, []types.FileContractID{})) + tt.OK(busClient.SetContractSet(ctx, test.ContractSet, []types.FileContractID{})) // Update the autopilot to use test settings if !opts.skipSettingAutopilot { @@ -523,11 +415,11 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { } // Update the bus settings. 
- tt.OK(busClient.UpdateSetting(ctx, api.SettingGouging, testGougingSettings)) - tt.OK(busClient.UpdateSetting(ctx, api.SettingRedundancy, testRedundancySettings)) - tt.OK(busClient.UpdateSetting(ctx, api.SettingContractSet, testContractSetSettings)) + tt.OK(busClient.UpdateSetting(ctx, api.SettingGouging, test.GougingSettings)) + tt.OK(busClient.UpdateSetting(ctx, api.SettingRedundancy, test.RedundancySettings)) + tt.OK(busClient.UpdateSetting(ctx, api.SettingContractSet, test.ContractSetSettings)) tt.OK(busClient.UpdateSetting(ctx, api.SettingS3Authentication, api.S3AuthenticationSettings{ - V4Keypairs: map[string]string{testS3AccessKeyID: testS3SecretAccessKey}, + V4Keypairs: map[string]string{test.S3AccessKeyID: test.S3SecretAccessKey}, })) tt.OK(busClient.UpdateSetting(ctx, api.SettingUploadPacking, api.UploadPackingSettings{Enabled: enableUploadPacking})) @@ -558,7 +450,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { if nHosts > 0 { cluster.AddHostsBlocking(nHosts) cluster.WaitForContracts() - cluster.WaitForContractSet(testContractSet, nHosts) + cluster.WaitForContractSet(test.ContractSet, nHosts) _ = cluster.WaitForAccounts() } @@ -983,7 +875,7 @@ func testBusCfg() node.BusConfig { AnnouncementMaxAgeHours: 24 * 7 * 52, // 1 year Bootstrap: false, GatewayAddr: "127.0.0.1:0", - PersistInterval: testPersistInterval, + PersistInterval: testBusPersistInterval, UsedUTXOExpiry: time.Minute, SlabBufferCompletionThreshold: 0, }, diff --git a/internal/testing/cluster_test.go b/internal/test/e2e/cluster_test.go similarity index 87% rename from internal/testing/cluster_test.go rename to internal/test/e2e/cluster_test.go index b0de2946e..5ca7141d5 100644 --- a/internal/testing/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "bytes" @@ -24,6 +24,7 @@ import ( "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/internal/test" "go.sia.tech/renterd/object" "go.sia.tech/renterd/wallet" "go.uber.org/zap" @@ -264,7 +265,7 @@ func TestObjectEntries(t *testing.T) { // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() @@ -435,7 +436,7 @@ func TestObjectsRename(t *testing.T) { // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() @@ -491,7 +492,7 @@ func TestUploadDownloadEmpty(t *testing.T) { // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() @@ -519,13 +520,13 @@ func TestUploadDownloadBasic(t *testing.T) { } // sanity check the default settings - if testAutopilotConfig.Contracts.Amount < uint64(testRedundancySettings.MinShards) { + if test.AutopilotConfig.Contracts.Amount < uint64(test.RedundancySettings.MinShards) { t.Fatal("too few hosts to support the redundancy settings") } // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() @@ -546,8 +547,8 @@ func TestUploadDownloadBasic(t *testing.T) { for _, slab := range resp.Object.Slabs { hosts := make(map[types.PublicKey]struct{}) roots := 
make(map[types.Hash256]struct{}) - if len(slab.Shards) != testRedundancySettings.TotalShards { - t.Fatal("wrong amount of shards", len(slab.Shards), testRedundancySettings.TotalShards) + if len(slab.Shards) != test.RedundancySettings.TotalShards { + t.Fatal("wrong amount of shards", len(slab.Shards), test.RedundancySettings.TotalShards) } for _, shard := range slab.Shards { if shard.LatestHost == (types.PublicKey{}) { @@ -631,13 +632,13 @@ func TestUploadDownloadExtended(t *testing.T) { } // sanity check the default settings - if testAutopilotConfig.Contracts.Amount < uint64(testRedundancySettings.MinShards) { + if test.AutopilotConfig.Contracts.Amount < uint64(test.RedundancySettings.MinShards) { t.Fatal("too few hosts to support the redundancy settings") } // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() @@ -697,25 +698,33 @@ func TestUploadDownloadExtended(t *testing.T) { } // check objects stats. - info, err := cluster.Bus.ObjectsStats() - tt.OK(err) - objectsSize := uint64(len(file1) + len(file2) + len(small) + len(large)) - if info.TotalObjectsSize != objectsSize { - t.Error("wrong size", info.TotalObjectsSize, objectsSize) - } - sectorsSize := 15 * rhpv2.SectorSize - if info.TotalSectorsSize != uint64(sectorsSize) { - t.Error("wrong size", info.TotalSectorsSize, sectorsSize) - } - if info.TotalUploadedSize != uint64(sectorsSize) { - t.Error("wrong size", info.TotalUploadedSize, sectorsSize) - } - if info.NumObjects != 4 { - t.Error("wrong number of objects", info.NumObjects, 4) - } - if info.MinHealth != 1 { - t.Errorf("expected minHealth of 1, got %v", info.MinHealth) - } + tt.Retry(100, 100*time.Millisecond, func() error { + for _, opts := range []api.ObjectsStatsOpts{ + {}, // any bucket + {Bucket: api.DefaultBucketName}, // specific bucket + } { + info, err := cluster.Bus.ObjectsStats(context.Background(), opts) + tt.OK(err) + objectsSize := uint64(len(file1) + len(file2) + len(small) + len(large)) + if info.TotalObjectsSize != objectsSize { + return fmt.Errorf("wrong size %v %v", info.TotalObjectsSize, objectsSize) + } + sectorsSize := 15 * rhpv2.SectorSize + if info.TotalSectorsSize != uint64(sectorsSize) { + return fmt.Errorf("wrong size %v %v", info.TotalSectorsSize, sectorsSize) + } + if info.TotalUploadedSize != uint64(sectorsSize) { + return fmt.Errorf("wrong size %v %v", info.TotalUploadedSize, sectorsSize) + } + if info.NumObjects != 4 { + return fmt.Errorf("wrong number of objects %v %v", info.NumObjects, 4) + } + if info.MinHealth != 1 { + return fmt.Errorf("expected minHealth of 1, got %v", info.MinHealth) + } + } + return nil + }) // download the data for _, data := range [][]byte{small, large} { @@ -763,18 +772,18 @@ func TestUploadDownloadExtended(t *testing.T) { // and download spending metrics are tracked properly. 
func TestUploadDownloadSpending(t *testing.T) { // sanity check the default settings - if testAutopilotConfig.Contracts.Amount < uint64(testRedundancySettings.MinShards) { + if test.AutopilotConfig.Contracts.Amount < uint64(test.RedundancySettings.MinShards) { t.Fatal("too few hosts to support the redundancy settings") } // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() w := cluster.Worker - rs := testRedundancySettings + rs := test.RedundancySettings tt := cluster.tt // check that the funding was recorded @@ -883,7 +892,7 @@ func TestUploadDownloadSpending(t *testing.T) { } // fetch contract set contracts - contracts, err := cluster.Bus.Contracts(context.Background(), api.ContractsOpts{ContractSet: testAutopilotConfig.Contracts.Set}) + contracts, err := cluster.Bus.Contracts(context.Background(), api.ContractsOpts{ContractSet: test.AutopilotConfig.Contracts.Set}) tt.OK(err) currentSet := make(map[types.FileContractID]struct{}) for _, c := range contracts { @@ -1044,7 +1053,7 @@ func TestEphemeralAccounts(t *testing.T) { } // Reboot cluster. - cluster2 := cluster.Reboot(context.Background()) + cluster2 := cluster.Reboot(t) defer cluster2.Shutdown() // Check that accounts were loaded from the bus. @@ -1082,7 +1091,7 @@ func TestParallelUpload(t *testing.T) { // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() @@ -1160,7 +1169,7 @@ func TestParallelDownload(t *testing.T) { // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() @@ -1238,7 +1247,7 @@ func TestEphemeralAccountSync(t *testing.T) { } // Restart cluster to have worker fetch the account from the bus again. - cluster2 := cluster.Reboot(context.Background()) + cluster2 := cluster.Reboot(t) defer cluster2.Shutdown() // Account should need a sync. 
@@ -1277,7 +1286,7 @@ func TestUploadDownloadSameHost(t *testing.T) { // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() tt := cluster.tt @@ -1308,7 +1317,7 @@ func TestUploadDownloadSameHost(t *testing.T) { // build a frankenstein object constructed with all sectors on the same host res.Object.Slabs[0].Shards = shards[res.Object.Slabs[0].Shards[0].LatestHost] - tt.OK(b.AddObject(context.Background(), api.DefaultBucketName, "frankenstein", testContractSet, res.Object.Object, api.AddObjectOptions{})) + tt.OK(b.AddObject(context.Background(), api.DefaultBucketName, "frankenstein", test.ContractSet, *res.Object.Object, api.AddObjectOptions{})) // assert we can download this object tt.OK(w.DownloadObject(context.Background(), io.Discard, api.DefaultBucketName, "frankenstein", api.DownloadObjectOptions{})) @@ -1516,20 +1525,20 @@ func TestUploadPacking(t *testing.T) { } // sanity check the default settings - if testAutopilotConfig.Contracts.Amount < uint64(testRedundancySettings.MinShards) { + if test.AutopilotConfig.Contracts.Amount < uint64(test.RedundancySettings.MinShards) { t.Fatal("too few hosts to support the redundancy settings") } // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, uploadPacking: true, }) defer cluster.Shutdown() b := cluster.Bus w := cluster.Worker - rs := testRedundancySettings + rs := test.RedundancySettings tt := cluster.tt // prepare 3 files which are all smaller than a slab but together make up @@ -1633,7 +1642,7 @@ func TestUploadPacking(t *testing.T) { download("file4", data4, 0, int64(len(data4))) // assert number of objects - os, err := b.ObjectsStats() + os, err := b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) tt.OK(err) if os.NumObjects != 5 { t.Fatalf("expected 5 objects, got %v", os.NumObjects) @@ -1642,7 +1651,7 @@ func TestUploadPacking(t *testing.T) { // check the object size stats, we use a retry loop since packed slabs are // uploaded in a separate goroutine, so the object stats might lag a bit tt.Retry(60, time.Second, func() error { - os, err := b.ObjectsStats() + os, err := b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) if err != nil { t.Fatal(err) } @@ -1764,7 +1773,7 @@ func TestSlabBufferStats(t *testing.T) { } // sanity check the default settings - if testAutopilotConfig.Contracts.Amount < uint64(testRedundancySettings.MinShards) { + if test.AutopilotConfig.Contracts.Amount < uint64(test.RedundancySettings.MinShards) { t.Fatal("too few hosts to support the redundancy settings") } @@ -1774,14 +1783,14 @@ func TestSlabBufferStats(t *testing.T) { busCfg.SlabBufferCompletionThreshold = int64(threshold) cluster := newTestCluster(t, testClusterOptions{ busCfg: &busCfg, - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, uploadPacking: true, }) defer cluster.Shutdown() b := cluster.Bus w := cluster.Worker - rs := testRedundancySettings + rs := test.RedundancySettings tt := cluster.tt // prepare 3 files which are all smaller than a slab but together make up @@ -1796,7 +1805,7 @@ func TestSlabBufferStats(t *testing.T) { tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data1), api.DefaultBucketName, "1", api.UploadObjectOptions{})) // assert number of objects - os, err := b.ObjectsStats() + os, err := 
b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) tt.OK(err) if os.NumObjects != 1 { t.Fatalf("expected 1 object, got %d", os.NumObjects) @@ -1805,7 +1814,7 @@ func TestSlabBufferStats(t *testing.T) { // check the object size stats, we use a retry loop since packed slabs are // uploaded in a separate goroutine, so the object stats might lag a bit tt.Retry(60, time.Second, func() error { - os, err := b.ObjectsStats() + os, err := b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) if err != nil { t.Fatal(err) } @@ -1830,8 +1839,8 @@ func TestSlabBufferStats(t *testing.T) { if len(buffers) != 1 { t.Fatal("expected 1 slab buffer, got", len(buffers)) } - if buffers[0].ContractSet != testContractSet { - t.Fatalf("expected slab buffer contract set of %v, got %v", testContractSet, buffers[0].ContractSet) + if buffers[0].ContractSet != test.ContractSet { + t.Fatalf("expected slab buffer contract set of %v, got %v", test.ContractSet, buffers[0].ContractSet) } if buffers[0].Size != int64(len(data1)) { t.Fatalf("expected slab buffer size of %v, got %v", len(data1), buffers[0].Size) @@ -1853,7 +1862,7 @@ func TestSlabBufferStats(t *testing.T) { tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data2), api.DefaultBucketName, "2", api.UploadObjectOptions{})) // assert number of objects - os, err = b.ObjectsStats() + os, err = b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) tt.OK(err) if os.NumObjects != 2 { t.Fatalf("expected 1 object, got %d", os.NumObjects) @@ -1862,7 +1871,7 @@ func TestSlabBufferStats(t *testing.T) { // check the object size stats, we use a retry loop since packed slabs are // uploaded in a separate goroutine, so the object stats might lag a bit tt.Retry(60, time.Second, func() error { - os, err := b.ObjectsStats() + os, err := b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) tt.OK(err) if os.TotalObjectsSize != uint64(len(data1)+len(data2)) { return fmt.Errorf("expected totalObjectSize of %d, got %d", len(data1)+len(data2), os.TotalObjectsSize) @@ -1915,9 +1924,9 @@ func TestAlerts(t *testing.T) { tt.OK(b.RegisterAlert(context.Background(), alert)) findAlert := func(id types.Hash256) *alerts.Alert { t.Helper() - alerts, err := b.Alerts() + ar, err := b.Alerts(context.Background(), alerts.AlertsOpts{}) tt.OK(err) - for _, alert := range alerts { + for _, alert := range ar.Alerts { if alert.ID == id { return &alert } @@ -1938,6 +1947,72 @@ func TestAlerts(t *testing.T) { if foundAlert != nil { t.Fatal("alert found") } + + // register 2 alerts + alert2 := alert + alert2.ID = frand.Entropy256() + alert2.Timestamp = time.Now().Add(time.Second) + tt.OK(b.RegisterAlert(context.Background(), alert)) + tt.OK(b.RegisterAlert(context.Background(), alert2)) + if foundAlert := findAlert(alert.ID); foundAlert == nil { + t.Fatal("alert not found") + } else if foundAlert := findAlert(alert2.ID); foundAlert == nil { + t.Fatal("alert not found") + } + + // try to find with offset = 1 + ar, err := b.Alerts(context.Background(), alerts.AlertsOpts{Offset: 1}) + foundAlerts := ar.Alerts + tt.OK(err) + if len(foundAlerts) != 1 || foundAlerts[0].ID != alert.ID { + t.Fatal("wrong alert") + } + + // try to find with limit = 1 + ar, err = b.Alerts(context.Background(), alerts.AlertsOpts{Limit: 1}) + foundAlerts = ar.Alerts + tt.OK(err) + if len(foundAlerts) != 1 || foundAlerts[0].ID != alert2.ID { + t.Fatal("wrong alert") + } + + // register more alerts + for severity := alerts.SeverityInfo; severity <= alerts.SeverityCritical; severity++ { + for j := 
0; j < 3*int(severity); j++ { + tt.OK(b.RegisterAlert(context.Background(), alerts.Alert{ + ID: frand.Entropy256(), + Severity: severity, + Message: "test", + Data: map[string]interface{}{ + "origin": "test", + }, + Timestamp: time.Now(), + })) + } + } + for severity := alerts.SeverityInfo; severity <= alerts.SeverityCritical; severity++ { + ar, err = b.Alerts(context.Background(), alerts.AlertsOpts{Severity: severity}) + tt.OK(err) + if ar.Total() != 32 { + t.Fatal("expected 32 alerts", ar.Total()) + } else if ar.Totals.Info != 3 { + t.Fatal("expected 3 info alerts", ar.Totals.Info) + } else if ar.Totals.Warning != 6 { + t.Fatal("expected 6 warning alerts", ar.Totals.Warning) + } else if ar.Totals.Error != 9 { + t.Fatal("expected 9 error alerts", ar.Totals.Error) + } else if ar.Totals.Critical != 14 { + t.Fatal("expected 14 critical alerts", ar.Totals.Critical) + } else if severity == alerts.SeverityInfo && len(ar.Alerts) != ar.Totals.Info { + t.Fatalf("expected %v info alerts, got %v", ar.Totals.Info, len(ar.Alerts)) + } else if severity == alerts.SeverityWarning && len(ar.Alerts) != ar.Totals.Warning { + t.Fatalf("expected %v warning alerts, got %v", ar.Totals.Warning, len(ar.Alerts)) + } else if severity == alerts.SeverityError && len(ar.Alerts) != ar.Totals.Error { + t.Fatalf("expected %v error alerts, got %v", ar.Totals.Error, len(ar.Alerts)) + } else if severity == alerts.SeverityCritical && len(ar.Alerts) != ar.Totals.Critical { + t.Fatalf("expected %v critical alerts, got %v", ar.Totals.Critical, len(ar.Alerts)) + } + } } func TestMultipartUploads(t *testing.T) { @@ -1946,7 +2021,7 @@ func TestMultipartUploads(t *testing.T) { } cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, uploadPacking: true, }) defer cluster.Shutdown() @@ -1957,7 +2032,7 @@ func TestMultipartUploads(t *testing.T) { // Start a new multipart upload. objPath := "/foo" - mpr, err := b.CreateMultipartUpload(context.Background(), api.DefaultBucketName, objPath, api.CreateMultipartOptions{Key: object.GenerateEncryptionKey()}) + mpr, err := b.CreateMultipartUpload(context.Background(), api.DefaultBucketName, objPath, api.CreateMultipartOptions{GenerateKey: true}) tt.OK(err) if mpr.UploadID == "" { t.Fatal("expected non-empty upload ID") @@ -1976,7 +2051,7 @@ func TestMultipartUploads(t *testing.T) { // correctly. putPart := func(partNum int, offset int, data []byte) string { t.Helper() - res, err := w.UploadMultipartUploadPart(context.Background(), bytes.NewReader(data), api.DefaultBucketName, objPath, mpr.UploadID, partNum, api.UploadMultipartUploadPartOptions{EncryptionOffset: offset}) + res, err := w.UploadMultipartUploadPart(context.Background(), bytes.NewReader(data), api.DefaultBucketName, objPath, mpr.UploadID, partNum, api.UploadMultipartUploadPartOptions{EncryptionOffset: &offset}) tt.OK(err) if res.ETag == "" { t.Fatal("expected non-empty ETag") @@ -2006,7 +2081,7 @@ func TestMultipartUploads(t *testing.T) { } // Check objects stats. - os, err := b.ObjectsStats() + os, err := b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) tt.OK(err) if os.NumObjects != 0 { t.Fatalf("expected 0 object, got %v", os.NumObjects) @@ -2065,7 +2140,7 @@ func TestMultipartUploads(t *testing.T) { } // Check objects stats. 
- os, err = b.ObjectsStats() + os, err = b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) tt.OK(err) if os.NumObjects != 1 { t.Fatalf("expected 1 object, got %v", os.NumObjects) @@ -2192,7 +2267,7 @@ func TestWalletFormUnconfirmed(t *testing.T) { } // Enable autopilot by setting it. - cluster.UpdateAutopilotConfig(context.Background(), testAutopilotConfig) + cluster.UpdateAutopilotConfig(context.Background(), test.AutopilotConfig) // Wait for a contract to form. contractsFormed := cluster.WaitForContracts() @@ -2226,8 +2301,8 @@ func TestBusRecordedMetrics(t *testing.T) { for _, m := range csMetrics { if m.Contracts != 1 { t.Fatalf("expected 1 contract, got %v", m.Contracts) - } else if m.Name != testContractSet { - t.Fatalf("expected contract set %v, got %v", testContractSet, m.Name) + } else if m.Name != test.ContractSet { + t.Fatalf("expected contract set %v, got %v", test.ContractSet, m.Name) } else if m.Timestamp.Std().Before(startTime) { t.Fatalf("expected time to be after start time %v, got %v", startTime, m.Timestamp.Std()) } @@ -2243,8 +2318,8 @@ func TestBusRecordedMetrics(t *testing.T) { t.Fatalf("expected added churn, got %v", m.Direction) } else if m.ContractID == (types.FileContractID{}) { t.Fatal("expected non-zero FCID") - } else if m.Name != testContractSet { - t.Fatalf("expected contract set %v, got %v", testContractSet, m.Name) + } else if m.Name != test.ContractSet { + t.Fatalf("expected contract set %v, got %v", test.ContractSet, m.Name) } else if m.Timestamp.Std().Before(startTime) { t.Fatalf("expected time to be after start time %v, got %v", startTime, m.Timestamp.Std()) } @@ -2303,19 +2378,20 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) { } cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, uploadPacking: true, }) defer cluster.Shutdown() - defer cluster.Shutdown() + b := cluster.Bus w := cluster.Worker - slabSize := testRedundancySettings.SlabSizeNoRedundancy() + slabSize := test.RedundancySettings.SlabSizeNoRedundancy() tt := cluster.tt // start a new multipart upload. 
We upload the parts in reverse order objPath := "/foo" - mpr, err := b.CreateMultipartUpload(context.Background(), api.DefaultBucketName, objPath, api.CreateMultipartOptions{Key: object.GenerateEncryptionKey()}) + key := object.GenerateEncryptionKey() + mpr, err := b.CreateMultipartUpload(context.Background(), api.DefaultBucketName, objPath, api.CreateMultipartOptions{Key: &key}) tt.OK(err) if mpr.UploadID == "" { t.Fatal("expected non-empty upload ID") @@ -2323,22 +2399,25 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) { // upload a part that is a partial slab part3Data := bytes.Repeat([]byte{3}, int(slabSize)/4) + offset := int(slabSize + slabSize/4) resp3, err := w.UploadMultipartUploadPart(context.Background(), bytes.NewReader(part3Data), api.DefaultBucketName, objPath, mpr.UploadID, 3, api.UploadMultipartUploadPartOptions{ - EncryptionOffset: int(slabSize + slabSize/4), + EncryptionOffset: &offset, }) tt.OK(err) // upload a part that is exactly a full slab part2Data := bytes.Repeat([]byte{2}, int(slabSize)) + offset = int(slabSize / 4) resp2, err := w.UploadMultipartUploadPart(context.Background(), bytes.NewReader(part2Data), api.DefaultBucketName, objPath, mpr.UploadID, 2, api.UploadMultipartUploadPartOptions{ - EncryptionOffset: int(slabSize / 4), + EncryptionOffset: &offset, }) tt.OK(err) // upload another part the same size as the first one part1Data := bytes.Repeat([]byte{1}, int(slabSize)/4) + offset = 0 resp1, err := w.UploadMultipartUploadPart(context.Background(), bytes.NewReader(part1Data), api.DefaultBucketName, objPath, mpr.UploadID, 1, api.UploadMultipartUploadPartOptions{ - EncryptionOffset: 0, + EncryptionOffset: &offset, }) tt.OK(err) @@ -2369,3 +2448,149 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) { t.Fatal("unexpected data") } } + +func TestWalletRedistribute(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + + cluster := newTestCluster(t, testClusterOptions{ + hosts: test.RedundancySettings.TotalShards, + uploadPacking: true, + }) + defer cluster.Shutdown() + + // redistribute into 5 outputs + _, err := cluster.Bus.WalletRedistribute(context.Background(), 5, types.Siacoins(10)) + if err != nil { + t.Fatal(err) + } + cluster.MineBlocks(1) + + // assert we have 5 outputs with 10 SC + outputs, err := cluster.Bus.WalletOutputs(context.Background()) + if err != nil { + t.Fatal(err) + } + + var cnt int + for _, output := range outputs { + if output.Value.Cmp(types.Siacoins(10)) == 0 { + cnt++ + } + } + if cnt != 5 { + t.Fatalf("expected 5 outputs with 10 SC, got %v", cnt) + } + + // assert redistributing into 3 outputs succeeds, used to fail because we + // were broadcasting an empty transaction set + _, err = cluster.Bus.WalletRedistribute(context.Background(), 3, types.Siacoins(10)) + if err != nil { + t.Fatal(err) + } +} + +func TestHostScan(t *testing.T) { + // New cluster with autopilot disabled + cfg := clusterOptsDefault + cfg.skipRunningAutopilot = true + cluster := newTestCluster(t, cfg) + defer cluster.Shutdown() + + b := cluster.Bus + w := cluster.Worker + tt := cluster.tt + + // add 2 hosts to the cluster, 1 to scan and 1 to make sure we always have 1 + // peer and consider ourselves connected to the internet + hosts := cluster.AddHosts(2) + host := hosts[0] + + settings, err := host.RHPv2Settings() + tt.OK(err) + + hk := host.PublicKey() + hostIP := settings.NetAddress + + assertHost := func(ls time.Time, lss, slss bool, ts uint64) { + t.Helper() + + hi, err := b.Host(context.Background(), host.PublicKey()) + 
tt.OK(err) + + if ls.IsZero() && !hi.Interactions.LastScan.IsZero() { + t.Fatal("expected last scan to be zero") + } else if !ls.IsZero() && !hi.Interactions.LastScan.After(ls) { + t.Fatal("expected last scan to be after", ls) + } else if hi.Interactions.LastScanSuccess != lss { + t.Fatalf("expected last scan success to be %v, got %v", lss, hi.Interactions.LastScanSuccess) + } else if hi.Interactions.SecondToLastScanSuccess != slss { + t.Fatalf("expected second to last scan success to be %v, got %v", slss, hi.Interactions.SecondToLastScanSuccess) + } else if hi.Interactions.TotalScans != ts { + t.Fatalf("expected total scans to be %v, got %v", ts, hi.Interactions.TotalScans) + } + } + + scanHost := func() error { + // timing on the CI can be weird, wait a bit to make sure time passes + // between scans + time.Sleep(time.Millisecond) + + resp, err := w.RHPScan(context.Background(), hk, hostIP, 10*time.Second) + tt.OK(err) + if resp.ScanError != "" { + return errors.New(resp.ScanError) + } + return nil + } + + assertHost(time.Time{}, false, false, 0) + + // scan the host the first time + ls := time.Now() + if err := scanHost(); err != nil { + t.Fatal(err) + } + assertHost(ls, true, false, 1) + + // scan the host the second time + ls = time.Now() + if err := scanHost(); err != nil { + t.Fatal(err) + } + assertHost(ls, true, true, 2) + + // close the host to make scans fail + tt.OK(host.Close()) + + // scan the host a third time + ls = time.Now() + if err := scanHost(); err == nil { + t.Fatal("expected scan error") + } + assertHost(ls, false, true, 3) + + // fetch hosts for scanning with maxLastScan set to now which should return + // all hosts + tt.Retry(100, 100*time.Millisecond, func() error { + toScan, err := b.HostsForScanning(context.Background(), api.HostsForScanningOptions{ + MaxLastScan: api.TimeRFC3339(time.Now()), + }) + tt.OK(err) + if len(toScan) != 2 { + return fmt.Errorf("expected 2 hosts, got %v", len(toScan)) + } + return nil + }) + + // fetch hosts again with the unix epoch timestamp which should only return + // 1 host since that one hasn't been scanned yet + toScan, err := b.HostsForScanning(context.Background(), api.HostsForScanningOptions{ + MaxLastScan: api.TimeRFC3339(time.Unix(0, 1)), + }) + tt.OK(err) + if len(toScan) != 1 { + t.Fatalf("expected 1 hosts, got %v", len(toScan)) + } +} diff --git a/internal/testing/gouging_test.go b/internal/test/e2e/gouging_test.go similarity index 67% rename from internal/testing/gouging_test.go rename to internal/test/e2e/gouging_test.go index 7a812354f..68dc264eb 100644 --- a/internal/testing/gouging_test.go +++ b/internal/test/e2e/gouging_test.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "bytes" @@ -10,6 +10,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" "go.uber.org/zap/zapcore" "lukechampine.com/frand" ) @@ -21,12 +22,12 @@ func TestGouging(t *testing.T) { // create a new test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: int(testAutopilotConfig.Contracts.Amount), + hosts: int(test.AutopilotConfig.Contracts.Amount), logger: newTestLoggerCustom(zapcore.ErrorLevel), }) defer cluster.Shutdown() - cfg := testAutopilotConfig.Contracts + cfg := test.AutopilotConfig.Contracts b := cluster.Bus w := cluster.Worker tt := cluster.tt @@ -52,17 +53,21 @@ func TestGouging(t *testing.T) { t.Fatal("unexpected data") } + // update the gouging settings to limit the max storage price to 100H + gs := test.GougingSettings + 
gs.MaxStoragePrice = types.NewCurrency64(100) + if err := b.UpdateSetting(context.Background(), api.SettingGouging, gs); err != nil { + t.Fatal(err) + } // fetch current contract set contracts, err := b.Contracts(context.Background(), api.ContractsOpts{ContractSet: cfg.Set}) tt.OK(err) - // update the host settings so it's gouging + // update one host's settings so it's gouging hk := contracts[0].HostKey host := hostsMap[hk.String()] settings := host.settings.Settings() - settings.IngressPrice = types.Siacoins(1) - settings.EgressPrice = types.Siacoins(1) - settings.ContractPrice = types.Siacoins(11) + settings.StoragePrice = types.NewCurrency64(101) // gouging tt.OK(host.UpdateSettings(settings)) // make sure the price table expires so the worker is forced to fetch it @@ -75,7 +80,7 @@ func TestGouging(t *testing.T) { // update all host settings so they're gouging for _, h := range cluster.hosts { settings := h.settings.Settings() - settings.EgressPrice = types.Siacoins(1) + settings.StoragePrice = types.NewCurrency64(101) if err := h.UpdateSettings(settings); err != nil { t.Fatal(err) } @@ -90,4 +95,20 @@ func TestGouging(t *testing.T) { if err := w.DownloadObject(context.Background(), &buffer, api.DefaultBucketName, path, api.DownloadObjectOptions{}); err == nil { t.Fatal("expected download to fail", err) } + + // try optimising gouging settings + resp, err := cluster.Autopilot.EvaluateConfig(context.Background(), test.AutopilotConfig, gs, test.RedundancySettings) + tt.OK(err) + if resp.Recommendation == nil { + t.Fatal("expected recommendation") + } + + // set optimised settings + tt.OK(b.UpdateSetting(context.Background(), api.SettingGouging, resp.Recommendation.GougingSettings)) + + // upload some data - should work now once contract maintenance is done + tt.Retry(30, time.Second, func() error { + _, err := w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, path, api.UploadObjectOptions{}) + return err + }) } diff --git a/internal/testing/host.go b/internal/test/e2e/host.go similarity index 99% rename from internal/testing/host.go rename to internal/test/e2e/host.go index e7943a7d3..6100adad5 100644 --- a/internal/testing/host.go +++ b/internal/test/e2e/host.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "context" diff --git a/internal/testing/interactions_test.go b/internal/test/e2e/interactions_test.go similarity index 99% rename from internal/testing/interactions_test.go rename to internal/test/e2e/interactions_test.go index 686003e02..021d75cb6 100644 --- a/internal/testing/interactions_test.go +++ b/internal/test/e2e/interactions_test.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "context" diff --git a/internal/test/e2e/metadata_test.go b/internal/test/e2e/metadata_test.go new file mode 100644 index 000000000..d11f6ba4e --- /dev/null +++ b/internal/test/e2e/metadata_test.go @@ -0,0 +1,88 @@ +package e2e + +import ( + "bytes" + "context" + "reflect" + "testing" + + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" + "go.uber.org/zap" +) + +func TestObjectMetadata(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + + // create cluster + cluster := newTestCluster(t, testClusterOptions{ + hosts: test.RedundancySettings.TotalShards, + logger: zap.NewNop(), + }) + defer cluster.Shutdown() + + // convenience variables + w := cluster.Worker + b := cluster.Bus + + // create options to pass metadata + opts := api.UploadObjectOptions{ + Metadata: api.ObjectUserMetadata{"Foo": "bar", "Baz": "quux"}, + } + + // 
upload the object + data := []byte(t.Name()) + _, err := w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, t.Name(), opts) + if err != nil { + t.Fatal(err) + } + + // get the object from the bus and assert it has the metadata + or, err := b.Object(context.Background(), api.DefaultBucketName, t.Name(), api.GetObjectOptions{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(or.Object.Metadata, opts.Metadata) { + t.Fatal("metadata mismatch", or.Object.Metadata) + } + + // get the object from the worker and assert it has the metadata + gor, err := w.GetObject(context.Background(), api.DefaultBucketName, t.Name(), api.DownloadObjectOptions{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gor.Metadata, opts.Metadata) { + t.Fatal("metadata mismatch", gor.Metadata) + } + + // perform a HEAD request and assert the headers are all present + hor, err := w.HeadObject(context.Background(), api.DefaultBucketName, t.Name(), api.HeadObjectOptions{Range: api.DownloadRange{Offset: 1, Length: 1}}) + if err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(hor, &api.HeadObjectResponse{ + ContentType: or.Object.ContentType(), + LastModified: or.Object.LastModified(), + Range: &api.DownloadRange{Offset: 1, Length: 1, Size: int64(len(data))}, + Size: int64(len(data)), + Metadata: gor.Metadata, + }) { + t.Fatalf("unexpected response: %+v", hor) + } + + // re-upload the object + _, err = w.UploadObject(context.Background(), bytes.NewReader([]byte(t.Name())), api.DefaultBucketName, t.Name(), api.UploadObjectOptions{}) + if err != nil { + t.Fatal(err) + } + + // assert metadata was removed + gor, err = w.GetObject(context.Background(), api.DefaultBucketName, t.Name(), api.DownloadObjectOptions{}) + if err != nil { + t.Fatal(err) + } + if len(gor.Metadata) > 0 { + t.Fatal("unexpected metadata", gor.Metadata) + } +} diff --git a/internal/testing/metrics_test.go b/internal/test/e2e/metrics_test.go similarity index 94% rename from internal/testing/metrics_test.go rename to internal/test/e2e/metrics_test.go index 7dd0195f5..aaa139102 100644 --- a/internal/testing/metrics_test.go +++ b/internal/test/e2e/metrics_test.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "bytes" @@ -10,6 +10,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" "lukechampine.com/frand" ) @@ -22,12 +23,12 @@ func TestMetrics(t *testing.T) { start := time.Now() // enable pruning - apCfg := testAutopilotConfig + apCfg := test.AutopilotConfig apCfg.Contracts.Prune = true // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, autopilotSettings: &apCfg, }) defer cluster.Shutdown() diff --git a/internal/testing/migrations_test.go b/internal/test/e2e/migrations_test.go similarity index 88% rename from internal/testing/migrations_test.go rename to internal/test/e2e/migrations_test.go index 2afbcebb6..91bcc20b7 100644 --- a/internal/testing/migrations_test.go +++ b/internal/test/e2e/migrations_test.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "bytes" @@ -10,6 +10,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" "lukechampine.com/frand" ) @@ -19,13 +20,13 @@ func TestMigrations(t *testing.T) { } // create a new test cluster - cfg := testAutopilotConfig - cfg.Contracts.Amount = uint64(testRedundancySettings.TotalShards) + 1 
+ cfg := test.AutopilotConfig + cfg.Contracts.Amount = uint64(test.RedundancySettings.TotalShards) + 1 cluster := newTestCluster(t, testClusterOptions{ // configure the cluster to use 1 more host than the total shards in the // redundancy settings. autopilotSettings: &cfg, - hosts: int(testRedundancySettings.TotalShards) + 1, + hosts: int(test.RedundancySettings.TotalShards) + 1, }) defer cluster.Shutdown() @@ -60,8 +61,8 @@ func TestMigrations(t *testing.T) { // assert amount of hosts used used := usedHosts(path) - if len(used) != testRedundancySettings.TotalShards { - t.Fatal("unexpected amount of hosts used", len(used), testRedundancySettings.TotalShards) + if len(used) != test.RedundancySettings.TotalShards { + t.Fatal("unexpected amount of hosts used", len(used), test.RedundancySettings.TotalShards) } // select one host to remove diff --git a/internal/testing/pruning_test.go b/internal/test/e2e/pruning_test.go similarity index 98% rename from internal/testing/pruning_test.go rename to internal/test/e2e/pruning_test.go index 80e6ab29d..de948c970 100644 --- a/internal/testing/pruning_test.go +++ b/internal/test/e2e/pruning_test.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "bytes" @@ -9,10 +9,10 @@ import ( "testing" "time" - rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/internal/test" ) func TestHostPruning(t *testing.T) { @@ -136,8 +136,8 @@ func TestSectorPruning(t *testing.T) { } // convenience variables - cfg := testAutopilotConfig - rs := testRedundancySettings + cfg := test.AutopilotConfig + rs := test.RedundancySettings w := cluster.Worker b := cluster.Bus tt := cluster.tt @@ -207,7 +207,7 @@ func TestSectorPruning(t *testing.T) { tt.Retry(100, 100*time.Millisecond, func() error { res, err = b.PrunableData(context.Background()) tt.OK(err) - if res.TotalPrunable != uint64(math.Ceil(float64(numObjects)/2))*uint64(rs.TotalShards)*rhpv2.SectorSize { + if res.TotalPrunable != uint64(math.Ceil(float64(numObjects)/2))*rs.SlabSize() { return fmt.Errorf("unexpected prunable data %v", n) } return nil diff --git a/internal/testing/s3_test.go b/internal/test/e2e/s3_test.go similarity index 97% rename from internal/testing/s3_test.go rename to internal/test/e2e/s3_test.go index ced1fbcc0..b25e11871 100644 --- a/internal/testing/s3_test.go +++ b/internal/test/e2e/s3_test.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "bytes" @@ -15,6 +15,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/gofakes3" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" "go.uber.org/zap" "lukechampine.com/frand" ) @@ -30,7 +31,7 @@ func TestS3Basic(t *testing.T) { start := time.Now() cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() @@ -176,7 +177,7 @@ func TestS3ObjectMetadata(t *testing.T) { // create cluster opts := testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, logger: zap.NewNop(), } cluster := newTestCluster(t, opts) @@ -288,7 +289,7 @@ func TestS3Authentication(t *testing.T) { // Create client with credentials and try again.. 
s3Authenticated, err := minio.NewCore(url, &minio.Options{ - Creds: testS3Credentials, + Creds: test.S3Credentials, }) tt.OK(err) @@ -328,7 +329,7 @@ func TestS3Authentication(t *testing.T) { func TestS3List(t *testing.T) { cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, uploadPacking: true, }) defer cluster.Shutdown() @@ -463,7 +464,7 @@ func TestS3MultipartUploads(t *testing.T) { } cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, uploadPacking: true, }) defer cluster.Shutdown() @@ -594,7 +595,7 @@ func TestS3MultipartPruneSlabs(t *testing.T) { } cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, uploadPacking: true, }) defer cluster.Shutdown() @@ -623,7 +624,7 @@ func TestS3MultipartPruneSlabs(t *testing.T) { // Upload 1 regular object. It will share the same packed slab, cause the // packed slab to be complete and start a new one. - data = frand.Bytes(testRedundancySettings.MinShards*rhpv2.SectorSize - 1) + data = frand.Bytes(test.RedundancySettings.MinShards*rhpv2.SectorSize - 1) tt.OKAll(s3.PutObject(context.Background(), bucket, "bar", bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{})) // Block until the buffer is uploaded. @@ -648,7 +649,7 @@ func TestS3SpecialChars(t *testing.T) { } cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, uploadPacking: true, }) defer cluster.Shutdown() diff --git a/internal/testing/uploads_test.go b/internal/test/e2e/uploads_test.go similarity index 96% rename from internal/testing/uploads_test.go rename to internal/test/e2e/uploads_test.go index e3e938120..3f83fd7e4 100644 --- a/internal/testing/uploads_test.go +++ b/internal/test/e2e/uploads_test.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "bytes" @@ -10,6 +10,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" "lukechampine.com/frand" ) @@ -49,12 +50,12 @@ func TestUploadingSectorsCache(t *testing.T) { } cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() w := cluster.Worker b := cluster.Bus - rs := testRedundancySettings + rs := test.RedundancySettings tt := cluster.tt // generate some random data diff --git a/internal/test/tt.go b/internal/test/tt.go new file mode 100644 index 000000000..d44152eda --- /dev/null +++ b/internal/test/tt.go @@ -0,0 +1,106 @@ +package test + +import ( + "strings" + "time" +) + +type ( + TT interface { + TestingCommon + + AssertContains(err error, target string) + AssertIs(err, target error) + FailAll(vs ...interface{}) + OK(err error) + OKAll(vs ...interface{}) + + // Retry will call 'fn' 'tries' times, waiting 'durationBetweenAttempts' + // between each attempt, returning 'nil' the first time that 'fn' + // returns nil. If 'nil' is never returned, then the final error + // returned by 'fn' is returned. + Retry(tries int, durationBetweenAttempts time.Duration, fn func() error) + } + + // TestingCommon is an interface that describes the common methods of + // testing.T and testing.B ensuring this testutil can be used in both + // contexts. 
+ TestingCommon interface { + Log(args ...any) + Logf(format string, args ...any) + Error(args ...any) + Errorf(format string, args ...any) + Fatal(args ...any) + Fatalf(format string, args ...any) + Skip(args ...any) + Skipf(format string, args ...any) + SkipNow() + Skipped() bool + Helper() + Cleanup(f func()) + TempDir() string + Setenv(key, value string) + } + + impl struct { + TestingCommon + } +) + +func NewTT(tc TestingCommon) TT { + return &impl{TestingCommon: tc} +} + +func (t impl) AssertContains(err error, target string) { + t.Helper() + if err == nil || !strings.Contains(err.Error(), target) { + t.Fatalf("err: %v != target: %v", err, target) + } +} + +func (t impl) AssertIs(err, target error) { + t.Helper() + t.AssertContains(err, target.Error()) +} + +func (t impl) FailAll(vs ...interface{}) { + t.Helper() + for _, v := range vs { + if err, ok := v.(error); ok && err == nil { + t.Fatal("should've failed") + } + } +} + +func (t impl) OK(err error) { + t.Helper() + if err != nil { + t.Fatal(err) + } +} + +func (t impl) OKAll(vs ...interface{}) { + t.Helper() + for _, v := range vs { + if err, ok := v.(error); ok && err != nil { + t.Fatal(err) + } + } +} + +func (t impl) Retry(tries int, durationBetweenAttempts time.Duration, fn func() error) { + t.Helper() + t.OK(Retry(tries, durationBetweenAttempts, fn)) +} + +func Retry(tries int, durationBetweenAttempts time.Duration, fn func() error) error { + var err error + for i := 0; i < tries; i++ { + err = fn() + if err == nil { + break + } + time.Sleep(durationBetweenAttempts) + } + return err +} diff --git a/internal/testing/metadata_test.go b/internal/testing/metadata_test.go deleted file mode 100644 index c88c8650d..000000000 --- a/internal/testing/metadata_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package testing - -import ( - "bytes" - "context" - "reflect" - "testing" - - "go.sia.tech/renterd/api" - "go.uber.org/zap" -) - -func TestObjectMetadata(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - - // create cluster - cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, - logger: zap.NewNop(), - }) - defer cluster.Shutdown() - - // convenience variables - w := cluster.Worker - b := cluster.Bus - - // create options to pass metadata - opts := api.UploadObjectOptions{ - Metadata: api.ObjectUserMetadata{"Foo": "bar", "Baz": "quux"}, - } - - // upload the object - _, err := w.UploadObject(context.Background(), bytes.NewReader([]byte(t.Name())), api.DefaultBucketName, t.Name(), opts) - if err != nil { - t.Fatal(err) - } - - // get the object from the bus and assert it has the metadata - ress, err := b.Object(context.Background(), api.DefaultBucketName, t.Name(), api.GetObjectOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(ress.Object.Metadata, opts.Metadata) { - t.Fatal("metadata mismatch", ress.Object.Metadata) - } - - // get the object from the worker and assert it has the metadata - res, err := w.GetObject(context.Background(), api.DefaultBucketName, t.Name(), api.DownloadObjectOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(res.Metadata, opts.Metadata) { - t.Fatal("metadata mismatch", res.Metadata) - } - - // re-upload the object - _, err = w.UploadObject(context.Background(), bytes.NewReader([]byte(t.Name())), api.DefaultBucketName, t.Name(), api.UploadObjectOptions{}) - if err != nil { - t.Fatal(err) - } - - // assert metadata was removed - res, err = w.GetObject(context.Background(), api.DefaultBucketName, t.Name(), 
api.DownloadObjectOptions{}) - if err != nil { - t.Fatal(err) - } - if len(res.Metadata) > 0 { - t.Fatal("unexpected metadata", res.Metadata) - } -} diff --git a/object/object.go b/object/object.go index 49375f3b4..965ebce2a 100644 --- a/object/object.go +++ b/object/object.go @@ -3,6 +3,7 @@ package object import ( "bytes" "crypto/cipher" + "crypto/md5" "encoding/binary" "encoding/hex" "fmt" @@ -43,6 +44,9 @@ func (k *EncryptionKey) UnmarshalBinary(b []byte) error { // String implements fmt.Stringer. func (k EncryptionKey) String() string { + if k.entropy == nil { + return "" + } return "key:" + hex.EncodeToString(k.entropy[:]) } @@ -110,9 +114,12 @@ func GenerateEncryptionKey() EncryptionKey { } // An Object is a unit of data that has been stored on a host. +// NOTE: Object is embedded in the API's Object type, so all fields should be +// tagged omitempty to make sure responses where no object is returned remain +// clean. type Object struct { - Key EncryptionKey `json:"key"` - Slabs []SlabSlice `json:"slabs"` + Key EncryptionKey `json:"key,omitempty"` + Slabs []SlabSlice `json:"slabs,omitempty"` } // NewObject returns a new Object with a random key. @@ -139,6 +146,22 @@ func (o Object) Contracts() map[types.PublicKey]map[types.FileContractID]struct{ return usedContracts } +func (o *Object) ComputeETag() string { + // calculate the eTag using the precomputed sector roots to avoid having to + // hash the entire object again. + h := md5.New() + b := make([]byte, 8) + for _, slab := range o.Slabs { + binary.LittleEndian.PutUint32(b[:4], slab.Offset) + binary.LittleEndian.PutUint32(b[4:], slab.Length) + h.Write(b) + for _, shard := range slab.Shards { + h.Write(shard.Root[:]) + } + } + return string(hex.EncodeToString(h.Sum(nil))) +} + // TotalSize returns the total size of the object. func (o Object) TotalSize() int64 { var n int64 diff --git a/object/slab.go b/object/slab.go index 9c3afa608..f2762abf3 100644 --- a/object/slab.go +++ b/object/slab.go @@ -3,6 +3,7 @@ package object import ( "bytes" "io" + "sync" "github.com/klauspost/reedsolomon" rhpv2 "go.sia.tech/core/rhp/v2" @@ -79,11 +80,17 @@ func (s Slab) Length() int { // Encrypt xors shards with the keystream derived from s.Key, using a // different nonce for each shard. func (s Slab) Encrypt(shards [][]byte) { - for i, shard := range shards { - nonce := [24]byte{1: byte(i)} - c, _ := chacha20.NewUnauthenticatedCipher(s.Key.entropy[:], nonce[:]) - c.XORKeyStream(shard, shard) + var wg sync.WaitGroup + for i := range shards { + wg.Add(1) + go func(i int) { + nonce := [24]byte{1: byte(i)} + c, _ := chacha20.NewUnauthenticatedCipher(s.Key.entropy[:], nonce[:]) + c.XORKeyStream(shards[i], shards[i]) + wg.Done() + }(i) } + wg.Wait() } // Encode encodes slab data into sector-sized shards. The supplied shards should @@ -151,12 +158,18 @@ func (ss SlabSlice) SectorRegion() (offset, length uint32) { // slice offset), using a different nonce for each shard. 
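// Shards are decrypted concurrently, one goroutine per shard, mirroring the
// parallel Encrypt above; the WaitGroup below blocks until all shards are done.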
func (ss SlabSlice) Decrypt(shards [][]byte) { offset := ss.Offset / (rhpv2.LeafSize * uint32(ss.MinShards)) - for i, shard := range shards { - nonce := [24]byte{1: byte(i)} - c, _ := chacha20.NewUnauthenticatedCipher(ss.Key.entropy[:], nonce[:]) - c.SetCounter(offset) - c.XORKeyStream(shard, shard) + var wg sync.WaitGroup + for i := range shards { + wg.Add(1) + go func(i int) { + nonce := [24]byte{1: byte(i)} + c, _ := chacha20.NewUnauthenticatedCipher(ss.Key.entropy[:], nonce[:]) + c.SetCounter(offset) + c.XORKeyStream(shards[i], shards[i]) + wg.Done() + }(i) } + wg.Wait() } // Recover recovers a slice of slab data from the supplied shards. diff --git a/s3/backend.go b/s3/backend.go index a481da727..c05a3ec98 100644 --- a/s3/backend.go +++ b/s3/backend.go @@ -287,7 +287,10 @@ func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, range // HeadObject should return a NotFound() error if the object does not // exist. func (s *s3) HeadObject(ctx context.Context, bucketName, objectName string) (*gofakes3.Object, error) { - res, err := s.b.Object(ctx, bucketName, objectName, api.GetObjectOptions{IgnoreDelim: true}) + res, err := s.b.Object(ctx, bucketName, objectName, api.GetObjectOptions{ + IgnoreDelim: true, + OnlyMetadata: true, + }) if err != nil && strings.Contains(err.Error(), api.ErrObjectNotFound.Error()) { return nil, gofakes3.KeyNotFound(objectName) } else if err != nil { @@ -405,7 +408,7 @@ func (s *s3) CopyObject(ctx context.Context, srcBucket, srcKey, dstBucket, dstKe func (s *s3) CreateMultipartUpload(ctx context.Context, bucket, key string, meta map[string]string) (gofakes3.UploadID, error) { convertToSiaMetadataHeaders(meta) resp, err := s.b.CreateMultipartUpload(ctx, bucket, "/"+key, api.CreateMultipartOptions{ - Key: object.NoOpKey, + Key: &object.NoOpKey, MimeType: meta["Content-Type"], Metadata: api.ExtractObjectUserMetadataFrom(meta), }) @@ -418,8 +421,7 @@ func (s *s3) CreateMultipartUpload(ctx context.Context, bucket, key string, meta func (s *s3) UploadPart(ctx context.Context, bucket, object string, id gofakes3.UploadID, partNumber int, contentLength int64, input io.Reader) (*gofakes3.UploadPartResult, error) { res, err := s.w.UploadMultipartUploadPart(ctx, input, bucket, object, string(id), partNumber, api.UploadMultipartUploadPartOptions{ - DisablePreshardingEncryption: true, - ContentLength: contentLength, + ContentLength: contentLength, }) if err != nil { return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index a61f9eea3..35872ea2d 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -63,15 +63,8 @@ func TestSQLHostDB(t *testing.T) { // Insert an announcement for the host and another one for an unknown // host. - a := hostdb.Announcement{ - Index: types.ChainIndex{ - Height: 42, - ID: types.BlockID{1, 2, 3}, - }, - Timestamp: time.Now().UTC().Round(time.Second), - NetAddress: "address", - } - err = ss.insertTestAnnouncement(hk, a) + ann := newTestHostDBAnnouncement("address") + err = ss.insertTestAnnouncement(hk, ann) if err != nil { t.Fatal(err) } @@ -79,7 +72,7 @@ func TestSQLHostDB(t *testing.T) { // Read the host and verify that the announcement related fields were // set. var h dbHost - tx := ss.db.Where("last_announcement = ? AND net_address = ?", a.Timestamp, a.NetAddress).Find(&h) + tx := ss.db.Where("last_announcement = ? 
AND net_address = ?", ann.Timestamp, ann.NetAddress).Find(&h) if tx.Error != nil { t.Fatal(tx.Error) } @@ -116,7 +109,7 @@ func TestSQLHostDB(t *testing.T) { // Insert another announcement for an unknown host. unknownKey := types.PublicKey{1, 4, 7} - err = ss.insertTestAnnouncement(unknownKey, a) + err = ss.insertTestAnnouncement(unknownKey, ann) if err != nil { t.Fatal(err) } @@ -124,7 +117,7 @@ func TestSQLHostDB(t *testing.T) { if err != nil { t.Fatal(err) } - if h3.NetAddress != a.NetAddress { + if h3.NetAddress != ann.NetAddress { t.Fatal("wrong net address") } if h3.KnownSince.IsZero() { @@ -510,22 +503,18 @@ func TestInsertAnnouncements(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() - // Create announcements for 2 hosts. + // Create announcements for 3 hosts. ann1 := announcement{ - hostKey: publicKey(types.GeneratePrivateKey().PublicKey()), - announcement: hostdb.Announcement{ - Index: types.ChainIndex{Height: 1, ID: types.BlockID{1}}, - Timestamp: time.Now(), - NetAddress: "foo.bar:1000", - }, + hostKey: publicKey(types.GeneratePrivateKey().PublicKey()), + announcement: newTestHostDBAnnouncement("foo.bar:1000"), } ann2 := announcement{ hostKey: publicKey(types.GeneratePrivateKey().PublicKey()), - announcement: hostdb.Announcement{}, + announcement: newTestHostDBAnnouncement("bar.baz:1000"), } ann3 := announcement{ hostKey: publicKey(types.GeneratePrivateKey().PublicKey()), - announcement: hostdb.Announcement{}, + announcement: newTestHostDBAnnouncement("quz.qux:1000"), } // Insert the first one and check that all fields are set. @@ -1101,7 +1090,7 @@ func (s *SQLStore) addCustomTestHost(hk types.PublicKey, na string) error { s.unappliedHostKeys[hk] = struct{}{} s.unappliedAnnouncements = append(s.unappliedAnnouncements, []announcement{{ hostKey: publicKey(hk), - announcement: hostdb.Announcement{NetAddress: na}, + announcement: newTestHostDBAnnouncement(na), }}...) s.lastSave = time.Now().Add(s.persistInterval * -2) return s.applyUpdates(false) @@ -1153,6 +1142,14 @@ func newTestHostAnnouncement(na modules.NetAddress) (modules.HostAnnouncement, t }, sk } +func newTestHostDBAnnouncement(addr string) hostdb.Announcement { + return hostdb.Announcement{ + Index: types.ChainIndex{Height: 1, ID: types.BlockID{1}}, + Timestamp: time.Now().UTC().Round(time.Second), + NetAddress: addr, + } +} + func newTestTransaction(ha modules.HostAnnouncement, sk types.PrivateKey) stypes.Transaction { var buf bytes.Buffer buf.Write(encoding.Marshal(ha)) diff --git a/stores/metadata.go b/stores/metadata.go index f20f7dbb0..529d7ec89 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -410,14 +410,14 @@ func (s dbSlab) convert() (slab object.Slab, err error) { } func (raw rawObjectMetadata) convert() api.ObjectMetadata { - return api.ObjectMetadata{ - ETag: raw.ETag, - Health: raw.Health, - MimeType: raw.MimeType, - ModTime: api.TimeRFC3339(time.Time(raw.ModTime).UTC()), - Name: raw.Name, - Size: raw.Size, - } + return newObjectMetadata( + raw.Name, + raw.ETag, + raw.MimeType, + raw.Health, + time.Time(raw.ModTime), + raw.Size, + ) } func (raw rawObject) toSlabSlice() (slice object.SlabSlice, _ error) { @@ -582,70 +582,85 @@ func (s *SQLStore) ListBuckets(ctx context.Context) ([]api.Bucket, error) { // ObjectsStats returns some info related to the objects stored in the store. To // reduce locking and make sure all results are consistent, everything is done // within a single transaction. 
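// When opts.Bucket is set, every query below is additionally scoped to that
// bucket's id.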
-func (s *SQLStore) ObjectsStats(ctx context.Context) (api.ObjectsStatsResponse, error) { +func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) { + // fetch bucket id if a bucket was specified + var bucketID uint + if opts.Bucket != "" { + err := s.db.Model(&dbBucket{}).Select("id").Where("name = ?", opts.Bucket).Take(&bucketID).Error + if err != nil { + return api.ObjectsStatsResponse{}, err + } + } + // number of objects var objInfo struct { NumObjects uint64 MinHealth float64 TotalObjectsSize uint64 } - err := s.db. + objInfoQuery := s.db. Model(&dbObject{}). - Select("COUNT(*) AS NumObjects, COALESCE(MIN(health), 1) as MinHealth, SUM(size) AS TotalObjectsSize"). - Scan(&objInfo). - Error + Select("COUNT(*) AS NumObjects, COALESCE(MIN(health), 1) as MinHealth, SUM(size) AS TotalObjectsSize") + if opts.Bucket != "" { + objInfoQuery = objInfoQuery.Where("db_bucket_id", bucketID) + } + err := objInfoQuery.Scan(&objInfo).Error if err != nil { return api.ObjectsStatsResponse{}, err } // number of unfinished objects var unfinishedObjects uint64 - err = s.db. + unfinishedObjectsQuery := s.db. Model(&dbMultipartUpload{}). - Select("COUNT(*)"). - Scan(&unfinishedObjects). - Error + Select("COUNT(*)") + if opts.Bucket != "" { + unfinishedObjectsQuery = unfinishedObjectsQuery.Where("db_bucket_id", bucketID) + } + err = unfinishedObjectsQuery.Scan(&unfinishedObjects).Error if err != nil { return api.ObjectsStatsResponse{}, err } // size of unfinished objects var totalUnfinishedObjectsSize uint64 - err = s.db. + totalUnfinishedObjectsSizeQuery := s.db. Model(&dbMultipartPart{}). - Select("COALESCE(SUM(size), 0)"). - Scan(&totalUnfinishedObjectsSize). - Error + Joins("INNER JOIN multipart_uploads mu ON multipart_parts.db_multipart_upload_id = mu.id"). + Select("COALESCE(SUM(size), 0)") + if opts.Bucket != "" { + totalUnfinishedObjectsSizeQuery = totalUnfinishedObjectsSizeQuery.Where("db_bucket_id", bucketID) + } + err = totalUnfinishedObjectsSizeQuery.Scan(&totalUnfinishedObjectsSize).Error if err != nil { return api.ObjectsStatsResponse{}, err } - var totalSectors uint64 + var totalSectors int64 + totalSectorsQuery := s.db. + Table("slabs sla"). + Select("COALESCE(SUM(total_shards), 0)"). + Where("db_buffered_slab_id IS NULL") - batchSize := 500000 - marker := uint64(0) - for offset := 0; ; offset += batchSize { - var result struct { - Sectors uint64 - Marker uint64 - } - res := s.db. - Model(&dbSector{}). - Raw("SELECT COUNT(*) as Sectors, MAX(sectors.db_sector_id) as Marker FROM (SELECT cs.db_sector_id FROM contract_sectors cs WHERE cs.db_sector_id > ? GROUP BY cs.db_sector_id LIMIT ?) sectors", marker, batchSize). - Scan(&result) - if err := res.Error; err != nil { - return api.ObjectsStatsResponse{}, err - } else if result.Sectors == 0 { - break // done - } - totalSectors += result.Sectors - marker = result.Marker + if opts.Bucket != "" { + totalSectorsQuery = totalSectorsQuery.Where(` + EXISTS ( + SELECT 1 FROM slices sli + INNER JOIN objects o ON o.id = sli.db_object_id AND o.db_bucket_id = ? + WHERE sli.db_slab_id = sla.id + ) + `, bucketID) + } + err = totalSectorsQuery.Scan(&totalSectors).Error + if err != nil { + return api.ObjectsStatsResponse{}, err } var totalUploaded int64 err = s.db. - Model(&dbContractSector{}). - Count(&totalUploaded). + Model(&dbContract{}). + Select("COALESCE(SUM(size), 0)"). + Scan(&totalUploaded). 
Error if err != nil { return api.ObjectsStatsResponse{}, err @@ -657,8 +672,8 @@ func (s *SQLStore) ObjectsStats(ctx context.Context) (api.ObjectsStatsResponse, NumUnfinishedObjects: unfinishedObjects, TotalUnfinishedObjectsSize: totalUnfinishedObjectsSize, TotalObjectsSize: objInfo.TotalObjectsSize, - TotalSectorsSize: totalSectors * rhpv2.SectorSize, - TotalUploadedSize: uint64(totalUploaded) * rhpv2.SectorSize, + TotalSectorsSize: uint64(totalSectors) * rhpv2.SectorSize, + TotalUploadedSize: uint64(totalUploaded), }, nil } @@ -1484,6 +1499,10 @@ func (s *SQLStore) RenameObjects(ctx context.Context, bucket, prefixOld, prefixN gorm.Expr(sqlConcat(tx, "?", "SUBSTR(object_id, ?)")), prefixNew, utf8.RuneCountInString(prefixOld)+1, prefixOld+"%", utf8.RuneCountInString(prefixOld), prefixOld, sqlWhereBucket("objects", bucket)) + + if !isSQLite(tx) { + inner = tx.Raw("SELECT * FROM (?) as i", inner) + } resp := tx.Model(&dbObject{}). Where("object_id IN (?)", inner). Delete(&dbObject{}) @@ -1533,13 +1552,14 @@ func (s *SQLStore) CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath // No copying is happening. We just update the metadata on the src // object. srcObj.MimeType = mimeType - om = api.ObjectMetadata{ - Health: srcObj.Health, - MimeType: srcObj.MimeType, - ModTime: api.TimeRFC3339(srcObj.CreatedAt.UTC()), - Name: srcObj.ObjectID, - Size: srcObj.Size, - } + om = newObjectMetadata( + srcObj.ObjectID, + srcObj.Etag, + srcObj.MimeType, + srcObj.Health, + srcObj.CreatedAt, + srcObj.Size, + ) if err := s.updateUserMetadata(tx, srcObj.ID, metadata); err != nil { return fmt.Errorf("failed to update user metadata: %w", err) } @@ -1587,21 +1607,22 @@ func (s *SQLStore) CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath return fmt.Errorf("failed to create object metadata: %w", err) } - om = api.ObjectMetadata{ - MimeType: dstObj.MimeType, - ETag: dstObj.Etag, - Health: srcObj.Health, - ModTime: api.TimeRFC3339(dstObj.CreatedAt.UTC()), - Name: dstObj.ObjectID, - Size: dstObj.Size, - } + om = newObjectMetadata( + dstObj.ObjectID, + dstObj.Etag, + dstObj.MimeType, + dstObj.Health, + dstObj.CreatedAt, + dstObj.Size, + ) return nil }) return } -func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) error { - return s.retryTransaction(func(tx *gorm.DB) error { +func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) (int, error) { + var deletedSectors int + err := s.retryTransaction(func(tx *gorm.DB) error { // Fetch contract_sectors to delete. var sectors []dbContractSector err := tx.Raw(` @@ -1640,6 +1661,7 @@ func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, roo } else if res.RowsAffected != int64(len(sectors)) { return fmt.Errorf("expected %v affected rows but got %v", len(sectors), res.RowsAffected) } + deletedSectors = len(sectors) // Increment the host's lostSectors by the number of lost sectors. if err := tx.Exec("UPDATE hosts SET lost_sectors = lost_sectors + ? WHERE public_key = ?", len(sectors), publicKey(hk)).Error; err != nil { @@ -1667,6 +1689,7 @@ func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, roo } return nil }) + return deletedSectors, err } func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, eTag, mimeType string, metadata api.ObjectUserMetadata, o object.Object) error { @@ -1822,13 +1845,19 @@ func (ss *SQLStore) UpdateSlab(ctx context.Context, s object.Slab, contractSet s // Update slab. 
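// NOTE: the contract set id is now resolved inline via a subquery and the
// slab's health is reset to 1 in the same UPDATE, replacing the separate
// dbContractSet lookup that used to precede it.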
return ss.retryTransaction(func(tx *gorm.DB) (err error) { - // fetch contract set - var cs dbContractSet - if err := tx.Take(&cs, "name = ?", contractSet).Error; err != nil { + // update slab + if err := tx.Model(&dbSlab{}). + Where("key", key). + Updates(map[string]interface{}{ + "db_contract_set_id": gorm.Expr("(SELECT id FROM contract_sets WHERE name = ?)", contractSet), + "health_valid_until": time.Now().Unix(), + "health": 1, + }). + Error; err != nil { return err } - // find all contracts of that shard + // find all used contracts contracts, err := fetchUsedContracts(tx, usedContracts) if err != nil { return err @@ -1862,18 +1891,6 @@ func (ss *SQLStore) UpdateSlab(ctx context.Context, s object.Slab, contractSet s } } - // update fields - if err := tx.Model(&slab). - Where(&slab). - Updates(map[string]interface{}{ - "db_contract_set_id": cs.ID, - "health_valid_until": time.Now().Unix(), - "health": 1, - }). - Error; err != nil { - return err - } - // prepare sectors to update sectors := make([]dbSector, len(s.Shards)) for i := range s.Shards { @@ -1938,8 +1955,8 @@ func (s *SQLStore) RefreshHealth(ctx context.Context) error { // Update slab health in batches. now := time.Now() - for { - healthQuery := s.db.Raw(` + // build health query + healthQuery := s.db.Raw(` SELECT slabs.id, slabs.db_contract_set_id, CASE WHEN (slabs.min_shards = slabs.total_shards) THEN CASE WHEN (COUNT(DISTINCT(CASE WHEN cs.name IS NULL THEN NULL ELSE c.host_id END)) < slabs.min_shards) @@ -1958,50 +1975,32 @@ WHERE slabs.health_valid_until <= ? GROUP BY slabs.id LIMIT ? `, now.Unix(), refreshHealthBatchSize) + + for { var rowsAffected int64 err := s.retryTransaction(func(tx *gorm.DB) error { - // create temp table from the health query since we will reuse it - if err := tx.Exec("DROP TABLE IF EXISTS src").Error; err != nil { - return err - } else if err = tx.Exec("CREATE TEMPORARY TABLE src AS ?", healthQuery).Error; err != nil { - return err - } else if err = tx.Exec("CREATE INDEX src_id ON src (id)").Error; err != nil { - return err - } - var res *gorm.DB if isSQLite(s.db) { - res = tx.Exec("UPDATE slabs SET health = src.health, health_valid_until = (?) FROM src WHERE slabs.id=src.id", sqlRandomTimestamp(s.db, now, refreshHealthMinHealthValidity, refreshHealthMaxHealthValidity)) + res = tx.Exec("UPDATE slabs SET health = inner.health, health_valid_until = (?) FROM (?) AS inner WHERE slabs.id=inner.id", sqlRandomTimestamp(s.db, now, refreshHealthMinHealthValidity, refreshHealthMaxHealthValidity), healthQuery) } else { - res = tx.Exec("UPDATE slabs sla INNER JOIN src h ON sla.id = h.id SET sla.health = h.health, health_valid_until = (?)", sqlRandomTimestamp(s.db, now, refreshHealthMinHealthValidity, refreshHealthMaxHealthValidity)) + res = tx.Exec("UPDATE slabs sla INNER JOIN (?) h ON sla.id = h.id SET sla.health = h.health, health_valid_until = (?)", healthQuery, sqlRandomTimestamp(s.db, now, refreshHealthMinHealthValidity, refreshHealthMaxHealthValidity)) } if res.Error != nil { return res.Error } rowsAffected = res.RowsAffected - // Update the health of the objects associated with the updated slabs. 
- if isSQLite(s.db) { - return tx.Exec(`UPDATE objects SET health = i.health FROM ( - SELECT slices.db_object_id, MIN(s.health) AS health - FROM slices - INNER JOIN src s ON s.id = slices.db_slab_id - INNER JOIN objects o ON o.id = slices.db_object_id - GROUP BY slices.db_object_id - ) i - WHERE i.db_object_id = objects.id AND objects.health != i.health`).Error - } else { - return tx.Exec(`UPDATE objects - INNER JOIN ( - SELECT slices.db_object_id, MIN(s.health) as health - FROM slices - INNER JOIN src s ON s.id = slices.db_slab_id - GROUP BY slices.db_object_id - ) i ON objects.id = i.db_object_id - SET objects.health = i.health - WHERE objects.health != i.health - `).Error - } + // Update the health of objects with outdated health. + return tx.Exec(` +UPDATE objects SET health = ( + SELECT MIN(slabs.health) + FROM slabs + INNER JOIN slices ON slices.db_slab_id = slabs.id AND slices.db_object_id = objects.id +) WHERE health != ( + SELECT MIN(slabs.health) + FROM slabs + INNER JOIN slices ON slices.db_slab_id = slabs.id AND slices.db_object_id = objects.id +)`).Error }) if err != nil { return err @@ -2297,21 +2296,57 @@ func (s *SQLStore) objectHydrate(ctx context.Context, tx *gorm.DB, bucket, path // return object return api.Object{ Metadata: metadata, - ObjectMetadata: api.ObjectMetadata{ - ETag: obj[0].ObjectETag, - Health: obj[0].ObjectHealth, - MimeType: obj[0].ObjectMimeType, - ModTime: api.TimeRFC3339(obj[0].ObjectModTime.UTC()), - Name: obj[0].ObjectName, - Size: obj[0].ObjectSize, - }, - Object: object.Object{ + ObjectMetadata: newObjectMetadata( + obj[0].ObjectName, + obj[0].ObjectETag, + obj[0].ObjectMimeType, + obj[0].ObjectHealth, + obj[0].ObjectModTime, + obj[0].ObjectSize, + ), + Object: &object.Object{ Key: key, Slabs: slabs, }, }, nil } +// ObjectMetadata returns an object's metadata +func (s *SQLStore) ObjectMetadata(ctx context.Context, bucket, path string) (api.Object, error) { + var resp api.Object + err := s.db.Transaction(func(tx *gorm.DB) error { + var obj dbObject + err := tx.Model(&dbObject{}). + Joins("INNER JOIN buckets b ON objects.db_bucket_id = b.id"). + Where("b.name", bucket). + Where("object_id", path). + Take(&obj). + Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrObjectNotFound + } else if err != nil { + return err + } + oum, err := s.objectMetadata(ctx, tx, bucket, path) + if err != nil { + return err + } + resp = api.Object{ + ObjectMetadata: newObjectMetadata( + obj.ObjectID, + obj.Etag, + obj.MimeType, + obj.Health, + obj.CreatedAt, + obj.Size, + ), + Metadata: oum, + } + return nil + }) + return resp, err +} + func (s *SQLStore) objectMetadata(ctx context.Context, tx *gorm.DB, bucket, path string) (api.ObjectUserMetadata, error) { var rows []dbObjectUserMetadata err := tx. 
@@ -2332,6 +2367,17 @@ func (s *SQLStore) objectMetadata(ctx context.Context, tx *gorm.DB, bucket, path return metadata, nil } +func newObjectMetadata(name, etag, mimeType string, health float64, modTime time.Time, size int64) api.ObjectMetadata { + return api.ObjectMetadata{ + ETag: etag, + Health: health, + ModTime: api.TimeRFC3339(modTime.UTC()), + Name: name, + Size: size, + MimeType: mimeType, + } +} + func (s *SQLStore) objectRaw(ctx context.Context, txn *gorm.DB, bucket string, path string) (rows rawObject, err error) { // NOTE: we LEFT JOIN here because empty objects are valid and need to be // included in the result set, when we convert the rawObject before @@ -2662,20 +2708,32 @@ func archiveContracts(ctx context.Context, tx *gorm.DB, contracts []dbContract, return nil } +func pruneSlabs(tx *gorm.DB) error { + // delete slabs without any associated slices or buffers + return tx.Exec(` +DELETE +FROM slabs +WHERE NOT EXISTS (SELECT 1 FROM slices WHERE slices.db_slab_id = slabs.id) +AND slabs.db_buffered_slab_id IS NULL +`).Error +} + // deleteObject deletes an object from the store and prunes all slabs which are // without an obect after the deletion. That means in case of packed uploads, // the slab is only deleted when no more objects point to it. -func (s *SQLStore) deleteObject(tx *gorm.DB, bucket string, path string) (numDeleted int64, _ error) { +func (s *SQLStore) deleteObject(tx *gorm.DB, bucket string, path string) (int64, error) { tx = tx.Where("object_id = ? AND ?", path, sqlWhereBucket("objects", bucket)). Delete(&dbObject{}) if tx.Error != nil { return 0, tx.Error } - numDeleted = tx.RowsAffected + numDeleted := tx.RowsAffected if numDeleted == 0 { return 0, nil // nothing to prune if no object was deleted + } else if err := pruneSlabs(tx); err != nil { + return numDeleted, err } - return + return numDeleted, nil } // deleteObjects deletes a batch of objects from the database. 
The order of @@ -2704,8 +2762,12 @@ func (s *SQLStore) deleteObjects(bucket string, path string) (numDeleted int64, if err := res.Error; err != nil { return res.Error } - duration = time.Since(start) + // prune slabs if we deleted an object rowsAffected = res.RowsAffected + if rowsAffected > 0 { + return pruneSlabs(tx) + } + duration = time.Since(start) return nil }); err != nil { return 0, fmt.Errorf("failed to delete objects: %w", err) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 18f34dee4..f5461147c 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -8,6 +8,7 @@ import ( "fmt" "os" "reflect" + "sort" "strings" "testing" "time" @@ -16,7 +17,6 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/object" "gorm.io/gorm" "gorm.io/gorm/schema" @@ -87,7 +87,7 @@ func TestObjectBasic(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(got.Object, want) { + if !reflect.DeepEqual(*got.Object, want) { t.Fatal("object mismatch", cmp.Diff(got.Object, want)) } @@ -118,7 +118,7 @@ func TestObjectBasic(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(got2.Object, want2) { + if !reflect.DeepEqual(*got2.Object, want2) { t.Fatal("object mismatch", cmp.Diff(got2.Object, want2)) } } @@ -175,7 +175,7 @@ func TestObjectMetadata(t *testing.T) { } // assert it matches - if !reflect.DeepEqual(got.Object, want) { + if !reflect.DeepEqual(*got.Object, want) { t.Log(got.Object) t.Log(want) t.Fatal("object mismatch", cmp.Diff(got.Object, want, cmp.AllowUnexported(object.EncryptionKey{}))) @@ -218,7 +218,7 @@ func TestSQLContractStore(t *testing.T) { } // Add an announcement. - err = ss.insertTestAnnouncement(hk, hostdb.Announcement{NetAddress: "address"}) + err = ss.insertTestAnnouncement(hk, newTestHostDBAnnouncement("address")) if err != nil { t.Fatal(err) } @@ -509,11 +509,11 @@ func TestRenewedContract(t *testing.T) { hk, hk2 := hks[0], hks[1] // Add announcements. 
- err = ss.insertTestAnnouncement(hk, hostdb.Announcement{NetAddress: "address"}) + err = ss.insertTestAnnouncement(hk, newTestHostDBAnnouncement("address")) if err != nil { t.Fatal(err) } - err = ss.insertTestAnnouncement(hk2, hostdb.Announcement{NetAddress: "address2"}) + err = ss.insertTestAnnouncement(hk2, newTestHostDBAnnouncement("address2")) if err != nil { t.Fatal(err) } @@ -1006,7 +1006,7 @@ func TestSQLMetadataStore(t *testing.T) { one := uint(1) expectedObj := dbObject{ - DBBucketID: 1, + DBBucketID: ss.DefaultBucketID(), Health: 1, ObjectID: objID, Key: obj1Key, @@ -1069,7 +1069,7 @@ func TestSQLMetadataStore(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(fullObj.Object, obj1) { + if !reflect.DeepEqual(*fullObj.Object, obj1) { t.Fatal("object mismatch", cmp.Diff(fullObj, obj1)) } @@ -1167,6 +1167,7 @@ func TestSQLMetadataStore(t *testing.T) { slabs[i].Shards[0].Model = Model{} slabs[i].Shards[0].Contracts[0].Model = Model{} slabs[i].Shards[0].Contracts[0].Host.Model = Model{} + slabs[i].Shards[0].Contracts[0].Host.LastAnnouncement = time.Time{} slabs[i].HealthValidUntil = 0 } if !reflect.DeepEqual(slab1, expectedObjSlab1) { @@ -1182,7 +1183,7 @@ func TestSQLMetadataStore(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(fullObj.Object, obj1) { + if !reflect.DeepEqual(*fullObj.Object, obj1) { t.Fatal("object mismatch") } @@ -2211,10 +2212,9 @@ func TestUpdateSlab(t *testing.T) { t.Fatal(err) } var s dbSlab - if err := ss.db.Model(&dbSlab{}). + if err := ss.db.Where(&dbSlab{Key: key}). Joins("DBContractSet"). Preload("Shards"). - Where("key = ?", key). Take(&s). Error; err != nil { t.Fatal(err) @@ -2263,7 +2263,7 @@ func TestRecordContractSpending(t *testing.T) { } // Add an announcement. - err = ss.insertTestAnnouncement(hk, hostdb.Announcement{NetAddress: "address"}) + err = ss.insertTestAnnouncement(hk, newTestHostDBAnnouncement("address")) if err != nil { t.Fatal(err) } @@ -2435,7 +2435,7 @@ func TestObjectsStats(t *testing.T) { defer ss.Close() // Fetch stats on clean database. - info, err := ss.ObjectsStats(context.Background()) + info, err := ss.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) if err != nil { t.Fatal(err) } @@ -2446,6 +2446,7 @@ func TestObjectsStats(t *testing.T) { // Create a few objects of different size. var objectsSize uint64 var sectorsSize uint64 + var totalUploadedSize uint64 for i := 0; i < 2; i++ { obj := newTestObject(1) objectsSize += uint64(obj.TotalSize()) @@ -2458,10 +2459,11 @@ func TestObjectsStats(t *testing.T) { t.Fatal(err) } for _, fcid := range fcids { - _, err := ss.addTestContract(fcid, hpk) + c, err := ss.addTestContract(fcid, hpk) if err != nil { t.Fatal(err) } + totalUploadedSize += c.Size } } } @@ -2482,10 +2484,11 @@ func TestObjectsStats(t *testing.T) { } var newContractID types.FileContractID frand.Read(newContractID[:]) - _, err = ss.addTestContract(newContractID, types.PublicKey{}) + c, err := ss.addTestContract(newContractID, types.PublicKey{}) if err != nil { t.Fatal(err) } + totalUploadedSize += c.Size newContract, err := ss.contract(context.Background(), fileContractID(newContractID)) if err != nil { t.Fatal(err) @@ -2499,21 +2502,37 @@ func TestObjectsStats(t *testing.T) { } // Check sizes. 
- info, err = ss.ObjectsStats(context.Background()) - if err != nil { - t.Fatal(err) - } - if info.TotalObjectsSize != objectsSize { - t.Fatal("wrong size", info.TotalObjectsSize, objectsSize) - } - if info.TotalSectorsSize != sectorsSize { - t.Fatal("wrong size", info.TotalSectorsSize, sectorsSize) - } - if info.TotalUploadedSize != sectorsSize*2 { - t.Fatal("wrong size", info.TotalUploadedSize, sectorsSize*2) + for _, opts := range []api.ObjectsStatsOpts{ + {}, // any bucket + {Bucket: api.DefaultBucketName}, // specific bucket + } { + info, err = ss.ObjectsStats(context.Background(), opts) + if err != nil { + t.Fatal(err) + } else if info.TotalObjectsSize != objectsSize { + t.Fatal("wrong size", info.TotalObjectsSize, objectsSize) + } else if info.TotalSectorsSize != sectorsSize { + t.Fatal("wrong size", info.TotalSectorsSize, sectorsSize) + } else if info.TotalUploadedSize != totalUploadedSize { + t.Fatal("wrong size", info.TotalUploadedSize, totalUploadedSize) + } else if info.NumObjects != 2 { + t.Fatal("wrong number of objects", info.NumObjects, 2) + } } - if info.NumObjects != 2 { - t.Fatal("wrong number of objects", info.NumObjects, 2) + + // Check other bucket. + if err := ss.CreateBucket(context.Background(), "other", api.BucketPolicy{}); err != nil { + t.Fatal(err) + } else if info, err := ss.ObjectsStats(context.Background(), api.ObjectsStatsOpts{Bucket: "other"}); err != nil { + t.Fatal(err) + } else if info.TotalObjectsSize != 0 { + t.Fatal("wrong size", info.TotalObjectsSize) + } else if info.TotalSectorsSize != 0 { + t.Fatal("wrong size", info.TotalSectorsSize, 0) + } else if info.TotalUploadedSize != totalUploadedSize { + t.Fatal("wrong size", info.TotalUploadedSize, totalUploadedSize) + } else if info.NumObjects != 0 { + t.Fatal("wrong number of objects", info.NumObjects) } } @@ -2643,7 +2662,7 @@ func TestPartialSlab(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(obj, fetched.Object) { + if !reflect.DeepEqual(obj, *fetched.Object) { t.Fatal("mismatch", cmp.Diff(obj, fetched.Object, cmp.AllowUnexported(object.EncryptionKey{}))) } @@ -2679,7 +2698,7 @@ func TestPartialSlab(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(obj2, fetched.Object) { + if !reflect.DeepEqual(obj2, *fetched.Object) { t.Fatal("mismatch", cmp.Diff(obj2, fetched.Object)) } @@ -2727,7 +2746,7 @@ func TestPartialSlab(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(obj3, fetched.Object) { + if !reflect.DeepEqual(obj3, *fetched.Object) { t.Fatal("mismatch", cmp.Diff(obj3, fetched.Object, cmp.AllowUnexported(object.EncryptionKey{}))) } @@ -2908,7 +2927,7 @@ func TestContractSizes(t *testing.T) { } // assert there's two objects - s, err := ss.ObjectsStats(context.Background()) + s, err := ss.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) if err != nil { t.Fatal(err) } @@ -3563,8 +3582,10 @@ func TestDeleteHostSector(t *testing.T) { } // Prune the sector from hk1. - if err := ss.DeleteHostSector(context.Background(), hk1, root); err != nil { + if n, err := ss.DeleteHostSector(context.Background(), hk1, root); err != nil { t.Fatal(err) + } else if n != 2 { + t.Fatal("no sectors were pruned", n) } // Make sure 2 contractSector entries exist. 
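For orientation, a minimal sketch of how the two store APIs exercised above are consumed after this change; it assumes an existing *SQLStore `ss` plus the `hk1` and `root` values from the surrounding test, and is illustrative only, not part of the patch:

	// stats can now be scoped to a single bucket (empty opts keep the old, cluster-wide behaviour)
	info, err := ss.ObjectsStats(context.Background(), api.ObjectsStatsOpts{Bucket: api.DefaultBucketName})
	if err != nil {
		t.Fatal(err)
	}
	t.Log(info.NumObjects, info.TotalObjectsSize, info.TotalSectorsSize, info.TotalUploadedSize)

	// DeleteHostSector now reports how many contract sector entries were removed
	n, err := ss.DeleteHostSector(context.Background(), hk1, root)
	if err != nil {
		t.Fatal(err)
	}
	t.Log("pruned", n, "contract sector entries")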
@@ -3858,6 +3879,147 @@ func TestSlabHealthInvalidation(t *testing.T) { } } +func TestRefreshHealth(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + // define a helper function to return an object's health + health := func(name string) float64 { + t.Helper() + o, err := ss.Object(context.Background(), api.DefaultBucketName, name) + if err != nil { + t.Fatal(err) + } + return o.Health + } + + // add test hosts + hks, err := ss.addTestHosts(2) + if err != nil { + t.Fatal(err) + } + + // add test contract & set it as contract set + fcids, _, err := ss.addTestContracts(hks) + if err != nil { + t.Fatal(err) + } + err = ss.SetContractSet(context.Background(), testContractSet, fcids) + if err != nil { + t.Fatal(err) + } + + // add two test objects + o1 := t.Name() + "1" + if added, err := ss.addTestObject(o1, object.Object{ + Key: object.GenerateEncryptionKey(), + Slabs: []object.SlabSlice{{Slab: object.Slab{ + Key: object.GenerateEncryptionKey(), + Shards: []object.Sector{ + newTestShard(hks[0], fcids[0], types.Hash256{0}), + newTestShard(hks[1], fcids[1], types.Hash256{1}), + }, + }}}, + }); err != nil { + t.Fatal(err) + } else if added.Health != 1 { + t.Fatal("expected health to be 1, got", added.Health) + } + + o2 := t.Name() + "2" + if added, err := ss.addTestObject(o2, object.Object{ + Key: object.GenerateEncryptionKey(), + Slabs: []object.SlabSlice{{Slab: object.Slab{ + Key: object.GenerateEncryptionKey(), + Shards: []object.Sector{ + newTestShard(hks[0], fcids[0], types.Hash256{2}), + newTestShard(hks[1], fcids[1], types.Hash256{3}), + }, + }}}, + }); err != nil { + t.Fatal(err) + } else if added.Health != 1 { + t.Fatal("expected health to be 1, got", added.Health) + } + + // update contract set and refresh health, assert health is .5 + err = ss.SetContractSet(context.Background(), testContractSet, fcids[:1]) + if err != nil { + t.Fatal(err) + } + err = ss.RefreshHealth(context.Background()) + if err != nil { + t.Fatal(err) + } + if health(o1) != .5 { + t.Fatal("expected health to be .5, got", health(o1)) + } else if health(o2) != .5 { + t.Fatal("expected health to be .5, got", health(o2)) + } + + // set the health of s1 to be lower than .5 + err = ss.overrideSlabHealth(o1, 0.4) + if err != nil { + t.Fatal(err) + } + + // refresh health and assert only object 1's health got updated + err = ss.RefreshHealth(context.Background()) + if err != nil { + t.Fatal(err) + } + if health(o1) != .4 { + t.Fatal("expected health to be .4, got", health(o1)) + } else if health(o2) != .5 { + t.Fatal("expected health to be .5, got", health(o2)) + } + + // set the health of s2 to be higher than .5 + err = ss.overrideSlabHealth(o2, 0.6) + if err != nil { + t.Fatal(err) + } + + // refresh health and assert only object 2's health got updated + err = ss.RefreshHealth(context.Background()) + if err != nil { + t.Fatal(err) + } + if health(o1) != .4 { + t.Fatal("expected health to be .4, got", health(o1)) + } else if health(o2) != .6 { + t.Fatal("expected health to be .6, got", health(o2)) + } + + // add another object that is empty + o3 := t.Name() + "3" + if added, err := ss.addTestObject(o3, object.Object{ + Key: object.GenerateEncryptionKey(), + }); err != nil { + t.Fatal(err) + } else if added.Health != 1 { + t.Fatal("expected health to be 1, got", added.Health) + } + + // update its health to .1 + if err := ss.db. + Model(&dbObject{}). + Where("object_id", o3). + Update("health", 0.1). 
+ Error; err != nil { + t.Fatal(err) + } else if health(o3) != .1 { + t.Fatalf("expected health to be .1, got %v", health(o3)) + } + + // a refresh should not update its health + if err := ss.RefreshHealth(context.Background()); err != nil { + t.Fatal(err) + } else if health(o3) != .1 { + t.Fatalf("expected health to be .1, got %v", health(o3)) + } +} + func TestSlabCleanupTrigger(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() @@ -3879,7 +4041,7 @@ func TestSlabCleanupTrigger(t *testing.T) { // create objects obj1 := dbObject{ ObjectID: "1", - DBBucketID: 1, + DBBucketID: ss.DefaultBucketID(), Health: 1, } if err := ss.db.Create(&obj1).Error; err != nil { @@ -3887,7 +4049,7 @@ func TestSlabCleanupTrigger(t *testing.T) { } obj2 := dbObject{ ObjectID: "2", - DBBucketID: 1, + DBBucketID: ss.DefaultBucketID(), Health: 1, } if err := ss.db.Create(&obj2).Error; err != nil { @@ -3923,7 +4085,8 @@ func TestSlabCleanupTrigger(t *testing.T) { } // delete the object - if err := ss.db.Delete(&obj1).Error; err != nil { + err := ss.RemoveObject(context.Background(), api.DefaultBucketName, obj1.ObjectID) + if err != nil { t.Fatal(err) } @@ -3936,7 +4099,8 @@ func TestSlabCleanupTrigger(t *testing.T) { } // delete second object - if err := ss.db.Delete(&obj2).Error; err != nil { + err = ss.RemoveObject(context.Background(), api.DefaultBucketName, obj2.ObjectID) + if err != nil { t.Fatal(err) } @@ -3960,7 +4124,7 @@ func TestSlabCleanupTrigger(t *testing.T) { } obj3 := dbObject{ ObjectID: "3", - DBBucketID: 1, + DBBucketID: ss.DefaultBucketID(), Health: 1, } if err := ss.db.Create(&obj3).Error; err != nil { @@ -3980,7 +4144,8 @@ func TestSlabCleanupTrigger(t *testing.T) { } // delete third object - if err := ss.db.Delete(&obj3).Error; err != nil { + err = ss.RemoveObject(context.Background(), api.DefaultBucketName, obj3.ObjectID) + if err != nil { t.Fatal(err) } if err := ss.db.Model(&dbSlab{}).Count(&slabCntr).Error; err != nil { @@ -4042,3 +4207,341 @@ func TestUpsertSectors(t *testing.T) { } } } + +func TestUpdateObjectReuseSlab(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + minShards, totalShards := 10, 30 + + // create 90 hosts, enough for 3 slabs with 30 each + hks, err := ss.addTestHosts(3 * totalShards) + if err != nil { + t.Fatal(err) + } + + // create one contract each + fcids, _, err := ss.addTestContracts(hks) + if err != nil { + t.Fatal(err) + } + + // create an object + obj := object.Object{ + Key: object.GenerateEncryptionKey(), + } + // add 2 slabs + for i := 0; i < 2; i++ { + obj.Slabs = append(obj.Slabs, object.SlabSlice{ + Offset: 0, + Length: uint32(minShards) * rhpv2.SectorSize, + Slab: object.Slab{ + Key: object.GenerateEncryptionKey(), + MinShards: uint8(minShards), + }, + }) + } + // 30 shards each + for i := 0; i < len(obj.Slabs); i++ { + for j := 0; j < totalShards; j++ { + obj.Slabs[i].Shards = append(obj.Slabs[i].Shards, object.Sector{ + Contracts: map[types.PublicKey][]types.FileContractID{ + hks[i*totalShards+j]: { + fcids[i*totalShards+j], + }, + }, + LatestHost: hks[i*totalShards+j], + Root: frand.Entropy256(), + }) + } + } + + // add the object + _, err = ss.addTestObject("1", obj) + if err != nil { + t.Fatal(err) + } + + // fetch the object + var dbObj dbObject + if err := ss.db.Where("db_bucket_id", ss.DefaultBucketID()).Take(&dbObj).Error; err != nil { + t.Fatal(err) + } else if dbObj.ID != 1 { + t.Fatal("unexpected id", dbObj.ID) + } else if dbObj.DBBucketID != ss.DefaultBucketID() { + 
t.Fatal("bucket id mismatch", dbObj.DBBucketID) + } else if dbObj.ObjectID != "1" { + t.Fatal("object id mismatch", dbObj.ObjectID) + } else if dbObj.Health != 1 { + t.Fatal("health mismatch", dbObj.Health) + } else if dbObj.Size != obj.TotalSize() { + t.Fatal("size mismatch", dbObj.Size) + } + + // fetch its slices + var dbSlices []dbSlice + if err := ss.db.Where("db_object_id", dbObj.ID).Find(&dbSlices).Error; err != nil { + t.Fatal(err) + } else if len(dbSlices) != 2 { + t.Fatal("invalid number of slices", len(dbSlices)) + } + for i, dbSlice := range dbSlices { + if dbSlice.ID != uint(i+1) { + t.Fatal("unexpected id", dbSlice.ID) + } else if dbSlice.ObjectIndex != uint(i+1) { + t.Fatal("unexpected object index", dbSlice.ObjectIndex) + } else if dbSlice.Offset != 0 || dbSlice.Length != uint32(minShards)*rhpv2.SectorSize { + t.Fatal("invalid offset/length", dbSlice.Offset, dbSlice.Length) + } + + // fetch the slab + var dbSlab dbSlab + key, _ := obj.Slabs[i].Key.MarshalBinary() + if err := ss.db.Where("id", dbSlice.DBSlabID).Take(&dbSlab).Error; err != nil { + t.Fatal(err) + } else if dbSlab.ID != uint(i+1) { + t.Fatal("unexpected id", dbSlab.ID) + } else if dbSlab.DBContractSetID != 1 { + t.Fatal("invalid contract set id", dbSlab.DBContractSetID) + } else if dbSlab.Health != 1 { + t.Fatal("invalid health", dbSlab.Health) + } else if dbSlab.HealthValidUntil != 0 { + t.Fatal("invalid health validity", dbSlab.HealthValidUntil) + } else if dbSlab.MinShards != uint8(minShards) { + t.Fatal("invalid minShards", dbSlab.MinShards) + } else if dbSlab.TotalShards != uint8(totalShards) { + t.Fatal("invalid totalShards", dbSlab.TotalShards) + } else if !bytes.Equal(dbSlab.Key, key) { + t.Fatal("wrong key") + } + + // fetch the sectors + var dbSectors []dbSector + if err := ss.db.Where("db_slab_id", dbSlab.ID).Find(&dbSectors).Error; err != nil { + t.Fatal(err) + } else if len(dbSectors) != totalShards { + t.Fatal("invalid number of sectors", len(dbSectors)) + } + for j, dbSector := range dbSectors { + if dbSector.ID != uint(i*totalShards+j+1) { + t.Fatal("invalid id", dbSector.ID) + } else if dbSector.DBSlabID != dbSlab.ID { + t.Fatal("invalid slab id", dbSector.DBSlabID) + } else if dbSector.LatestHost != publicKey(hks[i*totalShards+j]) { + t.Fatal("invalid host") + } else if !bytes.Equal(dbSector.Root, obj.Slabs[i].Shards[j].Root[:]) { + t.Fatal("invalid root") + } + } + } + + obj2 := object.Object{ + Key: object.GenerateEncryptionKey(), + } + // add 1 slab with 30 shards + obj2.Slabs = append(obj2.Slabs, object.SlabSlice{ + Offset: 0, + Length: uint32(minShards) * rhpv2.SectorSize, + Slab: object.Slab{ + Key: object.GenerateEncryptionKey(), + MinShards: uint8(minShards), + }, + }) + // 30 shards each + for i := 0; i < totalShards; i++ { + obj2.Slabs[0].Shards = append(obj2.Slabs[0].Shards, object.Sector{ + Contracts: map[types.PublicKey][]types.FileContractID{ + hks[len(obj.Slabs)*totalShards+i]: { + fcids[len(obj.Slabs)*totalShards+i], + }, + }, + LatestHost: hks[len(obj.Slabs)*totalShards+i], + Root: frand.Entropy256(), + }) + } + // add the second slab of the first object too + obj2.Slabs = append(obj2.Slabs, obj.Slabs[1]) + + // add the object + _, err = ss.addTestObject("2", obj2) + if err != nil { + t.Fatal(err) + } + + // fetch the object + var dbObj2 dbObject + if err := ss.db.Where("db_bucket_id", ss.DefaultBucketID()). + Where("object_id", "2"). 
+ Take(&dbObj2).Error; err != nil { + t.Fatal(err) + } else if dbObj2.ID != 2 { + t.Fatal("unexpected id", dbObj2.ID) + } else if dbObj.Size != obj2.TotalSize() { + t.Fatal("size mismatch", dbObj2.Size) + } + + // fetch its slices + var dbSlices2 []dbSlice + if err := ss.db.Where("db_object_id", dbObj2.ID).Find(&dbSlices2).Error; err != nil { + t.Fatal(err) + } else if len(dbSlices2) != 2 { + t.Fatal("invalid number of slices", len(dbSlices)) + } + + // check the first one + dbSlice2 := dbSlices2[0] + if dbSlice2.ID != uint(len(dbSlices)+1) { + t.Fatal("unexpected id", dbSlice2.ID) + } else if dbSlice2.ObjectIndex != uint(1) { + t.Fatal("unexpected object index", dbSlice2.ObjectIndex) + } else if dbSlice2.Offset != 0 || dbSlice2.Length != uint32(minShards)*rhpv2.SectorSize { + t.Fatal("invalid offset/length", dbSlice2.Offset, dbSlice2.Length) + } + + // fetch the slab + var dbSlab2 dbSlab + key, _ := obj2.Slabs[0].Key.MarshalBinary() + if err := ss.db.Where("id", dbSlice2.DBSlabID).Take(&dbSlab2).Error; err != nil { + t.Fatal(err) + } else if dbSlab2.ID != uint(len(dbSlices)+1) { + t.Fatal("unexpected id", dbSlab2.ID) + } else if dbSlab2.DBContractSetID != 1 { + t.Fatal("invalid contract set id", dbSlab2.DBContractSetID) + } else if !bytes.Equal(dbSlab2.Key, key) { + t.Fatal("wrong key") + } + + // fetch the sectors + var dbSectors2 []dbSector + if err := ss.db.Where("db_slab_id", dbSlab2.ID).Find(&dbSectors2).Error; err != nil { + t.Fatal(err) + } else if len(dbSectors2) != totalShards { + t.Fatal("invalid number of sectors", len(dbSectors2)) + } + for j, dbSector := range dbSectors2 { + if dbSector.ID != uint((len(obj.Slabs))*totalShards+j+1) { + t.Fatal("invalid id", dbSector.ID) + } else if dbSector.DBSlabID != dbSlab2.ID { + t.Fatal("invalid slab id", dbSector.DBSlabID) + } else if dbSector.LatestHost != publicKey(hks[(len(obj.Slabs))*totalShards+j]) { + t.Fatal("invalid host") + } else if !bytes.Equal(dbSector.Root, obj2.Slabs[0].Shards[j].Root[:]) { + t.Fatal("invalid root") + } + } + + // the second slab of obj2 should be the same as the first in obj + if dbSlices2[1].DBSlabID != 2 { + t.Fatal("wrong slab") + } + + var contractSectors []dbContractSector + if err := ss.db.Find(&contractSectors).Error; err != nil { + t.Fatal(err) + } else if len(contractSectors) != 3*totalShards { + t.Fatal("invalid number of contract sectors", len(contractSectors)) + } + for i, cs := range contractSectors { + if cs.DBContractID != uint(i+1) { + t.Fatal("invalid contract id") + } else if cs.DBSectorID != uint(i+1) { + t.Fatal("invalid sector id") + } + } +} + +func TestTypeCurrency(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + // prepare the table + if isSQLite(ss.db) { + if err := ss.db.Exec("CREATE TABLE currencies (id INTEGER PRIMARY KEY AUTOINCREMENT,c BLOB);").Error; err != nil { + t.Fatal(err) + } + } else { + if err := ss.db.Exec("CREATE TABLE currencies (id INT AUTO_INCREMENT PRIMARY KEY, c BLOB);").Error; err != nil { + t.Fatal(err) + } + } + + // insert currencies in random order + if err := ss.db.Exec("INSERT INTO currencies (c) VALUES (?),(?),(?);", bCurrency(types.MaxCurrency), bCurrency(types.NewCurrency64(1)), bCurrency(types.ZeroCurrency)).Error; err != nil { + t.Fatal(err) + } + + // fetch currencies and assert they're sorted + var currencies []bCurrency + if err := ss.db.Raw(`SELECT c FROM currencies ORDER BY c ASC`).Scan(¤cies).Error; err != nil { + t.Fatal(err) + } else if !sort.SliceIsSorted(currencies, func(i, j int) bool { + return 
types.Currency(currencies[i]).Cmp(types.Currency(currencies[j])) < 0 + }) { + t.Fatal("currencies not sorted", currencies) + } + + // convenience variables + c0 := currencies[0] + c1 := currencies[1] + cM := currencies[2] + + tests := []struct { + a bCurrency + b bCurrency + cmp string + }{ + { + a: c0, + b: c1, + cmp: "<", + }, + { + a: c1, + b: c0, + cmp: ">", + }, + { + a: c0, + b: c1, + cmp: "!=", + }, + { + a: c1, + b: c1, + cmp: "=", + }, + { + a: c0, + b: cM, + cmp: "<", + }, + { + a: cM, + b: c0, + cmp: ">", + }, + { + a: cM, + b: cM, + cmp: "=", + }, + } + for i, test := range tests { + var result bool + query := fmt.Sprintf("SELECT ? %s ?", test.cmp) + if !isSQLite(ss.db) { + query = strings.Replace(query, "?", "HEX(?)", -1) + } + if err := ss.db.Raw(query, test.a, test.b).Scan(&result).Error; err != nil { + t.Fatal(err) + } else if !result { + t.Errorf("unexpected result in case %d/%d: expected %v %s %v to be true", i+1, len(tests), types.Currency(test.a).String(), test.cmp, types.Currency(test.b).String()) + } else if test.cmp == "<" && types.Currency(test.a).Cmp(types.Currency(test.b)) >= 0 { + t.Fatal("invalid result") + } else if test.cmp == ">" && types.Currency(test.a).Cmp(types.Currency(test.b)) <= 0 { + t.Fatal("invalid result") + } else if test.cmp == "=" && types.Currency(test.a).Cmp(types.Currency(test.b)) != 0 { + t.Fatal("invalid result") + } + } +} diff --git a/stores/metrics.go b/stores/metrics.go index 203ed3b71..333ed8a42 100644 --- a/stores/metrics.go +++ b/stores/metrics.go @@ -14,6 +14,10 @@ import ( "gorm.io/gorm/clause" ) +const ( + contractMetricGranularity = 5 * time.Minute +) + type ( // dbContractMetric tracks information about a contract's funds. It is // supposed to be reported by a worker every time a contract is revised. @@ -246,6 +250,21 @@ func (s *SQLStore) RecordContractMetric(ctx context.Context, metrics ...api.Cont } } return s.dbMetrics.Transaction(func(tx *gorm.DB) error { + // delete any existing metric for the same contract that has happened + // within the same 5' window by diving the timestamp by 5' and use integer division. + for _, metric := range metrics { + intervalStart := metric.Timestamp.Std().Truncate(contractMetricGranularity) + intervalEnd := intervalStart.Add(contractMetricGranularity) + err := tx. + Where("timestamp >= ?", unixTimeMS(intervalStart)). + Where("timestamp < ?", unixTimeMS(intervalEnd)). + Where("fcid", fileContractID(metric.ContractID)). + Delete(&dbContractMetric{}). + Error + if err != nil { + return err + } + } return tx.Create(&dbMetrics).Error }) } @@ -522,43 +541,43 @@ func (s *SQLStore) findAggregatedContractPeriods(start time.Time, n uint64, inte return nil, api.ErrMaxIntervalsExceeded } end := start.Add(time.Duration(n) * interval) - var metricsWithPeriod []struct { + + type metricWithPeriod struct { Metric dbContractMetric `gorm:"embedded"` Period int64 } - err := s.dbMetrics.Raw(` - WITH RECURSIVE periods AS ( - SELECT ? AS period_start - UNION ALL - SELECT period_start + ? - FROM periods - WHERE period_start < ? - ? - ) - SELECT contracts.*, i.Period FROM contracts - INNER JOIN ( - SELECT - p.period_start as Period, - MIN(c.id) AS id - FROM - periods p - INNER JOIN - contracts c ON c.timestamp >= p.period_start AND c.timestamp < p.period_start + ? - GROUP BY - p.period_start, c.fcid - ORDER BY - p.period_start ASC - ) i ON contracts.id = i.id - `, unixTimeMS(start), - interval.Milliseconds(), - unixTimeMS(end), - interval.Milliseconds(), - interval.Milliseconds(), - ). - Scan(&metricsWithPeriod). 
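// Editorial sketch of the deduplication added to RecordContractMetric above
// (illustrative, assuming contractMetricGranularity stays at five minutes):
// each timestamp is truncated to the start of its window and any existing
// metric for the same contract inside that window is deleted first:
//
//	start := metric.Timestamp.Std().Truncate(contractMetricGranularity)
//	end := start.Add(contractMetricGranularity)
//	// delete rows where fcid = metric.ContractID and start <= timestamp < end
//
// At most one metric per contract therefore survives per window, which is why
// the metrics test further down records one metric per second for ten minutes
// and expects exactly two rows afterwards.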
- Error + var metricsWithPeriod []metricWithPeriod + + err := s.dbMetrics.Transaction(func(tx *gorm.DB) error { + var fcids []fileContractID + if err := tx.Raw("SELECT DISTINCT fcid FROM contracts WHERE contracts.timestamp >= ? AND contracts.timestamp < ?", unixTimeMS(start), unixTimeMS(end)). + Scan(&fcids).Error; err != nil { + return fmt.Errorf("failed to fetch distinct contract ids: %w", err) + } + + for intervalStart := start; intervalStart.Before(end); intervalStart = intervalStart.Add(interval) { + intervalEnd := intervalStart.Add(interval) + for _, fcid := range fcids { + var metrics []dbContractMetric + err := tx.Raw("SELECT * FROM contracts WHERE contracts.timestamp >= ? AND contracts.timestamp < ? AND contracts.fcid = ? LIMIT 1", unixTimeMS(intervalStart), unixTimeMS(intervalEnd), fileContractID(fcid)). + Scan(&metrics).Error + if err != nil { + return fmt.Errorf("failed to fetch contract metrics: %w", err) + } else if len(metrics) == 0 { + continue + } + metricsWithPeriod = append(metricsWithPeriod, metricWithPeriod{ + Metric: metrics[0], + Period: intervalStart.UnixMilli(), + }) + } + } + return nil + }) if err != nil { - return nil, fmt.Errorf("failed to fetch aggregate metrics: %w", err) + return nil, err } + currentPeriod := int64(math.MinInt64) var metrics []dbContractMetric for _, m := range metricsWithPeriod { @@ -605,9 +624,7 @@ func (s *SQLStore) findPeriods(table string, dst interface{}, start time.Time, n WHERE ? GROUP BY p.period_start - ORDER BY - p.period_start ASC - ) i ON %s.id = i.id + ) i ON %s.id = i.id ORDER BY Period ASC `, table, table, table, table), unixTimeMS(start), interval.Milliseconds(), diff --git a/stores/metrics_test.go b/stores/metrics_test.go index 2b2f572a7..ec97099ba 100644 --- a/stores/metrics_test.go +++ b/stores/metrics_test.go @@ -488,6 +488,30 @@ func TestContractMetrics(t *testing.T) { } else if len(metrics) != 1 { t.Fatalf("expected 1 metric, got %v", len(metrics)) } + + // Drop all metrics. + if err := ss.dbMetrics.Where("TRUE").Delete(&dbContractMetric{}).Error; err != nil { + t.Fatal(err) + } + + // Record multiple metrics for the same contract - one per second over 10 minutes + for i := int64(0); i < 600; i++ { + err := ss.RecordContractMetric(context.Background(), api.ContractMetric{ + ContractID: types.FileContractID{1}, + Timestamp: api.TimeRFC3339(time.Unix(i, 0)), + }) + if err != nil { + t.Fatal(err) + } + } + + // Check how many metrics were recorded. + var n int64 + if err := ss.dbMetrics.Model(&dbContractMetric{}).Count(&n).Error; err != nil { + t.Fatal(err) + } else if n != 2 { + t.Fatalf("expected 2 metrics, got %v", n) + } } func TestWalletMetrics(t *testing.T) { @@ -517,7 +541,7 @@ func TestWalletMetrics(t *testing.T) { } else if !sort.SliceIsSorted(metrics, func(i, j int) bool { return time.Time(metrics[i].Timestamp).Before(time.Time(metrics[j].Timestamp)) }) { - t.Fatal("expected metrics to be sorted by time") + t.Fatalf("expected metrics to be sorted by time, %+v", metrics) } // Prune metrics diff --git a/stores/migrations.go b/stores/migrations.go index e79c6c36b..cb0a38b18 100644 --- a/stores/migrations.go +++ b/stores/migrations.go @@ -6,7 +6,6 @@ import ( "strings" "github.com/go-gormigrate/gormigrate/v2" - "go.sia.tech/renterd/api" "go.uber.org/zap" "gorm.io/gorm" ) @@ -16,33 +15,8 @@ var ( errMySQLNoSuperPrivilege = errors.New("You do not have the SUPER privilege and binary logging is enabled") ) -// initSchema is executed only on a clean database. Otherwise the individual -// migrations are executed. 
-func initSchema(tx *gorm.DB) (err error) { - // Pick the right migrations. - var schema []byte - if isSQLite(tx) { - schema, err = migrations.ReadFile("migrations/sqlite/main/schema.sql") - } else { - schema, err = migrations.ReadFile("migrations/mysql/main/schema.sql") - } - if err != nil { - return - } - - // Run it. - err = tx.Exec(string(schema)).Error - if err != nil { - return fmt.Errorf("failed to init schema: %w", err) - } - - // Add default bucket. - return tx.Create(&dbBucket{ - Name: api.DefaultBucketName, - }).Error -} - func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { + dbIdentifier := "main" migrations := []*gormigrate.Migration{ { ID: "00001_init", @@ -51,26 +25,44 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { { ID: "00001_object_metadata", Migrate: func(tx *gorm.DB) error { - return performMigration(tx, "00001_object_metadata", logger) + return performMigration(tx, dbIdentifier, "00001_object_metadata", logger) }, }, { ID: "00002_prune_slabs_trigger", Migrate: func(tx *gorm.DB) error { - err := performMigration(tx, "00002_prune_slabs_trigger", logger) + err := performMigration(tx, dbIdentifier, "00002_prune_slabs_trigger", logger) if err != nil && strings.Contains(err.Error(), errMySQLNoSuperPrivilege.Error()) { logger.Warn("migration 00002_prune_slabs_trigger requires the user to have the SUPER privilege to register triggers") } return err }, }, + { + ID: "00003_idx_objects_size", + Migrate: func(tx *gorm.DB) error { + return performMigration(tx, dbIdentifier, "00003_idx_objects_size", logger) + }, + }, + { + ID: "00004_prune_slabs_cascade", + Migrate: func(tx *gorm.DB) error { + return performMigration(tx, dbIdentifier, "00004_prune_slabs_cascade", logger) + }, + }, + { + ID: "00005_zero_size_object_health", + Migrate: func(tx *gorm.DB) error { + return performMigration(tx, dbIdentifier, "00005_zero_size_object_health", logger) + }, + }, } // Create migrator. m := gormigrate.New(db, gormigrate.DefaultOptions, migrations) // Set init function. - m.InitSchema(initSchema) + m.InitSchema(initSchema(db, dbIdentifier, logger)) // Perform migrations. 
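// Illustrative aside (hypothetical migration name, following the pattern above):
// a future schema change would be registered as one more entry that delegates to
// performMigration with the "main" identifier, backed by a SQL file in both the
// sqlite and mysql migration directories:
//
//	{
//		ID: "00006_example",
//		Migrate: func(tx *gorm.DB) error {
//			return performMigration(tx, dbIdentifier, "00006_example", logger)
//		},
//	},
//
// with migrations/sqlite/main/migration_00006_example.sql and
// migrations/mysql/main/migration_00006_example.sql holding the statements. The
// default bucket no longer needs to be created in Go code, since the schema
// files later in this change insert it directly.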
if err := m.Migrate(); err != nil { @@ -78,30 +70,3 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { } return nil } - -func performMigration(db *gorm.DB, name string, logger *zap.SugaredLogger) error { - logger.Infof("performing migration %s", name) - - // build path - var path string - if isSQLite(db) { - path = fmt.Sprintf("migrations/sqlite/main/migration_" + name + ".sql") - } else { - path = fmt.Sprintf("migrations/mysql/main/migration_" + name + ".sql") - } - - // read migration file - migration, err := migrations.ReadFile(path) - if err != nil { - return fmt.Errorf("migration %s failed: %w", name, err) - } - - // execute it - err = db.Exec(string(migration)).Error - if err != nil { - return fmt.Errorf("migration %s failed: %w", name, err) - } - - logger.Infof("migration %s complete", name) - return nil -} diff --git a/stores/migrations/mysql/main/migration_00003_idx_objects_size.sql b/stores/migrations/mysql/main/migration_00003_idx_objects_size.sql new file mode 100644 index 000000000..0df0b5d58 --- /dev/null +++ b/stores/migrations/mysql/main/migration_00003_idx_objects_size.sql @@ -0,0 +1 @@ +CREATE INDEX `idx_objects_size` ON `objects`(`size`); diff --git a/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql b/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql new file mode 100644 index 000000000..c2efe3467 --- /dev/null +++ b/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql @@ -0,0 +1,16 @@ +-- drop triggers +DROP TRIGGER IF EXISTS before_delete_on_objects_delete_slices; +DROP TRIGGER IF EXISTS before_delete_on_multipart_uploads_delete_multipart_parts; +DROP TRIGGER IF EXISTS before_delete_on_multipart_parts_delete_slices; +DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs; + +-- add ON DELETE CASCADE to slices +ALTER TABLE slices DROP FOREIGN KEY fk_objects_slabs; +ALTER TABLE slices ADD CONSTRAINT fk_objects_slabs FOREIGN KEY (db_object_id) REFERENCES objects (id) ON DELETE CASCADE; + +ALTER TABLE slices DROP FOREIGN KEY fk_multipart_parts_slabs; +ALTER TABLE slices ADD CONSTRAINT fk_multipart_parts_slabs FOREIGN KEY (db_multipart_part_id) REFERENCES multipart_parts (id) ON DELETE CASCADE; + +-- add ON DELETE CASCADE to multipart_parts +ALTER TABLE multipart_parts DROP FOREIGN KEY fk_multipart_uploads_parts; +ALTER TABLE multipart_parts ADD CONSTRAINT fk_multipart_uploads_parts FOREIGN KEY (db_multipart_upload_id) REFERENCES multipart_uploads (id) ON DELETE CASCADE; \ No newline at end of file diff --git a/stores/migrations/mysql/main/migration_00005_zero_size_object_health.sql b/stores/migrations/mysql/main/migration_00005_zero_size_object_health.sql new file mode 100644 index 000000000..1a0799394 --- /dev/null +++ b/stores/migrations/mysql/main/migration_00005_zero_size_object_health.sql @@ -0,0 +1 @@ +UPDATE objects SET health = 1 WHERE size = 0; diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index 39bf279f0..a5ed86807 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -310,7 +310,7 @@ CREATE TABLE `multipart_parts` ( KEY `idx_multipart_parts_etag` (`etag`), KEY `idx_multipart_parts_part_number` (`part_number`), KEY `idx_multipart_parts_db_multipart_upload_id` (`db_multipart_upload_id`), - CONSTRAINT `fk_multipart_uploads_parts` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) + CONSTRAINT `fk_multipart_uploads_parts` FOREIGN KEY (`db_multipart_upload_id`) 
REFERENCES `multipart_uploads` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -- dbObject @@ -330,6 +330,7 @@ CREATE TABLE `objects` ( KEY `idx_objects_object_id` (`object_id`), KEY `idx_objects_health` (`health`), KEY `idx_objects_etag` (`etag`), + KEY `idx_objects_size` (`size`), CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets` (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; @@ -373,8 +374,8 @@ CREATE TABLE `slices` ( KEY `idx_slices_object_index` (`object_index`), KEY `idx_slices_db_multipart_part_id` (`db_multipart_part_id`), KEY `idx_slices_db_slab_id` (`db_slab_id`), - CONSTRAINT `fk_multipart_parts_slabs` FOREIGN KEY (`db_multipart_part_id`) REFERENCES `multipart_parts` (`id`), - CONSTRAINT `fk_objects_slabs` FOREIGN KEY (`db_object_id`) REFERENCES `objects` (`id`), + CONSTRAINT `fk_multipart_parts_slabs` FOREIGN KEY (`db_multipart_part_id`) REFERENCES `multipart_parts` (`id`) ON DELETE CASCADE, + CONSTRAINT `fk_objects_slabs` FOREIGN KEY (`db_object_id`) REFERENCES `objects` (`id`) ON DELETE CASCADE, CONSTRAINT `fk_slabs_slices` FOREIGN KEY (`db_slab_id`) REFERENCES `slabs` (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; @@ -420,36 +421,5 @@ CREATE TABLE `object_user_metadata` ( CONSTRAINT `fk_multipart_upload_user_metadata` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) ON DELETE SET NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; --- dbObject trigger to delete from slices -CREATE TRIGGER before_delete_on_objects_delete_slices -BEFORE DELETE -ON objects FOR EACH ROW -DELETE FROM slices -WHERE slices.db_object_id = OLD.id; - --- dbMultipartUpload trigger to delete from dbMultipartPart -CREATE TRIGGER before_delete_on_multipart_uploads_delete_multipart_parts -BEFORE DELETE -ON multipart_uploads FOR EACH ROW -DELETE FROM multipart_parts -WHERE multipart_parts.db_multipart_upload_id = OLD.id; - --- dbMultipartPart trigger to delete from slices -CREATE TRIGGER before_delete_on_multipart_parts_delete_slices -BEFORE DELETE -ON multipart_parts FOR EACH ROW -DELETE FROM slices -WHERE slices.db_multipart_part_id = OLD.id; - --- dbSlices trigger to prune slabs -CREATE TRIGGER after_delete_on_slices_delete_slabs -AFTER DELETE -ON slices FOR EACH ROW -DELETE FROM slabs -WHERE slabs.id = OLD.db_slab_id -AND slabs.db_buffered_slab_id IS NULL -AND NOT EXISTS ( - SELECT 1 - FROM slices - WHERE slices.db_slab_id = OLD.db_slab_id -); \ No newline at end of file +-- create default bucket +INSERT INTO buckets (created_at, name) VALUES (CURRENT_TIMESTAMP, 'default'); \ No newline at end of file diff --git a/stores/migrations/mysql/metrics/migration_00001_idx_contracts_fcid_timestamp.sql b/stores/migrations/mysql/metrics/migration_00001_idx_contracts_fcid_timestamp.sql new file mode 100644 index 000000000..5276a3083 --- /dev/null +++ b/stores/migrations/mysql/metrics/migration_00001_idx_contracts_fcid_timestamp.sql @@ -0,0 +1 @@ +CREATE INDEX `idx_contracts_fcid_timestamp` ON `contracts`(`fcid`,`timestamp`); diff --git a/stores/migrations/mysql/metrics/schema.sql b/stores/migrations/mysql/metrics/schema.sql index 6d993f0cb..da4db5a6e 100644 --- a/stores/migrations/mysql/metrics/schema.sql +++ b/stores/migrations/mysql/metrics/schema.sql @@ -82,7 +82,8 @@ CREATE TABLE `contracts` ( KEY `idx_contracts_timestamp` (`timestamp`), KEY `idx_remaining_funds` (`remaining_funds_lo`,`remaining_funds_hi`), KEY 
`idx_delete_spending` (`delete_spending_lo`,`delete_spending_hi`), - KEY `idx_list_spending` (`list_spending_lo`,`list_spending_hi`) + KEY `idx_list_spending` (`list_spending_lo`,`list_spending_hi`), + KEY `idx_contracts_fcid_timestamp` (`fcid`,`timestamp`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -- dbPerformanceMetric diff --git a/stores/migrations/sqlite/main/migration_00003_idx_objects_size.sql b/stores/migrations/sqlite/main/migration_00003_idx_objects_size.sql new file mode 100644 index 000000000..0df0b5d58 --- /dev/null +++ b/stores/migrations/sqlite/main/migration_00003_idx_objects_size.sql @@ -0,0 +1 @@ +CREATE INDEX `idx_objects_size` ON `objects`(`size`); diff --git a/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql b/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql new file mode 100644 index 000000000..03f006acd --- /dev/null +++ b/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql @@ -0,0 +1,30 @@ +-- drop triggers +DROP TRIGGER IF EXISTS before_delete_on_objects_delete_slices; +DROP TRIGGER IF EXISTS before_delete_on_multipart_uploads_delete_multipart_parts; +DROP TRIGGER IF EXISTS before_delete_on_multipart_parts_delete_slices; +DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs; + +PRAGMA foreign_keys=off; +-- update constraints on slices +DROP TABLE IF EXISTS slices_temp; +CREATE TABLE `slices_temp` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_object_id` integer,`object_index` integer,`db_multipart_part_id` integer,`db_slab_id` integer,`offset` integer,`length` integer,CONSTRAINT `fk_objects_slabs` FOREIGN KEY (`db_object_id`) REFERENCES `objects`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_multipart_parts_slabs` FOREIGN KEY (`db_multipart_part_id`) REFERENCES `multipart_parts`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_slabs_slices` FOREIGN KEY (`db_slab_id`) REFERENCES `slabs`(`id`)); +INSERT INTO slices_temp SELECT `id`, `created_at`, `db_object_id`, `object_index`, `db_multipart_part_id`, `db_slab_id`, `offset`, `length` FROM slices; +DROP TABLE slices; +ALTER TABLE slices_temp RENAME TO slices; + +CREATE INDEX `idx_slices_object_index` ON `slices`(`object_index`); +CREATE INDEX `idx_slices_db_object_id` ON `slices`(`db_object_id`); +CREATE INDEX `idx_slices_db_slab_id` ON `slices`(`db_slab_id`); +CREATE INDEX `idx_slices_db_multipart_part_id` ON `slices`(`db_multipart_part_id`); + +-- update constraints multipart_parts +DROP TABLE IF EXISTS multipart_parts_temp; +CREATE TABLE `multipart_parts_temp` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`etag` text,`part_number` integer,`size` integer,`db_multipart_upload_id` integer NOT NULL,CONSTRAINT `fk_multipart_uploads_parts` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads`(`id`) ON DELETE CASCADE); +INSERT INTO multipart_parts_temp SELECT * FROM multipart_parts; +DROP TABLE multipart_parts; +ALTER TABLE multipart_parts_temp RENAME TO multipart_parts; + +CREATE INDEX `idx_multipart_parts_db_multipart_upload_id` ON `multipart_parts`(`db_multipart_upload_id`); +CREATE INDEX `idx_multipart_parts_part_number` ON `multipart_parts`(`part_number`); +CREATE INDEX `idx_multipart_parts_etag` ON `multipart_parts`(`etag`); +PRAGMA foreign_keys=on; diff --git a/stores/migrations/sqlite/main/migration_00005_zero_size_object_health.sql b/stores/migrations/sqlite/main/migration_00005_zero_size_object_health.sql new file mode 100644 index 000000000..1a0799394 --- /dev/null +++ 
b/stores/migrations/sqlite/main/migration_00005_zero_size_object_health.sql @@ -0,0 +1 @@ +UPDATE objects SET health = 1 WHERE size = 0; diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql index df9fc9a83..8d7afeaa1 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -50,6 +50,7 @@ CREATE INDEX `idx_objects_db_bucket_id` ON `objects`(`db_bucket_id`); CREATE INDEX `idx_objects_etag` ON `objects`(`etag`); CREATE INDEX `idx_objects_health` ON `objects`(`health`); CREATE INDEX `idx_objects_object_id` ON `objects`(`object_id`); +CREATE INDEX `idx_objects_size` ON `objects`(`size`); CREATE UNIQUE INDEX `idx_object_bucket` ON `objects`(`db_bucket_id`,`object_id`); -- dbMultipartUpload @@ -84,13 +85,13 @@ CREATE INDEX `idx_contract_sectors_db_contract_id` ON `contract_sectors`(`db_con CREATE INDEX `idx_contract_sectors_db_sector_id` ON `contract_sectors`(`db_sector_id`); -- dbMultipartPart -CREATE TABLE `multipart_parts` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`etag` text,`part_number` integer,`size` integer,`db_multipart_upload_id` integer NOT NULL,CONSTRAINT `fk_multipart_uploads_parts` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads`(`id`)); +CREATE TABLE `multipart_parts` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`etag` text,`part_number` integer,`size` integer,`db_multipart_upload_id` integer NOT NULL,CONSTRAINT `fk_multipart_uploads_parts` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads`(`id`) ON DELETE CASCADE); CREATE INDEX `idx_multipart_parts_db_multipart_upload_id` ON `multipart_parts`(`db_multipart_upload_id`); CREATE INDEX `idx_multipart_parts_part_number` ON `multipart_parts`(`part_number`); CREATE INDEX `idx_multipart_parts_etag` ON `multipart_parts`(`etag`); -- dbSlice -CREATE TABLE `slices` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_object_id` integer,`object_index` integer,`db_multipart_part_id` integer,`db_slab_id` integer,`offset` integer,`length` integer,CONSTRAINT `fk_objects_slabs` FOREIGN KEY (`db_object_id`) REFERENCES `objects`(`id`),CONSTRAINT `fk_multipart_parts_slabs` FOREIGN KEY (`db_multipart_part_id`) REFERENCES `multipart_parts`(`id`),CONSTRAINT `fk_slabs_slices` FOREIGN KEY (`db_slab_id`) REFERENCES `slabs`(`id`)); +CREATE TABLE `slices` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_object_id` integer,`object_index` integer,`db_multipart_part_id` integer,`db_slab_id` integer,`offset` integer,`length` integer,CONSTRAINT `fk_objects_slabs` FOREIGN KEY (`db_object_id`) REFERENCES `objects`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_multipart_parts_slabs` FOREIGN KEY (`db_multipart_part_id`) REFERENCES `multipart_parts`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_slabs_slices` FOREIGN KEY (`db_slab_id`) REFERENCES `slabs`(`id`)); CREATE INDEX `idx_slices_object_index` ON `slices`(`object_index`); CREATE INDEX `idx_slices_db_object_id` ON `slices`(`db_object_id`); CREATE INDEX `idx_slices_db_slab_id` ON `slices`(`db_slab_id`); @@ -147,40 +148,5 @@ CREATE UNIQUE INDEX `idx_module_event_url` ON `webhooks`(`module`,`event`,`url`) CREATE TABLE `object_user_metadata` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_object_id` integer DEFAULT NULL,`db_multipart_upload_id` integer DEFAULT NULL,`key` text NOT NULL,`value` text, CONSTRAINT `fk_object_user_metadata` FOREIGN KEY (`db_object_id`) REFERENCES `objects` (`id`) ON DELETE CASCADE, CONSTRAINT 
`fk_multipart_upload_user_metadata` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) ON DELETE SET NULL); CREATE UNIQUE INDEX `idx_object_user_metadata_key` ON `object_user_metadata`(`db_object_id`,`db_multipart_upload_id`,`key`); --- dbObject trigger to delete from slices -CREATE TRIGGER before_delete_on_objects_delete_slices -BEFORE DELETE ON objects -BEGIN - DELETE FROM slices - WHERE slices.db_object_id = OLD.id; -END; - --- dbMultipartUpload trigger to delete from dbMultipartPart -CREATE TRIGGER before_delete_on_multipart_uploads_delete_multipart_parts -BEFORE DELETE ON multipart_uploads -BEGIN - DELETE FROM multipart_parts - WHERE multipart_parts.db_multipart_upload_id = OLD.id; -END; - --- dbMultipartPart trigger to delete from slices -CREATE TRIGGER before_delete_on_multipart_parts_delete_slices -BEFORE DELETE ON multipart_parts -BEGIN - DELETE FROM slices - WHERE slices.db_multipart_part_id = OLD.id; -END; - --- dbSlices trigger to prune slabs -CREATE TRIGGER after_delete_on_slices_delete_slabs -AFTER DELETE ON slices -BEGIN - DELETE FROM slabs - WHERE slabs.id = OLD.db_slab_id - AND slabs.db_buffered_slab_id IS NULL - AND NOT EXISTS ( - SELECT 1 - FROM slices - WHERE slices.db_slab_id = OLD.db_slab_id - ); -END; \ No newline at end of file +-- create default bucket +INSERT INTO buckets (created_at, name) VALUES (CURRENT_TIMESTAMP, 'default'); diff --git a/stores/migrations/sqlite/metrics/migration_00001_idx_contracts_fcid_timestamp.sql b/stores/migrations/sqlite/metrics/migration_00001_idx_contracts_fcid_timestamp.sql new file mode 100644 index 000000000..5276a3083 --- /dev/null +++ b/stores/migrations/sqlite/metrics/migration_00001_idx_contracts_fcid_timestamp.sql @@ -0,0 +1 @@ +CREATE INDEX `idx_contracts_fcid_timestamp` ON `contracts`(`fcid`,`timestamp`); diff --git a/stores/migrations/sqlite/metrics/schema.sql b/stores/migrations/sqlite/metrics/schema.sql index 4aa174209..63dae7d65 100644 --- a/stores/migrations/sqlite/metrics/schema.sql +++ b/stores/migrations/sqlite/metrics/schema.sql @@ -11,6 +11,7 @@ CREATE INDEX `idx_download_spending` ON `contracts`(`download_spending_lo`,`down CREATE INDEX `idx_upload_spending` ON `contracts`(`upload_spending_lo`,`upload_spending_hi`); CREATE INDEX `idx_contracts_revision_number` ON `contracts`(`revision_number`); CREATE INDEX `idx_remaining_funds` ON `contracts`(`remaining_funds_lo`,`remaining_funds_hi`); +CREATE INDEX `idx_contracts_fcid_timestamp` ON `contracts`(`fcid`,`timestamp`); -- dbContractPruneMetric CREATE TABLE `contract_prunes` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`timestamp` BIGINT NOT NULL,`fcid` blob NOT NULL,`host` blob NOT NULL,`host_version` text,`pruned` BIGINT NOT NULL,`remaining` BIGINT NOT NULL,`duration` integer NOT NULL); diff --git a/stores/migrations_metrics.go b/stores/migrations_metrics.go index a95d7b914..fc3164bee 100644 --- a/stores/migrations_metrics.go +++ b/stores/migrations_metrics.go @@ -8,42 +8,26 @@ import ( "gorm.io/gorm" ) -// initMetricsSchema is executed only on a clean database. Otherwise the individual -// migrations are executed. -func initMetricsSchema(tx *gorm.DB) error { - // Pick the right migrations. - var schema []byte - var err error - if isSQLite(tx) { - schema, err = migrations.ReadFile("migrations/sqlite/metrics/schema.sql") - } else { - schema, err = migrations.ReadFile("migrations/mysql/metrics/schema.sql") - } - if err != nil { - return err - } - - // Run it. 
- err = tx.Exec(string(schema)).Error - if err != nil { - return fmt.Errorf("failed to init schema: %w", err) - } - return nil -} - func performMetricsMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { + dbIdentifier := "metrics" migrations := []*gormigrate.Migration{ { ID: "00001_init", Migrate: func(tx *gorm.DB) error { return errRunV072 }, }, + { + ID: "00001_idx_contracts_fcid_timestamp", + Migrate: func(tx *gorm.DB) error { + return performMigration(tx, dbIdentifier, "00001_idx_contracts_fcid_timestamp", logger) + }, + }, } // Create migrator. m := gormigrate.New(db, gormigrate.DefaultOptions, migrations) // Set init function. - m.InitSchema(initMetricsSchema) + m.InitSchema(initSchema(db, dbIdentifier, logger)) // Perform migrations. if err := m.Migrate(); err != nil { diff --git a/stores/migrations_utils.go b/stores/migrations_utils.go new file mode 100644 index 000000000..46d7f3dc4 --- /dev/null +++ b/stores/migrations_utils.go @@ -0,0 +1,57 @@ +package stores + +import ( + "fmt" + + gormigrate "github.com/go-gormigrate/gormigrate/v2" + "go.uber.org/zap" + "gorm.io/gorm" +) + +// initSchema is executed only on a clean database. Otherwise the individual +// migrations are executed. +func initSchema(db *gorm.DB, name string, logger *zap.SugaredLogger) gormigrate.InitSchemaFunc { + return func(tx *gorm.DB) error { + logger.Infof("initializing '%s' schema", name) + + // init schema + err := execSQLFile(tx, name, "schema") + if err != nil { + return fmt.Errorf("failed to init schema: %w", err) + } + + logger.Info("initialization complete") + return nil + } +} + +func performMigration(db *gorm.DB, kind, migration string, logger *zap.SugaredLogger) error { + logger.Infof("performing %s migration '%s'", kind, migration) + + // execute migration + err := execSQLFile(db, kind, fmt.Sprintf("migration_%s", migration)) + if err != nil { + return fmt.Errorf("migration '%s' failed: %w", migration, err) + } + + logger.Infof("migration '%s' complete", migration) + return nil +} + +func execSQLFile(db *gorm.DB, folder, filename string) error { + // build path + protocol := "mysql" + if isSQLite(db) { + protocol = "sqlite" + } + path := fmt.Sprintf("migrations/%s/%s/%s.sql", protocol, folder, filename) + + // read file + file, err := migrations.ReadFile(path) + if err != nil { + return err + } + + // execute it + return db.Exec(string(file)).Error +} diff --git a/stores/multipart.go b/stores/multipart.go index 18706ed0c..864503455 100644 --- a/stores/multipart.go +++ b/stores/multipart.go @@ -187,17 +187,18 @@ func (s *SQLStore) MultipartUploads(ctx context.Context, bucket, prefix, keyMark limit++ } - prefixExpr := exprTRUE - if prefix != "" { - prefixExpr = gorm.Expr("SUBSTR(object_id, 1, ?) = ?", utf8.RuneCountInString(prefix), prefix) + // both markers must be used together + if (keyMarker == "" && uploadIDMarker != "") || (keyMarker != "" && uploadIDMarker == "") { + return api.MultipartListUploadsResponse{}, errors.New("both keyMarker and uploadIDMarker must be set or neither") } - keyMarkerExpr := exprTRUE + markerExpr := exprTRUE if keyMarker != "" { - keyMarkerExpr = gorm.Expr("object_id > ?", keyMarker) + markerExpr = gorm.Expr("object_id > ? OR (object_id = ? AND upload_id > ?)", keyMarker, keyMarker, uploadIDMarker) } - uploadIDMarkerExpr := exprTRUE - if uploadIDMarker != "" { - uploadIDMarkerExpr = gorm.Expr("upload_id > ?", keyMarker) + + prefixExpr := exprTRUE + if prefix != "" { + prefixExpr = gorm.Expr("SUBSTR(object_id, 1, ?) 
= ?", utf8.RuneCountInString(prefix), prefix) } err = s.retryTransaction(func(tx *gorm.DB) error { @@ -205,7 +206,10 @@ func (s *SQLStore) MultipartUploads(ctx context.Context, bucket, prefix, keyMark err := tx. Model(&dbMultipartUpload{}). Joins("DBBucket"). - Where("? AND ? AND ? AND DBBucket.name = ?", prefixExpr, keyMarkerExpr, uploadIDMarkerExpr, bucket). + Where("DBBucket.name", bucket). + Where("?", markerExpr). + Where("?", prefixExpr). + Order("object_id ASC, upload_id ASC"). Limit(limit). Find(&dbUploads). Error @@ -274,26 +278,36 @@ func (s *SQLStore) MultipartUploadParts(ctx context.Context, bucket, object stri func (s *SQLStore) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) error { return s.retryTransaction(func(tx *gorm.DB) error { - // Find multipart upload. - var mu dbMultipartUpload - err := tx.Where("upload_id = ?", uploadID). - Preload("Parts"). - Joins("DBBucket"). - Take(&mu). - Error - if err != nil { - return fmt.Errorf("failed to fetch multipart upload: %w", err) - } - if mu.ObjectID != path { - // Check object id. - return fmt.Errorf("object id mismatch: %v != %v: %w", mu.ObjectID, path, api.ErrObjectNotFound) - } else if mu.DBBucket.Name != bucket { - // Check bucket name. - return fmt.Errorf("bucket name mismatch: %v != %v: %w", mu.DBBucket.Name, bucket, api.ErrBucketNotFound) + // delete multipart upload optimistically + res := tx. + Where("upload_id", uploadID). + Where("object_id", path). + Where("db_bucket_id = (SELECT id FROM buckets WHERE buckets.name = ?)", bucket). + Delete(&dbMultipartUpload{}) + if res.Error != nil { + return fmt.Errorf("failed to fetch multipart upload: %w", res.Error) + } + // if the upload wasn't found, find out why + if res.RowsAffected == 0 { + var mu dbMultipartUpload + err := tx.Where("upload_id = ?", uploadID). + Joins("DBBucket"). + Take(&mu). + Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrMultipartUploadNotFound + } else if err != nil { + return fmt.Errorf("failed to fetch multipart upload: %w", err) + } else if mu.ObjectID != path { + return fmt.Errorf("object id mismatch: %v != %v: %w", mu.ObjectID, path, api.ErrObjectNotFound) + } else if mu.DBBucket.Name != bucket { + return fmt.Errorf("bucket name mismatch: %v != %v: %w", mu.DBBucket.Name, bucket, api.ErrBucketNotFound) + } + return errors.New("failed to delete multipart upload for unknown reason") } - err = tx.Delete(&mu).Error - if err != nil { - return fmt.Errorf("failed to delete multipart upload: %w", err) + // Prune the slabs. + if err := pruneSlabs(tx); err != nil { + return fmt.Errorf("failed to prune slabs: %w", err) } return nil }) @@ -435,6 +449,11 @@ func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, path str if err := tx.Delete(&mu).Error; err != nil { return fmt.Errorf("failed to delete multipart upload: %w", err) } + + // Prune the slabs. 
+ if err := pruneSlabs(tx); err != nil { + return fmt.Errorf("failed to prune slabs: %w", err) + } return nil }) if err != nil { diff --git a/stores/multipart_test.go b/stores/multipart_test.go index eeda43229..37b294418 100644 --- a/stores/multipart_test.go +++ b/stores/multipart_test.go @@ -4,6 +4,8 @@ import ( "context" "encoding/hex" "reflect" + "sort" + "strings" "testing" "time" @@ -168,3 +170,99 @@ func TestMultipartUploadWithUploadPackingRegression(t *testing.T) { t.Fatalf("expected object total size to be %v, got %v", totalSize, obj.TotalSize()) } } + +func TestMultipartUploads(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + // create 3 multipart uploads, the first 2 have the same path + resp1, err := ss.CreateMultipartUpload(context.Background(), api.DefaultBucketName, "/foo", object.NoOpKey, testMimeType, testMetadata) + if err != nil { + t.Fatal(err) + } + resp2, err := ss.CreateMultipartUpload(context.Background(), api.DefaultBucketName, "/foo", object.NoOpKey, testMimeType, testMetadata) + if err != nil { + t.Fatal(err) + } + resp3, err := ss.CreateMultipartUpload(context.Background(), api.DefaultBucketName, "/foo2", object.NoOpKey, testMimeType, testMetadata) + if err != nil { + t.Fatal(err) + } + + // prepare the expected order of uploads returned by MultipartUploads + orderedUploads := []struct { + uploadID string + objectID string + }{ + {uploadID: resp1.UploadID, objectID: "/foo"}, + {uploadID: resp2.UploadID, objectID: "/foo"}, + {uploadID: resp3.UploadID, objectID: "/foo2"}, + } + sort.Slice(orderedUploads, func(i, j int) bool { + if orderedUploads[i].objectID != orderedUploads[j].objectID { + return strings.Compare(orderedUploads[i].objectID, orderedUploads[j].objectID) < 0 + } + return strings.Compare(orderedUploads[i].uploadID, orderedUploads[j].uploadID) < 0 + }) + + // fetch uploads + mur, err := ss.MultipartUploads(context.Background(), api.DefaultBucketName, "", "", "", 3) + if err != nil { + t.Fatal(err) + } else if len(mur.Uploads) != 3 { + t.Fatal("expected 3 uploads") + } else if mur.Uploads[0].UploadID != orderedUploads[0].uploadID { + t.Fatal("unexpected upload id") + } else if mur.Uploads[1].UploadID != orderedUploads[1].uploadID { + t.Fatal("unexpected upload id") + } else if mur.Uploads[2].UploadID != orderedUploads[2].uploadID { + t.Fatal("unexpected upload id") + } + + // fetch uploads with prefix + mur, err = ss.MultipartUploads(context.Background(), api.DefaultBucketName, "/foo", "", "", 3) + if err != nil { + t.Fatal(err) + } else if len(mur.Uploads) != 3 { + t.Fatal("expected 3 uploads") + } else if mur.Uploads[0].UploadID != orderedUploads[0].uploadID { + t.Fatal("unexpected upload id") + } else if mur.Uploads[1].UploadID != orderedUploads[1].uploadID { + t.Fatal("unexpected upload id") + } else if mur.Uploads[2].UploadID != orderedUploads[2].uploadID { + t.Fatal("unexpected upload id") + } + mur, err = ss.MultipartUploads(context.Background(), api.DefaultBucketName, "/foo2", "", "", 3) + if err != nil { + t.Fatal(err) + } else if len(mur.Uploads) != 1 { + t.Fatal("expected 1 upload") + } else if mur.Uploads[0].UploadID != orderedUploads[2].uploadID { + t.Fatal("unexpected upload id") + } + + // paginate through them one-by-one + keyMarker := "" + uploadIDMarker := "" + hasMore := true + for hasMore { + mur, err = ss.MultipartUploads(context.Background(), api.DefaultBucketName, "", keyMarker, uploadIDMarker, 1) + if err != nil { + t.Fatal(err) + } else if 
len(mur.Uploads) != 1 { + t.Fatal("expected 1 upload") + } else if mur.Uploads[0].UploadID != orderedUploads[0].uploadID { + t.Fatalf("unexpected upload id: %v != %v", mur.Uploads[0].UploadID, orderedUploads[0].uploadID) + } + orderedUploads = orderedUploads[1:] + keyMarker = mur.NextPathMarker + uploadIDMarker = mur.NextUploadIDMarker + hasMore = mur.HasMore + } + if len(orderedUploads) != 0 { + t.Fatal("expected 3 iterations") + } +} diff --git a/stores/sql_test.go b/stores/sql_test.go index 3a51161ae..776e3e10e 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -48,6 +48,9 @@ type testSQLStore struct { } type testSQLStoreConfig struct { + dbURI string + dbUser string + dbPassword string dbName string dbMetricsName string dir string @@ -65,9 +68,26 @@ func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore { if dir == "" { dir = t.TempDir() } - dbName := cfg.dbName + + dbURI, dbUser, dbPassword, dbName := DBConfigFromEnv() + if dbURI == "" { + dbURI = cfg.dbURI + } + if cfg.persistent && dbURI != "" { + t.Fatal("invalid store config, can't use both persistent and dbURI") + } + if dbUser == "" { + dbUser = cfg.dbUser + } + if dbPassword == "" { + dbPassword = cfg.dbPassword + } if dbName == "" { - dbName = hex.EncodeToString(frand.Bytes(32)) // random name for db + if cfg.dbName != "" { + dbName = cfg.dbName + } else { + dbName = hex.EncodeToString(frand.Bytes(32)) // random name for db + } } dbMetricsName := cfg.dbMetricsName if dbMetricsName == "" { @@ -75,7 +95,18 @@ func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore { } var conn, connMetrics gorm.Dialector - if cfg.persistent { + if dbURI != "" { + if tmpDB, err := gorm.Open(NewMySQLConnection(dbUser, dbPassword, dbURI, "")); err != nil { + t.Fatal(err) + } else if err := tmpDB.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS `%s`", dbName)).Error; err != nil { + t.Fatal(err) + } else if err := tmpDB.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS `%s`", dbMetricsName)).Error; err != nil { + t.Fatal(err) + } + + conn = NewMySQLConnection(dbUser, dbPassword, dbURI, dbName) + connMetrics = NewMySQLConnection(dbUser, dbPassword, dbURI, dbMetricsName) + } else if cfg.persistent { conn = NewSQLiteConnection(filepath.Join(cfg.dir, "db.sqlite")) connMetrics = NewSQLiteConnection(filepath.Join(cfg.dir, "metrics.sqlite")) } else { @@ -125,6 +156,18 @@ func (s *testSQLStore) Close() error { return nil } +func (s *testSQLStore) DefaultBucketID() uint { + var b dbBucket + if err := s.db. + Model(&dbBucket{}). + Where("name = ?", api.DefaultBucketName). + Take(&b). 
+ Error; err != nil { + s.t.Fatal(err) + } + return b.ID +} + func (s *testSQLStore) Reopen() *testSQLStore { s.t.Helper() cfg := defaultTestSQLStoreConfig @@ -217,11 +260,13 @@ func (s *SQLStore) contractsCount() (cnt int64, err error) { func (s *SQLStore) overrideSlabHealth(objectID string, health float64) (err error) { err = s.db.Exec(fmt.Sprintf(` UPDATE slabs SET health = %v WHERE id IN ( - SELECT sla.id - FROM objects o - INNER JOIN slices sli ON o.id = sli.db_object_id - INNER JOIN slabs sla ON sli.db_slab_id = sla.id - WHERE o.object_id = "%s" + SELECT * FROM ( + SELECT sla.id + FROM objects o + INNER JOIN slices sli ON o.id = sli.db_object_id + INNER JOIN slabs sla ON sli.db_slab_id = sla.id + WHERE o.object_id = "%s" + ) AS sub )`, health, objectID)).Error return } @@ -283,11 +328,24 @@ func TestConsensusReset(t *testing.T) { } } -type queryPlanExplain struct { - ID int `json:"id"` - Parent int `json:"parent"` - NotUsed bool `json:"notused"` - Detail string `json:"detail"` +type sqliteQueryPlan struct { + Detail string `json:"detail"` +} + +func (p sqliteQueryPlan) usesIndex() bool { + d := strings.ToLower(p.Detail) + return strings.Contains(d, "using index") || strings.Contains(d, "using covering index") +} + +//nolint:tagliatelle +type mysqlQueryPlan struct { + Extra string `json:"Extra"` + PossibleKeys string `json:"possible_keys"` +} + +func (p mysqlQueryPlan) usesIndex() bool { + d := strings.ToLower(p.Extra) + return strings.Contains(d, "using index") || strings.Contains(p.PossibleKeys, "idx_") } func TestQueryPlan(t *testing.T) { @@ -323,14 +381,20 @@ func TestQueryPlan(t *testing.T) { } for _, query := range queries { - var explain queryPlanExplain - err := ss.db.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&explain).Error - if err != nil { - t.Fatal(err) - } - if !(strings.Contains(explain.Detail, "USING INDEX") || - strings.Contains(explain.Detail, "USING COVERING INDEX")) { - t.Fatalf("query '%s' should use an index, instead the plan was '%s'", query, explain.Detail) + if isSQLite(ss.db) { + var explain sqliteQueryPlan + if err := ss.db.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&explain).Error; err != nil { + t.Fatal(err) + } else if !explain.usesIndex() { + t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, explain) + } + } else { + var explain mysqlQueryPlan + if err := ss.db.Raw(fmt.Sprintf("EXPLAIN %s;", query)).Scan(&explain).Error; err != nil { + t.Fatal(err) + } else if !explain.usesIndex() { + t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, explain) + } } } } diff --git a/stores/types.go b/stores/types.go index 6b74f7563..42a8d29e4 100644 --- a/stores/types.go +++ b/stores/types.go @@ -2,6 +2,7 @@ package stores import ( "database/sql/driver" + "encoding/binary" "encoding/json" "errors" "fmt" @@ -25,6 +26,7 @@ type ( unixTimeMS time.Time datetime time.Time currency types.Currency + bCurrency types.Currency fileContractID types.FileContractID hash256 types.Hash256 publicKey types.PublicKey @@ -338,3 +340,29 @@ func (u *unsigned64) Scan(value interface{}) error { func (u unsigned64) Value() (driver.Value, error) { return int64(u), nil } + +func (bCurrency) GormDataType() string { + return "bytes" +} + +// Scan implements the sql.Scanner interface. 
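// Editorial sketch (illustrative): Scan/Value below store Hi before Lo, both
// big-endian, in a fixed 16-byte blob. Byte-wise comparison of two stored
// values therefore agrees with numeric comparison, which is what the
// ORDER BY in TestTypeCurrency relies on:
//
//	lo, _ := bCurrency(types.NewCurrency64(1)).Value()
//	hi, _ := bCurrency(types.MaxCurrency).Value()
//	bytes.Compare(lo.([]byte), hi.([]byte)) < 0 // true, matching Currency.Cmp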
+func (sc *bCurrency) Scan(src any) error { + buf, ok := src.([]byte) + if !ok { + return fmt.Errorf("cannot scan %T to Currency", src) + } else if len(buf) != 16 { + return fmt.Errorf("cannot scan %d bytes to Currency", len(buf)) + } + + sc.Hi = binary.BigEndian.Uint64(buf[:8]) + sc.Lo = binary.BigEndian.Uint64(buf[8:]) + return nil +} + +// Value implements the driver.Valuer interface. +func (sc bCurrency) Value() (driver.Value, error) { + buf := make([]byte, 16) + binary.BigEndian.PutUint64(buf[:8], sc.Hi) + binary.BigEndian.PutUint64(buf[8:], sc.Lo) + return buf, nil +} diff --git a/stores/wallet.go b/stores/wallet.go index 679e96074..d9bf51c39 100644 --- a/stores/wallet.go +++ b/stores/wallet.go @@ -130,7 +130,7 @@ func (s *SQLStore) processConsensusChangeWallet(cc modules.ConsensusChange) { // Add/Remove siacoin outputs. for _, diff := range cc.SiacoinOutputDiffs { var sco types.SiacoinOutput - convertToCore(diff.SiacoinOutput, &sco) + convertToCore(diff.SiacoinOutput, (*types.V1SiacoinOutput)(&sco)) if sco.Address != s.walletAddress { continue } @@ -166,7 +166,7 @@ func (s *SQLStore) processConsensusChangeWallet(cc modules.ConsensusChange) { continue } var sco types.SiacoinOutput - convertToCore(dsco.SiacoinOutput, &sco) + convertToCore(dsco.SiacoinOutput, (*types.V1SiacoinOutput)(&sco)) s.unappliedTxnChanges = append(s.unappliedTxnChanges, txnChange{ addition: true, txnID: hash256(dsco.ID), // use output id as txn id @@ -213,7 +213,7 @@ func (s *SQLStore) processConsensusChangeWallet(cc modules.ConsensusChange) { for _, diff := range appliedDiff.SiacoinOutputDiffs { if diff.Direction == modules.DiffRevert { var so types.SiacoinOutput - convertToCore(diff.SiacoinOutput, &so) + convertToCore(diff.SiacoinOutput, (*types.V1SiacoinOutput)(&so)) spentOutputs[types.SiacoinOutputID(diff.ID)] = so } } diff --git a/wallet/seed.go b/wallet/seed.go deleted file mode 100644 index afe9e2abf..000000000 --- a/wallet/seed.go +++ /dev/null @@ -1,149 +0,0 @@ -package wallet - -import ( - "crypto/rand" - "crypto/sha256" - "encoding/binary" - "errors" - "fmt" - "strings" - - "go.sia.tech/core/types" - "golang.org/x/crypto/blake2b" -) - -// NOTE: This is not a full implementation of BIP39; only 12-word phrases (128 -// bits of entropy) are supported. - -func memclr(p []byte) { - for i := range p { - p[i] = 0 - } -} - -// NewSeedPhrase returns a random seed phrase. -func NewSeedPhrase() string { - var entropy [16]byte - if _, err := rand.Read(entropy[:]); err != nil { - panic("insufficient system entropy") - } - return encodeBIP39Phrase(&entropy) -} - -// KeyFromPhrase returns the Ed25519 key derived from the supplied seed phrase. 
-func KeyFromPhrase(phrase string) (types.PrivateKey, error) { - entropy, err := decodeBIP39Phrase(phrase) - if err != nil { - return nil, err - } - h := blake2b.Sum256(entropy[:]) - memclr(entropy[:]) - buf := make([]byte, 32+8) - copy(buf[:32], h[:]) - memclr(h[:]) - binary.LittleEndian.PutUint64(buf[32:], 0) - seed := blake2b.Sum256(buf) - key := types.NewPrivateKeyFromSeed(seed[:]) - memclr(seed[:]) - return key, nil -} - -func bip39checksum(entropy *[16]byte) uint64 { - hash := sha256.Sum256(entropy[:]) - return uint64((hash[0] & 0xF0) >> 4) -} - -func encodeBIP39Phrase(entropy *[16]byte) string { - // convert entropy to a 128-bit integer - hi := binary.BigEndian.Uint64(entropy[:8]) - lo := binary.BigEndian.Uint64(entropy[8:]) - - // convert each group of 11 bits into a word - words := make([]string, 12) - // last word is special: 4 bits are checksum - w := ((lo & 0x7F) << 4) | bip39checksum(entropy) - words[len(words)-1] = bip39EnglishWordList[w] - lo = lo>>7 | hi<<(64-7) - hi >>= 7 - for i := len(words) - 2; i >= 0; i-- { - words[i] = bip39EnglishWordList[lo&0x7FF] - lo = lo>>11 | hi<<(64-11) - hi >>= 11 - } - - return strings.Join(words, " ") -} - -func decodeBIP39Phrase(phrase string) (*[16]byte, error) { - // validate that the phrase is well formed and only contains words that - // are present in the word list - words := strings.Fields(phrase) - if n := len(words); n != 12 { - return nil, errors.New("wrong number of words in seed phrase") - } - for _, word := range words { - if _, ok := wordMap[word]; !ok { - return nil, fmt.Errorf("unrecognized word %q in seed phrase", word) - } - } - - // convert words to 128 bits, 11 bits at a time - var lo, hi uint64 - for _, v := range words[:len(words)-1] { - hi = hi<<11 | lo>>(64-11) - lo = lo<<11 | wordMap[v] - } - // last word is special: least-significant 4 bits are checksum, so shift - // them off and only add the remaining 7 bits - w := wordMap[words[len(words)-1]] - checksum := w & 0xF - hi = hi<<7 | lo>>(64-7) - lo = lo<<7 | w>>4 - - // convert to big-endian byte slice - var entropy [16]byte - binary.BigEndian.PutUint64(entropy[:8], hi) - binary.BigEndian.PutUint64(entropy[8:], lo) - - // validate checksum - if bip39checksum(&entropy) != checksum { - return nil, errors.New("invalid checksum") - } - return &entropy, nil -} - -var wordMap = func() map[string]uint64 { - m := make(map[string]uint64, len(bip39EnglishWordList)) - for i, v := range bip39EnglishWordList { - m[v] = uint64(i) - } - return m -}() - -var bip39EnglishWordList = []string{ - "abandon", "ability", "able", "about", "above", "absent", "absorb", "abstract", "absurd", "abuse", "access", "accident", "account", "accuse", "achieve", "acid", "acoustic", "acquire", "across", "act", "action", "actor", "actress", "actual", "adapt", "add", "addict", "address", "adjust", "admit", "adult", "advance", "advice", "aerobic", "affair", "afford", "afraid", "again", "age", "agent", "agree", "ahead", "aim", "air", "airport", "aisle", "alarm", "album", "alcohol", "alert", "alien", "all", "alley", "allow", "almost", "alone", "alpha", "already", "also", "alter", "always", "amateur", "amazing", "among", "amount", "amused", "analyst", "anchor", "ancient", "anger", "angle", "angry", "animal", "ankle", "announce", "annual", "another", "answer", "antenna", "antique", "anxiety", "any", "apart", "apology", "appear", "apple", "approve", "april", "arch", "arctic", "area", "arena", "argue", "arm", "armed", "armor", "army", "around", "arrange", "arrest", "arrive", "arrow", "art", "artefact", 
"artist", "artwork", "ask", "aspect", "assault", "asset", "assist", "assume", "asthma", "athlete", "atom", "attack", "attend", "attitude", "attract", "auction", "audit", "august", "aunt", "author", "auto", "autumn", "average", "avocado", "avoid", "awake", "aware", "away", "awesome", "awful", "awkward", "axis", - "baby", "bachelor", "bacon", "badge", "bag", "balance", "balcony", "ball", "bamboo", "banana", "banner", "bar", "barely", "bargain", "barrel", "base", "basic", "basket", "battle", "beach", "bean", "beauty", "because", "become", "beef", "before", "begin", "behave", "behind", "believe", "below", "belt", "bench", "benefit", "best", "betray", "better", "between", "beyond", "bicycle", "bid", "bike", "bind", "biology", "bird", "birth", "bitter", "black", "blade", "blame", "blanket", "blast", "bleak", "bless", "blind", "blood", "blossom", "blouse", "blue", "blur", "blush", "board", "boat", "body", "boil", "bomb", "bone", "bonus", "book", "boost", "border", "boring", "borrow", "boss", "bottom", "bounce", "box", "boy", "bracket", "brain", "brand", "brass", "brave", "bread", "breeze", "brick", "bridge", "brief", "bright", "bring", "brisk", "broccoli", "broken", "bronze", "broom", "brother", "brown", "brush", "bubble", "buddy", "budget", "buffalo", "build", "bulb", "bulk", "bullet", "bundle", "bunker", "burden", "burger", "burst", "bus", "business", "busy", "butter", "buyer", "buzz", - "cabbage", "cabin", "cable", "cactus", "cage", "cake", "call", "calm", "camera", "camp", "can", "canal", "cancel", "candy", "cannon", "canoe", "canvas", "canyon", "capable", "capital", "captain", "car", "carbon", "card", "cargo", "carpet", "carry", "cart", "case", "cash", "casino", "castle", "casual", "cat", "catalog", "catch", "category", "cattle", "caught", "cause", "caution", "cave", "ceiling", "celery", "cement", "census", "century", "cereal", "certain", "chair", "chalk", "champion", "change", "chaos", "chapter", "charge", "chase", "chat", "cheap", "check", "cheese", "chef", "cherry", "chest", "chicken", "chief", "child", "chimney", "choice", "choose", "chronic", "chuckle", "chunk", "churn", "cigar", "cinnamon", "circle", "citizen", "city", "civil", "claim", "clap", "clarify", "claw", "clay", "clean", "clerk", "clever", "click", "client", "cliff", "climb", "clinic", "clip", "clock", "clog", "close", "cloth", "cloud", "clown", "club", "clump", "cluster", "clutch", "coach", "coast", "coconut", "code", "coffee", "coil", "coin", "collect", "color", "column", "combine", "come", "comfort", "comic", "common", "company", "concert", "conduct", "confirm", "congress", "connect", "consider", "control", "convince", "cook", "cool", "copper", "copy", "coral", "core", "corn", "correct", "cost", "cotton", "couch", "country", "couple", "course", "cousin", "cover", "coyote", "crack", "cradle", "craft", "cram", "crane", "crash", "crater", "crawl", "crazy", "cream", "credit", "creek", "crew", "cricket", "crime", "crisp", "critic", "crop", "cross", "crouch", "crowd", "crucial", "cruel", "cruise", "crumble", "crunch", "crush", "cry", "crystal", "cube", "culture", "cup", "cupboard", "curious", "current", "curtain", "curve", "cushion", "custom", "cute", "cycle", - "dad", "damage", "damp", "dance", "danger", "daring", "dash", "daughter", "dawn", "day", "deal", "debate", "debris", "decade", "december", "decide", "decline", "decorate", "decrease", "deer", "defense", "define", "defy", "degree", "delay", "deliver", "demand", "demise", "denial", "dentist", "deny", "depart", "depend", "deposit", "depth", "deputy", "derive", "describe", 
"desert", "design", "desk", "despair", "destroy", "detail", "detect", "develop", "device", "devote", "diagram", "dial", "diamond", "diary", "dice", "diesel", "diet", "differ", "digital", "dignity", "dilemma", "dinner", "dinosaur", "direct", "dirt", "disagree", "discover", "disease", "dish", "dismiss", "disorder", "display", "distance", "divert", "divide", "divorce", "dizzy", "doctor", "document", "dog", "doll", "dolphin", "domain", "donate", "donkey", "donor", "door", "dose", "double", "dove", "draft", "dragon", "drama", "drastic", "draw", "dream", "dress", "drift", "drill", "drink", "drip", "drive", "drop", "drum", "dry", "duck", "dumb", "dune", "during", "dust", "dutch", "duty", "dwarf", "dynamic", - "eager", "eagle", "early", "earn", "earth", "easily", "east", "easy", "echo", "ecology", "economy", "edge", "edit", "educate", "effort", "egg", "eight", "either", "elbow", "elder", "electric", "elegant", "element", "elephant", "elevator", "elite", "else", "embark", "embody", "embrace", "emerge", "emotion", "employ", "empower", "empty", "enable", "enact", "end", "endless", "endorse", "enemy", "energy", "enforce", "engage", "engine", "enhance", "enjoy", "enlist", "enough", "enrich", "enroll", "ensure", "enter", "entire", "entry", "envelope", "episode", "equal", "equip", "era", "erase", "erode", "erosion", "error", "erupt", "escape", "essay", "essence", "estate", "eternal", "ethics", "evidence", "evil", "evoke", "evolve", "exact", "example", "excess", "exchange", "excite", "exclude", "excuse", "execute", "exercise", "exhaust", "exhibit", "exile", "exist", "exit", "exotic", "expand", "expect", "expire", "explain", "expose", "express", "extend", "extra", "eye", "eyebrow", - "fabric", "face", "faculty", "fade", "faint", "faith", "fall", "false", "fame", "family", "famous", "fan", "fancy", "fantasy", "farm", "fashion", "fat", "fatal", "father", "fatigue", "fault", "favorite", "feature", "february", "federal", "fee", "feed", "feel", "female", "fence", "festival", "fetch", "fever", "few", "fiber", "fiction", "field", "figure", "file", "film", "filter", "final", "find", "fine", "finger", "finish", "fire", "firm", "first", "fiscal", "fish", "fit", "fitness", "fix", "flag", "flame", "flash", "flat", "flavor", "flee", "flight", "flip", "float", "flock", "floor", "flower", "fluid", "flush", "fly", "foam", "focus", "fog", "foil", "fold", "follow", "food", "foot", "force", "forest", "forget", "fork", "fortune", "forum", "forward", "fossil", "foster", "found", "fox", "fragile", "frame", "frequent", "fresh", "friend", "fringe", "frog", "front", "frost", "frown", "frozen", "fruit", "fuel", "fun", "funny", "furnace", "fury", "future", - "gadget", "gain", "galaxy", "gallery", "game", "gap", "garage", "garbage", "garden", "garlic", "garment", "gas", "gasp", "gate", "gather", "gauge", "gaze", "general", "genius", "genre", "gentle", "genuine", "gesture", "ghost", "giant", "gift", "giggle", "ginger", "giraffe", "girl", "give", "glad", "glance", "glare", "glass", "glide", "glimpse", "globe", "gloom", "glory", "glove", "glow", "glue", "goat", "goddess", "gold", "good", "goose", "gorilla", "gospel", "gossip", "govern", "gown", "grab", "grace", "grain", "grant", "grape", "grass", "gravity", "great", "green", "grid", "grief", "grit", "grocery", "group", "grow", "grunt", "guard", "guess", "guide", "guilt", "guitar", "gun", "gym", "habit", - "hair", "half", "hammer", "hamster", "hand", "happy", "harbor", "hard", "harsh", "harvest", "hat", "have", "hawk", "hazard", "head", "health", "heart", "heavy", "hedgehog", "height", 
"hello", "helmet", "help", "hen", "hero", "hidden", "high", "hill", "hint", "hip", "hire", "history", "hobby", "hockey", "hold", "hole", "holiday", "hollow", "home", "honey", "hood", "hope", "horn", "horror", "horse", "hospital", "host", "hotel", "hour", "hover", "hub", "huge", "human", "humble", "humor", "hundred", "hungry", "hunt", "hurdle", "hurry", "hurt", "husband", "hybrid", - "ice", "icon", "idea", "identify", "idle", "ignore", "ill", "illegal", "illness", "image", "imitate", "immense", "immune", "impact", "impose", "improve", "impulse", "inch", "include", "income", "increase", "index", "indicate", "indoor", "industry", "infant", "inflict", "inform", "inhale", "inherit", "initial", "inject", "injury", "inmate", "inner", "innocent", "input", "inquiry", "insane", "insect", "inside", "inspire", "install", "intact", "interest", "into", "invest", "invite", "involve", "iron", "island", "isolate", "issue", "item", "ivory", - "jacket", "jaguar", "jar", "jazz", "jealous", "jeans", "jelly", "jewel", "job", "join", "joke", "journey", "joy", "judge", "juice", "jump", "jungle", "junior", "junk", "just", - "kangaroo", "keen", "keep", "ketchup", "key", "kick", "kid", "kidney", "kind", "kingdom", "kiss", "kit", "kitchen", "kite", "kitten", "kiwi", "knee", "knife", "knock", "know", - "lab", "label", "labor", "ladder", "lady", "lake", "lamp", "language", "laptop", "large", "later", "latin", "laugh", "laundry", "lava", "law", "lawn", "lawsuit", "layer", "lazy", "leader", "leaf", "learn", "leave", "lecture", "left", "leg", "legal", "legend", "leisure", "lemon", "lend", "length", "lens", "leopard", "lesson", "letter", "level", "liar", "liberty", "library", "license", "life", "lift", "light", "like", "limb", "limit", "link", "lion", "liquid", "list", "little", "live", "lizard", "load", "loan", "lobster", "local", "lock", "logic", "lonely", "long", "loop", "lottery", "loud", "lounge", "love", "loyal", "lucky", "luggage", "lumber", "lunar", "lunch", "luxury", "lyrics", - "machine", "mad", "magic", "magnet", "maid", "mail", "main", "major", "make", "mammal", "man", "manage", "mandate", "mango", "mansion", "manual", "maple", "marble", "march", "margin", "marine", "market", "marriage", "mask", "mass", "master", "match", "material", "math", "matrix", "matter", "maximum", "maze", "meadow", "mean", "measure", "meat", "mechanic", "medal", "media", "melody", "melt", "member", "memory", "mention", "menu", "mercy", "merge", "merit", "merry", "mesh", "message", "metal", "method", "middle", "midnight", "milk", "million", "mimic", "mind", "minimum", "minor", "minute", "miracle", "mirror", "misery", "miss", "mistake", "mix", "mixed", "mixture", "mobile", "model", "modify", "mom", "moment", "monitor", "monkey", "monster", "month", "moon", "moral", "more", "morning", "mosquito", "mother", "motion", "motor", "mountain", "mouse", "move", "movie", "much", "muffin", "mule", "multiply", "muscle", "museum", "mushroom", "music", "must", "mutual", "myself", "mystery", "myth", - "naive", "name", "napkin", "narrow", "nasty", "nation", "nature", "near", "neck", "need", "negative", "neglect", "neither", "nephew", "nerve", "nest", "net", "network", "neutral", "never", "news", "next", "nice", "night", "noble", "noise", "nominee", "noodle", "normal", "north", "nose", "notable", "note", "nothing", "notice", "novel", "now", "nuclear", "number", "nurse", "nut", - "oak", "obey", "object", "oblige", "obscure", "observe", "obtain", "obvious", "occur", "ocean", "october", "odor", "off", "offer", "office", "often", "oil", "okay", "old", 
"olive", "olympic", "omit", "once", "one", "onion", "online", "only", "open", "opera", "opinion", "oppose", "option", "orange", "orbit", "orchard", "order", "ordinary", "organ", "orient", "original", "orphan", "ostrich", "other", "outdoor", "outer", "output", "outside", "oval", "oven", "over", "own", "owner", "oxygen", "oyster", "ozone", - "pact", "paddle", "page", "pair", "palace", "palm", "panda", "panel", "panic", "panther", "paper", "parade", "parent", "park", "parrot", "party", "pass", "patch", "path", "patient", "patrol", "pattern", "pause", "pave", "payment", "peace", "peanut", "pear", "peasant", "pelican", "pen", "penalty", "pencil", "people", "pepper", "perfect", "permit", "person", "pet", "phone", "photo", "phrase", "physical", "piano", "picnic", "picture", "piece", "pig", "pigeon", "pill", "pilot", "pink", "pioneer", "pipe", "pistol", "pitch", "pizza", "place", "planet", "plastic", "plate", "play", "please", "pledge", "pluck", "plug", "plunge", "poem", "poet", "point", "polar", "pole", "police", "pond", "pony", "pool", "popular", "portion", "position", "possible", "post", "potato", "pottery", "poverty", "powder", "power", "practice", "praise", "predict", "prefer", "prepare", "present", "pretty", "prevent", "price", "pride", "primary", "print", "priority", "prison", "private", "prize", "problem", "process", "produce", "profit", "program", "project", "promote", "proof", "property", "prosper", "protect", "proud", "provide", "public", "pudding", "pull", "pulp", "pulse", "pumpkin", "punch", "pupil", "puppy", "purchase", "purity", "purpose", "purse", "push", "put", "puzzle", "pyramid", - "quality", "quantum", "quarter", "question", "quick", "quit", "quiz", "quote", - "rabbit", "raccoon", "race", "rack", "radar", "radio", "rail", "rain", "raise", "rally", "ramp", "ranch", "random", "range", "rapid", "rare", "rate", "rather", "raven", "raw", "razor", "ready", "real", "reason", "rebel", "rebuild", "recall", "receive", "recipe", "record", "recycle", "reduce", "reflect", "reform", "refuse", "region", "regret", "regular", "reject", "relax", "release", "relief", "rely", "remain", "remember", "remind", "remove", "render", "renew", "rent", "reopen", "repair", "repeat", "replace", "report", "require", "rescue", "resemble", "resist", "resource", "response", "result", "retire", "retreat", "return", "reunion", "reveal", "review", "reward", "rhythm", "rib", "ribbon", "rice", "rich", "ride", "ridge", "rifle", "right", "rigid", "ring", "riot", "ripple", "risk", "ritual", "rival", "river", "road", "roast", "robot", "robust", "rocket", "romance", "roof", "rookie", "room", "rose", "rotate", "rough", "round", "route", "royal", "rubber", "rude", "rug", "rule", "run", "runway", "rural", - "sad", "saddle", "sadness", "safe", "sail", "salad", "salmon", "salon", "salt", "salute", "same", "sample", "sand", "satisfy", "satoshi", "sauce", "sausage", "save", "say", "scale", "scan", "scare", "scatter", "scene", "scheme", "school", "science", "scissors", "scorpion", "scout", "scrap", "screen", "script", "scrub", "sea", "search", "season", "seat", "second", "secret", "section", "security", "seed", "seek", "segment", "select", "sell", "seminar", "senior", "sense", "sentence", "series", "service", "session", "settle", "setup", "seven", "shadow", "shaft", "shallow", "share", "shed", "shell", "sheriff", "shield", "shift", "shine", "ship", "shiver", "shock", "shoe", "shoot", "shop", "short", "shoulder", "shove", "shrimp", "shrug", "shuffle", "shy", "sibling", "sick", "side", "siege", "sight", "sign", "silent", "silk", 
"silly", "silver", "similar", "simple", "since", "sing", "siren", "sister", "situate", "six", "size", "skate", "sketch", "ski", "skill", "skin", "skirt", "skull", "slab", "slam", "sleep", "slender", "slice", "slide", "slight", "slim", "slogan", "slot", "slow", "slush", "small", "smart", "smile", "smoke", "smooth", "snack", "snake", "snap", "sniff", "snow", "soap", "soccer", "social", "sock", "soda", "soft", "solar", "soldier", "solid", "solution", "solve", "someone", "song", "soon", "sorry", "sort", "soul", "sound", "soup", "source", "south", "space", "spare", "spatial", "spawn", "speak", "special", "speed", "spell", "spend", "sphere", "spice", "spider", "spike", "spin", "spirit", "split", "spoil", "sponsor", "spoon", "sport", "spot", "spray", "spread", "spring", "spy", "square", "squeeze", "squirrel", "stable", "stadium", "staff", "stage", "stairs", "stamp", "stand", "start", "state", "stay", "steak", "steel", "stem", "step", "stereo", "stick", "still", "sting", "stock", "stomach", "stone", "stool", "story", "stove", "strategy", "street", "strike", "strong", "struggle", "student", "stuff", "stumble", "style", "subject", "submit", "subway", "success", "such", "sudden", "suffer", "sugar", "suggest", "suit", "summer", "sun", "sunny", "sunset", "super", "supply", "supreme", "sure", "surface", "surge", "surprise", "surround", "survey", "suspect", "sustain", "swallow", "swamp", "swap", "swarm", "swear", "sweet", "swift", "swim", "swing", "switch", "sword", "symbol", "symptom", "syrup", "system", - "table", "tackle", "tag", "tail", "talent", "talk", "tank", "tape", "target", "task", "taste", "tattoo", "taxi", "teach", "team", "tell", "ten", "tenant", "tennis", "tent", "term", "test", "text", "thank", "that", "theme", "then", "theory", "there", "they", "thing", "this", "thought", "three", "thrive", "throw", "thumb", "thunder", "ticket", "tide", "tiger", "tilt", "timber", "time", "tiny", "tip", "tired", "tissue", "title", "toast", "tobacco", "today", "toddler", "toe", "together", "toilet", "token", "tomato", "tomorrow", "tone", "tongue", "tonight", "tool", "tooth", "top", "topic", "topple", "torch", "tornado", "tortoise", "toss", "total", "tourist", "toward", "tower", "town", "toy", "track", "trade", "traffic", "tragic", "train", "transfer", "trap", "trash", "travel", "tray", "treat", "tree", "trend", "trial", "tribe", "trick", "trigger", "trim", "trip", "trophy", "trouble", "truck", "true", "truly", "trumpet", "trust", "truth", "try", "tube", "tuition", "tumble", "tuna", "tunnel", "turkey", "turn", "turtle", "twelve", "twenty", "twice", "twin", "twist", "two", "type", "typical", - "ugly", "umbrella", "unable", "unaware", "uncle", "uncover", "under", "undo", "unfair", "unfold", "unhappy", "uniform", "unique", "unit", "universe", "unknown", "unlock", "until", "unusual", "unveil", "update", "upgrade", "uphold", "upon", "upper", "upset", "urban", "urge", "usage", "use", "used", "useful", "useless", "usual", "utility", - "vacant", "vacuum", "vague", "valid", "valley", "valve", "van", "vanish", "vapor", "various", "vast", "vault", "vehicle", "velvet", "vendor", "venture", "venue", "verb", "verify", "version", "very", "vessel", "veteran", "viable", "vibrant", "vicious", "victory", "video", "view", "village", "vintage", "violin", "virtual", "virus", "visa", "visit", "visual", "vital", "vivid", "vocal", "voice", "void", "volcano", "volume", "vote", "voyage", - "wage", "wagon", "wait", "walk", "wall", "walnut", "want", "warfare", "warm", "warrior", "wash", "wasp", "waste", "water", "wave", "way", "wealth", 
"weapon", "wear", "weasel", "weather", "web", "wedding", "weekend", "weird", "welcome", "west", "wet", "whale", "what", "wheat", "wheel", "when", "where", "whip", "whisper", "wide", "width", "wife", "wild", "will", "win", "window", "wine", "wing", "wink", "winner", "winter", "wire", "wisdom", "wise", "wish", "witness", "wolf", "woman", "wonder", "wood", "wool", "word", "work", "world", "worry", "worth", "wrap", "wreck", "wrestle", "wrist", "write", "wrong", - "yard", "year", "yellow", "you", "young", "youth", - "zebra", "zero", "zone", "zoo", -} diff --git a/worker/bench_test.go b/worker/bench_test.go new file mode 100644 index 000000000..cc0034415 --- /dev/null +++ b/worker/bench_test.go @@ -0,0 +1,101 @@ +package worker + +import ( + "bytes" + "context" + "io" + "testing" + + rhpv2 "go.sia.tech/core/rhp/v2" + "go.sia.tech/renterd/api" + "lukechampine.com/frand" +) + +// zeroReader is a reader that leaves the buffer unchanged and returns no error. +// It's useful for benchmarks that need to produce data for uploading and should +// be used together with a io.LimitReader. +type zeroReader struct{} + +func (z *zeroReader) Read(p []byte) (n int, err error) { + return len(p), nil +} + +// BenchmarkDownlaoderSingleObject benchmarks downloading a single, slab-sized +// object. +// 1036.74 MB/s | M2 Pro | c9dc1b6 +func BenchmarkDownloaderSingleObject(b *testing.B) { + w := newTestWorker(b) + + up := testParameters(b.TempDir()) + up.rs.MinShards = 10 + up.rs.TotalShards = 30 + up.packing = false + w.AddHosts(up.rs.TotalShards) + + data := bytes.NewReader(frand.Bytes(int(up.rs.SlabSizeNoRedundancy()))) + _, _, err := w.uploadManager.Upload(context.Background(), data, w.Contracts(), up, lockingPriorityUpload) + if err != nil { + b.Fatal(err) + } + o, err := w.os.Object(context.Background(), testBucket, up.path, api.GetObjectOptions{}) + if err != nil { + b.Fatal(err) + } + + b.SetBytes(o.Object.Size) + b.ResetTimer() + for i := 0; i < b.N; i++ { + err = w.downloadManager.DownloadObject(context.Background(), io.Discard, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) + if err != nil { + b.Fatal(err) + } + } +} + +// BenchmarkUploaderSingleObject benchmarks uploading a single object. +// +// Speed | CPU | Commit +// 433.86 MB/s | M2 Pro | bae6e77 +func BenchmarkUploaderSingleObject(b *testing.B) { + w := newTestWorker(b) + + up := testParameters(b.TempDir()) + up.rs.MinShards = 10 + up.rs.TotalShards = 30 + up.packing = false + w.AddHosts(up.rs.TotalShards) + + data := io.LimitReader(&zeroReader{}, int64(b.N*rhpv2.SectorSize*up.rs.MinShards)) + b.SetBytes(int64(rhpv2.SectorSize * up.rs.MinShards)) + b.ResetTimer() + + _, _, err := w.uploadManager.Upload(context.Background(), data, w.Contracts(), up, lockingPriorityUpload) + if err != nil { + b.Fatal(err) + } +} + +// BenchmarkUploaderSingleObject benchmarks uploading one object per slab. 
+// +// Speed | CPU | Commit +// 282.47 MB/s | M2 Pro | bae6e77 +func BenchmarkUploaderMultiObject(b *testing.B) { + w := newTestWorker(b) + + up := testParameters(b.TempDir()) + up.rs.MinShards = 10 + up.rs.TotalShards = 30 + up.packing = false + w.AddHosts(up.rs.TotalShards) + + b.SetBytes(int64(rhpv2.SectorSize * up.rs.MinShards)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + data := io.LimitReader(&zeroReader{}, int64(rhpv2.SectorSize*up.rs.MinShards)) + _, _, err := w.uploadManager.Upload(context.Background(), data, w.Contracts(), up, lockingPriorityUpload) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/worker/client/client.go b/worker/client/client.go index f45789093..410e4c66e 100644 --- a/worker/client/client.go +++ b/worker/client/client.go @@ -77,13 +77,49 @@ func (c *Client) DownloadStats() (resp api.DownloadStatsResponse, err error) { return } +// HeadObject returns the metadata of the object at the given path. +func (c *Client) HeadObject(ctx context.Context, bucket, path string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) { + c.c.Custom("HEAD", fmt.Sprintf("/objects/%s", path), nil, nil) + + if strings.HasSuffix(path, "/") { + return nil, errors.New("the given path is a directory, HEAD can only be performed on objects") + } + + values := url.Values{} + values.Set("bucket", url.QueryEscape(bucket)) + path += "?" + values.Encode() + + // TODO: support HEAD in jape client + req, err := http.NewRequestWithContext(ctx, "HEAD", fmt.Sprintf("%s/objects/%s", c.c.BaseURL, path), nil) + if err != nil { + panic(err) + } + req.SetBasicAuth("", c.c.WithContext(ctx).Password) + opts.ApplyHeaders(req.Header) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + if resp.StatusCode != 200 && resp.StatusCode != 206 { + err, _ := io.ReadAll(resp.Body) + _ = resp.Body.Close() + return nil, errors.New(string(err)) + } + + head, err := parseObjectResponseHeaders(resp.Header) + if err != nil { + return nil, err + } + return &head, nil +} + // GetObject returns the object at given path alongside its metadata. func (c *Client) GetObject(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) { if strings.HasSuffix(path, "/") { return nil, errors.New("the given path is a directory, use ObjectEntries instead") } - // Start download. path = api.ObjectPathEscape(path) body, header, err := c.object(ctx, bucket, path, opts) if err != nil { @@ -96,41 +132,14 @@ func (c *Client) GetObject(ctx context.Context, bucket, path string, opts api.Do } }() - // Parse header. - var size int64 - _, err = fmt.Sscan(header.Get("Content-Length"), &size) + head, err := parseObjectResponseHeaders(header) if err != nil { return nil, err } - var r *api.DownloadRange - if cr := header.Get("Content-Range"); cr != "" { - dr, err := api.ParseDownloadRange(cr) - if err != nil { - return nil, err - } - r = &dr - - // If a range is set, the size is the size extracted from the range - // since Content-Length will then only be the length of the returned - // range. - size = dr.Size - } - - // Parse headers. 
- headers := make(map[string]string) - for k, v := range header { - if len(v) > 0 { - headers[k] = v[0] - } - } return &api.GetObjectResponse{ - Content: body, - ContentType: header.Get("Content-Type"), - LastModified: header.Get("Last-Modified"), - Range: r, - Size: size, - Metadata: api.ExtractObjectUserMetadataFrom(headers), + Content: body, + HeadObjectResponse: head, }, nil } @@ -283,6 +292,46 @@ func (c *Client) object(ctx context.Context, bucket, path string, opts api.Downl return resp.Body, resp.Header, err } +func parseObjectResponseHeaders(header http.Header) (api.HeadObjectResponse, error) { + // parse size + var size int64 + _, err := fmt.Sscan(header.Get("Content-Length"), &size) + if err != nil { + return api.HeadObjectResponse{}, err + } + + // parse range + var r *api.DownloadRange + if cr := header.Get("Content-Range"); cr != "" { + dr, err := api.ParseDownloadRange(cr) + if err != nil { + return api.HeadObjectResponse{}, err + } + r = &dr + + // if a range is set, the size is the size extracted from the range + // since Content-Length will then only be the length of the returned + // range. + size = dr.Size + } + + // parse headers + headers := make(map[string]string) + for k, v := range header { + if len(v) > 0 { + headers[k] = v[0] + } + } + + return api.HeadObjectResponse{ + ContentType: header.Get("Content-Type"), + LastModified: header.Get("Last-Modified"), + Range: r, + Size: size, + Metadata: api.ExtractObjectUserMetadataFrom(headers), + }, nil +} + func sizeFromSeeker(r io.Reader) (int64, error) { s, ok := r.(io.Seeker) if !ok { diff --git a/worker/download.go b/worker/download.go index f23c8e640..3a58bbc98 100644 --- a/worker/download.go +++ b/worker/download.go @@ -26,6 +26,7 @@ const ( var ( errDownloadNotEnoughHosts = errors.New("not enough hosts available to download the slab") + errDownloadCancelled = errors.New("download was cancelled") ) type ( @@ -131,7 +132,7 @@ func (w *worker) initDownloadManager(maxMemory, maxOverdrive uint64, overdriveTi panic("download manager already initialized") // developer error } - mm := newMemoryManager(logger, maxMemory) + mm := newMemoryManager(logger.Named("memorymanager"), maxMemory) w.downloadManager = newDownloadManager(w.shutdownCtx, w, mm, w.bus, maxOverdrive, overdriveTimeout, logger) } @@ -194,12 +195,13 @@ func (mgr *downloadManager) DownloadObject(ctx context.Context, w io.Writer, o o hosts[c.HostKey] = struct{}{} } - // buffer the writer - bw := bufio.NewWriter(w) - defer bw.Flush() - // create the cipher writer - cw := o.Key.Decrypt(bw, offset) + cw := o.Key.Decrypt(w, offset) + + // buffer the writer we recover to making sure that we don't hammer the + // response writer with tiny writes + bw := bufio.NewWriter(cw) + defer bw.Flush() // create response chan and ensure it's closed properly var wg sync.WaitGroup @@ -290,7 +292,7 @@ outer: case <-mgr.shutdownCtx.Done(): return ErrShuttingDown case <-ctx.Done(): - return errors.New("download timed out") + return errDownloadCancelled case resp = <-responseChan: } @@ -313,7 +315,7 @@ outer: s := slabs[respIndex] if s.PartialSlab { // Partial slab. - _, err = cw.Write(s.Data) + _, err = bw.Write(s.Data) if err != nil { mgr.logger.Errorf("failed to send partial slab", respIndex, err) return err @@ -321,7 +323,7 @@ outer: } else { // Regular slab. 
slabs[respIndex].Decrypt(next.shards) - err := slabs[respIndex].Recover(cw, next.shards) + err := slabs[respIndex].Recover(bw, next.shards) if err != nil { mgr.logger.Errorf("failed to recover slab %v: %v", respIndex, err) return err @@ -760,8 +762,6 @@ loop: if isSectorNotFound(resp.err) { if err := s.mgr.os.DeleteHostSector(ctx, resp.req.host.PublicKey(), resp.req.root); err != nil { s.mgr.logger.Errorw("failed to mark sector as lost", "hk", resp.req.host.PublicKey(), "root", resp.req.root, zap.Error(err)) - } else { - s.mgr.logger.Infow("successfully marked sector as lost", "hk", resp.req.host.PublicKey(), "root", resp.req.root) } } else if isPriceTableGouging(resp.err) && s.overpay && !resp.req.overpay { resp.req.overpay = true // ensures we don't retry the same request over and over again diff --git a/worker/downloader.go b/worker/downloader.go index 30c855d80..24be245fc 100644 --- a/worker/downloader.go +++ b/worker/downloader.go @@ -18,9 +18,12 @@ const ( maxConcurrentSectorsPerHost = 3 ) +var ( + errDownloaderStopped = errors.New("downloader was stopped") +) + type ( downloader struct { - hk types.PublicKey host Host statsDownloadSpeedBytesPerMS *stats.DataPoints // keep track of this separately for stats (no decay is applied) @@ -33,6 +36,7 @@ type ( consecutiveFailures uint64 numDownloads uint64 queue []*sectorDownloadReq + stopped bool } ) @@ -55,13 +59,17 @@ func (d *downloader) PublicKey() types.PublicKey { } func (d *downloader) Stop() { + d.mu.Lock() + d.stopped = true + d.mu.Unlock() + for { download := d.pop() if download == nil { break } if !download.done() { - download.fail(errors.New("downloader stopped")) + download.fail(errDownloaderStopped) } } } @@ -80,8 +88,15 @@ func (d *downloader) fillBatch() (batch []*sectorDownloadReq) { } func (d *downloader) enqueue(download *sectorDownloadReq) { - // enqueue the job d.mu.Lock() + // check for stopped + if d.stopped { + d.mu.Unlock() + go download.fail(errDownloaderStopped) // don't block the caller + return + } + + // enqueue the job d.queue = append(d.queue, download) d.mu.Unlock() diff --git a/worker/downloader_test.go b/worker/downloader_test.go new file mode 100644 index 000000000..8097b8304 --- /dev/null +++ b/worker/downloader_test.go @@ -0,0 +1,36 @@ +package worker + +import ( + "errors" + "testing" + "time" +) + +func TestDownloaderStopped(t *testing.T) { + w := newTestWorker(t) + hosts := w.AddHosts(1) + + // convenience variables + dm := w.downloadManager + h := hosts[0] + + dm.refreshDownloaders(w.Contracts()) + dl := w.downloadManager.downloaders[h.PublicKey()] + dl.Stop() + + req := sectorDownloadReq{ + resps: §orResponses{ + c: make(chan struct{}, 1), + }, + } + dl.enqueue(&req) + + select { + case <-req.resps.c: + if err := req.resps.responses[0].err; !errors.Is(err, errDownloaderStopped) { + t.Fatal("unexpected error response", err) + } + case <-time.After(10 * time.Second): + t.Fatal("no response") + } +} diff --git a/worker/gouging.go b/worker/gouging.go index 5e77c3053..a7b2078a1 100644 --- a/worker/gouging.go +++ b/worker/gouging.go @@ -39,6 +39,7 @@ var ( type ( GougingChecker interface { Check(_ *rhpv2.HostSettings, _ *rhpv3.HostPriceTable) api.HostGougingBreakdown + BlocksUntilBlockHeightGouging(hostHeight uint64) int64 } gougingChecker struct { @@ -63,7 +64,7 @@ func GougingCheckerFromContext(ctx context.Context, criticalMigration bool) (Gou return gc(criticalMigration) } -func WithGougingChecker(ctx context.Context, cs consensusState, gp api.GougingParams) context.Context { +func 
WithGougingChecker(ctx context.Context, cs ConsensusState, gp api.GougingParams) context.Context { return context.WithValue(ctx, keyGougingChecker, func(criticalMigration bool) (GougingChecker, error) { consensusState, err := cs.ConsensusState(ctx) if err != nil { @@ -72,17 +73,16 @@ func WithGougingChecker(ctx context.Context, cs consensusState, gp api.GougingPa // adjust the max download price if we are dealing with a critical // migration that might be failing due to gouging checks + settings := gp.GougingSettings if criticalMigration && gp.GougingSettings.MigrationSurchargeMultiplier > 0 { - if adjustedMaxDownloadPrice, overflow := gp.GougingSettings.MaxDownloadPrice.Mul64WithOverflow(gp.GougingSettings.MigrationSurchargeMultiplier); overflow { - return gougingChecker{}, errors.New("failed to apply the 'MigrationSurchargeMultiplier', overflow detected") - } else { - gp.GougingSettings.MaxDownloadPrice = adjustedMaxDownloadPrice + if adjustedMaxDownloadPrice, overflow := gp.GougingSettings.MaxDownloadPrice.Mul64WithOverflow(gp.GougingSettings.MigrationSurchargeMultiplier); !overflow { + settings.MaxDownloadPrice = adjustedMaxDownloadPrice } } return gougingChecker{ consensusState: consensusState, - settings: gp.GougingSettings, + settings: settings, txFee: gp.TransactionFee, // NOTE: @@ -108,6 +108,16 @@ func NewGougingChecker(gs api.GougingSettings, cs api.ConsensusState, txnFee typ } } +func (gc gougingChecker) BlocksUntilBlockHeightGouging(hostHeight uint64) int64 { + blockHeight := gc.consensusState.BlockHeight + leeway := gc.settings.HostBlockHeightLeeway + var min uint64 + if blockHeight >= uint64(leeway) { + min = blockHeight - uint64(leeway) + } + return int64(hostHeight) - int64(min) +} + func (gc gougingChecker) Check(hs *rhpv2.HostSettings, pt *rhpv3.HostPriceTable) api.HostGougingBreakdown { if hs == nil && pt == nil { panic("gouging checker needs to be provided with at least host settings or a price table") // developer error diff --git a/worker/host.go b/worker/host.go index 86e92ce27..fceeaba00 100644 --- a/worker/host.go +++ b/worker/host.go @@ -21,7 +21,7 @@ type ( PublicKey() types.PublicKey DownloadSector(ctx context.Context, w io.Writer, root types.Hash256, offset, length uint32, overpay bool) error - UploadSector(ctx context.Context, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) (types.Hash256, error) + UploadSector(ctx context.Context, sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) error FetchPriceTable(ctx context.Context, rev *types.FileContractRevision) (hpt hostdb.HostPriceTable, err error) FetchRevision(ctx context.Context, fetchTimeout time.Duration) (types.FileContractRevision, error) @@ -35,10 +35,6 @@ type ( HostManager interface { Host(hk types.PublicKey, fcid types.FileContractID, siamuxAddr string) Host } - - HostStore interface { - Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) - } ) type ( @@ -52,7 +48,6 @@ type ( acc *account bus Bus contractSpendingRecorder ContractSpendingRecorder - interactionRecorder HostInteractionRecorder logger *zap.SugaredLogger transportPool *transportPoolV3 priceTables *priceTables @@ -70,7 +65,6 @@ func (w *worker) Host(hk types.PublicKey, fcid types.FileContractID, siamuxAddr acc: w.accounts.ForHost(hk), bus: w.bus, contractSpendingRecorder: w.contractSpendingRecorder, - interactionRecorder: w.hostInteractionRecorder, logger: w.logger.Named(hk.String()[:4]), fcid: fcid, siamuxAddr: siamuxAddr, @@ -123,11 +117,11 @@ func (h *host) 
DownloadSector(ctx context.Context, w io.Writer, root types.Hash2 }) } -func (h *host) UploadSector(ctx context.Context, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) (root types.Hash256, err error) { +func (h *host) UploadSector(ctx context.Context, sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) (err error) { // fetch price table pt, err := h.priceTable(ctx, nil) if err != nil { - return types.Hash256{}, err + return err } // prepare payment @@ -136,28 +130,28 @@ func (h *host) UploadSector(ctx context.Context, sector *[rhpv2.SectorSize]byte, // insufficient balance error expectedCost, _, _, err := uploadSectorCost(pt, rev.WindowEnd) if err != nil { - return types.Hash256{}, err + return err } if rev.RevisionNumber == math.MaxUint64 { - return types.Hash256{}, fmt.Errorf("revision number has reached max, fcid %v", rev.ParentID) + return fmt.Errorf("revision number has reached max, fcid %v", rev.ParentID) } payment, ok := rhpv3.PayByContract(&rev, expectedCost, h.acc.id, h.renterKey) if !ok { - return types.Hash256{}, errors.New("failed to create payment") + return errors.New("failed to create payment") } var cost types.Currency err = h.transportPool.withTransportV3(ctx, h.hk, h.siamuxAddr, func(ctx context.Context, t *transportV3) error { - root, cost, err = RPCAppendSector(ctx, t, h.renterKey, pt, &rev, &payment, sector) + cost, err = RPCAppendSector(ctx, t, h.renterKey, pt, &rev, &payment, sectorRoot, sector) return err }) if err != nil { - return types.Hash256{}, err + return err } // record spending h.contractSpendingRecorder.Record(rev, api.ContractSpending{Uploads: cost}) - return root, nil + return nil } func (h *host) RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rhpv2.ContractRevision, _ []types.Transaction, _ types.Currency, err error) { @@ -198,11 +192,13 @@ func (h *host) FetchPriceTable(ctx context.Context, rev *types.FileContractRevis fetchPT := func(paymentFn PriceTablePaymentFunc) (hpt hostdb.HostPriceTable, err error) { err = h.transportPool.withTransportV3(ctx, h.hk, h.siamuxAddr, func(ctx context.Context, t *transportV3) (err error) { hpt, err = RPCPriceTable(ctx, t, paymentFn) - h.interactionRecorder.RecordPriceTableUpdate(hostdb.PriceTableUpdate{ - HostKey: h.hk, - Success: isSuccessfulInteraction(err), - Timestamp: time.Now(), - PriceTable: hpt, + h.bus.RecordPriceTables(ctx, []hostdb.PriceTableUpdate{ + { + HostKey: h.hk, + Success: isSuccessfulInteraction(err), + Timestamp: time.Now(), + PriceTable: hpt, + }, }) return }) diff --git a/worker/host_test.go b/worker/host_test.go index 87d35fb36..a993c12e1 100644 --- a/worker/host_test.go +++ b/worker/host_test.go @@ -4,23 +4,133 @@ import ( "bytes" "context" "errors" + "io" + "sync" "testing" + "time" rhpv2 "go.sia.tech/core/rhp/v2" + rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/internal/test" + "lukechampine.com/frand" ) +type ( + testHost struct { + *hostMock + *contractMock + hptFn func() hostdb.HostPriceTable + } + + testHostManager struct { + tt test.TT + + mu sync.Mutex + hosts map[types.PublicKey]*testHost + } +) + +func newTestHostManager(t test.TestingCommon) *testHostManager { + return &testHostManager{tt: test.NewTT(t), hosts: make(map[types.PublicKey]*testHost)} +} + +func (hm *testHostManager) Host(hk types.PublicKey, fcid types.FileContractID, siamuxAddr string) Host { + hm.mu.Lock() + defer hm.mu.Unlock() + + if _, ok := hm.hosts[hk]; 
!ok { + hm.tt.Fatal("host not found") + } + return hm.hosts[hk] +} + +func (hm *testHostManager) addHost(h *testHost) { + hm.mu.Lock() + defer hm.mu.Unlock() + hm.hosts[h.hk] = h +} + +func newTestHost(h *hostMock, c *contractMock) *testHost { + return newTestHostCustom(h, c, newTestHostPriceTable) +} + +func newTestHostCustom(h *hostMock, c *contractMock, hptFn func() hostdb.HostPriceTable) *testHost { + return &testHost{ + hostMock: h, + contractMock: c, + hptFn: hptFn, + } +} + +func newTestHostPriceTable() hostdb.HostPriceTable { + var uid rhpv3.SettingsID + frand.Read(uid[:]) + + return hostdb.HostPriceTable{ + HostPriceTable: rhpv3.HostPriceTable{UID: uid, HostBlockHeight: 100, Validity: time.Minute}, + Expiry: time.Now().Add(time.Minute), + } +} + +func (h *testHost) PublicKey() types.PublicKey { + return h.hk +} + +func (h *testHost) DownloadSector(ctx context.Context, w io.Writer, root types.Hash256, offset, length uint32, overpay bool) error { + sector, exist := h.Sector(root) + if !exist { + return errSectorNotFound + } + if offset+length > rhpv2.SectorSize { + return errSectorOutOfBounds + } + _, err := w.Write(sector[offset : offset+length]) + return err +} + +func (h *testHost) UploadSector(ctx context.Context, sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) error { + h.AddSector(sector) + return nil +} + +func (h *testHost) FetchRevision(ctx context.Context, fetchTimeout time.Duration) (rev types.FileContractRevision, _ error) { + h.mu.Lock() + defer h.mu.Unlock() + rev = h.rev + return rev, nil +} + +func (h *testHost) FetchPriceTable(ctx context.Context, rev *types.FileContractRevision) (hostdb.HostPriceTable, error) { + return h.hptFn(), nil +} + +func (h *testHost) FundAccount(ctx context.Context, balance types.Currency, rev *types.FileContractRevision) error { + return nil +} + +func (h *testHost) RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rhpv2.ContractRevision, _ []types.Transaction, _ types.Currency, err error) { + return rhpv2.ContractRevision{}, nil, types.ZeroCurrency, nil +} + +func (h *testHost) SyncAccount(ctx context.Context, rev *types.FileContractRevision) error { + return nil +} + func TestHost(t *testing.T) { - h := newMockHost(types.PublicKey{1}) - h.c = newMockContract(h.hk, types.FileContractID{1}) - sector, root := newMockSector() + // create test host + h := newTestHost( + newHostMock(types.PublicKey{1}), + newContractMock(types.PublicKey{1}, types.FileContractID{1}), + ) // upload the sector - uploaded, err := h.UploadSector(context.Background(), sector, types.FileContractRevision{}) + sector, root := newTestSector() + err := h.UploadSector(context.Background(), root, sector, types.FileContractRevision{}) if err != nil { t.Fatal(err) - } else if uploaded != root { - t.Fatal("root mismatch") } // download entire sector diff --git a/worker/interactions.go b/worker/interactions.go index dfe8c4017..2107ae582 100644 --- a/worker/interactions.go +++ b/worker/interactions.go @@ -1,135 +1,16 @@ package worker import ( - "context" - "fmt" - "sync" - "time" - "go.sia.tech/renterd/hostdb" - "go.uber.org/zap" -) - -const ( - keyInteractionRecorder contextKey = "InteractionRecorder" ) type ( HostInteractionRecorder interface { RecordHostScan(...hostdb.HostScan) RecordPriceTableUpdate(...hostdb.PriceTableUpdate) - Stop(context.Context) - } - - hostInteractionRecorder struct { - flushInterval time.Duration - - bus Bus - logger *zap.SugaredLogger - - mu sync.Mutex - hostScans []hostdb.HostScan - 
priceTableUpdates []hostdb.PriceTableUpdate - - flushCtx context.Context - flushTimer *time.Timer } ) -var ( - _ HostInteractionRecorder = (*hostInteractionRecorder)(nil) -) - -func (w *worker) initHostInteractionRecorder(flushInterval time.Duration) { - if w.hostInteractionRecorder != nil { - panic("HostInteractionRecorder already initialized") // developer error - } - w.hostInteractionRecorder = &hostInteractionRecorder{ - bus: w.bus, - logger: w.logger, - - flushCtx: w.shutdownCtx, - flushInterval: flushInterval, - - hostScans: make([]hostdb.HostScan, 0), - priceTableUpdates: make([]hostdb.PriceTableUpdate, 0), - } -} - -func (r *hostInteractionRecorder) RecordHostScan(scans ...hostdb.HostScan) { - r.mu.Lock() - defer r.mu.Unlock() - r.hostScans = append(r.hostScans, scans...) - r.tryFlushInteractionsBuffer() -} - -func (r *hostInteractionRecorder) RecordPriceTableUpdate(ptUpdates ...hostdb.PriceTableUpdate) { - r.mu.Lock() - defer r.mu.Unlock() - r.priceTableUpdates = append(r.priceTableUpdates, ptUpdates...) - r.tryFlushInteractionsBuffer() -} - -func (r *hostInteractionRecorder) Stop(ctx context.Context) { - // stop the flush timer - r.mu.Lock() - if r.flushTimer != nil { - r.flushTimer.Stop() - } - r.flushCtx = ctx - r.mu.Unlock() - - // flush all interactions - r.flush() - - // log if we weren't able to flush them - r.mu.Lock() - if len(r.hostScans) > 0 { - r.logger.Errorw(fmt.Sprintf("failed to record %d host scans on worker shutdown", len(r.hostScans))) - } - if len(r.priceTableUpdates) > 0 { - r.logger.Errorw(fmt.Sprintf("failed to record %d price table updates on worker shutdown", len(r.priceTableUpdates))) - } - r.mu.Unlock() -} - -func (r *hostInteractionRecorder) flush() { - r.mu.Lock() - defer r.mu.Unlock() - - // NOTE: don't bother flushing if the context is cancelled, we can safely - // ignore the buffered scans and price tables since we'll flush on shutdown - // and log in case we weren't able to flush all interactions to the bus - select { - case <-r.flushCtx.Done(): - r.flushTimer = nil - return - default: - } - - if len(r.hostScans) > 0 { - if err := r.bus.RecordHostScans(r.flushCtx, r.hostScans); err != nil { - r.logger.Errorw(fmt.Sprintf("failed to record scans: %v", err)) - } else if err == nil { - r.hostScans = nil - } - } - if len(r.priceTableUpdates) > 0 { - if err := r.bus.RecordPriceTables(r.flushCtx, r.priceTableUpdates); err != nil { - r.logger.Errorw(fmt.Sprintf("failed to record price table updates: %v", err)) - } else if err == nil { - r.priceTableUpdates = nil - } - } - r.flushTimer = nil -} - -func (r *hostInteractionRecorder) tryFlushInteractionsBuffer() { - if r.flushTimer == nil { - r.flushTimer = time.AfterFunc(r.flushInterval, r.flush) - } -} - func isSuccessfulInteraction(err error) bool { // No error always means success. 
if err == nil { diff --git a/worker/memory.go b/worker/memory.go index 8b1c7cb5e..1dbd680ec 100644 --- a/worker/memory.go +++ b/worker/memory.go @@ -151,7 +151,6 @@ func (lmm *limitMemoryManager) AcquireMemory(ctx context.Context, amt uint64) Me childMem.Release() return nil } - return &limitAcquiredMemory{ child: childMem, parent: parentMem, diff --git a/worker/mocks_test.go b/worker/mocks_test.go index e6fd62d8e..4f7c24b8f 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -5,222 +5,410 @@ import ( "encoding/json" "errors" "fmt" - "io" + "math" + "math/big" "sync" "time" rhpv2 "go.sia.tech/core/rhp/v2" rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" + "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/object" - "go.uber.org/zap" - "lukechampine.com/frand" + "go.sia.tech/renterd/webhooks" ) -type ( - mockContract struct { - rev types.FileContractRevision - metadata api.ContractMetadata +var _ AccountStore = (*accountsMock)(nil) - mu sync.Mutex - sectors map[types.Hash256]*[rhpv2.SectorSize]byte - } +type accountsMock struct{} - mockContractStore struct { - mu sync.Mutex - locks map[types.FileContractID]*sync.Mutex - } +func (*accountsMock) Accounts(context.Context) ([]api.Account, error) { + return nil, nil +} - mockHost struct { - hk types.PublicKey +func (*accountsMock) AddBalance(context.Context, rhpv3.Account, types.PublicKey, *big.Int) error { + return nil +} - mu sync.Mutex - c *mockContract +func (*accountsMock) LockAccount(context.Context, rhpv3.Account, types.PublicKey, bool, time.Duration) (api.Account, uint64, error) { + return api.Account{}, 0, nil +} - hpt hostdb.HostPriceTable - hptBlockChan chan struct{} - } +func (*accountsMock) UnlockAccount(context.Context, rhpv3.Account, uint64) error { + return nil +} - mockHostManager struct { - mu sync.Mutex - hosts map[types.PublicKey]*mockHost - } +func (*accountsMock) ResetDrift(context.Context, rhpv3.Account) error { + return nil +} - mockMemory struct{} - mockMemoryManager struct { - memBlockChan chan struct{} - } +func (*accountsMock) SetBalance(context.Context, rhpv3.Account, types.PublicKey, *big.Int) error { + return nil +} - mockObjectStore struct { - mu sync.Mutex - objects map[string]map[string]object.Object - partials map[string]mockPackedSlab - bufferIDCntr uint // allows marking packed slabs as uploaded - } +func (*accountsMock) ScheduleSync(context.Context, rhpv3.Account, types.PublicKey) error { + return nil +} - mockPackedSlab struct { - parameterKey string // ([minshards]-[totalshards]-[contractset]) - bufferID uint - slabKey object.EncryptionKey - data []byte - } +var _ alerts.Alerter = (*alerterMock)(nil) - mockWorker struct { - cs *mockContractStore - hm *mockHostManager - mm *mockMemoryManager - os *mockObjectStore +type alerterMock struct{} - dl *downloadManager - ul *uploadManager +func (*alerterMock) Alerts(_ context.Context, opts alerts.AlertsOpts) (resp alerts.AlertsResponse, err error) { + return alerts.AlertsResponse{}, nil +} +func (*alerterMock) RegisterAlert(context.Context, alerts.Alert) error { return nil } +func (*alerterMock) DismissAlerts(context.Context, ...types.Hash256) error { return nil } - mu sync.Mutex - hkCntr uint - fcidCntr uint - } -) +var _ ConsensusState = (*chainMock)(nil) -var ( - _ ContractStore = (*mockContractStore)(nil) - _ Host = (*mockHost)(nil) - _ HostManager = (*mockHostManager)(nil) - _ Memory = (*mockMemory)(nil) - _ MemoryManager = (*mockMemoryManager)(nil) - _ ObjectStore = 
(*mockObjectStore)(nil) -) +type chainMock struct { + cs api.ConsensusState +} -var ( - errBucketNotFound = errors.New("bucket not found") - errContractNotFound = errors.New("contract not found") - errObjectNotFound = errors.New("object not found") - errSlabNotFound = errors.New("slab not found") - errSectorOutOfBounds = errors.New("sector out of bounds") -) +func (c *chainMock) ConsensusState(ctx context.Context) (api.ConsensusState, error) { + return c.cs, nil +} -type ( - mockHosts []*mockHost - mockContracts []*mockContract -) +var _ Bus = (*busMock)(nil) -func (hosts mockHosts) contracts() mockContracts { - contracts := make([]*mockContract, len(hosts)) - for i, host := range hosts { - contracts[i] = host.c - } - return contracts +type busMock struct { + *alerterMock + *accountsMock + *chainMock + *contractLockerMock + *contractStoreMock + *hostStoreMock + *objectStoreMock + *settingStoreMock + *syncerMock + *walletMock + *webhookBroadcasterMock } -func (contracts mockContracts) metadata() []api.ContractMetadata { - metadata := make([]api.ContractMetadata, len(contracts)) - for i, contract := range contracts { - metadata[i] = contract.metadata +func newBusMock(cs *contractStoreMock, hs *hostStoreMock, os *objectStoreMock) *busMock { + return &busMock{ + alerterMock: &alerterMock{}, + accountsMock: &accountsMock{}, + chainMock: &chainMock{}, + contractLockerMock: newContractLockerMock(), + contractStoreMock: cs, + hostStoreMock: hs, + objectStoreMock: os, + settingStoreMock: &settingStoreMock{}, + syncerMock: &syncerMock{}, + walletMock: &walletMock{}, + webhookBroadcasterMock: &webhookBroadcasterMock{}, } - return metadata } -func (m *mockMemory) Release() {} -func (m *mockMemory) ReleaseSome(uint64) {} +type contractMock struct { + rev types.FileContractRevision + metadata api.ContractMetadata -func (mm *mockMemoryManager) Limit(amt uint64) (MemoryManager, error) { - return &mockMemoryManager{}, nil + mu sync.Mutex + sectors map[types.Hash256]*[rhpv2.SectorSize]byte } -func (mm *mockMemoryManager) Status() api.MemoryStatus { return api.MemoryStatus{} } -func (mm *mockMemoryManager) AcquireMemory(ctx context.Context, amt uint64) Memory { - if mm.memBlockChan != nil { - <-mm.memBlockChan + +func newContractMock(hk types.PublicKey, fcid types.FileContractID) *contractMock { + return &contractMock{ + metadata: api.ContractMetadata{ + ID: fcid, + HostKey: hk, + WindowStart: 0, + WindowEnd: 10, + }, + rev: types.FileContractRevision{ParentID: fcid}, + sectors: make(map[types.Hash256]*[rhpv2.SectorSize]byte), } - return &mockMemory{} } -func newMockContractStore() *mockContractStore { - return &mockContractStore{ +func (c *contractMock) AddSector(sector *[rhpv2.SectorSize]byte) (root types.Hash256) { + root = rhpv2.SectorRoot(sector) + c.mu.Lock() + c.sectors[root] = sector + c.mu.Unlock() + return +} + +func (c *contractMock) Sector(root types.Hash256) (sector *[rhpv2.SectorSize]byte, found bool) { + c.mu.Lock() + sector, found = c.sectors[root] + c.mu.Unlock() + return +} + +var _ ContractLocker = (*contractLockerMock)(nil) + +type contractLockerMock struct { + mu sync.Mutex + locks map[types.FileContractID]*sync.Mutex +} + +func newContractLockerMock() *contractLockerMock { + return &contractLockerMock{ locks: make(map[types.FileContractID]*sync.Mutex), } } -func (cs *mockContractStore) AcquireContract(ctx context.Context, fcid types.FileContractID, priority int, d time.Duration) (lockID uint64, err error) { +func (cs *contractLockerMock) AcquireContract(_ context.Context, fcid 
types.FileContractID, _ int, _ time.Duration) (uint64, error) { cs.mu.Lock() defer cs.mu.Unlock() - if lock, ok := cs.locks[fcid]; !ok { - return 0, errContractNotFound - } else { - lock.Lock() + lock, exists := cs.locks[fcid] + if !exists { + cs.locks[fcid] = new(sync.Mutex) + lock = cs.locks[fcid] } + + lock.Lock() return 0, nil } -func (cs *mockContractStore) ReleaseContract(ctx context.Context, fcid types.FileContractID, lockID uint64) (err error) { +func (cs *contractLockerMock) ReleaseContract(_ context.Context, fcid types.FileContractID, _ uint64) error { cs.mu.Lock() defer cs.mu.Unlock() - if lock, ok := cs.locks[fcid]; !ok { - return errContractNotFound - } else { - lock.Unlock() - } + cs.locks[fcid].Unlock() + delete(cs.locks, fcid) return nil } -func (cs *mockContractStore) KeepaliveContract(ctx context.Context, fcid types.FileContractID, lockID uint64, d time.Duration) (err error) { +func (*contractLockerMock) KeepaliveContract(context.Context, types.FileContractID, uint64, time.Duration) error { return nil } -func (os *mockContractStore) RenewedContract(ctx context.Context, renewedFrom types.FileContractID) (api.ContractMetadata, error) { - return api.ContractMetadata{}, api.ErrContractNotFound +var _ ContractStore = (*contractStoreMock)(nil) + +type contractStoreMock struct { + mu sync.Mutex + contracts map[types.FileContractID]*contractMock + hosts2fcid map[types.PublicKey]types.FileContractID + fcidCntr uint } -func newMockObjectStore() *mockObjectStore { - os := &mockObjectStore{ - objects: make(map[string]map[string]object.Object), - partials: make(map[string]mockPackedSlab), +func newContractStoreMock() *contractStoreMock { + return &contractStoreMock{ + contracts: make(map[types.FileContractID]*contractMock), + hosts2fcid: make(map[types.PublicKey]types.FileContractID), } - os.objects[testBucket] = make(map[string]object.Object) - return os } -func (cs *mockContractStore) addContract(c *mockContract) { +func (*contractStoreMock) RenewedContract(context.Context, types.FileContractID) (api.ContractMetadata, error) { + return api.ContractMetadata{}, nil +} + +func (*contractStoreMock) Contract(context.Context, types.FileContractID) (api.ContractMetadata, error) { + return api.ContractMetadata{}, nil +} + +func (*contractStoreMock) ContractSize(context.Context, types.FileContractID) (api.ContractSize, error) { + return api.ContractSize{}, nil +} + +func (*contractStoreMock) ContractRoots(context.Context, types.FileContractID) ([]types.Hash256, []types.Hash256, error) { + return nil, nil, nil +} + +func (cs *contractStoreMock) Contracts(context.Context, api.ContractsOpts) (metadatas []api.ContractMetadata, _ error) { + cs.mu.Lock() + defer cs.mu.Unlock() + for _, c := range cs.contracts { + metadatas = append(metadatas, c.metadata) + } + return +} + +func (cs *contractStoreMock) addContract(hk types.PublicKey) *contractMock { cs.mu.Lock() defer cs.mu.Unlock() - cs.locks[c.metadata.ID] = new(sync.Mutex) + + fcid := cs.newFileContractID() + cs.contracts[fcid] = newContractMock(hk, fcid) + cs.hosts2fcid[hk] = fcid + return cs.contracts[fcid] +} + +func (cs *contractStoreMock) renewContract(hk types.PublicKey) (*contractMock, error) { + cs.mu.Lock() + defer cs.mu.Unlock() + + curr := cs.hosts2fcid[hk] + c := cs.contracts[curr] + if c == nil { + return nil, errors.New("host does not have a contract to renew") + } + delete(cs.contracts, curr) + + renewal := newContractMock(hk, cs.newFileContractID()) + renewal.metadata.RenewedFrom = c.metadata.ID + renewal.metadata.WindowStart = 
c.metadata.WindowEnd + renewal.metadata.WindowEnd = renewal.metadata.WindowStart + (c.metadata.WindowEnd - c.metadata.WindowStart) + cs.contracts[renewal.metadata.ID] = renewal + cs.hosts2fcid[hk] = renewal.metadata.ID + return renewal, nil +} + +func (cs *contractStoreMock) newFileContractID() types.FileContractID { + cs.fcidCntr++ + return types.FileContractID{byte(cs.fcidCntr)} +} + +var errSectorOutOfBounds = errors.New("sector out of bounds") + +type hostMock struct { + hk types.PublicKey + hi hostdb.HostInfo +} + +func newHostMock(hk types.PublicKey) *hostMock { + return &hostMock{ + hk: hk, + hi: hostdb.HostInfo{Host: hostdb.Host{PublicKey: hk, Scanned: true}}, + } +} + +var _ HostStore = (*hostStoreMock)(nil) + +type hostStoreMock struct { + mu sync.Mutex + hosts map[types.PublicKey]*hostMock + hkCntr uint +} + +func newHostStoreMock() *hostStoreMock { + return &hostStoreMock{hosts: make(map[types.PublicKey]*hostMock)} +} + +func (hs *hostStoreMock) Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) { + hs.mu.Lock() + defer hs.mu.Unlock() + + h, ok := hs.hosts[hostKey] + if !ok { + return hostdb.HostInfo{}, api.ErrHostNotFound + } + return h.hi, nil +} + +func (hs *hostStoreMock) RecordHostScans(ctx context.Context, scans []hostdb.HostScan) error { + return nil +} + +func (hs *hostStoreMock) RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error { + return nil +} + +func (hs *hostStoreMock) RecordContractSpending(ctx context.Context, records []api.ContractSpendingRecord) error { + return nil +} + +func (hs *hostStoreMock) addHost() *hostMock { + hs.mu.Lock() + defer hs.mu.Unlock() + + hs.hkCntr++ + hk := types.PublicKey{byte(hs.hkCntr)} + hs.hosts[hk] = newHostMock(hk) + return hs.hosts[hk] +} + +var ( + _ MemoryManager = (*memoryManagerMock)(nil) + _ Memory = (*memoryMock)(nil) +) + +type ( + memoryMock struct{} + memoryManagerMock struct{ memBlockChan chan struct{} } +) + +func newMemoryManagerMock() *memoryManagerMock { + mm := &memoryManagerMock{memBlockChan: make(chan struct{})} + close(mm.memBlockChan) + return mm +} + +func (m *memoryMock) Release() {} +func (m *memoryMock) ReleaseSome(uint64) {} + +func (mm *memoryManagerMock) Limit(amt uint64) (MemoryManager, error) { + return mm, nil } -func (os *mockObjectStore) AddMultipartPart(ctx context.Context, bucket, path, contractSet, ETag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) { +func (mm *memoryManagerMock) Status() api.MemoryStatus { return api.MemoryStatus{} } + +func (mm *memoryManagerMock) AcquireMemory(ctx context.Context, amt uint64) Memory { + <-mm.memBlockChan + return &memoryMock{} +} + +var _ ObjectStore = (*objectStoreMock)(nil) + +type ( + objectStoreMock struct { + mu sync.Mutex + objects map[string]map[string]object.Object + partials map[string]*packedSlabMock + slabBufferMaxSizeSoft int + bufferIDCntr uint // allows marking packed slabs as uploaded + } + + packedSlabMock struct { + parameterKey string // ([minshards]-[totalshards]-[contractset]) + bufferID uint + slabKey object.EncryptionKey + data []byte + lockedUntil time.Time + } +) + +func newObjectStoreMock(bucket string) *objectStoreMock { + os := &objectStoreMock{ + objects: make(map[string]map[string]object.Object), + partials: make(map[string]*packedSlabMock), + slabBufferMaxSizeSoft: math.MaxInt64, + } + os.objects[bucket] = make(map[string]object.Object) + return os +} + +func (os *objectStoreMock) AddMultipartPart(ctx context.Context, bucket, path, 
contractSet, ETag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) { return nil } -func (os *mockObjectStore) AddUploadingSector(ctx context.Context, uID api.UploadID, id types.FileContractID, root types.Hash256) error { +func (os *objectStoreMock) AddUploadingSector(ctx context.Context, uID api.UploadID, id types.FileContractID, root types.Hash256) error { return nil } -func (os *mockObjectStore) TrackUpload(ctx context.Context, uID api.UploadID) error { return nil } +func (os *objectStoreMock) TrackUpload(ctx context.Context, uID api.UploadID) error { return nil } -func (os *mockObjectStore) FinishUpload(ctx context.Context, uID api.UploadID) error { return nil } +func (os *objectStoreMock) FinishUpload(ctx context.Context, uID api.UploadID) error { return nil } -func (os *mockObjectStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) error { +func (os *objectStoreMock) DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) error { return nil } -func (os *mockObjectStore) DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) error { +func (os *objectStoreMock) DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) error { return nil } -func (os *mockObjectStore) AddObject(ctx context.Context, bucket, path, contractSet string, o object.Object, opts api.AddObjectOptions) error { +func (os *objectStoreMock) AddObject(ctx context.Context, bucket, path, contractSet string, o object.Object, opts api.AddObjectOptions) error { os.mu.Lock() defer os.mu.Unlock() // check if the bucket exists if _, exists := os.objects[bucket]; !exists { - return errBucketNotFound + return api.ErrBucketNotFound } os.objects[bucket][path] = o return nil } -func (os *mockObjectStore) AddPartialSlab(ctx context.Context, data []byte, minShards, totalShards uint8, contractSet string) (slabs []object.SlabSlice, slabBufferMaxSizeSoftReached bool, err error) { +func (os *objectStoreMock) AddPartialSlab(ctx context.Context, data []byte, minShards, totalShards uint8, contractSet string) (slabs []object.SlabSlice, slabBufferMaxSizeSoftReached bool, err error) { os.mu.Lock() defer os.mu.Unlock() @@ -239,7 +427,7 @@ func (os *mockObjectStore) AddPartialSlab(ctx context.Context, data []byte, minS } // update store - os.partials[ec.String()] = mockPackedSlab{ + os.partials[ec.String()] = &packedSlabMock{ parameterKey: fmt.Sprintf("%d-%d-%v", minShards, totalShards, contractSet), bufferID: os.bufferIDCntr, slabKey: ec, @@ -247,21 +435,21 @@ func (os *mockObjectStore) AddPartialSlab(ctx context.Context, data []byte, minS } os.bufferIDCntr++ - return []object.SlabSlice{ss}, false, nil + return []object.SlabSlice{ss}, os.totalSlabBufferSize() > os.slabBufferMaxSizeSoft, nil } -func (os *mockObjectStore) Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (api.ObjectsResponse, error) { +func (os *objectStoreMock) Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (api.ObjectsResponse, error) { os.mu.Lock() defer os.mu.Unlock() // check if the bucket exists if _, exists := os.objects[bucket]; !exists { - return api.ObjectsResponse{}, errBucketNotFound + return api.ObjectsResponse{}, api.ErrBucketNotFound } // check if the object exists if _, exists := os.objects[bucket][path]; !exists { - return api.ObjectsResponse{}, errObjectNotFound + return api.ObjectsResponse{}, api.ErrObjectNotFound } // clone to ensure the store isn't unwillingly modified @@ 
-274,17 +462,17 @@ func (os *mockObjectStore) Object(ctx context.Context, bucket, path string, opts return api.ObjectsResponse{Object: &api.Object{ ObjectMetadata: api.ObjectMetadata{Name: path, Size: o.TotalSize()}, - Object: o, + Object: &o, }}, nil } -func (os *mockObjectStore) FetchPartialSlab(ctx context.Context, key object.EncryptionKey, offset, length uint32) ([]byte, error) { +func (os *objectStoreMock) FetchPartialSlab(ctx context.Context, key object.EncryptionKey, offset, length uint32) ([]byte, error) { os.mu.Lock() defer os.mu.Unlock() packedSlab, exists := os.partials[key.String()] if !exists { - return nil, errSlabNotFound + return nil, api.ErrSlabNotFound } if offset+length > uint32(len(packedSlab.data)) { return nil, errors.New("offset out of bounds") @@ -293,7 +481,7 @@ func (os *mockObjectStore) FetchPartialSlab(ctx context.Context, key object.Encr return packedSlab.data[offset : offset+length], nil } -func (os *mockObjectStore) Slab(ctx context.Context, key object.EncryptionKey) (slab object.Slab, err error) { +func (os *objectStoreMock) Slab(ctx context.Context, key object.EncryptionKey) (slab object.Slab, err error) { os.mu.Lock() defer os.mu.Unlock() @@ -304,12 +492,12 @@ func (os *mockObjectStore) Slab(ctx context.Context, key object.EncryptionKey) ( return } } - err = errSlabNotFound + err = api.ErrSlabNotFound }) return } -func (os *mockObjectStore) UpdateSlab(ctx context.Context, s object.Slab, contractSet string) error { +func (os *objectStoreMock) UpdateSlab(ctx context.Context, s object.Slab, contractSet string) error { os.mu.Lock() defer os.mu.Unlock() @@ -325,24 +513,32 @@ func (os *mockObjectStore) UpdateSlab(ctx context.Context, s object.Slab, contra return nil } -func (os *mockObjectStore) PackedSlabsForUpload(ctx context.Context, lockingDuration time.Duration, minShards, totalShards uint8, set string, limit int) (pss []api.PackedSlab, _ error) { +func (os *objectStoreMock) PackedSlabsForUpload(ctx context.Context, lockingDuration time.Duration, minShards, totalShards uint8, set string, limit int) (pss []api.PackedSlab, _ error) { os.mu.Lock() defer os.mu.Unlock() + if limit == -1 { + limit = math.MaxInt + } + parameterKey := fmt.Sprintf("%d-%d-%v", minShards, totalShards, set) for _, ps := range os.partials { - if ps.parameterKey == parameterKey { + if ps.parameterKey == parameterKey && time.Now().After(ps.lockedUntil) { + ps.lockedUntil = time.Now().Add(lockingDuration) pss = append(pss, api.PackedSlab{ BufferID: ps.bufferID, Data: ps.data, Key: ps.slabKey, }) + if len(pss) == limit { + break + } } } return } -func (os *mockObjectStore) MarkPackedSlabsUploaded(ctx context.Context, slabs []api.UploadedPackedSlab) error { +func (os *objectStoreMock) MarkPackedSlabsUploaded(ctx context.Context, slabs []api.UploadedPackedSlab) error { os.mu.Lock() defer os.mu.Unlock() @@ -367,228 +563,89 @@ func (os *mockObjectStore) MarkPackedSlabsUploaded(ctx context.Context, slabs [] return nil } -func (os *mockObjectStore) forEachObject(fn func(bucket, path string, o object.Object)) { - for bucket, objects := range os.objects { - for path, object := range objects { - fn(bucket, path, object) - } - } +func (os *objectStoreMock) Bucket(_ context.Context, bucket string) (api.Bucket, error) { + return api.Bucket{}, nil } -func newMockHost(hk types.PublicKey) *mockHost { - return &mockHost{ - hk: hk, - hpt: newTestHostPriceTable(time.Now().Add(time.Minute)), - } +func (os *objectStoreMock) MultipartUpload(ctx context.Context, uploadID string) (resp api.MultipartUpload, err 
error) { + return api.MultipartUpload{}, nil } -func (h *mockHost) PublicKey() types.PublicKey { return h.hk } - -func (h *mockHost) DownloadSector(ctx context.Context, w io.Writer, root types.Hash256, offset, length uint32, overpay bool) error { - sector, exist := h.contract().sector(root) - if !exist { - return errSectorNotFound - } - if offset+length > rhpv2.SectorSize { - return errSectorOutOfBounds +func (os *objectStoreMock) totalSlabBufferSize() (total int) { + for _, p := range os.partials { + if time.Now().After(p.lockedUntil) { + total += len(p.data) + } } - _, err := w.Write(sector[offset : offset+length]) - return err -} - -func (h *mockHost) UploadSector(ctx context.Context, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) (types.Hash256, error) { - return h.contract().addSector(sector), nil -} - -func (h *mockHost) FetchRevision(ctx context.Context, fetchTimeout time.Duration) (rev types.FileContractRevision, _ error) { - h.mu.Lock() - defer h.mu.Unlock() - rev = h.c.rev return } -func (h *mockHost) FetchPriceTable(ctx context.Context, rev *types.FileContractRevision) (hostdb.HostPriceTable, error) { - <-h.hptBlockChan - return h.hpt, nil -} - -func (h *mockHost) FundAccount(ctx context.Context, balance types.Currency, rev *types.FileContractRevision) error { - return nil -} - -func (h *mockHost) RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rhpv2.ContractRevision, _ []types.Transaction, _ types.Currency, err error) { - return rhpv2.ContractRevision{}, nil, types.ZeroCurrency, nil -} - -func (h *mockHost) SyncAccount(ctx context.Context, rev *types.FileContractRevision) error { - return nil -} - -func (h *mockHost) contract() (c *mockContract) { - h.mu.Lock() - c = h.c - h.mu.Unlock() - - if c == nil { - panic("host does not have a contract") - } - return +func (os *objectStoreMock) setSlabBufferMaxSizeSoft(n int) { + os.mu.Lock() + defer os.mu.Unlock() + os.slabBufferMaxSizeSoft = n } -func newMockContract(hk types.PublicKey, fcid types.FileContractID) *mockContract { - return &mockContract{ - metadata: api.ContractMetadata{ - ID: fcid, - HostKey: hk, - WindowStart: 0, - WindowEnd: 10, - }, - rev: types.FileContractRevision{ParentID: fcid}, - sectors: make(map[types.Hash256]*[rhpv2.SectorSize]byte), +func (os *objectStoreMock) forEachObject(fn func(bucket, path string, o object.Object)) { + for bucket, objects := range os.objects { + for path, object := range objects { + fn(bucket, path, object) + } } } -func (c *mockContract) addSector(sector *[rhpv2.SectorSize]byte) (root types.Hash256) { - root = rhpv2.SectorRoot(sector) - c.mu.Lock() - c.sectors[root] = sector - c.mu.Unlock() - return -} +var _ SettingStore = (*settingStoreMock)(nil) -func (c *mockContract) sector(root types.Hash256) (sector *[rhpv2.SectorSize]byte, found bool) { - c.mu.Lock() - sector, found = c.sectors[root] - c.mu.Unlock() - return -} +type settingStoreMock struct{} -func newMockHostManager() *mockHostManager { - return &mockHostManager{ - hosts: make(map[types.PublicKey]*mockHost), - } +func (*settingStoreMock) GougingParams(context.Context) (api.GougingParams, error) { + return api.GougingParams{}, nil } -func (hm *mockHostManager) Host(hk types.PublicKey, fcid types.FileContractID, siamuxAddr string) Host { - hm.mu.Lock() - defer hm.mu.Unlock() - - if _, ok := hm.hosts[hk]; !ok { - panic("host not found") - } - return hm.hosts[hk] +func (*settingStoreMock) UploadParams(context.Context) (api.UploadParams, error) { + return api.UploadParams{}, nil } -func (hm 
*mockHostManager) newHost(hk types.PublicKey) *mockHost { - hm.mu.Lock() - defer hm.mu.Unlock() - - if _, ok := hm.hosts[hk]; ok { - panic("host already exists") - } +var _ Syncer = (*syncerMock)(nil) - hm.hosts[hk] = newMockHost(hk) - return hm.hosts[hk] -} +type syncerMock struct{} -func (hm *mockHostManager) host(hk types.PublicKey) *mockHost { - hm.mu.Lock() - defer hm.mu.Unlock() - return hm.hosts[hk] +func (*syncerMock) BroadcastTransaction(context.Context, []types.Transaction) error { + return nil } -func newMockSector() (*[rhpv2.SectorSize]byte, types.Hash256) { - var sector [rhpv2.SectorSize]byte - frand.Read(sector[:]) - return &sector, rhpv2.SectorRoot(&sector) +func (*syncerMock) SyncerPeers(context.Context) ([]string, error) { + return nil, nil } -func newMockWorker() *mockWorker { - cs := newMockContractStore() - hm := newMockHostManager() - os := newMockObjectStore() - mm := &mockMemoryManager{} +var _ Wallet = (*walletMock)(nil) - return &mockWorker{ - cs: cs, - hm: hm, - mm: mm, - os: os, - - dl: newDownloadManager(context.Background(), hm, mm, os, 0, 0, zap.NewNop().Sugar()), - ul: newUploadManager(context.Background(), hm, mm, os, cs, 0, 0, time.Minute, zap.NewNop().Sugar()), - } -} - -func (w *mockWorker) addHosts(n int) { - for i := 0; i < n; i++ { - w.addHost() - } -} +type walletMock struct{} -func (w *mockWorker) addHost() *mockHost { - host := w.hm.newHost(w.newHostKey()) - w.formContract(host) - return host +func (*walletMock) WalletDiscard(context.Context, types.Transaction) error { + return nil } -func (w *mockWorker) formContract(host *mockHost) *mockContract { - if host.c != nil { - panic("host already has contract, use renew") - } - host.c = newMockContract(host.hk, w.newFileContractID()) - w.cs.addContract(host.c) - return host.c +func (*walletMock) WalletFund(context.Context, *types.Transaction, types.Currency, bool) ([]types.Hash256, []types.Transaction, error) { + return nil, nil, nil } -func (w *mockWorker) renewContract(hk types.PublicKey) *mockContract { - host := w.hm.host(hk) - if host == nil { - panic("host not found") - } else if host.c == nil { - panic("host does not have a contract to renew") - } - - curr := host.c.metadata - update := newMockContract(host.hk, w.newFileContractID()) - update.metadata.RenewedFrom = curr.ID - update.metadata.WindowStart = curr.WindowEnd - update.metadata.WindowEnd = update.metadata.WindowStart + (curr.WindowEnd - curr.WindowStart) - host.c = update - - w.cs.addContract(host.c) - return host.c +func (*walletMock) WalletPrepareForm(context.Context, types.Address, types.PublicKey, types.Currency, types.Currency, types.PublicKey, rhpv2.HostSettings, uint64) ([]types.Transaction, error) { + return nil, nil } -func (w *mockWorker) contracts() (metadatas []api.ContractMetadata) { - for _, h := range w.hm.hosts { - metadatas = append(metadatas, h.c.metadata) - } - return +func (*walletMock) WalletPrepareRenew(context.Context, types.FileContractRevision, types.Address, types.Address, types.PrivateKey, types.Currency, types.Currency, rhpv3.HostPriceTable, uint64, uint64, uint64) (api.WalletPrepareRenewResponse, error) { + return api.WalletPrepareRenewResponse{}, nil } -func (w *mockWorker) newHostKey() (hk types.PublicKey) { - w.mu.Lock() - defer w.mu.Unlock() - w.hkCntr++ - hk = types.PublicKey{byte(w.hkCntr)} - return +func (*walletMock) WalletSign(context.Context, *types.Transaction, []types.Hash256, types.CoveredFields) error { + return nil } -func (w *mockWorker) newFileContractID() (fcid types.FileContractID) { - w.mu.Lock() - 
defer w.mu.Unlock() - w.fcidCntr++ - fcid = types.FileContractID{byte(w.fcidCntr)} - return -} +var _ webhooks.Broadcaster = (*webhookBroadcasterMock)(nil) -func newTestHostPriceTable(expiry time.Time) hostdb.HostPriceTable { - var uid rhpv3.SettingsID - frand.Read(uid[:]) +type webhookBroadcasterMock struct{} - return hostdb.HostPriceTable{ - HostPriceTable: rhpv3.HostPriceTable{UID: uid, Validity: time.Minute}, - Expiry: expiry, - } +func (*webhookBroadcasterMock) BroadcastAction(context.Context, webhooks.Event) error { + return nil } diff --git a/worker/pricetables.go b/worker/pricetables.go index 3f7683b2a..1bc2ee009 100644 --- a/worker/pricetables.go +++ b/worker/pricetables.go @@ -19,6 +19,11 @@ const ( // for use, we essentially add 30 seconds to the current time when checking // whether we are still before a pricetable's expiry time priceTableValidityLeeway = 30 * time.Second + + // priceTableBlockHeightLeeway is the number of blocks before a price table + // is considered to gouge on the block height, at which point we renew it + // even if it is still valid + priceTableBlockHeightLeeway = 2 ) var ( @@ -106,10 +111,19 @@ func (p *priceTable) fetch(ctx context.Context, rev *types.FileContractRevision) hpt = p.hpt p.mu.Unlock() + // get gouging checker to figure out how many blocks we have left before the + // current price table is considered to gouge on the block height + gc, err := GougingCheckerFromContext(ctx, false) + if err != nil { + return hostdb.HostPriceTable{}, err + } + // figure out whether we should update the price table, if not we can return if hpt.UID != (rhpv3.SettingsID{}) { randomUpdateLeeway := frand.Intn(int(math.Floor(hpt.HostPriceTable.Validity.Seconds() * 0.1))) - if time.Now().Add(priceTableValidityLeeway).Add(time.Duration(randomUpdateLeeway) * time.Second).Before(hpt.Expiry) { + closeToGouging := gc.BlocksUntilBlockHeightGouging(hpt.HostBlockHeight) <= priceTableBlockHeightLeeway + closeToExpiring := time.Now().Add(priceTableValidityLeeway).Add(time.Duration(randomUpdateLeeway) * time.Second).After(hpt.Expiry) + if !closeToExpiring && !closeToGouging { return } } diff --git a/worker/pricetables_test.go b/worker/pricetables_test.go index 115abcd31..55b0f7057 100644 --- a/worker/pricetables_test.go +++ b/worker/pricetables_test.go @@ -3,90 +3,70 @@ package worker import ( "context" "errors" - "sync" "testing" "time" - "go.sia.tech/core/types" + "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" ) -var ( - errHostNotFound = errors.New("host not found") -) - -var ( - _ HostStore = (*mockHostStore)(nil) -) - -type mockHostStore struct { - mu sync.Mutex - hosts map[types.PublicKey]hostdb.HostInfo -} - -func (mhs *mockHostStore) Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) { - mhs.mu.Lock() - defer mhs.mu.Unlock() +func TestPriceTables(t *testing.T) { + // create host & contract stores + hs := newHostStoreMock() + cs := newContractStoreMock() - h, ok := mhs.hosts[hostKey] - if !ok { - return hostdb.HostInfo{}, errHostNotFound - } - return h, nil -} + // create host manager & price table + hm := newTestHostManager(t) + pts := newPriceTables(hm, hs) -func newMockHostStore(hosts []*hostdb.HostInfo) *mockHostStore { - hs := &mockHostStore{hosts: make(map[types.PublicKey]hostdb.HostInfo)} - for _, h := range hosts { - hs.hosts[h.PublicKey] = *h - } - return hs -} + // create host & contract mock + h := hs.addHost() + c := cs.addContract(h.hk) -func TestPriceTables(t *testing.T) { - // create two price tables, a valid one and one that expired - 
expiredPT := newTestHostPriceTable(time.Now()) - validPT := newTestHostPriceTable(time.Now().Add(time.Minute)) - - // create host manager - hm := newMockHostManager() - - // create a mock host that has a valid price table - hk1 := types.PublicKey{1} - h1 := hm.newHost(hk1) - h1.hpt = validPT - - // create a hostdb entry for that host that returns the expired price table - hdb1 := &hostdb.HostInfo{ - Host: hostdb.Host{ - PublicKey: hk1, - PriceTable: expiredPT, - Scanned: true, + cm := &chainMock{ + cs: api.ConsensusState{ + BlockHeight: 1, }, } - // create host store - hs := newMockHostStore([]*hostdb.HostInfo{hdb1}) - - // create price tables - pts := newPriceTables(hm, hs) - - // fetch the price table in a goroutine, make it blocking - h1.hptBlockChan = make(chan struct{}) - go pts.fetch(context.Background(), hk1, nil) + blockHeightLeeway := 10 + gCtx := WithGougingChecker(context.Background(), cm, api.GougingParams{ + ConsensusState: cm.cs, + GougingSettings: api.GougingSettings{ + HostBlockHeightLeeway: blockHeightLeeway, + }, + }) + + // expire its price table + expiredPT := newTestHostPriceTable() + expiredPT.Expiry = time.Now() + h.hi.PriceTable = expiredPT + + // manage the host, make sure fetching the price table blocks + fetchPTBlockChan := make(chan struct{}) + validPT := newTestHostPriceTable() + hm.addHost(newTestHostCustom(h, c, func() hostdb.HostPriceTable { + <-fetchPTBlockChan + return validPT + })) + + // trigger a fetch to make it block + go pts.fetch(gCtx, h.hk, nil) time.Sleep(50 * time.Millisecond) - // fetch it again but with a canceled context to avoid blocking indefinitely, the error will indicate we were blocking on a price table update - ctx, cancel := context.WithCancel(context.Background()) + // fetch it again but with a canceled context to avoid blocking + // indefinitely, the error will indicate we were blocking on a price table + // update + ctx, cancel := context.WithCancel(gCtx) cancel() - _, err := pts.fetch(ctx, hk1, nil) + _, err := pts.fetch(ctx, h.hk, nil) if !errors.Is(err, errPriceTableUpdateTimedOut) { t.Fatal("expected errPriceTableUpdateTimedOut, got", err) } // unblock and assert we receive a valid price table - close(h1.hptBlockChan) - update, err := pts.fetch(context.Background(), hk1, nil) + close(fetchPTBlockChan) + update, err := pts.fetch(gCtx, h.hk, nil) if err != nil { t.Fatal(err) } else if update.UID != validPT.UID { @@ -95,8 +75,8 @@ func TestPriceTables(t *testing.T) { // refresh the price table on the host, update again, assert we receive the // same price table as it hasn't expired yet - h1.hpt = newTestHostPriceTable(time.Now().Add(time.Minute)) - update, err = pts.fetch(context.Background(), hk1, nil) + h.hi.PriceTable = newTestHostPriceTable() + update, err = pts.fetch(gCtx, h.hk, nil) if err != nil { t.Fatal(err) } else if update.UID != validPT.UID { @@ -104,13 +84,37 @@ func TestPriceTables(t *testing.T) { } // manually expire the price table - pts.priceTables[hk1].hpt.Expiry = time.Now() + pts.priceTables[h.hk].hpt.Expiry = time.Now() + + // fetch it again and assert we updated the price table + update, err = pts.fetch(gCtx, h.hk, nil) + if err != nil { + t.Fatal(err) + } else if update.UID != h.hi.PriceTable.UID { + t.Fatal("price table mismatch") + } + + // refresh the price table on the host and make sure fetching doesn't update + // the price table since it's not expired + validPT = h.hi.PriceTable + h.hi.PriceTable = newTestHostPriceTable() + update, err = pts.fetch(gCtx, h.hk, nil) + if err != nil { + t.Fatal(err) + } 
else if update.UID != validPT.UID { + t.Fatal("price table mismatch") + } + + // increase the current block height to be exactly + // 'priceTableBlockHeightLeeway' blocks before the leeway of the gouging + // settings + cm.cs.BlockHeight = validPT.HostBlockHeight + uint64(blockHeightLeeway) - priceTableBlockHeightLeeway // fetch it again and assert we updated the price table - update, err = pts.fetch(context.Background(), hk1, nil) + update, err = pts.fetch(gCtx, h.hk, nil) if err != nil { t.Fatal(err) - } else if update.UID != h1.hpt.UID { + } else if update.UID != h.hi.PriceTable.UID { t.Fatal("price table mismatch") } } diff --git a/worker/rhpv3.go b/worker/rhpv3.go index 03f67c6f6..9c280f2bd 100644 --- a/worker/rhpv3.go +++ b/worker/rhpv3.go @@ -337,19 +337,17 @@ type ( // accounts stores the balance and other metrics of accounts that the // worker maintains with a host. accounts struct { - as AccountStore - key types.PrivateKey - shutdownCtx context.Context + as AccountStore + key types.PrivateKey } // account contains information regarding a specific account of the // worker. account struct { - as AccountStore - id rhpv3.Account - key types.PrivateKey - host types.PublicKey - shutdownCtx context.Context + as AccountStore + id rhpv3.Account + key types.PrivateKey + host types.PublicKey } ) @@ -358,9 +356,8 @@ func (w *worker) initAccounts(as AccountStore) { panic("accounts already initialized") // developer error } w.accounts = &accounts{ - as: as, - key: w.deriveSubKey("accountkey"), - shutdownCtx: w.shutdownCtx, + as: as, + key: w.deriveSubKey("accountkey"), } } @@ -376,117 +373,95 @@ func (w *worker) initTransportPool() { func (a *accounts) ForHost(hk types.PublicKey) *account { accountID := rhpv3.Account(a.deriveAccountKey(hk).PublicKey()) return &account{ - as: a.as, - id: accountID, - key: a.key, - host: hk, - shutdownCtx: a.shutdownCtx, + as: a.as, + id: accountID, + key: a.key, + host: hk, } } -// WithDeposit increases the balance of an account by the amount returned by -// amtFn if amtFn doesn't return an error. -func (a *account) WithDeposit(ctx context.Context, amtFn func() (types.Currency, error)) error { - _, lockID, err := a.as.LockAccount(ctx, a.id, a.host, false, accountLockingDuration) +func withAccountLock(ctx context.Context, as AccountStore, id rhpv3.Account, hk types.PublicKey, exclusive bool, fn func(a api.Account) error) error { + acc, lockID, err := as.LockAccount(ctx, id, hk, exclusive, accountLockingDuration) if err != nil { return err } - defer func() { - unlockCtx, cancel := context.WithTimeout(a.shutdownCtx, 10*time.Second) - a.as.UnlockAccount(unlockCtx, a.id, lockID) - cancel() - }() + err = fn(acc) - amt, err := amtFn() - if err != nil { - return err - } - return a.as.AddBalance(ctx, a.id, a.host, amt.Big()) + // unlock account + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + _ = as.UnlockAccount(ctx, acc.ID, lockID) // ignore error + cancel() + + return err } -func (a *account) Balance(ctx context.Context) (types.Currency, error) { - account, lockID, err := a.as.LockAccount(ctx, a.id, a.host, false, accountLockingDuration) - if err != nil { - return types.Currency{}, err - } - defer func() { - unlockCtx, cancel := context.WithTimeout(a.shutdownCtx, 10*time.Second) - a.as.UnlockAccount(unlockCtx, a.id, lockID) - cancel() - }() +// Balance returns the account balance. 
+func (a *account) Balance(ctx context.Context) (balance types.Currency, err error) { + err = withAccountLock(ctx, a.as, a.id, a.host, false, func(account api.Account) error { + balance = types.NewCurrency(account.Balance.Uint64(), new(big.Int).Rsh(account.Balance, 64).Uint64()) + return nil + }) + return +} - return types.NewCurrency(account.Balance.Uint64(), new(big.Int).Rsh(account.Balance, 64).Uint64()), nil +// WithDeposit increases the balance of an account by the amount returned by +// amtFn if amtFn doesn't return an error. +func (a *account) WithDeposit(ctx context.Context, amtFn func() (types.Currency, error)) error { + return withAccountLock(ctx, a.as, a.id, a.host, false, func(_ api.Account) error { + amt, err := amtFn() + if err != nil { + return err + } + return a.as.AddBalance(ctx, a.id, a.host, amt.Big()) + }) +} + +// WithSync syncs an accounts balance with the bus. To do so, the account is +// locked while the balance is fetched through balanceFn. +func (a *account) WithSync(ctx context.Context, balanceFn func() (types.Currency, error)) error { + return withAccountLock(ctx, a.as, a.id, a.host, true, func(_ api.Account) error { + balance, err := balanceFn() + if err != nil { + return err + } + return a.as.SetBalance(ctx, a.id, a.host, balance.Big()) + }) } // WithWithdrawal decreases the balance of an account by the amount returned by // amtFn. The amount is still withdrawn if amtFn returns an error since some // costs are non-refundable. func (a *account) WithWithdrawal(ctx context.Context, amtFn func() (types.Currency, error)) error { - account, lockID, err := a.as.LockAccount(ctx, a.id, a.host, false, accountLockingDuration) - if err != nil { - return err - } - defer func() { - unlockCtx, cancel := context.WithTimeout(a.shutdownCtx, 10*time.Second) - a.as.UnlockAccount(unlockCtx, a.id, lockID) - cancel() - }() + return withAccountLock(ctx, a.as, a.id, a.host, false, func(account api.Account) error { + // return early if the account needs to sync + if account.RequiresSync { + return fmt.Errorf("%w; account requires resync", errBalanceInsufficient) + } - // return early if the account needs to sync - if account.RequiresSync { - return fmt.Errorf("%w; account requires resync", errBalanceInsufficient) - } + // return early if our account is not funded + if account.Balance.Cmp(big.NewInt(0)) <= 0 { + return errBalanceInsufficient + } - // return early if our account is not funded - if account.Balance.Cmp(big.NewInt(0)) <= 0 { - return errBalanceInsufficient - } + // execute amtFn + amt, err := amtFn() - // execute amtFn - amt, err := amtFn() - if isBalanceInsufficient(err) { // in case of an insufficient balance, we schedule a sync - scheduleCtx, cancel := context.WithTimeout(a.shutdownCtx, 10*time.Second) - defer cancel() - err2 := a.as.ScheduleSync(scheduleCtx, a.id, a.host) - if err2 != nil { - err = fmt.Errorf("%w; failed to set requiresSync flag on bus, error: %v", err, err2) + if isBalanceInsufficient(err) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + err = errors.Join(err, a.as.ScheduleSync(ctx, a.id, a.host)) + cancel() } - } - - // if the amount is zero, we are done - if amt.IsZero() { - return err - } - // if an amount was returned, we withdraw it. 
- addCtx, cancel := context.WithTimeout(a.shutdownCtx, 10*time.Second) - defer cancel() - errAdd := a.as.AddBalance(addCtx, a.id, a.host, new(big.Int).Neg(amt.Big())) - if errAdd != nil { - err = fmt.Errorf("%w; failed to add balance to account, error: %v", err, errAdd) - } - return err -} - -// WithSync syncs an accounts balance with the bus. To do so, the account is -// locked while the balance is fetched through balanceFn. -func (a *account) WithSync(ctx context.Context, balanceFn func() (types.Currency, error)) error { - _, lockID, err := a.as.LockAccount(ctx, a.id, a.host, true, accountLockingDuration) - if err != nil { - return err - } - defer func() { - unlockCtx, cancel := context.WithTimeout(a.shutdownCtx, 10*time.Second) - a.as.UnlockAccount(unlockCtx, a.id, lockID) - cancel() - }() + // if an amount was returned, we withdraw it + if !amt.IsZero() { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + err = errors.Join(err, a.as.AddBalance(ctx, a.id, a.host, new(big.Int).Neg(amt.Big()))) + cancel() + } - balance, err := balanceFn() - if err != nil { return err - } - return a.as.SetBalance(ctx, a.id, a.host, balance.Big()) + }) } // deriveAccountKey derives an account plus key for a given host and worker. @@ -496,7 +471,7 @@ func (a *account) WithSync(ctx context.Context, balanceFn func() (types.Currency func (a *accounts) deriveAccountKey(hostKey types.PublicKey) types.PrivateKey { index := byte(0) // not used yet but can be used to derive more than 1 account per host - // Append the the host for which to create it and the index to the + // Append the host for which to create it and the index to the // corresponding sub-key. subKey := a.key data := append(subKey, hostKey[:]...) @@ -789,17 +764,17 @@ func RPCReadSector(ctx context.Context, t *transportV3, w io.Writer, pt rhpv3.Ho return } -func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.PrivateKey, pt rhpv3.HostPriceTable, rev *types.FileContractRevision, payment rhpv3.PaymentMethod, sector *[rhpv2.SectorSize]byte) (sectorRoot types.Hash256, cost types.Currency, err error) { +func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.PrivateKey, pt rhpv3.HostPriceTable, rev *types.FileContractRevision, payment rhpv3.PaymentMethod, sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte) (cost types.Currency, err error) { defer wrapErr(&err, "AppendSector") // sanity check revision first if rev.RevisionNumber == math.MaxUint64 { - return types.Hash256{}, types.ZeroCurrency, errMaxRevisionReached + return types.ZeroCurrency, errMaxRevisionReached } s, err := t.DialStream(ctx) if err != nil { - return types.Hash256{}, types.ZeroCurrency, err + return types.ZeroCurrency, err } defer s.Close() @@ -829,7 +804,7 @@ func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.Privat // compute expected collateral and refund expectedCost, expectedCollateral, expectedRefund, err := uploadSectorCost(pt, rev.WindowEnd) if err != nil { - return types.Hash256{}, types.ZeroCurrency, err + return types.ZeroCurrency, err } // apply leeways. @@ -840,13 +815,13 @@ func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.Privat // check if the cost, collateral and refund match our expectation. 
if executeResp.TotalCost.Cmp(expectedCost) > 0 { - return types.Hash256{}, types.ZeroCurrency, fmt.Errorf("cost exceeds expectation: %v > %v", executeResp.TotalCost.String(), expectedCost.String()) + return types.ZeroCurrency, fmt.Errorf("cost exceeds expectation: %v > %v", executeResp.TotalCost.String(), expectedCost.String()) } if executeResp.FailureRefund.Cmp(expectedRefund) < 0 { - return types.Hash256{}, types.ZeroCurrency, fmt.Errorf("insufficient refund: %v < %v", executeResp.FailureRefund.String(), expectedRefund.String()) + return types.ZeroCurrency, fmt.Errorf("insufficient refund: %v < %v", executeResp.FailureRefund.String(), expectedRefund.String()) } if executeResp.AdditionalCollateral.Cmp(expectedCollateral) < 0 { - return types.Hash256{}, types.ZeroCurrency, fmt.Errorf("insufficient collateral: %v < %v", executeResp.AdditionalCollateral.String(), expectedCollateral.String()) + return types.ZeroCurrency, fmt.Errorf("insufficient collateral: %v < %v", executeResp.AdditionalCollateral.String(), expectedCollateral.String()) } // set the cost and refund @@ -870,18 +845,17 @@ func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.Privat collateral := executeResp.AdditionalCollateral.Add(executeResp.FailureRefund) // check proof - sectorRoot = rhpv2.SectorRoot(sector) if rev.Filesize == 0 { // For the first upload to a contract we don't get a proof. So we just // assert that the new contract root matches the root of the sector. if rev.Filesize == 0 && executeResp.NewMerkleRoot != sectorRoot { - return types.Hash256{}, types.ZeroCurrency, fmt.Errorf("merkle root doesn't match the sector root upon first upload to contract: %v != %v", executeResp.NewMerkleRoot, sectorRoot) + return types.ZeroCurrency, fmt.Errorf("merkle root doesn't match the sector root upon first upload to contract: %v != %v", executeResp.NewMerkleRoot, sectorRoot) } } else { // Otherwise we make sure the proof was transmitted and verify it. 
actions := []rhpv2.RPCWriteAction{{Type: rhpv2.RPCWriteActionAppend}} // TODO: change once rhpv3 support is available if !rhpv2.VerifyDiffProof(actions, rev.Filesize/rhpv2.SectorSize, executeResp.Proof, []types.Hash256{}, rev.FileMerkleRoot, executeResp.NewMerkleRoot, []types.Hash256{sectorRoot}) { - return types.Hash256{}, types.ZeroCurrency, errors.New("proof verification failed") + return types.ZeroCurrency, errors.New("proof verification failed") } } @@ -889,7 +863,7 @@ func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.Privat newRevision := *rev newValid, newMissed, err := updateRevisionOutputs(&newRevision, types.ZeroCurrency, collateral) if err != nil { - return types.Hash256{}, types.ZeroCurrency, err + return types.ZeroCurrency, err } newRevision.Filesize += rhpv2.SectorSize newRevision.RevisionNumber++ diff --git a/worker/serve.go b/worker/serve.go index 76c1fb2d5..25d0c0412 100644 --- a/worker/serve.go +++ b/worker/serve.go @@ -76,9 +76,6 @@ func serveContent(rw http.ResponseWriter, req *http.Request, obj api.Object, dow } }() - // create a content reader - rs := newContentReader(pr, obj, offset) - // fetch the content type, if not set and we can't infer it from object's // name we default to application/octet-stream, that is important because we // have to avoid http.ServeContent to sniff the content type as it would @@ -87,17 +84,20 @@ func serveContent(rw http.ResponseWriter, req *http.Request, obj api.Object, dow if contentType == "" { contentType = "application/octet-stream" } + rw.Header().Set("Content-Type", contentType) // set the response headers, no need to set Last-Modified header as // serveContent does that for us rw.Header().Set("ETag", api.FormatETag(obj.ETag)) - rw.Header().Set("Content-Type", contentType) // set the user metadata headers for k, v := range obj.Metadata { rw.Header().Set(fmt.Sprintf("%s%s", api.ObjectMetadataPrefix, k), v) } + // create a content reader + rs := newContentReader(pr, obj, offset) + http.ServeContent(rw, req, obj.Name, obj.ModTime.Std(), rs) return http.StatusOK, nil } diff --git a/worker/upload.go b/worker/upload.go index 72c65bf07..c5e86a166 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -39,6 +39,7 @@ type ( hm HostManager mm MemoryManager os ObjectStore + cl ContractLocker cs ContractStore logger *zap.SugaredLogger @@ -137,9 +138,8 @@ type ( } sectorUploadResp struct { - req *sectorUploadReq - root types.Hash256 - err error + req *sectorUploadReq + err error } ) @@ -148,8 +148,8 @@ func (w *worker) initUploadManager(maxMemory, maxOverdrive uint64, overdriveTime panic("upload manager already initialized") // developer error } - mm := newMemoryManager(logger, maxMemory) - w.uploadManager = newUploadManager(w.shutdownCtx, w, mm, w.bus, w.bus, maxOverdrive, overdriveTimeout, w.contractLockingDuration, logger) + mm := newMemoryManager(logger.Named("memorymanager"), maxMemory) + w.uploadManager = newUploadManager(w.shutdownCtx, w, mm, w.bus, w.bus, w.bus, maxOverdrive, overdriveTimeout, w.contractLockingDuration, logger) } func (w *worker) upload(ctx context.Context, r io.Reader, contracts []api.ContractMetadata, up uploadParameters, opts ...UploadOption) (_ string, err error) { @@ -177,119 +177,107 @@ func (w *worker) upload(ctx context.Context, r io.Reader, contracts []api.Contra return "", err } - // if packing was enabled try uploading packed slabs - if up.packing { - if err := w.tryUploadPackedSlabs(ctx, up.rs, up.contractSet, bufferSizeLimitReached); err != nil { - w.logger.Errorf("couldn't upload 
packed slabs, err: %v", err) + // return early if worker was shut down or if we don't have to consider + // packed uploads + if w.isStopped() || !up.packing { + return eTag, nil + } + + // try and upload one slab synchronously + if bufferSizeLimitReached { + mem := w.uploadManager.mm.AcquireMemory(ctx, up.rs.SlabSize()) + if mem != nil { + defer mem.Release() + + // fetch packed slab to upload + packedSlabs, err := w.bus.PackedSlabsForUpload(ctx, defaultPackedSlabsLockDuration, uint8(up.rs.MinShards), uint8(up.rs.TotalShards), up.contractSet, 1) + if err != nil { + return "", fmt.Errorf("couldn't fetch packed slabs from bus: %v", err) + } + + // upload packed slab + if len(packedSlabs) > 0 { + if err := w.tryUploadPackedSlab(ctx, mem, packedSlabs[0], up.rs, up.contractSet, lockingPriorityBlockedUpload); err != nil { + w.logger.Error(err) + } + } } + + // make sure there's a goroutine uploading the remainder of the packed slabs + go w.threadedUploadPackedSlabs(up.rs, up.contractSet, lockingPriorityBackgroundUpload) } + return eTag, nil } func (w *worker) threadedUploadPackedSlabs(rs api.RedundancySettings, contractSet string, lockPriority int) { key := fmt.Sprintf("%d-%d_%s", rs.MinShards, rs.TotalShards, contractSet) - w.uploadsMu.Lock() - if w.uploadingPackedSlabs[key] { + if _, ok := w.uploadingPackedSlabs[key]; ok { w.uploadsMu.Unlock() return } - w.uploadingPackedSlabs[key] = true + w.uploadingPackedSlabs[key] = struct{}{} w.uploadsMu.Unlock() // make sure we mark uploading packed slabs as false when we're done defer func() { w.uploadsMu.Lock() - w.uploadingPackedSlabs[key] = false + delete(w.uploadingPackedSlabs, key) w.uploadsMu.Unlock() }() - // keep uploading packed slabs until we're done - for { - uploaded, err := w.uploadPackedSlabs(w.shutdownCtx, defaultPackedSlabsLockDuration, rs, contractSet, lockPriority) - if err != nil { - w.logger.Errorf("couldn't upload packed slabs, err: %v", err) - return - } else if uploaded == 0 { - return - } - } -} - -func (w *worker) tryUploadPackedSlabs(ctx context.Context, rs api.RedundancySettings, contractSet string, block bool) (err error) { - // if we want to block, try and upload one packed slab synchronously, we use - // a slightly higher upload priority to avoid reaching the context deadline - if block { - _, err = w.uploadPackedSlabs(ctx, defaultPackedSlabsLockDuration, rs, contractSet, lockingPriorityBlockedUpload) - } - - // make sure there's a goroutine uploading the remainder of the packed slabs - go w.threadedUploadPackedSlabs(rs, contractSet, lockingPriorityBackgroundUpload) - return -} - -func (w *worker) uploadPackedSlabs(ctx context.Context, lockingDuration time.Duration, rs api.RedundancySettings, contractSet string, lockPriority int) (uploaded int, err error) { - // upload packed slabs - var mu sync.Mutex - var errs error + // derive a context that we can use as an interrupt in case of an error or shutdown. + interruptCtx, interruptCancel := context.WithCancel(w.shutdownCtx) + defer interruptCancel() var wg sync.WaitGroup - totalSize := uint64(rs.TotalShards) * rhpv2.SectorSize - - // derive a context that we can use as an interrupt in case of an error. 
- interruptCtx, cancel := context.WithCancel(ctx) - defer cancel() - for { - // block until we have memory for a slab or until we are interrupted - mem := w.uploadManager.mm.AcquireMemory(interruptCtx, totalSize) + // block until we have memory + mem := w.uploadManager.mm.AcquireMemory(interruptCtx, rs.SlabSize()) if mem == nil { break // interrupted } - // fetch packed slabs to upload - var packedSlabs []api.PackedSlab - packedSlabs, err = w.bus.PackedSlabsForUpload(ctx, lockingDuration, uint8(rs.MinShards), uint8(rs.TotalShards), contractSet, 1) + // fetch packed slab to upload + packedSlabs, err := w.bus.PackedSlabsForUpload(interruptCtx, defaultPackedSlabsLockDuration, uint8(rs.MinShards), uint8(rs.TotalShards), contractSet, 1) if err != nil { - err = fmt.Errorf("couldn't fetch packed slabs from bus: %v", err) + w.logger.Errorf("couldn't fetch packed slabs from bus: %v", err) mem.Release() break - } else if len(packedSlabs) == 0 { + } + + // no more packed slabs to upload + if len(packedSlabs) == 0 { mem.Release() - break // no more slabs + break } - ps := packedSlabs[0] - // launch upload for slab wg.Add(1) go func(ps api.PackedSlab) { - defer mem.Release() defer wg.Done() - err := w.uploadPackedSlab(ctx, rs, ps, mem, contractSet, lockPriority) - mu.Lock() - if err != nil { - errs = errors.Join(errs, err) - cancel() // prevent new uploads from being launched - } else { - uploaded++ + defer mem.Release() + + // we use the background context here, but apply a sane timeout; + // this ensures ongoing uploads are handled gracefully during + // shutdown + ctx, cancel := context.WithTimeout(context.Background(), defaultPackedSlabsUploadTimeout) + defer cancel() + + // try to upload the packed slab; on error, log it and interrupt the loop + if err := w.tryUploadPackedSlab(ctx, mem, ps, rs, contractSet, lockPriority); err != nil { + w.logger.Error(err) + interruptCancel() // prevent new uploads from being launched } - mu.Unlock() - }(ps) + }(packedSlabs[0]) } // wait for all threads to finish wg.Wait() - - // return collected errors - err = errors.Join(err, errs) return } -func (w *worker) uploadPackedSlab(ctx context.Context, rs api.RedundancySettings, ps api.PackedSlab, mem Memory, contractSet string, lockPriority int) error { - // create a context with sane timeout - ctx, cancel := context.WithTimeout(ctx, defaultPackedSlabsUploadTimeout) - defer cancel() - +func (w *worker) tryUploadPackedSlab(ctx context.Context, mem Memory, ps api.PackedSlab, rs api.RedundancySettings, contractSet string, lockPriority int) error { // fetch contracts contracts, err := w.bus.Contracts(ctx, api.ContractsOpts{ContractSet: contractSet}) if err != nil { @@ -314,11 +302,12 @@ func (w *worker) uploadPackedSlab(ctx context.Context, rs api.RedundancySettings return nil } -func newUploadManager(ctx context.Context, hm HostManager, mm MemoryManager, os ObjectStore, cs ContractStore, maxOverdrive uint64, overdriveTimeout time.Duration, contractLockDuration time.Duration, logger *zap.SugaredLogger) *uploadManager { +func newUploadManager(ctx context.Context, hm HostManager, mm MemoryManager, os ObjectStore, cl ContractLocker, cs ContractStore, maxOverdrive uint64, overdriveTimeout time.Duration, contractLockDuration time.Duration, logger *zap.SugaredLogger) *uploadManager { return &uploadManager{ hm: hm, mm: mm, os: os, + cl: cl, cs: cs, logger: logger, @@ -336,9 +325,10 @@ func newUploadManager(ctx context.Context, hm HostManager, mm MemoryManager, os } } -func (mgr *uploadManager) newUploader(os 
ObjectStore, cs ContractStore, hm HostManager, c api.ContractMetadata) *uploader { +func (mgr *uploadManager) newUploader(os ObjectStore, cl ContractLocker, cs ContractStore, hm HostManager, c api.ContractMetadata) *uploader { return &uploader{ os: os, + cl: cl, cs: cs, hm: hm, logger: mgr.logger, @@ -400,11 +390,8 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a // create the object o := object.NewObject(up.ec) - // create the hash reader - hr := newHashReader(r) - // create the cipher reader - cr, err := o.Encrypt(hr, up.encryptionOffset) + cr, err := o.Encrypt(r, up.encryptionOffset) if err != nil { return false, "", err } @@ -435,9 +422,9 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a // channel to notify main thread of the number of slabs to wait for numSlabsChan := make(chan int, 1) - // prepare slab size - size := int64(up.rs.MinShards) * rhpv2.SectorSize - redundantSize := uint64(up.rs.TotalShards) * rhpv2.SectorSize + // prepare slab sizes + slabSizeNoRedundancy := up.rs.SlabSizeNoRedundancy() + slabSize := up.rs.SlabSize() var partialSlab []byte // launch uploads in a separate goroutine @@ -452,14 +439,14 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a default: } // acquire memory - mem := mgr.mm.AcquireMemory(ctx, redundantSize) + mem := mgr.mm.AcquireMemory(ctx, slabSize) if mem == nil { return // interrupted } // read next slab's data - data := make([]byte, size) - length, err := io.ReadFull(io.LimitReader(cr, size), data) + data := make([]byte, slabSizeNoRedundancy) + length, err := io.ReadFull(io.LimitReader(cr, int64(slabSizeNoRedundancy)), data) if err == io.EOF { mem.Release() @@ -532,8 +519,8 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a o.Slabs = append(o.Slabs, resp.slab) } - // calculate the eTag - eTag = hr.Hash() + // compute etag + eTag = o.ComputeETag() // add partial slabs if len(partialSlab) > 0 { @@ -751,7 +738,7 @@ func (mgr *uploadManager) refreshUploaders(contracts []api.ContractMetadata, bh // add missing uploaders for _, c := range contracts { if _, exists := existing[c.ID]; !exists && bh < c.WindowEnd { - uploader := mgr.newUploader(mgr.os, mgr.cs, mgr.hm, c) + uploader := mgr.newUploader(mgr.os, mgr.cl, mgr.cs, mgr.hm, c) refreshed = append(refreshed, uploader) go uploader.Start() } @@ -765,20 +752,32 @@ func (u *upload) newSlabUpload(ctx context.Context, shards [][]byte, uploaders [ responseChan := make(chan sectorUploadResp) // prepare sectors + var wg sync.WaitGroup sectors := make([]*sectorUpload, len(shards)) - for sI, shard := range shards { - // create the ctx - sCtx, sCancel := context.WithCancel(ctx) - - // create the sector - sectors[sI] = &sectorUpload{ - data: (*[rhpv2.SectorSize]byte)(shard), - index: sI, - root: rhpv2.SectorRoot((*[rhpv2.SectorSize]byte)(shard)), - ctx: sCtx, - cancel: sCancel, - } + for sI := range shards { + wg.Add(1) + go func(idx int) { + // create the ctx + sCtx, sCancel := context.WithCancel(ctx) + + // create the sector + // NOTE: we are computing the sector root here and pass it all the + // way down to the RPC to avoid having to recompute it for the proof + // verification. This is necessary because we need it ahead of time + // for the call to AddUploadingSector in uploader.go + // Once we upload to temp storage we don't need AddUploadingSector + // anymore and can move it back to the RPC. 
+ sectors[idx] = &sectorUpload{ + data: (*[rhpv2.SectorSize]byte)(shards[idx]), + index: idx, + root: rhpv2.SectorRoot((*[rhpv2.SectorSize]byte)(shards[idx])), + ctx: sCtx, + cancel: sCancel, + } + wg.Done() + }(sI) } + wg.Wait() // prepare candidates candidates := make([]*candidate, len(uploaders)) @@ -833,8 +832,6 @@ func (u *upload) uploadSlab(ctx context.Context, rs api.RedundancySettings, data } func (u *upload) uploadShards(ctx context.Context, shards [][]byte, candidates []*uploader, mem Memory, maxOverdrive uint64, overdriveTimeout time.Duration) (sectors []object.Sector, uploadSpeed int64, overdrivePct float64, err error) { - start := time.Now() - // ensure inflight uploads get cancelled ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -871,6 +868,10 @@ func (u *upload) uploadShards(ctx context.Context, shards [][]byte, candidates [ // create a request buffer var buffer []*sectorUploadReq + // start the timer after the upload has started + // newSlabUpload is quite slow due to computing the sector roots + start := time.Now() + // collect responses var used bool var done bool @@ -878,7 +879,7 @@ loop: for slab.numInflight > 0 && !done { select { case <-u.shutdownCtx.Done(): - return nil, 0, 0, errors.New("upload stopped") + return nil, 0, 0, ErrShuttingDown case <-ctx.Done(): return nil, 0, 0, ctx.Err() case resp := <-respChan: @@ -930,6 +931,9 @@ loop: // calculate the upload speed bytes := slab.numUploaded * rhpv2.SectorSize ms := time.Since(start).Milliseconds() + if ms == 0 { + ms = 1 + } uploadSpeed = int64(bytes) / ms // calculate overdrive pct @@ -1057,12 +1061,6 @@ func (s *slabUpload) receive(resp sectorUploadResp) (bool, bool) { return false, false } - // sanity check we receive the expected root - if resp.root != req.sector.root { - s.errs[req.hk] = fmt.Errorf("root mismatch, %v != %v", resp.root, req.sector.root) - return false, false - } - // redundant sectors can't complete the upload if sector.uploaded.Root != (types.Hash256{}) { return false, false @@ -1072,7 +1070,7 @@ func (s *slabUpload) receive(resp sectorUploadResp) (bool, bool) { sector.finish(object.Sector{ Contracts: map[types.PublicKey][]types.FileContractID{req.hk: {req.fcid}}, LatestHost: req.hk, - Root: resp.root, + Root: req.sector.root, }) // update uploaded sectors @@ -1119,7 +1117,7 @@ func (req *sectorUploadReq) done() bool { } } -func (req *sectorUploadReq) fail(err error) { +func (req *sectorUploadReq) finish(err error) { select { case <-req.sector.ctx.Done(): case req.responseChan <- sectorUploadResp{ @@ -1128,13 +1126,3 @@ func (req *sectorUploadReq) fail(err error) { }: } } - -func (req *sectorUploadReq) succeed(root types.Hash256) { - select { - case <-req.sector.ctx.Done(): - case req.responseChan <- sectorUploadResp{ - req: req, - root: root, - }: - } -} diff --git a/worker/upload_test.go b/worker/upload_test.go index 8d32455bd..1d441693f 100644 --- a/worker/upload_test.go +++ b/worker/upload_test.go @@ -4,48 +4,44 @@ import ( "bytes" "context" "errors" + "fmt" "testing" "time" rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" "go.sia.tech/renterd/object" "lukechampine.com/frand" ) -const ( - testBucket = "testbucket" - testContractSet = "testcontractset" -) - var ( + testBucket = "testbucket" + testContractSet = "testcontractset" testRedundancySettings = api.RedundancySettings{MinShards: 2, TotalShards: 6} ) func TestUpload(t *testing.T) { - // mock worker - w := newMockWorker() + // create test worker + w := 
newTestWorker(t) // add hosts to worker - w.addHosts(testRedundancySettings.TotalShards * 2) + w.AddHosts(testRedundancySettings.TotalShards * 2) // convenience variables os := w.os - dl := w.dl - ul := w.ul + dl := w.downloadManager + ul := w.uploadManager // create test data - data := make([]byte, 128) - if _, err := frand.Read(data); err != nil { - t.Fatal(err) - } + data := frand.Bytes(128) // create upload params params := testParameters(t.Name()) // upload data - _, _, err := ul.Upload(context.Background(), bytes.NewReader(data), w.contracts(), params, lockingPriorityUpload) + _, _, err := ul.Upload(context.Background(), bytes.NewReader(data), w.Contracts(), params, lockingPriorityUpload) if err != nil { t.Fatal(err) } @@ -64,7 +60,7 @@ func TestUpload(t *testing.T) { // download the data and assert it matches var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -74,7 +70,7 @@ func TestUpload(t *testing.T) { // filter contracts to have (at most) min shards used contracts var n int var filtered []api.ContractMetadata - for _, md := range w.contracts() { + for _, md := range w.Contracts() { // add unused contracts if _, used := used[md.HostKey]; !used { filtered = append(filtered, md) @@ -90,7 +86,7 @@ func TestUpload(t *testing.T) { // download the data again and assert it matches buf.Reset() - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), filtered) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), filtered) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -107,52 +103,49 @@ func TestUpload(t *testing.T) { // download the data again and assert it fails buf.Reset() - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), filtered) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), filtered) if !errors.Is(err, errDownloadNotEnoughHosts) { t.Fatal("expected not enough hosts error", err) } // try and upload into a bucket that does not exist params.bucket = "doesnotexist" - _, _, err = ul.Upload(context.Background(), bytes.NewReader(data), w.contracts(), params, lockingPriorityUpload) - if !errors.Is(err, errBucketNotFound) { + _, _, err = ul.Upload(context.Background(), bytes.NewReader(data), w.Contracts(), params, lockingPriorityUpload) + if !errors.Is(err, api.ErrBucketNotFound) { t.Fatal("expected bucket not found error", err) } // upload data using a cancelled context - assert we don't hang ctx, cancel := context.WithCancel(context.Background()) cancel() - _, _, err = ul.Upload(ctx, bytes.NewReader(data), w.contracts(), params, lockingPriorityUpload) + _, _, err = ul.Upload(ctx, bytes.NewReader(data), w.Contracts(), params, lockingPriorityUpload) if err == nil || !errors.Is(err, errUploadInterrupted) { t.Fatal(err) } } func TestUploadPackedSlab(t *testing.T) { - // mock worker - w := newMockWorker() + // create test worker + w := newTestWorker(t) // add hosts to worker - w.addHosts(testRedundancySettings.TotalShards * 2) + w.AddHosts(testRedundancySettings.TotalShards) // convenience variables os := w.os - mm := w.mm - dl := w.dl - ul := w.ul - - // create test data - data := make([]byte, 128) - if _, err := 
frand.Read(data); err != nil { - t.Fatal(err) - } + mm := w.ulmm + dl := w.downloadManager + ul := w.uploadManager // create upload params params := testParameters(t.Name()) params.packing = true + // create test data + data := frand.Bytes(128) + // upload data - _, _, err := ul.Upload(context.Background(), bytes.NewReader(data), w.contracts(), params, lockingPriorityUpload) + _, _, err := ul.Upload(context.Background(), bytes.NewReader(data), w.Contracts(), params, lockingPriorityUpload) if err != nil { t.Fatal(err) } @@ -170,7 +163,7 @@ func TestUploadPackedSlab(t *testing.T) { // download the data and assert it matches var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -185,10 +178,10 @@ func TestUploadPackedSlab(t *testing.T) { t.Fatal("expected 1 packed slab") } ps := pss[0] - mem := mm.AcquireMemory(context.Background(), uint64(params.rs.TotalShards*rhpv2.SectorSize)) // upload the packed slab - err = ul.UploadPackedSlab(context.Background(), params.rs, ps, mem, w.contracts(), 0, lockingPriorityUpload) + mem := mm.AcquireMemory(context.Background(), params.rs.SlabSize()) + err = ul.UploadPackedSlab(context.Background(), params.rs, ps, mem, w.Contracts(), 0, lockingPriorityUpload) if err != nil { t.Fatal(err) } @@ -206,38 +199,91 @@ func TestUploadPackedSlab(t *testing.T) { // download the data again and assert it matches buf.Reset() - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { t.Fatal("data mismatch") } + + // define a helper that counts packed slabs + packedSlabsCount := func() int { + t.Helper() + os.mu.Lock() + cnt := len(os.partials) + os.mu.Unlock() + return cnt + } + + // define a helper that uploads data using the worker + var c int + uploadBytes := func(n int) { + t.Helper() + params.path = fmt.Sprintf("%s_%d", t.Name(), c) + _, err := w.upload(context.Background(), bytes.NewReader(frand.Bytes(n)), w.Contracts(), params) + if err != nil { + t.Fatal(err) + } + c++ + } + + // block async packed slab uploads + w.BlockAsyncPackedSlabUploads(params) + + // configure max buffer size + os.setSlabBufferMaxSizeSoft(128) + + // upload 2x64 bytes using the worker and assert we still have two packed + // slabs (buffer limit not reached) + uploadBytes(64) + uploadBytes(64) + if packedSlabsCount() != 2 { + t.Fatal("expected 2 packed slabs") + } + + // upload one more byte and assert we still have two packed slabs (one got + // uploaded synchronously because buffer limit was reached) + uploadBytes(1) + if packedSlabsCount() != 2 { + t.Fatal("expected 2 packed slabs") + } + + // unblock asynchronous uploads + w.UnblockAsyncPackedSlabUploads(params) + uploadBytes(129) // ensure background thread is running + + // assert packed slabs get uploaded asynchronously + if err := test.Retry(100, 100*time.Millisecond, func() error { + if packedSlabsCount() != 0 { + return errors.New("expected 0 packed slabs") + } + return nil + }); err != nil { + t.Fatal(err) + } } func TestUploadShards(t *testing.T) { - // mock worker - w := newMockWorker() + // create test worker + w := 
newTestWorker(t) // add hosts to worker - w.addHosts(testRedundancySettings.TotalShards * 2) + w.AddHosts(testRedundancySettings.TotalShards * 2) // convenience variables os := w.os - mm := w.mm - dl := w.dl - ul := w.ul + mm := w.ulmm + dl := w.downloadManager + ul := w.uploadManager // create test data - data := make([]byte, 128) - if _, err := frand.Read(data); err != nil { - t.Fatal(err) - } + data := frand.Bytes(128) // create upload params params := testParameters(t.Name()) // upload data - _, _, err := ul.Upload(context.Background(), bytes.NewReader(data), w.contracts(), params, lockingPriorityUpload) + _, _, err := ul.Upload(context.Background(), bytes.NewReader(data), w.Contracts(), params, lockingPriorityUpload) if err != nil { t.Fatal(err) } @@ -268,7 +314,7 @@ func TestUploadShards(t *testing.T) { } // download the slab - shards, _, err := dl.DownloadSlab(context.Background(), slab.Slab, w.contracts()) + shards, _, err := dl.DownloadSlab(context.Background(), slab.Slab, w.Contracts()) if err != nil { t.Fatal(err) } @@ -284,7 +330,7 @@ func TestUploadShards(t *testing.T) { // recreate upload contracts contracts := make([]api.ContractMetadata, 0) - for _, c := range w.contracts() { + for _, c := range w.Contracts() { _, used := usedHosts[c.HostKey] _, bad := badHosts[c.HostKey] if !used && !bad { @@ -317,7 +363,7 @@ func TestUploadShards(t *testing.T) { // create download contracts contracts = contracts[:0] - for _, c := range w.contracts() { + for _, c := range w.Contracts() { if _, bad := badHosts[c.HostKey]; !bad { contracts = append(contracts, c) } @@ -325,7 +371,7 @@ func TestUploadShards(t *testing.T) { // download the data and assert it matches var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), contracts) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), contracts) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -334,29 +380,26 @@ func TestUploadShards(t *testing.T) { } func TestRefreshUploaders(t *testing.T) { - // mock worker - w := newMockWorker() + // create test worker + w := newTestWorker(t) // add hosts to worker - w.addHosts(testRedundancySettings.TotalShards) + w.AddHosts(testRedundancySettings.TotalShards) // convenience variables - ul := w.ul - hm := w.hm + ul := w.uploadManager cs := w.cs + hm := w.hm // create test data - data := make([]byte, 128) - if _, err := frand.Read(data); err != nil { - t.Fatal(err) - } + data := frand.Bytes(128) // create upload params params := testParameters(t.Name()) // upload data - contracts := w.contracts() - _, _, err := ul.Upload(context.Background(), bytes.NewReader(data), contracts, params, lockingPriorityUpload) + contracts := w.Contracts() + _, err := w.upload(context.Background(), bytes.NewReader(data), contracts, params) if err != nil { t.Fatal(err) } @@ -368,18 +411,18 @@ func TestRefreshUploaders(t *testing.T) { // renew the first contract c1 := contracts[0] - c1Renewed := w.renewContract(c1.HostKey) + c1Renewed := w.RenewContract(c1.HostKey) // remove the host from the second contract c2 := contracts[1] delete(hm.hosts, c2.HostKey) - delete(cs.locks, c2.ID) + delete(cs.contracts, c2.ID) // add a new host/contract - hNew := w.addHost() + hNew := w.AddHost() // upload data - contracts = w.contracts() + contracts = w.Contracts() _, _, err = ul.Upload(context.Background(), bytes.NewReader(data), contracts, params, lockingPriorityUpload) if err != nil { t.Fatal(err) @@ -389,7 +432,7 
@@ func TestRefreshUploaders(t *testing.T) { var added, renewed int for _, ul := range ul.uploaders { switch ul.ContractID() { - case hNew.c.metadata.ID: + case hNew.metadata.ID: added++ case c1Renewed.metadata.ID: renewed++ @@ -410,7 +453,7 @@ func TestRefreshUploaders(t *testing.T) { // manually add a request to the queue of one of the uploaders we're about to expire responseChan := make(chan sectorUploadResp, 1) for _, ul := range ul.uploaders { - if ul.fcid == hNew.c.metadata.ID { + if ul.fcid == hNew.metadata.ID { ul.mu.Lock() ul.queue = append(ul.queue, §orUploadReq{responseChan: responseChan, sector: §orUpload{ctx: context.Background()}}) ul.mu.Unlock() @@ -436,43 +479,38 @@ func TestRefreshUploaders(t *testing.T) { } func TestUploadRegression(t *testing.T) { - // mock worker - w := newMockWorker() + // create test worker + w := newTestWorker(t) // add hosts to worker - w.addHosts(testRedundancySettings.TotalShards) + w.AddHosts(testRedundancySettings.TotalShards) // convenience variables - mm := w.mm os := w.os - ul := w.ul - dl := w.dl + dl := w.downloadManager // create test data - data := make([]byte, 128) - if _, err := frand.Read(data); err != nil { - t.Fatal(err) - } + data := frand.Bytes(128) // create upload params params := testParameters(t.Name()) // make sure the memory manager blocks - mm.memBlockChan = make(chan struct{}) + unblock := w.BlockUploads() // upload data ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - _, _, err := ul.Upload(ctx, bytes.NewReader(data), w.contracts(), params, lockingPriorityUpload) + _, err := w.upload(ctx, bytes.NewReader(data), w.Contracts(), params) if !errors.Is(err, errUploadInterrupted) { t.Fatal(err) } // unblock the memory manager - close(mm.memBlockChan) + unblock() // upload data - _, _, err = ul.Upload(context.Background(), bytes.NewReader(data), w.contracts(), params, lockingPriorityUpload) + _, err = w.upload(context.Background(), bytes.NewReader(data), w.Contracts(), params) if err != nil { t.Fatal(err) } @@ -485,7 +523,7 @@ func TestUploadRegression(t *testing.T) { // download data for good measure var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { diff --git a/worker/upload_utils.go b/worker/upload_utils.go index 4b5241b4d..306e1774f 100644 --- a/worker/upload_utils.go +++ b/worker/upload_utils.go @@ -2,11 +2,9 @@ package worker import ( "bytes" - "encoding/hex" "io" "github.com/gabriel-vasile/mimetype" - "go.sia.tech/core/types" "go.sia.tech/renterd/object" ) @@ -28,28 +26,3 @@ func newMimeReader(r io.Reader) (mimeType string, recycled io.Reader, err error) recycled = io.MultiReader(buf, r) return mtype.String(), recycled, err } - -type hashReader struct { - r io.Reader - h *types.Hasher -} - -func newHashReader(r io.Reader) *hashReader { - return &hashReader{ - r: r, - h: types.NewHasher(), - } -} - -func (e *hashReader) Read(p []byte) (int, error) { - n, err := e.r.Read(p) - if _, wErr := e.h.E.Write(p[:n]); wErr != nil { - return 0, wErr - } - return n, err -} - -func (e *hashReader) Hash() string { - sum := e.h.Sum() - return hex.EncodeToString(sum[:]) -} diff --git a/worker/uploader.go b/worker/uploader.go index 3ec88c6fa..28b04033d 100644 --- a/worker/uploader.go +++ b/worker/uploader.go @@ -19,10 +19,15 @@ const ( 
sectorUploadTimeout = 60 * time.Second ) +var ( + errUploaderStopped = errors.New("uploader was stopped") +) + type ( uploader struct { os ObjectStore cs ContractStore + cl ContractLocker hm HostManager logger *zap.SugaredLogger @@ -36,6 +41,7 @@ type ( fcid types.FileContractID host Host queue []*sectorUploadReq + stopped bool // stats related field consecutiveFailures uint64 @@ -109,7 +115,7 @@ outer: } // execute it - root, elapsed, err := u.execute(req) + elapsed, err := u.execute(req) // the uploader's contract got renewed, requeue the request if errors.Is(err, errMaxRevisionReached) { @@ -120,10 +126,12 @@ outer: } // send the response - if err != nil { - req.fail(err) - } else { - req.succeed(root) + select { + case <-req.sector.ctx.Done(): + case req.responseChan <- sectorUploadResp{ + req: req, + err: err, + }: } // track the error, ignore gracefully closed streams and canceled overdrives @@ -136,24 +144,35 @@ outer: } func (u *uploader) Stop(err error) { + u.mu.Lock() + u.stopped = true + u.mu.Unlock() + for { upload := u.pop() if upload == nil { break } if !upload.done() { - upload.fail(err) + upload.finish(err) } } } func (u *uploader) enqueue(req *sectorUploadReq) { + u.mu.Lock() + // check for stopped + if u.stopped { + u.mu.Unlock() + go req.finish(errUploaderStopped) // don't block the caller + return + } + // decorate the request - req.fcid = u.ContractID() + req.fcid = u.fcid req.hk = u.hk // enqueue the request - u.mu.Lock() u.queue = append(u.queue, req) u.mu.Unlock() @@ -176,7 +195,7 @@ func (u *uploader) estimate() float64 { return numSectors * estimateP90 } -func (u *uploader) execute(req *sectorUploadReq) (types.Hash256, time.Duration, error) { +func (u *uploader) execute(req *sectorUploadReq) (time.Duration, error) { // grab fields u.mu.Lock() host := u.host @@ -184,13 +203,13 @@ func (u *uploader) execute(req *sectorUploadReq) (types.Hash256, time.Duration, u.mu.Unlock() // acquire contract lock - lockID, err := u.cs.AcquireContract(req.sector.ctx, fcid, req.contractLockPriority, req.contractLockDuration) + lockID, err := u.cl.AcquireContract(req.sector.ctx, fcid, req.contractLockPriority, req.contractLockDuration) if err != nil { - return types.Hash256{}, 0, err + return 0, err } // defer the release - lock := newContractLock(u.shutdownCtx, fcid, lockID, req.contractLockDuration, u.cs, u.logger) + lock := newContractLock(u.shutdownCtx, fcid, lockID, req.contractLockDuration, u.cl, u.logger) defer func() { ctx, cancel := context.WithTimeout(u.shutdownCtx, 10*time.Second) lock.Release(ctx) @@ -204,26 +223,26 @@ func (u *uploader) execute(req *sectorUploadReq) (types.Hash256, time.Duration, // fetch the revision rev, err := host.FetchRevision(ctx, defaultRevisionFetchTimeout) if err != nil { - return types.Hash256{}, 0, err + return 0, err } else if rev.RevisionNumber == math.MaxUint64 { - return types.Hash256{}, 0, errMaxRevisionReached + return 0, errMaxRevisionReached } // update the bus if err := u.os.AddUploadingSector(ctx, req.uploadID, fcid, req.sector.root); err != nil { - return types.Hash256{}, 0, fmt.Errorf("failed to add uploading sector to contract %v, err: %v", fcid, err) + return 0, fmt.Errorf("failed to add uploading sector to contract %v, err: %v", fcid, err) } // upload the sector start := time.Now() - root, err := host.UploadSector(ctx, req.sector.sectorData(), rev) + err = host.UploadSector(ctx, req.sector.root, req.sector.sectorData(), rev) if err != nil { - return types.Hash256{}, 0, fmt.Errorf("failed to upload sector to contract %v, err: %v", 
fcid, err) + return 0, fmt.Errorf("failed to upload sector to contract %v, err: %v", fcid, err) } // calculate elapsed time elapsed := time.Since(start) - return root, elapsed, nil + return elapsed, nil } func (u *uploader) pop() *sectorUploadReq { diff --git a/worker/uploader_test.go b/worker/uploader_test.go new file mode 100644 index 000000000..b203827a5 --- /dev/null +++ b/worker/uploader_test.go @@ -0,0 +1,34 @@ +package worker + +import ( + "context" + "errors" + "testing" + "time" +) + +func TestUploaderStopped(t *testing.T) { + w := newTestWorker(t) + w.AddHosts(1) + + um := w.uploadManager + um.refreshUploaders(w.Contracts(), 1) + + ul := um.uploaders[0] + ul.Stop(errors.New("test")) + + req := sectorUploadReq{ + responseChan: make(chan sectorUploadResp), + sector: §orUpload{ctx: context.Background()}, + } + ul.enqueue(&req) + + select { + case res := <-req.responseChan: + if !errors.Is(res.err, errUploaderStopped) { + t.Fatal("expected error response") + } + case <-time.After(10 * time.Second): + t.Fatal("no response") + } +} diff --git a/worker/worker.go b/worker/worker.go index 5ed7e8a05..9e4dacdd2 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -9,6 +9,7 @@ import ( "math/big" "net" "net/http" + "os" "runtime" "sort" "strings" @@ -69,44 +70,21 @@ func NewClient(address, password string) *Client { type ( Bus interface { alerts.Alerter - consensusState + ConsensusState webhooks.Broadcaster AccountStore + ContractLocker ContractStore + HostStore ObjectStore + SettingStore - BroadcastTransaction(ctx context.Context, txns []types.Transaction) error - SyncerPeers(ctx context.Context) (resp []string, err error) - - Contract(ctx context.Context, id types.FileContractID) (api.ContractMetadata, error) - ContractSize(ctx context.Context, id types.FileContractID) (api.ContractSize, error) - ContractRoots(ctx context.Context, id types.FileContractID) ([]types.Hash256, []types.Hash256, error) - Contracts(ctx context.Context, opts api.ContractsOpts) ([]api.ContractMetadata, error) - - RecordHostScans(ctx context.Context, scans []hostdb.HostScan) error - RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error - RecordContractSpending(ctx context.Context, records []api.ContractSpendingRecord) error - - Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) - - GougingParams(ctx context.Context) (api.GougingParams, error) - UploadParams(ctx context.Context) (api.UploadParams, error) - - Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (api.ObjectsResponse, error) - DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) error - MultipartUpload(ctx context.Context, uploadID string) (resp api.MultipartUpload, err error) - PackedSlabsForUpload(ctx context.Context, lockingDuration time.Duration, minShards, totalShards uint8, set string, limit int) ([]api.PackedSlab, error) - - WalletDiscard(ctx context.Context, txn types.Transaction) error - WalletFund(ctx context.Context, txn *types.Transaction, amount types.Currency, useUnconfirmedTxns bool) ([]types.Hash256, []types.Transaction, error) - WalletPrepareForm(ctx context.Context, renterAddress types.Address, renterKey types.PublicKey, renterFunds, hostCollateral types.Currency, hostKey types.PublicKey, hostSettings rhpv2.HostSettings, endHeight uint64) (txns []types.Transaction, err error) - WalletPrepareRenew(ctx context.Context, revision types.FileContractRevision, hostAddress, renterAddress types.Address, renterKey 
types.PrivateKey, renterFunds, minNewCollateral types.Currency, pt rhpv3.HostPriceTable, endHeight, windowSize, expectedStorage uint64) (api.WalletPrepareRenewResponse, error) - WalletSign(ctx context.Context, txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) error - - Bucket(_ context.Context, bucket string) (api.Bucket, error) + Syncer + Wallet } + // An AccountStore manages ephemaral accounts state. AccountStore interface { Accounts(ctx context.Context) ([]api.Account, error) AddBalance(ctx context.Context, id rhpv3.Account, hk types.PublicKey, amt *big.Int) error @@ -120,11 +98,21 @@ type ( } ContractStore interface { - ContractLocker - + Contract(ctx context.Context, id types.FileContractID) (api.ContractMetadata, error) + ContractSize(ctx context.Context, id types.FileContractID) (api.ContractSize, error) + ContractRoots(ctx context.Context, id types.FileContractID) ([]types.Hash256, []types.Hash256, error) + Contracts(ctx context.Context, opts api.ContractsOpts) ([]api.ContractMetadata, error) RenewedContract(ctx context.Context, renewedFrom types.FileContractID) (api.ContractMetadata, error) } + HostStore interface { + RecordHostScans(ctx context.Context, scans []hostdb.HostScan) error + RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error + RecordContractSpending(ctx context.Context, records []api.ContractSpendingRecord) error + + Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) + } + ObjectStore interface { // NOTE: used for download DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) error @@ -140,9 +128,34 @@ type ( MarkPackedSlabsUploaded(ctx context.Context, slabs []api.UploadedPackedSlab) error TrackUpload(ctx context.Context, uID api.UploadID) error UpdateSlab(ctx context.Context, s object.Slab, contractSet string) error + + // NOTE: used by worker + Bucket(_ context.Context, bucket string) (api.Bucket, error) + Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (api.ObjectsResponse, error) + DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) error + MultipartUpload(ctx context.Context, uploadID string) (resp api.MultipartUpload, err error) + PackedSlabsForUpload(ctx context.Context, lockingDuration time.Duration, minShards, totalShards uint8, set string, limit int) ([]api.PackedSlab, error) + } + + SettingStore interface { + GougingParams(ctx context.Context) (api.GougingParams, error) + UploadParams(ctx context.Context) (api.UploadParams, error) } - consensusState interface { + Syncer interface { + BroadcastTransaction(ctx context.Context, txns []types.Transaction) error + SyncerPeers(ctx context.Context) (resp []string, err error) + } + + Wallet interface { + WalletDiscard(ctx context.Context, txn types.Transaction) error + WalletFund(ctx context.Context, txn *types.Transaction, amount types.Currency, useUnconfirmedTxns bool) ([]types.Hash256, []types.Transaction, error) + WalletPrepareForm(ctx context.Context, renterAddress types.Address, renterKey types.PublicKey, renterFunds, hostCollateral types.Currency, hostKey types.PublicKey, hostSettings rhpv2.HostSettings, endHeight uint64) (txns []types.Transaction, err error) + WalletPrepareRenew(ctx context.Context, revision types.FileContractRevision, hostAddress, renterAddress types.Address, renterKey types.PrivateKey, renterFunds, minNewCollateral types.Currency, pt rhpv3.HostPriceTable, endHeight, windowSize, expectedStorage uint64) 
(api.WalletPrepareRenewResponse, error) + WalletSign(ctx context.Context, txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) error + } + + ConsensusState interface { ConsensusState(ctx context.Context) (api.ConsensusState, error) } ) @@ -183,7 +196,8 @@ func (w *worker) deriveRenterKey(hostKey types.PublicKey) types.PrivateKey { // A worker talks to Sia hosts to perform contract and storage operations within // a renterd system. type worker struct { - alerts alerts.Alerter + alerts alerts.Alerter + allowPrivateIPs bool id string bus Bus @@ -198,9 +212,8 @@ type worker struct { transportPoolV3 *transportPoolV3 uploadsMu sync.Mutex - uploadingPackedSlabs map[string]bool + uploadingPackedSlabs map[string]struct{} - hostInteractionRecorder HostInteractionRecorder contractSpendingRecorder ContractSpendingRecorder contractLockingDuration time.Duration @@ -210,6 +223,15 @@ type worker struct { logger *zap.SugaredLogger } +func (w *worker) isStopped() bool { + select { + case <-w.shutdownCtx.Done(): + return true + default: + } + return false +} + func (w *worker) withRevision(ctx context.Context, fetchTimeout time.Duration, fcid types.FileContractID, hk types.PublicKey, siamuxAddr string, lockPriority int, fn func(rev types.FileContractRevision) error) error { return w.withContractLock(ctx, fcid, lockPriority, func() error { h := w.Host(hk, fcid, siamuxAddr) @@ -238,13 +260,6 @@ func (w *worker) rhpScanHandler(jc jape.Context) { return } - // apply the timeout - if rsr.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, time.Duration(rsr.Timeout)) - defer cancel() - } - // only scan hosts if we are online peers, err := w.bus.SyncerPeers(ctx) if jc.Check("failed to fetch peers from bus", err) != nil { @@ -257,7 +272,7 @@ func (w *worker) rhpScanHandler(jc jape.Context) { // scan host var errStr string - settings, priceTable, elapsed, err := w.scanHost(ctx, rsr.HostKey, rsr.HostIP) + settings, priceTable, elapsed, err := w.scanHost(ctx, time.Duration(rsr.Timeout), rsr.HostKey, rsr.HostIP) if err != nil { errStr = err.Error() } @@ -342,11 +357,13 @@ func (w *worker) rhpPriceTableHandler(jc jape.Context) { var err error var hpt hostdb.HostPriceTable defer func() { - w.hostInteractionRecorder.RecordPriceTableUpdate(hostdb.PriceTableUpdate{ - HostKey: rptr.HostKey, - Success: isSuccessfulInteraction(err), - Timestamp: time.Now(), - PriceTable: hpt, + w.bus.RecordPriceTables(ctx, []hostdb.PriceTableUpdate{ + { + HostKey: rptr.HostKey, + Success: isSuccessfulInteraction(err), + Timestamp: time.Now(), + PriceTable: hpt, + }, }) }() @@ -837,6 +854,47 @@ func (w *worker) uploadsStatsHandlerGET(jc jape.Context) { }) } +func (w *worker) objectsHandlerHEAD(jc jape.Context) { + // parse bucket + bucket := api.DefaultBucketName + if jc.DecodeForm("bucket", &bucket) != nil { + return + } + + // parse path + path := jc.PathParam("path") + if path == "" || strings.HasSuffix(path, "/") { + jc.Error(errors.New("HEAD requests can only be performed on objects, not directories"), http.StatusBadRequest) + return + } + + // fetch object metadata + res, err := w.bus.Object(jc.Request.Context(), bucket, path, api.GetObjectOptions{ + OnlyMetadata: true, + }) + if errors.Is(err, api.ErrObjectNotFound) { + jc.Error(err, http.StatusNotFound) + return + } else if err != nil { + jc.Error(err, http.StatusInternalServerError) + return + } else if res.Object == nil { + jc.Error(api.ErrObjectNotFound, http.StatusInternalServerError) // should never happen but checking because we 
deref. later + return + } + + // serve the content to ensure we're setting the exact same headers as we + // would for a GET request + status, err := serveContent(jc.ResponseWriter, jc.Request, *res.Object, func(io.Writer, int64, int64) error { return nil }) + if errors.Is(err, http_range.ErrInvalid) || errors.Is(err, errMultiRangeNotSupported) { + jc.Error(err, http.StatusBadRequest) + } else if errors.Is(err, http_range.ErrNoOverlap) { + jc.Error(err, http.StatusRequestedRangeNotSatisfiable) + } else if err != nil { + jc.Error(err, status) + } +} + func (w *worker) objectsHandlerGET(jc jape.Context) { jc.Custom(nil, []api.ObjectMetadata{}) @@ -919,10 +977,12 @@ func (w *worker) objectsHandlerGET(jc jape.Context) { // create a download function downloadFn := func(wr io.Writer, offset, length int64) (err error) { ctx = WithGougingChecker(ctx, w.bus, gp) - err = w.downloadManager.DownloadObject(ctx, wr, res.Object.Object, uint64(offset), uint64(length), contracts) + err = w.downloadManager.DownloadObject(ctx, wr, *res.Object.Object, uint64(offset), uint64(length), contracts) if err != nil { w.logger.Error(err) - if !errors.Is(err, ErrShuttingDown) { + if !errors.Is(err, ErrShuttingDown) && + !errors.Is(err, errDownloadCancelled) && + !errors.Is(err, io.ErrClosedPipe) { w.registerAlert(newDownloadFailedAlert(bucket, path, prefix, marker, offset, length, int64(len(contracts)), err)) } } @@ -1038,7 +1098,7 @@ func (w *worker) objectsHandlerPUT(jc jape.Context) { if err := jc.Check("couldn't upload object", err); err != nil { if err != nil { w.logger.Error(err) - if !errors.Is(err, ErrShuttingDown) { + if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errUploadInterrupted) { w.registerAlert(newUploadFailedAlert(bucket, path, up.ContractSet, mimeType, rs.MinShards, rs.TotalShards, len(contracts), up.UploadPacking, false, err)) } } @@ -1126,15 +1186,7 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { return } - // make sure only one of the following is set - var disablePreshardingEncryption bool - if jc.DecodeForm("disablepreshardingencryption", &disablePreshardingEncryption) != nil { - return - } - if !disablePreshardingEncryption && jc.Request.FormValue("offset") == "" { - jc.Error(errors.New("if presharding encryption isn't disabled, the offset needs to be set"), http.StatusBadRequest) - return - } + // get the offset var offset int if jc.DecodeForm("offset", &offset) != nil { return @@ -1143,23 +1195,30 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { return } + // fetch upload from bus + upload, err := w.bus.MultipartUpload(ctx, uploadID) + if isError(err, api.ErrMultipartUploadNotFound) { + jc.Error(err, http.StatusNotFound) + return + } else if jc.Check("failed to fetch multipart upload", err) != nil { + return + } + // built options opts := []UploadOption{ WithBlockHeight(up.CurrentHeight), WithContractSet(up.ContractSet), WithPacking(up.UploadPacking), WithRedundancySettings(up.RedundancySettings), + WithCustomKey(upload.Key), } - if disablePreshardingEncryption { - opts = append(opts, WithCustomKey(object.NoOpKey)) - } else { - upload, err := w.bus.MultipartUpload(ctx, uploadID) - if err != nil { - jc.Error(err, http.StatusBadRequest) - return - } + + // make sure only one of the following is set + if encryptionEnabled := !upload.Key.IsNoopKey(); encryptionEnabled && jc.Request.FormValue("offset") == "" { + jc.Error(errors.New("if object encryption (pre-erasure coding) wasn't disabled by creating the multipart upload with the no-op key, the offset 
needs to be set"), http.StatusBadRequest) + return + } else if encryptionEnabled { opts = append(opts, WithCustomEncryptionOffset(uint64(offset))) - opts = append(opts, WithCustomKey(upload.Key)) } // attach gouging checker to the context @@ -1177,7 +1236,7 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { if jc.Check("couldn't upload object", err) != nil { if err != nil { w.logger.Error(err) - if !errors.Is(err, ErrShuttingDown) { + if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errUploadInterrupted) { w.registerAlert(newUploadFailedAlert(bucket, path, up.ContractSet, "", rs.MinShards, rs.TotalShards, len(contracts), up.UploadPacking, true, err)) } } @@ -1272,7 +1331,7 @@ func (w *worker) stateHandlerGET(jc jape.Context) { } // New returns an HTTP handler that serves the worker API. -func New(masterKey [32]byte, id string, b Bus, contractLockingDuration, busFlushInterval, downloadOverdriveTimeout, uploadOverdriveTimeout time.Duration, downloadMaxOverdrive, downloadMaxMemory, uploadMaxMemory, uploadMaxOverdrive uint64, allowPrivateIPs bool, l *zap.Logger) (*worker, error) { +func New(masterKey [32]byte, id string, b Bus, contractLockingDuration, busFlushInterval, downloadOverdriveTimeout, uploadOverdriveTimeout time.Duration, downloadMaxOverdrive, uploadMaxOverdrive, downloadMaxMemory, uploadMaxMemory uint64, allowPrivateIPs bool, l *zap.Logger) (*worker, error) { if contractLockingDuration == 0 { return nil, errors.New("contract lock duration must be positive") } @@ -1292,6 +1351,8 @@ func New(masterKey [32]byte, id string, b Bus, contractLockingDuration, busFlush return nil, errors.New("uploadMaxMemory cannot be 0") } + l = l.Named("worker").Named(id) + ctx, cancel := context.WithCancel(context.Background()) w := &worker{ alerts: alerts.WithOrigin(b, fmt.Sprintf("worker.%s", id)), allowPrivateIPs: allowPrivateIPs, @@ -1299,25 +1360,21 @@ func New(masterKey [32]byte, id string, b Bus, contractLockingDuration, busFlush id: id, bus: b, masterKey: masterKey, - logger: l.Sugar().Named("worker").Named(id), + logger: l.Sugar(), startTime: time.Now(), - uploadingPackedSlabs: make(map[string]bool), + uploadingPackedSlabs: make(map[string]struct{}), + shutdownCtx: ctx, + shutdownCtxCancel: cancel, } - ctx, cancel := context.WithCancel(context.Background()) - ctx = context.WithValue(ctx, keyInteractionRecorder, w) - w.shutdownCtx = ctx - w.shutdownCtxCancel = cancel - w.initAccounts(b) w.initPriceTables() w.initTransportPool() - w.initDownloadManager(downloadMaxMemory, downloadMaxOverdrive, downloadOverdriveTimeout, l.Sugar().Named("downloadmanager")) - w.initUploadManager(uploadMaxMemory, uploadMaxOverdrive, uploadOverdriveTimeout, l.Sugar().Named("uploadmanager")) + w.initDownloadManager(downloadMaxMemory, downloadMaxOverdrive, downloadOverdriveTimeout, l.Named("downloadmanager").Sugar()) + w.initUploadManager(uploadMaxMemory, uploadMaxOverdrive, uploadOverdriveTimeout, l.Named("uploadmanager").Sugar()) w.initContractSpendingRecorder(busFlushInterval) - w.initHostInteractionRecorder(busFlushInterval) return w, nil } @@ -1344,6 +1401,7 @@ func (w *worker) Handler() http.Handler { "GET /stats/uploads": w.uploadsStatsHandlerGET, "POST /slab/migrate": w.slabMigrateHandler, + "HEAD /objects/*path": w.objectsHandlerHEAD, "GET /objects/*path": w.objectsHandlerGET, "PUT /objects/*path": w.objectsHandlerPUT, "DELETE /objects/*path": w.objectsHandlerDELETE, @@ -1364,27 +1422,34 @@ func (w *worker) Shutdown(ctx context.Context) error { w.uploadManager.Stop() // stop recorders - 
w.hostInteractionRecorder.Stop(ctx) w.contractSpendingRecorder.Stop(ctx) return nil } -func (w *worker) scanHost(ctx context.Context, hostKey types.PublicKey, hostIP string) (rhpv2.HostSettings, rhpv3.HostPriceTable, time.Duration, error) { +func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey types.PublicKey, hostIP string) (rhpv2.HostSettings, rhpv3.HostPriceTable, time.Duration, error) { + logger := w.logger.With("host", hostKey).With("hostIP", hostIP).With("timeout", timeout) // prepare a helper for scanning scan := func() (rhpv2.HostSettings, rhpv3.HostPriceTable, time.Duration, error) { + // apply timeout + scanCtx := ctx + var cancel context.CancelFunc + if timeout > 0 { + scanCtx, cancel = context.WithTimeout(scanCtx, timeout) + defer cancel() + } // resolve hostIP. We don't want to scan hosts on private networks. if !w.allowPrivateIPs { host, _, err := net.SplitHostPort(hostIP) if err != nil { return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, err } - addrs, err := (&net.Resolver{}).LookupIPAddr(ctx, host) + addrs, err := (&net.Resolver{}).LookupIPAddr(scanCtx, host) if err != nil { return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, err } for _, addr := range addrs { if isPrivateIP(addr.IP) { - return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, errors.New("host is on a private network") + return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, api.ErrHostOnPrivateNetwork } } } @@ -1392,13 +1457,15 @@ func (w *worker) scanHost(ctx context.Context, hostKey types.PublicKey, hostIP s // fetch the host settings start := time.Now() var settings rhpv2.HostSettings - err := w.withTransportV2(ctx, hostKey, hostIP, func(t *rhpv2.Transport) (err error) { - if settings, err = RPCSettings(ctx, t); err == nil { - // NOTE: we overwrite the NetAddress with the host address here since we - // just used it to dial the host we know it's valid - settings.NetAddress = hostIP + err := w.withTransportV2(scanCtx, hostKey, hostIP, func(t *rhpv2.Transport) error { + var err error + if settings, err = RPCSettings(scanCtx, t); err != nil { + return fmt.Errorf("failed to fetch host settings: %w", err) } - return err + // NOTE: we overwrite the NetAddress with the host address here + // since we just used it to dial the host we know it's valid + settings.NetAddress = hostIP + return nil }) elapsed := time.Since(start) if err != nil { @@ -1407,9 +1474,9 @@ func (w *worker) scanHost(ctx context.Context, hostKey types.PublicKey, hostIP s // fetch the host pricetable var pt rhpv3.HostPriceTable - err = w.transportPoolV3.withTransportV3(ctx, hostKey, settings.SiamuxAddr(), func(ctx context.Context, t *transportV3) error { + err = w.transportPoolV3.withTransportV3(scanCtx, hostKey, settings.SiamuxAddr(), func(ctx context.Context, t *transportV3) error { if hpt, err := RPCPriceTable(ctx, t, func(pt rhpv3.HostPriceTable) (rhpv3.PaymentMethod, error) { return nil, nil }); err != nil { - return err + return fmt.Errorf("failed to fetch host price table: %w", err) } else { pt = hpt.HostPriceTable return nil @@ -1424,11 +1491,16 @@ func (w *worker) scanHost(ctx context.Context, hostKey types.PublicKey, hostIP s // scan: second try select { case <-ctx.Done(): + return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, ctx.Err() case <-time.After(time.Second): } settings, pt, duration, err = scan() + + logger = logger.With("elapsed", duration) if err == nil { - w.logger.Debug("successfully scanned host %v after retry", hostKey) + logger.Debug("successfully scanned host on second try") + 
} else if !isErrHostUnreachable(err) { + logger.Debugw("failed to scan host", zap.Error(err)) } } @@ -1436,19 +1508,28 @@ func (w *worker) scanHost(ctx context.Context, hostKey types.PublicKey, hostIP s // just in case since recording a failed scan might have serious // repercussions select { - case <-w.shutdownCtx.Done(): - return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, w.shutdownCtx.Err() + case <-ctx.Done(): + return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, ctx.Err() default: } - // record host scan - w.hostInteractionRecorder.RecordHostScan(hostdb.HostScan{ - HostKey: hostKey, - Success: isSuccessfulInteraction(err), - Timestamp: time.Now(), - Settings: settings, - PriceTable: pt, + // record host scan - make sure this isn't interrupted by the same context + // used to time out the scan itself because otherwise we won't be able to + // record scans that timed out. + recordCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + scanErr := w.bus.RecordHostScans(recordCtx, []hostdb.HostScan{ + { + HostKey: hostKey, + Success: isSuccessfulInteraction(err), + Timestamp: time.Now(), + Settings: settings, + PriceTable: pt, + }, }) + if scanErr != nil { + logger.Errorw("failed to record host scan", zap.Error(scanErr)) + } return settings, pt, duration, err } @@ -1464,6 +1545,17 @@ func discardTxnOnErr(ctx context.Context, bus Bus, l *zap.SugaredLogger, txn typ cancel() } +func isErrHostUnreachable(err error) bool { + return isError(err, os.ErrDeadlineExceeded) || + isError(err, context.DeadlineExceeded) || + isError(err, api.ErrHostOnPrivateNetwork) || + isError(err, errors.New("no route to host")) || + isError(err, errors.New("no such host")) || + isError(err, errors.New("connection refused")) || + isError(err, errors.New("unknown port")) || + isError(err, errors.New("cannot assign requested address")) +} + func isErrDuplicateTransactionSet(err error) bool { return err != nil && strings.Contains(err.Error(), modules.ErrDuplicateTransactionSet.Error()) } diff --git a/worker/worker_test.go b/worker/worker_test.go new file mode 100644 index 000000000..706fae14e --- /dev/null +++ b/worker/worker_test.go @@ -0,0 +1,136 @@ +package worker + +import ( + "context" + "fmt" + "time" + + rhpv2 "go.sia.tech/core/rhp/v2" + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" + "go.uber.org/zap" + "golang.org/x/crypto/blake2b" + "lukechampine.com/frand" +) + +type ( + testWorker struct { + tt test.TT + *worker + + cs *contractStoreMock + os *objectStoreMock + hs *hostStoreMock + + dlmm *memoryManagerMock + ulmm *memoryManagerMock + + hm *testHostManager + } +) + +func newTestWorker(t test.TestingCommon) *testWorker { + // create bus dependencies + cs := newContractStoreMock() + os := newObjectStoreMock(testBucket) + hs := newHostStoreMock() + + // create worker dependencies + b := newBusMock(cs, hs, os) + dlmm := newMemoryManagerMock() + ulmm := newMemoryManagerMock() + + // create worker + w, err := New(blake2b.Sum256([]byte("testwork")), "test", b, time.Second, time.Second, time.Second, time.Second, 0, 0, 1, 1, false, zap.NewNop()) + if err != nil { + t.Fatal(err) + } + + // override managers + hm := newTestHostManager(t) + w.priceTables.hm = hm + w.downloadManager.hm = hm + w.downloadManager.mm = dlmm + w.uploadManager.hm = hm + w.uploadManager.mm = ulmm + + return &testWorker{ + test.NewTT(t), + w, + cs, + os, + hs, + dlmm, + ulmm, + hm, + } +} + +func (w *testWorker) AddHosts(n int) (added []*testHost) { + for i 
:= 0; i < n; i++ { + added = append(added, w.AddHost()) + } + return +} + +func (w *testWorker) AddHost() *testHost { + h := w.hs.addHost() + c := w.cs.addContract(h.hk) + host := newTestHost(h, c) + w.hm.addHost(host) + return host +} + +func (w *testWorker) BlockUploads() func() { + select { + case <-w.ulmm.memBlockChan: + case <-time.After(time.Second): + w.tt.Fatal("already blocking") + } + + blockChan := make(chan struct{}) + w.ulmm.memBlockChan = blockChan + return func() { close(blockChan) } +} + +func (w *testWorker) BlockAsyncPackedSlabUploads(up uploadParameters) { + w.uploadsMu.Lock() + defer w.uploadsMu.Unlock() + key := fmt.Sprintf("%d-%d_%s", up.rs.MinShards, up.rs.TotalShards, up.contractSet) + w.uploadingPackedSlabs[key] = struct{}{} +} + +func (w *testWorker) UnblockAsyncPackedSlabUploads(up uploadParameters) { + w.uploadsMu.Lock() + defer w.uploadsMu.Unlock() + key := fmt.Sprintf("%d-%d_%s", up.rs.MinShards, up.rs.TotalShards, up.contractSet) + delete(w.uploadingPackedSlabs, key) +} + +func (w *testWorker) Contracts() []api.ContractMetadata { + metadatas, err := w.cs.Contracts(context.Background(), api.ContractsOpts{}) + if err != nil { + w.tt.Fatal(err) + } + return metadatas +} + +func (w *testWorker) RenewContract(hk types.PublicKey) *contractMock { + h := w.hm.hosts[hk] + if h == nil { + w.tt.Fatal("host not found") + } + + renewal, err := w.cs.renewContract(hk) + if err != nil { + w.tt.Fatal(err) + } + return renewal +} + +func newTestSector() (*[rhpv2.SectorSize]byte, types.Hash256) { + var sector [rhpv2.SectorSize]byte + frand.Read(sector[:]) + return §or, rhpv2.SectorRoot(§or) +}
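// For reference, a minimal client-side sketch of calling the new
// "HEAD /objects/*path" route registered in worker.go above. Only the route
// itself and the "bucket" form value come from this diff; the worker address,
// the "/api/worker" prefix, the basic-auth password and the object path below
// are illustrative assumptions, not part of the change.
package main

import (
	"fmt"
	"net/http"
)

// headObject issues a HEAD request for an object and returns the response
// headers, which the handler populates exactly like it would for a GET,
// without sending a body.
func headObject(workerAddr, apiPassword, bucket, path string) (http.Header, error) {
	url := fmt.Sprintf("%s/objects/%s?bucket=%s", workerAddr, path, bucket)
	req, err := http.NewRequest(http.MethodHead, url, nil)
	if err != nil {
		return nil, err
	}
	// assumption: the worker API is protected by basic auth with an empty user
	req.SetBasicAuth("", apiPassword)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status %v", resp.Status)
	}
	return resp.Header, nil
}

func main() {
	// assumption: address, password, bucket and path are placeholders for a local node
	hdr, err := headObject("http://localhost:9980/api/worker", "test", "default", "foo")
	if err != nil {
		panic(err)
	}
	fmt.Println(hdr.Get("Content-Type"), hdr.Get("Content-Length"))
}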