From 7a0446c7aa6d1b31e6039606b44de842ecb3c4b0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Feb 2024 01:46:23 +0000 Subject: [PATCH 001/172] build(deps): bump golang.org/x/crypto from 0.18.0 to 0.19.0 Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.18.0 to 0.19.0. - [Commits](https://github.com/golang/crypto/compare/v0.18.0...v0.19.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index bfa1a7511..ff700196a 100644 --- a/go.mod +++ b/go.mod @@ -21,8 +21,8 @@ require ( go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca go.sia.tech/web/renterd v0.44.0 go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.18.0 - golang.org/x/term v0.16.0 + golang.org/x/crypto v0.19.0 + golang.org/x/term v0.17.0 gopkg.in/yaml.v3 v3.0.1 gorm.io/driver/mysql v1.5.2 gorm.io/driver/sqlite v1.5.4 @@ -77,7 +77,7 @@ require ( go.sia.tech/web v0.0.0-20231213145933-3f175a86abff // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/net v0.19.0 // indirect - golang.org/x/sys v0.16.0 // indirect + golang.org/x/sys v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.16.1 // indirect diff --git a/go.sum b/go.sum index 54719eabd..5e2fa38d9 100644 --- a/go.sum +++ b/go.sum @@ -275,8 +275,8 @@ golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= @@ -330,16 +330,16 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210421210424-b80969c67360/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term 
v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= From 6b53986acd8792ef16a7c2cb350b406bc2641832 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Feb 2024 08:03:00 +0000 Subject: [PATCH 002/172] build(deps): bump gorm.io/driver/sqlite from 1.5.4 to 1.5.5 Bumps [gorm.io/driver/sqlite](https://github.com/go-gorm/sqlite) from 1.5.4 to 1.5.5. - [Commits](https://github.com/go-gorm/sqlite/compare/v1.5.4...v1.5.5) --- updated-dependencies: - dependency-name: gorm.io/driver/sqlite dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ff700196a..084e6516a 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( golang.org/x/term v0.17.0 gopkg.in/yaml.v3 v3.0.1 gorm.io/driver/mysql v1.5.2 - gorm.io/driver/sqlite v1.5.4 + gorm.io/driver/sqlite v1.5.5 gorm.io/gorm v1.25.7 lukechampine.com/frand v1.4.2 ) diff --git a/go.sum b/go.sum index 5e2fa38d9..d5a601d4b 100644 --- a/go.sum +++ b/go.sum @@ -391,8 +391,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/mysql v1.5.2 h1:QC2HRskSE75wBuOxe0+iCkyJZ+RqpudsQtqkp+IMuXs= gorm.io/driver/mysql v1.5.2/go.mod h1:pQLhh1Ut/WUAySdTHwBpBv6+JKcj+ua4ZFx1QQTBzb8= -gorm.io/driver/sqlite v1.5.4 h1:IqXwXi8M/ZlPzH/947tn5uik3aYQslP9BVveoax0nV0= -gorm.io/driver/sqlite v1.5.4/go.mod h1:qxAuCol+2r6PannQDpOP1FP6ag3mKi4esLnB/jHed+4= +gorm.io/driver/sqlite v1.5.5 h1:7MDMtUZhV065SilG62E0MquljeArQZNfJnjd9i9gx3E= +gorm.io/driver/sqlite v1.5.5/go.mod h1:6NgQ7sQWAIFsPrJJl1lSNSu2TABh0ZZ/zm5fosATavE= gorm.io/gorm v1.25.2-0.20230530020048-26663ab9bf55/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= gorm.io/gorm v1.25.7 h1:VsD6acwRjz2zFxGO50gPO6AkNs7KKnvfzUjHQhZDz/A= gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= From 279f6206de1f49db4146d5dbfb01c72070016e7f Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 12 Feb 2024 11:13:50 +0100 Subject: [PATCH 003/172] stores: add TestUploadObject --- stores/metadata_test.go | 220 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 220 insertions(+) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 18f34dee4..2161e7081 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -4042,3 +4042,223 @@ func TestUpsertSectors(t *testing.T) { } } } + +func TestUploadObject(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + minShards, totalShards := 10, 30 + + 
// create 90 hosts, enough for 3 slabs with 30 each + hks, err := ss.addTestHosts(3 * totalShards) + if err != nil { + t.Fatal(err) + } + + // create one contract each + fcids, _, err := ss.addTestContracts(hks) + if err != nil { + t.Fatal(err) + } + + // create an object + obj := object.Object{ + Key: object.GenerateEncryptionKey(), + } + // add 2 slabs + for i := 0; i < 2; i++ { + obj.Slabs = append(obj.Slabs, object.SlabSlice{ + Offset: 0, + Length: uint32(minShards) * rhpv2.SectorSize, + Slab: object.Slab{ + Key: object.GenerateEncryptionKey(), + MinShards: uint8(minShards), + }, + }) + } + // 30 shards each + for i := 0; i < len(obj.Slabs); i++ { + for j := 0; j < totalShards; j++ { + obj.Slabs[i].Shards = append(obj.Slabs[i].Shards, object.Sector{ + Contracts: map[types.PublicKey][]types.FileContractID{ + hks[i*totalShards+j]: []types.FileContractID{ + fcids[i*totalShards+j], + }, + }, + LatestHost: hks[i*totalShards+j], + Root: frand.Entropy256(), + }) + } + } + + // add the object + _, err = ss.addTestObject("1", obj) + if err != nil { + t.Fatal(err) + } + + // fetch the object + var dbObj dbObject + if err := ss.db.Where("db_bucket_id", 1).Take(&dbObj).Error; err != nil { + t.Fatal(err) + } else if dbObj.ID != 1 { + t.Fatal("unexpected id", dbObj.ID) + } else if dbObj.DBBucketID != 1 { + t.Fatal("bucket id mismatch", dbObj.DBBucketID) + } else if dbObj.ObjectID != "1" { + t.Fatal("object id mismatch", dbObj.ObjectID) + } else if dbObj.Health != 1 { + t.Fatal("health mismatch", dbObj.Health) + } else if dbObj.Size != obj.TotalSize() { + t.Fatal("size mismatch", dbObj.Size) + } + + // fetch its slices + var dbSlices []dbSlice + if err := ss.db.Where("db_object_id", dbObj.ID).Find(&dbSlices).Error; err != nil { + t.Fatal(err) + } + for i, dbSlice := range dbSlices { + if dbSlice.ID != uint(i+1) { + t.Fatal("unexpected id", dbSlice.ID) + } else if dbSlice.ObjectIndex != uint(i+1) { + t.Fatal("unexpected object index", dbSlice.ObjectIndex) + } else if dbSlice.Offset != 0 || dbSlice.Length != uint32(minShards)*rhpv2.SectorSize { + t.Fatal("invalid offset/length", dbSlice.Offset, dbSlice.Length) + } + + // fetch the slab + var dbSlab dbSlab + key, _ := obj.Slabs[i].Key.MarshalBinary() + if err := ss.db.Where("id", dbSlice.DBSlabID).Take(&dbSlab).Error; err != nil { + t.Fatal(err) + } else if dbSlab.ID != uint(i+1) { + t.Fatal("unexpected id", dbSlab.ID) + } else if dbSlab.DBContractSetID != 1 { + t.Fatal("invalid contract set id", dbSlab.DBContractSetID) + } else if dbSlab.Health != 1 { + t.Fatal("invalid health", dbSlab.Health) + } else if dbSlab.HealthValidUntil != 0 { + t.Fatal("invalid health validity", dbSlab.HealthValidUntil) + } else if dbSlab.MinShards != uint8(minShards) { + t.Fatal("invalid minShards", dbSlab.MinShards) + } else if dbSlab.TotalShards != uint8(totalShards) { + t.Fatal("invalid totalShards", dbSlab.TotalShards) + } else if !bytes.Equal(dbSlab.Key, key) { + t.Fatal("wrong key") + } + + // fetch the sectors + var dbSectors []dbSector + if err := ss.db.Where("db_slab_id", dbSlab.ID).Find(&dbSectors).Error; err != nil { + t.Fatal(err) + } + for j, dbSector := range dbSectors { + if dbSector.ID != uint(i*totalShards+j+1) { + t.Fatal("invalid id", dbSector.ID) + } else if dbSector.DBSlabID != dbSlab.ID { + t.Fatal("invalid slab id", dbSector.DBSlabID) + } else if dbSector.LatestHost != publicKey(hks[i*totalShards+j]) { + t.Fatal("invalid host") + } else if !bytes.Equal(dbSector.Root, obj.Slabs[i].Shards[j].Root[:]) { + t.Fatal("invalid root") + } + } + } + + obj2 := 
object.Object{
+		Key: object.GenerateEncryptionKey(),
+	}
+	// add 1 slab with 30 shards
+	obj2.Slabs = append(obj2.Slabs, object.SlabSlice{
+		Offset: 0,
+		Length: uint32(minShards) * rhpv2.SectorSize,
+		Slab: object.Slab{
+			Key:       object.GenerateEncryptionKey(),
+			MinShards: uint8(minShards),
+		},
+	})
+	// add 30 shards
+	for i := 0; i < totalShards; i++ {
+		obj2.Slabs[0].Shards = append(obj2.Slabs[0].Shards, object.Sector{
+			Contracts: map[types.PublicKey][]types.FileContractID{
+				hks[len(obj.Slabs)*totalShards+i]: {
+					fcids[len(obj.Slabs)*totalShards+i],
+				},
+			},
+			LatestHost: hks[len(obj.Slabs)*totalShards+i],
+			Root:       frand.Entropy256(),
+		})
+	}
+	// add the second slab of the first object too
+	obj2.Slabs = append(obj2.Slabs, obj.Slabs[1])
+
+	// add the object
+	_, err = ss.addTestObject("2", obj2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// fetch the object
+	var dbObj2 dbObject
+	if err := ss.db.Where("db_bucket_id", 1).
+		Where("object_id", "2").
+		Take(&dbObj2).Error; err != nil {
+		t.Fatal(err)
+	} else if dbObj2.ID != 2 {
+		t.Fatal("unexpected id", dbObj2.ID)
+	} else if dbObj2.Size != obj2.TotalSize() {
+		t.Fatal("size mismatch", dbObj2.Size)
+	}
+
+	// fetch its slices
+	var dbSlices2 []dbSlice
+	if err := ss.db.Where("db_object_id", dbObj2.ID).Find(&dbSlices2).Error; err != nil {
+		t.Fatal(err)
+	}
+
+	// check the first one
+	dbSlice2 := dbSlices2[0]
+	if dbSlice2.ID != uint(len(dbSlices)+1) {
+		t.Fatal("unexpected id", dbSlice2.ID)
+	} else if dbSlice2.ObjectIndex != uint(1) {
+		t.Fatal("unexpected object index", dbSlice2.ObjectIndex)
+	} else if dbSlice2.Offset != 0 || dbSlice2.Length != uint32(minShards)*rhpv2.SectorSize {
+		t.Fatal("invalid offset/length", dbSlice2.Offset, dbSlice2.Length)
+	}
+
+	// fetch the slab
+	var dbSlab2 dbSlab
+	key, _ := obj2.Slabs[0].Key.MarshalBinary()
+	if err := ss.db.Where("id", dbSlice2.DBSlabID).Take(&dbSlab2).Error; err != nil {
+		t.Fatal(err)
+	} else if dbSlab2.ID != uint(len(dbSlices)+1) {
+		t.Fatal("unexpected id", dbSlab2.ID)
+	} else if dbSlab2.DBContractSetID != 1 {
+		t.Fatal("invalid contract set id", dbSlab2.DBContractSetID)
+	} else if !bytes.Equal(dbSlab2.Key, key) {
+		t.Fatal("wrong key")
+	}
+
+	// fetch the sectors
+	var dbSectors2 []dbSector
+	if err := ss.db.Where("db_slab_id", dbSlab2.ID).Find(&dbSectors2).Error; err != nil {
+		t.Fatal(err)
+	}
+	for j, dbSector := range dbSectors2 {
+		if dbSector.ID != uint((len(obj.Slabs))*totalShards+j+1) {
+			t.Fatal("invalid id", dbSector.ID)
+		} else if dbSector.DBSlabID != dbSlab2.ID {
+			t.Fatal("invalid slab id", dbSector.DBSlabID)
+		} else if dbSector.LatestHost != publicKey(hks[(len(obj.Slabs))*totalShards+j]) {
+			t.Fatal("invalid host")
+		} else if !bytes.Equal(dbSector.Root, obj2.Slabs[0].Shards[j].Root[:]) {
+			t.Fatal("invalid root")
+		}
+	}
+
+	// the second slab of obj2 should be the same as the first in obj
+	if dbSlices2[1].DBSlabID != 2 {
+		t.Fatal("wrong slab")
+	}
+}

From c9d27dd07f9b70b6b33113a5f7c85a7fe4fa0142 Mon Sep 17 00:00:00 2001
From: Chris Schinnerl
Date: Mon, 12 Feb 2024 11:35:36 +0100
Subject: [PATCH 004/172] stores: extend test to cover contract sectors

---
 stores/metadata_test.go | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/stores/metadata_test.go b/stores/metadata_test.go
index 2161e7081..886b4e518 100644
--- a/stores/metadata_test.go
+++ b/stores/metadata_test.go
@@ -4117,6 +4117,8 @@ func TestUploadObject(t *testing.T) {
 	var dbSlices []dbSlice
 	if err := ss.db.Where("db_object_id", dbObj.ID).Find(&dbSlices).Error; err != 
nil { t.Fatal(err) + } else if len(dbSlices) != 2 { + t.Fatal("invalid number of slices", len(dbSlices)) } for i, dbSlice := range dbSlices { if dbSlice.ID != uint(i+1) { @@ -4152,6 +4154,8 @@ func TestUploadObject(t *testing.T) { var dbSectors []dbSector if err := ss.db.Where("db_slab_id", dbSlab.ID).Find(&dbSectors).Error; err != nil { t.Fatal(err) + } else if len(dbSectors) != totalShards { + t.Fatal("invalid number of sectors", len(dbSectors)) } for j, dbSector := range dbSectors { if dbSector.ID != uint(i*totalShards+j+1) { @@ -4215,6 +4219,8 @@ func TestUploadObject(t *testing.T) { var dbSlices2 []dbSlice if err := ss.db.Where("db_object_id", dbObj2.ID).Find(&dbSlices2).Error; err != nil { t.Fatal(err) + } else if len(dbSlices2) != 2 { + t.Fatal("invalid number of slices", len(dbSlices)) } // check the first one @@ -4244,6 +4250,8 @@ func TestUploadObject(t *testing.T) { var dbSectors2 []dbSector if err := ss.db.Where("db_slab_id", dbSlab2.ID).Find(&dbSectors2).Error; err != nil { t.Fatal(err) + } else if len(dbSectors2) != totalShards { + t.Fatal("invalid number of sectors", len(dbSectors2)) } for j, dbSector := range dbSectors2 { if dbSector.ID != uint((len(obj.Slabs))*totalShards+j+1) { @@ -4261,4 +4269,18 @@ func TestUploadObject(t *testing.T) { if dbSlices2[1].DBSlabID != 2 { t.Fatal("wrong slab") } + + var contractSectors []dbContractSector + if err := ss.db.Find(&contractSectors).Error; err != nil { + t.Fatal(err) + } else if len(contractSectors) != 3*totalShards { + t.Fatal("invalid number of contract sectors", len(contractSectors)) + } + for i, cs := range contractSectors { + if cs.DBContractID != uint(i+1) { + t.Fatal("invalid contract id") + } else if cs.DBSectorID != uint(i+1) { + t.Fatal("invalid sector id") + } + } } From 7d75df5e2eb36c72ef9c6f5a598ec8392b9440df Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 12 Feb 2024 11:36:01 +0100 Subject: [PATCH 005/172] stores: rename test --- stores/metadata_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 886b4e518..f56598289 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -4043,7 +4043,7 @@ func TestUpsertSectors(t *testing.T) { } } -func TestUploadObject(t *testing.T) { +func TestUpdateObjectReuseSlab(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() From 53994889a7db540ea7a216b884ce168eaba2dd4b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 12 Feb 2024 11:37:55 +0100 Subject: [PATCH 006/172] stores: gofmt --- stores/metadata_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index f56598289..96b06c4ec 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -4081,7 +4081,7 @@ func TestUpdateObjectReuseSlab(t *testing.T) { for j := 0; j < totalShards; j++ { obj.Slabs[i].Shards = append(obj.Slabs[i].Shards, object.Sector{ Contracts: map[types.PublicKey][]types.FileContractID{ - hks[i*totalShards+j]: []types.FileContractID{ + hks[i*totalShards+j]: { fcids[i*totalShards+j], }, }, From 122715a24421369a3bf5c3e9bd3c90c6bb0c4bc8 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 12 Feb 2024 11:44:25 +0100 Subject: [PATCH 007/172] worker: fix debug message --- worker/worker.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/worker/worker.go b/worker/worker.go index b917960d4..adbce821d 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1428,7 +1428,7 @@ func (w 
*worker) scanHost(ctx context.Context, hostKey types.PublicKey, hostIP s } settings, pt, duration, err = scan() if err == nil { - w.logger.Debug("successfully scanned host %v after retry", hostKey) + w.logger.Debugf("successfully scanned host %v after retry", hostKey) } } From 3b851fde7f0f3eb47162cd06bde548fd5bcec010 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 12 Feb 2024 16:58:40 +0100 Subject: [PATCH 008/172] autopilot: increase resolver timeout to 10s --- autopilot/ipfilter.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autopilot/ipfilter.go b/autopilot/ipfilter.go index 1844955f6..6aa244047 100644 --- a/autopilot/ipfilter.go +++ b/autopilot/ipfilter.go @@ -23,7 +23,7 @@ const ( ipCacheEntryValidity = 24 * time.Hour // resolverLookupTimeout is the timeout we apply when resolving a host's IP address - resolverLookupTimeout = 5 * time.Second + resolverLookupTimeout = 10 * time.Second ) var ( From bbb0590237c9f1393d953c4724051c29000578f6 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 13 Feb 2024 11:44:56 +0100 Subject: [PATCH 009/172] worker: fail upload/download request if uploader/downloader was stopped --- worker/downloader.go | 15 +++++++++++++-- worker/uploader.go | 16 ++++++++++++++-- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/worker/downloader.go b/worker/downloader.go index 30c855d80..4a50e75f5 100644 --- a/worker/downloader.go +++ b/worker/downloader.go @@ -20,7 +20,6 @@ const ( type ( downloader struct { - hk types.PublicKey host Host statsDownloadSpeedBytesPerMS *stats.DataPoints // keep track of this separately for stats (no decay is applied) @@ -33,6 +32,7 @@ type ( consecutiveFailures uint64 numDownloads uint64 queue []*sectorDownloadReq + stopped bool } ) @@ -55,6 +55,10 @@ func (d *downloader) PublicKey() types.PublicKey { } func (d *downloader) Stop() { + d.mu.Lock() + d.stopped = true + d.mu.Unlock() + for { download := d.pop() if download == nil { @@ -80,8 +84,15 @@ func (d *downloader) fillBatch() (batch []*sectorDownloadReq) { } func (d *downloader) enqueue(download *sectorDownloadReq) { - // enqueue the job d.mu.Lock() + // check for stopped + if d.stopped { + d.mu.Unlock() + go download.fail(errors.New("downloader stopped")) // don't block the caller + return + } + + // enqueue the job d.queue = append(d.queue, download) d.mu.Unlock() diff --git a/worker/uploader.go b/worker/uploader.go index 3ec88c6fa..e20c4dee4 100644 --- a/worker/uploader.go +++ b/worker/uploader.go @@ -36,6 +36,7 @@ type ( fcid types.FileContractID host Host queue []*sectorUploadReq + stopped bool // stats related field consecutiveFailures uint64 @@ -136,6 +137,10 @@ outer: } func (u *uploader) Stop(err error) { + u.mu.Lock() + u.stopped = true + u.mu.Unlock() + for { upload := u.pop() if upload == nil { @@ -148,12 +153,19 @@ func (u *uploader) Stop(err error) { } func (u *uploader) enqueue(req *sectorUploadReq) { + u.mu.Lock() + // check for stopped + if u.stopped { + u.mu.Unlock() + go req.fail(errors.New("uploader stopped")) // don't block the caller + return + } + // decorate the request - req.fcid = u.ContractID() + req.fcid = u.fcid req.hk = u.hk // enqueue the request - u.mu.Lock() u.queue = append(u.queue, req) u.mu.Unlock() From 49475fa38b57ce4d2df702c9731acc07264b79dd Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 13 Feb 2024 12:01:08 +0100 Subject: [PATCH 010/172] worker: add tests --- worker/downloader.go | 8 ++++++-- worker/downloader_test.go | 32 ++++++++++++++++++++++++++++++++ worker/uploader.go | 6 
+++++- worker/uploader_test.go | 32 ++++++++++++++++++++++++++++++++ 4 files changed, 75 insertions(+), 3 deletions(-) create mode 100644 worker/downloader_test.go create mode 100644 worker/uploader_test.go diff --git a/worker/downloader.go b/worker/downloader.go index 4a50e75f5..24be245fc 100644 --- a/worker/downloader.go +++ b/worker/downloader.go @@ -18,6 +18,10 @@ const ( maxConcurrentSectorsPerHost = 3 ) +var ( + errDownloaderStopped = errors.New("downloader was stopped") +) + type ( downloader struct { host Host @@ -65,7 +69,7 @@ func (d *downloader) Stop() { break } if !download.done() { - download.fail(errors.New("downloader stopped")) + download.fail(errDownloaderStopped) } } } @@ -88,7 +92,7 @@ func (d *downloader) enqueue(download *sectorDownloadReq) { // check for stopped if d.stopped { d.mu.Unlock() - go download.fail(errors.New("downloader stopped")) // don't block the caller + go download.fail(errDownloaderStopped) // don't block the caller return } diff --git a/worker/downloader_test.go b/worker/downloader_test.go new file mode 100644 index 000000000..0be4bc701 --- /dev/null +++ b/worker/downloader_test.go @@ -0,0 +1,32 @@ +package worker + +import ( + "errors" + "testing" + "time" +) + +func TestDownloaderStopped(t *testing.T) { + w := newMockWorker() + h := w.addHost() + w.dl.refreshDownloaders(w.contracts()) + + dl := w.dl.downloaders[h.PublicKey()] + dl.Stop() + + req := sectorDownloadReq{ + resps: §orResponses{ + c: make(chan struct{}), + }, + } + dl.enqueue(&req) + + select { + case <-req.resps.c: + if err := req.resps.responses[0].err; !errors.Is(err, errDownloaderStopped) { + t.Fatal("unexpected error response", err) + } + case <-time.After(time.Second): + t.Fatal("no response") + } +} diff --git a/worker/uploader.go b/worker/uploader.go index e20c4dee4..dcff27eaf 100644 --- a/worker/uploader.go +++ b/worker/uploader.go @@ -19,6 +19,10 @@ const ( sectorUploadTimeout = 60 * time.Second ) +var ( + errUploaderStopped = errors.New("uploader was stopped") +) + type ( uploader struct { os ObjectStore @@ -157,7 +161,7 @@ func (u *uploader) enqueue(req *sectorUploadReq) { // check for stopped if u.stopped { u.mu.Unlock() - go req.fail(errors.New("uploader stopped")) // don't block the caller + go req.fail(errUploaderStopped) // don't block the caller return } diff --git a/worker/uploader_test.go b/worker/uploader_test.go new file mode 100644 index 000000000..3afba85cb --- /dev/null +++ b/worker/uploader_test.go @@ -0,0 +1,32 @@ +package worker + +import ( + "context" + "errors" + "testing" + "time" +) + +func TestUploaderStopped(t *testing.T) { + w := newMockWorker() + w.addHost() + w.ul.refreshUploaders(w.contracts(), 1) + + ul := w.ul.uploaders[0] + ul.Stop(errors.New("test")) + + req := sectorUploadReq{ + responseChan: make(chan sectorUploadResp), + sector: §orUpload{ctx: context.Background()}, + } + ul.enqueue(&req) + + select { + case res := <-req.responseChan: + if !errors.Is(res.err, errUploaderStopped) { + t.Fatal("expected error response") + } + case <-time.After(time.Second): + t.Fatal("no response") + } +} From 645f93a186645ef8562da5efb1d443b961d49e5b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 13 Feb 2024 12:11:10 +0100 Subject: [PATCH 011/172] worker: increase timeout in tests for CI --- worker/downloader_test.go | 2 +- worker/uploader_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/worker/downloader_test.go b/worker/downloader_test.go index 0be4bc701..357fc2ee8 100644 --- a/worker/downloader_test.go +++ 
b/worker/downloader_test.go @@ -26,7 +26,7 @@ func TestDownloaderStopped(t *testing.T) { if err := req.resps.responses[0].err; !errors.Is(err, errDownloaderStopped) { t.Fatal("unexpected error response", err) } - case <-time.After(time.Second): + case <-time.After(10 * time.Second): t.Fatal("no response") } } diff --git a/worker/uploader_test.go b/worker/uploader_test.go index 3afba85cb..7217cbaab 100644 --- a/worker/uploader_test.go +++ b/worker/uploader_test.go @@ -26,7 +26,7 @@ func TestUploaderStopped(t *testing.T) { if !errors.Is(res.err, errUploaderStopped) { t.Fatal("expected error response") } - case <-time.After(time.Second): + case <-time.After(10 * time.Second): t.Fatal("no response") } } From afafc0d73b85296549656bc99da6863d7a2bc091 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 13 Feb 2024 16:39:57 +0100 Subject: [PATCH 012/172] alerts: add pagination to alerts endpoint and add another endpoint to dismiss all alerts at once --- alerts/alerts.go | 32 +++++++++++++++++++++++++++- bus/bus.go | 23 +++++++++++++++----- bus/client/alerts.go | 18 ++++++++++++++-- internal/testing/cluster_test.go | 36 +++++++++++++++++++++++++++++++- 4 files changed, 100 insertions(+), 9 deletions(-) diff --git a/alerts/alerts.go b/alerts/alerts.go index 4d6463fa2..c76898c8d 100644 --- a/alerts/alerts.go +++ b/alerts/alerts.go @@ -37,6 +37,7 @@ type ( Alerter interface { RegisterAlert(_ context.Context, a Alert) error DismissAlerts(_ context.Context, ids ...types.Hash256) error + DismissAllAlerts(_ context.Context) error } // Severity indicates the severity of an alert. @@ -63,6 +64,11 @@ type ( alerts map[types.Hash256]Alert webhookBroadcaster webhooks.Broadcaster } + + AlertsOpts struct { + Offset uint64 + Limit uint64 + } ) // String implements the fmt.Stringer interface. @@ -130,6 +136,17 @@ func (m *Manager) RegisterAlert(ctx context.Context, alert Alert) error { }) } +// DismissAllAlerts implements the Alerter interface. +func (m *Manager) DismissAllAlerts(ctx context.Context) error { + m.mu.Lock() + toDismiss := make([]types.Hash256, 0, len(m.alerts)) + for alertID := range m.alerts { + toDismiss = append(toDismiss, alertID) + } + m.mu.Unlock() + return m.DismissAlerts(ctx, toDismiss...) +} + // DismissAlerts implements the Alerter interface. func (m *Manager) DismissAlerts(ctx context.Context, ids ...types.Hash256) error { var dismissed []types.Hash256 @@ -159,10 +176,14 @@ func (m *Manager) DismissAlerts(ctx context.Context, ids ...types.Hash256) error } // Active returns the host's active alerts. -func (m *Manager) Active() []Alert { +func (m *Manager) Active(offset, limit uint64) []Alert { m.mu.Lock() defer m.mu.Unlock() + if offset >= uint64(len(m.alerts)) { + return nil + } + alerts := make([]Alert, 0, len(m.alerts)) for _, a := range m.alerts { alerts = append(alerts, a) @@ -170,6 +191,10 @@ func (m *Manager) Active() []Alert { sort.Slice(alerts, func(i, j int) bool { return alerts[i].Timestamp.After(alerts[j].Timestamp) }) + alerts = alerts[offset:] + if limit < uint64(len(alerts)) { + alerts = alerts[:limit] + } return alerts } @@ -213,6 +238,11 @@ func (a *originAlerter) RegisterAlert(ctx context.Context, alert Alert) error { return a.alerter.RegisterAlert(ctx, alert) } +// DismissAllAlerts implements the Alerter interface. +func (a *originAlerter) DismissAllAlerts(ctx context.Context) error { + return a.alerter.DismissAllAlerts(ctx) +} + // DismissAlerts implements the Alerter interface. 
func (a *originAlerter) DismissAlerts(ctx context.Context, ids ...types.Hash256) error {
 	return a.alerter.DismissAlerts(ctx, ids...)
diff --git a/bus/bus.go b/bus/bus.go
index d11550595..82a89826f 100644
--- a/bus/bus.go
+++ b/bus/bus.go
@@ -245,9 +245,10 @@ func (b *bus) Handler() http.Handler {
 		"POST   /account/:id/requiressync": b.accountsRequiresSyncHandlerPOST,
 		"POST   /account/:id/resetdrift":   b.accountsResetDriftHandlerPOST,
 
-		"GET    /alerts":          b.handleGETAlerts,
-		"POST   /alerts/dismiss":  b.handlePOSTAlertsDismiss,
-		"POST   /alerts/register": b.handlePOSTAlertsRegister,
+		"GET    /alerts":            b.handleGETAlerts,
+		"POST   /alerts/dismiss":    b.handlePOSTAlertsDismiss,
+		"POST   /alerts/dismissall": b.handlePOSTAlertsDismissAll,
+		"POST   /alerts/register":   b.handlePOSTAlertsRegister,
 
 		"GET    /autopilots":    b.autopilotsListHandlerGET,
 		"GET    /autopilot/:id": b.autopilotsHandlerGET,
@@ -1711,8 +1712,16 @@ func (b *bus) gougingParams(ctx context.Context) (api.GougingParams, error) {
 	}, nil
 }
 
-func (b *bus) handleGETAlerts(c jape.Context) {
-	c.Encode(b.alertMgr.Active())
+func (b *bus) handleGETAlerts(jc jape.Context) {
+	var offset, limit uint64
+	if jc.DecodeForm("offset", &offset) != nil {
+		return
+	} else if jc.DecodeForm("limit", &limit) != nil {
+		return
+	} else if limit == 0 {
+		limit = math.MaxUint64
+	}
+	jc.Encode(b.alertMgr.Active(offset, limit))
 }
 
 func (b *bus) handlePOSTAlertsDismiss(jc jape.Context) {
@@ -1723,6 +1732,10 @@ func (b *bus) handlePOSTAlertsDismiss(jc jape.Context) {
 	jc.Check("failed to dismiss alerts", b.alertMgr.DismissAlerts(jc.Request.Context(), ids...))
 }
 
+func (b *bus) handlePOSTAlertsDismissAll(jc jape.Context) {
+	jc.Check("failed to dismiss alerts", b.alertMgr.DismissAllAlerts(jc.Request.Context()))
+}
+
 func (b *bus) handlePOSTAlertsRegister(jc jape.Context) {
 	var alert alerts.Alert
 	if jc.Decode(&alert) != nil {
diff --git a/bus/client/alerts.go b/bus/client/alerts.go
index 6af68c78d..ab1d7f094 100644
--- a/bus/client/alerts.go
+++ b/bus/client/alerts.go
@@ -2,17 +2,31 @@ package client
 
 import (
 	"context"
+	"fmt"
+	"net/url"
 
 	"go.sia.tech/core/types"
 	"go.sia.tech/renterd/alerts"
 )
 
 // Alerts fetches the active alerts from the bus.
-func (c *Client) Alerts() (alerts []alerts.Alert, err error) {
-	err = c.c.GET("/alerts", &alerts)
+func (c *Client) Alerts(opts alerts.AlertsOpts) (alerts []alerts.Alert, err error) {
+	values := url.Values{}
+	if opts.Offset > 0 {
+		values.Set("offset", fmt.Sprint(opts.Offset))
+	}
+	if opts.Limit != 0 {
+		values.Set("limit", fmt.Sprint(opts.Limit))
+	}
+	err = c.c.GET("/alerts?"+values.Encode(), &alerts)
 	return
 }
 
+// DismissAllAlerts dismisses all alerts.
+func (c *Client) DismissAllAlerts(ctx context.Context) error {
+	return c.c.WithContext(ctx).POST("/alerts/dismissall", nil, nil)
+}
+
 // DismissAlerts dismisses the alerts with the given IDs. 
func (c *Client) DismissAlerts(ctx context.Context, ids ...types.Hash256) error { return c.c.WithContext(ctx).POST("/alerts/dismiss", ids, nil) diff --git a/internal/testing/cluster_test.go b/internal/testing/cluster_test.go index b0de2946e..cd26b6519 100644 --- a/internal/testing/cluster_test.go +++ b/internal/testing/cluster_test.go @@ -1915,7 +1915,7 @@ func TestAlerts(t *testing.T) { tt.OK(b.RegisterAlert(context.Background(), alert)) findAlert := func(id types.Hash256) *alerts.Alert { t.Helper() - alerts, err := b.Alerts() + alerts, err := b.Alerts(alerts.AlertsOpts{}) tt.OK(err) for _, alert := range alerts { if alert.ID == id { @@ -1938,6 +1938,40 @@ func TestAlerts(t *testing.T) { if foundAlert != nil { t.Fatal("alert found") } + + // register 2 alerts + alert2 := alert + alert2.ID = frand.Entropy256() + alert2.Timestamp = time.Now().Add(time.Second) + tt.OK(b.RegisterAlert(context.Background(), alert)) + tt.OK(b.RegisterAlert(context.Background(), alert2)) + if foundAlert := findAlert(alert.ID); foundAlert == nil { + t.Fatal("alert not found") + } else if foundAlert := findAlert(alert2.ID); foundAlert == nil { + t.Fatal("alert not found") + } + + // try to find with offset = 1 + foundAlerts, err := b.Alerts(alerts.AlertsOpts{Offset: 1}) + tt.OK(err) + if len(foundAlerts) != 1 || foundAlerts[0].ID != alert.ID { + t.Fatal("wrong alert") + } + + // try to find with limit = 1 + foundAlerts, err = b.Alerts(alerts.AlertsOpts{Limit: 1}) + tt.OK(err) + if len(foundAlerts) != 1 || foundAlerts[0].ID != alert2.ID { + t.Fatal("wrong alert") + } + + // dismiss all + tt.OK(b.DismissAllAlerts(context.Background())) + foundAlerts, err = b.Alerts(alerts.AlertsOpts{}) + tt.OK(err) + if len(foundAlerts) != 0 { + t.Fatal("expected 0 alerts", len(foundAlerts)) + } } func TestMultipartUploads(t *testing.T) { From 87de90414b6ef72383d4e0ec3a5b018686e83e19 Mon Sep 17 00:00:00 2001 From: Nate Maninger Date: Tue, 13 Feb 2024 14:05:23 -0800 Subject: [PATCH 013/172] worker: remove context value interaction recorder --- worker/host.go | 4 +++- worker/interactions.go | 8 -------- worker/worker.go | 4 ++-- 3 files changed, 5 insertions(+), 11 deletions(-) diff --git a/worker/host.go b/worker/host.go index fcaac0cf1..2aa97d57a 100644 --- a/worker/host.go +++ b/worker/host.go @@ -55,6 +55,7 @@ type ( logger *zap.SugaredLogger transportPool *transportPoolV3 priceTables *priceTables + interactionRecorder HostInteractionRecorder } ) @@ -76,6 +77,7 @@ func (w *worker) Host(hk types.PublicKey, fcid types.FileContractID, siamuxAddr accountKey: w.accounts.deriveAccountKey(hk), transportPool: w.transportPoolV3, priceTables: w.priceTables, + interactionRecorder: w.hostInteractionRecorder, } } @@ -196,7 +198,7 @@ func (h *host) FetchPriceTable(ctx context.Context, rev *types.FileContractRevis fetchPT := func(paymentFn PriceTablePaymentFunc) (hpt hostdb.HostPriceTable, err error) { err = h.transportPool.withTransportV3(ctx, h.hk, h.siamuxAddr, func(ctx context.Context, t *transportV3) (err error) { hpt, err = RPCPriceTable(ctx, t, paymentFn) - HostInteractionRecorderFromContext(ctx).RecordPriceTableUpdate(hostdb.PriceTableUpdate{ + h.interactionRecorder.RecordPriceTableUpdate(hostdb.PriceTableUpdate{ HostKey: h.hk, Success: isSuccessfulInteraction(err), Timestamp: time.Now(), diff --git a/worker/interactions.go b/worker/interactions.go index 70629c1f0..a011cc0d4 100644 --- a/worker/interactions.go +++ b/worker/interactions.go @@ -42,14 +42,6 @@ var ( _ HostInteractionRecorder = (*hostInteractionRecorder)(nil) ) -func 
HostInteractionRecorderFromContext(ctx context.Context) HostInteractionRecorder { - ir, ok := ctx.Value(keyInteractionRecorder).(HostInteractionRecorder) - if !ok { - panic("no interaction recorder attached to the context") // developer error - } - return ir -} - func interactionMiddleware(ir HostInteractionRecorder, routes map[string]jape.Handler) map[string]jape.Handler { for route, handler := range routes { routes[route] = jape.Adapt(func(h http.Handler) http.Handler { diff --git a/worker/worker.go b/worker/worker.go index adbce821d..863c8e27d 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -342,7 +342,7 @@ func (w *worker) rhpPriceTableHandler(jc jape.Context) { var err error var hpt hostdb.HostPriceTable defer func() { - HostInteractionRecorderFromContext(ctx).RecordPriceTableUpdate(hostdb.PriceTableUpdate{ + w.hostInteractionRecorder.RecordPriceTableUpdate(hostdb.PriceTableUpdate{ HostKey: rptr.HostKey, Success: isSuccessfulInteraction(err), Timestamp: time.Now(), @@ -1442,7 +1442,7 @@ func (w *worker) scanHost(ctx context.Context, hostKey types.PublicKey, hostIP s } // record host scan - HostInteractionRecorderFromContext(ctx).RecordHostScan(hostdb.HostScan{ + w.hostInteractionRecorder.RecordHostScan(hostdb.HostScan{ HostKey: hostKey, Success: isSuccessfulInteraction(err), Timestamp: time.Now(), From fee0b6ca788ce82e8173862ec2463f962bc1396a Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 14 Feb 2024 09:19:19 +0100 Subject: [PATCH 014/172] worker: ensure applying MigrationSurchargeMultiplier never prevents a download --- api/setting.go | 5 +++++ worker/gouging.go | 9 ++++----- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/api/setting.go b/api/setting.go index d11089010..348b3a62d 100644 --- a/api/setting.go +++ b/api/setting.go @@ -112,6 +112,11 @@ func (gs GougingSettings) Validate() error { if gs.MinPriceTableValidity < 10*time.Second { return errors.New("MinPriceTableValidity must be at least 10 seconds") } + _, overflow := gs.MaxDownloadPrice.Mul64WithOverflow(gs.MigrationSurchargeMultiplier) + if overflow { + maxMultiplier := types.MaxCurrency.Div(gs.MaxDownloadPrice).Big().Uint64() + return fmt.Errorf("MigrationSurchargeMultiplier must be less than %v, otherwise applying it to MaxDownloadPrice overflows the currency type", maxMultiplier) + } return nil } diff --git a/worker/gouging.go b/worker/gouging.go index 5e77c3053..36963e24a 100644 --- a/worker/gouging.go +++ b/worker/gouging.go @@ -72,17 +72,16 @@ func WithGougingChecker(ctx context.Context, cs consensusState, gp api.GougingPa // adjust the max download price if we are dealing with a critical // migration that might be failing due to gouging checks + settings := gp.GougingSettings if criticalMigration && gp.GougingSettings.MigrationSurchargeMultiplier > 0 { - if adjustedMaxDownloadPrice, overflow := gp.GougingSettings.MaxDownloadPrice.Mul64WithOverflow(gp.GougingSettings.MigrationSurchargeMultiplier); overflow { - return gougingChecker{}, errors.New("failed to apply the 'MigrationSurchargeMultiplier', overflow detected") - } else { - gp.GougingSettings.MaxDownloadPrice = adjustedMaxDownloadPrice + if adjustedMaxDownloadPrice, overflow := gp.GougingSettings.MaxDownloadPrice.Mul64WithOverflow(gp.GougingSettings.MigrationSurchargeMultiplier); !overflow { + settings.MaxDownloadPrice = adjustedMaxDownloadPrice } } return gougingChecker{ consensusState: consensusState, - settings: gp.GougingSettings, + settings: settings, txFee: gp.TransactionFee, // NOTE: From 3ca11cb858615f443f6c564e1c79dbb7b05c225d Mon 
Sep 17 00:00:00 2001 From: PJ Date: Wed, 14 Feb 2024 09:33:19 +0100 Subject: [PATCH 015/172] worker: get rid of interaction middleware --- worker/host.go | 4 ++-- worker/interactions.go | 14 -------------- worker/worker.go | 4 ++-- 3 files changed, 4 insertions(+), 18 deletions(-) diff --git a/worker/host.go b/worker/host.go index 2aa97d57a..86e92ce27 100644 --- a/worker/host.go +++ b/worker/host.go @@ -52,10 +52,10 @@ type ( acc *account bus Bus contractSpendingRecorder ContractSpendingRecorder + interactionRecorder HostInteractionRecorder logger *zap.SugaredLogger transportPool *transportPoolV3 priceTables *priceTables - interactionRecorder HostInteractionRecorder } ) @@ -70,6 +70,7 @@ func (w *worker) Host(hk types.PublicKey, fcid types.FileContractID, siamuxAddr acc: w.accounts.ForHost(hk), bus: w.bus, contractSpendingRecorder: w.contractSpendingRecorder, + interactionRecorder: w.hostInteractionRecorder, logger: w.logger.Named(hk.String()[:4]), fcid: fcid, siamuxAddr: siamuxAddr, @@ -77,7 +78,6 @@ func (w *worker) Host(hk types.PublicKey, fcid types.FileContractID, siamuxAddr accountKey: w.accounts.deriveAccountKey(hk), transportPool: w.transportPoolV3, priceTables: w.priceTables, - interactionRecorder: w.hostInteractionRecorder, } } diff --git a/worker/interactions.go b/worker/interactions.go index a011cc0d4..dfe8c4017 100644 --- a/worker/interactions.go +++ b/worker/interactions.go @@ -3,11 +3,9 @@ package worker import ( "context" "fmt" - "net/http" "sync" "time" - "go.sia.tech/jape" "go.sia.tech/renterd/hostdb" "go.uber.org/zap" ) @@ -42,18 +40,6 @@ var ( _ HostInteractionRecorder = (*hostInteractionRecorder)(nil) ) -func interactionMiddleware(ir HostInteractionRecorder, routes map[string]jape.Handler) map[string]jape.Handler { - for route, handler := range routes { - routes[route] = jape.Adapt(func(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := context.WithValue(r.Context(), keyInteractionRecorder, ir) - h.ServeHTTP(w, r.WithContext(ctx)) - }) - })(handler) - } - return routes -} - func (w *worker) initHostInteractionRecorder(flushInterval time.Duration) { if w.hostInteractionRecorder != nil { panic("HostInteractionRecorder already initialized") // developer error diff --git a/worker/worker.go b/worker/worker.go index 863c8e27d..8a2a9b1f3 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1323,7 +1323,7 @@ func New(masterKey [32]byte, id string, b Bus, contractLockingDuration, busFlush // Handler returns an HTTP handler that serves the worker API. func (w *worker) Handler() http.Handler { - return jape.Mux(interactionMiddleware(w.hostInteractionRecorder, map[string]jape.Handler{ + return jape.Mux(map[string]jape.Handler{ "GET /account/:hostkey": w.accountHandlerGET, "GET /id": w.idHandlerGET, @@ -1351,7 +1351,7 @@ func (w *worker) Handler() http.Handler { "PUT /multipart/*path": w.multipartUploadHandlerPUT, "GET /state": w.stateHandlerGET, - })) + }) } // Shutdown shuts down the worker. 
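Note on patches 013 and 015 above: together they replace the context-value lookup (HostInteractionRecorderFromContext plus the jape middleware that injected the recorder into each request context) with an explicit interactionRecorder field on the host struct. A minimal, self-contained Go sketch of that dependency-injection pattern follows; the Recorder interface, logRecorder type and scan method are illustrative stand-ins for renterd's HostInteractionRecorder, not its actual API.

package main

import "fmt"

// Recorder stands in for worker.HostInteractionRecorder.
type Recorder interface {
	RecordScan(hostKey string, success bool)
}

// logRecorder is a toy implementation that just prints each interaction.
type logRecorder struct{}

func (logRecorder) RecordScan(hostKey string, success bool) {
	fmt.Println("recorded scan:", hostKey, success)
}

// host receives its recorder at construction time. Before these patches the
// recorder traveled inside a context.Context, and a handler registered
// without the middleware would panic at the ctx.Value lookup; a struct field
// makes the dependency explicit and checked at compile time.
type host struct {
	recorder Recorder
}

func newHost(r Recorder) *host {
	return &host{recorder: r}
}

func (h *host) scan(hostKey string) {
	// ... dial the host and fetch its settings here ...
	h.recorder.RecordScan(hostKey, true)
}

func main() {
	h := newHost(logRecorder{})
	h.scan("ed25519:abcd")
}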
From d90e42f0410ed6c3a1f4e09291e5f87e70c91723 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 14 Feb 2024 11:59:49 +0100 Subject: [PATCH 016/172] stores: allow for filtering object stats by bucket --- api/object.go | 4 +++ bus/bus.go | 8 +++-- bus/client/objects.go | 8 +++-- internal/testing/cluster_test.go | 57 +++++++++++++++++--------------- stores/metadata.go | 29 ++++++++++++++-- stores/metadata_test.go | 48 ++++++++++++++++++--------- 6 files changed, 105 insertions(+), 49 deletions(-) diff --git a/api/object.go b/api/object.go index 73bb9c45c..e4fd4b465 100644 --- a/api/object.go +++ b/api/object.go @@ -119,6 +119,10 @@ type ( Mode string `json:"mode"` } + ObjectsStatsOpts struct { + Bucket string + } + // ObjectsStatsResponse is the response type for the /bus/stats/objects endpoint. ObjectsStatsResponse struct { NumObjects uint64 `json:"numObjects"` // number of objects diff --git a/bus/bus.go b/bus/bus.go index d11550595..10c610296 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -139,7 +139,7 @@ type ( Object(ctx context.Context, bucketName, path string) (api.Object, error) ObjectEntries(ctx context.Context, bucketName, path, prefix, sortBy, sortDir, marker string, offset, limit int) ([]api.ObjectMetadata, bool, error) ObjectsBySlabKey(ctx context.Context, bucketName string, slabKey object.EncryptionKey) ([]api.ObjectMetadata, error) - ObjectsStats(ctx context.Context) (api.ObjectsStatsResponse, error) + ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) RemoveObject(ctx context.Context, bucketName, path string) error RemoveObjects(ctx context.Context, bucketName, prefix string) error RenameObject(ctx context.Context, bucketName, from, to string, force bool) error @@ -1348,7 +1348,11 @@ func (b *bus) slabbuffersHandlerGET(jc jape.Context) { } func (b *bus) objectsStatshandlerGET(jc jape.Context) { - info, err := b.ms.ObjectsStats(jc.Request.Context()) + opts := api.ObjectsStatsOpts{} + if jc.DecodeForm(("bucket"), &opts.Bucket) != nil { + return + } + info, err := b.ms.ObjectsStats(jc.Request.Context(), opts) if jc.Check("couldn't get objects stats", err) != nil { return } diff --git a/bus/client/objects.go b/bus/client/objects.go index 38a7b14cd..23011a9ba 100644 --- a/bus/client/objects.go +++ b/bus/client/objects.go @@ -82,8 +82,12 @@ func (c *Client) ObjectsBySlabKey(ctx context.Context, bucket string, key object } // ObjectsStats returns information about the number of objects and their size. -func (c *Client) ObjectsStats() (osr api.ObjectsStatsResponse, err error) { - err = c.c.GET("/stats/objects", &osr) +func (c *Client) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (osr api.ObjectsStatsResponse, err error) { + values := url.Values{} + if opts.Bucket != "" { + values.Set("bucket", opts.Bucket) + } + err = c.c.WithContext(ctx).GET("/stats/objects?"+values.Encode(), &osr) return } diff --git a/internal/testing/cluster_test.go b/internal/testing/cluster_test.go index b0de2946e..13903115d 100644 --- a/internal/testing/cluster_test.go +++ b/internal/testing/cluster_test.go @@ -697,24 +697,29 @@ func TestUploadDownloadExtended(t *testing.T) { } // check objects stats. 
- info, err := cluster.Bus.ObjectsStats() - tt.OK(err) - objectsSize := uint64(len(file1) + len(file2) + len(small) + len(large)) - if info.TotalObjectsSize != objectsSize { - t.Error("wrong size", info.TotalObjectsSize, objectsSize) - } - sectorsSize := 15 * rhpv2.SectorSize - if info.TotalSectorsSize != uint64(sectorsSize) { - t.Error("wrong size", info.TotalSectorsSize, sectorsSize) - } - if info.TotalUploadedSize != uint64(sectorsSize) { - t.Error("wrong size", info.TotalUploadedSize, sectorsSize) - } - if info.NumObjects != 4 { - t.Error("wrong number of objects", info.NumObjects, 4) - } - if info.MinHealth != 1 { - t.Errorf("expected minHealth of 1, got %v", info.MinHealth) + for _, opts := range []api.ObjectsStatsOpts{ + {}, // any bucket + {Bucket: api.DefaultBucketName}, // specific bucket + } { + info, err := cluster.Bus.ObjectsStats(context.Background(), opts) + tt.OK(err) + objectsSize := uint64(len(file1) + len(file2) + len(small) + len(large)) + if info.TotalObjectsSize != objectsSize { + t.Error("wrong size", info.TotalObjectsSize, objectsSize) + } + sectorsSize := 15 * rhpv2.SectorSize + if info.TotalSectorsSize != uint64(sectorsSize) { + t.Error("wrong size", info.TotalSectorsSize, sectorsSize) + } + if info.TotalUploadedSize != uint64(sectorsSize) { + t.Error("wrong size", info.TotalUploadedSize, sectorsSize) + } + if info.NumObjects != 4 { + t.Error("wrong number of objects", info.NumObjects, 4) + } + if info.MinHealth != 1 { + t.Errorf("expected minHealth of 1, got %v", info.MinHealth) + } } // download the data @@ -1633,7 +1638,7 @@ func TestUploadPacking(t *testing.T) { download("file4", data4, 0, int64(len(data4))) // assert number of objects - os, err := b.ObjectsStats() + os, err := b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) tt.OK(err) if os.NumObjects != 5 { t.Fatalf("expected 5 objects, got %v", os.NumObjects) @@ -1642,7 +1647,7 @@ func TestUploadPacking(t *testing.T) { // check the object size stats, we use a retry loop since packed slabs are // uploaded in a separate goroutine, so the object stats might lag a bit tt.Retry(60, time.Second, func() error { - os, err := b.ObjectsStats() + os, err := b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) if err != nil { t.Fatal(err) } @@ -1796,7 +1801,7 @@ func TestSlabBufferStats(t *testing.T) { tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data1), api.DefaultBucketName, "1", api.UploadObjectOptions{})) // assert number of objects - os, err := b.ObjectsStats() + os, err := b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) tt.OK(err) if os.NumObjects != 1 { t.Fatalf("expected 1 object, got %d", os.NumObjects) @@ -1805,7 +1810,7 @@ func TestSlabBufferStats(t *testing.T) { // check the object size stats, we use a retry loop since packed slabs are // uploaded in a separate goroutine, so the object stats might lag a bit tt.Retry(60, time.Second, func() error { - os, err := b.ObjectsStats() + os, err := b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) if err != nil { t.Fatal(err) } @@ -1853,7 +1858,7 @@ func TestSlabBufferStats(t *testing.T) { tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data2), api.DefaultBucketName, "2", api.UploadObjectOptions{})) // assert number of objects - os, err = b.ObjectsStats() + os, err = b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) tt.OK(err) if os.NumObjects != 2 { t.Fatalf("expected 1 object, got %d", os.NumObjects) @@ -1862,7 +1867,7 @@ func TestSlabBufferStats(t *testing.T) { // check the 
object size stats, we use a retry loop since packed slabs are // uploaded in a separate goroutine, so the object stats might lag a bit tt.Retry(60, time.Second, func() error { - os, err := b.ObjectsStats() + os, err := b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) tt.OK(err) if os.TotalObjectsSize != uint64(len(data1)+len(data2)) { return fmt.Errorf("expected totalObjectSize of %d, got %d", len(data1)+len(data2), os.TotalObjectsSize) @@ -2006,7 +2011,7 @@ func TestMultipartUploads(t *testing.T) { } // Check objects stats. - os, err := b.ObjectsStats() + os, err := b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) tt.OK(err) if os.NumObjects != 0 { t.Fatalf("expected 0 object, got %v", os.NumObjects) @@ -2065,7 +2070,7 @@ func TestMultipartUploads(t *testing.T) { } // Check objects stats. - os, err = b.ObjectsStats() + os, err = b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) tt.OK(err) if os.NumObjects != 1 { t.Fatalf("expected 1 object, got %v", os.NumObjects) diff --git a/stores/metadata.go b/stores/metadata.go index f20f7dbb0..68947ed95 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -582,7 +582,15 @@ func (s *SQLStore) ListBuckets(ctx context.Context) ([]api.Bucket, error) { // ObjectsStats returns some info related to the objects stored in the store. To // reduce locking and make sure all results are consistent, everything is done // within a single transaction. -func (s *SQLStore) ObjectsStats(ctx context.Context) (api.ObjectsStatsResponse, error) { +func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) { + // if no bucket is specified, we consider all objects + whereBucket := func(table string) clause.Expr { + if opts.Bucket == "" { + return exprTRUE + } + return sqlWhereBucket(table, opts.Bucket) + } + // number of objects var objInfo struct { NumObjects uint64 @@ -592,6 +600,7 @@ func (s *SQLStore) ObjectsStats(ctx context.Context) (api.ObjectsStatsResponse, err := s.db. Model(&dbObject{}). Select("COUNT(*) AS NumObjects, COALESCE(MIN(health), 1) as MinHealth, SUM(size) AS TotalObjectsSize"). + Where(whereBucket(dbObject{}.TableName())). Scan(&objInfo). Error if err != nil { @@ -603,6 +612,7 @@ func (s *SQLStore) ObjectsStats(ctx context.Context) (api.ObjectsStatsResponse, err = s.db. Model(&dbMultipartUpload{}). Select("COUNT(*)"). + Where(whereBucket(dbMultipartUpload{}.TableName())). Scan(&unfinishedObjects). Error if err != nil { @@ -613,13 +623,26 @@ func (s *SQLStore) ObjectsStats(ctx context.Context) (api.ObjectsStatsResponse, var totalUnfinishedObjectsSize uint64 err = s.db. Model(&dbMultipartPart{}). + Joins("INNER JOIN multipart_uploads mu ON multipart_parts.db_multipart_upload_id = mu.id"). Select("COALESCE(SUM(size), 0)"). + Where(whereBucket("mu")). Scan(&totalUnfinishedObjectsSize). Error if err != nil { return api.ObjectsStatsResponse{}, err } + fromContractSectors := gorm.Expr("contract_sectors cs") + if opts.Bucket != "" { + fromContractSectors = gorm.Expr(` + contract_sectors cs + INNER JOIN sectors s ON s.id = cs.db_sector_id + INNER JOIN slabs sla ON sla.id = s.db_slab_id + INNER JOIN slices sli ON sli.db_slab_id = sla.id + INNER JOIN objects o ON o.id = sli.db_object_id AND (?) + `, whereBucket("o")) + } + var totalSectors uint64 batchSize := 500000 @@ -631,7 +654,7 @@ func (s *SQLStore) ObjectsStats(ctx context.Context) (api.ObjectsStatsResponse, } res := s.db. Model(&dbSector{}). 
- Raw("SELECT COUNT(*) as Sectors, MAX(sectors.db_sector_id) as Marker FROM (SELECT cs.db_sector_id FROM contract_sectors cs WHERE cs.db_sector_id > ? GROUP BY cs.db_sector_id LIMIT ?) sectors", marker, batchSize). + Raw("SELECT COUNT(*) as Sectors, MAX(sectors.db_sector_id) as Marker FROM (SELECT cs.db_sector_id FROM ? WHERE cs.db_sector_id > ? GROUP BY cs.db_sector_id LIMIT ?) sectors", fromContractSectors, marker, batchSize). Scan(&result) if err := res.Error; err != nil { return api.ObjectsStatsResponse{}, err @@ -644,7 +667,7 @@ func (s *SQLStore) ObjectsStats(ctx context.Context) (api.ObjectsStatsResponse, var totalUploaded int64 err = s.db. - Model(&dbContractSector{}). + Table("?", fromContractSectors). Count(&totalUploaded). Error if err != nil { diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 96b06c4ec..a05c0be17 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -2435,7 +2435,7 @@ func TestObjectsStats(t *testing.T) { defer ss.Close() // Fetch stats on clean database. - info, err := ss.ObjectsStats(context.Background()) + info, err := ss.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) if err != nil { t.Fatal(err) } @@ -2499,21 +2499,37 @@ func TestObjectsStats(t *testing.T) { } // Check sizes. - info, err = ss.ObjectsStats(context.Background()) - if err != nil { - t.Fatal(err) - } - if info.TotalObjectsSize != objectsSize { - t.Fatal("wrong size", info.TotalObjectsSize, objectsSize) - } - if info.TotalSectorsSize != sectorsSize { - t.Fatal("wrong size", info.TotalSectorsSize, sectorsSize) - } - if info.TotalUploadedSize != sectorsSize*2 { - t.Fatal("wrong size", info.TotalUploadedSize, sectorsSize*2) + for _, opts := range []api.ObjectsStatsOpts{ + {}, // any bucket + {Bucket: api.DefaultBucketName}, // specific bucket + } { + info, err = ss.ObjectsStats(context.Background(), opts) + if err != nil { + t.Fatal(err) + } else if info.TotalObjectsSize != objectsSize { + t.Fatal("wrong size", info.TotalObjectsSize, objectsSize) + } else if info.TotalSectorsSize != sectorsSize { + t.Fatal("wrong size", info.TotalSectorsSize, sectorsSize) + } else if info.TotalUploadedSize != sectorsSize*2 { + t.Fatal("wrong size", info.TotalUploadedSize, sectorsSize*2) + } else if info.NumObjects != 2 { + t.Fatal("wrong number of objects", info.NumObjects, 2) + } } - if info.NumObjects != 2 { - t.Fatal("wrong number of objects", info.NumObjects, 2) + + // Check other bucket. 
+ if err := ss.CreateBucket(context.Background(), "other", api.BucketPolicy{}); err != nil { + t.Fatal(err) + } else if info, err := ss.ObjectsStats(context.Background(), api.ObjectsStatsOpts{Bucket: "other"}); err != nil { + t.Fatal(err) + } else if info.TotalObjectsSize != 0 { + t.Fatal("wrong size", info.TotalObjectsSize) + } else if info.TotalSectorsSize != 0 { + t.Fatal("wrong size", info.TotalSectorsSize) + } else if info.TotalUploadedSize != 0 { + t.Fatal("wrong size", info.TotalUploadedSize) + } else if info.NumObjects != 0 { + t.Fatal("wrong number of objects", info.NumObjects) } } @@ -2908,7 +2924,7 @@ func TestContractSizes(t *testing.T) { } // assert there's two objects - s, err := ss.ObjectsStats(context.Background()) + s, err := ss.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) if err != nil { t.Fatal(err) } From b22e96e8987da028ec75cc3a0ccf27d815d50ae4 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 14 Feb 2024 13:10:29 +0100 Subject: [PATCH 017/172] bus: address comments --- alerts/alerts.go | 12 +++++++----- bus/bus.go | 7 ++++--- bus/client/alerts.go | 2 +- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/alerts/alerts.go b/alerts/alerts.go index c76898c8d..424196d4f 100644 --- a/alerts/alerts.go +++ b/alerts/alerts.go @@ -66,8 +66,8 @@ type ( } AlertsOpts struct { - Offset uint64 - Limit uint64 + Offset int + Limit int } ) @@ -176,12 +176,14 @@ func (m *Manager) DismissAlerts(ctx context.Context, ids ...types.Hash256) error } // Active returns the host's active alerts. -func (m *Manager) Active(offset, limit uint64) []Alert { +func (m *Manager) Active(offset, limit int) []Alert { m.mu.Lock() defer m.mu.Unlock() - if offset >= uint64(len(m.alerts)) { + if offset >= len(m.alerts) { return nil + } else if limit == -1 { + limit = len(m.alerts) } alerts := make([]Alert, 0, len(m.alerts)) @@ -192,7 +194,7 @@ func (m *Manager) Active(offset, limit uint64) []Alert { return alerts[i].Timestamp.After(alerts[j].Timestamp) }) alerts = alerts[offset:] - if limit < uint64(len(alerts)) { + if limit < len(alerts) { alerts = alerts[:limit] } return alerts diff --git a/bus/bus.go b/bus/bus.go index 82a89826f..4342e493b 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -1713,13 +1713,14 @@ func (b *bus) gougingParams(ctx context.Context) (api.GougingParams, error) { } func (b *bus) handleGETAlerts(jc jape.Context) { - var offset, limit uint64 + offset, limit := 0, -1 if jc.DecodeForm("offset", &offset) != nil { return } else if jc.DecodeForm("limit", &limit) != nil { return - } else if limit == 0 { - limit = math.MaxUint64 + } else if offset < 0 { + jc.Error(errors.New("offset must be non-negative"), http.StatusBadRequest) + return } jc.Encode(b.alertMgr.Active(offset, limit)) } diff --git a/bus/client/alerts.go b/bus/client/alerts.go index ab1d7f094..6151db598 100644 --- a/bus/client/alerts.go +++ b/bus/client/alerts.go @@ -12,7 +12,7 @@ import ( // Alerts fetches the active alerts from the bus. 
func (c *Client) Alerts(opts alerts.AlertsOpts) (alerts []alerts.Alert, err error) { values := url.Values{} - if opts.Offset > 0 { + if opts.Offset != 0 { values.Set("offset", fmt.Sprint(opts.Offset)) } if opts.Limit != 0 { From dff33cd61a457e553b40744ddc644860a5d6f211 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 14 Feb 2024 13:15:41 +0100 Subject: [PATCH 018/172] bus: fix jape --- bus/bus.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bus/bus.go b/bus/bus.go index 10c610296..5c3aea504 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -1349,7 +1349,7 @@ func (b *bus) slabbuffersHandlerGET(jc jape.Context) { func (b *bus) objectsStatshandlerGET(jc jape.Context) { opts := api.ObjectsStatsOpts{} - if jc.DecodeForm(("bucket"), &opts.Bucket) != nil { + if jc.DecodeForm("bucket", &opts.Bucket) != nil { return } info, err := b.ms.ObjectsStats(jc.Request.Context(), opts) From ea173eb3cc3beac3a1170260995c5f5d59faf99d Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 14 Feb 2024 16:08:48 +0100 Subject: [PATCH 019/172] stores: binary currency type --- stores/metadata_test.go | 61 +++++++++++++++++++++++++++++++++++++++++ stores/types.go | 28 +++++++++++++++++++ 2 files changed, 89 insertions(+) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 96b06c4ec..0b0a696a5 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -6,8 +6,10 @@ import ( "encoding/hex" "errors" "fmt" + "math" "os" "reflect" + "sort" "strings" "testing" "time" @@ -4284,3 +4286,62 @@ func TestUpdateObjectReuseSlab(t *testing.T) { } } } + +func TestTypeCurrency(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + tests := []struct { + a types.Currency + b types.Currency + cmp string + }{ + { + a: types.ZeroCurrency, + b: types.NewCurrency64(1), + cmp: "<", + }, + { + a: types.NewCurrency64(1), + b: types.NewCurrency64(1), + cmp: "=", + }, + { + a: types.NewCurrency(0, math.MaxUint64), + b: types.NewCurrency(math.MaxUint64, 0), + cmp: "<", + }, + { + a: types.NewCurrency(math.MaxUint64, 0), + b: types.NewCurrency(0, math.MaxUint64), + cmp: ">", + }, + } + for _, test := range tests { + var result bool + err := ss.db.Raw("SELECT ? 
"+test.cmp+" ?", bCurrency(test.a), bCurrency(test.b)).Scan(&result).Error + if err != nil { + t.Fatal(err) + } else if !result { + t.Fatal("unexpected result", result) + } + } + + c := func(c uint64) bCurrency { + return bCurrency(types.NewCurrency64(c)) + } + + var currencies []bCurrency + err := ss.db.Raw(` +WITH input(col) as +(values (?),(?),(?)) +SELECT * FROM input ORDER BY col ASC +`, c(3), c(1), c(2)).Scan(¤cies).Error + if err != nil { + t.Fatal(err) + } else if !sort.SliceIsSorted(currencies, func(i, j int) bool { + return types.Currency(currencies[i]).Cmp(types.Currency(currencies[j])) < 0 + }) { + t.Fatal("currencies not sorted", currencies) + } +} diff --git a/stores/types.go b/stores/types.go index 6b74f7563..9a7c72009 100644 --- a/stores/types.go +++ b/stores/types.go @@ -2,6 +2,7 @@ package stores import ( "database/sql/driver" + "encoding/binary" "encoding/json" "errors" "fmt" @@ -25,6 +26,7 @@ type ( unixTimeMS time.Time datetime time.Time currency types.Currency + bCurrency types.Currency fileContractID types.FileContractID hash256 types.Hash256 publicKey types.PublicKey @@ -338,3 +340,29 @@ func (u *unsigned64) Scan(value interface{}) error { func (u unsigned64) Value() (driver.Value, error) { return int64(u), nil } + +func (bCurrency) GormDataType() string { + return "bytes" +} + +// Scan implements the sql.Scanner interface. +func (sc *bCurrency) Scan(src any) error { + buf, ok := src.([]byte) + if !ok { + return fmt.Errorf("cannot scan %T to Currency", src) + } else if len(buf) != 16 { + return fmt.Errorf("cannot scan %d bytes to Currency", len(buf)) + } + + sc.Lo = binary.LittleEndian.Uint64(buf[:8]) + sc.Hi = binary.LittleEndian.Uint64(buf[8:]) + return nil +} + +// Value implements the driver.Valuer interface. +func (sc bCurrency) Value() (driver.Value, error) { + buf := make([]byte, 16) + binary.LittleEndian.PutUint64(buf[:8], sc.Lo) + binary.LittleEndian.PutUint64(buf[8:], sc.Hi) + return buf, nil +} From fe79e0759a22ffe9a81c8262a35fa70aaeb36455 Mon Sep 17 00:00:00 2001 From: ChrisSchinnerl Date: Thu, 15 Feb 2024 00:08:47 +0000 Subject: [PATCH 020/172] ui: v0.45.0 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 084e6516a..2659a1ceb 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( go.sia.tech/jape v0.11.1 go.sia.tech/mux v1.2.0 go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca - go.sia.tech/web/renterd v0.44.0 + go.sia.tech/web/renterd v0.45.0 go.uber.org/zap v1.26.0 golang.org/x/crypto v0.19.0 golang.org/x/term v0.17.0 diff --git a/go.sum b/go.sum index d5a601d4b..b7f5f9cc7 100644 --- a/go.sum +++ b/go.sum @@ -253,8 +253,8 @@ go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca h1:aZMg2AKevn7jKx+wlusWQf go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca/go.mod h1:h/1afFwpxzff6/gG5i1XdAgPK7dEY6FaibhK7N5F86Y= go.sia.tech/web v0.0.0-20231213145933-3f175a86abff h1:/nE7nhewDRxzEdtSKT4SkiUwtjPSiy7Xz7CHEW3MaGQ= go.sia.tech/web v0.0.0-20231213145933-3f175a86abff/go.mod h1:RKODSdOmR3VtObPAcGwQqm4qnqntDVFylbvOBbWYYBU= -go.sia.tech/web/renterd v0.44.0 h1:yKu1Kq/6ssV9Vbv4oa+sn2Pc2TNyfcrv/mRPNOuYuB0= -go.sia.tech/web/renterd v0.44.0/go.mod h1:FgXrdmAnu591a3h96RB/15pMZ74xO9457g902uE06BM= +go.sia.tech/web/renterd v0.45.0 h1:5kSiDnHYRacg3JideH9Cl9qHzcZiKnBR0fWRap169hU= +go.sia.tech/web/renterd v0.45.0/go.mod h1:FgXrdmAnu591a3h96RB/15pMZ74xO9457g902uE06BM= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/goleak v1.2.0 
h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= From 793dd2fa015eb2a104ac07bc23ba65e3228356f7 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 15 Feb 2024 09:55:31 +0100 Subject: [PATCH 021/172] autopilot: dismiss lost sector alerts when there are no lost sectors --- autopilot/alerts.go | 4 ++-- autopilot/contractor.go | 6 ++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/autopilot/alerts.go b/autopilot/alerts.go index 7b42991e1..c279075ee 100644 --- a/autopilot/alerts.go +++ b/autopilot/alerts.go @@ -48,8 +48,8 @@ func (ap *Autopilot) RegisterAlert(ctx context.Context, a alerts.Alert) { } } -func (ap *Autopilot) DismissAlert(ctx context.Context, id types.Hash256) { - if err := ap.alerts.DismissAlerts(ctx, id); err != nil { +func (ap *Autopilot) DismissAlert(ctx context.Context, ids ...types.Hash256) { + if err := ap.alerts.DismissAlerts(ctx, ids...); err != nil { ap.logger.Errorf("failed to dismiss alert: %v", err) } } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index adad5d1b7..bc947d500 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -231,11 +231,17 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // check if any used hosts have lost data to warn the user + var toDismiss []types.Hash256 for _, h := range hosts { if h.Interactions.LostSectors > 0 { c.ap.RegisterAlert(ctx, newLostSectorsAlert(h.PublicKey, h.Interactions.LostSectors)) + } else { + toDismiss = append(toDismiss, alertIDForHost(alertLostSectorsID, h.PublicKey)) } } + if len(toDismiss) > 0 { + c.ap.DismissAlert(ctx, toDismiss...) + } // fetch candidate hosts candidates, unusableHosts, err := c.candidateHosts(ctx, hosts, usedHosts, hostData, math.SmallestNonzeroFloat64) // avoid 0 score hosts From a52b3a769005e25a6ca2a9a5d16770b4339f9009 Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 15 Feb 2024 11:01:58 +0100 Subject: [PATCH 022/172] stores: fix metric migrations --- stores/migrations.go | 60 ++------------------------- stores/migrations_metrics.go | 29 ++----------- stores/migrations_utils.go | 79 ++++++++++++++++++++++++++++++++++++ 3 files changed, 85 insertions(+), 83 deletions(-) create mode 100644 stores/migrations_utils.go diff --git a/stores/migrations.go b/stores/migrations.go index e79c6c36b..05bda3f69 100644 --- a/stores/migrations.go +++ b/stores/migrations.go @@ -6,7 +6,6 @@ import ( "strings" "github.com/go-gormigrate/gormigrate/v2" - "go.sia.tech/renterd/api" "go.uber.org/zap" "gorm.io/gorm" ) @@ -16,32 +15,6 @@ var ( errMySQLNoSuperPrivilege = errors.New("You do not have the SUPER privilege and binary logging is enabled") ) -// initSchema is executed only on a clean database. Otherwise the individual -// migrations are executed. -func initSchema(tx *gorm.DB) (err error) { - // Pick the right migrations. - var schema []byte - if isSQLite(tx) { - schema, err = migrations.ReadFile("migrations/sqlite/main/schema.sql") - } else { - schema, err = migrations.ReadFile("migrations/mysql/main/schema.sql") - } - if err != nil { - return - } - - // Run it. - err = tx.Exec(string(schema)).Error - if err != nil { - return fmt.Errorf("failed to init schema: %w", err) - } - - // Add default bucket. 
- return tx.Create(&dbBucket{ - Name: api.DefaultBucketName, - }).Error -} - func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { migrations := []*gormigrate.Migration{ { @@ -51,13 +24,13 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { { ID: "00001_object_metadata", Migrate: func(tx *gorm.DB) error { - return performMigration(tx, "00001_object_metadata", logger) + return performMigration(tx, "00001_object_metadata", false, logger) }, }, { ID: "00002_prune_slabs_trigger", Migrate: func(tx *gorm.DB) error { - err := performMigration(tx, "00002_prune_slabs_trigger", logger) + err := performMigration(tx, "00002_prune_slabs_trigger", false, logger) if err != nil && strings.Contains(err.Error(), errMySQLNoSuperPrivilege.Error()) { logger.Warn("migration 00002_prune_slabs_trigger requires the user to have the SUPER privilege to register triggers") } @@ -70,7 +43,7 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { m := gormigrate.New(db, gormigrate.DefaultOptions, migrations) // Set init function. - m.InitSchema(initSchema) + m.InitSchema(initSchema(db, false, logger)) // Perform migrations. if err := m.Migrate(); err != nil { @@ -78,30 +51,3 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { } return nil } - -func performMigration(db *gorm.DB, name string, logger *zap.SugaredLogger) error { - logger.Infof("performing migration %s", name) - - // build path - var path string - if isSQLite(db) { - path = fmt.Sprintf("migrations/sqlite/main/migration_" + name + ".sql") - } else { - path = fmt.Sprintf("migrations/mysql/main/migration_" + name + ".sql") - } - - // read migration file - migration, err := migrations.ReadFile(path) - if err != nil { - return fmt.Errorf("migration %s failed: %w", name, err) - } - - // execute it - err = db.Exec(string(migration)).Error - if err != nil { - return fmt.Errorf("migration %s failed: %w", name, err) - } - - logger.Infof("migration %s complete", name) - return nil -} diff --git a/stores/migrations_metrics.go b/stores/migrations_metrics.go index a95d7b914..ddbbe8e4e 100644 --- a/stores/migrations_metrics.go +++ b/stores/migrations_metrics.go @@ -8,30 +8,7 @@ import ( "gorm.io/gorm" ) -// initMetricsSchema is executed only on a clean database. Otherwise the individual -// migrations are executed. -func initMetricsSchema(tx *gorm.DB) error { - // Pick the right migrations. - var schema []byte - var err error - if isSQLite(tx) { - schema, err = migrations.ReadFile("migrations/sqlite/metrics/schema.sql") - } else { - schema, err = migrations.ReadFile("migrations/mysql/metrics/schema.sql") - } - if err != nil { - return err - } - - // Run it. - err = tx.Exec(string(schema)).Error - if err != nil { - return fmt.Errorf("failed to init schema: %w", err) - } - return nil -} - -func performMetricsMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { +func performMetricsMigrations(tx *gorm.DB, logger *zap.SugaredLogger) error { migrations := []*gormigrate.Migration{ { ID: "00001_init", @@ -40,10 +17,10 @@ func performMetricsMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { } // Create migrator. - m := gormigrate.New(db, gormigrate.DefaultOptions, migrations) + m := gormigrate.New(tx, gormigrate.DefaultOptions, migrations) // Set init function. - m.InitSchema(initMetricsSchema) + m.InitSchema(initSchema(tx, true, logger)) // Perform migrations. 
if err := m.Migrate(); err != nil { diff --git a/stores/migrations_utils.go b/stores/migrations_utils.go new file mode 100644 index 000000000..4832d96a7 --- /dev/null +++ b/stores/migrations_utils.go @@ -0,0 +1,79 @@ +package stores + +import ( + "fmt" + + gormigrate "github.com/go-gormigrate/gormigrate/v2" + "go.sia.tech/renterd/api" + "go.uber.org/zap" + "gorm.io/gorm" +) + +// initSchema is executed only on a clean database. Otherwise the individual +// migrations are executed. +func initSchema(db *gorm.DB, metrics bool, logger *zap.SugaredLogger) gormigrate.InitSchemaFunc { + return func(tx *gorm.DB) error { + if metrics { + logger.Info("initializing metrics schema") + } else { + logger.Info("initializing schema") + } + + // build filename + filename := "schema" + err := execSQLFile(tx, metrics, filename) + if err != nil { + return fmt.Errorf("failed to init schema: %w", err) + } + + // add default bucket. + if !metrics { + if err := tx.Create(&dbBucket{ + Name: api.DefaultBucketName, + }).Error; err != nil { + return fmt.Errorf("failed to create default bucket: %v", err) + } + } + + logger.Info("initialization complete") + return nil + } +} + +func performMigration(db *gorm.DB, name string, metrics bool, logger *zap.SugaredLogger) error { + logger.Infof("performing migration %s", name) + + // build filename + filename := fmt.Sprintf("migration_%s", name) + + // execute migration + err := execSQLFile(db, metrics, filename) + if err != nil { + return fmt.Errorf("migration %s failed: %w", name, err) + } + + logger.Infof("migration %s complete", name) + return nil +} + +func execSQLFile(db *gorm.DB, metrics bool, filename string) error { + // build path + folder := "main" + if metrics { + folder = "metrics" + } + protocol := "mysql" + if isSQLite(db) { + protocol = "sqlite" + } + path := fmt.Sprintf("migrations/%s/%s/%s.sql", protocol, folder, filename) + + // read file + file, err := migrations.ReadFile(path) + if err != nil { + return err + } + + // execute it + return db.Exec(string(file)).Error +} From cc83fc96cb09b2210c7ec8f5846e617d3c05e396 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 15 Feb 2024 14:26:25 +0100 Subject: [PATCH 023/172] stores: improve performance of objects stats --- stores/metadata.go | 63 ++++++++++++++++++++++++++++++---------------- 1 file changed, 41 insertions(+), 22 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 68947ed95..39a053528 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -584,11 +584,18 @@ func (s *SQLStore) ListBuckets(ctx context.Context) ([]api.Bucket, error) { // within a single transaction. 
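// A worked example of the id-range batching introduced in this patch (a
// sketch; 500000 is the target rows per batch from the code, and the numbers
// below assume roughly uniformly distributed ids): with MinID=1,
// MaxID=10_000_000 and Total=2_000_000 contract sectors,
//
//	batchSize := (10_000_000 - 1) / (2_000_000/500_000 + 1) // 1_999_999 ids per batch
//
// so the COUNT over distinct sector ids runs as about five bounded range
// queries of roughly 400k rows each instead of one unbounded scan.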
func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) { // if no bucket is specified, we consider all objects + var bucketID uint + if opts.Bucket != "" { + err := s.db.Model(&dbBucket{}).Select("id").Where("name = ?", opts.Bucket).Take(&bucketID).Error + if err != nil { + return api.ObjectsStatsResponse{}, err + } + } whereBucket := func(table string) clause.Expr { if opts.Bucket == "" { return exprTRUE } - return sqlWhereBucket(table, opts.Bucket) + return gorm.Expr(fmt.Sprintf("%s.db_bucket_id = ?", table), bucketID) } // number of objects @@ -632,37 +639,49 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) return api.ObjectsStatsResponse{}, err } - fromContractSectors := gorm.Expr("contract_sectors cs") + fromContractSectors := gorm.Expr("contract_sectors cs WHERE 1=1") if opts.Bucket != "" { fromContractSectors = gorm.Expr(` contract_sectors cs INNER JOIN sectors s ON s.id = cs.db_sector_id INNER JOIN slabs sla ON sla.id = s.db_slab_id - INNER JOIN slices sli ON sli.db_slab_id = sla.id - INNER JOIN objects o ON o.id = sli.db_object_id AND (?) - `, whereBucket("o")) + WHERE EXISTS ( + SELECT 1 FROM slices sli + INNER JOIN objects o ON o.id = sli.db_object_id + WHERE sli.db_slab_id = sla.id AND o.db_bucket_id = ? + ) + `, bucketID) } var totalSectors uint64 - batchSize := 500000 - marker := uint64(0) - for offset := 0; ; offset += batchSize { - var result struct { - Sectors uint64 - Marker uint64 - } - res := s.db. - Model(&dbSector{}). - Raw("SELECT COUNT(*) as Sectors, MAX(sectors.db_sector_id) as Marker FROM (SELECT cs.db_sector_id FROM ? WHERE cs.db_sector_id > ? GROUP BY cs.db_sector_id LIMIT ?) sectors", fromContractSectors, marker, batchSize). - Scan(&result) - if err := res.Error; err != nil { - return api.ObjectsStatsResponse{}, err - } else if result.Sectors == 0 { - break // done + var sectorsInfo struct { + MinID uint + MaxID uint + Total uint + } + err = s.db.Model(&dbContractSector{}). + Raw("SELECT MIN(db_sector_id) as MinID, MAX(db_sector_id) as MaxID, COUNT(*) as Total FROM contract_sectors"). + Scan(§orsInfo).Error + if err != nil { + return api.ObjectsStatsResponse{}, err + } + + // compute a good batch size for the ids + batchSize := (sectorsInfo.MaxID - sectorsInfo.MinID) / (sectorsInfo.Total/500000 + 1) + + if sectorsInfo.Total > 0 { + for from, to := sectorsInfo.MinID, sectorsInfo.MinID+batchSize; from <= sectorsInfo.MaxID; from, to = to, to+batchSize { + var nSectors uint64 + err := s.db. + Model(&dbSector{}). + Raw("SELECT COUNT(DISTINCT cs.db_sector_id) FROM ? AND cs.db_sector_id >= ? AND cs.db_sector_id < ?", fromContractSectors, from, to). + Scan(&nSectors).Error + if err != nil { + return api.ObjectsStatsResponse{}, err + } + totalSectors += nSectors } - totalSectors += result.Sectors - marker = result.Marker } var totalUploaded int64 From 159276068278a4d3759ae034fe6fb7675da4ad53 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 15 Feb 2024 14:51:50 +0100 Subject: [PATCH 024/172] stores: use select count with subquery --- stores/metadata.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stores/metadata.go b/stores/metadata.go index 39a053528..cfd417dbb 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -675,7 +675,7 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) var nSectors uint64 err := s.db. Model(&dbSector{}). - Raw("SELECT COUNT(DISTINCT cs.db_sector_id) FROM ? AND cs.db_sector_id >= ? 
AND cs.db_sector_id < ?", fromContractSectors, from, to). + Raw("SELECT COUNT(*) FROM (SELECT DISTINCT cs.db_sector_id FROM ? AND cs.db_sector_id >= ? AND cs.db_sector_id < ?)", fromContractSectors, from, to). Scan(&nSectors).Error if err != nil { return api.ObjectsStatsResponse{}, err From 47a91acb19203b6ed889bbbfe77e6a95eda22018 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 15 Feb 2024 15:55:17 +0100 Subject: [PATCH 025/172] stores: use sectors table and don't filter sectors stats by bucket --- stores/metadata.go | 49 +++++------------------------------------ stores/metadata_test.go | 8 +++---- 2 files changed, 10 insertions(+), 47 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index cfd417dbb..c2e07ed00 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -639,54 +639,17 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) return api.ObjectsStatsResponse{}, err } - fromContractSectors := gorm.Expr("contract_sectors cs WHERE 1=1") - if opts.Bucket != "" { - fromContractSectors = gorm.Expr(` - contract_sectors cs - INNER JOIN sectors s ON s.id = cs.db_sector_id - INNER JOIN slabs sla ON sla.id = s.db_slab_id - WHERE EXISTS ( - SELECT 1 FROM slices sli - INNER JOIN objects o ON o.id = sli.db_object_id - WHERE sli.db_slab_id = sla.id AND o.db_bucket_id = ? - ) - `, bucketID) - } - - var totalSectors uint64 - - var sectorsInfo struct { - MinID uint - MaxID uint - Total uint - } - err = s.db.Model(&dbContractSector{}). - Raw("SELECT MIN(db_sector_id) as MinID, MAX(db_sector_id) as MaxID, COUNT(*) as Total FROM contract_sectors"). - Scan(§orsInfo).Error + var totalSectors int64 + err = s.db. + Model(&dbSector{}). + Count(&totalSectors).Error if err != nil { return api.ObjectsStatsResponse{}, err } - // compute a good batch size for the ids - batchSize := (sectorsInfo.MaxID - sectorsInfo.MinID) / (sectorsInfo.Total/500000 + 1) - - if sectorsInfo.Total > 0 { - for from, to := sectorsInfo.MinID, sectorsInfo.MinID+batchSize; from <= sectorsInfo.MaxID; from, to = to, to+batchSize { - var nSectors uint64 - err := s.db. - Model(&dbSector{}). - Raw("SELECT COUNT(*) FROM (SELECT DISTINCT cs.db_sector_id FROM ? AND cs.db_sector_id >= ? AND cs.db_sector_id < ?)", fromContractSectors, from, to). - Scan(&nSectors).Error - if err != nil { - return api.ObjectsStatsResponse{}, err - } - totalSectors += nSectors - } - } - var totalUploaded int64 err = s.db. - Table("?", fromContractSectors). + Model(&dbContractSector{}). Count(&totalUploaded). 
Error if err != nil { @@ -699,7 +662,7 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) NumUnfinishedObjects: unfinishedObjects, TotalUnfinishedObjectsSize: totalUnfinishedObjectsSize, TotalObjectsSize: objInfo.TotalObjectsSize, - TotalSectorsSize: totalSectors * rhpv2.SectorSize, + TotalSectorsSize: uint64(totalSectors) * rhpv2.SectorSize, TotalUploadedSize: uint64(totalUploaded) * rhpv2.SectorSize, }, nil } diff --git a/stores/metadata_test.go b/stores/metadata_test.go index a05c0be17..2d550c1d6 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -2524,10 +2524,10 @@ func TestObjectsStats(t *testing.T) { t.Fatal(err) } else if info.TotalObjectsSize != 0 { t.Fatal("wrong size", info.TotalObjectsSize) - } else if info.TotalSectorsSize != 0 { - t.Fatal("wrong size", info.TotalSectorsSize) - } else if info.TotalUploadedSize != 0 { - t.Fatal("wrong size", info.TotalUploadedSize) + } else if info.TotalSectorsSize != sectorsSize { + t.Fatal("wrong size", info.TotalSectorsSize, sectorsSize) + } else if info.TotalUploadedSize != sectorsSize*2 { + t.Fatal("wrong size", info.TotalUploadedSize, sectorsSize*2) } else if info.NumObjects != 0 { t.Fatal("wrong number of objects", info.NumObjects) } From 3e463cfb4f7103d976ca2d84bb4baeee9438d067 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 15 Feb 2024 16:04:48 +0100 Subject: [PATCH 026/172] stores: use sum over total shards --- stores/metadata.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index c2e07ed00..aae62073d 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -641,8 +641,9 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) var totalSectors int64 err = s.db. - Model(&dbSector{}). - Count(&totalSectors).Error + Model(&dbSlab{}). + Select("COALESCE(SUM(total_shards), 0)"). + Scan(&totalSectors).Error if err != nil { return api.ObjectsStatsResponse{}, err } From 3f6dad71c9ac5add056534d4d8d6da0f18b68837 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 15 Feb 2024 17:06:29 +0100 Subject: [PATCH 027/172] stores: use contract size for total uploaded size --- stores/metadata.go | 24 ++++++++++++++++++++---- stores/metadata_test.go | 19 +++++++++++-------- 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index aae62073d..3f13097ad 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -639,9 +639,18 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) return api.ObjectsStatsResponse{}, err } + fromSlabs := gorm.Expr("slabs sla") + if opts.Bucket != "" { + fromSlabs = gorm.Expr(` + slabs sla + INNER JOIN slices sli ON sli.db_slab_id = sla.id + INNER JOIN objects o ON o.id = sli.db_object_id AND (?) + `, whereBucket("o")) + } + var totalSectors int64 err = s.db. - Model(&dbSlab{}). + Table("?", fromSlabs). Select("COALESCE(SUM(total_shards), 0)"). Scan(&totalSectors).Error if err != nil { @@ -650,13 +659,20 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) var totalUploaded int64 err = s.db. - Model(&dbContractSector{}). - Count(&totalUploaded). + Model(&dbContract{}). + Select("COALESCE(SUM(size), 0)"). + Scan(&totalUploaded). 
Error if err != nil { return api.ObjectsStatsResponse{}, err } + var contracts []dbContract + err = s.db.Find(&contracts).Error + if err != nil { + return api.ObjectsStatsResponse{}, err + } + return api.ObjectsStatsResponse{ MinHealth: objInfo.MinHealth, NumObjects: objInfo.NumObjects, @@ -664,7 +680,7 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) TotalUnfinishedObjectsSize: totalUnfinishedObjectsSize, TotalObjectsSize: objInfo.TotalObjectsSize, TotalSectorsSize: uint64(totalSectors) * rhpv2.SectorSize, - TotalUploadedSize: uint64(totalUploaded) * rhpv2.SectorSize, + TotalUploadedSize: uint64(totalUploaded), }, nil } diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 2d550c1d6..da9ecbbef 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -2446,6 +2446,7 @@ func TestObjectsStats(t *testing.T) { // Create a few objects of different size. var objectsSize uint64 var sectorsSize uint64 + var totalUploadedSize uint64 for i := 0; i < 2; i++ { obj := newTestObject(1) objectsSize += uint64(obj.TotalSize()) @@ -2458,10 +2459,11 @@ func TestObjectsStats(t *testing.T) { t.Fatal(err) } for _, fcid := range fcids { - _, err := ss.addTestContract(fcid, hpk) + c, err := ss.addTestContract(fcid, hpk) if err != nil { t.Fatal(err) } + totalUploadedSize += c.Size } } } @@ -2482,10 +2484,11 @@ func TestObjectsStats(t *testing.T) { } var newContractID types.FileContractID frand.Read(newContractID[:]) - _, err = ss.addTestContract(newContractID, types.PublicKey{}) + c, err := ss.addTestContract(newContractID, types.PublicKey{}) if err != nil { t.Fatal(err) } + totalUploadedSize += c.Size newContract, err := ss.contract(context.Background(), fileContractID(newContractID)) if err != nil { t.Fatal(err) @@ -2510,8 +2513,8 @@ func TestObjectsStats(t *testing.T) { t.Fatal("wrong size", info.TotalObjectsSize, objectsSize) } else if info.TotalSectorsSize != sectorsSize { t.Fatal("wrong size", info.TotalSectorsSize, sectorsSize) - } else if info.TotalUploadedSize != sectorsSize*2 { - t.Fatal("wrong size", info.TotalUploadedSize, sectorsSize*2) + } else if info.TotalUploadedSize != totalUploadedSize { + t.Fatal("wrong size", info.TotalUploadedSize, totalUploadedSize) } else if info.NumObjects != 2 { t.Fatal("wrong number of objects", info.NumObjects, 2) } @@ -2524,10 +2527,10 @@ func TestObjectsStats(t *testing.T) { t.Fatal(err) } else if info.TotalObjectsSize != 0 { t.Fatal("wrong size", info.TotalObjectsSize) - } else if info.TotalSectorsSize != sectorsSize { - t.Fatal("wrong size", info.TotalSectorsSize, sectorsSize) - } else if info.TotalUploadedSize != sectorsSize*2 { - t.Fatal("wrong size", info.TotalUploadedSize, sectorsSize*2) + } else if info.TotalSectorsSize != 0 { + t.Fatal("wrong size", info.TotalSectorsSize, 0) + } else if info.TotalUploadedSize != totalUploadedSize { + t.Fatal("wrong size", info.TotalUploadedSize, totalUploadedSize) } else if info.NumObjects != 0 { t.Fatal("wrong number of objects", info.NumObjects) } From 2c627d950532477b22d9be0668f76a29c240b49c Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 15 Feb 2024 17:17:36 +0100 Subject: [PATCH 028/172] testing: fix TestUploadDownloadExtended --- internal/testing/cluster_test.go | 49 +++++++++++++++++--------------- 1 file changed, 26 insertions(+), 23 deletions(-) diff --git a/internal/testing/cluster_test.go b/internal/testing/cluster_test.go index 13903115d..4262e493b 100644 --- a/internal/testing/cluster_test.go +++ b/internal/testing/cluster_test.go @@ -697,30 
+697,33 @@ func TestUploadDownloadExtended(t *testing.T) { } // check objects stats. - for _, opts := range []api.ObjectsStatsOpts{ - {}, // any bucket - {Bucket: api.DefaultBucketName}, // specific bucket - } { - info, err := cluster.Bus.ObjectsStats(context.Background(), opts) - tt.OK(err) - objectsSize := uint64(len(file1) + len(file2) + len(small) + len(large)) - if info.TotalObjectsSize != objectsSize { - t.Error("wrong size", info.TotalObjectsSize, objectsSize) - } - sectorsSize := 15 * rhpv2.SectorSize - if info.TotalSectorsSize != uint64(sectorsSize) { - t.Error("wrong size", info.TotalSectorsSize, sectorsSize) - } - if info.TotalUploadedSize != uint64(sectorsSize) { - t.Error("wrong size", info.TotalUploadedSize, sectorsSize) - } - if info.NumObjects != 4 { - t.Error("wrong number of objects", info.NumObjects, 4) - } - if info.MinHealth != 1 { - t.Errorf("expected minHealth of 1, got %v", info.MinHealth) + tt.Retry(100, 100*time.Millisecond, func() error { + for _, opts := range []api.ObjectsStatsOpts{ + {}, // any bucket + {Bucket: api.DefaultBucketName}, // specific bucket + } { + info, err := cluster.Bus.ObjectsStats(context.Background(), opts) + tt.OK(err) + objectsSize := uint64(len(file1) + len(file2) + len(small) + len(large)) + if info.TotalObjectsSize != objectsSize { + return fmt.Errorf("wrong size %v %v", info.TotalObjectsSize, objectsSize) + } + sectorsSize := 15 * rhpv2.SectorSize + if info.TotalSectorsSize != uint64(sectorsSize) { + return fmt.Errorf("wrong size %v %v", info.TotalSectorsSize, sectorsSize) + } + if info.TotalUploadedSize != uint64(sectorsSize) { + return fmt.Errorf("wrong size %v %v", info.TotalUploadedSize, sectorsSize) + } + if info.NumObjects != 4 { + return fmt.Errorf("wrong number of objects %v %v", info.NumObjects, 4) + } + if info.MinHealth != 1 { + return fmt.Errorf("expected minHealth of 1, got %v", info.MinHealth) + } } - } + return nil + }) // download the data for _, data := range [][]byte{small, large} { From c3802d8466d2c7abbf418349a43a1bee41d79239 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 16 Feb 2024 09:03:24 +0100 Subject: [PATCH 029/172] stores: fix TestSlabBufferStats --- stores/metadata.go | 1 + 1 file changed, 1 insertion(+) diff --git a/stores/metadata.go b/stores/metadata.go index 3f13097ad..4a2ab8657 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -652,6 +652,7 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) err = s.db. Table("?", fromSlabs). Select("COALESCE(SUM(total_shards), 0)"). + Where("db_buffered_slab_id IS NULL"). Scan(&totalSectors).Error if err != nil { return api.ObjectsStatsResponse{}, err From 6ed8e754ba2706d419fe2d80f01a6802415b35fd Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 16 Feb 2024 10:20:23 +0100 Subject: [PATCH 030/172] stores: use WHERE EXISTS --- stores/metadata.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 4a2ab8657..d3622502f 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -642,9 +642,13 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) fromSlabs := gorm.Expr("slabs sla") if opts.Bucket != "" { fromSlabs = gorm.Expr(` + (SELECT * FROM slabs sla - INNER JOIN slices sli ON sli.db_slab_id = sla.id - INNER JOIN objects o ON o.id = sli.db_object_id AND (?) + WHERE EXISTS ( + SELECT 1 FROM slices sli + INNER JOIN objects o ON o.id = sli.db_object_id AND ? 
+ WHERE sli.db_slab_id = sla.id + )) sla `, whereBucket("o")) } From 5f8233fb17aa431aafb987654fe879db2f9a35c8 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 16 Feb 2024 10:59:46 +0100 Subject: [PATCH 031/172] stores: remove subquery --- stores/metadata.go | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index d3622502f..2a879d7ae 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -639,25 +639,22 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) return api.ObjectsStatsResponse{}, err } - fromSlabs := gorm.Expr("slabs sla") + var totalSectors int64 + totalSectorsQuery := s.db. + Table("slabs sla"). + Select("COALESCE(SUM(total_shards), 0)"). + Where("db_buffered_slab_id IS NULL") + if opts.Bucket != "" { - fromSlabs = gorm.Expr(` - (SELECT * FROM - slabs sla - WHERE EXISTS ( + totalSectorsQuery = totalSectorsQuery.Where(` + EXISTS ( SELECT 1 FROM slices sli - INNER JOIN objects o ON o.id = sli.db_object_id AND ? + INNER JOIN objects o ON o.id = sli.db_object_id AND o.db_bucket_id = ? WHERE sli.db_slab_id = sla.id - )) sla - `, whereBucket("o")) + ) + `, bucketID) } - - var totalSectors int64 - err = s.db. - Table("?", fromSlabs). - Select("COALESCE(SUM(total_shards), 0)"). - Where("db_buffered_slab_id IS NULL"). - Scan(&totalSectors).Error + err = totalSectorsQuery.Scan(&totalSectors).Error if err != nil { return api.ObjectsStatsResponse{}, err } From f68e6ee779a3aa6a10e1a46475679e227ab6c785 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 16 Feb 2024 11:16:37 +0100 Subject: [PATCH 032/172] stores: remove contract query --- stores/metadata.go | 47 +++++++++++++++++++--------------------------- 1 file changed, 19 insertions(+), 28 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 2a879d7ae..d257bf681 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -583,7 +583,7 @@ func (s *SQLStore) ListBuckets(ctx context.Context) ([]api.Bucket, error) { // reduce locking and make sure all results are consistent, everything is done // within a single transaction. func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) { - // if no bucket is specified, we consider all objects + // fetch bucket id if a bucket was specified var bucketID uint if opts.Bucket != "" { err := s.db.Model(&dbBucket{}).Select("id").Where("name = ?", opts.Bucket).Take(&bucketID).Error @@ -591,12 +591,6 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) return api.ObjectsStatsResponse{}, err } } - whereBucket := func(table string) clause.Expr { - if opts.Bucket == "" { - return exprTRUE - } - return gorm.Expr(fmt.Sprintf("%s.db_bucket_id = ?", table), bucketID) - } // number of objects var objInfo struct { @@ -604,37 +598,40 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) MinHealth float64 TotalObjectsSize uint64 } - err := s.db. + objInfoQuery := s.db. Model(&dbObject{}). - Select("COUNT(*) AS NumObjects, COALESCE(MIN(health), 1) as MinHealth, SUM(size) AS TotalObjectsSize"). - Where(whereBucket(dbObject{}.TableName())). - Scan(&objInfo). 
- Error + Select("COUNT(*) AS NumObjects, COALESCE(MIN(health), 1) as MinHealth, SUM(size) AS TotalObjectsSize") + if opts.Bucket != "" { + objInfoQuery = objInfoQuery.Where("db_bucket_id", bucketID) + } + err := objInfoQuery.Scan(&objInfo).Error if err != nil { return api.ObjectsStatsResponse{}, err } // number of unfinished objects var unfinishedObjects uint64 - err = s.db. + unfinishedObjectsQuery := s.db. Model(&dbMultipartUpload{}). - Select("COUNT(*)"). - Where(whereBucket(dbMultipartUpload{}.TableName())). - Scan(&unfinishedObjects). - Error + Select("COUNT(*)") + if opts.Bucket != "" { + unfinishedObjectsQuery = unfinishedObjectsQuery.Where("db_bucket_id", bucketID) + } + err = unfinishedObjectsQuery.Scan(&unfinishedObjects).Error if err != nil { return api.ObjectsStatsResponse{}, err } // size of unfinished objects var totalUnfinishedObjectsSize uint64 - err = s.db. + totalUnfinishedObjectsSizeQuery := s.db. Model(&dbMultipartPart{}). Joins("INNER JOIN multipart_uploads mu ON multipart_parts.db_multipart_upload_id = mu.id"). - Select("COALESCE(SUM(size), 0)"). - Where(whereBucket("mu")). - Scan(&totalUnfinishedObjectsSize). - Error + Select("COALESCE(SUM(size), 0)") + if opts.Bucket != "" { + totalUnfinishedObjectsSizeQuery = totalUnfinishedObjectsSizeQuery.Where("db_bucket_id", bucketID) + } + err = totalUnfinishedObjectsSizeQuery.Scan(&totalUnfinishedObjectsSize).Error if err != nil { return api.ObjectsStatsResponse{}, err } @@ -669,12 +666,6 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) return api.ObjectsStatsResponse{}, err } - var contracts []dbContract - err = s.db.Find(&contracts).Error - if err != nil { - return api.ObjectsStatsResponse{}, err - } - return api.ObjectsStatsResponse{ MinHealth: objInfo.MinHealth, NumObjects: objInfo.NumObjects, From fe2097a38230bd001326125f8abf89fbdc6ec5cc Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 16 Feb 2024 13:01:55 +0100 Subject: [PATCH 033/172] bus: merge alert dismiss routes --- bus/bus.go | 18 ++++++++++-------- bus/client/alerts.go | 18 +++++++++++++----- 2 files changed, 23 insertions(+), 13 deletions(-) diff --git a/bus/bus.go b/bus/bus.go index b8c21605c..2680c4810 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -245,10 +245,9 @@ func (b *bus) Handler() http.Handler { "POST /account/:id/requiressync": b.accountsRequiresSyncHandlerPOST, "POST /account/:id/resetdrift": b.accountsResetDriftHandlerPOST, - "GET /alerts": b.handleGETAlerts, - "POST /alerts/dismiss": b.handlePOSTAlertsDismiss, - "POST /alerts/dismissall": b.handlePOSTAlertsDismissAll, - "POST /alerts/register": b.handlePOSTAlertsRegister, + "GET /alerts": b.handleGETAlerts, + "POST /alerts/dismiss": b.handlePOSTAlertsDismiss, + "POST /alerts/register": b.handlePOSTAlertsRegister, "GET /autopilots": b.autopilotsListHandlerGET, "GET /autopilot/:id": b.autopilotsHandlerGET, @@ -1730,6 +1729,13 @@ func (b *bus) handleGETAlerts(jc jape.Context) { } func (b *bus) handlePOSTAlertsDismiss(jc jape.Context) { + var all bool + if jc.DecodeForm("all", &all) != nil { + return + } else if all { + jc.Check("failed to dismiss all alerts", b.alertMgr.DismissAllAlerts(jc.Request.Context())) + return + } var ids []types.Hash256 if jc.Decode(&ids) != nil { return @@ -1737,10 +1743,6 @@ func (b *bus) handlePOSTAlertsDismiss(jc jape.Context) { jc.Check("failed to dismiss alerts", b.alertMgr.DismissAlerts(jc.Request.Context(), ids...)) } -func (b *bus) handlePOSTAlertsDismissAll(jc jape.Context) { - jc.Check("failed to dismiss alerts", 
b.alertMgr.DismissAllAlerts(jc.Request.Context())) -} - func (b *bus) handlePOSTAlertsRegister(jc jape.Context) { var alert alerts.Alert if jc.Decode(&alert) != nil { diff --git a/bus/client/alerts.go b/bus/client/alerts.go index 6151db598..bff3c13a5 100644 --- a/bus/client/alerts.go +++ b/bus/client/alerts.go @@ -22,14 +22,22 @@ func (c *Client) Alerts(opts alerts.AlertsOpts) (alerts []alerts.Alert, err erro return } -// DismissAllAlerts dimisses all alerts. +// DismissAlerts dismisses the alerts with the given IDs. +func (c *Client) DismissAlerts(ctx context.Context, ids ...types.Hash256) error { + return c.dismissAlerts(ctx, false, ids...) +} + +// DismissAllAlerts dismisses all registered alerts. func (c *Client) DismissAllAlerts(ctx context.Context) error { - return c.c.WithContext(ctx).POST("/alerts/dismissall", nil, nil) + return c.dismissAlerts(ctx, true) } -// DismissAlerts dimisses the alerts with the given IDs. -func (c *Client) DismissAlerts(ctx context.Context, ids ...types.Hash256) error { - return c.c.WithContext(ctx).POST("/alerts/dismiss", ids, nil) +func (c *Client) dismissAlerts(ctx context.Context, all bool, ids ...types.Hash256) error { + values := url.Values{} + if all { + values.Set("all", fmt.Sprint(true)) + } + return c.c.WithContext(ctx).POST("/alerts/dismiss?"+values.Encode(), ids, nil) } // RegisterAlert registers the given alert. From a13313bacbd2410782f982a13b06fa774ad2cf66 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 16 Feb 2024 13:49:24 +0100 Subject: [PATCH 034/172] bus: add onlymetadata flag to objects endpoint --- api/object.go | 20 +++++---- bus/bus.go | 16 +++++-- s3/backend.go | 5 ++- stores/metadata.go | 110 ++++++++++++++++++++++++++++++++------------- 4 files changed, 108 insertions(+), 43 deletions(-) diff --git a/api/object.go b/api/object.go index e4fd4b465..09f4b04ab 100644 --- a/api/object.go +++ b/api/object.go @@ -54,7 +54,7 @@ type ( Object struct { Metadata ObjectUserMetadata `json:"metadata,omitempty"` ObjectMetadata - object.Object + object.Object `json:"omitempty"` } // ObjectMetadata contains various metadata about an object. 
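// A minimal usage sketch for the new OnlyMetadata flag introduced below,
// assuming a bus client `b` (the s3 HeadObject change further down makes the
// same call): the bus serves such requests via ObjectMetadata, so only
// ObjectMetadata and user metadata come back and the embedded object.Object
// stays empty.
//
//	res, err := b.Object(ctx, bucket, path, api.GetObjectOptions{
//		OnlyMetadata: true, // sent as the ?onlymetadata=true form value
//	})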
@@ -212,13 +212,14 @@ type ( } GetObjectOptions struct { - Prefix string - Offset int - Limit int - IgnoreDelim bool - Marker string - SortBy string - SortDir string + Prefix string + Offset int + Limit int + IgnoreDelim bool + Marker string + OnlyMetadata bool + SortBy string + SortDir string } ListObjectOptions struct { @@ -324,6 +325,9 @@ func (opts GetObjectOptions) Apply(values url.Values) { if opts.Marker != "" { values.Set("marker", opts.Marker) } + if opts.OnlyMetadata { + values.Set("onlymetadata", "true") + } if opts.SortBy != "" { values.Set("sortBy", opts.SortBy) } diff --git a/bus/bus.go b/bus/bus.go index b8c21605c..66620a684 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -137,6 +137,7 @@ type ( CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath, dstPath, mimeType string, metadata api.ObjectUserMetadata) (api.ObjectMetadata, error) ListObjects(ctx context.Context, bucketName, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) Object(ctx context.Context, bucketName, path string) (api.Object, error) + ObjectMetadata(ctx context.Context, bucketName, path string) (api.Object, error) ObjectEntries(ctx context.Context, bucketName, path, prefix, sortBy, sortDir, marker string, offset, limit int) ([]api.ObjectMetadata, bool, error) ObjectsBySlabKey(ctx context.Context, bucketName string, slabKey object.EncryptionKey) ([]api.ObjectMetadata, error) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) @@ -1192,13 +1193,22 @@ func (b *bus) objectsHandlerGET(jc jape.Context) { if jc.DecodeForm("bucket", &bucket) != nil { return } + var onlymetadata bool + if jc.DecodeForm("onlymetadata", &onlymetadata) != nil { + return + } - o, err := b.ms.Object(jc.Request.Context(), bucket, path) + var o api.Object + var err error + if onlymetadata { + o, err = b.ms.ObjectMetadata(jc.Request.Context(), bucket, path) + } else { + o, err = b.ms.Object(jc.Request.Context(), bucket, path) + } if errors.Is(err, api.ErrObjectNotFound) { jc.Error(err, http.StatusNotFound) return - } - if jc.Check("couldn't load object", err) != nil { + } else if jc.Check("couldn't load object", err) != nil { return } jc.Encode(api.ObjectsResponse{Object: &o}) diff --git a/s3/backend.go b/s3/backend.go index a481da727..cb53be625 100644 --- a/s3/backend.go +++ b/s3/backend.go @@ -287,7 +287,10 @@ func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, range // HeadObject should return a NotFound() error if the object does not // exist. 
func (s *s3) HeadObject(ctx context.Context, bucketName, objectName string) (*gofakes3.Object, error) { - res, err := s.b.Object(ctx, bucketName, objectName, api.GetObjectOptions{IgnoreDelim: true}) + res, err := s.b.Object(ctx, bucketName, objectName, api.GetObjectOptions{ + IgnoreDelim: true, + OnlyMetadata: true, + }) if err != nil && strings.Contains(err.Error(), api.ErrObjectNotFound.Error()) { return nil, gofakes3.KeyNotFound(objectName) } else if err != nil { diff --git a/stores/metadata.go b/stores/metadata.go index 68947ed95..15a4690ad 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -410,14 +410,14 @@ func (s dbSlab) convert() (slab object.Slab, err error) { } func (raw rawObjectMetadata) convert() api.ObjectMetadata { - return api.ObjectMetadata{ - ETag: raw.ETag, - Health: raw.Health, - MimeType: raw.MimeType, - ModTime: api.TimeRFC3339(time.Time(raw.ModTime).UTC()), - Name: raw.Name, - Size: raw.Size, - } + return newObjectMetadata( + raw.Name, + raw.ETag, + raw.MimeType, + raw.Health, + time.Time(raw.ModTime), + raw.Size, + ) } func (raw rawObject) toSlabSlice() (slice object.SlabSlice, _ error) { @@ -1556,13 +1556,14 @@ func (s *SQLStore) CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath // No copying is happening. We just update the metadata on the src // object. srcObj.MimeType = mimeType - om = api.ObjectMetadata{ - Health: srcObj.Health, - MimeType: srcObj.MimeType, - ModTime: api.TimeRFC3339(srcObj.CreatedAt.UTC()), - Name: srcObj.ObjectID, - Size: srcObj.Size, - } + om = newObjectMetadata( + srcObj.ObjectID, + srcObj.Etag, + srcObj.MimeType, + srcObj.Health, + srcObj.CreatedAt, + srcObj.Size, + ) if err := s.updateUserMetadata(tx, srcObj.ID, metadata); err != nil { return fmt.Errorf("failed to update user metadata: %w", err) } @@ -1610,14 +1611,14 @@ func (s *SQLStore) CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath return fmt.Errorf("failed to create object metadata: %w", err) } - om = api.ObjectMetadata{ - MimeType: dstObj.MimeType, - ETag: dstObj.Etag, - Health: srcObj.Health, - ModTime: api.TimeRFC3339(dstObj.CreatedAt.UTC()), - Name: dstObj.ObjectID, - Size: dstObj.Size, - } + om = newObjectMetadata( + dstObj.ObjectID, + dstObj.Etag, + dstObj.MimeType, + dstObj.Health, + dstObj.CreatedAt, + dstObj.Size, + ) return nil }) return @@ -2320,14 +2321,14 @@ func (s *SQLStore) objectHydrate(ctx context.Context, tx *gorm.DB, bucket, path // return object return api.Object{ Metadata: metadata, - ObjectMetadata: api.ObjectMetadata{ - ETag: obj[0].ObjectETag, - Health: obj[0].ObjectHealth, - MimeType: obj[0].ObjectMimeType, - ModTime: api.TimeRFC3339(obj[0].ObjectModTime.UTC()), - Name: obj[0].ObjectName, - Size: obj[0].ObjectSize, - }, + ObjectMetadata: newObjectMetadata( + obj[0].ObjectName, + obj[0].ObjectETag, + obj[0].ObjectMimeType, + obj[0].ObjectHealth, + obj[0].ObjectModTime, + obj[0].ObjectSize, + ), Object: object.Object{ Key: key, Slabs: slabs, @@ -2335,6 +2336,42 @@ func (s *SQLStore) objectHydrate(ctx context.Context, tx *gorm.DB, bucket, path }, nil } +// ObjectMetadata returns an object's metadata +func (s *SQLStore) ObjectMetadata(ctx context.Context, bucket, path string) (api.Object, error) { + var resp api.Object + err := s.db.Transaction(func(tx *gorm.DB) error { + var obj dbObject + err := tx.Model(&dbObject{}). + Joins("INNER JOIN buckets b ON objects.db_bucket_id = b.id"). + Where("b.name", bucket). + Where("object_id", path). + Take(&obj). 
+ Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrObjectNotFound + } else if err != nil { + return err + } + oum, err := s.objectMetadata(ctx, tx, bucket, path) + if err != nil { + return err + } + resp = api.Object{ + ObjectMetadata: newObjectMetadata( + obj.ObjectID, + obj.Etag, + obj.MimeType, + obj.Health, + obj.CreatedAt, + obj.Size, + ), + Metadata: oum, + } + return nil + }) + return resp, err +} + func (s *SQLStore) objectMetadata(ctx context.Context, tx *gorm.DB, bucket, path string) (api.ObjectUserMetadata, error) { var rows []dbObjectUserMetadata err := tx. @@ -2355,6 +2392,17 @@ func (s *SQLStore) objectMetadata(ctx context.Context, tx *gorm.DB, bucket, path return metadata, nil } +func newObjectMetadata(name, etag, mimeType string, health float64, modTime time.Time, size int64) api.ObjectMetadata { + return api.ObjectMetadata{ + ETag: etag, + Health: health, + ModTime: api.TimeRFC3339(modTime.UTC()), + Name: name, + Size: size, + MimeType: mimeType, + } +} + func (s *SQLStore) objectRaw(ctx context.Context, txn *gorm.DB, bucket string, path string) (rows rawObject, err error) { // NOTE: we LEFT JOIN here because empty objects are valid and need to be // included in the result set, when we convert the rawObject before From 010037927edaae14420a1bedf1972f5ecd5617d4 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 16 Feb 2024 14:19:20 +0100 Subject: [PATCH 035/172] testing: fix TestS3Basic --- object/object.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/object/object.go b/object/object.go index 49375f3b4..e3c2fe3e7 100644 --- a/object/object.go +++ b/object/object.go @@ -43,6 +43,9 @@ func (k *EncryptionKey) UnmarshalBinary(b []byte) error { // String implements fmt.Stringer. func (k EncryptionKey) String() string { + if k.entropy == nil { + return "" + } return "key:" + hex.EncodeToString(k.entropy[:]) } @@ -53,6 +56,9 @@ func (k EncryptionKey) MarshalText() ([]byte, error) { // UnmarshalText implements the encoding.TextUnmarshaler interface. 
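// Taken together with the String guard above, the two new checks let a
// zero-valued key round-trip through its text encoding. A minimal sketch,
// assuming a key whose entropy pointer is nil:
//
//	var k object.EncryptionKey
//	_ = k.String()           // "" instead of dereferencing nil entropy
//	_ = k.UnmarshalText(nil) // returns nil and leaves k zero-valued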
func (k *EncryptionKey) UnmarshalText(b []byte) error { + if len(b) == 0 { + return nil + } k.entropy = new([32]byte) if n, err := hex.Decode(k.entropy[:], []byte(bytes.TrimPrefix(b, []byte("key:")))); err != nil { return err From 781290d8546147fcf18d1ea0f64f20742749409c Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 16 Feb 2024 14:32:40 +0100 Subject: [PATCH 036/172] worker: fix ndf in TestDownloaderStopped --- worker/downloader_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/worker/downloader_test.go b/worker/downloader_test.go index 357fc2ee8..c1d860c24 100644 --- a/worker/downloader_test.go +++ b/worker/downloader_test.go @@ -16,7 +16,7 @@ func TestDownloaderStopped(t *testing.T) { req := sectorDownloadReq{ resps: §orResponses{ - c: make(chan struct{}), + c: make(chan struct{}, 1), }, } dl.enqueue(&req) From f0ea509bea36674c3cf6d007dca47a51b7fd78f4 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 16 Feb 2024 14:33:10 +0100 Subject: [PATCH 037/172] worker: fix ndf in TestDownloaderStopped --- worker/downloader_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/worker/downloader_test.go b/worker/downloader_test.go index 357fc2ee8..c1d860c24 100644 --- a/worker/downloader_test.go +++ b/worker/downloader_test.go @@ -16,7 +16,7 @@ func TestDownloaderStopped(t *testing.T) { req := sectorDownloadReq{ resps: §orResponses{ - c: make(chan struct{}), + c: make(chan struct{}, 1), }, } dl.enqueue(&req) From 814b8e571d6808aa573e33821e7a1fe4d27c77b6 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 16 Feb 2024 15:09:38 +0100 Subject: [PATCH 038/172] bus: add generateKey option to create multipart endpoint --- api/multipart.go | 11 +++++++--- bus/bus.go | 4 +++- bus/client/multipart-upload.go | 11 +++++----- internal/testing/cluster_test.go | 2 +- worker/worker.go | 35 ++++++++++++++++---------------- 5 files changed, 35 insertions(+), 28 deletions(-) diff --git a/api/multipart.go b/api/multipart.go index 955b78849..e786a7c25 100644 --- a/api/multipart.go +++ b/api/multipart.go @@ -46,9 +46,10 @@ type ( } CreateMultipartOptions struct { - Key object.EncryptionKey - MimeType string - Metadata ObjectUserMetadata + GenerateKey bool + Key object.EncryptionKey + MimeType string + Metadata ObjectUserMetadata } ) @@ -86,6 +87,10 @@ type ( Key object.EncryptionKey `json:"key"` MimeType string `json:"mimeType"` Metadata ObjectUserMetadata `json:"metadata"` + + // TODO: The next major version change should invert this to create a + // key by default + GenerateKey bool `json:"generateKey"` } MultipartCreateResponse struct { diff --git a/bus/bus.go b/bus/bus.go index b8c21605c..58739e11a 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -2178,7 +2178,9 @@ func (b *bus) multipartHandlerCreatePOST(jc jape.Context) { } key := req.Key - if key == (object.EncryptionKey{}) { + if req.GenerateKey { + key = object.GenerateEncryptionKey() + } else if key == (object.EncryptionKey{}) { key = object.NoOpKey } diff --git a/bus/client/multipart-upload.go b/bus/client/multipart-upload.go index ffa4d8dc8..281019487 100644 --- a/bus/client/multipart-upload.go +++ b/bus/client/multipart-upload.go @@ -46,11 +46,12 @@ func (c *Client) CompleteMultipartUpload(ctx context.Context, bucket, path, uplo // CreateMultipartUpload creates a new multipart upload. 
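// A short usage sketch, assuming a bus client `b` (the cluster test below
// switches to the first form): on the bus, GenerateKey takes precedence over
// an explicit key, and a zero-valued key still falls back to object.NoOpKey.
//
//	// encrypt parts with a fresh per-upload key:
//	mpr, err := b.CreateMultipartUpload(ctx, bucket, path, api.CreateMultipartOptions{GenerateKey: true})
//
//	// or leave the upload unencrypted:
//	mpr, err = b.CreateMultipartUpload(ctx, bucket, path, api.CreateMultipartOptions{})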
func (c *Client) CreateMultipartUpload(ctx context.Context, bucket, path string, opts api.CreateMultipartOptions) (resp api.MultipartCreateResponse, err error) { err = c.c.WithContext(ctx).POST("/multipart/create", api.MultipartCreateRequest{ - Bucket: bucket, - Path: path, - Key: opts.Key, - MimeType: opts.MimeType, - Metadata: opts.Metadata, + Bucket: bucket, + GenerateKey: opts.GenerateKey, + Path: path, + Key: opts.Key, + MimeType: opts.MimeType, + Metadata: opts.Metadata, }, &resp) return } diff --git a/internal/testing/cluster_test.go b/internal/testing/cluster_test.go index d290faf27..1b57e1e6c 100644 --- a/internal/testing/cluster_test.go +++ b/internal/testing/cluster_test.go @@ -1996,7 +1996,7 @@ func TestMultipartUploads(t *testing.T) { // Start a new multipart upload. objPath := "/foo" - mpr, err := b.CreateMultipartUpload(context.Background(), api.DefaultBucketName, objPath, api.CreateMultipartOptions{Key: object.GenerateEncryptionKey()}) + mpr, err := b.CreateMultipartUpload(context.Background(), api.DefaultBucketName, objPath, api.CreateMultipartOptions{GenerateKey: true}) tt.OK(err) if mpr.UploadID == "" { t.Fatal("expected non-empty upload ID") diff --git a/worker/worker.go b/worker/worker.go index 8a2a9b1f3..2a2191d2d 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1126,15 +1126,7 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { return } - // make sure only one of the following is set - var disablePreshardingEncryption bool - if jc.DecodeForm("disablepreshardingencryption", &disablePreshardingEncryption) != nil { - return - } - if !disablePreshardingEncryption && jc.Request.FormValue("offset") == "" { - jc.Error(errors.New("if presharding encryption isn't disabled, the offset needs to be set"), http.StatusBadRequest) - return - } + // get the offset var offset int if jc.DecodeForm("offset", &offset) != nil { return @@ -1143,23 +1135,30 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { return } + // fetch upload from bus + upload, err := w.bus.MultipartUpload(ctx, uploadID) + if isError(err, api.ErrMultipartUploadNotFound) { + jc.Error(err, http.StatusNotFound) + return + } else if jc.Check("failed to fetch multipart upload", err) != nil { + return + } + // built options opts := []UploadOption{ WithBlockHeight(up.CurrentHeight), WithContractSet(up.ContractSet), WithPacking(up.UploadPacking), WithRedundancySettings(up.RedundancySettings), + WithCustomKey(upload.Key), } - if disablePreshardingEncryption { - opts = append(opts, WithCustomKey(object.NoOpKey)) - } else { - upload, err := w.bus.MultipartUpload(ctx, uploadID) - if err != nil { - jc.Error(err, http.StatusBadRequest) - return - } + + // make sure only one of the following is set + if encryptionEnabled := upload.Key != object.NoOpKey; encryptionEnabled && jc.Request.FormValue("offset") == "" { + jc.Error(errors.New("if presharding encryption isn't disabled, the offset needs to be set"), http.StatusBadRequest) + return + } else if encryptionEnabled { opts = append(opts, WithCustomEncryptionOffset(uint64(offset))) - opts = append(opts, WithCustomKey(upload.Key)) } // attach gouging checker to the context From a2035a8ffcc18517dcc34a15898ea395bbea6cbf Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 16 Feb 2024 15:15:27 +0100 Subject: [PATCH 039/172] bus: remove DisablePreshardingEncryption --- api/object.go | 30 +++++++++++------------------- internal/testing/cluster_test.go | 11 +++++++---- s3/backend.go | 3 +-- 3 files changed, 19 insertions(+), 25 deletions(-) diff 
--git a/api/object.go b/api/object.go index e4fd4b465..506f0eed1 100644 --- a/api/object.go +++ b/api/object.go @@ -235,20 +235,18 @@ type ( // UploadObjectOptions is the options type for the worker client. UploadObjectOptions struct { - Offset int - MinShards int - TotalShards int - ContractSet string - DisablePreshardingEncryption bool - ContentLength int64 - MimeType string - Metadata ObjectUserMetadata + Offset int + MinShards int + TotalShards int + ContractSet string + ContentLength int64 + MimeType string + Metadata ObjectUserMetadata } UploadMultipartUploadPartOptions struct { - DisablePreshardingEncryption bool - EncryptionOffset int - ContentLength int64 + EncryptionOffset *int + ContentLength int64 } ) @@ -268,9 +266,6 @@ func (opts UploadObjectOptions) ApplyValues(values url.Values) { if opts.MimeType != "" { values.Set("mimetype", opts.MimeType) } - if opts.DisablePreshardingEncryption { - values.Set("disablepreshardingencryption", "true") - } } func (opts UploadObjectOptions) ApplyHeaders(h http.Header) { @@ -280,11 +275,8 @@ func (opts UploadObjectOptions) ApplyHeaders(h http.Header) { } func (opts UploadMultipartUploadPartOptions) Apply(values url.Values) { - if opts.DisablePreshardingEncryption { - values.Set("disablepreshardingencryption", "true") - } - if !opts.DisablePreshardingEncryption || opts.EncryptionOffset != 0 { - values.Set("offset", fmt.Sprint(opts.EncryptionOffset)) + if opts.EncryptionOffset != nil { + values.Set("offset", fmt.Sprint(*opts.EncryptionOffset)) } } diff --git a/internal/testing/cluster_test.go b/internal/testing/cluster_test.go index 1b57e1e6c..61be076b7 100644 --- a/internal/testing/cluster_test.go +++ b/internal/testing/cluster_test.go @@ -2015,7 +2015,7 @@ func TestMultipartUploads(t *testing.T) { // correctly. 
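// With EncryptionOffset now a *int, a nil pointer omits the offset form value
// entirely, which the worker only accepts for unencrypted uploads; encrypted
// parts must pin the offset explicitly, so an offset of 0 is no longer
// conflated with "unset". A minimal sketch, assuming a worker client `w`:
//
//	offset := 0 // explicit, even though it is the zero value
//	res, err := w.UploadMultipartUploadPart(ctx, r, bucket, path, uploadID, 1,
//		api.UploadMultipartUploadPartOptions{EncryptionOffset: &offset})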
putPart := func(partNum int, offset int, data []byte) string { t.Helper() - res, err := w.UploadMultipartUploadPart(context.Background(), bytes.NewReader(data), api.DefaultBucketName, objPath, mpr.UploadID, partNum, api.UploadMultipartUploadPartOptions{EncryptionOffset: offset}) + res, err := w.UploadMultipartUploadPart(context.Background(), bytes.NewReader(data), api.DefaultBucketName, objPath, mpr.UploadID, partNum, api.UploadMultipartUploadPartOptions{EncryptionOffset: &offset}) tt.OK(err) if res.ETag == "" { t.Fatal("expected non-empty ETag") @@ -2362,22 +2362,25 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) { // upload a part that is a partial slab part3Data := bytes.Repeat([]byte{3}, int(slabSize)/4) + offset := int(slabSize + slabSize/4) resp3, err := w.UploadMultipartUploadPart(context.Background(), bytes.NewReader(part3Data), api.DefaultBucketName, objPath, mpr.UploadID, 3, api.UploadMultipartUploadPartOptions{ - EncryptionOffset: int(slabSize + slabSize/4), + EncryptionOffset: &offset, }) tt.OK(err) // upload a part that is exactly a full slab part2Data := bytes.Repeat([]byte{2}, int(slabSize)) + offset = int(slabSize / 4) resp2, err := w.UploadMultipartUploadPart(context.Background(), bytes.NewReader(part2Data), api.DefaultBucketName, objPath, mpr.UploadID, 2, api.UploadMultipartUploadPartOptions{ - EncryptionOffset: int(slabSize / 4), + EncryptionOffset: &offset, }) tt.OK(err) // upload another part the same size as the first one part1Data := bytes.Repeat([]byte{1}, int(slabSize)/4) + offset = 0 resp1, err := w.UploadMultipartUploadPart(context.Background(), bytes.NewReader(part1Data), api.DefaultBucketName, objPath, mpr.UploadID, 1, api.UploadMultipartUploadPartOptions{ - EncryptionOffset: 0, + EncryptionOffset: &offset, }) tt.OK(err) diff --git a/s3/backend.go b/s3/backend.go index a481da727..62dc8ddf5 100644 --- a/s3/backend.go +++ b/s3/backend.go @@ -418,8 +418,7 @@ func (s *s3) CreateMultipartUpload(ctx context.Context, bucket, key string, meta func (s *s3) UploadPart(ctx context.Context, bucket, object string, id gofakes3.UploadID, partNumber int, contentLength int64, input io.Reader) (*gofakes3.UploadPartResult, error) { res, err := s.w.UploadMultipartUploadPart(ctx, input, bucket, object, string(id), partNumber, api.UploadMultipartUploadPartOptions{ - DisablePreshardingEncryption: true, - ContentLength: contentLength, + ContentLength: contentLength, }) if err != nil { return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) From 07bbbb11a7f7aeea85d23df6eb32530d6942e809 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 16 Feb 2024 15:39:13 +0100 Subject: [PATCH 040/172] testing: fix TestMultipartUploads --- api/multipart.go | 12 ++++++------ bus/bus.go | 6 ++++-- internal/testing/cluster_test.go | 3 ++- s3/backend.go | 2 +- 4 files changed, 13 insertions(+), 10 deletions(-) diff --git a/api/multipart.go b/api/multipart.go index e786a7c25..a191b2b13 100644 --- a/api/multipart.go +++ b/api/multipart.go @@ -47,7 +47,7 @@ type ( CreateMultipartOptions struct { GenerateKey bool - Key object.EncryptionKey + Key *object.EncryptionKey MimeType string Metadata ObjectUserMetadata } @@ -82,11 +82,11 @@ type ( } MultipartCreateRequest struct { - Bucket string `json:"bucket"` - Path string `json:"path"` - Key object.EncryptionKey `json:"key"` - MimeType string `json:"mimeType"` - Metadata ObjectUserMetadata `json:"metadata"` + Bucket string `json:"bucket"` + Path string `json:"path"` + Key *object.EncryptionKey `json:"key"` + MimeType string 
`json:"mimeType"` + Metadata ObjectUserMetadata `json:"metadata"` // TODO: The next major version change should invert this to create a // key by default diff --git a/bus/bus.go b/bus/bus.go index 58739e11a..953da9976 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -2177,11 +2177,13 @@ func (b *bus) multipartHandlerCreatePOST(jc jape.Context) { return } - key := req.Key + var key object.EncryptionKey if req.GenerateKey { key = object.GenerateEncryptionKey() - } else if key == (object.EncryptionKey{}) { + } else if req.Key == nil { key = object.NoOpKey + } else { + key = *req.Key } resp, err := b.ms.CreateMultipartUpload(jc.Request.Context(), req.Bucket, req.Path, key, req.MimeType, req.Metadata) diff --git a/internal/testing/cluster_test.go b/internal/testing/cluster_test.go index 61be076b7..61174a18f 100644 --- a/internal/testing/cluster_test.go +++ b/internal/testing/cluster_test.go @@ -2354,7 +2354,8 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) { // start a new multipart upload. We upload the parts in reverse order objPath := "/foo" - mpr, err := b.CreateMultipartUpload(context.Background(), api.DefaultBucketName, objPath, api.CreateMultipartOptions{Key: object.GenerateEncryptionKey()}) + key := object.GenerateEncryptionKey() + mpr, err := b.CreateMultipartUpload(context.Background(), api.DefaultBucketName, objPath, api.CreateMultipartOptions{Key: &key}) tt.OK(err) if mpr.UploadID == "" { t.Fatal("expected non-empty upload ID") diff --git a/s3/backend.go b/s3/backend.go index 62dc8ddf5..de72bb005 100644 --- a/s3/backend.go +++ b/s3/backend.go @@ -405,7 +405,7 @@ func (s *s3) CopyObject(ctx context.Context, srcBucket, srcKey, dstBucket, dstKe func (s *s3) CreateMultipartUpload(ctx context.Context, bucket, key string, meta map[string]string) (gofakes3.UploadID, error) { convertToSiaMetadataHeaders(meta) resp, err := s.b.CreateMultipartUpload(ctx, bucket, "/"+key, api.CreateMultipartOptions{ - Key: object.NoOpKey, + Key: &object.NoOpKey, MimeType: meta["Content-Type"], Metadata: api.ExtractObjectUserMetadataFrom(meta), }) From 9051e472f06409d5e4613375b47f17f0980933fb Mon Sep 17 00:00:00 2001 From: PJ Date: Fri, 16 Feb 2024 15:46:35 +0100 Subject: [PATCH 041/172] stores: get rid of migrate bool --- stores/migrations.go | 6 ++-- stores/migrations/mysql/main/schema.sql | 5 ++- stores/migrations/sqlite/main/schema.sql | 5 ++- stores/migrations_metrics.go | 2 +- stores/migrations_utils.go | 42 ++++++------------------ 5 files changed, 22 insertions(+), 38 deletions(-) diff --git a/stores/migrations.go b/stores/migrations.go index 05bda3f69..7f225ff17 100644 --- a/stores/migrations.go +++ b/stores/migrations.go @@ -24,13 +24,13 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { { ID: "00001_object_metadata", Migrate: func(tx *gorm.DB) error { - return performMigration(tx, "00001_object_metadata", false, logger) + return performMigration(tx, "main", "00001_object_metadata", logger) }, }, { ID: "00002_prune_slabs_trigger", Migrate: func(tx *gorm.DB) error { - err := performMigration(tx, "00002_prune_slabs_trigger", false, logger) + err := performMigration(tx, "main", "00002_prune_slabs_trigger", logger) if err != nil && strings.Contains(err.Error(), errMySQLNoSuperPrivilege.Error()) { logger.Warn("migration 00002_prune_slabs_trigger requires the user to have the SUPER privilege to register triggers") } @@ -43,7 +43,7 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { m := gormigrate.New(db, gormigrate.DefaultOptions, migrations) // Set 
init function. - m.InitSchema(initSchema(db, false, logger)) + m.InitSchema(initSchema(db, "main", logger)) // Perform migrations. if err := m.Migrate(); err != nil { diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index 39bf279f0..d28bdd13f 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -452,4 +452,7 @@ AND NOT EXISTS ( SELECT 1 FROM slices WHERE slices.db_slab_id = OLD.db_slab_id -); \ No newline at end of file +); + +-- create default bucket +INSERT INTO buckets (created_at, name) VALUES (CURRENT_TIMESTAMP, 'default'); \ No newline at end of file diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql index df9fc9a83..daee619b4 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -183,4 +183,7 @@ BEGIN FROM slices WHERE slices.db_slab_id = OLD.db_slab_id ); -END; \ No newline at end of file +END; + +-- create default bucket +INSERT INTO buckets (created_at, name) VALUES (CURRENT_TIMESTAMP, 'default'); diff --git a/stores/migrations_metrics.go b/stores/migrations_metrics.go index ddbbe8e4e..940917569 100644 --- a/stores/migrations_metrics.go +++ b/stores/migrations_metrics.go @@ -20,7 +20,7 @@ func performMetricsMigrations(tx *gorm.DB, logger *zap.SugaredLogger) error { m := gormigrate.New(tx, gormigrate.DefaultOptions, migrations) // Set init function. - m.InitSchema(initSchema(tx, true, logger)) + m.InitSchema(initSchema(tx, "metrics", logger)) // Perform migrations. if err := m.Migrate(); err != nil { diff --git a/stores/migrations_utils.go b/stores/migrations_utils.go index 4832d96a7..46d7f3dc4 100644 --- a/stores/migrations_utils.go +++ b/stores/migrations_utils.go @@ -4,64 +4,42 @@ import ( "fmt" gormigrate "github.com/go-gormigrate/gormigrate/v2" - "go.sia.tech/renterd/api" "go.uber.org/zap" "gorm.io/gorm" ) // initSchema is executed only on a clean database. Otherwise the individual // migrations are executed. -func initSchema(db *gorm.DB, metrics bool, logger *zap.SugaredLogger) gormigrate.InitSchemaFunc { +func initSchema(db *gorm.DB, name string, logger *zap.SugaredLogger) gormigrate.InitSchemaFunc { return func(tx *gorm.DB) error { - if metrics { - logger.Info("initializing metrics schema") - } else { - logger.Info("initializing schema") - } + logger.Infof("initializing '%s' schema", name) - // build filename - filename := "schema" - err := execSQLFile(tx, metrics, filename) + // init schema + err := execSQLFile(tx, name, "schema") if err != nil { return fmt.Errorf("failed to init schema: %w", err) } - // add default bucket. 
- if !metrics { - if err := tx.Create(&dbBucket{ - Name: api.DefaultBucketName, - }).Error; err != nil { - return fmt.Errorf("failed to create default bucket: %v", err) - } - } - logger.Info("initialization complete") return nil } } -func performMigration(db *gorm.DB, name string, metrics bool, logger *zap.SugaredLogger) error { - logger.Infof("performing migration %s", name) - - // build filename - filename := fmt.Sprintf("migration_%s", name) +func performMigration(db *gorm.DB, kind, migration string, logger *zap.SugaredLogger) error { + logger.Infof("performing %s migration '%s'", kind, migration) // execute migration - err := execSQLFile(db, metrics, filename) + err := execSQLFile(db, kind, fmt.Sprintf("migration_%s", migration)) if err != nil { - return fmt.Errorf("migration %s failed: %w", name, err) + return fmt.Errorf("migration '%s' failed: %w", migration, err) } - logger.Infof("migration %s complete", name) + logger.Infof("migration '%s' complete", migration) return nil } -func execSQLFile(db *gorm.DB, metrics bool, filename string) error { +func execSQLFile(db *gorm.DB, folder, filename string) error { // build path - folder := "main" - if metrics { - folder = "metrics" - } protocol := "mysql" if isSQLite(db) { protocol = "sqlite" From e0b3dc03e4069211961f5bad8b88864839547dd1 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 16 Feb 2024 15:46:45 +0100 Subject: [PATCH 042/172] object: fix panic in objects.String --- object/object.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/object/object.go b/object/object.go index e3c2fe3e7..7c74c1c23 100644 --- a/object/object.go +++ b/object/object.go @@ -56,9 +56,6 @@ func (k EncryptionKey) MarshalText() ([]byte, error) { // UnmarshalText implements the encoding.TextUnmarshaler interface. func (k *EncryptionKey) UnmarshalText(b []byte) error { - if len(b) == 0 { - return nil - } k.entropy = new([32]byte) if n, err := hex.Decode(k.entropy[:], []byte(bytes.TrimPrefix(b, []byte("key:")))); err != nil { return err From e7f6bb417c03092cf46aef1903e3e09b1e2bbd82 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 16 Feb 2024 16:23:04 +0100 Subject: [PATCH 043/172] testing: fix TestS3Basic --- api/object.go | 2 +- internal/testing/cluster_test.go | 2 +- stores/metadata.go | 2 +- worker/mocks_test.go | 2 +- worker/upload_test.go | 14 +++++++------- worker/worker.go | 2 +- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/api/object.go b/api/object.go index 09f4b04ab..e8ae7fea8 100644 --- a/api/object.go +++ b/api/object.go @@ -54,7 +54,7 @@ type ( Object struct { Metadata ObjectUserMetadata `json:"metadata,omitempty"` ObjectMetadata - object.Object `json:"omitempty"` + *object.Object `json:"omitempty"` } // ObjectMetadata contains various metadata about an object. 
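Since the embedded object is now a pointer, a response can be metadata-only and callers must nil-check before dereferencing, as the test and worker changes below do with *res.Object.Object. A minimal caller-side sketch; the ObjectFetcher interface is hypothetical and names only the bus-client method the sketch assumes:

// ObjectFetcher captures the single bus-client method this sketch needs.
type ObjectFetcher interface {
	Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (api.ObjectsResponse, error)
}

// fetchObject guards the now-optional embedded object before dereferencing it.
func fetchObject(ctx context.Context, b ObjectFetcher, bucket, path string) (object.Object, error) {
	res, err := b.Object(ctx, bucket, path, api.GetObjectOptions{})
	if err != nil {
		return object.Object{}, err
	} else if res.Object == nil || res.Object.Object == nil {
		return object.Object{}, errors.New("response contained no object") // metadata-only response
	}
	return *res.Object.Object, nil
}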
diff --git a/internal/testing/cluster_test.go b/internal/testing/cluster_test.go index d290faf27..81779eeac 100644 --- a/internal/testing/cluster_test.go +++ b/internal/testing/cluster_test.go @@ -1313,7 +1313,7 @@ func TestUploadDownloadSameHost(t *testing.T) { // build a frankenstein object constructed with all sectors on the same host res.Object.Slabs[0].Shards = shards[res.Object.Slabs[0].Shards[0].LatestHost] - tt.OK(b.AddObject(context.Background(), api.DefaultBucketName, "frankenstein", testContractSet, res.Object.Object, api.AddObjectOptions{})) + tt.OK(b.AddObject(context.Background(), api.DefaultBucketName, "frankenstein", testContractSet, *res.Object.Object, api.AddObjectOptions{})) // assert we can download this object tt.OK(w.DownloadObject(context.Background(), io.Discard, api.DefaultBucketName, "frankenstein", api.DownloadObjectOptions{})) diff --git a/stores/metadata.go b/stores/metadata.go index 15a4690ad..0e659a243 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -2329,7 +2329,7 @@ func (s *SQLStore) objectHydrate(ctx context.Context, tx *gorm.DB, bucket, path obj[0].ObjectModTime, obj[0].ObjectSize, ), - Object: object.Object{ + Object: &object.Object{ Key: key, Slabs: slabs, }, diff --git a/worker/mocks_test.go b/worker/mocks_test.go index e6fd62d8e..2490941af 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -274,7 +274,7 @@ func (os *mockObjectStore) Object(ctx context.Context, bucket, path string, opts return api.ObjectsResponse{Object: &api.Object{ ObjectMetadata: api.ObjectMetadata{Name: path, Size: o.TotalSize()}, - Object: o, + Object: &o, }}, nil } diff --git a/worker/upload_test.go b/worker/upload_test.go index 8d32455bd..0b9f6b28b 100644 --- a/worker/upload_test.go +++ b/worker/upload_test.go @@ -64,7 +64,7 @@ func TestUpload(t *testing.T) { // download the data and assert it matches var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -90,7 +90,7 @@ func TestUpload(t *testing.T) { // download the data again and assert it matches buf.Reset() - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), filtered) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), filtered) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -107,7 +107,7 @@ func TestUpload(t *testing.T) { // download the data again and assert it fails buf.Reset() - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), filtered) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), filtered) if !errors.Is(err, errDownloadNotEnoughHosts) { t.Fatal("expected not enough hosts error", err) } @@ -170,7 +170,7 @@ func TestUploadPackedSlab(t *testing.T) { // download the data and assert it matches var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -206,7 +206,7 @@ func TestUploadPackedSlab(t *testing.T) { // download the data again and assert it matches 
buf.Reset() - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -325,7 +325,7 @@ func TestUploadShards(t *testing.T) { // download the data and assert it matches var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), contracts) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), contracts) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -485,7 +485,7 @@ func TestUploadRegression(t *testing.T) { // download data for good measure var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { diff --git a/worker/worker.go b/worker/worker.go index 8a2a9b1f3..95497b383 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -919,7 +919,7 @@ func (w *worker) objectsHandlerGET(jc jape.Context) { // create a download function downloadFn := func(wr io.Writer, offset, length int64) (err error) { ctx = WithGougingChecker(ctx, w.bus, gp) - err = w.downloadManager.DownloadObject(ctx, wr, res.Object.Object, uint64(offset), uint64(length), contracts) + err = w.downloadManager.DownloadObject(ctx, wr, *res.Object.Object, uint64(offset), uint64(length), contracts) if err != nil { w.logger.Error(err) if !errors.Is(err, ErrShuttingDown) { From 93bb567a1158646c6f60cf5a7b0d9544e5cd848b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Feb 2024 01:42:17 +0000 Subject: [PATCH 044/172] build(deps): bump github.com/minio/minio-go/v7 from 7.0.66 to 7.0.67 Bumps [github.com/minio/minio-go/v7](https://github.com/minio/minio-go) from 7.0.66 to 7.0.67. - [Release notes](https://github.com/minio/minio-go/releases) - [Commits](https://github.com/minio/minio-go/compare/v7.0.66...v7.0.67) --- updated-dependencies: - dependency-name: github.com/minio/minio-go/v7 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2659a1ceb..a2e8a45f5 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/gotd/contrib v0.19.0 github.com/klauspost/reedsolomon v1.12.1 - github.com/minio/minio-go/v7 v7.0.66 + github.com/minio/minio-go/v7 v7.0.67 github.com/montanaflynn/stats v0.7.1 gitlab.com/NebulousLabs/encoding v0.0.0-20200604091946-456c3dc907fe go.sia.tech/core v0.1.12-0.20231211182757-77190f04f90b diff --git a/go.sum b/go.sum index b7f5f9cc7..57c101799 100644 --- a/go.sum +++ b/go.sum @@ -135,8 +135,8 @@ github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.66 h1:bnTOXOHjOqv/gcMuiVbN9o2ngRItvqE774dG9nq0Dzw= -github.com/minio/minio-go/v7 v7.0.66/go.mod h1:DHAgmyQEGdW3Cif0UooKOyrT3Vxs82zNdV6tkKhRtbs= +github.com/minio/minio-go/v7 v7.0.67 h1:BeBvZWAS+kRJm1vGTMJYVjKUNoo0FoEt/wUWdUtfmh8= +github.com/minio/minio-go/v7 v7.0.67/go.mod h1:+UXocnUeZ3wHvVh5s95gcrA4YjMIbccT6ubB+1m054A= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= From 9431952288392047e4e30d250586eec413c8c4bf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Feb 2024 01:42:22 +0000 Subject: [PATCH 045/172] build(deps): bump gorm.io/driver/mysql from 1.5.2 to 1.5.4 Bumps [gorm.io/driver/mysql](https://github.com/go-gorm/mysql) from 1.5.2 to 1.5.4. - [Commits](https://github.com/go-gorm/mysql/compare/v1.5.2...v1.5.4) --- updated-dependencies: - dependency-name: gorm.io/driver/mysql dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 2659a1ceb..03815a315 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( golang.org/x/crypto v0.19.0 golang.org/x/term v0.17.0 gopkg.in/yaml.v3 v3.0.1 - gorm.io/driver/mysql v1.5.2 + gorm.io/driver/mysql v1.5.4 gorm.io/driver/sqlite v1.5.5 gorm.io/gorm v1.25.7 lukechampine.com/frand v1.4.2 diff --git a/go.sum b/go.sum index b7f5f9cc7..ee2745f86 100644 --- a/go.sum +++ b/go.sum @@ -389,11 +389,11 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/mysql v1.5.2 h1:QC2HRskSE75wBuOxe0+iCkyJZ+RqpudsQtqkp+IMuXs= -gorm.io/driver/mysql v1.5.2/go.mod h1:pQLhh1Ut/WUAySdTHwBpBv6+JKcj+ua4ZFx1QQTBzb8= +gorm.io/driver/mysql v1.5.4 h1:igQmHfKcbaTVyAIHNhhB888vvxh8EdQ2uSUT0LPcBso= +gorm.io/driver/mysql v1.5.4/go.mod h1:9rYxJph/u9SWkWc9yY4XJ1F/+xO0S/ChOmbk3+Z5Tvs= gorm.io/driver/sqlite v1.5.5 h1:7MDMtUZhV065SilG62E0MquljeArQZNfJnjd9i9gx3E= gorm.io/driver/sqlite v1.5.5/go.mod h1:6NgQ7sQWAIFsPrJJl1lSNSu2TABh0ZZ/zm5fosATavE= -gorm.io/gorm v1.25.2-0.20230530020048-26663ab9bf55/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= +gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gorm.io/gorm v1.25.7 h1:VsD6acwRjz2zFxGO50gPO6AkNs7KKnvfzUjHQhZDz/A= gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 264e4a64f492ffb04fcb3b9ca7476dc45b85fa9b Mon Sep 17 00:00:00 2001 From: Peter-Jan Brone Date: Mon, 19 Feb 2024 10:25:00 +0100 Subject: [PATCH 046/172] Extend contract set change alerts with host info (#966) * contractor: extend contract set alert with host info * autopilot: get rid of set in contract set change alert --- autopilot/alerts.go | 22 +++++-- autopilot/contractor.go | 136 +++++++++++++++++++++++----------------- 2 files changed, 95 insertions(+), 63 deletions(-) diff --git a/autopilot/alerts.go b/autopilot/alerts.go index c279075ee..292670dc5 100644 --- a/autopilot/alerts.go +++ b/autopilot/alerts.go @@ -137,22 +137,32 @@ func newContractPruningFailedAlert(hk types.PublicKey, version string, fcid type } } -func newContractSetChangeAlert(name string, added, removed int, removedReasons map[string]string) alerts.Alert { +func newContractSetChangeAlert(name string, additions map[types.FileContractID]contractSetAddition, removals map[types.FileContractID]contractSetRemoval) alerts.Alert { var hint string - if removed > 0 { + if len(removals) > 0 { hint = "A high churn rate can lead to a lot of unnecessary migrations, it might be necessary to tweak your configuration depending on the reason hosts are being discarded from the set." 
} + removedReasons := make(map[string]string, len(removals)) + for k, v := range removals { + removedReasons[k.String()] = v.Reason + } + return alerts.Alert{ ID: randomAlertID(), Severity: alerts.SeverityInfo, Message: "Contract set changed", Data: map[string]any{ - "name": name, - "added": added, - "removed": removed, + "name": name, + "set_additions": additions, + "set_removals": removals, + "hint": hint, + + // TODO: these fields can be removed on the next major release, they + // contain redundant information + "added": len(additions), + "removed": len(removals), "removals": removedReasons, - "hint": hint, }, Timestamp: time.Now(), } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index bc947d500..092f2a831 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -122,9 +122,20 @@ type ( recoverable bool } + contractSetAddition struct { + Size uint64 `json:"size"` + HostKey types.PublicKey `json:"hostKey"` + } + + contractSetRemoval struct { + Size uint64 `json:"size"` + HostKey types.PublicKey `json:"hostKey"` + Reason string `json:"reason"` + } + renewal struct { - from types.FileContractID - to types.FileContractID + from api.ContractMetadata + to api.ContractMetadata ci contractInfo } ) @@ -331,17 +342,15 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( // set afterwards var renewed []renewal if limit > 0 { - var toKeep []contractInfo + var toKeep []api.ContractMetadata renewed, toKeep = c.runContractRenewals(ctx, w, toRenew, &remaining, limit) for _, ri := range renewed { if ri.ci.usable || ri.ci.recoverable { updatedSet = append(updatedSet, ri.to) } - contractData[ri.to] = contractData[ri.from] - } - for _, ci := range toKeep { - updatedSet = append(updatedSet, ci.contract.ID) + contractData[ri.to.ID] = contractData[ri.from.ID] } + updatedSet = append(updatedSet, toKeep...) 
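// Editor's note: with `renewal` now holding api.ContractMetadata for both the
// old and the new contract (instead of bare IDs), size bookkeeping can follow
// a contract across a renewal and the alerts can name the host involved. A
// rough sketch of that flow, reusing identifiers from this patch:
//
//	r := renewal{from: contract.ContractMetadata, to: renewed, ci: ci}
//	contractData[r.to.ID] = contractData[r.from.ID] // size survives the renewal
//	setAdditions[r.to.ID] = contractSetAddition{
//		Size:    contractData[r.to.ID],
//		HostKey: r.to.HostKey,
//	}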
} // run contract refreshes @@ -353,7 +362,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( if ri.ci.usable || ri.ci.recoverable { updatedSet = append(updatedSet, ri.to) } - contractData[ri.to] = contractData[ri.from] + contractData[ri.to.ID] = contractData[ri.from.ID] } } @@ -366,7 +375,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // check if we need to form contracts and add them to the contract set - var formed []types.FileContractID + var formed []api.ContractMetadata if uint64(len(updatedSet)) < threshold { // no need to try and form contracts if wallet is completely empty wallet, err := c.ap.bus.Wallet(ctx) @@ -382,34 +391,40 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } else { for _, fc := range formed { updatedSet = append(updatedSet, fc) - contractData[fc] = 0 + contractData[fc.ID] = 0 } } } } // cap the amount of contracts we want to keep to the configured amount - for _, fcid := range updatedSet { - if _, exists := contractData[fcid]; !exists { - c.logger.Errorf("contract %v not found in contractData", fcid) + for _, contract := range updatedSet { + if _, exists := contractData[contract.ID]; !exists { + c.logger.Errorf("contract %v not found in contractData", contract.ID) } } if len(updatedSet) > int(state.cfg.Contracts.Amount) { // sort by contract size sort.Slice(updatedSet, func(i, j int) bool { - return contractData[updatedSet[i]] > contractData[updatedSet[j]] + return contractData[updatedSet[i].ID] > contractData[updatedSet[j].ID] }) - for _, c := range updatedSet[state.cfg.Contracts.Amount:] { - toStopUsing[c] = "truncated" + for _, contract := range updatedSet[state.cfg.Contracts.Amount:] { + toStopUsing[contract.ID] = "truncated" } updatedSet = updatedSet[:state.cfg.Contracts.Amount] } + // convert to set of file contract ids + var newSet []types.FileContractID + for _, contract := range updatedSet { + newSet = append(newSet, contract.ID) + } + // update contract set if c.ap.isStopped() { return false, errors.New("autopilot stopped before maintenance could be completed") } - err = c.ap.bus.SetContractSet(ctx, state.cfg.Contracts.Set, updatedSet) + err = c.ap.bus.SetContractSet(ctx, state.cfg.Contracts.Set, newSet) if err != nil { return false, err } @@ -418,54 +433,62 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( return c.computeContractSetChanged(ctx, state.cfg.Contracts.Set, currentSet, updatedSet, formed, refreshed, renewed, toStopUsing, contractData), nil } -func (c *contractor) computeContractSetChanged(ctx context.Context, name string, oldSet []api.ContractMetadata, newSet, formed []types.FileContractID, refreshed, renewed []renewal, toStopUsing map[types.FileContractID]string, contractData map[types.FileContractID]uint64) bool { - // build some maps for easier lookups - previous := make(map[types.FileContractID]struct{}) +func (c *contractor) computeContractSetChanged(ctx context.Context, name string, oldSet, newSet []api.ContractMetadata, formed []api.ContractMetadata, refreshed, renewed []renewal, toStopUsing map[types.FileContractID]string, contractData map[types.FileContractID]uint64) bool { + // build set lookups + inOldSet := make(map[types.FileContractID]struct{}) for _, c := range oldSet { - previous[c.ID] = struct{}{} + inOldSet[c.ID] = struct{}{} } - updated := make(map[types.FileContractID]struct{}) + inNewSet := make(map[types.FileContractID]struct{}) for _, c := range newSet { - updated[c] = 
struct{}{} + inNewSet[c.ID] = struct{}{} } + + // build renewal lookups renewalsFromTo := make(map[types.FileContractID]types.FileContractID) renewalsToFrom := make(map[types.FileContractID]types.FileContractID) for _, c := range append(refreshed, renewed...) { - renewalsFromTo[c.from] = c.to - renewalsToFrom[c.to] = c.from + renewalsFromTo[c.from.ID] = c.to.ID + renewalsToFrom[c.to.ID] = c.from.ID } // log added and removed contracts - var added []types.FileContractID - var removed []types.FileContractID - removedReasons := make(map[string]string) + setAdditions := make(map[types.FileContractID]contractSetAddition) + setRemovals := make(map[types.FileContractID]contractSetRemoval) for _, contract := range oldSet { - _, exists := updated[contract.ID] - _, renewed := updated[renewalsFromTo[contract.ID]] + _, exists := inNewSet[contract.ID] + _, renewed := inNewSet[renewalsFromTo[contract.ID]] if !exists && !renewed { - removed = append(removed, contract.ID) reason, ok := toStopUsing[contract.ID] if !ok { reason = "unknown" } - removedReasons[contract.ID.String()] = reason + + setRemovals[contract.ID] = contractSetRemoval{ + Size: contractData[contract.ID], + HostKey: contract.HostKey, + Reason: reason, + } c.logger.Debugf("contract %v was removed from the contract set, size: %v, reason: %v", contract.ID, contractData[contract.ID], reason) } } - for _, fcid := range newSet { - _, existed := previous[fcid] - _, renewed := renewalsToFrom[fcid] + for _, contract := range newSet { + _, existed := inOldSet[contract.ID] + _, renewed := renewalsToFrom[contract.ID] if !existed && !renewed { - added = append(added, fcid) - c.logger.Debugf("contract %v was added to the contract set, size: %v", fcid, contractData[fcid]) + setAdditions[contract.ID] = contractSetAddition{ + Size: contractData[contract.ID], + HostKey: contract.HostKey, + } + c.logger.Debugf("contract %v was added to the contract set, size: %v", contract.ID, contractData[contract.ID]) } } // log renewed contracts that did not make it into the contract set for _, fcid := range renewed { - _, exists := updated[fcid.to] + _, exists := inNewSet[fcid.to.ID] if !exists { - c.logger.Debugf("contract %v was renewed but did not make it into the contract set, size: %v", fcid, contractData[fcid.to]) + c.logger.Debugf("contract %v was renewed but did not make it into the contract set, size: %v", fcid, contractData[fcid.to.ID]) } } @@ -478,7 +501,7 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string, // record churn metrics now := api.TimeNow() var metrics []api.ContractSetChurnMetric - for _, fcid := range added { + for fcid := range setAdditions { metrics = append(metrics, api.ContractSetChurnMetric{ Name: c.ap.state.cfg.Contracts.Set, ContractID: fcid, @@ -486,12 +509,12 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string, Timestamp: now, }) } - for _, fcid := range removed { + for fcid, removal := range setRemovals { metrics = append(metrics, api.ContractSetChurnMetric{ Name: c.ap.state.cfg.Contracts.Set, ContractID: fcid, Direction: api.ChurnDirRemoved, - Reason: removedReasons[fcid.String()], + Reason: removal.Reason, Timestamp: now, }) } @@ -508,12 +531,12 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string, "renewed", len(renewed), "refreshed", len(refreshed), "contracts", len(newSet), - "added", len(added), - "removed", len(removed), + "added", len(setAdditions), + "removed", len(setRemovals), ) - hasChanged := len(added)+len(removed) > 0 + hasChanged 
:= len(setAdditions)+len(setRemovals) > 0 if hasChanged { - c.ap.RegisterAlert(ctx, newContractSetChangeAlert(name, len(added), len(removed), removedReasons)) + c.ap.RegisterAlert(ctx, newContractSetChangeAlert(name, setAdditions, setRemovals)) } return hasChanged } @@ -608,7 +631,7 @@ func (c *contractor) performWalletMaintenance(ctx context.Context) error { return nil } -func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts []api.Contract, inCurrentSet map[types.FileContractID]struct{}, minScore float64) (toKeep []types.FileContractID, toArchive, toStopUsing map[types.FileContractID]string, toRefresh, toRenew []contractInfo, _ error) { +func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts []api.Contract, inCurrentSet map[types.FileContractID]struct{}, minScore float64) (toKeep []api.ContractMetadata, toArchive, toStopUsing map[types.FileContractID]string, toRefresh, toRenew []contractInfo, _ error) { if c.ap.isStopped() { return } @@ -740,7 +763,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts } else if !state.cfg.Hosts.AllowRedundantIPs && ipFilter.IsRedundantIP(contract.HostIP, contract.HostKey) { toStopUsing[fcid] = fmt.Sprintf("%v; %v", errHostRedundantIP, errContractNoRevision) } else { - toKeep = append(toKeep, fcid) + toKeep = append(toKeep, contract.ContractMetadata) remainingKeepLeeway-- // we let it slide } continue // can't perform contract checks without revision @@ -783,18 +806,17 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts } else if refresh { toRefresh = append(toRefresh, ci) } else if usable { - toKeep = append(toKeep, ci.contract.ID) + toKeep = append(toKeep, ci.contract.ContractMetadata) } } return toKeep, toArchive, toStopUsing, toRefresh, toRenew, nil } -func (c *contractor) runContractFormations(ctx context.Context, w Worker, candidates scoredHosts, usedHosts map[types.PublicKey]struct{}, unusableHosts unusableHostResult, missing uint64, budget *types.Currency) ([]types.FileContractID, error) { +func (c *contractor) runContractFormations(ctx context.Context, w Worker, candidates scoredHosts, usedHosts map[types.PublicKey]struct{}, unusableHosts unusableHostResult, missing uint64, budget *types.Currency) (formed []api.ContractMetadata, _ error) { if c.ap.isStopped() { return nil, nil } - var formed []types.FileContractID // convenience variables state := c.ap.State() @@ -896,7 +918,7 @@ func (c *contractor) runContractFormations(ctx context.Context, w Worker, candid formedContract, proceed, err := c.formContract(ctx, w, host, minInitialContractFunds, maxInitialContractFunds, budget) if err == nil { // add contract to contract set - formed = append(formed, formedContract.ID) + formed = append(formed, formedContract) missing-- } if !proceed { @@ -976,7 +998,7 @@ func (c *contractor) runRevisionBroadcast(ctx context.Context, w Worker, allCont } } -func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew []contractInfo, budget *types.Currency, limit int) (renewals []renewal, toKeep []contractInfo) { +func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew []contractInfo, budget *types.Currency, limit int) (renewals []renewal, toKeep []api.ContractMetadata) { c.logger.Debugw( "run contracts renewals", "torenew", len(toRenew), @@ -1010,11 +1032,11 @@ func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew if err != nil { c.ap.RegisterAlert(ctx, 
newContractRenewalFailedAlert(contract, !proceed, err)) if toRenew[i].usable { - toKeep = append(toKeep, toRenew[i]) + toKeep = append(toKeep, toRenew[i].contract.ContractMetadata) } } else { c.ap.DismissAlert(ctx, alertIDForContract(alertRenewalFailedID, contract.ID)) - renewals = append(renewals, renewal{from: contract.ID, to: renewed.ID, ci: toRenew[i]}) + renewals = append(renewals, renewal{from: contract, to: renewed, ci: toRenew[i]}) } // break if we don't want to proceed @@ -1027,7 +1049,7 @@ func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew // they're usable and we have 'limit' left for j := i; j < len(toRenew); j++ { if len(renewals)+len(toKeep) < limit && toRenew[j].usable { - toKeep = append(toKeep, toRenew[j]) + toKeep = append(toKeep, toRenew[j].contract.ContractMetadata) } } @@ -1057,7 +1079,7 @@ func (c *contractor) runContractRefreshes(ctx context.Context, w Worker, toRefre // refresh and add if it succeeds renewed, proceed, err := c.refreshContract(ctx, w, ci, budget) if err == nil { - refreshed = append(refreshed, renewal{from: ci.contract.ID, to: renewed.ID, ci: ci}) + refreshed = append(refreshed, renewal{from: ci.contract.ContractMetadata, to: renewed, ci: ci}) } // break if we don't want to proceed From ac15fc80a73d0854831080dbfbcf3acb577f3891 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 19 Feb 2024 10:44:31 +0100 Subject: [PATCH 047/172] go.mod: upgrade hostd dependency --- build/network.go | 2 +- cmd/renterd/config.go | 2 +- go.mod | 17 ++++++++--------- go.sum | 30 ++++++++++++++++-------------- internal/node/transactionpool.go | 2 +- stores/wallet.go | 6 +++--- 6 files changed, 30 insertions(+), 29 deletions(-) diff --git a/build/network.go b/build/network.go index 4183a62bc..a0a452189 100644 --- a/build/network.go +++ b/build/network.go @@ -3,9 +3,9 @@ package build //go:generate go run gen.go import ( - "go.sia.tech/core/chain" "go.sia.tech/core/consensus" "go.sia.tech/core/types" + "go.sia.tech/coreutils/chain" ) // Network returns the Sia network consts and genesis block for the current build.
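Beyond the import moves, the coreutils upgrade changes how legacy siad types are bridged into core types: conversions now go through the V1 wrapper types, which is why the hunks below cast the destination before decoding. A minimal sketch of the pattern, assuming the repo's existing convertToCore helper that round-trips values through Sia encoding, with `max` and `diff.SiacoinOutput` being the siad values used in those hunks:

// decode a siad currency into a core types.Currency via the v1 wire format
var fee types.Currency
convertToCore(&max, (*types.V1Currency)(&fee))

// the same pattern applies to siacoin outputs
var sco types.SiacoinOutput
convertToCore(diff.SiacoinOutput, (*types.V1SiacoinOutput)(&sco))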
diff --git a/cmd/renterd/config.go b/cmd/renterd/config.go index 391d77ea3..47668ff94 100644 --- a/cmd/renterd/config.go +++ b/cmd/renterd/config.go @@ -11,7 +11,7 @@ import ( "strings" "go.sia.tech/core/types" - "go.sia.tech/core/wallet" + "go.sia.tech/coreutils/wallet" "golang.org/x/term" "gopkg.in/yaml.v3" "lukechampine.com/frand" diff --git a/go.mod b/go.mod index db441a0ec..e727483d1 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,6 @@ module go.sia.tech/renterd -go 1.21 - -toolchain go1.21.6 +go 1.21.6 require ( github.com/gabriel-vasile/mimetype v1.4.3 @@ -13,10 +11,11 @@ require ( github.com/minio/minio-go/v7 v7.0.67 github.com/montanaflynn/stats v0.7.1 gitlab.com/NebulousLabs/encoding v0.0.0-20200604091946-456c3dc907fe - go.sia.tech/core v0.1.12-0.20231211182757-77190f04f90b + go.sia.tech/core v0.2.1 + go.sia.tech/coreutils v0.0.1 go.sia.tech/gofakes3 v0.0.0-20231109151325-e0d47c10dce2 - go.sia.tech/hostd v0.3.0-beta.1 - go.sia.tech/jape v0.11.1 + go.sia.tech/hostd v1.0.2 + go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 go.sia.tech/mux v1.2.0 go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca go.sia.tech/web/renterd v0.45.0 @@ -32,8 +31,8 @@ require ( require ( github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da // indirect - github.com/aws/aws-sdk-go v1.49.1 // indirect - github.com/cloudflare/cloudflare-go v0.75.0 // indirect + github.com/aws/aws-sdk-go v1.50.1 // indirect + github.com/cloudflare/cloudflare-go v0.86.0 // indirect github.com/dchest/threefish v0.0.0-20120919164726-3ecf4c494abf // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-sql-driver/mysql v1.7.1 // indirect @@ -76,7 +75,7 @@ require ( gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213 // indirect go.sia.tech/web v0.0.0-20231213145933-3f175a86abff // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.19.0 // indirect + golang.org/x/net v0.20.0 // indirect golang.org/x/sys v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect diff --git a/go.sum b/go.sum index 3ed88a7d4..006a31ea6 100644 --- a/go.sum +++ b/go.sum @@ -9,14 +9,14 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/aws/aws-sdk-go v1.44.256/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go v1.49.1 h1:Dsamcd8d/nNb3A+bZ0ucfGl0vGZsW5wlRW0vhoYGoeQ= -github.com/aws/aws-sdk-go v1.49.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.50.1 h1:AwnLUM7TcH9vMZqA4TcDKmGfLmDW5VXwT5tPH6kXylo= +github.com/aws/aws-sdk-go v1.50.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/cloudflare-go v0.75.0 h1:03a4EkwwsDo0yAHjQ/l+D36K9wTkvr0afDiI/uHQ0Xw= -github.com/cloudflare/cloudflare-go v0.75.0/go.mod h1:5ocQT9qQ99QsT1Ii2751490Z5J+W/nv6jOj+lSAe4ug= +github.com/cloudflare/cloudflare-go v0.86.0 
h1:jEKN5VHNYNYtfDL2lUFLTRo+nOVNPFxpXTstVx0rqHI= +github.com/cloudflare/cloudflare-go v0.86.0/go.mod h1:wYW/5UP02TUfBToa/yKbQHV+r6h1NnJ1Je7XjuGM4Jw= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -126,8 +126,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI= @@ -239,14 +239,16 @@ gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213/go.mod h1 gitlab.com/NebulousLabs/writeaheadlog v0.0.0-20200618142844-c59a90f49130/go.mod h1:SxigdS5Q1ui+OMgGAXt1E/Fg3RB6PvKXMov2O3gvIzs= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.sia.tech/core v0.1.12-0.20231211182757-77190f04f90b h1:xJSxYN2kZD3NAijHIwjXhG5+7GoPyjDNIJPEoD3b72g= -go.sia.tech/core v0.1.12-0.20231211182757-77190f04f90b/go.mod h1:3EoY+rR78w1/uGoXXVqcYdwSjSJKuEMI5bL7WROA27Q= +go.sia.tech/core v0.2.1 h1:CqmMd+T5rAhC+Py3NxfvGtvsj/GgwIqQHHVrdts/LqY= +go.sia.tech/core v0.2.1/go.mod h1:3EoY+rR78w1/uGoXXVqcYdwSjSJKuEMI5bL7WROA27Q= +go.sia.tech/coreutils v0.0.1 h1:Th8iiF9fjkBaxlKRgPJfRtsD3Pb8U4d2m/OahB6wffg= +go.sia.tech/coreutils v0.0.1/go.mod h1:3Mb206QDd3NtRiaHZ2kN87/HKXhcBF6lHVatS7PkViY= go.sia.tech/gofakes3 v0.0.0-20231109151325-e0d47c10dce2 h1:ulzfJNjxN5DjXHClkW2pTiDk+eJ+0NQhX87lFDZ03t0= go.sia.tech/gofakes3 v0.0.0-20231109151325-e0d47c10dce2/go.mod h1:PlsiVCn6+wssrR7bsOIlZm0DahsVrDydrlbjY4F14sg= -go.sia.tech/hostd v0.3.0-beta.1 h1:A2RL4wkW18eb28+fJtdyK9OYNiiwpCDO8FO3cyT9r7A= -go.sia.tech/hostd v0.3.0-beta.1/go.mod h1:gVtU631RkbtOEHJKb8qghudhWcYIL8w3phjvV2/bz0A= -go.sia.tech/jape v0.11.1 h1:M7IP+byXL7xOqzxcHUQuXW+q3sYMkYzmMlMw+q8ZZw0= -go.sia.tech/jape v0.11.1/go.mod h1:4QqmBB+t3W7cNplXPj++ZqpoUb2PeiS66RLpXmEGap4= +go.sia.tech/hostd v1.0.2 h1:GjzNIAlwg3/dViF6258Xn5DI3+otQLRqmkoPDugP+9Y= +go.sia.tech/hostd v1.0.2/go.mod h1:zGw+AGVmazAp4ydvo7bZLNKTy1J51RI6Mp/oxRtYT6c= +go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 h1:mSaJ622P7T/M97dAK8iPV+IRIC9M5vV28NHeceoWO3M= +go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640/go.mod h1:4QqmBB+t3W7cNplXPj++ZqpoUb2PeiS66RLpXmEGap4= go.sia.tech/mux v1.2.0 h1:ofa1Us9mdymBbGMY2XH/lSpY8itFsKIo/Aq8zwe+GHU= go.sia.tech/mux v1.2.0/go.mod h1:Yyo6wZelOYTyvrHmJZ6aQfRoer3o4xyKQ4NmQLJrBSo= go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca h1:aZMg2AKevn7jKx+wlusWQfwSM5pNU9aGtRZme29q3O4= @@ -301,8 +303,8 @@ golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/internal/node/transactionpool.go b/internal/node/transactionpool.go index b2226bfb5..c5582a757 100644 --- a/internal/node/transactionpool.go +++ b/internal/node/transactionpool.go @@ -15,7 +15,7 @@ type txpool struct { func (tp txpool) RecommendedFee() (fee types.Currency) { _, max := tp.tp.FeeEstimation() - convertToCore(&max, &fee) + convertToCore(&max, (*types.V1Currency)(&fee)) return } diff --git a/stores/wallet.go b/stores/wallet.go index 679e96074..d9bf51c39 100644 --- a/stores/wallet.go +++ b/stores/wallet.go @@ -130,7 +130,7 @@ func (s *SQLStore) processConsensusChangeWallet(cc modules.ConsensusChange) { // Add/Remove siacoin outputs. for _, diff := range cc.SiacoinOutputDiffs { var sco types.SiacoinOutput - convertToCore(diff.SiacoinOutput, &sco) + convertToCore(diff.SiacoinOutput, (*types.V1SiacoinOutput)(&sco)) if sco.Address != s.walletAddress { continue } @@ -166,7 +166,7 @@ func (s *SQLStore) processConsensusChangeWallet(cc modules.ConsensusChange) { continue } var sco types.SiacoinOutput - convertToCore(dsco.SiacoinOutput, &sco) + convertToCore(dsco.SiacoinOutput, (*types.V1SiacoinOutput)(&sco)) s.unappliedTxnChanges = append(s.unappliedTxnChanges, txnChange{ addition: true, txnID: hash256(dsco.ID), // use output id as txn id @@ -213,7 +213,7 @@ func (s *SQLStore) processConsensusChangeWallet(cc modules.ConsensusChange) { for _, diff := range appliedDiff.SiacoinOutputDiffs { if diff.Direction == modules.DiffRevert { var so types.SiacoinOutput - convertToCore(diff.SiacoinOutput, &so) + convertToCore(diff.SiacoinOutput, (*types.V1SiacoinOutput)(&so)) spentOutputs[types.SiacoinOutputID(diff.ID)] = so } } From 0ed4313bcbb3adf1ad124a8e28d7fcfe0c5e1995 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 19 Feb 2024 10:48:40 +0100 Subject: [PATCH 048/172] stores: fix TestPartialSlab --- stores/metadata_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 4a6102399..c0ef54d0f 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -2661,7 +2661,7 @@ func TestPartialSlab(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(obj, fetched.Object) { + if !reflect.DeepEqual(obj, *fetched.Object) { t.Fatal("mismatch", cmp.Diff(obj, fetched.Object, cmp.AllowUnexported(object.EncryptionKey{}))) } @@ -2697,7 +2697,7 @@ func TestPartialSlab(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(obj2, fetched.Object) { + if !reflect.DeepEqual(obj2, *fetched.Object) { t.Fatal("mismatch", cmp.Diff(obj2, fetched.Object)) } @@ 
-2745,7 +2745,7 @@ func TestPartialSlab(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(obj3, fetched.Object) { + if !reflect.DeepEqual(obj3, *fetched.Object) { t.Fatal("mismatch", cmp.Diff(obj3, fetched.Object, cmp.AllowUnexported(object.EncryptionKey{}))) } From d7a51d2ba59da9eacd4fb149ec1db31e11fe9ea4 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 19 Feb 2024 10:55:19 +0100 Subject: [PATCH 049/172] stores: fix TestSQLMetadataStore --- stores/metadata_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index c0ef54d0f..ee7cd959c 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -1071,7 +1071,7 @@ func TestSQLMetadataStore(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(fullObj.Object, obj1) { + if !reflect.DeepEqual(*fullObj.Object, obj1) { t.Fatal("object mismatch", cmp.Diff(fullObj, obj1)) } @@ -1184,7 +1184,7 @@ func TestSQLMetadataStore(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(fullObj.Object, obj1) { + if !reflect.DeepEqual(*fullObj.Object, obj1) { t.Fatal("object mismatch") } From 3990ece02d492e83945cf150c3d0e0dda0d089f6 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 19 Feb 2024 10:58:01 +0100 Subject: [PATCH 050/172] stores: fix TestObjectBasic --- stores/metadata_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index ee7cd959c..4c7a468b2 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -89,7 +89,7 @@ func TestObjectBasic(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(got.Object, want) { + if !reflect.DeepEqual(*got.Object, want) { t.Fatal("object mismatch", cmp.Diff(got.Object, want)) } @@ -120,7 +120,7 @@ func TestObjectBasic(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(got2.Object, want2) { + if !reflect.DeepEqual(*got2.Object, want2) { t.Fatal("object mismatch", cmp.Diff(got2.Object, want2)) } } From f075c0caa82f7aa46b5bf03e2a7bb2af7ba35b49 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 19 Feb 2024 10:58:42 +0100 Subject: [PATCH 051/172] stores: TestObjectMetadata --- stores/metadata_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 4c7a468b2..d041e590c 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -177,7 +177,7 @@ func TestObjectMetadata(t *testing.T) { } // assert it matches - if !reflect.DeepEqual(got.Object, want) { + if !reflect.DeepEqual(*got.Object, want) { t.Log(got.Object) t.Log(want) t.Fatal("object mismatch", cmp.Diff(got.Object, want, cmp.AllowUnexported(object.EncryptionKey{}))) From 9fcbe97ba9b55854cc1f0e6579004455a2614dda Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 19 Feb 2024 11:26:17 +0100 Subject: [PATCH 052/172] worker: only upload a single packed slab synchronously --- api/setting.go | 9 +- internal/testing/cluster_test.go | 2 +- internal/testing/pruning_test.go | 3 +- worker/upload.go | 154 ++++++++++++++++--------------- worker/upload_test.go | 2 +- 5 files changed, 89 insertions(+), 81 deletions(-) diff --git a/api/setting.go b/api/setting.go index 348b3a62d..e5fd6da77 100644 --- a/api/setting.go +++ b/api/setting.go @@ -126,11 +126,16 @@ func (rs RedundancySettings) Redundancy() float64 { return float64(rs.TotalShards) / float64(rs.MinShards) } -// SlabSizeNoRedundancy returns the size of a slab without 
added redundancy. -func (rs RedundancySettings) SlabSizeNoRedundancy() uint64 { +// SlabSize returns the size of a slab. +func (rs RedundancySettings) SlabSize() uint64 { return uint64(rs.MinShards) * rhpv2.SectorSize } +// SlabSizeWithRedundancy returns the size of a slab with redundancy. +func (rs RedundancySettings) SlabSizeWithRedundancy() uint64 { + return uint64(rs.TotalShards) * rhpv2.SectorSize +} + // Validate returns an error if the redundancy settings are not considered // valid. func (rs RedundancySettings) Validate() error { diff --git a/internal/testing/cluster_test.go b/internal/testing/cluster_test.go index d290faf27..26b96f4ba 100644 --- a/internal/testing/cluster_test.go +++ b/internal/testing/cluster_test.go @@ -2349,7 +2349,7 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) { defer cluster.Shutdown() b := cluster.Bus w := cluster.Worker - slabSize := testRedundancySettings.SlabSizeNoRedundancy() + slabSize := testRedundancySettings.SlabSize() tt := cluster.tt // start a new multipart upload. We upload the parts in reverse order diff --git a/internal/testing/pruning_test.go b/internal/testing/pruning_test.go index 80e6ab29d..333763fa0 100644 --- a/internal/testing/pruning_test.go +++ b/internal/testing/pruning_test.go @@ -9,7 +9,6 @@ import ( "testing" "time" - rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" @@ -207,7 +206,7 @@ func TestSectorPruning(t *testing.T) { tt.Retry(100, 100*time.Millisecond, func() error { res, err = b.PrunableData(context.Background()) tt.OK(err) - if res.TotalPrunable != uint64(math.Ceil(float64(numObjects)/2))*uint64(rs.TotalShards)*rhpv2.SectorSize { + if res.TotalPrunable != uint64(math.Ceil(float64(numObjects)/2))*rs.SlabSizeWithRedundancy() { return fmt.Errorf("unexpected prunable data %v", n) } return nil diff --git a/worker/upload.go b/worker/upload.go index 72c65bf07..5d7c4b706 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -152,6 +152,15 @@ func (w *worker) initUploadManager(maxMemory, maxOverdrive uint64, overdriveTime w.uploadManager = newUploadManager(w.shutdownCtx, w, mm, w.bus, w.bus, maxOverdrive, overdriveTimeout, w.contractLockingDuration, logger) } +func (w *worker) isStopped() bool { + select { + case <-w.shutdownCtx.Done(): + return true + default: + } + return false +} + func (w *worker) upload(ctx context.Context, r io.Reader, contracts []api.ContractMetadata, up uploadParameters, opts ...UploadOption) (_ string, err error) { // apply the options for _, opt := range opts { @@ -177,12 +186,27 @@ func (w *worker) upload(ctx context.Context, r io.Reader, contracts []api.Contra return "", err } - // if packing was enabled try uploading packed slabs - if up.packing { - if err := w.tryUploadPackedSlabs(ctx, up.rs, up.contractSet, bufferSizeLimitReached); err != nil { - w.logger.Errorf("couldn't upload packed slabs, err: %v", err) + // return early if worker was shut down or if we don't have to consider + // packed uploads + if w.isStopped() || !up.packing { + return eTag, nil + } + + // try and upload one slab synchronously + if bufferSizeLimitReached { + mem := w.uploadManager.mm.AcquireMemory(ctx, up.rs.SlabSizeWithRedundancy()) + if mem != nil { + _, err := w.tryUploadPackedSlab(ctx, mem, defaultPackedSlabsLockDuration, up.rs, up.contractSet, lockingPriorityBlockedUpload) + if err != nil { + w.logger.Errorf("couldn't upload packed slabs, err: %v", err) + } + mem.Release() } } + + // make sure there's a goroutine uploading the remainder of 
the packed slabs + go w.threadedUploadPackedSlabs(up.rs, up.contractSet, lockingPriorityBackgroundUpload) + return eTag, nil } @@ -204,102 +228,82 @@ func (w *worker) threadedUploadPackedSlabs(rs api.RedundancySettings, contractSe w.uploadsMu.Unlock() }() - // keep uploading packed slabs until we're done - for { - uploaded, err := w.uploadPackedSlabs(w.shutdownCtx, defaultPackedSlabsLockDuration, rs, contractSet, lockPriority) - if err != nil { - w.logger.Errorf("couldn't upload packed slabs, err: %v", err) - return - } else if uploaded == 0 { - return - } - } -} - -func (w *worker) tryUploadPackedSlabs(ctx context.Context, rs api.RedundancySettings, contractSet string, block bool) (err error) { - // if we want to block, try and upload one packed slab synchronously, we use - // a slightly higher upload priority to avoid reaching the context deadline - if block { - _, err = w.uploadPackedSlabs(ctx, defaultPackedSlabsLockDuration, rs, contractSet, lockingPriorityBlockedUpload) - } - - // make sure there's a goroutine uploading the remainder of the packed slabs - go w.threadedUploadPackedSlabs(rs, contractSet, lockingPriorityBackgroundUpload) - return -} - -func (w *worker) uploadPackedSlabs(ctx context.Context, lockingDuration time.Duration, rs api.RedundancySettings, contractSet string, lockPriority int) (uploaded int, err error) { // upload packed slabs var mu sync.Mutex var errs error - var wg sync.WaitGroup - totalSize := uint64(rs.TotalShards) * rhpv2.SectorSize - - // derive a context that we can use as an interrupt in case of an error. - interruptCtx, cancel := context.WithCancel(ctx) - defer cancel() + // derive a context that we can use as an interrupt in case of an error or shutdown. + interruptCtx, interruptCancel := context.WithCancel(w.shutdownCtx) + defer interruptCancel() + var wg sync.WaitGroup for { - // block until we have memory for a slab or until we are interrupted - mem := w.uploadManager.mm.AcquireMemory(interruptCtx, totalSize) + // block until we have memory + mem := w.uploadManager.mm.AcquireMemory(interruptCtx, rs.SlabSizeWithRedundancy()) if mem == nil { break // interrupted } - // fetch packed slabs to upload - var packedSlabs []api.PackedSlab - packedSlabs, err = w.bus.PackedSlabsForUpload(ctx, lockingDuration, uint8(rs.MinShards), uint8(rs.TotalShards), contractSet, 1) - if err != nil { - err = fmt.Errorf("couldn't fetch packed slabs from bus: %v", err) - mem.Release() - break - } else if len(packedSlabs) == 0 { - mem.Release() - break // no more slabs - } - ps := packedSlabs[0] - - // launch upload for slab wg.Add(1) - go func(ps api.PackedSlab) { - defer mem.Release() + go func() { defer wg.Done() - err := w.uploadPackedSlab(ctx, rs, ps, mem, contractSet, lockPriority) - mu.Lock() + defer mem.Release() + + // we use the background context here, but apply a sane timeout, + // this ensures ongoing uploads are handled gracefully during + // shutdown + ctx, cancel := context.WithTimeout(context.Background(), defaultPackedSlabsUploadTimeout) + defer cancel() + + // attach interaction recorder to the context + ctx = context.WithValue(ctx, keyInteractionRecorder, w) + + // try to upload a packed slab, if there were no packed slabs left to upload ok is false + ok, err := w.tryUploadPackedSlab(ctx, mem, defaultPackedSlabsLockDuration, rs, contractSet, lockPriority) if err != nil { + mu.Lock() errs = errors.Join(errs, err) - cancel() // prevent new uploads from being launched - } else { - uploaded++ + mu.Unlock() + interruptCancel() // prevent new uploads from being 
launched + } else if !ok { + interruptCancel() // no more packed slabs to upload + } else if w.isStopped() { + interruptCancel() // worker shut down } - mu.Unlock() - }(ps) + }() } // wait for all threads to finish wg.Wait() // return collected errors - err = errors.Join(err, errs) + if err := errors.Join(errs); err != nil { + w.logger.Errorf("couldn't upload packed slabs, err: %v", err) + } return } -func (w *worker) uploadPackedSlab(ctx context.Context, rs api.RedundancySettings, ps api.PackedSlab, mem Memory, contractSet string, lockPriority int) error { - // create a context with sane timeout - ctx, cancel := context.WithTimeout(ctx, defaultPackedSlabsUploadTimeout) - defer cancel() +func (w *worker) tryUploadPackedSlab(ctx context.Context, mem Memory, lockingDuration time.Duration, rs api.RedundancySettings, contractSet string, lockPriority int) (bool, error) { + // fetch packed slab to upload + packedSlabs, err := w.bus.PackedSlabsForUpload(ctx, lockingDuration, uint8(rs.MinShards), uint8(rs.TotalShards), contractSet, 1) + if err != nil { + err = fmt.Errorf("couldn't fetch packed slabs from bus: %v", err) + return false, err + } else if len(packedSlabs) == 0 { + return false, nil // no more slabs + } + ps := packedSlabs[0] // fetch contracts contracts, err := w.bus.Contracts(ctx, api.ContractsOpts{ContractSet: contractSet}) if err != nil { - return fmt.Errorf("couldn't fetch packed slabs from bus: %v", err) + return false, fmt.Errorf("couldn't fetch contracts from bus: %v", err) } // fetch upload params up, err := w.bus.UploadParams(ctx) if err != nil { - return fmt.Errorf("couldn't fetch upload params from bus: %v", err) + return false, fmt.Errorf("couldn't fetch upload params from bus: %v", err) } // attach gouging checker to the context @@ -308,10 +312,10 @@ func (w *worker) uploadPackedSlab(ctx context.Context, rs api.RedundancySettings // upload packed slab err = w.uploadManager.UploadPackedSlab(ctx, rs, ps, mem, contracts, up.CurrentHeight, lockPriority) if err != nil { - return fmt.Errorf("couldn't upload packed slab, err: %v", err) + return false, fmt.Errorf("couldn't upload packed slab, err: %v", err) } - return nil + return true, nil } func newUploadManager(ctx context.Context, hm HostManager, mm MemoryManager, os ObjectStore, cs ContractStore, maxOverdrive uint64, overdriveTimeout time.Duration, contractLockDuration time.Duration, logger *zap.SugaredLogger) *uploadManager { @@ -435,9 +439,9 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a // channel to notify main thread of the number of slabs to wait for numSlabsChan := make(chan int, 1) - // prepare slab size - size := int64(up.rs.MinShards) * rhpv2.SectorSize - redundantSize := uint64(up.rs.TotalShards) * rhpv2.SectorSize + // prepare slab sizes + slabSize := up.rs.SlabSize() + slabSizeWithRedundancy := up.rs.SlabSizeWithRedundancy() var partialSlab []byte // launch uploads in a separate goroutine @@ -452,14 +456,14 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a default: } // acquire memory - mem := mgr.mm.AcquireMemory(ctx, redundantSize) + mem := mgr.mm.AcquireMemory(ctx, slabSizeWithRedundancy) if mem == nil { return // interrupted } // read next slab's data - data := make([]byte, size) - length, err := io.ReadFull(io.LimitReader(cr, size), data) + data := make([]byte, slabSize) + length, err := io.ReadFull(io.LimitReader(cr, int64(slabSize)), data) if err == io.EOF { mem.Release() diff --git a/worker/upload_test.go b/worker/upload_test.go
index 8d32455bd..9a285efa5 100644 --- a/worker/upload_test.go +++ b/worker/upload_test.go @@ -185,7 +185,7 @@ func TestUploadPackedSlab(t *testing.T) { t.Fatal("expected 1 packed slab") } ps := pss[0] - mem := mm.AcquireMemory(context.Background(), uint64(params.rs.TotalShards*rhpv2.SectorSize)) + mem := mm.AcquireMemory(context.Background(), params.rs.SlabSizeWithRedundancy()) // upload the packed slab err = ul.UploadPackedSlab(context.Background(), params.rs, ps, mem, w.contracts(), 0, lockingPriorityUpload) From 2756576a4012b80fbde57a34bcd5a7675ced39f2 Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 19 Feb 2024 11:54:19 +0100 Subject: [PATCH 053/172] worker: use background context for unlocking accounts --- worker/rhpv3.go | 179 +++++++++++++++++++++++------------------------- 1 file changed, 86 insertions(+), 93 deletions(-) diff --git a/worker/rhpv3.go b/worker/rhpv3.go index 03f67c6f6..5fbcd3ad6 100644 --- a/worker/rhpv3.go +++ b/worker/rhpv3.go @@ -337,19 +337,17 @@ type ( // accounts stores the balance and other metrics of accounts that the // worker maintains with a host. accounts struct { - as AccountStore - key types.PrivateKey - shutdownCtx context.Context + as AccountStore + key types.PrivateKey } // account contains information regarding a specific account of the // worker. account struct { - as AccountStore - id rhpv3.Account - key types.PrivateKey - host types.PublicKey - shutdownCtx context.Context + as AccountStore + id rhpv3.Account + key types.PrivateKey + host types.PublicKey } ) @@ -358,9 +356,8 @@ func (w *worker) initAccounts(as AccountStore) { panic("accounts already initialized") // developer error } w.accounts = &accounts{ - as: as, - key: w.deriveSubKey("accountkey"), - shutdownCtx: w.shutdownCtx, + as: as, + key: w.deriveSubKey("accountkey"), } } @@ -376,117 +373,113 @@ func (w *worker) initTransportPool() { func (a *accounts) ForHost(hk types.PublicKey) *account { accountID := rhpv3.Account(a.deriveAccountKey(hk).PublicKey()) return &account{ - as: a.as, - id: accountID, - key: a.key, - host: hk, - shutdownCtx: a.shutdownCtx, + as: a.as, + id: accountID, + key: a.key, + host: hk, } } -// WithDeposit increases the balance of an account by the amount returned by -// amtFn if amtFn doesn't return an error. 
-func (a *account) WithDeposit(ctx context.Context, amtFn func() (types.Currency, error)) error { - _, lockID, err := a.as.LockAccount(ctx, a.id, a.host, false, accountLockingDuration) +func withAccountLock(ctx context.Context, as AccountStore, id rhpv3.Account, hk types.PublicKey, exclusive bool, fn func(a api.Account) error) error { + acc, lockID, err := as.LockAccount(ctx, id, hk, exclusive, accountLockingDuration) if err != nil { return err } + defer func() { - unlockCtx, cancel := context.WithTimeout(a.shutdownCtx, 10*time.Second) - a.as.UnlockAccount(unlockCtx, a.id, lockID) - cancel() + select { + case <-ctx.Done(): + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(context.Background(), time.Minute) + defer cancel() + default: + } + as.UnlockAccount(ctx, acc.ID, lockID) }() - amt, err := amtFn() - if err != nil { - return err - } - return a.as.AddBalance(ctx, a.id, a.host, amt.Big()) + return fn(acc) } -func (a *account) Balance(ctx context.Context) (types.Currency, error) { - account, lockID, err := a.as.LockAccount(ctx, a.id, a.host, false, accountLockingDuration) - if err != nil { - return types.Currency{}, err - } - defer func() { - unlockCtx, cancel := context.WithTimeout(a.shutdownCtx, 10*time.Second) - a.as.UnlockAccount(unlockCtx, a.id, lockID) - cancel() - }() +// Balance returns the account balance. +func (a *account) Balance(ctx context.Context) (balance types.Currency, err error) { + err = withAccountLock(ctx, a.as, a.id, a.host, false, func(account api.Account) error { + balance = types.NewCurrency(account.Balance.Uint64(), new(big.Int).Rsh(account.Balance, 64).Uint64()) + return nil + }) + return +} - return types.NewCurrency(account.Balance.Uint64(), new(big.Int).Rsh(account.Balance, 64).Uint64()), nil +// WithDeposit increases the balance of an account by the amount returned by +// amtFn if amtFn doesn't return an error. +func (a *account) WithDeposit(ctx context.Context, amtFn func() (types.Currency, error)) error { + return withAccountLock(ctx, a.as, a.id, a.host, false, func(_ api.Account) error { + amt, err := amtFn() + if err != nil { + return err + } + return a.as.AddBalance(ctx, a.id, a.host, amt.Big()) + }) +} + +// WithSync syncs an account's balance with the bus. To do so, the account is +// locked while the balance is fetched through balanceFn. +func (a *account) WithSync(ctx context.Context, balanceFn func() (types.Currency, error)) error { + return withAccountLock(ctx, a.as, a.id, a.host, true, func(_ api.Account) error { + balance, err := balanceFn() + if err != nil { + return err + } + return a.as.SetBalance(ctx, a.id, a.host, balance.Big()) + }) } // WithWithdrawal decreases the balance of an account by the amount returned by // amtFn. The amount is still withdrawn if amtFn returns an error since some // costs are non-refundable.
func (a *account) WithWithdrawal(ctx context.Context, amtFn func() (types.Currency, error)) error { - account, lockID, err := a.as.LockAccount(ctx, a.id, a.host, false, accountLockingDuration) - if err != nil { - return err - } - defer func() { - unlockCtx, cancel := context.WithTimeout(a.shutdownCtx, 10*time.Second) - a.as.UnlockAccount(unlockCtx, a.id, lockID) - cancel() - }() + return withAccountLock(ctx, a.as, a.id, a.host, false, func(account api.Account) error { + // return early if the account needs to sync + if account.RequiresSync { + return fmt.Errorf("%w; account requires resync", errBalanceInsufficient) + } - // return early if the account needs to sync - if account.RequiresSync { - return fmt.Errorf("%w; account requires resync", errBalanceInsufficient) - } + // return early if our account is not funded + if account.Balance.Cmp(big.NewInt(0)) <= 0 { + return errBalanceInsufficient + } - // return early if our account is not funded - if account.Balance.Cmp(big.NewInt(0)) <= 0 { - return errBalanceInsufficient - } + // execute amtFn + amt, err := amtFn() + if isBalanceInsufficient(err) { + // in case of an insufficient balance, we schedule a sync + if scheduleErr := a.scheduleSync(); scheduleErr != nil { + err = fmt.Errorf("%w; failed to set requiresSync flag on bus, error: %v", err, scheduleErr) + } + } - // execute amtFn - amt, err := amtFn() - if isBalanceInsufficient(err) { - // in case of an insufficient balance, we schedule a sync - scheduleCtx, cancel := context.WithTimeout(a.shutdownCtx, 10*time.Second) - defer cancel() - err2 := a.as.ScheduleSync(scheduleCtx, a.id, a.host) - if err2 != nil { - err = fmt.Errorf("%w; failed to set requiresSync flag on bus, error: %v", err, err2) + // if an amount was returned, we withdraw it. + if withdrawErr := a.withdrawFromBalance(amt); withdrawErr != nil { + err = fmt.Errorf("%w; failed to withdraw from account, error: %v", err, withdrawErr) } - } - // if the amount is zero, we are done - if amt.IsZero() { return err + }) +} + +func (a *account) withdrawFromBalance(amt types.Currency) error { + if amt.IsZero() { + return nil } - // if an amount was returned, we withdraw it. - addCtx, cancel := context.WithTimeout(a.shutdownCtx, 10*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() - errAdd := a.as.AddBalance(addCtx, a.id, a.host, new(big.Int).Neg(amt.Big())) - if errAdd != nil { - err = fmt.Errorf("%w; failed to add balance to account, error: %v", err, errAdd) - } - return err + return a.as.AddBalance(ctx, a.id, a.host, new(big.Int).Neg(amt.Big())) } -// WithSync syncs an accounts balance with the bus. To do so, the account is -// locked while the balance is fetched through balanceFn. -func (a *account) WithSync(ctx context.Context, balanceFn func() (types.Currency, error)) error { - _, lockID, err := a.as.LockAccount(ctx, a.id, a.host, true, accountLockingDuration) - if err != nil { - return err - } - defer func() { - unlockCtx, cancel := context.WithTimeout(a.shutdownCtx, 10*time.Second) - a.as.UnlockAccount(unlockCtx, a.id, lockID) - cancel() - }() - - balance, err := balanceFn() - if err != nil { - return err - } - return a.as.SetBalance(ctx, a.id, a.host, balance.Big()) +func (a *account) scheduleSync() error { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + return a.as.ScheduleSync(ctx, a.id, a.host) } // deriveAccountKey derives an account plus key for a given host and worker. 
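Note: the unlock pattern introduced above (always releasing the lock, even when the caller's context has already expired) generalizes beyond accounts. Below is a minimal sketch of just that pattern; none of these names are part of the patch, the Locker interface is hypothetical, and only the context handling mirrors withAccountLock above.

package lockutil

import (
	"context"
	"log"
	"time"
)

// Locker is a hypothetical stand-in for the AccountStore lock/unlock pair.
type Locker interface {
	Lock(ctx context.Context) (lockID uint64, err error)
	Unlock(ctx context.Context, lockID uint64) error
}

// withLock runs fn while holding the lock. If the caller's context is
// already cancelled by the time fn returns, the unlock still has to go
// through, so it falls back to a fresh background context with a timeout.
func withLock(ctx context.Context, l Locker, fn func() error) error {
	lockID, err := l.Lock(ctx)
	if err != nil {
		return err
	}
	defer func() {
		select {
		case <-ctx.Done():
			var cancel context.CancelFunc
			ctx, cancel = context.WithTimeout(context.Background(), time.Minute)
			defer cancel()
		default:
		}
		if err := l.Unlock(ctx, lockID); err != nil {
			log.Printf("unlock failed: %v", err)
		}
	}()
	return fn()
}

The trade-off is that an unlock may now outlive the request that acquired the lock; the one-minute timeout bounds how long such a stray unlock can run.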
From 2102b91ec6ba0b8ff36f3534dd4452eccb17aa6e Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 19 Feb 2024 13:08:32 +0100 Subject: [PATCH 054/172] ci: add Test Stores - MySQL --- .github/workflows/test.yml | 10 ++++++++++ stores/sql_test.go | 29 ++++++++++++++++++++++++++--- 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 8e4c21faf..e8a32e5ec 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -47,6 +47,16 @@ jobs: uses: n8maninger/action-golang-test@v1 with: args: "-race;-short" + - name: Test Stores - MySQL + if: matrix.os == 'ubuntu-latest' + uses: n8maninger/action-golang-test@v1 + env: + RENTERD_DB_URI: 127.0.0.1:3800 + RENTERD_DB_USER: root + RENTERD_DB_PASSWORD: test + with: + package: "./stores" + args: "-race;-short" - name: Test Integration uses: n8maninger/action-golang-test@v1 with: diff --git a/stores/sql_test.go b/stores/sql_test.go index 3a51161ae..ae0c54b0c 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -48,6 +48,9 @@ type testSQLStore struct { } type testSQLStoreConfig struct { + dbURI string + dbUser string + dbPassword string dbName string dbMetricsName string dir string @@ -65,9 +68,26 @@ func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore { if dir == "" { dir = t.TempDir() } - dbName := cfg.dbName + + dbURI, dbUser, dbPassword, dbName := DBConfigFromEnv() + if dbURI == "" { + dbURI = cfg.dbURI + } + if cfg.persistent && dbURI != "" { + t.Fatal("invalid store config, can't use both persistent and dbURI") + } + if dbUser == "" { + dbUser = cfg.dbUser + } + if dbPassword == "" { + dbPassword = cfg.dbPassword + } if dbName == "" { - dbName = hex.EncodeToString(frand.Bytes(32)) // random name for db + if cfg.dbName != "" { + dbName = cfg.dbName + } else { + dbName = hex.EncodeToString(frand.Bytes(32)) // random name for db + } } dbMetricsName := cfg.dbMetricsName if dbMetricsName == "" { @@ -75,7 +95,10 @@ func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore { } var conn, connMetrics gorm.Dialector - if cfg.persistent { + if dbURI != "" { + conn = NewMySQLConnection(dbURI, dbUser, dbPassword, dbName) + connMetrics = NewMySQLConnection(dbURI, dbUser, dbPassword, dbMetricsName) + } else if cfg.persistent { conn = NewSQLiteConnection(filepath.Join(cfg.dir, "db.sqlite")) connMetrics = NewSQLiteConnection(filepath.Join(cfg.dir, "metrics.sqlite")) } else { From d7e0cf5751a84c24ca35ebf84191537cf2a17ff4 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 19 Feb 2024 14:18:45 +0100 Subject: [PATCH 055/172] testing: fix TestS3MultipartUploads --- worker/worker.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/worker/worker.go b/worker/worker.go index 2a2191d2d..b57623b7b 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1154,7 +1154,7 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { } // make sure only one of the following is set - if encryptionEnabled := upload.Key != object.NoOpKey; encryptionEnabled && jc.Request.FormValue("offset") == "" { + if encryptionEnabled := !upload.Key.IsNoopKey(); encryptionEnabled && jc.Request.FormValue("offset") == "" { jc.Error(errors.New("if presharding encryption isn't disabled, the offset needs to be set"), http.StatusBadRequest) return } else if encryptionEnabled { From 361c6cf43f6ffb85b86171bf3deedcd805d5ca88 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 19 Feb 2024 16:46:41 +0100 Subject: [PATCH 056/172] stores: add index to object size 
--- stores/migrations.go | 13 ++++++++++--- .../mysql/main/migration_00003_idx_objects_size.sql | 1 + stores/migrations/mysql/main/schema.sql | 1 + .../main/migration_00003_idx_objects_size.sql | 1 + stores/migrations/sqlite/main/schema.sql | 1 + stores/migrations_metrics.go | 3 ++- 6 files changed, 16 insertions(+), 4 deletions(-) create mode 100644 stores/migrations/mysql/main/migration_00003_idx_objects_size.sql create mode 100644 stores/migrations/sqlite/main/migration_00003_idx_objects_size.sql diff --git a/stores/migrations.go b/stores/migrations.go index 7f225ff17..bf3916ca4 100644 --- a/stores/migrations.go +++ b/stores/migrations.go @@ -16,6 +16,7 @@ var ( ) func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { + dbIdentifier := "main" migrations := []*gormigrate.Migration{ { ID: "00001_init", @@ -24,26 +25,32 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { { ID: "00001_object_metadata", Migrate: func(tx *gorm.DB) error { - return performMigration(tx, "main", "00001_object_metadata", logger) + return performMigration(tx, dbIdentifier, "00001_object_metadata", logger) }, }, { ID: "00002_prune_slabs_trigger", Migrate: func(tx *gorm.DB) error { - err := performMigration(tx, "main", "00002_prune_slabs_trigger", logger) + err := performMigration(tx, dbIdentifier, "00002_prune_slabs_trigger", logger) if err != nil && strings.Contains(err.Error(), errMySQLNoSuperPrivilege.Error()) { logger.Warn("migration 00002_prune_slabs_trigger requires the user to have the SUPER privilege to register triggers") } return err }, }, + { + ID: "00003_idx_objects_size", + Migrate: func(tx *gorm.DB) error { + return performMigration(tx, dbIdentifier, "00003_idx_objects_size", logger) + }, + }, } // Create migrator. m := gormigrate.New(db, gormigrate.DefaultOptions, migrations) // Set init function. - m.InitSchema(initSchema(db, "main", logger)) + m.InitSchema(initSchema(db, dbIdentifier, logger)) // Perform migrations. 
if err := m.Migrate(); err != nil { diff --git a/stores/migrations/mysql/main/migration_00003_idx_objects_size.sql b/stores/migrations/mysql/main/migration_00003_idx_objects_size.sql new file mode 100644 index 000000000..0df0b5d58 --- /dev/null +++ b/stores/migrations/mysql/main/migration_00003_idx_objects_size.sql @@ -0,0 +1 @@ +CREATE INDEX `idx_objects_size` ON `objects`(`size`); diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index d28bdd13f..1b39e4669 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -330,6 +330,7 @@ CREATE TABLE `objects` ( KEY `idx_objects_object_id` (`object_id`), KEY `idx_objects_health` (`health`), KEY `idx_objects_etag` (`etag`), + KEY `idx_objects_size` (`size`), CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets` (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; diff --git a/stores/migrations/sqlite/main/migration_00003_idx_objects_size.sql b/stores/migrations/sqlite/main/migration_00003_idx_objects_size.sql new file mode 100644 index 000000000..0df0b5d58 --- /dev/null +++ b/stores/migrations/sqlite/main/migration_00003_idx_objects_size.sql @@ -0,0 +1 @@ +CREATE INDEX `idx_objects_size` ON `objects`(`size`); diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql index daee619b4..e6bb2546d 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -50,6 +50,7 @@ CREATE INDEX `idx_objects_db_bucket_id` ON `objects`(`db_bucket_id`); CREATE INDEX `idx_objects_etag` ON `objects`(`etag`); CREATE INDEX `idx_objects_health` ON `objects`(`health`); CREATE INDEX `idx_objects_object_id` ON `objects`(`object_id`); +CREATE INDEX `idx_objects_size` ON `objects`(`size`); CREATE UNIQUE INDEX `idx_object_bucket` ON `objects`(`db_bucket_id`,`object_id`); -- dbMultipartUpload diff --git a/stores/migrations_metrics.go b/stores/migrations_metrics.go index 940917569..60c62c476 100644 --- a/stores/migrations_metrics.go +++ b/stores/migrations_metrics.go @@ -9,6 +9,7 @@ import ( ) func performMetricsMigrations(tx *gorm.DB, logger *zap.SugaredLogger) error { + dbIdentifier := "metrics" migrations := []*gormigrate.Migration{ { ID: "00001_init", @@ -20,7 +21,7 @@ func performMetricsMigrations(tx *gorm.DB, logger *zap.SugaredLogger) error { m := gormigrate.New(tx, gormigrate.DefaultOptions, migrations) // Set init function. - m.InitSchema(initSchema(tx, "metrics", logger)) + m.InitSchema(initSchema(tx, dbIdentifier, logger)) // Perform migrations. 
if err := m.Migrate(); err != nil { From 9bb5972586e6fc26ad0c21758b0c265f8bab76a2 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 20 Feb 2024 09:55:13 +0100 Subject: [PATCH 057/172] stores: migration code --- .../migration_00004_prune_slabs_cascade.sql | 24 +++++++++++ stores/migrations/mysql/main/schema.sql | 40 ++---------------- .../migration_00004_prune_slabs_cascade.sql | 28 +++++++++++++ stores/migrations/sqlite/main/schema.sql | 42 +------------------ 4 files changed, 57 insertions(+), 77 deletions(-) create mode 100644 stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql create mode 100644 stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql diff --git a/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql b/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql new file mode 100644 index 000000000..48f98a40a --- /dev/null +++ b/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql @@ -0,0 +1,24 @@ +-- prune manually before dropping the triggers +DELETE slabs +FROM slabs +LEFT JOIN slices ON slices.db_slab_id = slabs.id +WHERE slices.db_object_id IS NULL +AND slices.db_multipart_part_id IS NULL +AND slabs.db_buffered_slab_id IS NULL; + +-- add ON DELETE CASCADE to slices +ALTER TABLE slices DROP FOREIGN KEY fk_objects_slabs; +ALTER TABLE slices ADD CONSTRAINT fk_objects_slabs FOREIGN KEY (db_object_id) REFERENCES objects (id) ON DELETE CASCADE; + +ALTER TABLE slices DROP FOREIGN KEY fk_multipart_parts_slabs; +ALTER TABLE slices ADD CONSTRAINT fk_multipart_parts_slabs FOREIGN KEY (db_multipart_part_id) REFERENCES multipart_parts (id) ON DELETE CASCADE; + +-- add ON DELETE CASCADE to multipart_parts +ALTER TABLE multipart_parts DROP FOREIGN KEY fk_multipart_uploads_parts; +ALTER TABLE multipart_parts ADD CONSTRAINT fk_multipart_uploads_parts FOREIGN KEY (db_multipart_upload_id) REFERENCES multipart_uploads (id) ON DELETE CASCADE; + +-- drop triggers +DROP TRIGGER IF EXISTS before_delete_on_objects_delete_slices; +DROP TRIGGER IF EXISTS before_delete_on_multipart_uploads_delete_multipart_parts; +DROP TRIGGER IF EXISTS before_delete_on_multipart_parts_delete_slices; +DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs; \ No newline at end of file diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index 1b39e4669..a5ed86807 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -310,7 +310,7 @@ CREATE TABLE `multipart_parts` ( KEY `idx_multipart_parts_etag` (`etag`), KEY `idx_multipart_parts_part_number` (`part_number`), KEY `idx_multipart_parts_db_multipart_upload_id` (`db_multipart_upload_id`), - CONSTRAINT `fk_multipart_uploads_parts` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) + CONSTRAINT `fk_multipart_uploads_parts` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -- dbObject @@ -374,8 +374,8 @@ CREATE TABLE `slices` ( KEY `idx_slices_object_index` (`object_index`), KEY `idx_slices_db_multipart_part_id` (`db_multipart_part_id`), KEY `idx_slices_db_slab_id` (`db_slab_id`), - CONSTRAINT `fk_multipart_parts_slabs` FOREIGN KEY (`db_multipart_part_id`) REFERENCES `multipart_parts` (`id`), - CONSTRAINT `fk_objects_slabs` FOREIGN KEY (`db_object_id`) REFERENCES `objects` (`id`), + CONSTRAINT `fk_multipart_parts_slabs` FOREIGN KEY (`db_multipart_part_id`) REFERENCES `multipart_parts` (`id`) ON DELETE CASCADE, +
CONSTRAINT `fk_objects_slabs` FOREIGN KEY (`db_object_id`) REFERENCES `objects` (`id`) ON DELETE CASCADE, CONSTRAINT `fk_slabs_slices` FOREIGN KEY (`db_slab_id`) REFERENCES `slabs` (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; @@ -421,39 +421,5 @@ CREATE TABLE `object_user_metadata` ( CONSTRAINT `fk_multipart_upload_user_metadata` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) ON DELETE SET NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; --- dbObject trigger to delete from slices -CREATE TRIGGER before_delete_on_objects_delete_slices -BEFORE DELETE -ON objects FOR EACH ROW -DELETE FROM slices -WHERE slices.db_object_id = OLD.id; - --- dbMultipartUpload trigger to delete from dbMultipartPart -CREATE TRIGGER before_delete_on_multipart_uploads_delete_multipart_parts -BEFORE DELETE -ON multipart_uploads FOR EACH ROW -DELETE FROM multipart_parts -WHERE multipart_parts.db_multipart_upload_id = OLD.id; - --- dbMultipartPart trigger to delete from slices -CREATE TRIGGER before_delete_on_multipart_parts_delete_slices -BEFORE DELETE -ON multipart_parts FOR EACH ROW -DELETE FROM slices -WHERE slices.db_multipart_part_id = OLD.id; - --- dbSlices trigger to prune slabs -CREATE TRIGGER after_delete_on_slices_delete_slabs -AFTER DELETE -ON slices FOR EACH ROW -DELETE FROM slabs -WHERE slabs.id = OLD.db_slab_id -AND slabs.db_buffered_slab_id IS NULL -AND NOT EXISTS ( - SELECT 1 - FROM slices - WHERE slices.db_slab_id = OLD.db_slab_id -); - -- create default bucket INSERT INTO buckets (created_at, name) VALUES (CURRENT_TIMESTAMP, 'default'); \ No newline at end of file diff --git a/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql b/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql new file mode 100644 index 000000000..2cd633c11 --- /dev/null +++ b/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql @@ -0,0 +1,28 @@ +PRAGMA foreign_keys=off; +-- update constraints on slices +CREATE TABLE `slices_temp` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_object_id` integer,`object_index` integer,`db_multipart_part_id` integer,`db_slab_id` integer,`offset` integer,`length` integer,CONSTRAINT `fk_objects_slabs` FOREIGN KEY (`db_object_id`) REFERENCES `objects`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_multipart_parts_slabs` FOREIGN KEY (`db_multipart_part_id`) REFERENCES `multipart_parts`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_slabs_slices` FOREIGN KEY (`db_slab_id`) REFERENCES `slabs`(`id`)); +INSERT INTO slices_temp SELECT `id`, `created_at`, `db_object_id`, `object_index`, `db_multipart_part_id`, `db_slab_id`, `offset`, `length` FROM slices; +DROP TABLE slices; +ALTER TABLE slices_temp RENAME TO slices; + +CREATE INDEX `idx_slices_object_index` ON `slices`(`object_index`); +CREATE INDEX `idx_slices_db_object_id` ON `slices`(`db_object_id`); +CREATE INDEX `idx_slices_db_slab_id` ON `slices`(`db_slab_id`); +CREATE INDEX `idx_slices_db_multipart_part_id` ON `slices`(`db_multipart_part_id`); + +-- update constraints multipart_parts +CREATE TABLE `multipart_parts_temp` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`etag` text,`part_number` integer,`size` integer,`db_multipart_upload_id` integer NOT NULL,CONSTRAINT `fk_multipart_uploads_parts` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads`(`id`) ON DELETE CASCADE); +INSERT INTO multipart_parts_temp SELECT * FROM multipart_parts; +DROP TABLE multipart_parts; +ALTER TABLE 
multipart_parts_temp RENAME TO multipart_parts; + +CREATE INDEX `idx_multipart_parts_db_multipart_upload_id` ON `multipart_parts`(`db_multipart_upload_id`); +CREATE INDEX `idx_multipart_parts_part_number` ON `multipart_parts`(`part_number`); +CREATE INDEX `idx_multipart_parts_etag` ON `multipart_parts`(`etag`); +PRAGMA foreign_keys=on; + +-- drop triggers +DROP TRIGGER IF EXISTS before_delete_on_objects_delete_slices; +DROP TRIGGER IF EXISTS before_delete_on_multipart_uploads_delete_multipart_parts; +DROP TRIGGER IF EXISTS before_delete_on_multipart_parts_delete_slices; +DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs; \ No newline at end of file diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql index e6bb2546d..8d7afeaa1 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -85,13 +85,13 @@ CREATE INDEX `idx_contract_sectors_db_contract_id` ON `contract_sectors`(`db_con CREATE INDEX `idx_contract_sectors_db_sector_id` ON `contract_sectors`(`db_sector_id`); -- dbMultipartPart -CREATE TABLE `multipart_parts` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`etag` text,`part_number` integer,`size` integer,`db_multipart_upload_id` integer NOT NULL,CONSTRAINT `fk_multipart_uploads_parts` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads`(`id`)); +CREATE TABLE `multipart_parts` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`etag` text,`part_number` integer,`size` integer,`db_multipart_upload_id` integer NOT NULL,CONSTRAINT `fk_multipart_uploads_parts` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads`(`id`) ON DELETE CASCADE); CREATE INDEX `idx_multipart_parts_db_multipart_upload_id` ON `multipart_parts`(`db_multipart_upload_id`); CREATE INDEX `idx_multipart_parts_part_number` ON `multipart_parts`(`part_number`); CREATE INDEX `idx_multipart_parts_etag` ON `multipart_parts`(`etag`); -- dbSlice -CREATE TABLE `slices` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_object_id` integer,`object_index` integer,`db_multipart_part_id` integer,`db_slab_id` integer,`offset` integer,`length` integer,CONSTRAINT `fk_objects_slabs` FOREIGN KEY (`db_object_id`) REFERENCES `objects`(`id`),CONSTRAINT `fk_multipart_parts_slabs` FOREIGN KEY (`db_multipart_part_id`) REFERENCES `multipart_parts`(`id`),CONSTRAINT `fk_slabs_slices` FOREIGN KEY (`db_slab_id`) REFERENCES `slabs`(`id`)); +CREATE TABLE `slices` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_object_id` integer,`object_index` integer,`db_multipart_part_id` integer,`db_slab_id` integer,`offset` integer,`length` integer,CONSTRAINT `fk_objects_slabs` FOREIGN KEY (`db_object_id`) REFERENCES `objects`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_multipart_parts_slabs` FOREIGN KEY (`db_multipart_part_id`) REFERENCES `multipart_parts`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_slabs_slices` FOREIGN KEY (`db_slab_id`) REFERENCES `slabs`(`id`)); CREATE INDEX `idx_slices_object_index` ON `slices`(`object_index`); CREATE INDEX `idx_slices_db_object_id` ON `slices`(`db_object_id`); CREATE INDEX `idx_slices_db_slab_id` ON `slices`(`db_slab_id`); @@ -148,43 +148,5 @@ CREATE UNIQUE INDEX `idx_module_event_url` ON `webhooks`(`module`,`event`,`url`) CREATE TABLE `object_user_metadata` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_object_id` integer DEFAULT NULL,`db_multipart_upload_id` integer DEFAULT NULL,`key` text NOT NULL,`value` text, CONSTRAINT `fk_object_user_metadata` FOREIGN KEY
(`db_object_id`) REFERENCES `objects` (`id`) ON DELETE CASCADE, CONSTRAINT `fk_multipart_upload_user_metadata` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) ON DELETE SET NULL); CREATE UNIQUE INDEX `idx_object_user_metadata_key` ON `object_user_metadata`(`db_object_id`,`db_multipart_upload_id`,`key`); --- dbObject trigger to delete from slices -CREATE TRIGGER before_delete_on_objects_delete_slices -BEFORE DELETE ON objects -BEGIN - DELETE FROM slices - WHERE slices.db_object_id = OLD.id; -END; - --- dbMultipartUpload trigger to delete from dbMultipartPart -CREATE TRIGGER before_delete_on_multipart_uploads_delete_multipart_parts -BEFORE DELETE ON multipart_uploads -BEGIN - DELETE FROM multipart_parts - WHERE multipart_parts.db_multipart_upload_id = OLD.id; -END; - --- dbMultipartPart trigger to delete from slices -CREATE TRIGGER before_delete_on_multipart_parts_delete_slices -BEFORE DELETE ON multipart_parts -BEGIN - DELETE FROM slices - WHERE slices.db_multipart_part_id = OLD.id; -END; - --- dbSlices trigger to prune slabs -CREATE TRIGGER after_delete_on_slices_delete_slabs -AFTER DELETE ON slices -BEGIN - DELETE FROM slabs - WHERE slabs.id = OLD.db_slab_id - AND slabs.db_buffered_slab_id IS NULL - AND NOT EXISTS ( - SELECT 1 - FROM slices - WHERE slices.db_slab_id = OLD.db_slab_id - ); -END; - -- create default bucket INSERT INTO buckets (created_at, name) VALUES (CURRENT_TIMESTAMP, 'default'); From 1f004223a11b0c4897438db98584d81460365e7e Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 20 Feb 2024 10:06:21 +0100 Subject: [PATCH 058/172] stores: call pruneSlabs every time an object or multipart object is deleted --- stores/metadata.go | 23 ++++++++++++++++++++--- stores/multipart.go | 9 +++++++++ 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 68947ed95..a9636bf75 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -2685,20 +2685,32 @@ func archiveContracts(ctx context.Context, tx *gorm.DB, contracts []dbContract, return nil } +func pruneSlabs(tx *gorm.DB) error { + // delete slabs without any associated slices or buffers + return tx.Exec(` +DELETE +FROM slabs sla +WHERE NOT EXISTS (SELECT 1 FROM slices sli WHERE sli.db_slab_id = sla.id) +AND sla.db_buffered_slab_id IS NULL +`).Error +} + // deleteObject deletes an object from the store and prunes all slabs which are // without an object after the deletion. That means in case of packed uploads, // the slab is only deleted when no more objects point to it. -func (s *SQLStore) deleteObject(tx *gorm.DB, bucket string, path string) (numDeleted int64, _ error) { +func (s *SQLStore) deleteObject(tx *gorm.DB, bucket string, path string) (int64, error) { tx = tx.Where("object_id = ? AND ?", path, sqlWhereBucket("objects", bucket)). Delete(&dbObject{}) if tx.Error != nil { return 0, tx.Error } - numDeleted = tx.RowsAffected + numDeleted := tx.RowsAffected if numDeleted == 0 { return 0, nil // nothing to prune if no object was deleted + } else if err := pruneSlabs(tx); err != nil { + return 0, err } - return + return numDeleted, nil } // deleteObjects deletes a batch of objects from the database.
The order of @@ -2729,6 +2741,11 @@ func (s *SQLStore) deleteObjects(bucket string, path string) (numDeleted int64, } duration = time.Since(start) rowsAffected = res.RowsAffected + + // prune slabs if we deleted an object + if rowsAffected > 0 { + return pruneSlabs(tx) + } return nil }); err != nil { return 0, fmt.Errorf("failed to delete objects: %w", err) diff --git a/stores/multipart.go b/stores/multipart.go index 18706ed0c..3a5bcd54a 100644 --- a/stores/multipart.go +++ b/stores/multipart.go @@ -295,6 +295,10 @@ func (s *SQLStore) AbortMultipartUpload(ctx context.Context, bucket, path string if err != nil { return fmt.Errorf("failed to delete multipart upload: %w", err) } + // Prune the slabs. + if err := pruneSlabs(tx); err != nil { + return fmt.Errorf("failed to prune slabs: %w", err) + } return nil }) } @@ -435,6 +439,11 @@ func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, path str if err := tx.Delete(&mu).Error; err != nil { return fmt.Errorf("failed to delete multipart upload: %w", err) } + + // Prune the slabs. + if err := pruneSlabs(tx); err != nil { + return fmt.Errorf("failed to prune slabs: %w", err) + } return nil }) if err != nil { From 671f8da872b0decaf217bd4e4d0119d974b1160d Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 20 Feb 2024 10:19:05 +0100 Subject: [PATCH 059/172] stores: add migration to performMigrations --- stores/metadata.go | 6 +++--- stores/migrations.go | 6 ++++++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index a9636bf75..13f274477 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -2689,9 +2689,9 @@ func pruneSlabs(tx *gorm.DB) error { // delete slabs without any associated slices or buffers return tx.Exec(` DELETE -FROM slabs sla -WHERE NOT EXISTS (SELECT 1 FROM slices sli WHERE sli.db_slab_id = sla.id) -AND sla.db_buffered_slab_id IS NULL +FROM slabs +WHERE NOT EXISTS (SELECT 1 FROM slices WHERE slices.db_slab_id = slabs.id) +AND slabs.db_buffered_slab_id IS NULL `).Error } diff --git a/stores/migrations.go b/stores/migrations.go index bf3916ca4..d89be7ab5 100644 --- a/stores/migrations.go +++ b/stores/migrations.go @@ -44,6 +44,12 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { return performMigration(tx, dbIdentifier, "00003_idx_objects_size", logger) }, }, + { + ID: "00004_prune_slabs_cascade", + Migrate: func(tx *gorm.DB) error { + return performMigration(tx, dbIdentifier, "00004_prune_slabs_cascade", logger) + }, + }, } // Create migrator. 
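Note: with the delete triggers gone, correctness relies on every deletion path pairing its DELETE with an explicit prune inside one transaction, as deleteObject, deleteObjects, AbortMultipartUpload and CompleteMultipartUpload now do. Below is a minimal sketch of that flow, assuming gorm and the schema above; the function name is hypothetical and this is an illustration, not part of the patches.

package stores

import "gorm.io/gorm"

// deleteAndPrune removes an object and then drops any slabs that lost
// their last slice as a result. The ON DELETE CASCADE on fk_objects_slabs
// removes the object's slices; slabs have no cascading constraint, so
// they are pruned explicitly within the same transaction.
func deleteAndPrune(db *gorm.DB, bucketID uint, objectID string) error {
	return db.Transaction(func(tx *gorm.DB) error {
		res := tx.Exec("DELETE FROM objects WHERE db_bucket_id = ? AND object_id = ?", bucketID, objectID)
		if res.Error != nil {
			return res.Error
		} else if res.RowsAffected == 0 {
			return nil // nothing deleted, nothing to prune
		}
		// same query as pruneSlabs above: drop slabs that no slice
		// references and that aren't backing a buffered slab
		return tx.Exec(`DELETE FROM slabs
WHERE NOT EXISTS (SELECT 1 FROM slices WHERE slices.db_slab_id = slabs.id)
AND slabs.db_buffered_slab_id IS NULL`).Error
	})
}

A missed prune no longer breaks referential integrity; it only leaks unreferenced slab rows until the next prune runs, which is what makes the explicit call easier to reason about than the triggers it replaces.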
From 2fbcc26ca387a203e49cf16c6d299b8f0692b9f9 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 20 Feb 2024 10:51:30 +0100 Subject: [PATCH 060/172] worker: use different context in scanHost --- worker/host.go | 14 ++--- worker/interactions.go | 119 ----------------------------------------- worker/worker.go | 46 +++++++++------- 3 files changed, 33 insertions(+), 146 deletions(-) diff --git a/worker/host.go b/worker/host.go index 86e92ce27..43e0891af 100644 --- a/worker/host.go +++ b/worker/host.go @@ -52,7 +52,6 @@ type ( acc *account bus Bus contractSpendingRecorder ContractSpendingRecorder - interactionRecorder HostInteractionRecorder logger *zap.SugaredLogger transportPool *transportPoolV3 priceTables *priceTables @@ -70,7 +69,6 @@ func (w *worker) Host(hk types.PublicKey, fcid types.FileContractID, siamuxAddr acc: w.accounts.ForHost(hk), bus: w.bus, contractSpendingRecorder: w.contractSpendingRecorder, - interactionRecorder: w.hostInteractionRecorder, logger: w.logger.Named(hk.String()[:4]), fcid: fcid, siamuxAddr: siamuxAddr, @@ -198,11 +196,13 @@ func (h *host) FetchPriceTable(ctx context.Context, rev *types.FileContractRevis fetchPT := func(paymentFn PriceTablePaymentFunc) (hpt hostdb.HostPriceTable, err error) { err = h.transportPool.withTransportV3(ctx, h.hk, h.siamuxAddr, func(ctx context.Context, t *transportV3) (err error) { hpt, err = RPCPriceTable(ctx, t, paymentFn) - h.interactionRecorder.RecordPriceTableUpdate(hostdb.PriceTableUpdate{ - HostKey: h.hk, - Success: isSuccessfulInteraction(err), - Timestamp: time.Now(), - PriceTable: hpt, + h.bus.RecordPriceTables(ctx, []hostdb.PriceTableUpdate{ + { + HostKey: h.hk, + Success: isSuccessfulInteraction(err), + Timestamp: time.Now(), + PriceTable: hpt, + }, }) return }) diff --git a/worker/interactions.go b/worker/interactions.go index dfe8c4017..2107ae582 100644 --- a/worker/interactions.go +++ b/worker/interactions.go @@ -1,135 +1,16 @@ package worker import ( - "context" - "fmt" - "sync" - "time" - "go.sia.tech/renterd/hostdb" - "go.uber.org/zap" -) - -const ( - keyInteractionRecorder contextKey = "InteractionRecorder" ) type ( HostInteractionRecorder interface { RecordHostScan(...hostdb.HostScan) RecordPriceTableUpdate(...hostdb.PriceTableUpdate) - Stop(context.Context) - } - - hostInteractionRecorder struct { - flushInterval time.Duration - - bus Bus - logger *zap.SugaredLogger - - mu sync.Mutex - hostScans []hostdb.HostScan - priceTableUpdates []hostdb.PriceTableUpdate - - flushCtx context.Context - flushTimer *time.Timer } ) -var ( - _ HostInteractionRecorder = (*hostInteractionRecorder)(nil) -) - -func (w *worker) initHostInteractionRecorder(flushInterval time.Duration) { - if w.hostInteractionRecorder != nil { - panic("HostInteractionRecorder already initialized") // developer error - } - w.hostInteractionRecorder = &hostInteractionRecorder{ - bus: w.bus, - logger: w.logger, - - flushCtx: w.shutdownCtx, - flushInterval: flushInterval, - - hostScans: make([]hostdb.HostScan, 0), - priceTableUpdates: make([]hostdb.PriceTableUpdate, 0), - } -} - -func (r *hostInteractionRecorder) RecordHostScan(scans ...hostdb.HostScan) { - r.mu.Lock() - defer r.mu.Unlock() - r.hostScans = append(r.hostScans, scans...) - r.tryFlushInteractionsBuffer() -} - -func (r *hostInteractionRecorder) RecordPriceTableUpdate(ptUpdates ...hostdb.PriceTableUpdate) { - r.mu.Lock() - defer r.mu.Unlock() - r.priceTableUpdates = append(r.priceTableUpdates, ptUpdates...) 
- r.tryFlushInteractionsBuffer() -} - -func (r *hostInteractionRecorder) Stop(ctx context.Context) { - // stop the flush timer - r.mu.Lock() - if r.flushTimer != nil { - r.flushTimer.Stop() - } - r.flushCtx = ctx - r.mu.Unlock() - - // flush all interactions - r.flush() - - // log if we weren't able to flush them - r.mu.Lock() - if len(r.hostScans) > 0 { - r.logger.Errorw(fmt.Sprintf("failed to record %d host scans on worker shutdown", len(r.hostScans))) - } - if len(r.priceTableUpdates) > 0 { - r.logger.Errorw(fmt.Sprintf("failed to record %d price table updates on worker shutdown", len(r.priceTableUpdates))) - } - r.mu.Unlock() -} - -func (r *hostInteractionRecorder) flush() { - r.mu.Lock() - defer r.mu.Unlock() - - // NOTE: don't bother flushing if the context is cancelled, we can safely - // ignore the buffered scans and price tables since we'll flush on shutdown - // and log in case we weren't able to flush all interactions to the bus - select { - case <-r.flushCtx.Done(): - r.flushTimer = nil - return - default: - } - - if len(r.hostScans) > 0 { - if err := r.bus.RecordHostScans(r.flushCtx, r.hostScans); err != nil { - r.logger.Errorw(fmt.Sprintf("failed to record scans: %v", err)) - } else if err == nil { - r.hostScans = nil - } - } - if len(r.priceTableUpdates) > 0 { - if err := r.bus.RecordPriceTables(r.flushCtx, r.priceTableUpdates); err != nil { - r.logger.Errorw(fmt.Sprintf("failed to record price table updates: %v", err)) - } else if err == nil { - r.priceTableUpdates = nil - } - } - r.flushTimer = nil -} - -func (r *hostInteractionRecorder) tryFlushInteractionsBuffer() { - if r.flushTimer == nil { - r.flushTimer = time.AfterFunc(r.flushInterval, r.flush) - } -} - func isSuccessfulInteraction(err error) bool { // No error always means success. 
if err == nil { diff --git a/worker/worker.go b/worker/worker.go index 8a2a9b1f3..d01d57323 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -200,7 +200,6 @@ type worker struct { uploadsMu sync.Mutex uploadingPackedSlabs map[string]bool - hostInteractionRecorder HostInteractionRecorder contractSpendingRecorder ContractSpendingRecorder contractLockingDuration time.Duration @@ -342,11 +341,13 @@ func (w *worker) rhpPriceTableHandler(jc jape.Context) { var err error var hpt hostdb.HostPriceTable defer func() { - w.hostInteractionRecorder.RecordPriceTableUpdate(hostdb.PriceTableUpdate{ - HostKey: rptr.HostKey, - Success: isSuccessfulInteraction(err), - Timestamp: time.Now(), - PriceTable: hpt, + w.bus.RecordPriceTables(ctx, []hostdb.PriceTableUpdate{ + { + HostKey: rptr.HostKey, + Success: isSuccessfulInteraction(err), + Timestamp: time.Now(), + PriceTable: hpt, + }, }) }() @@ -1292,6 +1293,7 @@ func New(masterKey [32]byte, id string, b Bus, contractLockingDuration, busFlush return nil, errors.New("uploadMaxMemory cannot be 0") } + ctx, cancel := context.WithCancel(context.Background()) w := &worker{ alerts: alerts.WithOrigin(b, fmt.Sprintf("worker.%s", id)), allowPrivateIPs: allowPrivateIPs, @@ -1302,13 +1304,10 @@ func New(masterKey [32]byte, id string, b Bus, contractLockingDuration, busFlush logger: l.Sugar().Named("worker").Named(id), startTime: time.Now(), uploadingPackedSlabs: make(map[string]bool), + shutdownCtx: ctx, + shutdownCtxCancel: cancel, } - ctx, cancel := context.WithCancel(context.Background()) - ctx = context.WithValue(ctx, keyInteractionRecorder, w) - w.shutdownCtx = ctx - w.shutdownCtxCancel = cancel - w.initAccounts(b) w.initPriceTables() w.initTransportPool() @@ -1317,7 +1316,6 @@ func New(masterKey [32]byte, id string, b Bus, contractLockingDuration, busFlush w.initUploadManager(uploadMaxMemory, uploadMaxOverdrive, uploadOverdriveTimeout, l.Sugar().Named("uploadmanager")) w.initContractSpendingRecorder(busFlushInterval) - w.initHostInteractionRecorder(busFlushInterval) return w, nil } @@ -1364,7 +1362,6 @@ func (w *worker) Shutdown(ctx context.Context) error { w.uploadManager.Stop() // stop recorders - w.hostInteractionRecorder.Stop(ctx) w.contractSpendingRecorder.Stop(ctx) return nil } @@ -1441,14 +1438,23 @@ func (w *worker) scanHost(ctx context.Context, hostKey types.PublicKey, hostIP s default: } - // record host scan - w.hostInteractionRecorder.RecordHostScan(hostdb.HostScan{ - HostKey: hostKey, - Success: isSuccessfulInteraction(err), - Timestamp: time.Now(), - Settings: settings, - PriceTable: pt, + // record host scan - make sure this isn't interrupted by the same context + // used to time out the scan itself because otherwise we won't be able to + // record scans that timed out. 
+ recordCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + scanErr := w.bus.RecordHostScans(recordCtx, []hostdb.HostScan{ + { + HostKey: hostKey, + Success: isSuccessfulInteraction(err), + Timestamp: time.Now(), + Settings: settings, + PriceTable: pt, + }, }) + if scanErr != nil { + w.logger.Errorf("failed to record host scan: %v", scanErr) + } return settings, pt, duration, err } From 0a23dea48598451ab56cb5b836a4f6a3cf4c8240 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 20 Feb 2024 10:57:09 +0100 Subject: [PATCH 061/172] stores: fix TestSlabCleanupTrigger --- stores/metadata.go | 5 ++--- stores/metadata_test.go | 9 ++++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 13f274477..2ab2cf5da 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -2739,13 +2739,12 @@ func (s *SQLStore) deleteObjects(bucket string, path string) (numDeleted int64, if err := res.Error; err != nil { return res.Error } - duration = time.Since(start) - rowsAffected = res.RowsAffected - // prune slabs if we deleted an object + rowsAffected = res.RowsAffected + duration = time.Since(start) if rowsAffected > 0 { return pruneSlabs(tx) } return nil }); err != nil { return 0, fmt.Errorf("failed to delete objects: %w", err) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 4a6102399..d5a0f9531 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -3941,7 +3941,8 @@ func TestSlabCleanupTrigger(t *testing.T) { } // delete the object - if err := ss.db.Delete(&obj1).Error; err != nil { + err := ss.RemoveObject(context.Background(), api.DefaultBucketName, obj1.ObjectID) + if err != nil { t.Fatal(err) } @@ -3954,7 +3955,8 @@ } // delete second object - if err := ss.db.Delete(&obj2).Error; err != nil { + err = ss.RemoveObject(context.Background(), api.DefaultBucketName, obj2.ObjectID) + if err != nil { t.Fatal(err) } @@ -3998,7 +4000,8 @@ } // delete third object - if err := ss.db.Delete(&obj3).Error; err != nil { + err = ss.RemoveObject(context.Background(), api.DefaultBucketName, obj3.ObjectID) + if err != nil { t.Fatal(err) } if err := ss.db.Model(&dbSlab{}).Count(&slabCntr).Error; err != nil { From 50c201aa65066abef4984dce47ee87484945d42c Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 19 Feb 2024 14:42:38 +0100 Subject: [PATCH 062/172] stores: adapt unit tests to MySQL --- .github/workflows/test.yml | 64 +++++++++--------- stores/hostdb_test.go | 41 ++++++------ stores/metadata.go | 4 ++ stores/metadata_test.go | 129 +++++++++++++++++++++++-------------- stores/metrics.go | 4 +- stores/metrics_test.go | 2 +- stores/sql_test.go | 81 +++++++++++++++++------ 7 files changed, 200 insertions(+), 125 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index e8a32e5ec..b96eddebe 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -30,23 +30,23 @@ jobs: uses: actions/setup-go@v3 with: go-version: ${{ matrix.go-version }} - - name: Lint - uses: golangci/golangci-lint-action@v3 - with: - args: --timeout=30m - - name: Jape Analyzer - uses: SiaFoundation/action-golang-analysis@HEAD - with: - analyzers: | - go.sia.tech/jape.Analyzer@master - directories: | - autopilot - bus bus/client - worker worker/client - - name: Test - uses: n8maninger/action-golang-test@v1 - with: - args: "-race;-short" + # - name: Lint + # uses:
golangci/golangci-lint-action@v3 + # with: + # args: --timeout=30m + # - name: Jape Analyzer + # uses: SiaFoundation/action-golang-analysis@HEAD + # with: + # analyzers: | + # go.sia.tech/jape.Analyzer@master + # directories: | + # autopilot + # bus bus/client + # worker worker/client + # - name: Test + # uses: n8maninger/action-golang-test@v1 + # with: + # args: "-race;-short" - name: Test Stores - MySQL if: matrix.os == 'ubuntu-latest' uses: n8maninger/action-golang-test@v1 @@ -57,20 +57,20 @@ jobs: with: package: "./stores" args: "-race;-short" - - name: Test Integration - uses: n8maninger/action-golang-test@v1 - with: - package: "./internal/testing/..." - args: "-failfast;-race;-tags=testing;-timeout=30m" - - name: Test Integration - MySQL - if: matrix.os == 'ubuntu-latest' - uses: n8maninger/action-golang-test@v1 - env: - RENTERD_DB_URI: 127.0.0.1:3800 - RENTERD_DB_USER: root - RENTERD_DB_PASSWORD: test - with: - package: "./internal/testing/..." - args: "-failfast;-race;-tags=testing;-timeout=30m" + # - name: Test Integration + # uses: n8maninger/action-golang-test@v1 + # with: + # package: "./internal/testing/..." + # args: "-failfast;-race;-tags=testing;-timeout=30m" + # - name: Test Integration - MySQL + # if: matrix.os == 'ubuntu-latest' + # uses: n8maninger/action-golang-test@v1 + # env: + # RENTERD_DB_URI: 127.0.0.1:3800 + # RENTERD_DB_USER: root + # RENTERD_DB_PASSWORD: test + # with: + # package: "./internal/testing/..." + # args: "-failfast;-race;-tags=testing;-timeout=30m" - name: Build run: go build -o bin/ ./cmd/renterd diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index a61f9eea3..35872ea2d 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -63,15 +63,8 @@ func TestSQLHostDB(t *testing.T) { // Insert an announcement for the host and another one for an unknown // host. - a := hostdb.Announcement{ - Index: types.ChainIndex{ - Height: 42, - ID: types.BlockID{1, 2, 3}, - }, - Timestamp: time.Now().UTC().Round(time.Second), - NetAddress: "address", - } - err = ss.insertTestAnnouncement(hk, a) + ann := newTestHostDBAnnouncement("address") + err = ss.insertTestAnnouncement(hk, ann) if err != nil { t.Fatal(err) } @@ -79,7 +72,7 @@ func TestSQLHostDB(t *testing.T) { // Read the host and verify that the announcement related fields were // set. var h dbHost - tx := ss.db.Where("last_announcement = ? AND net_address = ?", a.Timestamp, a.NetAddress).Find(&h) + tx := ss.db.Where("last_announcement = ? AND net_address = ?", ann.Timestamp, ann.NetAddress).Find(&h) if tx.Error != nil { t.Fatal(tx.Error) } @@ -116,7 +109,7 @@ func TestSQLHostDB(t *testing.T) { // Insert another announcement for an unknown host. unknownKey := types.PublicKey{1, 4, 7} - err = ss.insertTestAnnouncement(unknownKey, a) + err = ss.insertTestAnnouncement(unknownKey, ann) if err != nil { t.Fatal(err) } @@ -124,7 +117,7 @@ func TestSQLHostDB(t *testing.T) { if err != nil { t.Fatal(err) } - if h3.NetAddress != a.NetAddress { + if h3.NetAddress != ann.NetAddress { t.Fatal("wrong net address") } if h3.KnownSince.IsZero() { @@ -510,22 +503,18 @@ func TestInsertAnnouncements(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() - // Create announcements for 2 hosts. + // Create announcements for 3 hosts. 
ann1 := announcement{ - hostKey: publicKey(types.GeneratePrivateKey().PublicKey()), - announcement: hostdb.Announcement{ - Index: types.ChainIndex{Height: 1, ID: types.BlockID{1}}, - Timestamp: time.Now(), - NetAddress: "foo.bar:1000", - }, + hostKey: publicKey(types.GeneratePrivateKey().PublicKey()), + announcement: newTestHostDBAnnouncement("foo.bar:1000"), } ann2 := announcement{ hostKey: publicKey(types.GeneratePrivateKey().PublicKey()), - announcement: hostdb.Announcement{}, + announcement: newTestHostDBAnnouncement("bar.baz:1000"), } ann3 := announcement{ hostKey: publicKey(types.GeneratePrivateKey().PublicKey()), - announcement: hostdb.Announcement{}, + announcement: newTestHostDBAnnouncement("quz.qux:1000"), } // Insert the first one and check that all fields are set. @@ -1101,7 +1090,7 @@ func (s *SQLStore) addCustomTestHost(hk types.PublicKey, na string) error { s.unappliedHostKeys[hk] = struct{}{} s.unappliedAnnouncements = append(s.unappliedAnnouncements, []announcement{{ hostKey: publicKey(hk), - announcement: hostdb.Announcement{NetAddress: na}, + announcement: newTestHostDBAnnouncement(na), }}...) s.lastSave = time.Now().Add(s.persistInterval * -2) return s.applyUpdates(false) @@ -1153,6 +1142,14 @@ func newTestHostAnnouncement(na modules.NetAddress) (modules.HostAnnouncement, t }, sk } +func newTestHostDBAnnouncement(addr string) hostdb.Announcement { + return hostdb.Announcement{ + Index: types.ChainIndex{Height: 1, ID: types.BlockID{1}}, + Timestamp: time.Now().UTC().Round(time.Second), + NetAddress: addr, + } +} + func newTestTransaction(ha modules.HostAnnouncement, sk types.PrivateKey) stypes.Transaction { var buf bytes.Buffer buf.Write(encoding.Marshal(ha)) diff --git a/stores/metadata.go b/stores/metadata.go index 68947ed95..997b8b343 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1507,6 +1507,10 @@ func (s *SQLStore) RenameObjects(ctx context.Context, bucket, prefixOld, prefixN gorm.Expr(sqlConcat(tx, "?", "SUBSTR(object_id, ?)")), prefixNew, utf8.RuneCountInString(prefixOld)+1, prefixOld+"%", utf8.RuneCountInString(prefixOld), prefixOld, sqlWhereBucket("objects", bucket)) + + if !isSQLite(tx) { + inner = tx.Raw("SELECT * FROM (?) as i", inner) + } resp := tx.Model(&dbObject{}). Where("object_id IN (?)", inner). Delete(&dbObject{}) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 4a6102399..ad886e6ef 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -6,7 +6,6 @@ import ( "encoding/hex" "errors" "fmt" - "math" "os" "reflect" "sort" @@ -18,7 +17,6 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/object" "gorm.io/gorm" "gorm.io/gorm/schema" @@ -220,7 +218,7 @@ func TestSQLContractStore(t *testing.T) { } // Add an announcement. - err = ss.insertTestAnnouncement(hk, hostdb.Announcement{NetAddress: "address"}) + err = ss.insertTestAnnouncement(hk, newTestHostDBAnnouncement("address")) if err != nil { t.Fatal(err) } @@ -511,11 +509,11 @@ func TestRenewedContract(t *testing.T) { hk, hk2 := hks[0], hks[1] // Add announcements. 
- err = ss.insertTestAnnouncement(hk, hostdb.Announcement{NetAddress: "address"}) + err = ss.insertTestAnnouncement(hk, newTestHostDBAnnouncement("address")) if err != nil { t.Fatal(err) } - err = ss.insertTestAnnouncement(hk2, hostdb.Announcement{NetAddress: "address2"}) + err = ss.insertTestAnnouncement(hk2, newTestHostDBAnnouncement("address2")) if err != nil { t.Fatal(err) } @@ -1008,7 +1006,7 @@ func TestSQLMetadataStore(t *testing.T) { one := uint(1) expectedObj := dbObject{ - DBBucketID: 1, + DBBucketID: ss.DefaultBucketID(), Health: 1, ObjectID: objID, Key: obj1Key, @@ -1169,6 +1167,7 @@ func TestSQLMetadataStore(t *testing.T) { slabs[i].Shards[0].Model = Model{} slabs[i].Shards[0].Contracts[0].Model = Model{} slabs[i].Shards[0].Contracts[0].Host.Model = Model{} + slabs[i].Shards[0].Contracts[0].Host.LastAnnouncement = time.Time{} slabs[i].HealthValidUntil = 0 } if !reflect.DeepEqual(slab1, expectedObjSlab1) { @@ -2213,10 +2212,9 @@ func TestUpdateSlab(t *testing.T) { t.Fatal(err) } var s dbSlab - if err := ss.db.Model(&dbSlab{}). + if err := ss.db.Where(&dbSlab{Key: key}). Joins("DBContractSet"). Preload("Shards"). - Where("key = ?", key). Take(&s). Error; err != nil { t.Fatal(err) @@ -2265,7 +2263,7 @@ func TestRecordContractSpending(t *testing.T) { } // Add an announcement. - err = ss.insertTestAnnouncement(hk, hostdb.Announcement{NetAddress: "address"}) + err = ss.insertTestAnnouncement(hk, newTestHostDBAnnouncement("address")) if err != nil { t.Fatal(err) } @@ -3897,7 +3895,7 @@ func TestSlabCleanupTrigger(t *testing.T) { // create objects obj1 := dbObject{ ObjectID: "1", - DBBucketID: 1, + DBBucketID: ss.DefaultBucketID(), Health: 1, } if err := ss.db.Create(&obj1).Error; err != nil { @@ -3905,7 +3903,7 @@ func TestSlabCleanupTrigger(t *testing.T) { } obj2 := dbObject{ ObjectID: "2", - DBBucketID: 1, + DBBucketID: ss.DefaultBucketID(), Health: 1, } if err := ss.db.Create(&obj2).Error; err != nil { @@ -3978,7 +3976,7 @@ func TestSlabCleanupTrigger(t *testing.T) { } obj3 := dbObject{ ObjectID: "3", - DBBucketID: 1, + DBBucketID: ss.DefaultBucketID(), Health: 1, } if err := ss.db.Create(&obj3).Error; err != nil { @@ -4117,11 +4115,11 @@ func TestUpdateObjectReuseSlab(t *testing.T) { // fetch the object var dbObj dbObject - if err := ss.db.Where("db_bucket_id", 1).Take(&dbObj).Error; err != nil { + if err := ss.db.Where("db_bucket_id", ss.DefaultBucketID()).Take(&dbObj).Error; err != nil { t.Fatal(err) } else if dbObj.ID != 1 { t.Fatal("unexpected id", dbObj.ID) - } else if dbObj.DBBucketID != 1 { + } else if dbObj.DBBucketID != ss.DefaultBucketID() { t.Fatal("bucket id mismatch", dbObj.DBBucketID) } else if dbObj.ObjectID != "1" { t.Fatal("object id mismatch", dbObj.ObjectID) @@ -4223,7 +4221,7 @@ func TestUpdateObjectReuseSlab(t *testing.T) { // fetch the object var dbObj2 dbObject - if err := ss.db.Where("db_bucket_id", 1). + if err := ss.db.Where("db_bucket_id", ss.DefaultBucketID()). Where("object_id", "2"). 
Take(&dbObj2).Error; err != nil {
 		t.Fatal(err)
@@ -4307,57 +4305,94 @@ func TestTypeCurrency(t *testing.T) {
 	ss := newTestSQLStore(t, defaultTestSQLStoreConfig)
 	defer ss.Close()
 
+	// prepare the table
+	if isSQLite(ss.db) {
+		if err := ss.db.Exec("CREATE TABLE currencies (id INTEGER PRIMARY KEY AUTOINCREMENT,c BLOB);").Error; err != nil {
+			t.Fatal(err)
+		}
+	} else {
+		if err := ss.db.Exec("CREATE TABLE currencies (id INT AUTO_INCREMENT PRIMARY KEY, c BLOB);").Error; err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// insert currencies in random order
+	values := []interface{}{
+		bCurrency(types.ZeroCurrency),
+		bCurrency(types.NewCurrency64(1)),
+		bCurrency(types.MaxCurrency),
+	}
+	frand.Shuffle(len(values), func(i, j int) { values[i], values[j] = values[j], values[i] })
+	if err := ss.db.Exec("INSERT INTO currencies (c) VALUES (?),(?),(?);", values...).Error; err != nil {
+		t.Fatal(err)
+	}
+
+	// fetch currencies and assert they're sorted
+	var currencies []bCurrency
+	if err := ss.db.Raw(`SELECT c FROM currencies ORDER BY c ASC`).Scan(&currencies).Error; err != nil {
+		t.Fatal(err)
+	} else if !sort.SliceIsSorted(currencies, func(i, j int) bool {
+		return types.Currency(currencies[i]).Cmp(types.Currency(currencies[j])) < 0
+	}) {
+		t.Fatal("currencies not sorted", currencies)
+	}
+
+	// convenience variables
+	c0 := currencies[0]
+	c1 := currencies[1]
+	cM := currencies[2]
+
 	tests := []struct {
-		a   types.Currency
-		b   types.Currency
+		a   bCurrency
+		b   bCurrency
 		cmp string
 	}{
 		{
-			a:   types.ZeroCurrency,
-			b:   types.NewCurrency64(1),
+			a:   c0,
+			b:   c1,
 			cmp: "<",
 		},
 		{
-			a:   types.NewCurrency64(1),
-			b:   types.NewCurrency64(1),
+			a:   c1,
+			b:   c0,
+			cmp: ">",
+		},
+		{
+			a:   c0,
+			b:   c1,
+			cmp: "!=",
+		},
+		{
+			a:   c1,
+			b:   c1,
 			cmp: "=",
 		},
 		{
-			a:   types.NewCurrency(0, math.MaxUint64),
-			b:   types.NewCurrency(math.MaxUint64, 0),
+			a:   c0,
+			b:   cM,
 			cmp: "<",
 		},
 		{
-			a:   types.NewCurrency(math.MaxUint64, 0),
-			b:   types.NewCurrency(0, math.MaxUint64),
+			a:   cM,
+			b:   c0,
 			cmp: ">",
 		},
+		{
+			a:   cM,
+			b:   cM,
+			cmp: "=",
+		},
 	}
-	for _, test := range tests {
+	for i, test := range tests {
 		var result bool
-		err := ss.db.Raw("SELECT ? "+test.cmp+" ?", bCurrency(test.a), bCurrency(test.b)).Scan(&result).Error
-		if err != nil {
+		query := fmt.Sprintf("SELECT ? %s ?", test.cmp)
+		if !isSQLite(ss.db) {
+			query = strings.Replace(query, "?", "HEX(?)", -1)
+		}
+		if err := ss.db.Raw(query, test.a, test.b).Scan(&result).Error; err != nil {
 			t.Fatal(err)
 		} else if !result {
-			t.Fatal("unexpected result", result)
+			t.Errorf("unexpected result in case %d/%d: expected %v %s %v to be true", i+1, len(tests), types.Currency(test.a).String(), test.cmp, types.Currency(test.b).String())
 		}
 	}
-
-	c := func(c uint64) bCurrency {
-		return bCurrency(types.NewCurrency64(c))
-	}
-
-	var currencies []bCurrency
-	err := ss.db.Raw(`
-WITH input(col) as
-(values (?),(?),(?))
-SELECT * FROM input ORDER BY col ASC
-`, c(3), c(1), c(2)).Scan(&currencies).Error
-	if err != nil {
-		t.Fatal(err)
-	} else if !sort.SliceIsSorted(currencies, func(i, j int) bool {
-		return types.Currency(currencies[i]).Cmp(types.Currency(currencies[j])) < 0
-	}) {
-		t.Fatal("currencies not sorted", currencies)
-	}
 }
diff --git a/stores/metrics.go b/stores/metrics.go
index 203ed3b71..8816d1729 100644
--- a/stores/metrics.go
+++ b/stores/metrics.go
@@ -605,9 +605,7 @@ func (s *SQLStore) findPeriods(table string, dst interface{}, start time.Time, n
 			WHERE ?
GROUP BY p.period_start - ORDER BY - p.period_start ASC - ) i ON %s.id = i.id + ) i ON %s.id = i.id ORDER BY Period ASC `, table, table, table, table), unixTimeMS(start), interval.Milliseconds(), diff --git a/stores/metrics_test.go b/stores/metrics_test.go index 2b2f572a7..f71d985bd 100644 --- a/stores/metrics_test.go +++ b/stores/metrics_test.go @@ -517,7 +517,7 @@ func TestWalletMetrics(t *testing.T) { } else if !sort.SliceIsSorted(metrics, func(i, j int) bool { return time.Time(metrics[i].Timestamp).Before(time.Time(metrics[j].Timestamp)) }) { - t.Fatal("expected metrics to be sorted by time") + t.Fatalf("expected metrics to be sorted by time, %+v", metrics) } // Prune metrics diff --git a/stores/sql_test.go b/stores/sql_test.go index ae0c54b0c..776e3e10e 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -96,8 +96,16 @@ func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore { var conn, connMetrics gorm.Dialector if dbURI != "" { - conn = NewMySQLConnection(dbURI, dbUser, dbPassword, dbName) - connMetrics = NewMySQLConnection(dbURI, dbUser, dbPassword, dbMetricsName) + if tmpDB, err := gorm.Open(NewMySQLConnection(dbUser, dbPassword, dbURI, "")); err != nil { + t.Fatal(err) + } else if err := tmpDB.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS `%s`", dbName)).Error; err != nil { + t.Fatal(err) + } else if err := tmpDB.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS `%s`", dbMetricsName)).Error; err != nil { + t.Fatal(err) + } + + conn = NewMySQLConnection(dbUser, dbPassword, dbURI, dbName) + connMetrics = NewMySQLConnection(dbUser, dbPassword, dbURI, dbMetricsName) } else if cfg.persistent { conn = NewSQLiteConnection(filepath.Join(cfg.dir, "db.sqlite")) connMetrics = NewSQLiteConnection(filepath.Join(cfg.dir, "metrics.sqlite")) @@ -148,6 +156,18 @@ func (s *testSQLStore) Close() error { return nil } +func (s *testSQLStore) DefaultBucketID() uint { + var b dbBucket + if err := s.db. + Model(&dbBucket{}). + Where("name = ?", api.DefaultBucketName). + Take(&b). 
+ Error; err != nil { + s.t.Fatal(err) + } + return b.ID +} + func (s *testSQLStore) Reopen() *testSQLStore { s.t.Helper() cfg := defaultTestSQLStoreConfig @@ -240,11 +260,13 @@ func (s *SQLStore) contractsCount() (cnt int64, err error) { func (s *SQLStore) overrideSlabHealth(objectID string, health float64) (err error) { err = s.db.Exec(fmt.Sprintf(` UPDATE slabs SET health = %v WHERE id IN ( - SELECT sla.id - FROM objects o - INNER JOIN slices sli ON o.id = sli.db_object_id - INNER JOIN slabs sla ON sli.db_slab_id = sla.id - WHERE o.object_id = "%s" + SELECT * FROM ( + SELECT sla.id + FROM objects o + INNER JOIN slices sli ON o.id = sli.db_object_id + INNER JOIN slabs sla ON sli.db_slab_id = sla.id + WHERE o.object_id = "%s" + ) AS sub )`, health, objectID)).Error return } @@ -306,11 +328,24 @@ func TestConsensusReset(t *testing.T) { } } -type queryPlanExplain struct { - ID int `json:"id"` - Parent int `json:"parent"` - NotUsed bool `json:"notused"` - Detail string `json:"detail"` +type sqliteQueryPlan struct { + Detail string `json:"detail"` +} + +func (p sqliteQueryPlan) usesIndex() bool { + d := strings.ToLower(p.Detail) + return strings.Contains(d, "using index") || strings.Contains(d, "using covering index") +} + +//nolint:tagliatelle +type mysqlQueryPlan struct { + Extra string `json:"Extra"` + PossibleKeys string `json:"possible_keys"` +} + +func (p mysqlQueryPlan) usesIndex() bool { + d := strings.ToLower(p.Extra) + return strings.Contains(d, "using index") || strings.Contains(p.PossibleKeys, "idx_") } func TestQueryPlan(t *testing.T) { @@ -346,14 +381,20 @@ func TestQueryPlan(t *testing.T) { } for _, query := range queries { - var explain queryPlanExplain - err := ss.db.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&explain).Error - if err != nil { - t.Fatal(err) - } - if !(strings.Contains(explain.Detail, "USING INDEX") || - strings.Contains(explain.Detail, "USING COVERING INDEX")) { - t.Fatalf("query '%s' should use an index, instead the plan was '%s'", query, explain.Detail) + if isSQLite(ss.db) { + var explain sqliteQueryPlan + if err := ss.db.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&explain).Error; err != nil { + t.Fatal(err) + } else if !explain.usesIndex() { + t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, explain) + } + } else { + var explain mysqlQueryPlan + if err := ss.db.Raw(fmt.Sprintf("EXPLAIN %s;", query)).Scan(&explain).Error; err != nil { + t.Fatal(err) + } else if !explain.usesIndex() { + t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, explain) + } } } } From e8c5c92460bfda480ca0222c4cc675bde3588f83 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 20 Feb 2024 11:32:20 +0100 Subject: [PATCH 063/172] ci: enable all tests --- .github/workflows/test.yml | 64 +++++++++++++++++++------------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index b96eddebe..e8a32e5ec 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -30,23 +30,23 @@ jobs: uses: actions/setup-go@v3 with: go-version: ${{ matrix.go-version }} - # - name: Lint - # uses: golangci/golangci-lint-action@v3 - # with: - # args: --timeout=30m - # - name: Jape Analyzer - # uses: SiaFoundation/action-golang-analysis@HEAD - # with: - # analyzers: | - # go.sia.tech/jape.Analyzer@master - # directories: | - # autopilot - # bus bus/client - # worker worker/client - # - name: Test - # uses: n8maninger/action-golang-test@v1 - # with: - # args: 
"-race;-short" + - name: Lint + uses: golangci/golangci-lint-action@v3 + with: + args: --timeout=30m + - name: Jape Analyzer + uses: SiaFoundation/action-golang-analysis@HEAD + with: + analyzers: | + go.sia.tech/jape.Analyzer@master + directories: | + autopilot + bus bus/client + worker worker/client + - name: Test + uses: n8maninger/action-golang-test@v1 + with: + args: "-race;-short" - name: Test Stores - MySQL if: matrix.os == 'ubuntu-latest' uses: n8maninger/action-golang-test@v1 @@ -57,20 +57,20 @@ jobs: with: package: "./stores" args: "-race;-short" - # - name: Test Integration - # uses: n8maninger/action-golang-test@v1 - # with: - # package: "./internal/testing/..." - # args: "-failfast;-race;-tags=testing;-timeout=30m" - # - name: Test Integration - MySQL - # if: matrix.os == 'ubuntu-latest' - # uses: n8maninger/action-golang-test@v1 - # env: - # RENTERD_DB_URI: 127.0.0.1:3800 - # RENTERD_DB_USER: root - # RENTERD_DB_PASSWORD: test - # with: - # package: "./internal/testing/..." - # args: "-failfast;-race;-tags=testing;-timeout=30m" + - name: Test Integration + uses: n8maninger/action-golang-test@v1 + with: + package: "./internal/testing/..." + args: "-failfast;-race;-tags=testing;-timeout=30m" + - name: Test Integration - MySQL + if: matrix.os == 'ubuntu-latest' + uses: n8maninger/action-golang-test@v1 + env: + RENTERD_DB_URI: 127.0.0.1:3800 + RENTERD_DB_USER: root + RENTERD_DB_PASSWORD: test + with: + package: "./internal/testing/..." + args: "-failfast;-race;-tags=testing;-timeout=30m" - name: Build run: go build -o bin/ ./cmd/renterd From 96d4bba92f350ffc4490ff6f53011e9c989f7e47 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 20 Feb 2024 13:37:50 +0100 Subject: [PATCH 064/172] stores: only drop triggers if they exist --- .../mysql/main/migration_00004_prune_slabs_cascade.sql | 8 ++++---- .../sqlite/main/migration_00004_prune_slabs_cascade.sql | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql b/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql index 48f98a40a..125da0ecb 100644 --- a/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql +++ b/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql @@ -18,7 +18,7 @@ ALTER TABLE multipart_parts DROP FOREIGN KEY fk_multipart_uploads_parts; ALTER TABLE multipart_parts ADD CONSTRAINT fk_multipart_uploads_parts FOREIGN KEY (db_multipart_upload_id) REFERENCES multipart_uploads (id) ON DELETE CASCADE -- drop triggers -DROP TRIGGER before_delete_on_objects_delete_slices -DROP TRIGGER before_delete_on_multipart_uploads_delete_multipart_parts -DROP TRIGGER before_delete_on_multipart_parts_delete_slices -DROP TRIGGER after_delete_on_slices_delete_slabs \ No newline at end of file +DROP TRIGGER IF EXISTS before_delete_on_objects_delete_slices +DROP TRIGGER IF EXISTS before_delete_on_multipart_uploads_delete_multipart_parts +DROP TRIGGER IF EXISTS before_delete_on_multipart_parts_delete_slices +DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs \ No newline at end of file diff --git a/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql b/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql index 2cd633c11..b7f5ab128 100644 --- a/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql +++ b/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql @@ -22,7 +22,7 @@ CREATE INDEX `idx_multipart_parts_etag` ON 
`multipart_parts`(`etag`); PRAGMA foreign_keys=on; -- drop triggers -DROP TRIGGER before_delete_on_objects_delete_slices -DROP TRIGGER before_delete_on_multipart_uploads_delete_multipart_parts -DROP TRIGGER before_delete_on_multipart_parts_delete_slices -DROP TRIGGER after_delete_on_slices_delete_slabs \ No newline at end of file +DROP TRIGGER IF EXISTS before_delete_on_objects_delete_slices +DROP TRIGGER IF EXISTS before_delete_on_multipart_uploads_delete_multipart_parts +DROP TRIGGER IF EXISTS before_delete_on_multipart_parts_delete_slices +DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs \ No newline at end of file From 4cb5efaf0d1277d3225de8689421a963009490e2 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 20 Feb 2024 14:42:10 +0100 Subject: [PATCH 065/172] worker: address review comment --- worker/worker.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/worker/worker.go b/worker/worker.go index b57623b7b..094722c00 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1155,7 +1155,7 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { // make sure only one of the following is set if encryptionEnabled := !upload.Key.IsNoopKey(); encryptionEnabled && jc.Request.FormValue("offset") == "" { - jc.Error(errors.New("if presharding encryption isn't disabled, the offset needs to be set"), http.StatusBadRequest) + jc.Error(errors.New("if object encryption (pre-erasure coding) wasn't disabled by creating the multipart upload with the no-op key, the offset needs to be set"), http.StatusBadRequest) return } else if encryptionEnabled { opts = append(opts, WithCustomEncryptionOffset(uint64(offset))) From adafec043860b8514c71e535eddec1e3729bb407 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 20 Feb 2024 15:24:05 +0100 Subject: [PATCH 066/172] stores: add missing newline --- .../mysql/main/migration_00004_prune_slabs_cascade.sql | 2 +- .../sqlite/main/migration_00004_prune_slabs_cascade.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql b/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql index 125da0ecb..0b1c06994 100644 --- a/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql +++ b/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql @@ -21,4 +21,4 @@ ALTER TABLE multipart_parts ADD CONSTRAINT fk_multipart_uploads_parts FOREIGN KE DROP TRIGGER IF EXISTS before_delete_on_objects_delete_slices DROP TRIGGER IF EXISTS before_delete_on_multipart_uploads_delete_multipart_parts DROP TRIGGER IF EXISTS before_delete_on_multipart_parts_delete_slices -DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs \ No newline at end of file +DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs diff --git a/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql b/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql index b7f5ab128..1132dd2f5 100644 --- a/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql +++ b/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql @@ -25,4 +25,4 @@ PRAGMA foreign_keys=on; DROP TRIGGER IF EXISTS before_delete_on_objects_delete_slices DROP TRIGGER IF EXISTS before_delete_on_multipart_uploads_delete_multipart_parts DROP TRIGGER IF EXISTS before_delete_on_multipart_parts_delete_slices -DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs \ No newline at end of file +DROP TRIGGER IF EXISTS 
after_delete_on_slices_delete_slabs From c346017cb83c40a351576a6056e6baecd2686e0f Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 20 Feb 2024 18:03:56 +0100 Subject: [PATCH 067/172] bus: change response type for paginated alerts request --- alerts/alerts.go | 18 +++++++++++++++--- bus/bus.go | 9 +++++++++ bus/client/alerts.go | 8 +++----- internal/testing/cluster_test.go | 13 ++++++++----- 4 files changed, 35 insertions(+), 13 deletions(-) diff --git a/alerts/alerts.go b/alerts/alerts.go index 424196d4f..f5e19231a 100644 --- a/alerts/alerts.go +++ b/alerts/alerts.go @@ -69,6 +69,12 @@ type ( Offset int Limit int } + + AlertsResponse struct { + Alerts []Alert `json:"alerts"` + HasMore bool `json:"hasMore"` + Total int `json:"total"` + } ) // String implements the fmt.Stringer interface. @@ -176,12 +182,16 @@ func (m *Manager) DismissAlerts(ctx context.Context, ids ...types.Hash256) error } // Active returns the host's active alerts. -func (m *Manager) Active(offset, limit int) []Alert { +func (m *Manager) Active(offset, limit int) AlertsResponse { m.mu.Lock() defer m.mu.Unlock() + resp := AlertsResponse{ + Total: len(m.alerts), + } + if offset >= len(m.alerts) { - return nil + return resp } else if limit == -1 { limit = len(m.alerts) } @@ -196,8 +206,10 @@ func (m *Manager) Active(offset, limit int) []Alert { alerts = alerts[offset:] if limit < len(alerts) { alerts = alerts[:limit] + resp.HasMore = true } - return alerts + resp.Alerts = alerts + return resp } func (m *Manager) RegisterWebhookBroadcaster(b webhooks.Broadcaster) { diff --git a/bus/bus.go b/bus/bus.go index 9fb4c1254..6cce5256d 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -1716,6 +1716,15 @@ func (b *bus) gougingParams(ctx context.Context) (api.GougingParams, error) { } func (b *bus) handleGETAlerts(jc jape.Context) { + if jc.Request.FormValue("offset") != "" || jc.Request.FormValue("limit") != "" { + b.handleGETAlertsPaginated(jc) + return + } + ar := b.alertMgr.Active(0, -1) + jc.Encode(ar.Alerts) +} + +func (b *bus) handleGETAlertsPaginated(jc jape.Context) { offset, limit := 0, -1 if jc.DecodeForm("offset", &offset) != nil { return diff --git a/bus/client/alerts.go b/bus/client/alerts.go index bff3c13a5..1b876e877 100644 --- a/bus/client/alerts.go +++ b/bus/client/alerts.go @@ -10,15 +10,13 @@ import ( ) // Alerts fetches the active alerts from the bus. 
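For consumers of the new response shape, a minimal paging loop might look like this (an editorial sketch; only AlertsOpts and AlertsResponse come from the patch, the page size of 100 is an assumption):

	offset := 0
	for {
		resp, err := c.Alerts(alerts.AlertsOpts{Offset: offset, Limit: 100})
		if err != nil {
			return err
		}
		// process resp.Alerts ...
		if !resp.HasMore {
			break
		}
		offset += len(resp.Alerts)
	}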
-func (c *Client) Alerts(opts alerts.AlertsOpts) (alerts []alerts.Alert, err error) { +func (c *Client) Alerts(opts alerts.AlertsOpts) (resp alerts.AlertsResponse, err error) { values := url.Values{} - if opts.Offset != 0 { - values.Set("offset", fmt.Sprint(opts.Offset)) - } + values.Set("offset", fmt.Sprint(opts.Offset)) if opts.Limit != 0 { values.Set("limit", fmt.Sprint(opts.Limit)) } - err = c.c.GET("/alerts?"+values.Encode(), &alerts) + err = c.c.GET("/alerts?"+values.Encode(), &resp) return } diff --git a/internal/testing/cluster_test.go b/internal/testing/cluster_test.go index 39d6b373a..f0734a0f3 100644 --- a/internal/testing/cluster_test.go +++ b/internal/testing/cluster_test.go @@ -1923,9 +1923,9 @@ func TestAlerts(t *testing.T) { tt.OK(b.RegisterAlert(context.Background(), alert)) findAlert := func(id types.Hash256) *alerts.Alert { t.Helper() - alerts, err := b.Alerts(alerts.AlertsOpts{}) + ar, err := b.Alerts(alerts.AlertsOpts{}) tt.OK(err) - for _, alert := range alerts { + for _, alert := range ar.Alerts { if alert.ID == id { return &alert } @@ -1960,14 +1960,16 @@ func TestAlerts(t *testing.T) { } // try to find with offset = 1 - foundAlerts, err := b.Alerts(alerts.AlertsOpts{Offset: 1}) + ar, err := b.Alerts(alerts.AlertsOpts{Offset: 1}) + foundAlerts := ar.Alerts tt.OK(err) if len(foundAlerts) != 1 || foundAlerts[0].ID != alert.ID { t.Fatal("wrong alert") } // try to find with limit = 1 - foundAlerts, err = b.Alerts(alerts.AlertsOpts{Limit: 1}) + ar, err = b.Alerts(alerts.AlertsOpts{Limit: 1}) + foundAlerts = ar.Alerts tt.OK(err) if len(foundAlerts) != 1 || foundAlerts[0].ID != alert2.ID { t.Fatal("wrong alert") @@ -1975,7 +1977,8 @@ func TestAlerts(t *testing.T) { // dismiss all tt.OK(b.DismissAllAlerts(context.Background())) - foundAlerts, err = b.Alerts(alerts.AlertsOpts{}) + ar, err = b.Alerts(alerts.AlertsOpts{}) + foundAlerts = ar.Alerts tt.OK(err) if len(foundAlerts) != 0 { t.Fatal("expected 0 alerts", len(foundAlerts)) From ce3e6924e1604c61c9b528749185b451107530c9 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 20 Feb 2024 18:52:02 +0100 Subject: [PATCH 068/172] bus: revert dismissall alerts endpoint --- alerts/alerts.go | 17 ----------------- bus/bus.go | 7 ------- bus/client/alerts.go | 5 ----- internal/testing/cluster_test.go | 9 --------- 4 files changed, 38 deletions(-) diff --git a/alerts/alerts.go b/alerts/alerts.go index f5e19231a..b0d4963c6 100644 --- a/alerts/alerts.go +++ b/alerts/alerts.go @@ -37,7 +37,6 @@ type ( Alerter interface { RegisterAlert(_ context.Context, a Alert) error DismissAlerts(_ context.Context, ids ...types.Hash256) error - DismissAllAlerts(_ context.Context) error } // Severity indicates the severity of an alert. @@ -142,17 +141,6 @@ func (m *Manager) RegisterAlert(ctx context.Context, alert Alert) error { }) } -// DismissAllAlerts implements the Alerter interface. -func (m *Manager) DismissAllAlerts(ctx context.Context) error { - m.mu.Lock() - toDismiss := make([]types.Hash256, 0, len(m.alerts)) - for alertID := range m.alerts { - toDismiss = append(toDismiss, alertID) - } - m.mu.Unlock() - return m.DismissAlerts(ctx, toDismiss...) -} - // DismissAlerts implements the Alerter interface. func (m *Manager) DismissAlerts(ctx context.Context, ids ...types.Hash256) error { var dismissed []types.Hash256 @@ -252,11 +240,6 @@ func (a *originAlerter) RegisterAlert(ctx context.Context, alert Alert) error { return a.alerter.RegisterAlert(ctx, alert) } -// DismissAllAlerts implements the Alerter interface. 
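Since this revert removes the dedicated dismiss-all endpoint again, dismissing everything remains possible from the client side. A sketch built only from calls that survive the revert (an editorial aside, not part of the patch):

	resp, err := c.Alerts(alerts.AlertsOpts{}) // offset 0, no limit: fetches all alerts
	if err != nil {
		return err
	}
	ids := make([]types.Hash256, 0, len(resp.Alerts))
	for _, a := range resp.Alerts {
		ids = append(ids, a.ID)
	}
	return c.DismissAlerts(ctx, ids...)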
-func (a *originAlerter) DismissAllAlerts(ctx context.Context) error { - return a.alerter.DismissAllAlerts(ctx) -} - // DismissAlerts implements the Alerter interface. func (a *originAlerter) DismissAlerts(ctx context.Context, ids ...types.Hash256) error { return a.alerter.DismissAlerts(ctx, ids...) diff --git a/bus/bus.go b/bus/bus.go index 6cce5256d..6c80f065a 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -1738,13 +1738,6 @@ func (b *bus) handleGETAlertsPaginated(jc jape.Context) { } func (b *bus) handlePOSTAlertsDismiss(jc jape.Context) { - var all bool - if jc.DecodeForm("all", &all) != nil { - return - } else if all { - jc.Check("failed to dismiss all alerts", b.alertMgr.DismissAllAlerts(jc.Request.Context())) - return - } var ids []types.Hash256 if jc.Decode(&ids) != nil { return diff --git a/bus/client/alerts.go b/bus/client/alerts.go index 1b876e877..7f2bf9aa7 100644 --- a/bus/client/alerts.go +++ b/bus/client/alerts.go @@ -25,11 +25,6 @@ func (c *Client) DismissAlerts(ctx context.Context, ids ...types.Hash256) error return c.dismissAlerts(ctx, false, ids...) } -// DismissAllAlerts dimisses all registered alerts. -func (c *Client) DismissAllAlerts(ctx context.Context) error { - return c.dismissAlerts(ctx, true) -} - func (c *Client) dismissAlerts(ctx context.Context, all bool, ids ...types.Hash256) error { values := url.Values{} if all { diff --git a/internal/testing/cluster_test.go b/internal/testing/cluster_test.go index f0734a0f3..5d439f5b7 100644 --- a/internal/testing/cluster_test.go +++ b/internal/testing/cluster_test.go @@ -1974,15 +1974,6 @@ func TestAlerts(t *testing.T) { if len(foundAlerts) != 1 || foundAlerts[0].ID != alert2.ID { t.Fatal("wrong alert") } - - // dismiss all - tt.OK(b.DismissAllAlerts(context.Background())) - ar, err = b.Alerts(alerts.AlertsOpts{}) - foundAlerts = ar.Alerts - tt.OK(err) - if len(foundAlerts) != 0 { - t.Fatal("expected 0 alerts", len(foundAlerts)) - } } func TestMultipartUploads(t *testing.T) { From 11980346ed168492d6f70ecf6812238eb3d9e6d1 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 20 Feb 2024 19:03:25 +0100 Subject: [PATCH 069/172] worker: don't register alert for cancelled download --- worker/download.go | 3 ++- worker/worker.go | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/worker/download.go b/worker/download.go index f23c8e640..462a2292d 100644 --- a/worker/download.go +++ b/worker/download.go @@ -26,6 +26,7 @@ const ( var ( errDownloadNotEnoughHosts = errors.New("not enough hosts available to download the slab") + errDownloadCancelled = errors.New("download was cancelled") ) type ( @@ -290,7 +291,7 @@ outer: case <-mgr.shutdownCtx.Done(): return ErrShuttingDown case <-ctx.Done(): - return errors.New("download timed out") + return errDownloadCancelled case resp = <-responseChan: } diff --git a/worker/worker.go b/worker/worker.go index 094722c00..15b3ae509 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -922,7 +922,7 @@ func (w *worker) objectsHandlerGET(jc jape.Context) { err = w.downloadManager.DownloadObject(ctx, wr, res.Object.Object, uint64(offset), uint64(length), contracts) if err != nil { w.logger.Error(err) - if !errors.Is(err, ErrShuttingDown) { + if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errDownloadCancelled) { w.registerAlert(newDownloadFailedAlert(bucket, path, prefix, marker, offset, length, int64(len(contracts)), err)) } } From 69bde0662d0bed67e4a8afed271e1d6165bb599b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 21 Feb 2024 10:45:28 +0100 Subject: [PATCH 
070/172] worker: don't register download error alert for errDownloadCancelled or io.ErrClosedPipe --- worker/worker.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/worker/worker.go b/worker/worker.go index 15b3ae509..e8355686b 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -922,7 +922,9 @@ func (w *worker) objectsHandlerGET(jc jape.Context) { err = w.downloadManager.DownloadObject(ctx, wr, res.Object.Object, uint64(offset), uint64(length), contracts) if err != nil { w.logger.Error(err) - if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errDownloadCancelled) { + if !errors.Is(err, ErrShuttingDown) && + !errors.Is(err, errDownloadCancelled) && + !errors.Is(err, io.ErrClosedPipe) { w.registerAlert(newDownloadFailedAlert(bucket, path, prefix, marker, offset, length, int64(len(contracts)), err)) } } From 709acfe4cccd0f9886d6497afd1646135b0bdb0b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 21 Feb 2024 10:54:25 +0100 Subject: [PATCH 071/172] bus: fix japecheck --- bus/bus.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/bus/bus.go b/bus/bus.go index 6c80f065a..70471fa05 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -1715,16 +1715,16 @@ func (b *bus) gougingParams(ctx context.Context) (api.GougingParams, error) { }, nil } -func (b *bus) handleGETAlerts(jc jape.Context) { - if jc.Request.FormValue("offset") != "" || jc.Request.FormValue("limit") != "" { - b.handleGETAlertsPaginated(jc) - return - } +func (b *bus) handleGETAlertsDeprecated(jc jape.Context) { ar := b.alertMgr.Active(0, -1) jc.Encode(ar.Alerts) } -func (b *bus) handleGETAlertsPaginated(jc jape.Context) { +func (b *bus) handleGETAlerts(jc jape.Context) { + if jc.Request.FormValue("offset") == "" && jc.Request.FormValue("limit") == "" { + b.handleGETAlertsDeprecated(jc) + return + } offset, limit := 0, -1 if jc.DecodeForm("offset", &offset) != nil { return From aacaaa7b4f6aedadb37458c8e51a02243a50a83c Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 22 Feb 2024 10:35:46 +0100 Subject: [PATCH 072/172] stores: remove frand.Shuffle --- stores/metadata_test.go | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index ad886e6ef..3c1ca4997 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -4317,13 +4317,7 @@ func TestTypeCurrency(t *testing.T) { } // insert currencies in random order - values := []interface{}{ - bCurrency(types.ZeroCurrency), - bCurrency(types.NewCurrency64(1)), - bCurrency(types.MaxCurrency), - } - frand.Shuffle(len(values), func(i, j int) { values[i], values[j] = values[j], values[i] }) - if err := ss.db.Exec("INSERT INTO currencies (c) VALUES (?),(?),(?);", values...).Error; err != nil { + if err := ss.db.Exec("INSERT INTO currencies (c) VALUES (?),(?),(?);", bCurrency(types.MaxCurrency), bCurrency(types.NewCurrency64(1)), bCurrency(types.ZeroCurrency)).Error; err != nil { t.Fatal(err) } From 414090b2e615d77a93d6397640dda984216e837a Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 22 Feb 2024 10:51:36 +0100 Subject: [PATCH 073/172] stores: change bCurrency type to BigEndian (#991) --- stores/metadata_test.go | 16 +++++++++++----- stores/types.go | 8 ++++---- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index f4ef64283..07036064e 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -4326,13 +4326,13 @@ func TestTypeCurrency(t *testing.T) { cmp: "=", }, { - a: 
types.NewCurrency(0, math.MaxUint64), - b: types.NewCurrency(math.MaxUint64, 0), + a: types.NewCurrency(math.MaxUint64, 0), + b: types.NewCurrency(0, math.MaxUint64), cmp: "<", }, { - a: types.NewCurrency(math.MaxUint64, 0), - b: types.NewCurrency(0, math.MaxUint64), + a: types.NewCurrency(0, math.MaxUint64), + b: types.NewCurrency(math.MaxUint64, 0), cmp: ">", }, } @@ -4342,7 +4342,13 @@ func TestTypeCurrency(t *testing.T) { if err != nil { t.Fatal(err) } else if !result { - t.Fatal("unexpected result", result) + t.Fatalf("unexpected result %v for %v %v %v", result, test.a, test.cmp, test.b) + } else if test.cmp == "<" && test.a.Cmp(test.b) >= 0 { + t.Fatal("invalid result") + } else if test.cmp == ">" && test.a.Cmp(test.b) <= 0 { + t.Fatal("invalid result") + } else if test.cmp == "=" && test.a.Cmp(test.b) != 0 { + t.Fatal("invalid result") } } diff --git a/stores/types.go b/stores/types.go index 9a7c72009..42a8d29e4 100644 --- a/stores/types.go +++ b/stores/types.go @@ -354,15 +354,15 @@ func (sc *bCurrency) Scan(src any) error { return fmt.Errorf("cannot scan %d bytes to Currency", len(buf)) } - sc.Lo = binary.LittleEndian.Uint64(buf[:8]) - sc.Hi = binary.LittleEndian.Uint64(buf[8:]) + sc.Hi = binary.BigEndian.Uint64(buf[:8]) + sc.Lo = binary.BigEndian.Uint64(buf[8:]) return nil } // Value implements the driver.Valuer interface. func (sc bCurrency) Value() (driver.Value, error) { buf := make([]byte, 16) - binary.LittleEndian.PutUint64(buf[:8], sc.Lo) - binary.LittleEndian.PutUint64(buf[8:], sc.Hi) + binary.BigEndian.PutUint64(buf[:8], sc.Hi) + binary.BigEndian.PutUint64(buf[8:], sc.Lo) return buf, nil } From 5b968e7e71ccce04f98a3d36f5c17997a2d17907 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 22 Feb 2024 13:49:23 +0100 Subject: [PATCH 074/172] stores: numDeleted --- stores/metadata.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stores/metadata.go b/stores/metadata.go index 2ab2cf5da..eaad76c17 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -2708,7 +2708,7 @@ func (s *SQLStore) deleteObject(tx *gorm.DB, bucket string, path string) (int64, if numDeleted == 0 { return 0, nil // nothing to prune if no object was deleted } else if err := pruneSlabs(tx); err != nil { - return 0, err + return numDeleted, err } return numDeleted, nil } From fb5bc3519bbd4f3321457aca056d8ad3cd90800a Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 21 Feb 2024 16:12:46 +0100 Subject: [PATCH 075/172] worker: add benchmarks for upload through uploader --- worker/bench_test.go | 88 ++++++++++++++++++++++++++++++++++++++++++++ worker/upload.go | 39 +++++++++++++------- 2 files changed, 113 insertions(+), 14 deletions(-) create mode 100644 worker/bench_test.go diff --git a/worker/bench_test.go b/worker/bench_test.go new file mode 100644 index 000000000..d5db86c0a --- /dev/null +++ b/worker/bench_test.go @@ -0,0 +1,88 @@ +package worker + +import ( + "context" + "io" + "sync" + "testing" + + rhpv2 "go.sia.tech/core/rhp/v2" +) + +// zeroReader is a reader that leaves the buffer unchanged and returns no error. +// It's useful for benchmarks that need to produce data for uploading and should +// be used together with a io.LimitReader. +type zeroReader struct{} + +func (z *zeroReader) Read(p []byte) (n int, err error) { + return len(p), nil +} + +// BenchmarkUploaderPacking benchmarks the Upload function with packing +// disabled. 
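Stepping back to the bCurrency change in patch 073 above: storing the value big-endian makes byte-wise BLOB comparison agree with numeric ordering, which the previous little-endian layout did not. A standalone illustration (editorial, not part of the patch):

	pack := func(hi, lo uint64) []byte {
		b := make([]byte, 16)
		binary.BigEndian.PutUint64(b[:8], hi)
		binary.BigEndian.PutUint64(b[8:], lo)
		return b
	}
	one := pack(0, 1) // types.NewCurrency64(1)
	big := pack(1, 0) // 2^64, i.e. types.NewCurrency(0, 1)
	fmt.Println(bytes.Compare(one, big) < 0) // true: byte order matches numeric order

That property is what lets the rewritten TestTypeCurrency rely on a plain ORDER BY c ASC over the raw column.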
+func BenchmarkUploaderNoPacking(b *testing.B) { + w := newMockWorker() + + minDataPieces := 10 + totalDataPieces := 30 + + w.addHosts(totalDataPieces) + + // create a reader that returns dev/null + data := io.LimitReader(&zeroReader{}, int64(b.N*rhpv2.SectorSize*minDataPieces)) + + up := testParameters(b.TempDir()) + up.rs.MinShards = minDataPieces + up.rs.TotalShards = totalDataPieces + up.packing = false + + b.ResetTimer() + + _, _, err := w.ul.Upload(context.Background(), data, w.contracts(), up, lockingPriorityUpload) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(rhpv2.SectorSize * minDataPieces)) +} + +// BenchmarkSectorRoot30Goroutines benchmarks the SectorRoot function with 30 +// goroutines processing roots in parallel to simulate sequential uploads of +// slabs. +func BenchmarkSectorRoot30Goroutines(b *testing.B) { + data := make([]byte, rhpv2.SectorSize) + b.SetBytes(int64(rhpv2.SectorSize)) + + // spin up workers + c := make(chan struct{}) + work := func() { + for range c { + rhpv2.SectorRoot((*[rhpv2.SectorSize]byte)(data)) + } + } + var wg sync.WaitGroup + for i := 0; i < 30; i++ { + wg.Add(1) + go func() { + work() + wg.Done() + }() + } + b.ResetTimer() + + // run the benchmark + for i := 0; i < b.N; i++ { + c <- struct{}{} + } + close(c) + wg.Wait() +} + +// BenchmarkSectorRootSingleGoroutine benchmarks the SectorRoot function. +func BenchmarkSectorRootSingleGoroutine(b *testing.B) { + data := make([]byte, rhpv2.SectorSize) + b.SetBytes(rhpv2.SectorSize) + b.ResetTimer() + for i := 0; i < b.N; i++ { + rhpv2.SectorRoot((*[rhpv2.SectorSize]byte)(data)) + } +} diff --git a/worker/upload.go b/worker/upload.go index 72c65bf07..911593929 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -765,20 +765,26 @@ func (u *upload) newSlabUpload(ctx context.Context, shards [][]byte, uploaders [ responseChan := make(chan sectorUploadResp) // prepare sectors + var wg sync.WaitGroup sectors := make([]*sectorUpload, len(shards)) - for sI, shard := range shards { - // create the ctx - sCtx, sCancel := context.WithCancel(ctx) - - // create the sector - sectors[sI] = §orUpload{ - data: (*[rhpv2.SectorSize]byte)(shard), - index: sI, - root: rhpv2.SectorRoot((*[rhpv2.SectorSize]byte)(shard)), - ctx: sCtx, - cancel: sCancel, - } + for sI := range shards { + wg.Add(1) + go func(idx int) { + // create the ctx + sCtx, sCancel := context.WithCancel(ctx) + + // create the sector + sectors[idx] = §orUpload{ + data: (*[rhpv2.SectorSize]byte)(shards[idx]), + index: idx, + root: rhpv2.SectorRoot((*[rhpv2.SectorSize]byte)(shards[idx])), + ctx: sCtx, + cancel: sCancel, + } + wg.Done() + }(sI) } + wg.Wait() // prepare candidates candidates := make([]*candidate, len(uploaders)) @@ -833,8 +839,6 @@ func (u *upload) uploadSlab(ctx context.Context, rs api.RedundancySettings, data } func (u *upload) uploadShards(ctx context.Context, shards [][]byte, candidates []*uploader, mem Memory, maxOverdrive uint64, overdriveTimeout time.Duration) (sectors []object.Sector, uploadSpeed int64, overdrivePct float64, err error) { - start := time.Now() - // ensure inflight uploads get cancelled ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -871,6 +875,10 @@ func (u *upload) uploadShards(ctx context.Context, shards [][]byte, candidates [ // create a request buffer var buffer []*sectorUploadReq + // start the timer after the upload has started + // newSlabUpload is quite slow due to computing the sector roots + start := time.Now() + // collect responses var used bool var done bool @@ -930,6 +938,9 @@ loop: 
// calculate the upload speed bytes := slab.numUploaded * rhpv2.SectorSize ms := time.Since(start).Milliseconds() + if ms == 0 { + ms = 1 + } uploadSpeed = int64(bytes) / ms // calculate overdrive pct From e1d45a46c643bd65dd87c130f51fc049b0df2401 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 21 Feb 2024 16:24:15 +0100 Subject: [PATCH 076/172] worker: add benchmark results --- worker/bench_test.go | 61 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 47 insertions(+), 14 deletions(-) diff --git a/worker/bench_test.go b/worker/bench_test.go index d5db86c0a..d1f31d2a0 100644 --- a/worker/bench_test.go +++ b/worker/bench_test.go @@ -18,36 +18,66 @@ func (z *zeroReader) Read(p []byte) (n int, err error) { return len(p), nil } -// BenchmarkUploaderPacking benchmarks the Upload function with packing -// disabled. -func BenchmarkUploaderNoPacking(b *testing.B) { +// BenchmarkUploaderSingleObjectNoPacking benchmarks uploading a single object +// without packing. +// +// Speed | CPU | Commit +// 201.59 MB/s | M2 Pro | c31245f +func BenchmarkUploaderSingleObjectNoPacking(b *testing.B) { w := newMockWorker() - minDataPieces := 10 - totalDataPieces := 30 + up := testParameters(b.TempDir()) + up.rs.MinShards = 10 + up.rs.TotalShards = 30 + up.packing = false - w.addHosts(totalDataPieces) + w.addHosts(up.rs.TotalShards) // create a reader that returns dev/null - data := io.LimitReader(&zeroReader{}, int64(b.N*rhpv2.SectorSize*minDataPieces)) - - up := testParameters(b.TempDir()) - up.rs.MinShards = minDataPieces - up.rs.TotalShards = totalDataPieces - up.packing = false + data := io.LimitReader(&zeroReader{}, int64(b.N*rhpv2.SectorSize*up.rs.MinShards)) + b.SetBytes(int64(rhpv2.SectorSize * up.rs.MinShards)) b.ResetTimer() - _, _, err := w.ul.Upload(context.Background(), data, w.contracts(), up, lockingPriorityUpload) if err != nil { b.Fatal(err) } - b.SetBytes(int64(rhpv2.SectorSize * minDataPieces)) +} + +// BenchmarkUploaderSingleObjectNoPacking benchmarks uploading one object per +// slab without packing. +// +// Speed | CPU | Commit +// 116.40 MB/s | M2 Pro | c31245f +func BenchmarkUploaderMultiObjectNoPacking(b *testing.B) { + w := newMockWorker() + + up := testParameters(b.TempDir()) + up.rs.MinShards = 10 + up.rs.TotalShards = 30 + up.packing = false + + w.addHosts(up.rs.TotalShards) + + // create a reader that returns dev/null + b.SetBytes(int64(rhpv2.SectorSize * up.rs.MinShards)) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + data := io.LimitReader(&zeroReader{}, int64(rhpv2.SectorSize*up.rs.MinShards)) + _, _, err := w.ul.Upload(context.Background(), data, w.contracts(), up, lockingPriorityUpload) + if err != nil { + b.Fatal(err) + } + } } // BenchmarkSectorRoot30Goroutines benchmarks the SectorRoot function with 30 // goroutines processing roots in parallel to simulate sequential uploads of // slabs. +// +// Speed | CPU | Commit +// 1671.26 MB/s | M2 Pro | c31245f func BenchmarkSectorRoot30Goroutines(b *testing.B) { data := make([]byte, rhpv2.SectorSize) b.SetBytes(int64(rhpv2.SectorSize)) @@ -78,6 +108,9 @@ func BenchmarkSectorRoot30Goroutines(b *testing.B) { } // BenchmarkSectorRootSingleGoroutine benchmarks the SectorRoot function. 
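A back-of-the-envelope reading of these numbers (hedged, using the benchmarks' decimal MB): a 10-of-30 slab carries 30 shards of 4 MiB, roughly 126 MB of hashing per slab, so at the ~176 MB/s single-goroutine rate SectorRoot alone costs about 0.7 s per slab, while at ~1670 MB/s across 30 goroutines it drops to roughly 75 ms. That gap is why newSlabUpload in patch 075 computes the roots concurrently and why the upload timer now starts only after they are done.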
+// +// Speed | CPU | Commit +// 176.43 MB/s | M2 Pro | c31245f func BenchmarkSectorRootSingleGoroutine(b *testing.B) { data := make([]byte, rhpv2.SectorSize) b.SetBytes(rhpv2.SectorSize) From fd166d80f1c791f879a8ed0801e5e2ff4d60a52c Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 21 Feb 2024 17:10:00 +0100 Subject: [PATCH 077/172] worker: docstring update --- worker/bench_test.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/worker/bench_test.go b/worker/bench_test.go index d1f31d2a0..84a3aaa8a 100644 --- a/worker/bench_test.go +++ b/worker/bench_test.go @@ -18,12 +18,11 @@ func (z *zeroReader) Read(p []byte) (n int, err error) { return len(p), nil } -// BenchmarkUploaderSingleObjectNoPacking benchmarks uploading a single object -// without packing. +// BenchmarkUploaderSingleObject benchmarks uploading a single object. // // Speed | CPU | Commit // 201.59 MB/s | M2 Pro | c31245f -func BenchmarkUploaderSingleObjectNoPacking(b *testing.B) { +func BenchmarkUploaderSingleObject(b *testing.B) { w := newMockWorker() up := testParameters(b.TempDir()) @@ -44,12 +43,11 @@ func BenchmarkUploaderSingleObjectNoPacking(b *testing.B) { } } -// BenchmarkUploaderSingleObjectNoPacking benchmarks uploading one object per -// slab without packing. +// BenchmarkUploaderSingleObject benchmarks uploading one object per slab. // // Speed | CPU | Commit // 116.40 MB/s | M2 Pro | c31245f -func BenchmarkUploaderMultiObjectNoPacking(b *testing.B) { +func BenchmarkUploaderMultiObject(b *testing.B) { w := newMockWorker() up := testParameters(b.TempDir()) From 9f27c17d597252de29e74d6c4d248f65c42e62a3 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 21 Feb 2024 17:26:14 +0100 Subject: [PATCH 078/172] worker: remove hash reader --- worker/upload.go | 15 ++++++++++----- worker/upload_utils.go | 27 --------------------------- 2 files changed, 10 insertions(+), 32 deletions(-) diff --git a/worker/upload.go b/worker/upload.go index 911593929..56444cded 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -2,6 +2,8 @@ package worker import ( "context" + "crypto/md5" + "encoding/hex" "errors" "fmt" "io" @@ -400,11 +402,8 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a // create the object o := object.NewObject(up.ec) - // create the hash reader - hr := newHashReader(r) - // create the cipher reader - cr, err := o.Encrypt(hr, up.encryptionOffset) + cr, err := o.Encrypt(r, up.encryptionOffset) if err != nil { return false, "", err } @@ -533,7 +532,13 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a } // calculate the eTag - eTag = hr.Hash() + h := md5.New() + for _, slab := range o.Slabs { + for _, shard := range slab.Shards { + h.Write(shard.Root[:]) + } + } + eTag = string(hex.EncodeToString(h.Sum(nil))) // add partial slabs if len(partialSlab) > 0 { diff --git a/worker/upload_utils.go b/worker/upload_utils.go index 4b5241b4d..306e1774f 100644 --- a/worker/upload_utils.go +++ b/worker/upload_utils.go @@ -2,11 +2,9 @@ package worker import ( "bytes" - "encoding/hex" "io" "github.com/gabriel-vasile/mimetype" - "go.sia.tech/core/types" "go.sia.tech/renterd/object" ) @@ -28,28 +26,3 @@ func newMimeReader(r io.Reader) (mimeType string, recycled io.Reader, err error) recycled = io.MultiReader(buf, r) return mtype.String(), recycled, err } - -type hashReader struct { - r io.Reader - h *types.Hasher -} - -func newHashReader(r io.Reader) *hashReader { - return &hashReader{ - r: r, - h: types.NewHasher(), - } -} 
- -func (e *hashReader) Read(p []byte) (int, error) { - n, err := e.r.Read(p) - if _, wErr := e.h.E.Write(p[:n]); wErr != nil { - return 0, wErr - } - return n, err -} - -func (e *hashReader) Hash() string { - sum := e.h.Sum() - return hex.EncodeToString(sum[:]) -} From 24fb81a4c249be120d8a59a22f39919f8dbd48e4 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 21 Feb 2024 17:28:32 +0100 Subject: [PATCH 079/172] worker: update benchmark result --- worker/bench_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/worker/bench_test.go b/worker/bench_test.go index 84a3aaa8a..d9264eddc 100644 --- a/worker/bench_test.go +++ b/worker/bench_test.go @@ -21,7 +21,7 @@ func (z *zeroReader) Read(p []byte) (n int, err error) { // BenchmarkUploaderSingleObject benchmarks uploading a single object. // // Speed | CPU | Commit -// 201.59 MB/s | M2 Pro | c31245f +// 217.35 MB/s | M2 Pro | afee1ac func BenchmarkUploaderSingleObject(b *testing.B) { w := newMockWorker() @@ -46,7 +46,7 @@ func BenchmarkUploaderSingleObject(b *testing.B) { // BenchmarkUploaderSingleObject benchmarks uploading one object per slab. // // Speed | CPU | Commit -// 116.40 MB/s | M2 Pro | c31245f +// 139.74 MB/s | M2 Pro | afee1ac func BenchmarkUploaderMultiObject(b *testing.B) { w := newMockWorker() @@ -75,7 +75,7 @@ func BenchmarkUploaderMultiObject(b *testing.B) { // slabs. // // Speed | CPU | Commit -// 1671.26 MB/s | M2 Pro | c31245f +// 1611.98 MB/s | M2 Pro | afee1ac func BenchmarkSectorRoot30Goroutines(b *testing.B) { data := make([]byte, rhpv2.SectorSize) b.SetBytes(int64(rhpv2.SectorSize)) @@ -108,7 +108,7 @@ func BenchmarkSectorRoot30Goroutines(b *testing.B) { // BenchmarkSectorRootSingleGoroutine benchmarks the SectorRoot function. // // Speed | CPU | Commit -// 176.43 MB/s | M2 Pro | c31245f +// 174.71 MB/s | M2 Pro | afee1ac func BenchmarkSectorRootSingleGoroutine(b *testing.B) { data := make([]byte, rhpv2.SectorSize) b.SetBytes(rhpv2.SectorSize) From 11d0152ba37b47c3c6c5867e8a1a43bc36366317 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 21 Feb 2024 18:02:49 +0100 Subject: [PATCH 080/172] worker: encrypt using multiple goroutines --- object/slab.go | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/object/slab.go b/object/slab.go index 9c3afa608..aa8bb7d45 100644 --- a/object/slab.go +++ b/object/slab.go @@ -3,6 +3,7 @@ package object import ( "bytes" "io" + "sync" "github.com/klauspost/reedsolomon" rhpv2 "go.sia.tech/core/rhp/v2" @@ -79,11 +80,17 @@ func (s Slab) Length() int { // Encrypt xors shards with the keystream derived from s.Key, using a // different nonce for each shard. func (s Slab) Encrypt(shards [][]byte) { - for i, shard := range shards { - nonce := [24]byte{1: byte(i)} - c, _ := chacha20.NewUnauthenticatedCipher(s.Key.entropy[:], nonce[:]) - c.XORKeyStream(shard, shard) + var wg sync.WaitGroup + for i := range shards { + wg.Add(1) + go func(i int) { + nonce := [24]byte{1: byte(i)} + c, _ := chacha20.NewUnauthenticatedCipher(s.Key.entropy[:], nonce[:]) + c.XORKeyStream(shards[i], shards[i]) + wg.Done() + }(i) } + wg.Wait() } // Encode encodes slab data into sector-sized shards. 
The supplied shards should From c91892506bab43f0163e14fa4bf509d0324f25d6 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 21 Feb 2024 18:05:31 +0100 Subject: [PATCH 081/172] worker: update benchmark results --- worker/bench_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/worker/bench_test.go b/worker/bench_test.go index d9264eddc..f864df9ca 100644 --- a/worker/bench_test.go +++ b/worker/bench_test.go @@ -21,7 +21,7 @@ func (z *zeroReader) Read(p []byte) (n int, err error) { // BenchmarkUploaderSingleObject benchmarks uploading a single object. // // Speed | CPU | Commit -// 217.35 MB/s | M2 Pro | afee1ac +// 232.97 MB/s | M2 Pro | 26d3119 func BenchmarkUploaderSingleObject(b *testing.B) { w := newMockWorker() @@ -46,7 +46,7 @@ func BenchmarkUploaderSingleObject(b *testing.B) { // BenchmarkUploaderSingleObject benchmarks uploading one object per slab. // // Speed | CPU | Commit -// 139.74 MB/s | M2 Pro | afee1ac +// 185.10 MB/s | M2 Pro | 26d3119 func BenchmarkUploaderMultiObject(b *testing.B) { w := newMockWorker() @@ -75,7 +75,7 @@ func BenchmarkUploaderMultiObject(b *testing.B) { // slabs. // // Speed | CPU | Commit -// 1611.98 MB/s | M2 Pro | afee1ac +// 1668.87 MB/s | M2 Pro | 26d3119 func BenchmarkSectorRoot30Goroutines(b *testing.B) { data := make([]byte, rhpv2.SectorSize) b.SetBytes(int64(rhpv2.SectorSize)) @@ -108,7 +108,7 @@ func BenchmarkSectorRoot30Goroutines(b *testing.B) { // BenchmarkSectorRootSingleGoroutine benchmarks the SectorRoot function. // // Speed | CPU | Commit -// 174.71 MB/s | M2 Pro | afee1ac +// 176.91 MB/s | M2 Pro | 26d3119 func BenchmarkSectorRootSingleGoroutine(b *testing.B) { data := make([]byte, rhpv2.SectorSize) b.SetBytes(rhpv2.SectorSize) From 5ceccfc411a614b57e7ef4faa995815f4be04f76 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 22 Feb 2024 09:40:54 +0100 Subject: [PATCH 082/172] worker: ComputeEtag method --- object/object.go | 17 +++++++++++++++++ worker/upload.go | 12 ++---------- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/object/object.go b/object/object.go index 7c74c1c23..2331f6251 100644 --- a/object/object.go +++ b/object/object.go @@ -3,6 +3,7 @@ package object import ( "bytes" "crypto/cipher" + "crypto/md5" "encoding/binary" "encoding/hex" "fmt" @@ -142,6 +143,22 @@ func (o Object) Contracts() map[types.PublicKey]map[types.FileContractID]struct{ return usedContracts } +func (o *Object) ComputeETag() string { + // calculate the eTag using the precomputed sector roots to avoid having to + // hash the entire object again. + h := md5.New() + b := make([]byte, 8) + for _, slab := range o.Slabs { + binary.LittleEndian.PutUint32(b[:4], slab.Offset) + binary.LittleEndian.PutUint32(b[4:], slab.Length) + h.Write(b) + for _, shard := range slab.Shards { + h.Write(shard.Root[:]) + } + } + return string(hex.EncodeToString(h.Sum(nil))) +} + // TotalSize returns the total size of the object. 
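Two hedged observations on the ComputeETag helper above: because the digest covers each slab's offset and length plus the roots of the encrypted, erasure-coded shards, (1) it is cheap, since no object data is re-read or re-hashed, and (2) it is not a content MD5: the same plaintext uploaded under a different object key produces different shard roots and therefore a different ETag, so it intentionally diverges from S3's content-MD5 ETag semantics for simple uploads in exchange for being derivable from metadata alone.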
func (o Object) TotalSize() int64 { var n int64 diff --git a/worker/upload.go b/worker/upload.go index 56444cded..232e05981 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -2,8 +2,6 @@ package worker import ( "context" - "crypto/md5" - "encoding/hex" "errors" "fmt" "io" @@ -531,14 +529,8 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a o.Slabs = append(o.Slabs, resp.slab) } - // calculate the eTag - h := md5.New() - for _, slab := range o.Slabs { - for _, shard := range slab.Shards { - h.Write(shard.Root[:]) - } - } - eTag = string(hex.EncodeToString(h.Sum(nil))) + // compute etag + eTag = o.ComputeETag() // add partial slabs if len(partialSlab) > 0 { From cfb8cb600273f92209e7df1706707bf3c2c741b8 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 22 Feb 2024 13:28:04 +0100 Subject: [PATCH 083/172] worker: pass expected root to UploadSector --- worker/host.go | 18 +++++++++--------- worker/host_test.go | 4 +--- worker/mocks_test.go | 11 +++++------ worker/rhpv3.go | 21 ++++++++++----------- worker/upload.go | 25 ++++--------------------- worker/uploader.go | 32 +++++++++++++++++--------------- 6 files changed, 46 insertions(+), 65 deletions(-) diff --git a/worker/host.go b/worker/host.go index 43e0891af..e5642efdd 100644 --- a/worker/host.go +++ b/worker/host.go @@ -21,7 +21,7 @@ type ( PublicKey() types.PublicKey DownloadSector(ctx context.Context, w io.Writer, root types.Hash256, offset, length uint32, overpay bool) error - UploadSector(ctx context.Context, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) (types.Hash256, error) + UploadSector(ctx context.Context, sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) error FetchPriceTable(ctx context.Context, rev *types.FileContractRevision) (hpt hostdb.HostPriceTable, err error) FetchRevision(ctx context.Context, fetchTimeout time.Duration) (types.FileContractRevision, error) @@ -121,11 +121,11 @@ func (h *host) DownloadSector(ctx context.Context, w io.Writer, root types.Hash2 }) } -func (h *host) UploadSector(ctx context.Context, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) (root types.Hash256, err error) { +func (h *host) UploadSector(ctx context.Context, sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) (err error) { // fetch price table pt, err := h.priceTable(ctx, nil) if err != nil { - return types.Hash256{}, err + return err } // prepare payment @@ -134,28 +134,28 @@ func (h *host) UploadSector(ctx context.Context, sector *[rhpv2.SectorSize]byte, // insufficient balance error expectedCost, _, _, err := uploadSectorCost(pt, rev.WindowEnd) if err != nil { - return types.Hash256{}, err + return err } if rev.RevisionNumber == math.MaxUint64 { - return types.Hash256{}, fmt.Errorf("revision number has reached max, fcid %v", rev.ParentID) + return fmt.Errorf("revision number has reached max, fcid %v", rev.ParentID) } payment, ok := rhpv3.PayByContract(&rev, expectedCost, h.acc.id, h.renterKey) if !ok { - return types.Hash256{}, errors.New("failed to create payment") + return errors.New("failed to create payment") } var cost types.Currency err = h.transportPool.withTransportV3(ctx, h.hk, h.siamuxAddr, func(ctx context.Context, t *transportV3) error { - root, cost, err = RPCAppendSector(ctx, t, h.renterKey, pt, &rev, &payment, sector) + cost, err = RPCAppendSector(ctx, t, h.renterKey, pt, &rev, &payment, sectorRoot, sector) return err }) if err != nil { - return types.Hash256{}, err 
+ return err } // record spending h.contractSpendingRecorder.Record(rev, api.ContractSpending{Uploads: cost}) - return root, nil + return nil } func (h *host) RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rhpv2.ContractRevision, _ []types.Transaction, _ types.Currency, err error) { diff --git a/worker/host_test.go b/worker/host_test.go index 87d35fb36..78ce6b74e 100644 --- a/worker/host_test.go +++ b/worker/host_test.go @@ -16,11 +16,9 @@ func TestHost(t *testing.T) { sector, root := newMockSector() // upload the sector - uploaded, err := h.UploadSector(context.Background(), sector, types.FileContractRevision{}) + err := h.UploadSector(context.Background(), rhpv2.SectorRoot(sector), sector, types.FileContractRevision{}) if err != nil { t.Fatal(err) - } else if uploaded != root { - t.Fatal("root mismatch") } // download entire sector diff --git a/worker/mocks_test.go b/worker/mocks_test.go index 2490941af..a28e9256c 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -396,8 +396,9 @@ func (h *mockHost) DownloadSector(ctx context.Context, w io.Writer, root types.H return err } -func (h *mockHost) UploadSector(ctx context.Context, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) (types.Hash256, error) { - return h.contract().addSector(sector), nil +func (h *mockHost) UploadSector(ctx context.Context, sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) error { + h.contract().addSector(sectorRoot, sector) + return nil } func (h *mockHost) FetchRevision(ctx context.Context, fetchTimeout time.Duration) (rev types.FileContractRevision, _ error) { @@ -448,12 +449,10 @@ func newMockContract(hk types.PublicKey, fcid types.FileContractID) *mockContrac } } -func (c *mockContract) addSector(sector *[rhpv2.SectorSize]byte) (root types.Hash256) { - root = rhpv2.SectorRoot(sector) +func (c *mockContract) addSector(sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte) { c.mu.Lock() - c.sectors[root] = sector + c.sectors[sectorRoot] = sector c.mu.Unlock() - return } func (c *mockContract) sector(root types.Hash256) (sector *[rhpv2.SectorSize]byte, found bool) { diff --git a/worker/rhpv3.go b/worker/rhpv3.go index 03f67c6f6..b7ccfd69a 100644 --- a/worker/rhpv3.go +++ b/worker/rhpv3.go @@ -789,17 +789,17 @@ func RPCReadSector(ctx context.Context, t *transportV3, w io.Writer, pt rhpv3.Ho return } -func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.PrivateKey, pt rhpv3.HostPriceTable, rev *types.FileContractRevision, payment rhpv3.PaymentMethod, sector *[rhpv2.SectorSize]byte) (sectorRoot types.Hash256, cost types.Currency, err error) { +func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.PrivateKey, pt rhpv3.HostPriceTable, rev *types.FileContractRevision, payment rhpv3.PaymentMethod, sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte) (cost types.Currency, err error) { defer wrapErr(&err, "AppendSector") // sanity check revision first if rev.RevisionNumber == math.MaxUint64 { - return types.Hash256{}, types.ZeroCurrency, errMaxRevisionReached + return types.ZeroCurrency, errMaxRevisionReached } s, err := t.DialStream(ctx) if err != nil { - return types.Hash256{}, types.ZeroCurrency, err + return types.ZeroCurrency, err } defer s.Close() @@ -829,7 +829,7 @@ func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.Privat // compute expected collateral and refund expectedCost, expectedCollateral, expectedRefund, err := uploadSectorCost(pt, rev.WindowEnd) if err 
!= nil { - return types.Hash256{}, types.ZeroCurrency, err + return types.ZeroCurrency, err } // apply leeways. @@ -840,13 +840,13 @@ func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.Privat // check if the cost, collateral and refund match our expectation. if executeResp.TotalCost.Cmp(expectedCost) > 0 { - return types.Hash256{}, types.ZeroCurrency, fmt.Errorf("cost exceeds expectation: %v > %v", executeResp.TotalCost.String(), expectedCost.String()) + return types.ZeroCurrency, fmt.Errorf("cost exceeds expectation: %v > %v", executeResp.TotalCost.String(), expectedCost.String()) } if executeResp.FailureRefund.Cmp(expectedRefund) < 0 { - return types.Hash256{}, types.ZeroCurrency, fmt.Errorf("insufficient refund: %v < %v", executeResp.FailureRefund.String(), expectedRefund.String()) + return types.ZeroCurrency, fmt.Errorf("insufficient refund: %v < %v", executeResp.FailureRefund.String(), expectedRefund.String()) } if executeResp.AdditionalCollateral.Cmp(expectedCollateral) < 0 { - return types.Hash256{}, types.ZeroCurrency, fmt.Errorf("insufficient collateral: %v < %v", executeResp.AdditionalCollateral.String(), expectedCollateral.String()) + return types.ZeroCurrency, fmt.Errorf("insufficient collateral: %v < %v", executeResp.AdditionalCollateral.String(), expectedCollateral.String()) } // set the cost and refund @@ -870,18 +870,17 @@ func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.Privat collateral := executeResp.AdditionalCollateral.Add(executeResp.FailureRefund) // check proof - sectorRoot = rhpv2.SectorRoot(sector) if rev.Filesize == 0 { // For the first upload to a contract we don't get a proof. So we just // assert that the new contract root matches the root of the sector. if rev.Filesize == 0 && executeResp.NewMerkleRoot != sectorRoot { - return types.Hash256{}, types.ZeroCurrency, fmt.Errorf("merkle root doesn't match the sector root upon first upload to contract: %v != %v", executeResp.NewMerkleRoot, sectorRoot) + return types.ZeroCurrency, fmt.Errorf("merkle root doesn't match the sector root upon first upload to contract: %v != %v", executeResp.NewMerkleRoot, sectorRoot) } } else { // Otherwise we make sure the proof was transmitted and verify it. 
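	// (Put differently: VerifyDiffProof below replays a single
	// RPCWriteActionAppend of sectorRoot against the Merkle tree pinned
	// down by rev.FileMerkleRoot and its rev.Filesize/SectorSize leaves and
	// checks that it lands on the host's claimed NewMerkleRoot; the
	// rev.Filesize == 0 branch above is special-cased because an empty
	// contract has no prior tree to prove against, so comparing
	// NewMerkleRoot to the sector root directly is the only available
	// check.)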
actions := []rhpv2.RPCWriteAction{{Type: rhpv2.RPCWriteActionAppend}} // TODO: change once rhpv3 support is available if !rhpv2.VerifyDiffProof(actions, rev.Filesize/rhpv2.SectorSize, executeResp.Proof, []types.Hash256{}, rev.FileMerkleRoot, executeResp.NewMerkleRoot, []types.Hash256{sectorRoot}) { - return types.Hash256{}, types.ZeroCurrency, errors.New("proof verification failed") + return types.ZeroCurrency, errors.New("proof verification failed") } } @@ -889,7 +888,7 @@ func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.Privat newRevision := *rev newValid, newMissed, err := updateRevisionOutputs(&newRevision, types.ZeroCurrency, collateral) if err != nil { - return types.Hash256{}, types.ZeroCurrency, err + return types.ZeroCurrency, err } newRevision.Filesize += rhpv2.SectorSize newRevision.RevisionNumber++ diff --git a/worker/upload.go b/worker/upload.go index 232e05981..63be07b2b 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -137,9 +137,8 @@ type ( } sectorUploadResp struct { - req *sectorUploadReq - root types.Hash256 - err error + req *sectorUploadReq + err error } ) @@ -1065,12 +1064,6 @@ func (s *slabUpload) receive(resp sectorUploadResp) (bool, bool) { return false, false } - // sanity check we receive the expected root - if resp.root != req.sector.root { - s.errs[req.hk] = fmt.Errorf("root mismatch, %v != %v", resp.root, req.sector.root) - return false, false - } - // redundant sectors can't complete the upload if sector.uploaded.Root != (types.Hash256{}) { return false, false @@ -1080,7 +1073,7 @@ func (s *slabUpload) receive(resp sectorUploadResp) (bool, bool) { sector.finish(object.Sector{ Contracts: map[types.PublicKey][]types.FileContractID{req.hk: {req.fcid}}, LatestHost: req.hk, - Root: resp.root, + Root: req.sector.root, }) // update uploaded sectors @@ -1127,7 +1120,7 @@ func (req *sectorUploadReq) done() bool { } } -func (req *sectorUploadReq) fail(err error) { +func (req *sectorUploadReq) finish(err error) { select { case <-req.sector.ctx.Done(): case req.responseChan <- sectorUploadResp{ @@ -1136,13 +1129,3 @@ func (req *sectorUploadReq) fail(err error) { }: } } - -func (req *sectorUploadReq) succeed(root types.Hash256) { - select { - case <-req.sector.ctx.Done(): - case req.responseChan <- sectorUploadResp{ - req: req, - root: root, - }: - } -} diff --git a/worker/uploader.go b/worker/uploader.go index dcff27eaf..fa1d04651 100644 --- a/worker/uploader.go +++ b/worker/uploader.go @@ -114,7 +114,7 @@ outer: } // execute it - root, elapsed, err := u.execute(req) + elapsed, err := u.execute(req) // the uploader's contract got renewed, requeue the request if errors.Is(err, errMaxRevisionReached) { @@ -125,10 +125,12 @@ outer: } // send the response - if err != nil { - req.fail(err) - } else { - req.succeed(root) + select { + case <-req.sector.ctx.Done(): + case req.responseChan <- sectorUploadResp{ + req: req, + err: err, + }: } // track the error, ignore gracefully closed streams and canceled overdrives @@ -151,7 +153,7 @@ func (u *uploader) Stop(err error) { break } if !upload.done() { - upload.fail(err) + upload.finish(err) } } } @@ -161,7 +163,7 @@ func (u *uploader) enqueue(req *sectorUploadReq) { // check for stopped if u.stopped { u.mu.Unlock() - go req.fail(errUploaderStopped) // don't block the caller + go req.finish(errUploaderStopped) // don't block the caller return } @@ -192,7 +194,7 @@ func (u *uploader) estimate() float64 { return numSectors * estimateP90 } -func (u *uploader) execute(req *sectorUploadReq) (types.Hash256, 
time.Duration, error) { +func (u *uploader) execute(req *sectorUploadReq) (time.Duration, error) { // grab fields u.mu.Lock() host := u.host @@ -202,7 +204,7 @@ func (u *uploader) execute(req *sectorUploadReq) (types.Hash256, time.Duration, // acquire contract lock lockID, err := u.cs.AcquireContract(req.sector.ctx, fcid, req.contractLockPriority, req.contractLockDuration) if err != nil { - return types.Hash256{}, 0, err + return 0, err } // defer the release @@ -220,26 +222,26 @@ func (u *uploader) execute(req *sectorUploadReq) (types.Hash256, time.Duration, // fetch the revision rev, err := host.FetchRevision(ctx, defaultRevisionFetchTimeout) if err != nil { - return types.Hash256{}, 0, err + return 0, err } else if rev.RevisionNumber == math.MaxUint64 { - return types.Hash256{}, 0, errMaxRevisionReached + return 0, errMaxRevisionReached } // update the bus if err := u.os.AddUploadingSector(ctx, req.uploadID, fcid, req.sector.root); err != nil { - return types.Hash256{}, 0, fmt.Errorf("failed to add uploading sector to contract %v, err: %v", fcid, err) + return 0, fmt.Errorf("failed to add uploading sector to contract %v, err: %v", fcid, err) } // upload the sector start := time.Now() - root, err := host.UploadSector(ctx, req.sector.sectorData(), rev) + err = host.UploadSector(ctx, req.sector.root, req.sector.sectorData(), rev) if err != nil { - return types.Hash256{}, 0, fmt.Errorf("failed to upload sector to contract %v, err: %v", fcid, err) + return 0, fmt.Errorf("failed to upload sector to contract %v, err: %v", fcid, err) } // calculate elapsed time elapsed := time.Since(start) - return root, elapsed, nil + return elapsed, nil } func (u *uploader) pop() *sectorUploadReq { From 67a6c871389bc422bd72df7a15bc93d10ac0f627 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 22 Feb 2024 13:38:45 +0100 Subject: [PATCH 084/172] worker: update benchmark results --- worker/bench_test.go | 8 ++++---- worker/upload.go | 6 ++++++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/worker/bench_test.go b/worker/bench_test.go index f864df9ca..4748f3d85 100644 --- a/worker/bench_test.go +++ b/worker/bench_test.go @@ -21,7 +21,7 @@ func (z *zeroReader) Read(p []byte) (n int, err error) { // BenchmarkUploaderSingleObject benchmarks uploading a single object. // // Speed | CPU | Commit -// 232.97 MB/s | M2 Pro | 26d3119 +// 433.86 MB/s | M2 Pro | bae6e77 func BenchmarkUploaderSingleObject(b *testing.B) { w := newMockWorker() @@ -46,7 +46,7 @@ func BenchmarkUploaderSingleObject(b *testing.B) { // BenchmarkUploaderMultiObject benchmarks uploading one object per slab. // // Speed | CPU | Commit -// 185.10 MB/s | M2 Pro | 26d3119 +// 282.47 MB/s | M2 Pro | bae6e77 func BenchmarkUploaderMultiObject(b *testing.B) { w := newMockWorker() @@ -75,7 +75,7 @@ func BenchmarkUploaderMultiObject(b *testing.B) { // slabs. // // Speed | CPU | Commit -// 1668.87 MB/s | M2 Pro | 26d3119 +// 1658.49 MB/s | M2 Pro | bae6e77 func BenchmarkSectorRoot30Goroutines(b *testing.B) { data := make([]byte, rhpv2.SectorSize) b.SetBytes(int64(rhpv2.SectorSize)) @@ -108,7 +108,7 @@ func BenchmarkSectorRoot30Goroutines(b *testing.B) { // BenchmarkSectorRootSingleGoroutine benchmarks the SectorRoot function. 
// // Speed | CPU | Commit -// 176.91 MB/s | M2 Pro | 26d3119 +// 177.33 MB/s | M2 Pro | bae6e77 func BenchmarkSectorRootSingleGoroutine(b *testing.B) { data := make([]byte, rhpv2.SectorSize) b.SetBytes(rhpv2.SectorSize) diff --git a/worker/upload.go b/worker/upload.go index 63be07b2b..8bd87f881 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -770,6 +770,12 @@ func (u *upload) newSlabUpload(ctx context.Context, shards [][]byte, uploaders [ sCtx, sCancel := context.WithCancel(ctx) // create the sector + // NOTE: we are computing the sector root here and pass it all the + // way down to the RPC to avoid having to recompute it for the proof + // verification. This is necessary because we need it ahead of time + // for the call to AddUploadingSector in uploader.go + // Once we upload to temp storage we don't need AddUploadingSector + // anymore and can move it back to the RPC. sectors[idx] = &sectorUpload{ data: (*[rhpv2.SectorSize]byte)(shards[idx]), index: idx, From 3f11ef698af02f1af58236c297774871401e5546 Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 22 Feb 2024 14:09:12 +0100 Subject: [PATCH 085/172] worker: implement PR remarks --- worker/rhpv3.go | 50 ++++++++++++++++--------------------------------- 1 file changed, 16 insertions(+), 34 deletions(-) diff --git a/worker/rhpv3.go b/worker/rhpv3.go index 5fbcd3ad6..ee2dfcd85 100644 --- a/worker/rhpv3.go +++ b/worker/rhpv3.go @@ -385,19 +385,14 @@ func withAccountLock(ctx context.Context, as AccountStore, id rhpv3.Account, hk if err != nil { return err } + err = fn(acc) - defer func() { - select { - case <-ctx.Done(): - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(context.Background(), time.Minute) - defer cancel() - default: - } - as.UnlockAccount(ctx, acc.ID, lockID) - }() + // unlock account + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + _ = as.UnlockAccount(ctx, acc.ID, lockID) // ignore error + cancel() - return fn(acc) + return nil } // Balance returns the account balance. @@ -450,38 +445,25 @@ func (a *account) WithWithdrawal(ctx context.Context, amtFn func() (types.Curren // execute amtFn amt, err := amtFn() + + // in case of an insufficient balance, we schedule a sync if isBalanceInsufficient(err) { - // in case of an insufficient balance, we schedule a sync - if scheduleErr := a.scheduleSync(); scheduleErr != nil { - err = fmt.Errorf("%w; failed to set requiresSync flag on bus, error: %v", err, scheduleErr) - } + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + err = errors.Join(err, a.as.ScheduleSync(ctx, a.id, a.host)) + cancel() } - // if an amount was returned, we withdraw it. 
- if withdrawErr := a.withdrawFromBalance(amt); withdrawErr != nil { - err = fmt.Errorf("%w; failed to withdraw from account, error: %v", err, withdrawErr) + // if an amount was returned, we withdraw it + if !amt.IsZero() { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + err = errors.Join(err, a.as.AddBalance(ctx, a.id, a.host, new(big.Int).Neg(amt.Big()))) + cancel() } return err }) } -func (a *account) withdrawFromBalance(amt types.Currency) error { - if amt.IsZero() { - return nil - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - return a.as.AddBalance(ctx, a.id, a.host, new(big.Int).Neg(amt.Big())) -} - -func (a *account) scheduleSync() error { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - return a.as.ScheduleSync(ctx, a.id, a.host) -} - // deriveAccountKey derives an account plus key for a given host and worker. // Each worker has its own account for a given host. That makes concurrency // around keeping track of an accounts balance and refilling it a lot easier in From 2827f0d2967cc2c04daf671d935bc7c069e97574 Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 22 Feb 2024 14:24:57 +0100 Subject: [PATCH 086/172] worker: simplify tryUploadPackedSlab --- worker/upload.go | 71 ++++++++++++++++++++++++------------------------ worker/worker.go | 9 ++++++ 2 files changed, 44 insertions(+), 36 deletions(-) diff --git a/worker/upload.go b/worker/upload.go index 5d7c4b706..84ecae444 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -152,15 +152,6 @@ func (w *worker) initUploadManager(maxMemory, maxOverdrive uint64, overdriveTime w.uploadManager = newUploadManager(w.shutdownCtx, w, mm, w.bus, w.bus, maxOverdrive, overdriveTimeout, w.contractLockingDuration, logger) } -func (w *worker) isStopped() bool { - select { - case <-w.shutdownCtx.Done(): - return true - default: - } - return false -} - func (w *worker) upload(ctx context.Context, r io.Reader, contracts []api.ContractMetadata, up uploadParameters, opts ...UploadOption) (_ string, err error) { // apply the options for _, opt := range opts { @@ -196,11 +187,20 @@ func (w *worker) upload(ctx context.Context, r io.Reader, contracts []api.Contra if bufferSizeLimitReached { mem := w.uploadManager.mm.AcquireMemory(ctx, up.rs.SlabSizeWithRedundancy()) if mem != nil { - _, err := w.tryUploadPackedSlab(ctx, mem, defaultPackedSlabsLockDuration, up.rs, up.contractSet, lockingPriorityBlockedUpload) + defer mem.Release() + + // fetch packed slab to upload + packedSlabs, err := w.bus.PackedSlabsForUpload(ctx, lockingPriorityBlockedUpload, uint8(up.rs.MinShards), uint8(up.rs.TotalShards), up.contractSet, 1) if err != nil { - w.logger.Errorf("couldn't upload packed slabs, err: %v", err) + return "", fmt.Errorf("couldn't fetch packed slabs from bus: %v", err) + } + + // upload packed slab + if len(packedSlabs) > 0 { + if err := w.tryUploadPackedSlab(ctx, mem, packedSlabs[0], up.rs, up.contractSet, lockingPriorityBlockedUpload); err != nil { + w.logger.Errorf("couldn't upload packed slabs, err: %v", err) + } } - mem.Release() } } @@ -244,8 +244,22 @@ func (w *worker) threadedUploadPackedSlabs(rs api.RedundancySettings, contractSe break // interrupted } + // fetch packed slab to upload + packedSlabs, err := w.bus.PackedSlabsForUpload(interruptCtx, defaultPackedSlabsLockDuration, uint8(rs.MinShards), uint8(rs.TotalShards), contractSet, 1) + if err != nil { + mu.Lock() + errs = errors.Join(errs, fmt.Errorf("couldn't fetch packed slabs from bus: 
%v", err)) + mu.Unlock() + } + + // no more packed slabs to upload + if len(packedSlabs) == 0 { + mem.Release() + break + } + wg.Add(1) - go func() { + go func(ps api.PackedSlab) { defer wg.Done() defer mem.Release() @@ -259,51 +273,36 @@ func (w *worker) threadedUploadPackedSlabs(rs api.RedundancySettings, contractSe ctx = context.WithValue(ctx, keyInteractionRecorder, w) // try to upload a packed slab, if there were no packed slabs left to upload ok is false - ok, err := w.tryUploadPackedSlab(ctx, mem, defaultPackedSlabsLockDuration, rs, contractSet, lockPriority) - if err != nil { + if err := w.tryUploadPackedSlab(ctx, mem, ps, rs, contractSet, lockPriority); err != nil { mu.Lock() errs = errors.Join(errs, err) mu.Unlock() interruptCancel() // prevent new uploads from being launched - } else if !ok { - interruptCancel() // no more packed slabs to upload - } else if w.isStopped() { - interruptCancel() // worker shut down } - }() + }(packedSlabs[0]) } // wait for all threads to finish wg.Wait() - // return collected errors + // log errors if err := errors.Join(errs); err != nil { w.logger.Errorf("couldn't upload packed slabs, err: %v", err) } return } -func (w *worker) tryUploadPackedSlab(ctx context.Context, mem Memory, lockingDuration time.Duration, rs api.RedundancySettings, contractSet string, lockPriority int) (bool, error) { - // fetch packed slab to upload - packedSlabs, err := w.bus.PackedSlabsForUpload(ctx, lockingDuration, uint8(rs.MinShards), uint8(rs.TotalShards), contractSet, 1) - if err != nil { - err = fmt.Errorf("couldn't fetch packed slabs from bus: %v", err) - return false, err - } else if len(packedSlabs) == 0 { - return false, nil // no more slabs - } - ps := packedSlabs[0] - +func (w *worker) tryUploadPackedSlab(ctx context.Context, mem Memory, ps api.PackedSlab, rs api.RedundancySettings, contractSet string, lockPriority int) error { // fetch contracts contracts, err := w.bus.Contracts(ctx, api.ContractsOpts{ContractSet: contractSet}) if err != nil { - return false, fmt.Errorf("couldn't fetch packed slabs from bus: %v", err) + return fmt.Errorf("couldn't fetch packed slabs from bus: %v", err) } // fetch upload params up, err := w.bus.UploadParams(ctx) if err != nil { - return false, fmt.Errorf("couldn't fetch upload params from bus: %v", err) + return fmt.Errorf("couldn't fetch upload params from bus: %v", err) } // attach gouging checker to the context @@ -312,10 +311,10 @@ func (w *worker) tryUploadPackedSlab(ctx context.Context, mem Memory, lockingDur // upload packed slab err = w.uploadManager.UploadPackedSlab(ctx, rs, ps, mem, contracts, up.CurrentHeight, lockPriority) if err != nil { - return false, fmt.Errorf("couldn't upload packed slab, err: %v", err) + return fmt.Errorf("couldn't upload packed slab, err: %v", err) } - return true, nil + return nil } func newUploadManager(ctx context.Context, hm HostManager, mm MemoryManager, os ObjectStore, cs ContractStore, maxOverdrive uint64, overdriveTimeout time.Duration, contractLockDuration time.Duration, logger *zap.SugaredLogger) *uploadManager { diff --git a/worker/worker.go b/worker/worker.go index 8a2a9b1f3..a35809664 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -210,6 +210,15 @@ type worker struct { logger *zap.SugaredLogger } +func (w *worker) isStopped() bool { + select { + case <-w.shutdownCtx.Done(): + return true + default: + } + return false +} + func (w *worker) withRevision(ctx context.Context, fetchTimeout time.Duration, fcid types.FileContractID, hk types.PublicKey, siamuxAddr string, 
lockPriority int, fn func(rev types.FileContractRevision) error) error { return w.withContractLock(ctx, fcid, lockPriority, func() error { h := w.Host(hk, fcid, siamuxAddr) From 42b09afa898c9b14ebae0cb47f8b07bf3edceb6c Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 22 Feb 2024 14:27:53 +0100 Subject: [PATCH 087/172] worker: revert SlabSize rename --- api/setting.go | 8 ++++---- internal/testing/cluster_test.go | 2 +- internal/testing/pruning_test.go | 2 +- worker/upload.go | 12 ++++++------ worker/upload_test.go | 2 +- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/api/setting.go b/api/setting.go index e5fd6da77..47785c9aa 100644 --- a/api/setting.go +++ b/api/setting.go @@ -128,12 +128,12 @@ func (rs RedundancySettings) Redundancy() float64 { // SlabSize returns the size of a slab. func (rs RedundancySettings) SlabSize() uint64 { - return uint64(rs.MinShards) * rhpv2.SectorSize + return uint64(rs.TotalShards) * rhpv2.SectorSize } -// SlabSizeWithRedundancy returns the size of a slab with redundancy. -func (rs RedundancySettings) SlabSizeWithRedundancy() uint64 { - return uint64(rs.TotalShards) * rhpv2.SectorSize +// SlabSizeNoRedundancy returns the size of a slab without redundancy. +func (rs RedundancySettings) SlabSizeNoRedundancy() uint64 { + return uint64(rs.MinShards) * rhpv2.SectorSize } // Validate returns an error if the redundancy settings are not considered diff --git a/internal/testing/cluster_test.go b/internal/testing/cluster_test.go index 26b96f4ba..d290faf27 100644 --- a/internal/testing/cluster_test.go +++ b/internal/testing/cluster_test.go @@ -2349,7 +2349,7 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) { defer cluster.Shutdown() b := cluster.Bus w := cluster.Worker - slabSize := testRedundancySettings.SlabSize() + slabSize := testRedundancySettings.SlabSizeNoRedundancy() tt := cluster.tt // start a new multipart upload. 
We upload the parts in reverse order diff --git a/internal/testing/pruning_test.go b/internal/testing/pruning_test.go index 333763fa0..477069365 100644 --- a/internal/testing/pruning_test.go +++ b/internal/testing/pruning_test.go @@ -206,7 +206,7 @@ func TestSectorPruning(t *testing.T) { tt.Retry(100, 100*time.Millisecond, func() error { res, err = b.PrunableData(context.Background()) tt.OK(err) - if res.TotalPrunable != uint64(math.Ceil(float64(numObjects)/2))*rs.SlabSizeWithRedundancy() { + if res.TotalPrunable != uint64(math.Ceil(float64(numObjects)/2))*rs.SlabSize() { return fmt.Errorf("unexpected prunable data %v", n) } return nil diff --git a/worker/upload.go b/worker/upload.go index 84ecae444..1411dd056 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -185,7 +185,7 @@ func (w *worker) upload(ctx context.Context, r io.Reader, contracts []api.Contra // try and upload one slab synchronously if bufferSizeLimitReached { - mem := w.uploadManager.mm.AcquireMemory(ctx, up.rs.SlabSizeWithRedundancy()) + mem := w.uploadManager.mm.AcquireMemory(ctx, up.rs.SlabSize()) if mem != nil { defer mem.Release() @@ -239,7 +239,7 @@ func (w *worker) threadedUploadPackedSlabs(rs api.RedundancySettings, contractSe var wg sync.WaitGroup for { // block until we have memory - mem := w.uploadManager.mm.AcquireMemory(interruptCtx, rs.SlabSizeWithRedundancy()) + mem := w.uploadManager.mm.AcquireMemory(interruptCtx, rs.SlabSize()) if mem == nil { break // interrupted } @@ -439,8 +439,8 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a numSlabsChan := make(chan int, 1) // prepare slab sizes + slabSizeNoRedundancy := up.rs.SlabSizeNoRedundancy() slabSize := up.rs.SlabSize() - slabSizeWithRedundancy := up.rs.SlabSizeWithRedundancy() var partialSlab []byte // launch uploads in a separate goroutine @@ -455,14 +455,14 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a default: } // acquire memory - mem := mgr.mm.AcquireMemory(ctx, slabSizeWithRedundancy) + mem := mgr.mm.AcquireMemory(ctx, slabSize) if mem == nil { return // interrupted } // read next slab's data - data := make([]byte, slabSize) - length, err := io.ReadFull(io.LimitReader(cr, int64(slabSize)), data) + data := make([]byte, slabSizeNoRedundancy) + length, err := io.ReadFull(io.LimitReader(cr, int64(slabSizeNoRedundancy)), data) if err == io.EOF { mem.Release() diff --git a/worker/upload_test.go b/worker/upload_test.go index 9a285efa5..8020f69f7 100644 --- a/worker/upload_test.go +++ b/worker/upload_test.go @@ -185,7 +185,7 @@ func TestUploadPackedSlab(t *testing.T) { t.Fatal("expected 1 packed slab") } ps := pss[0] - mem := mm.AcquireMemory(context.Background(), params.rs.SlabSizeWithRedundancy()) + mem := mm.AcquireMemory(context.Background(), params.rs.SlabSize()) // upload the packed slab err = ul.UploadPackedSlab(context.Background(), params.rs, ps, mem, w.contracts(), 0, lockingPriorityUpload) From bcdeb0de465e8ac3936b314bca6b6ea641f76ce3 Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 22 Feb 2024 14:41:01 +0100 Subject: [PATCH 088/172] worker: return err --- worker/rhpv3.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/worker/rhpv3.go b/worker/rhpv3.go index ee2dfcd85..25c26e42d 100644 --- a/worker/rhpv3.go +++ b/worker/rhpv3.go @@ -392,7 +392,7 @@ func withAccountLock(ctx context.Context, as AccountStore, id rhpv3.Account, hk _ = as.UnlockAccount(ctx, acc.ID, lockID) // ignore error cancel() - return nil + return err } // Balance returns the account balance. 
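The one-line change above closes a gap opened in PATCH 085: once the unlock moved out of the deferred closure, withAccountLock returned nil unconditionally and silently dropped the callback's error. Below is a minimal sketch of the intended shape of such a wrapper; the Store interface, its method signatures and the string account ID are illustrative stand-ins, not renterd's actual AccountStore API.

package accounts

import (
	"context"
	"time"
)

// Store is a hypothetical stand-in for the account store used by the worker.
type Store interface {
	LockAccount(ctx context.Context, id string) (lockID uint64, err error)
	UnlockAccount(ctx context.Context, id string, lockID uint64) error
}

// withAccountLock runs fn while holding the account lock. The callback's
// error is captured before unlocking and returned at the end; returning nil
// here instead is exactly the bug the patch above fixes.
func withAccountLock(ctx context.Context, s Store, id string, fn func() error) error {
	lockID, err := s.LockAccount(ctx, id)
	if err != nil {
		return err
	}
	err = fn()

	// unlock on a fresh context so a canceled caller can't leak the lock
	uCtx, cancel := context.WithTimeout(context.Background(), time.Minute)
	_ = s.UnlockAccount(uCtx, id, lockID) // ignore unlock error
	cancel()

	return err
}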
From 5ec6f3676cfdab2438550063751b87c97dd7bfe6 Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 22 Feb 2024 14:46:14 +0100 Subject: [PATCH 089/172] worker: remove interaction recorder --- worker/upload.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/worker/upload.go b/worker/upload.go index 1411dd056..becddde21 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -269,9 +269,6 @@ func (w *worker) threadedUploadPackedSlabs(rs api.RedundancySettings, contractSe ctx, cancel := context.WithTimeout(context.Background(), defaultPackedSlabsUploadTimeout) defer cancel() - // attach interaction recorder to the context - ctx = context.WithValue(ctx, keyInteractionRecorder, w) - // try to upload a packed slab, if there were no packed slabs left to upload ok is false if err := w.tryUploadPackedSlab(ctx, mem, ps, rs, contractSet, lockPriority); err != nil { mu.Lock() From aa29aff5bc98430b74680f2c46c0f82e443bea5d Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 22 Feb 2024 15:15:12 +0100 Subject: [PATCH 090/172] testing: fix TestUploadPacking --- worker/upload.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/worker/upload.go b/worker/upload.go index becddde21..53a7b7842 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -190,7 +190,7 @@ func (w *worker) upload(ctx context.Context, r io.Reader, contracts []api.Contra defer mem.Release() // fetch packed slab to upload - packedSlabs, err := w.bus.PackedSlabsForUpload(ctx, lockingPriorityBlockedUpload, uint8(up.rs.MinShards), uint8(up.rs.TotalShards), up.contractSet, 1) + packedSlabs, err := w.bus.PackedSlabsForUpload(ctx, defaultPackedSlabsLockDuration, uint8(up.rs.MinShards), uint8(up.rs.TotalShards), up.contractSet, 1) if err != nil { return "", fmt.Errorf("couldn't fetch packed slabs from bus: %v", err) } From 9af03a3d007545954baa34b0cbd350ed51ea2081 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 22 Feb 2024 15:53:05 +0100 Subject: [PATCH 091/172] worker: add BenchmarkDownloaderSingleObject --- worker/bench_test.go | 43 +++++++++++++++++++++++++++++++++++++------ 1 file changed, 37 insertions(+), 6 deletions(-) diff --git a/worker/bench_test.go b/worker/bench_test.go index 4748f3d85..f896ce993 100644 --- a/worker/bench_test.go +++ b/worker/bench_test.go @@ -1,12 +1,15 @@ package worker import ( + "bytes" "context" "io" "sync" "testing" rhpv2 "go.sia.tech/core/rhp/v2" + "go.sia.tech/renterd/api" + "lukechampine.com/frand" ) // zeroReader is a reader that leaves the buffer unchanged and returns no error. @@ -18,6 +21,38 @@ func (z *zeroReader) Read(p []byte) (n int, err error) { return len(p), nil } +// BenchmarkDownloaderSingleObject benchmarks downloading a single, slab-sized +// object. 
+// 485.48 MB/s | M2 Pro | bae6e77 +func BenchmarkDownloaderSingleObject(b *testing.B) { + w := newMockWorker() + + up := testParameters(b.TempDir()) + up.rs.MinShards = 10 + up.rs.TotalShards = 30 + up.packing = false + w.addHosts(up.rs.TotalShards) + + data := bytes.NewReader(frand.Bytes(int(up.rs.SlabSizeNoRedundancy()))) + _, _, err := w.ul.Upload(context.Background(), data, w.contracts(), up, lockingPriorityUpload) + if err != nil { + b.Fatal(err) + } + o, err := w.os.Object(context.Background(), testBucket, up.path, api.GetObjectOptions{}) + if err != nil { + b.Fatal(err) + } + + b.SetBytes(o.Object.Size) + b.ResetTimer() + for i := 0; i < b.N; i++ { + err = w.dl.DownloadObject(context.Background(), io.Discard, *o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + if err != nil { + b.Fatal(err) + } + } +} + // BenchmarkUploaderSingleObject benchmarks uploading a single object. // // Speed | CPU | Commit @@ -29,14 +64,12 @@ func BenchmarkUploaderSingleObject(b *testing.B) { up.rs.MinShards = 10 up.rs.TotalShards = 30 up.packing = false - w.addHosts(up.rs.TotalShards) - // create a reader that returns dev/null data := io.LimitReader(&zeroReader{}, int64(b.N*rhpv2.SectorSize*up.rs.MinShards)) b.SetBytes(int64(rhpv2.SectorSize * up.rs.MinShards)) - b.ResetTimer() + _, _, err := w.ul.Upload(context.Background(), data, w.contracts(), up, lockingPriorityUpload) if err != nil { b.Fatal(err) @@ -54,13 +87,11 @@ func BenchmarkUploaderMultiObject(b *testing.B) { up.rs.MinShards = 10 up.rs.TotalShards = 30 up.packing = false - w.addHosts(up.rs.TotalShards) - // create a reader that returns dev/null b.SetBytes(int64(rhpv2.SectorSize * up.rs.MinShards)) - b.ResetTimer() + for i := 0; i < b.N; i++ { data := io.LimitReader(&zeroReader{}, int64(rhpv2.SectorSize*up.rs.MinShards)) _, _, err := w.ul.Upload(context.Background(), data, w.contracts(), up, lockingPriorityUpload) From c9dc1b6cf2e09e7678db1de4eb2b2b7db1ad01f5 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 22 Feb 2024 16:33:09 +0100 Subject: [PATCH 092/172] worker: decrypt in parallel --- object/slab.go | 16 +++++++++++----- worker/download.go | 13 +++++++------ 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/object/slab.go b/object/slab.go index aa8bb7d45..f2762abf3 100644 --- a/object/slab.go +++ b/object/slab.go @@ -158,12 +158,18 @@ func (ss SlabSlice) SectorRegion() (offset, length uint32) { // slice offset), using a different nonce for each shard. func (ss SlabSlice) Decrypt(shards [][]byte) { offset := ss.Offset / (rhpv2.LeafSize * uint32(ss.MinShards)) - for i, shard := range shards { - nonce := [24]byte{1: byte(i)} - c, _ := chacha20.NewUnauthenticatedCipher(ss.Key.entropy[:], nonce[:]) - c.SetCounter(offset) - c.XORKeyStream(shard, shard) + var wg sync.WaitGroup + for i := range shards { + wg.Add(1) + go func(i int) { + nonce := [24]byte{1: byte(i)} + c, _ := chacha20.NewUnauthenticatedCipher(ss.Key.entropy[:], nonce[:]) + c.SetCounter(offset) + c.XORKeyStream(shards[i], shards[i]) + wg.Done() + }(i) } + wg.Wait() } // Recover recovers a slice of slab data from the supplied shards. 
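The Decrypt change above is the heart of this patch: instead of XORing the shards sequentially, every shard gets its own goroutine and its own cipher instance, with the nonce derived from the shard index and the counter pre-set to the slice offset. Because the keystream is seekable via SetCounter, each goroutine can start at the right position independently, which is what makes the fan-out safe. The following is a self-contained sketch of that pattern, assuming golang.org/x/crypto/chacha20 and a 32-byte key; decryptShards and its simplified signature are illustrative, not renterd's API.

package main

import (
	"fmt"
	"sync"

	"golang.org/x/crypto/chacha20"
)

// decryptShards XORs each shard with its own keystream in parallel. The
// 24-byte nonce selects XChaCha20; byte 1 of the nonce carries the shard
// index, mirroring the diff above.
func decryptShards(key [32]byte, shards [][]byte, offset uint32) {
	var wg sync.WaitGroup
	for i := range shards {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			nonce := [24]byte{1: byte(i)}
			c, _ := chacha20.NewUnauthenticatedCipher(key[:], nonce[:])
			c.SetCounter(offset) // jump to the slice offset in the keystream
			c.XORKeyStream(shards[i], shards[i])
		}(i)
	}
	wg.Wait()
}

func main() {
	shards := [][]byte{make([]byte, 64), make([]byte, 64), make([]byte, 64)}
	decryptShards([32]byte{}, shards, 0)
	fmt.Println("processed", len(shards), "shards in parallel")
}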
diff --git a/worker/download.go b/worker/download.go index 462a2292d..a1cc8f501 100644 --- a/worker/download.go +++ b/worker/download.go @@ -195,12 +195,13 @@ func (mgr *downloadManager) DownloadObject(ctx context.Context, w io.Writer, o o hosts[c.HostKey] = struct{}{} } - // buffer the writer - bw := bufio.NewWriter(w) - defer bw.Flush() - // create the cipher writer - cw := o.Key.Decrypt(bw, offset) + // create the cipher writer + cw := o.Key.Decrypt(w, offset) + + // buffer the writer we recover to making sure that we don't hammer the + // response writer with tiny writes + bw := bufio.NewWriter(cw) + defer bw.Flush() // create response chan and ensure it's closed properly var wg sync.WaitGroup @@ -322,7 +323,7 @@ outer: } else { // Regular slab. slabs[respIndex].Decrypt(next.shards) - err := slabs[respIndex].Recover(cw, next.shards) + err := slabs[respIndex].Recover(bw, next.shards) if err != nil { mgr.logger.Errorf("failed to recover slab %v: %v", respIndex, err) return err From aa08202054f0b32a0a622cc988094c0cac1a720a Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 22 Feb 2024 16:34:28 +0100 Subject: [PATCH 093/172] worker: update download benchmark results --- worker/bench_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/worker/bench_test.go b/worker/bench_test.go index f896ce993..575e4640f 100644 --- a/worker/bench_test.go +++ b/worker/bench_test.go @@ -23,7 +23,7 @@ func (z *zeroReader) Read(p []byte) (n int, err error) { // BenchmarkDownloaderSingleObject benchmarks downloading a single, slab-sized // object. -// 485.48 MB/s | M2 Pro | bae6e77 +// 1036.74 MB/s | M2 Pro | c9dc1b6 func BenchmarkDownloaderSingleObject(b *testing.B) { w := newMockWorker() From 29f4201d2d48802329474607fb3427e8adcebd07 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 22 Feb 2024 16:47:37 +0100 Subject: [PATCH 094/172] worker: remove irrelevant benchmarks --- worker/bench_test.go | 49 -------------------------------------------- 1 file changed, 49 deletions(-) diff --git a/worker/bench_test.go b/worker/bench_test.go index 575e4640f..552eca17c 100644 --- a/worker/bench_test.go +++ b/worker/bench_test.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "io" - "sync" "testing" rhpv2 "go.sia.tech/core/rhp/v2" @@ -100,51 +99,3 @@ func BenchmarkUploaderMultiObject(b *testing.B) { } } } - -// BenchmarkSectorRoot30Goroutines benchmarks the SectorRoot function with 30 -// goroutines processing roots in parallel to simulate sequential uploads of -// slabs. -// -// Speed | CPU | Commit -// 1658.49 MB/s | M2 Pro | bae6e77 -func BenchmarkSectorRoot30Goroutines(b *testing.B) { - data := make([]byte, rhpv2.SectorSize) - b.SetBytes(int64(rhpv2.SectorSize)) - - // spin up workers - c := make(chan struct{}) - work := func() { - for range c { - rhpv2.SectorRoot((*[rhpv2.SectorSize]byte)(data)) - } - } - var wg sync.WaitGroup - for i := 0; i < 30; i++ { - wg.Add(1) - go func() { - work() - wg.Done() - }() - } - b.ResetTimer() - - // run the benchmark - for i := 0; i < b.N; i++ { - c <- struct{}{} - } - close(c) - wg.Wait() -} - -// BenchmarkSectorRootSingleGoroutine benchmarks the SectorRoot function. 
-// -// Speed | CPU | Commit -// 177.33 MB/s | M2 Pro | bae6e77 -func BenchmarkSectorRootSingleGoroutine(b *testing.B) { - data := make([]byte, rhpv2.SectorSize) - b.SetBytes(rhpv2.SectorSize) - b.ResetTimer() - for i := 0; i < b.N; i++ { - rhpv2.SectorRoot((*[rhpv2.SectorSize]byte)(data)) - } -} From 10e88fd4a00196b467a5b3d3d0ac26564ec861c6 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 22 Feb 2024 18:15:46 +0100 Subject: [PATCH 095/172] stores: fix mysql migration --- .../migration_00004_prune_slabs_cascade.sql | 22 ++++++------------- .../migration_00004_prune_slabs_cascade.sql | 8 +++---- 2 files changed, 11 insertions(+), 19 deletions(-) diff --git a/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql b/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql index 0b1c06994..9014582e0 100644 --- a/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql +++ b/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql @@ -1,24 +1,16 @@ --- prune manually before creating trigger -DELETE slabs -FROM slabs -LEFT JOIN slices ON slices.db_slab_id = slabs.id -WHERE slices.db_object_id IS NULL -AND slices.db_multipart_part_id IS NULL -AND slabs.db_buffered_slab_id IS NULL; - -- add ON DELETE CASCADE to slices ALTER TABLE slices DROP FOREIGN KEY fk_objects_slabs; -ALTER TABLE slices ADD CONSTRAINT fk_objects_slabs FOREIGN KEY (db_object_id) REFERENCES objects (id) ON DELETE CASCADE, +ALTER TABLE slices ADD CONSTRAINT fk_objects_slabs FOREIGN KEY (db_object_id) REFERENCES objects (id) ON DELETE CASCADE; ALTER TABLE slices DROP FOREIGN KEY fk_multipart_parts_slabs; -ALTER TABLE slices ADD CONSTRAINT fk_multipart_parts_slabs FOREIGN KEY (db_multipart_part_id) REFERENCES multipart_parts (id) ON DELETE CASCADE, +ALTER TABLE slices ADD CONSTRAINT fk_multipart_parts_slabs FOREIGN KEY (db_multipart_part_id) REFERENCES multipart_parts (id) ON DELETE CASCADE; -- add ON DELETE CASCADE to multipart_parts ALTER TABLE multipart_parts DROP FOREIGN KEY fk_multipart_uploads_parts; -ALTER TABLE multipart_parts ADD CONSTRAINT fk_multipart_uploads_parts FOREIGN KEY (db_multipart_upload_id) REFERENCES multipart_uploads (id) ON DELETE CASCADE +ALTER TABLE multipart_parts ADD CONSTRAINT fk_multipart_uploads_parts FOREIGN KEY (db_multipart_upload_id) REFERENCES multipart_uploads (id) ON DELETE CASCADE; -- drop triggers -DROP TRIGGER IF EXISTS before_delete_on_objects_delete_slices -DROP TRIGGER IF EXISTS before_delete_on_multipart_uploads_delete_multipart_parts -DROP TRIGGER IF EXISTS before_delete_on_multipart_parts_delete_slices -DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs +DROP TRIGGER IF EXISTS before_delete_on_objects_delete_slices; +DROP TRIGGER IF EXISTS before_delete_on_multipart_uploads_delete_multipart_parts; +DROP TRIGGER IF EXISTS before_delete_on_multipart_parts_delete_slices; +DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs; diff --git a/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql b/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql index 1132dd2f5..38cd40199 100644 --- a/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql +++ b/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql @@ -22,7 +22,7 @@ CREATE INDEX `idx_multipart_parts_etag` ON `multipart_parts`(`etag`); PRAGMA foreign_keys=on; -- drop triggers -DROP TRIGGER IF EXISTS before_delete_on_objects_delete_slices -DROP TRIGGER IF EXISTS 
before_delete_on_multipart_uploads_delete_multipart_parts -DROP TRIGGER IF EXISTS before_delete_on_multipart_parts_delete_slices -DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs +DROP TRIGGER IF EXISTS before_delete_on_objects_delete_slices; +DROP TRIGGER IF EXISTS before_delete_on_multipart_uploads_delete_multipart_parts; +DROP TRIGGER IF EXISTS before_delete_on_multipart_parts_delete_slices; +DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs; From 3e92f08d4fabab1992970797a9200afe3141b568 Mon Sep 17 00:00:00 2001 From: ChrisSchinnerl Date: Fri, 23 Feb 2024 00:08:30 +0000 Subject: [PATCH 096/172] ui: v0.46.0 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e727483d1..0ce29b965 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 go.sia.tech/mux v1.2.0 go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca - go.sia.tech/web/renterd v0.45.0 + go.sia.tech/web/renterd v0.46.0 go.uber.org/zap v1.26.0 golang.org/x/crypto v0.19.0 golang.org/x/term v0.17.0 diff --git a/go.sum b/go.sum index 006a31ea6..2570e9fc5 100644 --- a/go.sum +++ b/go.sum @@ -255,8 +255,8 @@ go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca h1:aZMg2AKevn7jKx+wlusWQf go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca/go.mod h1:h/1afFwpxzff6/gG5i1XdAgPK7dEY6FaibhK7N5F86Y= go.sia.tech/web v0.0.0-20231213145933-3f175a86abff h1:/nE7nhewDRxzEdtSKT4SkiUwtjPSiy7Xz7CHEW3MaGQ= go.sia.tech/web v0.0.0-20231213145933-3f175a86abff/go.mod h1:RKODSdOmR3VtObPAcGwQqm4qnqntDVFylbvOBbWYYBU= -go.sia.tech/web/renterd v0.45.0 h1:5kSiDnHYRacg3JideH9Cl9qHzcZiKnBR0fWRap169hU= -go.sia.tech/web/renterd v0.45.0/go.mod h1:FgXrdmAnu591a3h96RB/15pMZ74xO9457g902uE06BM= +go.sia.tech/web/renterd v0.46.0 h1:BMVg4i7LxSlc8wZ4T0EG1k3EK4JxVIzCfD3/cjmwH0k= +go.sia.tech/web/renterd v0.46.0/go.mod h1:FgXrdmAnu591a3h96RB/15pMZ74xO9457g902uE06BM= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= From 99014c79fe65c3bffa5c737a768d3905351c7517 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 23 Feb 2024 09:47:48 +0100 Subject: [PATCH 097/172] stores: drop indices first --- .../main/migration_00004_prune_slabs_cascade.sql | 14 +++++++------- .../main/migration_00004_prune_slabs_cascade.sql | 14 ++++++++------ 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql b/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql index 9014582e0..c2efe3467 100644 --- a/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql +++ b/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql @@ -1,3 +1,9 @@ +-- drop triggers +DROP TRIGGER IF EXISTS before_delete_on_objects_delete_slices; +DROP TRIGGER IF EXISTS before_delete_on_multipart_uploads_delete_multipart_parts; +DROP TRIGGER IF EXISTS before_delete_on_multipart_parts_delete_slices; +DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs; + -- add ON DELETE CASCADE to slices ALTER TABLE slices DROP FOREIGN KEY fk_objects_slabs; ALTER TABLE slices ADD CONSTRAINT fk_objects_slabs FOREIGN KEY (db_object_id) REFERENCES objects (id) ON DELETE CASCADE; @@ -7,10 +13,4 @@ ALTER TABLE slices ADD CONSTRAINT fk_multipart_parts_slabs FOREIGN KEY (db_multi -- add ON DELETE 
CASCADE to multipart_parts ALTER TABLE multipart_parts DROP FOREIGN KEY fk_multipart_uploads_parts; -ALTER TABLE multipart_parts ADD CONSTRAINT fk_multipart_uploads_parts FOREIGN KEY (db_multipart_upload_id) REFERENCES multipart_uploads (id) ON DELETE CASCADE; - --- drop triggers -DROP TRIGGER IF EXISTS before_delete_on_objects_delete_slices; -DROP TRIGGER IF EXISTS before_delete_on_multipart_uploads_delete_multipart_parts; -DROP TRIGGER IF EXISTS before_delete_on_multipart_parts_delete_slices; -DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs; +ALTER TABLE multipart_parts ADD CONSTRAINT fk_multipart_uploads_parts FOREIGN KEY (db_multipart_upload_id) REFERENCES multipart_uploads (id) ON DELETE CASCADE; \ No newline at end of file diff --git a/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql b/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql index 38cd40199..03f006acd 100644 --- a/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql +++ b/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql @@ -1,5 +1,12 @@ +-- drop triggers +DROP TRIGGER IF EXISTS before_delete_on_objects_delete_slices; +DROP TRIGGER IF EXISTS before_delete_on_multipart_uploads_delete_multipart_parts; +DROP TRIGGER IF EXISTS before_delete_on_multipart_parts_delete_slices; +DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs; + PRAGMA foreign_keys=off; -- update constraints on slices +DROP TABLE IF EXISTS slices_temp; CREATE TABLE `slices_temp` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_object_id` integer,`object_index` integer,`db_multipart_part_id` integer,`db_slab_id` integer,`offset` integer,`length` integer,CONSTRAINT `fk_objects_slabs` FOREIGN KEY (`db_object_id`) REFERENCES `objects`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_multipart_parts_slabs` FOREIGN KEY (`db_multipart_part_id`) REFERENCES `multipart_parts`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_slabs_slices` FOREIGN KEY (`db_slab_id`) REFERENCES `slabs`(`id`)); INSERT INTO slices_temp SELECT `id`, `created_at`, `db_object_id`, `object_index`, `db_multipart_part_id`, `db_slab_id`, `offset`, `length` FROM slices; DROP TABLE slices; @@ -11,6 +18,7 @@ CREATE INDEX `idx_slices_db_slab_id` ON `slices`(`db_slab_id`); CREATE INDEX `idx_slices_db_multipart_part_id` ON `slices`(`db_multipart_part_id`); -- update constraints multipart_parts +DROP TABLE IF EXISTS multipart_parts_temp; CREATE TABLE `multipart_parts_temp` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`etag` text,`part_number` integer,`size` integer,`db_multipart_upload_id` integer NOT NULL,CONSTRAINT `fk_multipart_uploads_parts` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads`(`id`) ON DELETE CASCADE); INSERT INTO multipart_parts_temp SELECT * FROM multipart_parts; DROP TABLE multipart_parts; @@ -20,9 +28,3 @@ CREATE INDEX `idx_multipart_parts_db_multipart_upload_id` ON `multipart_parts`(` CREATE INDEX `idx_multipart_parts_part_number` ON `multipart_parts`(`part_number`); CREATE INDEX `idx_multipart_parts_etag` ON `multipart_parts`(`etag`); PRAGMA foreign_keys=on; - --- drop triggers -DROP TRIGGER IF EXISTS before_delete_on_objects_delete_slices; -DROP TRIGGER IF EXISTS before_delete_on_multipart_uploads_delete_multipart_parts; -DROP TRIGGER IF EXISTS before_delete_on_multipart_parts_delete_slices; -DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs; From 89b4729f083145606dedec152c28ebac409c6b3d Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 
23 Feb 2024 10:15:34 +0100 Subject: [PATCH 098/172] testing: fix TestUploadPacking --- worker/download.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/worker/download.go b/worker/download.go index a1cc8f501..9048c033b 100644 --- a/worker/download.go +++ b/worker/download.go @@ -315,7 +315,7 @@ outer: s := slabs[respIndex] if s.PartialSlab { // Partial slab. - _, err = cw.Write(s.Data) + _, err = bw.Write(s.Data) if err != nil { mgr.logger.Errorf("failed to send partial slab", respIndex, err) return err From b45b80f70b20c16b8216c99b1d7dd138f055c224 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 21 Feb 2024 13:31:44 +0100 Subject: [PATCH 099/172] autopilot: accumulate churn information into single alert --- alerts/alerts.go | 15 ++++++-- autopilot/alerts.go | 58 +++++++++++------------------ autopilot/churn.go | 63 ++++++++++++++++++++++++++++++++ autopilot/contractor.go | 10 ++++- bus/bus.go | 11 +++++- bus/client/alerts.go | 4 +- internal/testing/cluster_test.go | 6 +-- 7 files changed, 117 insertions(+), 50 deletions(-) create mode 100644 autopilot/churn.go diff --git a/alerts/alerts.go b/alerts/alerts.go index b0d4963c6..f11004dbe 100644 --- a/alerts/alerts.go +++ b/alerts/alerts.go @@ -35,6 +35,7 @@ const ( type ( Alerter interface { + Alerts(_ context.Context, opts AlertsOpts) (resp AlertsResponse, err error) RegisterAlert(_ context.Context, a Alert) error DismissAlerts(_ context.Context, ids ...types.Hash256) error } @@ -169,17 +170,18 @@ func (m *Manager) DismissAlerts(ctx context.Context, ids ...types.Hash256) error }) } -// Active returns the host's active alerts. -func (m *Manager) Active(offset, limit int) AlertsResponse { +// Alerts returns the host's active alerts. +func (m *Manager) Alerts(_ context.Context, opts AlertsOpts) (AlertsResponse, error) { m.mu.Lock() defer m.mu.Unlock() + offset, limit := opts.Offset, opts.Limit resp := AlertsResponse{ Total: len(m.alerts), } if offset >= len(m.alerts) { - return resp + return resp, nil } else if limit == -1 { limit = len(m.alerts) } @@ -197,7 +199,7 @@ func (m *Manager) Active(offset, limit int) AlertsResponse { resp.HasMore = true } resp.Alerts = alerts - return resp + return resp, nil } func (m *Manager) RegisterWebhookBroadcaster(b webhooks.Broadcaster) { @@ -231,6 +233,11 @@ func WithOrigin(alerter Alerter, origin string) Alerter { } } +// Alerts implements the Alerter interface. +func (a *originAlerter) Alerts(ctx context.Context, opts AlertsOpts) (resp AlertsResponse, err error) { + return a.alerter.Alerts(ctx, opts) +} + // RegisterAlert implements the Alerter interface. 
func (a *originAlerter) RegisterAlert(ctx context.Context, alert Alert) error { if alert.Data == nil { diff --git a/autopilot/alerts.go b/autopilot/alerts.go index 292670dc5..f4762c4d4 100644 --- a/autopilot/alerts.go +++ b/autopilot/alerts.go @@ -14,12 +14,13 @@ import ( ) var ( - alertAccountRefillID = frand.Entropy256() // constant until restarted - alertLostSectorsID = frand.Entropy256() // constant until restarted - alertLowBalanceID = frand.Entropy256() // constant until restarted - alertMigrationID = frand.Entropy256() // constant until restarted - alertPruningID = frand.Entropy256() // constant until restarted - alertRenewalFailedID = frand.Entropy256() // constant until restarted + alertAccountRefillID = randomAlertID() // constant until restarted + alertChurnID = randomAlertID() // constant until restarted + alertLostSectorsID = randomAlertID() // constant until restarted + alertLowBalanceID = randomAlertID() // constant until restarted + alertMigrationID = randomAlertID() // constant until restarted + alertPruningID = randomAlertID() // constant until restarted + alertRenewalFailedID = randomAlertID() // constant until restarted ) func alertIDForAccount(alertID [32]byte, id rhpv3.Account) types.Hash256 { @@ -54,6 +55,20 @@ func (ap *Autopilot) DismissAlert(ctx context.Context, ids ...types.Hash256) { } } +func (ap *Autopilot) HasAlert(ctx context.Context, id types.Hash256) bool { + ar, err := ap.alerts.Alerts(ctx, alerts.AlertsOpts{Offset: 0, Limit: -1}) + if err != nil { + ap.logger.Errorf("failed to fetch alerts: %v", err) + return false + } + for _, alert := range ar.Alerts { + if alert.ID == id { + return true + } + } + return false +} + func newAccountLowBalanceAlert(address types.Address, balance, allowance types.Currency, bh, renewWindow, endHeight uint64) alerts.Alert { severity := alerts.SeverityInfo if bh+renewWindow/2 >= endHeight { @@ -137,37 +152,6 @@ func newContractPruningFailedAlert(hk types.PublicKey, version string, fcid type } } -func newContractSetChangeAlert(name string, additions map[types.FileContractID]contractSetAddition, removals map[types.FileContractID]contractSetRemoval) alerts.Alert { - var hint string - if len(removals) > 0 { - hint = "A high churn rate can lead to a lot of unnecessary migrations, it might be necessary to tweak your configuration depending on the reason hosts are being discarded from the set." 
- } - - removedReasons := make(map[string]string, len(removals)) - for k, v := range removals { - removedReasons[k.String()] = v.Reason - } - - return alerts.Alert{ - ID: randomAlertID(), - Severity: alerts.SeverityInfo, - Message: "Contract set changed", - Data: map[string]any{ - "name": name, - "set_additions": additions, - "set_removals": removals, - "hint": hint, - - // TODO: these fields can be removed on the next major release, they - // contain redundant information - "added": len(additions), - "removed": len(removals), - "removals": removedReasons, - }, - Timestamp: time.Now(), - } -} - func newLostSectorsAlert(hk types.PublicKey, lostSectors uint64) alerts.Alert { return alerts.Alert{ ID: alertIDForHost(alertLostSectorsID, hk), diff --git a/autopilot/churn.go b/autopilot/churn.go new file mode 100644 index 000000000..70c4651c2 --- /dev/null +++ b/autopilot/churn.go @@ -0,0 +1,63 @@ +package autopilot + +import ( + "time" + + "go.sia.tech/core/types" + "go.sia.tech/renterd/alerts" +) + +type ( + accumulatedChurn struct { + additions map[types.FileContractID][]contractSetAddition + removals map[types.FileContractID][]contractSetRemoval + } +) + +func newAccumulatedChurn() *accumulatedChurn { + return &accumulatedChurn{ + additions: make(map[types.FileContractID][]contractSetAddition), + removals: make(map[types.FileContractID][]contractSetRemoval), + } +} + +func (c *accumulatedChurn) Alert(name string) alerts.Alert { + var hint string + if len(c.removals) > 0 { + hint = "A high churn rate can lead to a lot of unnecessary migrations, it might be necessary to tweak your configuration depending on the reason hosts are being discarded from the set." + } + + removedReasons := make(map[string][]string, len(c.removals)) + for fcid, contractRemovals := range c.removals { + for _, removal := range contractRemovals { + removedReasons[fcid.String()] = append(removedReasons[fcid.String()], removal.Reason) + } + } + + return alerts.Alert{ + ID: alertChurnID, + Severity: alerts.SeverityInfo, + Message: "Contract set changed", + Data: map[string]any{ + "name": name, + "set_additions": c.additions, + "set_removals": c.removals, + "hint": hint, + }, + Timestamp: time.Now(), + } +} + +func (c *accumulatedChurn) Apply(additions map[types.FileContractID]contractSetAddition, removals map[types.FileContractID]contractSetRemoval) { + for fcid, addition := range additions { + c.additions[fcid] = append(c.additions[fcid], addition) + } + for fcid, removal := range removals { + c.removals[fcid] = append(c.removals[fcid], removal) + } +} + +func (c *accumulatedChurn) Reset() { + c.additions = make(map[types.FileContractID][]contractSetAddition) + c.removals = make(map[types.FileContractID][]contractSetRemoval) +} diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 092f2a831..7909277f0 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -85,6 +85,7 @@ const ( type ( contractor struct { ap *Autopilot + churn *accumulatedChurn resolver *ipResolver logger *zap.SugaredLogger @@ -130,7 +131,7 @@ type ( contractSetRemoval struct { Size uint64 `json:"size"` HostKey types.PublicKey `json:"hostKey"` - Reason string `json:"reason"` + Reason string `json:"reasons"` } renewal struct { @@ -143,6 +144,7 @@ type ( func newContractor(ap *Autopilot, revisionSubmissionBuffer uint64, revisionBroadcastInterval time.Duration) *contractor { return &contractor{ ap: ap, + churn: newAccumulatedChurn(), logger: ap.logger.Named("contractor"), revisionBroadcastInterval: revisionBroadcastInterval, @@ 
-536,7 +538,11 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string, ) hasChanged := len(setAdditions)+len(setRemovals) > 0 if hasChanged { - c.ap.RegisterAlert(ctx, newContractSetChangeAlert(name, setAdditions, setRemovals)) + if !c.ap.HasAlert(ctx, alertChurnID) { + c.churn.Reset() + } + c.churn.Apply(setAdditions, setRemovals) + c.ap.RegisterAlert(ctx, c.churn.Alert(name)) } return hasChanged } diff --git a/bus/bus.go b/bus/bus.go index 9ee6e1ba2..e7e6ddaac 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -1726,7 +1726,10 @@ func (b *bus) gougingParams(ctx context.Context) (api.GougingParams, error) { } func (b *bus) handleGETAlertsDeprecated(jc jape.Context) { - ar := b.alertMgr.Active(0, -1) + ar, err := b.alertMgr.Alerts(jc.Request.Context(), alerts.AlertsOpts{Offset: 0, Limit: -1}) + if jc.Check("failed to fetch alerts", err) != nil { + return + } jc.Encode(ar.Alerts) } @@ -1744,7 +1747,11 @@ func (b *bus) handleGETAlerts(jc jape.Context) { jc.Error(errors.New("offset must be non-negative"), http.StatusBadRequest) return } - jc.Encode(b.alertMgr.Active(offset, limit)) + ar, err := b.alertMgr.Alerts(jc.Request.Context(), alerts.AlertsOpts{Offset: offset, Limit: limit}) + if jc.Check("failed to fetch alerts", err) != nil { + return + } + jc.Encode(ar) } func (b *bus) handlePOSTAlertsDismiss(jc jape.Context) { diff --git a/bus/client/alerts.go b/bus/client/alerts.go index 7f2bf9aa7..7eceaeaed 100644 --- a/bus/client/alerts.go +++ b/bus/client/alerts.go @@ -10,13 +10,13 @@ import ( ) // Alerts fetches the active alerts from the bus. -func (c *Client) Alerts(opts alerts.AlertsOpts) (resp alerts.AlertsResponse, err error) { +func (c *Client) Alerts(ctx context.Context, opts alerts.AlertsOpts) (resp alerts.AlertsResponse, err error) { values := url.Values{} values.Set("offset", fmt.Sprint(opts.Offset)) if opts.Limit != 0 { values.Set("limit", fmt.Sprint(opts.Limit)) } - err = c.c.GET("/alerts?"+values.Encode(), &resp) + err = c.c.WithContext(ctx).GET("/alerts?"+values.Encode(), &resp) return } diff --git a/internal/testing/cluster_test.go b/internal/testing/cluster_test.go index 4fb62ff31..f30a0906a 100644 --- a/internal/testing/cluster_test.go +++ b/internal/testing/cluster_test.go @@ -1923,7 +1923,7 @@ func TestAlerts(t *testing.T) { tt.OK(b.RegisterAlert(context.Background(), alert)) findAlert := func(id types.Hash256) *alerts.Alert { t.Helper() - ar, err := b.Alerts(alerts.AlertsOpts{}) + ar, err := b.Alerts(context.Background(), alerts.AlertsOpts{}) tt.OK(err) for _, alert := range ar.Alerts { if alert.ID == id { @@ -1960,7 +1960,7 @@ func TestAlerts(t *testing.T) { } // try to find with offset = 1 - ar, err := b.Alerts(alerts.AlertsOpts{Offset: 1}) + ar, err := b.Alerts(context.Background(), alerts.AlertsOpts{Offset: 1}) foundAlerts := ar.Alerts tt.OK(err) if len(foundAlerts) != 1 || foundAlerts[0].ID != alert.ID { @@ -1968,7 +1968,7 @@ func TestAlerts(t *testing.T) { } // try to find with limit = 1 - ar, err = b.Alerts(alerts.AlertsOpts{Limit: 1}) + ar, err = b.Alerts(context.Background(), alerts.AlertsOpts{Limit: 1}) foundAlerts = ar.Alerts tt.OK(err) if len(foundAlerts) != 1 || foundAlerts[0].ID != alert2.ID { From c17252a8f250cc0184918ec93dab5e7adeb31791 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 23 Feb 2024 13:14:30 +0100 Subject: [PATCH 100/172] autopilot: update churn alert to contain timestamp --- autopilot/churn.go | 41 +++++++++++++++++------------- autopilot/contractor.go | 56 +++++++++++++++++++++++++++++------------ 2 files 
changed, 63 insertions(+), 34 deletions(-) diff --git a/autopilot/churn.go b/autopilot/churn.go index 70c4651c2..fdc1a0f54 100644 --- a/autopilot/churn.go +++ b/autopilot/churn.go @@ -9,15 +9,15 @@ import ( type ( accumulatedChurn struct { - additions map[types.FileContractID][]contractSetAddition - removals map[types.FileContractID][]contractSetRemoval + additions map[types.FileContractID]contractSetAdditions + removals map[types.FileContractID]contractSetRemovals } ) func newAccumulatedChurn() *accumulatedChurn { return &accumulatedChurn{ - additions: make(map[types.FileContractID][]contractSetAddition), - removals: make(map[types.FileContractID][]contractSetRemoval), + additions: make(map[types.FileContractID]contractSetAdditions), + removals: make(map[types.FileContractID]contractSetRemovals), } } @@ -27,13 +27,6 @@ func (c *accumulatedChurn) Alert(name string) alerts.Alert { hint = "A high churn rate can lead to a lot of unnecessary migrations, it might be necessary to tweak your configuration depending on the reason hosts are being discarded from the set." } - removedReasons := make(map[string][]string, len(c.removals)) - for fcid, contractRemovals := range c.removals { - for _, removal := range contractRemovals { - removedReasons[fcid.String()] = append(removedReasons[fcid.String()], removal.Reason) - } - } - return alerts.Alert{ ID: alertChurnID, Severity: alerts.SeverityInfo, @@ -48,16 +41,28 @@ func (c *accumulatedChurn) Alert(name string) alerts.Alert { } } -func (c *accumulatedChurn) Apply(additions map[types.FileContractID]contractSetAddition, removals map[types.FileContractID]contractSetRemoval) { - for fcid, addition := range additions { - c.additions[fcid] = append(c.additions[fcid], addition) +func (c *accumulatedChurn) Apply(additions map[types.FileContractID]contractSetAdditions, removals map[types.FileContractID]contractSetRemovals) { + for fcid, a := range additions { + if _, exists := c.additions[fcid]; !exists { + c.additions[fcid] = a + } else { + additions := c.additions[fcid] + additions.Additions = append(additions.Additions, a.Additions...) + c.additions[fcid] = additions + } } - for fcid, removal := range removals { - c.removals[fcid] = append(c.removals[fcid], removal) + for fcid, r := range removals { + if _, exists := c.removals[fcid]; !exists { + c.removals[fcid] = r + } else { + removals := c.removals[fcid] + removals.Removals = append(removals.Removals, r.Removals...) 
+			c.removals[fcid] = removals
+		}
 	}
 }
 
 func (c *accumulatedChurn) Reset() {
-	c.additions = make(map[types.FileContractID][]contractSetAddition)
-	c.removals = make(map[types.FileContractID][]contractSetRemoval)
+	c.additions = make(map[types.FileContractID]contractSetAdditions)
+	c.removals = make(map[types.FileContractID]contractSetRemovals)
 }
diff --git a/autopilot/contractor.go b/autopilot/contractor.go
index 7909277f0..9e2b52cca 100644
--- a/autopilot/contractor.go
+++ b/autopilot/contractor.go
@@ -123,15 +123,25 @@ type (
 		recoverable bool
 	}
 
+	contractSetAdditions struct {
+		HostKey   types.PublicKey       `json:"hostKey"`
+		Additions []contractSetAddition `json:"additions"`
+	}
+
 	contractSetAddition struct {
-		Size    uint64          `json:"size"`
-		HostKey types.PublicKey `json:"hostKey"`
+		Size uint64          `json:"size"`
+		Time api.TimeRFC3339 `json:"time"`
+	}
+
+	contractSetRemovals struct {
+		HostKey  types.PublicKey      `json:"hostKey"`
+		Removals []contractSetRemoval `json:"removals"`
 	}
 
 	contractSetRemoval struct {
-		Size    uint64          `json:"size"`
-		HostKey types.PublicKey `json:"hostKey"`
-		Reason  string          `json:"reasons"`
+		Size   uint64          `json:"size"`
+		Reason string          `json:"reasons"`
+		Time   api.TimeRFC3339 `json:"time"`
 	}
 
 	renewal struct {
@@ -455,8 +465,9 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string,
 	}
 
 	// log added and removed contracts
-	setAdditions := make(map[types.FileContractID]contractSetAddition)
-	setRemovals := make(map[types.FileContractID]contractSetRemoval)
+	setAdditions := make(map[types.FileContractID]contractSetAdditions)
+	setRemovals := make(map[types.FileContractID]contractSetRemovals)
+	now := api.TimeNow()
 	for _, contract := range oldSet {
 		_, exists := inNewSet[contract.ID]
 		_, renewed := inNewSet[renewalsFromTo[contract.ID]]
@@ -466,11 +477,18 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string,
 				reason = "unknown"
 			}
 
-			setRemovals[contract.ID] = contractSetRemoval{
-				Size:    contractData[contract.ID],
-				HostKey: contract.HostKey,
-				Reason:  reason,
+			if _, exists := setRemovals[contract.ID]; !exists {
+				setRemovals[contract.ID] = contractSetRemovals{
+					HostKey: contract.HostKey,
+				}
 			}
+			removals := setRemovals[contract.ID]
+			removals.Removals = append(removals.Removals, contractSetRemoval{
+				Size:   contractData[contract.ID],
+				Reason: reason,
+				Time:   now,
+			})
+			setRemovals[contract.ID] = removals
 			c.logger.Debugf("contract %v was removed from the contract set, size: %v, reason: %v", contract.ID, contractData[contract.ID], reason)
 		}
 	}
@@ -478,10 +496,17 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string,
 		_, existed := inOldSet[contract.ID]
 		_, renewed := renewalsToFrom[contract.ID]
 		if !existed && !renewed {
-			setAdditions[contract.ID] = contractSetAddition{
-				Size:    contractData[contract.ID],
-				HostKey: contract.HostKey,
+			if _, exists := setAdditions[contract.ID]; !exists {
+				setAdditions[contract.ID] = contractSetAdditions{
+					HostKey: contract.HostKey,
+				}
 			}
+			additions := setAdditions[contract.ID]
+			additions.Additions = append(additions.Additions, contractSetAddition{
+				Size: contractData[contract.ID],
+				Time: now,
+			})
+			setAdditions[contract.ID] = additions
 			c.logger.Debugf("contract %v was added to the contract set, size: %v", contract.ID, contractData[contract.ID])
 		}
 	}
@@ -501,7 +526,6 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string,
 	}
 
 	// record churn metrics
-	now := api.TimeNow()
 	var metrics []api.ContractSetChurnMetric
 	for fcid := range setAdditions {
 		metrics = append(metrics, api.ContractSetChurnMetric{
@@ -516,7 +540,7 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string,
 			Name:       c.ap.state.cfg.Contracts.Set,
 			ContractID: fcid,
 			Direction:  api.ChurnDirRemoved,
-			Reason:     removal.Reason,
+			Reason:     removal.Removals[0].Reason,
 			Timestamp:  now,
 		})
 	}

From e9a593db12e9e159000f645b38fb0ab3c98c6e55 Mon Sep 17 00:00:00 2001
From: Chris Schinnerl
Date: Fri, 23 Feb 2024 13:55:11 +0100
Subject: [PATCH 101/172] bus: only log lost sectors when there was a contract with that host

---
 bus/bus.go              | 6 ++++--
 stores/metadata.go      | 7 +++++--
 stores/metadata_test.go | 4 +++-
 worker/download.go      | 2 --
 4 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/bus/bus.go b/bus/bus.go
index 9ee6e1ba2..17220d3d4 100644
--- a/bus/bus.go
+++ b/bus/bus.go
@@ -126,7 +126,7 @@ type (
 		ContractSizes(ctx context.Context) (map[types.FileContractID]api.ContractSize, error)
 		ContractSize(ctx context.Context, id types.FileContractID) (api.ContractSize, error)
 
-		DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) error
+		DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) (int, error)
 
 		Bucket(_ context.Context, bucketName string) (api.Bucket, error)
 		CreateBucket(_ context.Context, bucketName string, policy api.BucketPolicy) error
@@ -1409,9 +1409,11 @@ func (b *bus) sectorsHostRootHandlerDELETE(jc jape.Context) {
 	} else if jc.DecodeParam("root", &root) != nil {
 		return
 	}
-	err := b.ms.DeleteHostSector(jc.Request.Context(), hk, root)
+	n, err := b.ms.DeleteHostSector(jc.Request.Context(), hk, root)
 	if jc.Check("failed to mark sector as lost", err) != nil {
 		return
+	} else if n > 0 {
+		b.logger.Infow("successfully marked sector as lost", "hk", hk, "root", root)
 	}
 }
 
diff --git a/stores/metadata.go b/stores/metadata.go
index 2c84e6624..c281c9800 100644
--- a/stores/metadata.go
+++ b/stores/metadata.go
@@ -1620,8 +1620,9 @@ func (s *SQLStore) CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath
 	return
 }
 
-func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) error {
-	return s.retryTransaction(func(tx *gorm.DB) error {
+func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) (int, error) {
+	var deletedSectors int
+	err := s.retryTransaction(func(tx *gorm.DB) error {
 		// Fetch contract_sectors to delete.
 		var sectors []dbContractSector
 		err := tx.Raw(`
@@ -1660,6 +1661,7 @@ func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, roo
 		} else if res.RowsAffected != int64(len(sectors)) {
 			return fmt.Errorf("expected %v affected rows but got %v", len(sectors), res.RowsAffected)
 		}
+		deletedSectors = len(sectors)
 
 		// Increment the host's lostSectors by the number of lost sectors.
 		if err := tx.Exec("UPDATE hosts SET lost_sectors = lost_sectors + ? WHERE public_key = ?", len(sectors), publicKey(hk)).Error; err != nil {
@@ -1687,6 +1689,7 @@ func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, roo
 		}
 		return nil
 	})
+	return deletedSectors, err
 }
 
 func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, eTag, mimeType string, metadata api.ObjectUserMetadata, o object.Object) error {
diff --git a/stores/metadata_test.go b/stores/metadata_test.go
index e36ecbf8e..0b785f6b6 100644
--- a/stores/metadata_test.go
+++ b/stores/metadata_test.go
@@ -3582,8 +3582,10 @@ func TestDeleteHostSector(t *testing.T) {
 	}
 
 	// Prune the sector from hk1.
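 	// (DeleteHostSector now returns the number of contract sector entries it
 	// removed, which lets this test assert that the prune actually happened.)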
-	if err := ss.DeleteHostSector(context.Background(), hk1, root); err != nil {
+	if n, err := ss.DeleteHostSector(context.Background(), hk1, root); err != nil {
 		t.Fatal(err)
+	} else if n == 0 {
+		t.Fatal("no sectors were pruned")
 	}
 
 	// Make sure 2 contractSector entries exist.
diff --git a/worker/download.go b/worker/download.go
index 462a2292d..3f3d63ac4 100644
--- a/worker/download.go
+++ b/worker/download.go
@@ -761,8 +761,6 @@ loop:
 			if isSectorNotFound(resp.err) {
 				if err := s.mgr.os.DeleteHostSector(ctx, resp.req.host.PublicKey(), resp.req.root); err != nil {
 					s.mgr.logger.Errorw("failed to mark sector as lost", "hk", resp.req.host.PublicKey(), "root", resp.req.root, zap.Error(err))
-				} else {
-					s.mgr.logger.Infow("successfully marked sector as lost", "hk", resp.req.host.PublicKey(), "root", resp.req.root)
 				}
 			} else if isPriceTableGouging(resp.err) && s.overpay && !resp.req.overpay {
 				resp.req.overpay = true // ensures we don't retry the same request over and over again

From 9e0250557254fa554b4361b0ffa00ba4ef524d8c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 26 Feb 2024 01:42:33 +0000
Subject: [PATCH 102/172] build(deps): bump go.sia.tech/coreutils from 0.0.1 to 0.0.3

Bumps [go.sia.tech/coreutils](https://github.com/SiaFoundation/coreutils) from 0.0.1 to 0.0.3.
- [Commits](https://github.com/SiaFoundation/coreutils/compare/v0.0.1...v0.0.3)

---
updated-dependencies:
- dependency-name: go.sia.tech/coreutils
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
---
 go.mod | 2 +-
 go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index 0ce29b965..7932deecf 100644
--- a/go.mod
+++ b/go.mod
@@ -12,7 +12,7 @@ require (
 	github.com/montanaflynn/stats v0.7.1
 	gitlab.com/NebulousLabs/encoding v0.0.0-20200604091946-456c3dc907fe
 	go.sia.tech/core v0.2.1
-	go.sia.tech/coreutils v0.0.1
+	go.sia.tech/coreutils v0.0.3
 	go.sia.tech/gofakes3 v0.0.0-20231109151325-e0d47c10dce2
 	go.sia.tech/hostd v1.0.2
 	go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640
diff --git a/go.sum b/go.sum
index 2570e9fc5..aa84a9afb 100644
--- a/go.sum
+++ b/go.sum
@@ -241,8 +241,8 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
 go.sia.tech/core v0.2.1 h1:CqmMd+T5rAhC+Py3NxfvGtvsj/GgwIqQHHVrdts/LqY=
 go.sia.tech/core v0.2.1/go.mod h1:3EoY+rR78w1/uGoXXVqcYdwSjSJKuEMI5bL7WROA27Q=
-go.sia.tech/coreutils v0.0.1 h1:Th8iiF9fjkBaxlKRgPJfRtsD3Pb8U4d2m/OahB6wffg=
-go.sia.tech/coreutils v0.0.1/go.mod h1:3Mb206QDd3NtRiaHZ2kN87/HKXhcBF6lHVatS7PkViY=
+go.sia.tech/coreutils v0.0.3 h1:ZxuzovRpQMvfy/pCOV4om1cPF6sE15GyJyK36kIrF1Y=
+go.sia.tech/coreutils v0.0.3/go.mod h1:UBFc77wXiE//eyilO5HLOncIEj7F69j0Nv2OkFujtP0=
 go.sia.tech/gofakes3 v0.0.0-20231109151325-e0d47c10dce2 h1:ulzfJNjxN5DjXHClkW2pTiDk+eJ+0NQhX87lFDZ03t0=
 go.sia.tech/gofakes3 v0.0.0-20231109151325-e0d47c10dce2/go.mod h1:PlsiVCn6+wssrR7bsOIlZm0DahsVrDydrlbjY4F14sg=
 go.sia.tech/hostd v1.0.2 h1:GjzNIAlwg3/dViF6258Xn5DI3+otQLRqmkoPDugP+9Y=

From b1efe2c1d71abe095a76d1e2190b9515576f0284 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 26 Feb 2024 01:42:42 +0000
Subject: [PATCH 103/172] build(deps): bump go.uber.org/zap from 1.26.0 to 1.27.0

Bumps [go.uber.org/zap](https://github.com/uber-go/zap) from 1.26.0 to 1.27.0.
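The go.sum changes below also move go.uber.org/goleak from v1.2.0 to v1.3.0 to match the requirements of zap v1.27.0.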
- [Release notes](https://github.com/uber-go/zap/releases)
- [Changelog](https://github.com/uber-go/zap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/uber-go/zap/compare/v1.26.0...v1.27.0)

---
updated-dependencies:
- dependency-name: go.uber.org/zap
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 go.mod | 2 +-
 go.sum | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/go.mod b/go.mod
index 0ce29b965..1804eda8a 100644
--- a/go.mod
+++ b/go.mod
@@ -19,7 +19,7 @@ require (
 	go.sia.tech/mux v1.2.0
 	go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca
 	go.sia.tech/web/renterd v0.46.0
-	go.uber.org/zap v1.26.0
+	go.uber.org/zap v1.27.0
 	golang.org/x/crypto v0.19.0
 	golang.org/x/term v0.17.0
 	gopkg.in/yaml.v3 v3.0.1
diff --git a/go.sum b/go.sum
index 2570e9fc5..24e1f476c 100644
--- a/go.sum
+++ b/go.sum
@@ -258,14 +258,14 @@ go.sia.tech/web v0.0.0-20231213145933-3f175a86abff/go.mod h1:RKODSdOmR3VtObPAcGw
 go.sia.tech/web/renterd v0.46.0 h1:BMVg4i7LxSlc8wZ4T0EG1k3EK4JxVIzCfD3/cjmwH0k=
 go.sia.tech/web/renterd v0.46.0/go.mod h1:FgXrdmAnu591a3h96RB/15pMZ74xO9457g902uE06BM=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
-go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
 go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
-go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=

From 08f008470e8b0f6df8313f09ca50ebbb606d3e3e Mon Sep 17 00:00:00 2001
From: PJ
Date: Mon, 26 Feb 2024 14:54:14 +0100
Subject: [PATCH 104/172] worker: add testWorker

---
 internal/node/node.go      |   2 +-
 worker/download.go         |   2 +-
 worker/downloader_test.go  |  12 +-
 worker/gouging.go          |   2 +-
 worker/host.go             |   4 -
 worker/host_test.go        | 116 ++++++-
 worker/mocks_test.go       | 673 +++++++++++++++++++------------------
 worker/pricetables_test.go | 105 ++----
 worker/upload.go           |  13 +-
 worker/upload_test.go      |  63 ++--
 worker/uploader.go         |   5 +-
 worker/uploader_test.go    |  10 +-
 worker/worker.go           |  90 ++---
 worker/worker_test.go      | 109 ++++++
 14 files changed, 714 insertions(+), 492 deletions(-)
 create mode 100644 worker/worker_test.go

diff --git a/internal/node/node.go b/internal/node/node.go
index 6ffe29bf5..e94cfbb4d 100644
--- a/internal/node/node.go
+++ b/internal/node/node.go
@@ -194,7 +194,7 @@ func NewBus(cfg BusConfig, dir string, seed types.PrivateKey, l *zap.Logger) (ht
 
 func NewWorker(cfg config.Worker, b worker.Bus, seed types.PrivateKey, l *zap.Logger) (http.Handler, ShutdownFn, error) {
 	workerKey := blake2b.Sum256(append([]byte("worker"), seed...))
-	w, err := worker.New(workerKey, cfg.ID, b, cfg.ContractLockTimeout, cfg.BusFlushInterval, cfg.DownloadOverdriveTimeout, cfg.UploadOverdriveTimeout, cfg.DownloadMaxOverdrive, cfg.DownloadMaxMemory, cfg.UploadMaxMemory, cfg.UploadMaxOverdrive, cfg.AllowPrivateIPs, l)
+	w, err := worker.New(workerKey, cfg.ID, b, cfg.ContractLockTimeout, cfg.BusFlushInterval, cfg.DownloadOverdriveTimeout, cfg.UploadOverdriveTimeout, cfg.DownloadMaxOverdrive, cfg.UploadMaxOverdrive, cfg.DownloadMaxMemory, cfg.UploadMaxMemory, cfg.AllowPrivateIPs, l)
 	if err != nil {
 		return nil, nil, err
 	}
diff --git a/worker/download.go b/worker/download.go
index 462a2292d..288db7f7c 100644
--- a/worker/download.go
+++ b/worker/download.go
@@ -132,7 +132,7 @@ func (w *worker) initDownloadManager(maxMemory, maxOverdrive uint64, overdriveTi
 		panic("download manager already initialized") // developer error
 	}
 
-	mm := newMemoryManager(logger, maxMemory)
+	mm := newMemoryManager(logger.Named("memorymanager"), maxMemory)
 	w.downloadManager = newDownloadManager(w.shutdownCtx, w, mm, w.bus, maxOverdrive, overdriveTimeout, logger)
 }
 
diff --git a/worker/downloader_test.go b/worker/downloader_test.go
index c1d860c24..cbb48132c 100644
--- a/worker/downloader_test.go
+++ b/worker/downloader_test.go
@@ -7,11 +7,15 @@ import (
 )
 
 func TestDownloaderStopped(t *testing.T) {
-	w := newMockWorker()
-	h := w.addHost()
-	w.dl.refreshDownloaders(w.contracts())
+	w := newTestWorker(t)
+	hosts := w.addHosts(1)
 
-	dl := w.dl.downloaders[h.PublicKey()]
+	// convenience variables
+	dm := w.downloadManager
+	h := hosts[0]
+
+	dm.refreshDownloaders(w.contracts())
+	dl := w.downloadManager.downloaders[h.PublicKey()]
 	dl.Stop()
 
 	req := sectorDownloadReq{
diff --git a/worker/gouging.go b/worker/gouging.go
index 36963e24a..19ae177aa 100644
--- a/worker/gouging.go
+++ b/worker/gouging.go
@@ -63,7 +63,7 @@ func GougingCheckerFromContext(ctx context.Context, criticalMigration bool) (Gou
 	return gc(criticalMigration)
 }
 
-func WithGougingChecker(ctx context.Context, cs consensusState, gp api.GougingParams) context.Context {
+func WithGougingChecker(ctx context.Context, cs ConsensusState, gp api.GougingParams) context.Context {
 	return context.WithValue(ctx, keyGougingChecker, func(criticalMigration bool) (GougingChecker, error) {
 		consensusState, err := cs.ConsensusState(ctx)
 		if err != nil {
diff --git a/worker/host.go b/worker/host.go
index 43e0891af..ac8925872 100644
--- a/worker/host.go
+++ b/worker/host.go
@@ -35,10 +35,6 @@ type (
 	HostManager interface {
 		Host(hk types.PublicKey, fcid types.FileContractID, siamuxAddr string) Host
 	}
-
-	HostStore interface {
-		Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error)
-	}
 )
 
 type (
diff --git a/worker/host_test.go b/worker/host_test.go
index 87d35fb36..9e4ad0471 100644
--- a/worker/host_test.go
+++ b/worker/host_test.go
@@ -4,18 +4,128 @@ import (
 	"bytes"
 	"context"
 	"errors"
+	"io"
+	"sync"
 	"testing"
+	"time"
 
 	rhpv2 "go.sia.tech/core/rhp/v2"
+	rhpv3 "go.sia.tech/core/rhp/v3"
 	"go.sia.tech/core/types"
+	"go.sia.tech/renterd/api"
+	"go.sia.tech/renterd/hostdb"
+	"lukechampine.com/frand"
 )
 
+type (
+	testHost struct {
+		*hostMock
+		*contractMock
+		hptFn func() hostdb.HostPriceTable
+	}
+
+	testHostManager struct {
+		t *testing.T
+
+		mu    sync.Mutex
+		hosts map[types.PublicKey]*testHost
+	}
+)
+
+func newTestHostManager(t *testing.T) *testHostManager {
+	return &testHostManager{t: t, hosts: make(map[types.PublicKey]*testHost)}
+}
+
+func (hm *testHostManager) Host(hk types.PublicKey, fcid types.FileContractID, siamuxAddr string) Host {
+	hm.mu.Lock()
+	defer hm.mu.Unlock()
+
+	if _, ok := hm.hosts[hk]; !ok {
+		hm.t.Fatal("host not found")
+	}
+	return hm.hosts[hk]
+}
+
+func (hm *testHostManager) addHost(h *testHost) {
+	hm.mu.Lock()
+	defer hm.mu.Unlock()
+	hm.hosts[h.hk] = h
+}
+
+func newTestHost(h *hostMock, c *contractMock) *testHost {
+	return newTestHostCustom(h, c, newTestHostPriceTable)
+}
+
+func newTestHostCustom(h *hostMock, c *contractMock, hptFn func() hostdb.HostPriceTable) *testHost {
+	return &testHost{
+		hostMock:     h,
+		contractMock: c,
+		hptFn:        hptFn,
+	}
+}
+
+func newTestHostPriceTable() hostdb.HostPriceTable {
+	var uid rhpv3.SettingsID
+	frand.Read(uid[:])
+
+	return hostdb.HostPriceTable{
+		HostPriceTable: rhpv3.HostPriceTable{UID: uid, Validity: time.Minute},
+		Expiry:         time.Now().Add(time.Minute),
+	}
+}
+
+func (h *testHost) PublicKey() types.PublicKey {
+	return h.hk
+}
+
+func (h *testHost) DownloadSector(ctx context.Context, w io.Writer, root types.Hash256, offset, length uint32, overpay bool) error {
+	sector, exist := h.Sector(root)
+	if !exist {
+		return errSectorNotFound
+	}
+	if offset+length > rhpv2.SectorSize {
+		return errSectorOutOfBounds
+	}
+	_, err := w.Write(sector[offset : offset+length])
+	return err
+}
+
+func (h *testHost) UploadSector(ctx context.Context, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) (types.Hash256, error) {
+	return h.AddSector(sector), nil
+}
+
+func (h *testHost) FetchRevision(ctx context.Context, fetchTimeout time.Duration) (rev types.FileContractRevision, _ error) {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	rev = h.rev
+	return rev, nil
+}
+
+func (h *testHost) FetchPriceTable(ctx context.Context, rev *types.FileContractRevision) (hostdb.HostPriceTable, error) {
+	return h.hptFn(), nil
+}
+
+func (h *testHost) FundAccount(ctx context.Context, balance types.Currency, rev *types.FileContractRevision) error {
+	return nil
+}
+
+func (h *testHost) RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rhpv2.ContractRevision, _ []types.Transaction, _ types.Currency, err error) {
+	return rhpv2.ContractRevision{}, nil, types.ZeroCurrency, nil
+}
+
+func (h *testHost) SyncAccount(ctx context.Context, rev *types.FileContractRevision) error {
+	return nil
+}
+
 func TestHost(t *testing.T) {
-	h := newMockHost(types.PublicKey{1})
-	h.c = newMockContract(h.hk, types.FileContractID{1})
-	sector, root := newMockSector()
+	// create test host
+	h := newTestHost(
+		newHostMock(types.PublicKey{1}),
+		newContractMock(types.PublicKey{1}, types.FileContractID{1}),
+	)
 
 	// upload the sector
+	sector, root := newTestSector()
 	uploaded, err := h.UploadSector(context.Background(), sector, types.FileContractRevision{})
 	if err != nil {
 		t.Fatal(err)
diff --git a/worker/mocks_test.go b/worker/mocks_test.go
index 2490941af..e38bd64c0 100644
--- a/worker/mocks_test.go
+++ b/worker/mocks_test.go
@@ -5,222 +5,397 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"io"
+	"math/big"
 	"sync"
 	"time"
 
 	rhpv2 "go.sia.tech/core/rhp/v2"
 	rhpv3 "go.sia.tech/core/rhp/v3"
 	"go.sia.tech/core/types"
+	"go.sia.tech/renterd/alerts"
 	"go.sia.tech/renterd/api"
 	"go.sia.tech/renterd/hostdb"
 	"go.sia.tech/renterd/object"
-	"go.uber.org/zap"
-	"lukechampine.com/frand"
+	"go.sia.tech/renterd/webhooks"
 )
 
-type (
-	mockContract struct {
-		rev      types.FileContractRevision
-		metadata api.ContractMetadata
+var _ AccountStore = (*accountsMock)(nil)
 
-		mu      sync.Mutex
-		sectors map[types.Hash256]*[rhpv2.SectorSize]byte
-	}
+type accountsMock struct{}
 
-	mockContractStore struct {
-		mu    sync.Mutex
-		locks map[types.FileContractID]*sync.Mutex
-	}
+func (*accountsMock) Accounts(context.Context) ([]api.Account, error) {
+	return nil, nil
+}
 
-	mockHost struct {
-		hk types.PublicKey
+func (*accountsMock) AddBalance(context.Context, rhpv3.Account, types.PublicKey, *big.Int) error {
+	return nil
+}
 
-		mu sync.Mutex
-		c  *mockContract
+func (*accountsMock) LockAccount(context.Context, rhpv3.Account, types.PublicKey, bool, time.Duration) (api.Account, uint64, error) {
+	return api.Account{}, 0, nil
+}
 
-		hpt          hostdb.HostPriceTable
-		hptBlockChan chan struct{}
-	}
+func (*accountsMock) UnlockAccount(context.Context, rhpv3.Account, uint64) error {
+	return nil
+}
 
-	mockHostManager struct {
-		mu    sync.Mutex
-		hosts map[types.PublicKey]*mockHost
-	}
+func (*accountsMock) ResetDrift(context.Context, rhpv3.Account) error {
+	return nil
+}
 
-	mockMemory        struct{}
-	mockMemoryManager struct {
-		memBlockChan chan struct{}
-	}
+func (*accountsMock) SetBalance(context.Context, rhpv3.Account, types.PublicKey, *big.Int) error {
+	return nil
+}
 
-	mockObjectStore struct {
-		mu           sync.Mutex
-		objects      map[string]map[string]object.Object
-		partials     map[string]mockPackedSlab
-		bufferIDCntr uint // allows marking packed slabs as uploaded
-	}
+func (*accountsMock) ScheduleSync(context.Context, rhpv3.Account, types.PublicKey) error {
+	return nil
+}
 
-	mockPackedSlab struct {
-		parameterKey string // ([minshards]-[totalshards]-[contractset])
-		bufferID     uint
-		slabKey      object.EncryptionKey
-		data         []byte
-	}
+var _ alerts.Alerter = (*alerterMock)(nil)
 
-	mockWorker struct {
-		cs *mockContractStore
-		hm *mockHostManager
-		mm *mockMemoryManager
-		os *mockObjectStore
+type alerterMock struct{}
 
-		dl *downloadManager
-		ul *uploadManager
+func (*alerterMock) RegisterAlert(context.Context, alerts.Alert) error    { return nil }
+func (*alerterMock) DismissAlerts(context.Context, ...types.Hash256) error { return nil }
 
-		mu       sync.Mutex
-		hkCntr   uint
-		fcidCntr uint
-	}
-)
+var _ ConsensusState = (*chainMock)(nil)
 
-var (
-	_ ContractStore = (*mockContractStore)(nil)
-	_ Host          = (*mockHost)(nil)
-	_ HostManager   = (*mockHostManager)(nil)
-	_ Memory        = (*mockMemory)(nil)
-	_ MemoryManager = (*mockMemoryManager)(nil)
-	_ ObjectStore   = (*mockObjectStore)(nil)
-)
+type chainMock struct{}
 
-var (
-	errBucketNotFound    = errors.New("bucket not found")
-	errContractNotFound  = errors.New("contract not found")
-	errObjectNotFound    = errors.New("object not found")
-	errSlabNotFound      = errors.New("slab not found")
-	errSectorOutOfBounds = errors.New("sector out of bounds")
-)
+func (c *chainMock) ConsensusState(ctx context.Context) (api.ConsensusState, error) {
+	return api.ConsensusState{}, nil
+}
 
-type (
-	mockHosts     []*mockHost
-	mockContracts []*mockContract
-)
+var _ Bus = (*busMock)(nil)
 
-func (hosts mockHosts) contracts() mockContracts {
-	contracts := make([]*mockContract, len(hosts))
-	for i, host := range hosts {
-		contracts[i] = host.c
-	}
-	return contracts
+type busMock struct {
+	*alerterMock
+	*accountsMock
+	*chainMock
+	*contractLockerMock
+	*contractStoreMock
+	*hostStoreMock
+	*objectStoreMock
+	*settingStoreMock
+	*syncerMock
+	*walletMock
+	*webhookBroadcasterMock
 }
 
-func (contracts mockContracts) metadata() []api.ContractMetadata {
-	metadata := make([]api.ContractMetadata, len(contracts))
-	for i, contract := range contracts {
-		metadata[i] = contract.metadata
+func newBusMock(cs *contractStoreMock, hs *hostStoreMock, os *objectStoreMock) *busMock {
+	return &busMock{
+		alerterMock:            &alerterMock{},
+		accountsMock:           &accountsMock{},
+		chainMock:              &chainMock{},
+		contractLockerMock:     newContractLockerMock(),
+		contractStoreMock:      cs,
+		hostStoreMock:          hs,
+		objectStoreMock:        os,
+		settingStoreMock:       &settingStoreMock{},
+		syncerMock:             &syncerMock{},
+		walletMock:             &walletMock{},
+		webhookBroadcasterMock: &webhookBroadcasterMock{},
 	}
-	return metadata
 }
 
-func (m *mockMemory) Release()           {}
-func (m *mockMemory) ReleaseSome(uint64) {}
+type contractMock struct {
+	rev      types.FileContractRevision
+	metadata api.ContractMetadata
 
-func (mm *mockMemoryManager) Limit(amt uint64) (MemoryManager, error) {
-	return &mockMemoryManager{}, nil
+	mu      sync.Mutex
+	sectors map[types.Hash256]*[rhpv2.SectorSize]byte
 }
 
-func (mm *mockMemoryManager) Status() api.MemoryStatus { return api.MemoryStatus{} }
-func (mm *mockMemoryManager) AcquireMemory(ctx context.Context, amt uint64) Memory {
-	if mm.memBlockChan != nil {
-		<-mm.memBlockChan
+func newContractMock(hk types.PublicKey, fcid types.FileContractID) *contractMock {
+	return &contractMock{
+		metadata: api.ContractMetadata{
+			ID:          fcid,
+			HostKey:     hk,
+			WindowStart: 0,
+			WindowEnd:   10,
+		},
+		rev:     types.FileContractRevision{ParentID: fcid},
+		sectors: make(map[types.Hash256]*[rhpv2.SectorSize]byte),
 	}
-	return &mockMemory{}
 }
 
-func newMockContractStore() *mockContractStore {
-	return &mockContractStore{
+func (c *contractMock) AddSector(sector *[rhpv2.SectorSize]byte) (root types.Hash256) {
+	root = rhpv2.SectorRoot(sector)
+	c.mu.Lock()
+	c.sectors[root] = sector
+	c.mu.Unlock()
+	return
+}
+
+func (c *contractMock) Sector(root types.Hash256) (sector *[rhpv2.SectorSize]byte, found bool) {
+	c.mu.Lock()
+	sector, found = c.sectors[root]
+	c.mu.Unlock()
+	return
+}
+
+var _ ContractLocker = (*contractLockerMock)(nil)
+
+type contractLockerMock struct {
+	mu    sync.Mutex
+	locks map[types.FileContractID]*sync.Mutex
+}
+
+func newContractLockerMock() *contractLockerMock {
+	return &contractLockerMock{
 		locks: make(map[types.FileContractID]*sync.Mutex),
 	}
 }
 
-func (cs *mockContractStore) AcquireContract(ctx context.Context, fcid types.FileContractID, priority int, d time.Duration) (lockID uint64, err error) {
+func (cs *contractLockerMock) AcquireContract(_ context.Context, fcid types.FileContractID, _ int, _ time.Duration) (uint64, error) {
 	cs.mu.Lock()
 	defer cs.mu.Unlock()
 
-	if lock, ok := cs.locks[fcid]; !ok {
-		return 0, errContractNotFound
-	} else {
-		lock.Lock()
+	lock, exists := cs.locks[fcid]
+	if !exists {
+		cs.locks[fcid] = new(sync.Mutex)
+		lock = cs.locks[fcid]
 	}
+
+	lock.Lock()
 	return 0, nil
 }
 
-func (cs *mockContractStore) ReleaseContract(ctx context.Context, fcid types.FileContractID, lockID uint64) (err error) {
+func (cs *contractLockerMock) ReleaseContract(_ context.Context, fcid types.FileContractID, _ uint64) error {
 	cs.mu.Lock()
 	defer cs.mu.Unlock()
 
-	if lock, ok := cs.locks[fcid]; !ok {
-		return errContractNotFound
-	} else {
-		lock.Unlock()
-	}
+	cs.locks[fcid].Unlock()
+	delete(cs.locks, fcid)
 	return nil
 }
 
-func (cs *mockContractStore) KeepaliveContract(ctx context.Context, fcid types.FileContractID, lockID uint64, d time.Duration) (err error) {
+func (*contractLockerMock) KeepaliveContract(context.Context, types.FileContractID, uint64, time.Duration) error {
 	return nil
 }
 
-func (os *mockContractStore) RenewedContract(ctx context.Context, renewedFrom types.FileContractID) (api.ContractMetadata, error) {
-	return api.ContractMetadata{}, api.ErrContractNotFound
+var _ ContractStore = (*contractStoreMock)(nil)
+
+type contractStoreMock struct {
+	mu         sync.Mutex
+	contracts  map[types.FileContractID]*contractMock
+	hosts2fcid map[types.PublicKey]types.FileContractID
+	fcidCntr   uint
 }
 
-func newMockObjectStore() *mockObjectStore {
-	os := &mockObjectStore{
-		objects:  make(map[string]map[string]object.Object),
-		partials: make(map[string]mockPackedSlab),
+func newContractStoreMock() *contractStoreMock {
+	return &contractStoreMock{
+		contracts:  make(map[types.FileContractID]*contractMock),
+		hosts2fcid: make(map[types.PublicKey]types.FileContractID),
 	}
-	os.objects[testBucket] = make(map[string]object.Object)
-	return os
 }
 
-func (cs *mockContractStore) addContract(c *mockContract) {
+func (*contractStoreMock) RenewedContract(context.Context, types.FileContractID) (api.ContractMetadata, error) {
+	return api.ContractMetadata{}, nil
+}
+
+func (*contractStoreMock) Contract(context.Context, types.FileContractID) (api.ContractMetadata, error) {
+	return api.ContractMetadata{}, nil
+}
+
+func (*contractStoreMock) ContractSize(context.Context, types.FileContractID) (api.ContractSize, error) {
+	return api.ContractSize{}, nil
+}
+
+func (*contractStoreMock) ContractRoots(context.Context, types.FileContractID) ([]types.Hash256, []types.Hash256, error) {
+	return nil, nil, nil
+}
+
+func (cs *contractStoreMock) Contracts(context.Context, api.ContractsOpts) (metadatas []api.ContractMetadata, _ error) {
 	cs.mu.Lock()
 	defer cs.mu.Unlock()
-	cs.locks[c.metadata.ID] = new(sync.Mutex)
+	for _, c := range cs.contracts {
+		metadatas = append(metadatas, c.metadata)
+	}
+	return
 }
 
-func (os *mockObjectStore) AddMultipartPart(ctx context.Context, bucket, path, contractSet, ETag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) {
+func (cs *contractStoreMock) addContract(hk types.PublicKey) *contractMock {
+	cs.mu.Lock()
+	defer cs.mu.Unlock()
+
+	fcid := cs.newFileContractID()
+	cs.contracts[fcid] = newContractMock(hk, fcid)
+	cs.hosts2fcid[hk] = fcid
+	return cs.contracts[fcid]
+}
+
+func (cs *contractStoreMock) renewContract(hk types.PublicKey) (*contractMock, error) {
+	cs.mu.Lock()
+	defer cs.mu.Unlock()
+
+	curr := cs.hosts2fcid[hk]
+	c := cs.contracts[curr]
+	if c == nil {
+		return nil, errors.New("host does not have a contract to renew")
+	}
+	delete(cs.contracts, curr)
+
+	renewal := newContractMock(hk, cs.newFileContractID())
+	renewal.metadata.RenewedFrom = c.metadata.ID
+	renewal.metadata.WindowStart = c.metadata.WindowEnd
+	renewal.metadata.WindowEnd = renewal.metadata.WindowStart + (c.metadata.WindowEnd - c.metadata.WindowStart)
+	cs.contracts[renewal.metadata.ID] = renewal
+	cs.hosts2fcid[hk] = renewal.metadata.ID
+	return renewal, nil
+}
+
+func (cs *contractStoreMock) newFileContractID() types.FileContractID {
+	cs.fcidCntr++
+	return types.FileContractID{byte(cs.fcidCntr)}
+}
+
+var errSectorOutOfBounds = errors.New("sector out of bounds")
+
+type hostMock struct {
+	hk types.PublicKey
+	hi hostdb.HostInfo
+}
+
+func newHostMock(hk types.PublicKey) *hostMock {
+	return &hostMock{
+		hk: hk,
+		hi: hostdb.HostInfo{Host: hostdb.Host{PublicKey: hk, Scanned: true}},
+	}
+}
+
+var _ HostStore = (*hostStoreMock)(nil)
+
+type hostStoreMock struct {
+	mu     sync.Mutex
+	hosts  map[types.PublicKey]*hostMock
+	hkCntr uint
+}
+
+func newHostStoreMock() *hostStoreMock {
+	return &hostStoreMock{hosts: make(map[types.PublicKey]*hostMock)}
+}
+
+func (hs *hostStoreMock) Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) {
+	hs.mu.Lock()
+	defer hs.mu.Unlock()
+
+	h, ok := hs.hosts[hostKey]
+	if !ok {
+		return hostdb.HostInfo{}, api.ErrHostNotFound
+	}
+	return h.hi, nil
+}
+
+func (hs *hostStoreMock) RecordHostScans(ctx context.Context, scans []hostdb.HostScan) error {
 	return nil
 }
 
-func (os *mockObjectStore) AddUploadingSector(ctx context.Context, uID api.UploadID, id types.FileContractID, root types.Hash256) error {
+func (hs *hostStoreMock) RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error {
 	return nil
 }
 
-func (os *mockObjectStore) TrackUpload(ctx context.Context, uID api.UploadID) error { return nil }
+func (hs *hostStoreMock) RecordContractSpending(ctx context.Context, records []api.ContractSpendingRecord) error {
+	return nil
+}
 
-func (os *mockObjectStore) FinishUpload(ctx context.Context, uID api.UploadID) error { return nil }
+func (hs *hostStoreMock) addHost() *hostMock {
+	hs.mu.Lock()
+	defer hs.mu.Unlock()
 
-func (os *mockObjectStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) error {
+	hs.hkCntr++
+	hk := types.PublicKey{byte(hs.hkCntr)}
+	hs.hosts[hk] = newHostMock(hk)
+	return hs.hosts[hk]
+}
+
+var (
+	_ MemoryManager = (*memoryManagerMock)(nil)
+	_ Memory        = (*memoryMock)(nil)
+)
+
+type (
+	memoryMock        struct{}
+	memoryManagerMock struct{ memBlockChan chan struct{} }
+)
+
+func (m *memoryMock) Release()           {}
+func (m *memoryMock) ReleaseSome(uint64) {}
+
+func (mm *memoryManagerMock) Limit(amt uint64) (MemoryManager, error) {
+	return &memoryManagerMock{}, nil
+}
+
+func (mm *memoryManagerMock) Status() api.MemoryStatus { return api.MemoryStatus{} }
+
+func (mm *memoryManagerMock) AcquireMemory(ctx context.Context, amt uint64) Memory {
+	if mm.memBlockChan != nil {
+		<-mm.memBlockChan
+	}
+	return &memoryMock{}
+}
+
+var _ ObjectStore = (*objectStoreMock)(nil)
+
+type (
+	objectStoreMock struct {
+		mu           sync.Mutex
+		objects      map[string]map[string]object.Object
+		partials     map[string]packedSlabMock
+		bufferIDCntr uint // allows marking packed slabs as uploaded
+	}
+
+	packedSlabMock struct {
+		parameterKey string // ([minshards]-[totalshards]-[contractset])
+		bufferID     uint
+		slabKey      object.EncryptionKey
+		data         []byte
+	}
+)
+
+func newObjectStoreMock(bucket string) *objectStoreMock {
+	os := &objectStoreMock{
+		objects:  make(map[string]map[string]object.Object),
+		partials: make(map[string]packedSlabMock),
+	}
+	os.objects[bucket] = make(map[string]object.Object)
+	return os
+}
+
+func (os *objectStoreMock) AddMultipartPart(ctx context.Context, bucket, path, contractSet, ETag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) {
 	return nil
 }
 
-func (os *mockObjectStore) DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) error {
+func (os *objectStoreMock) AddUploadingSector(ctx context.Context, uID api.UploadID, id types.FileContractID, root types.Hash256) error {
 	return nil
 }
 
-func (os *mockObjectStore) AddObject(ctx context.Context, bucket, path, contractSet string, o object.Object, opts api.AddObjectOptions) error {
+func (os *objectStoreMock) TrackUpload(ctx context.Context, uID api.UploadID) error { return nil }
+
+func (os *objectStoreMock) FinishUpload(ctx context.Context, uID api.UploadID) error { return nil }
+
+func (os *objectStoreMock) DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) error {
+	return nil
+}
+
+func (os *objectStoreMock) DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) error {
+	return nil
+}
+
+func (os *objectStoreMock) AddObject(ctx context.Context, bucket, path, contractSet string, o object.Object, opts api.AddObjectOptions) error {
 	os.mu.Lock()
 	defer os.mu.Unlock()
 
 	// check if the bucket exists
 	if _, exists := os.objects[bucket]; !exists {
-		return errBucketNotFound
+		return api.ErrBucketNotFound
 	}
 
 	os.objects[bucket][path] = o
 	return nil
 }
 
-func (os *mockObjectStore) AddPartialSlab(ctx context.Context, data []byte, minShards, totalShards uint8, contractSet string) (slabs []object.SlabSlice, slabBufferMaxSizeSoftReached bool, err error) {
+func (os *objectStoreMock) AddPartialSlab(ctx context.Context, data []byte, minShards, totalShards uint8, contractSet string) (slabs []object.SlabSlice, slabBufferMaxSizeSoftReached bool, err error) {
 	os.mu.Lock()
 	defer os.mu.Unlock()
 
@@ -239,7 +414,7 @@ func (os *mockObjectStore) AddPartialSlab(ctx context.Context, data []byte, minS
 	}
 
 	// update store
-	os.partials[ec.String()] = mockPackedSlab{
+	os.partials[ec.String()] = packedSlabMock{
 		parameterKey: fmt.Sprintf("%d-%d-%v", minShards, totalShards, contractSet),
 		bufferID:     os.bufferIDCntr,
 		slabKey:      ec,
@@ -250,18 +425,18 @@ func (os *mockObjectStore) AddPartialSlab(ctx context.Context, data []byte, minS
 	return []object.SlabSlice{ss}, false, nil
 }
 
-func (os *mockObjectStore) Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (api.ObjectsResponse, error) {
+func (os *objectStoreMock) Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (api.ObjectsResponse, error) {
 	os.mu.Lock()
 	defer os.mu.Unlock()
 
 	// check if the bucket exists
 	if _, exists := os.objects[bucket]; !exists {
-		return api.ObjectsResponse{}, errBucketNotFound
+		return api.ObjectsResponse{}, api.ErrBucketNotFound
 	}
 
 	// check if the object exists
 	if _, exists := os.objects[bucket][path]; !exists {
-		return api.ObjectsResponse{}, errObjectNotFound
+		return api.ObjectsResponse{}, api.ErrObjectNotFound
 	}
 
 	// clone to ensure the store isn't unwillingly modified
@@ -278,13 +453,13 @@ func (os *mockObjectStore) Object(ctx context.Context, bucket, path string, opts
 	}}, nil
 }
 
-func (os *mockObjectStore) FetchPartialSlab(ctx context.Context, key object.EncryptionKey, offset, length uint32) ([]byte, error) {
+func (os *objectStoreMock) FetchPartialSlab(ctx context.Context, key object.EncryptionKey, offset, length uint32) ([]byte, error) {
 	os.mu.Lock()
 	defer os.mu.Unlock()
 
 	packedSlab, exists := os.partials[key.String()]
 	if !exists {
-		return nil, errSlabNotFound
+		return nil, api.ErrSlabNotFound
 	}
 	if offset+length > uint32(len(packedSlab.data)) {
 		return nil, errors.New("offset out of bounds")
@@ -293,7 +468,7 @@ func (os *mockObjectStore) FetchPartialSlab(ctx context.Context, key object.Encr
 	return packedSlab.data[offset : offset+length], nil
 }
 
-func (os *mockObjectStore) Slab(ctx context.Context, key object.EncryptionKey) (slab object.Slab, err error) {
+func (os *objectStoreMock) Slab(ctx context.Context, key object.EncryptionKey) (slab object.Slab, err error) {
 	os.mu.Lock()
 	defer os.mu.Unlock()
 
@@ -304,12 +479,12 @@ func (os *mockObjectStore) Slab(ctx context.Context, key object.EncryptionKey) (
 				return
 			}
 		}
-		err = errSlabNotFound
+		err = api.ErrSlabNotFound
 	})
 	return
 }
 
-func (os *mockObjectStore) UpdateSlab(ctx context.Context, s object.Slab, contractSet string) error {
+func (os *objectStoreMock) UpdateSlab(ctx context.Context, s object.Slab, contractSet string) error {
 	os.mu.Lock()
 	defer os.mu.Unlock()
 
@@ -325,7 +500,7 @@ func (os *mockObjectStore) UpdateSlab(ctx context.Context, s object.Slab, contra
 	return nil
 }
 
-func (os *mockObjectStore) PackedSlabsForUpload(ctx context.Context, lockingDuration time.Duration, minShards, totalShards uint8, set string, limit int) (pss []api.PackedSlab, _ error) {
+func (os *objectStoreMock) PackedSlabsForUpload(ctx context.Context, lockingDuration time.Duration, minShards, totalShards uint8, set string, limit int) (pss []api.PackedSlab, _ error) {
 	os.mu.Lock()
 	defer os.mu.Unlock()
 
@@ -342,7 +517,7 @@ func (os *mockObjectStore) PackedSlabsForUpload(ctx context.Context, lockingDura
 	return
 }
 
-func (os *mockObjectStore) MarkPackedSlabsUploaded(ctx context.Context, slabs []api.UploadedPackedSlab) error {
+func (os *objectStoreMock) MarkPackedSlabsUploaded(ctx context.Context, slabs []api.UploadedPackedSlab) error {
 	os.mu.Lock()
 	defer os.mu.Unlock()
 
@@ -367,7 +542,15 @@ func (os *mockObjectStore) MarkPackedSlabsUploaded(ctx context.Context, slabs []
 	return nil
 }
 
-func (os *mockObjectStore) forEachObject(fn func(bucket, path string, o object.Object)) {
+func (os *objectStoreMock) Bucket(_ context.Context, bucket string) (api.Bucket, error) {
+	return api.Bucket{}, nil
+}
+
+func (os *objectStoreMock) MultipartUpload(ctx context.Context, uploadID string) (resp api.MultipartUpload, err error) {
+	return api.MultipartUpload{}, nil
+}
+
+func (os *objectStoreMock) forEachObject(fn func(bucket, path string, o object.Object)) {
 	for bucket, objects := range os.objects {
 		for path, object := range objects {
 			fn(bucket, path, object)
@@ -375,220 +558,58 @@ func (os *mockObjectStore) forEachObject(fn func(bucket, path string, o object.O
 	}
 }
 
-func newMockHost(hk types.PublicKey) *mockHost {
-	return &mockHost{
-		hk:  hk,
-		hpt: newTestHostPriceTable(time.Now().Add(time.Minute)),
-	}
-}
+var _ SettingStore = (*settingStoreMock)(nil)
 
-func (h *mockHost) PublicKey() types.PublicKey { return h.hk }
+type settingStoreMock struct{}
 
-func (h *mockHost) DownloadSector(ctx context.Context, w io.Writer, root types.Hash256, offset, length uint32, overpay bool) error {
-	sector, exist := h.contract().sector(root)
-	if !exist {
-		return errSectorNotFound
-	}
-	if offset+length > rhpv2.SectorSize {
-		return errSectorOutOfBounds
-	}
-	_, err := w.Write(sector[offset : offset+length])
-	return err
+func (*settingStoreMock) GougingParams(context.Context) (api.GougingParams, error) {
+	return api.GougingParams{}, nil
 }
 
-func (h *mockHost) UploadSector(ctx context.Context, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) (types.Hash256, error) {
-	return h.contract().addSector(sector), nil
+func (*settingStoreMock) UploadParams(context.Context) (api.UploadParams, error) {
+	return api.UploadParams{}, nil
 }
 
-func (h *mockHost) FetchRevision(ctx context.Context, fetchTimeout time.Duration) (rev types.FileContractRevision, _ error) {
-	h.mu.Lock()
-	defer h.mu.Unlock()
-	rev = h.c.rev
-	return
-}
+var _ Syncer = (*syncerMock)(nil)
 
-func (h *mockHost) FetchPriceTable(ctx context.Context, rev *types.FileContractRevision) (hostdb.HostPriceTable, error) {
-	<-h.hptBlockChan
-	return h.hpt, nil
-}
+type syncerMock struct{}
 
-func (h *mockHost) FundAccount(ctx context.Context, balance types.Currency, rev *types.FileContractRevision) error {
+func (*syncerMock) BroadcastTransaction(context.Context, []types.Transaction) error {
 	return nil
 }
 
-func (h *mockHost) RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rhpv2.ContractRevision, _ []types.Transaction, _ types.Currency, err error) {
-	return rhpv2.ContractRevision{}, nil, types.ZeroCurrency, nil
-}
-
-func (h *mockHost) SyncAccount(ctx context.Context, rev *types.FileContractRevision) error {
-	return nil
+func (*syncerMock) SyncerPeers(context.Context) ([]string, error) {
+	return nil, nil
 }
 
-func (h *mockHost) contract() (c *mockContract) {
-	h.mu.Lock()
-	c = h.c
-	h.mu.Unlock()
+var _ Wallet = (*walletMock)(nil)
 
-	if c == nil {
-		panic("host does not have a contract")
-	}
-	return
-}
+type walletMock struct{}
 
-func newMockContract(hk types.PublicKey, fcid types.FileContractID) *mockContract {
-	return &mockContract{
-		metadata: api.ContractMetadata{
-			ID:          fcid,
-			HostKey:     hk,
-			WindowStart: 0,
-			WindowEnd:   10,
-		},
-		rev:     types.FileContractRevision{ParentID: fcid},
-		sectors: make(map[types.Hash256]*[rhpv2.SectorSize]byte),
-	}
-}
-
-func (c *mockContract) addSector(sector *[rhpv2.SectorSize]byte) (root types.Hash256) {
-	root = rhpv2.SectorRoot(sector)
-	c.mu.Lock()
-	c.sectors[root] = sector
-	c.mu.Unlock()
-	return
-}
-
-func (c *mockContract) sector(root types.Hash256) (sector *[rhpv2.SectorSize]byte, found bool) {
-	c.mu.Lock()
-	sector, found = c.sectors[root]
-	c.mu.Unlock()
-	return
-}
-
-func newMockHostManager() *mockHostManager {
-	return &mockHostManager{
-		hosts: make(map[types.PublicKey]*mockHost),
-	}
-}
-
-func (hm *mockHostManager) Host(hk types.PublicKey, fcid types.FileContractID, siamuxAddr string) Host {
-	hm.mu.Lock()
-	defer hm.mu.Unlock()
-
-	if _, ok := hm.hosts[hk]; !ok {
-		panic("host not found")
-	}
-	return hm.hosts[hk]
-}
-
-func (hm *mockHostManager) newHost(hk types.PublicKey) *mockHost {
-	hm.mu.Lock()
-	defer hm.mu.Unlock()
-
-	if _, ok := hm.hosts[hk]; ok {
-		panic("host already exists")
-	}
-
-	hm.hosts[hk] = newMockHost(hk)
-	return hm.hosts[hk]
-}
-
-func (hm *mockHostManager) host(hk types.PublicKey) *mockHost {
-	hm.mu.Lock()
-	defer hm.mu.Unlock()
-	return hm.hosts[hk]
-}
-
-func newMockSector() (*[rhpv2.SectorSize]byte, types.Hash256) {
-	var sector [rhpv2.SectorSize]byte
-	frand.Read(sector[:])
-	return &sector, rhpv2.SectorRoot(&sector)
-}
-
-func newMockWorker() *mockWorker {
-	cs := newMockContractStore()
-	hm := newMockHostManager()
-	os := newMockObjectStore()
-	mm := &mockMemoryManager{}
-
-	return &mockWorker{
-		cs: cs,
-		hm: hm,
-		mm: mm,
-		os: os,
-
-		dl: newDownloadManager(context.Background(), hm, mm, os, 0, 0, zap.NewNop().Sugar()),
-		ul: newUploadManager(context.Background(), hm, mm, os, cs, 0, 0, time.Minute, zap.NewNop().Sugar()),
-	}
-}
-
-func (w *mockWorker) addHosts(n int) {
-	for i := 0; i < n; i++ {
-		w.addHost()
-	}
-}
-
-func (w *mockWorker) addHost() *mockHost {
-	host := w.hm.newHost(w.newHostKey())
-	w.formContract(host)
-	return host
+func (*walletMock) WalletDiscard(context.Context, types.Transaction) error {
+	return nil
 }
 
-func (w *mockWorker) formContract(host *mockHost) *mockContract {
-	if host.c != nil {
-		panic("host already has contract, use renew")
-	}
-	host.c = newMockContract(host.hk, w.newFileContractID())
-	w.cs.addContract(host.c)
-	return host.c
+func (*walletMock) WalletFund(context.Context, *types.Transaction, types.Currency, bool) ([]types.Hash256, []types.Transaction, error) {
+	return nil, nil, nil
 }
 
-func (w *mockWorker) renewContract(hk types.PublicKey) *mockContract {
-	host := w.hm.host(hk)
-	if host == nil {
-		panic("host not found")
-	} else if host.c == nil {
-		panic("host does not have a contract to renew")
-	}
-
-	curr := host.c.metadata
-	update := newMockContract(host.hk, w.newFileContractID())
-	update.metadata.RenewedFrom = curr.ID
-	update.metadata.WindowStart = curr.WindowEnd
-	update.metadata.WindowEnd = update.metadata.WindowStart + (curr.WindowEnd - curr.WindowStart)
-	host.c = update
-
-	w.cs.addContract(host.c)
-	return host.c
+func (*walletMock) WalletPrepareForm(context.Context, types.Address, types.PublicKey, types.Currency, types.Currency, types.PublicKey, rhpv2.HostSettings, uint64) ([]types.Transaction, error) {
+	return nil, nil
 }
 
-func (w *mockWorker) contracts() (metadatas []api.ContractMetadata) {
-	for _, h := range w.hm.hosts {
-		metadatas = append(metadatas, h.c.metadata)
-	}
-	return
+func (*walletMock) WalletPrepareRenew(context.Context, types.FileContractRevision, types.Address, types.Address, types.PrivateKey, types.Currency, types.Currency, rhpv3.HostPriceTable, uint64, uint64, uint64) (api.WalletPrepareRenewResponse, error) {
	return api.WalletPrepareRenewResponse{}, nil
 }
 
-func (w *mockWorker) newHostKey() (hk types.PublicKey) {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-	w.hkCntr++
-	hk = types.PublicKey{byte(w.hkCntr)}
-	return
+func (*walletMock) WalletSign(context.Context, *types.Transaction, []types.Hash256, types.CoveredFields) error {
+	return nil
 }
 
-func (w *mockWorker) newFileContractID() (fcid types.FileContractID) {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-	w.fcidCntr++
-	fcid = types.FileContractID{byte(w.fcidCntr)}
-	return
-}
+var _ webhooks.Broadcaster = (*webhookBroadcasterMock)(nil)
 
-func newTestHostPriceTable(expiry time.Time) hostdb.HostPriceTable {
-	var uid rhpv3.SettingsID
-	frand.Read(uid[:])
+type webhookBroadcasterMock struct{}
 
-	return hostdb.HostPriceTable{
-		HostPriceTable: rhpv3.HostPriceTable{UID: uid, Validity: time.Minute},
-		Expiry:         expiry,
-	}
+func (*webhookBroadcasterMock) BroadcastAction(context.Context, webhooks.Event) error {
+	return nil
 }
diff --git a/worker/pricetables_test.go b/worker/pricetables_test.go
index 115abcd31..5e0616092 100644
--- a/worker/pricetables_test.go
+++ b/worker/pricetables_test.go
@@ -3,90 +3,55 @@ package worker
 import (
 	"context"
 	"errors"
-	"sync"
 	"testing"
 	"time"
 
-	"go.sia.tech/core/types"
 	"go.sia.tech/renterd/hostdb"
 )
 
-var (
-	errHostNotFound = errors.New("host not found")
-)
-
-var (
-	_ HostStore = (*mockHostStore)(nil)
-)
-
-type mockHostStore struct {
-	mu    sync.Mutex
-	hosts map[types.PublicKey]hostdb.HostInfo
-}
-
-func (mhs *mockHostStore) Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) {
-	mhs.mu.Lock()
-	defer mhs.mu.Unlock()
-
-	h, ok := mhs.hosts[hostKey]
-	if !ok {
-		return hostdb.HostInfo{}, errHostNotFound
-	}
-	return h, nil
-}
-
-func newMockHostStore(hosts []*hostdb.HostInfo) *mockHostStore {
-	hs := &mockHostStore{hosts: make(map[types.PublicKey]hostdb.HostInfo)}
-	for _, h := range hosts {
-		hs.hosts[h.PublicKey] = *h
-	}
-	return hs
-}
-
 func TestPriceTables(t *testing.T) {
-	// create two price tables, a valid one and one that expired
-	expiredPT := newTestHostPriceTable(time.Now())
-	validPT := newTestHostPriceTable(time.Now().Add(time.Minute))
-
-	// create host manager
-	hm := newMockHostManager()
-
-	// create a mock host that has a valid price table
-	hk1 := types.PublicKey{1}
-	h1 := hm.newHost(hk1)
-	h1.hpt = validPT
-
-	// create a hostdb entry for that host that returns the expired price table
-	hdb1 := &hostdb.HostInfo{
-		Host: hostdb.Host{
-			PublicKey:  hk1,
-			PriceTable: expiredPT,
-			Scanned:    true,
-		},
-	}
-
-	// create host store
-	hs := newMockHostStore([]*hostdb.HostInfo{hdb1})
+	// create host & contract stores
+	hs := newHostStoreMock()
+	cs := newContractStoreMock()
 
-	// create price tables
+	// create host manager & price table
+	hm := newTestHostManager(t)
 	pts := newPriceTables(hm, hs)
 
-	// fetch the price table in a goroutine, make it blocking
-	h1.hptBlockChan = make(chan struct{})
-	go pts.fetch(context.Background(), hk1, nil)
+	// create host & contract mock
+	h := hs.addHost()
+	c := cs.addContract(h.hk)
+
+	// expire its price table
+	expiredPT := newTestHostPriceTable()
+	expiredPT.Expiry = time.Now()
+	h.hi.PriceTable = expiredPT
+
+	// manage the host, make sure fetching the price table blocks
+	fetchPTBlockChan := make(chan struct{})
+	validPT := newTestHostPriceTable()
+	hm.addHost(newTestHostCustom(h, c, func() hostdb.HostPriceTable {
+		<-fetchPTBlockChan
+		return validPT
+	}))
+
+	// trigger a fetch to make it block
+	go pts.fetch(context.Background(), h.hk, nil)
 	time.Sleep(50 * time.Millisecond)
 
-	// fetch it again but with a canceled context to avoid blocking indefinitely, the error will indicate we were blocking on a price table update
+	// fetch it again but with a canceled context to avoid blocking
+	// indefinitely, the error will indicate we were blocking on a price table
+	// update
 	ctx, cancel := context.WithCancel(context.Background())
 	cancel()
-	_, err := pts.fetch(ctx, hk1, nil)
+	_, err := pts.fetch(ctx, h.hk, nil)
 	if !errors.Is(err, errPriceTableUpdateTimedOut) {
 		t.Fatal("expected errPriceTableUpdateTimedOut, got", err)
 	}
 
 	// unblock and assert we receive a valid price table
-	close(h1.hptBlockChan)
-	update, err := pts.fetch(context.Background(), hk1, nil)
+	close(fetchPTBlockChan)
+	update, err := pts.fetch(context.Background(), h.hk, nil)
 	if err != nil {
 		t.Fatal(err)
 	} else if update.UID != validPT.UID {
@@ -95,8 +60,8 @@ func TestPriceTables(t *testing.T) {
 
 	// refresh the price table on the host, update again, assert we receive the
 	// same price table as it hasn't expired yet
-	h1.hpt = newTestHostPriceTable(time.Now().Add(time.Minute))
-	update, err = pts.fetch(context.Background(), hk1, nil)
+	h.hi.PriceTable = newTestHostPriceTable()
+	update, err = pts.fetch(context.Background(), h.hk, nil)
 	if err != nil {
 		t.Fatal(err)
 	} else if update.UID != validPT.UID {
@@ -104,13 +69,13 @@ func TestPriceTables(t *testing.T) {
 	}
 
 	// manually expire the price table
-	pts.priceTables[hk1].hpt.Expiry = time.Now()
+	pts.priceTables[h.hk].hpt.Expiry = time.Now()
 
 	// fetch it again and assert we updated the price table
-	update, err = pts.fetch(context.Background(), hk1, nil)
+	update, err = pts.fetch(context.Background(), h.hk, nil)
 	if err != nil {
 		t.Fatal(err)
-	} else if update.UID != h1.hpt.UID {
+	} else if update.UID != h.hi.PriceTable.UID {
 		t.Fatal("price table mismatch")
 	}
 }
diff --git a/worker/upload.go b/worker/upload.go
index 72c65bf07..6048661e3 100644
--- a/worker/upload.go
+++ b/worker/upload.go
@@ -39,6 +39,7 @@ type (
 		hm     HostManager
 		mm     MemoryManager
 		os     ObjectStore
+		cl     ContractLocker
 		cs     ContractStore
 		logger *zap.SugaredLogger
 
@@ -148,8 +149,8 @@ func (w *worker) initUploadManager(maxMemory, maxOverdrive uint64, overdriveTime
 		panic("upload manager already initialized") // developer error
 	}
 
-	mm := newMemoryManager(logger, maxMemory)
-	w.uploadManager = newUploadManager(w.shutdownCtx, w, mm, w.bus, w.bus, maxOverdrive, overdriveTimeout, w.contractLockingDuration, logger)
+	mm := newMemoryManager(logger.Named("memorymanager"), maxMemory)
+	w.uploadManager = newUploadManager(w.shutdownCtx, w, mm, w.bus, w.bus, w.bus, maxOverdrive, overdriveTimeout, w.contractLockingDuration, logger)
 }
 
 func (w *worker) upload(ctx context.Context, r io.Reader, contracts []api.ContractMetadata, up uploadParameters, opts ...UploadOption) (_ string, err error) {
@@ -314,11 +315,12 @@ func (w *worker) uploadPackedSlab(ctx context.Context, rs api.RedundancySettings
 	return nil
 }
 
-func newUploadManager(ctx context.Context, hm HostManager, mm MemoryManager, os ObjectStore, cs ContractStore, maxOverdrive uint64, overdriveTimeout time.Duration, contractLockDuration time.Duration, logger *zap.SugaredLogger) *uploadManager {
+func newUploadManager(ctx context.Context, hm HostManager, mm MemoryManager, os ObjectStore, cl ContractLocker, cs ContractStore, maxOverdrive uint64, overdriveTimeout time.Duration, contractLockDuration time.Duration, logger *zap.SugaredLogger) *uploadManager {
 	return &uploadManager{
 		hm:     hm,
 		mm:     mm,
 		os:     os,
+		cl:     cl,
 		cs:     cs,
 		logger: logger,
 
@@ -336,9 +338,10 @@ func newUploadManager(ctx context.Context, hm HostManager, mm MemoryManager, os
 	}
 }
 
-func (mgr *uploadManager) newUploader(os ObjectStore, cs ContractStore, hm HostManager, c api.ContractMetadata) *uploader {
+func (mgr *uploadManager) newUploader(os ObjectStore, cl ContractLocker, cs ContractStore, hm HostManager, c api.ContractMetadata) *uploader {
 	return &uploader{
 		os:     os,
+		cl:     cl,
 		cs:     cs,
 		hm:     hm,
 		logger: mgr.logger,
@@ -751,7 +754,7 @@ func (mgr *uploadManager) refreshUploaders(contracts []api.ContractMetadata, bh
 	// add missing uploaders
 	for _, c := range contracts {
 		if _, exists := existing[c.ID]; !exists && bh < c.WindowEnd {
-			uploader := mgr.newUploader(mgr.os, mgr.cs, mgr.hm, c)
+			uploader := mgr.newUploader(mgr.os, mgr.cl, mgr.cs, mgr.hm, c)
 			refreshed = append(refreshed, uploader)
 			go uploader.Start()
 		}
diff --git a/worker/upload_test.go b/worker/upload_test.go
index 0b9f6b28b..e8953db37 100644
--- a/worker/upload_test.go
+++ b/worker/upload_test.go
@@ -14,26 +14,23 @@ import (
 	"lukechampine.com/frand"
 )
 
-const (
-	testBucket      = "testbucket"
-	testContractSet = "testcontractset"
-)
-
 var (
+	testBucket             = "testbucket"
+	testContractSet        = "testcontractset"
 	testRedundancySettings = api.RedundancySettings{MinShards: 2, TotalShards: 6}
 )
 
 func TestUpload(t *testing.T) {
-	// mock worker
-	w := newMockWorker()
+	// create test worker
+	w := newTestWorker(t)
 
 	// add hosts to worker
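 	// (twice as many hosts as the redundancy settings require, so the test
 	// always has spare hosts and contracts to work with)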
 	w.addHosts(testRedundancySettings.TotalShards * 2)
 
 	// convenience variables
 	os := w.os
-	dl := w.dl
-	ul := w.ul
+	dl := w.downloadManager
+	ul := w.uploadManager
 
 	// create test data
 	data := make([]byte, 128)
@@ -115,7 +112,7 @@ func TestUpload(t *testing.T) {
 	// try and upload into a bucket that does not exist
 	params.bucket = "doesnotexist"
 	_, _, err = ul.Upload(context.Background(), bytes.NewReader(data), w.contracts(), params, lockingPriorityUpload)
-	if !errors.Is(err, errBucketNotFound) {
+	if !errors.Is(err, api.ErrBucketNotFound) {
 		t.Fatal("expected bucket not found error", err)
 	}
 
@@ -129,17 +126,17 @@ func TestUpload(t *testing.T) {
 }
 
 func TestUploadPackedSlab(t *testing.T) {
-	// mock worker
-	w := newMockWorker()
+	// create test worker
+	w := newTestWorker(t)
 
 	// add hosts to worker
 	w.addHosts(testRedundancySettings.TotalShards * 2)
 
 	// convenience variables
 	os := w.os
-	mm := w.mm
-	dl := w.dl
-	ul := w.ul
+	mm := w.ulmm
+	dl := w.downloadManager
+	ul := w.uploadManager
 
 	// create test data
 	data := make([]byte, 128)
@@ -215,17 +212,17 @@ func TestUploadPackedSlab(t *testing.T) {
 }
 
 func TestUploadShards(t *testing.T) {
-	// mock worker
-	w := newMockWorker()
+	// create test worker
+	w := newTestWorker(t)
 
 	// add hosts to worker
 	w.addHosts(testRedundancySettings.TotalShards * 2)
 
 	// convenience variables
 	os := w.os
-	mm := w.mm
-	dl := w.dl
-	ul := w.ul
+	mm := w.ulmm
+	dl := w.downloadManager
+	ul := w.uploadManager
 
 	// create test data
 	data := make([]byte, 128)
@@ -334,16 +331,16 @@ func TestUploadShards(t *testing.T) {
 }
 
 func TestRefreshUploaders(t *testing.T) {
-	// mock worker
-	w := newMockWorker()
+	// create test worker
+	w := newTestWorker(t)
 
 	// add hosts to worker
 	w.addHosts(testRedundancySettings.TotalShards)
 
 	// convenience variables
-	ul := w.ul
-	hm := w.hm
+	ul := w.uploadManager
 	cs := w.cs
+	hm := w.hm
 
 	// create test data
 	data := make([]byte, 128)
@@ -356,7 +353,7 @@ func TestRefreshUploaders(t *testing.T) {
 
 	// upload data
 	contracts := w.contracts()
-	_, _, err := ul.Upload(context.Background(), bytes.NewReader(data), contracts, params, lockingPriorityUpload)
+	_, err := w.upload(context.Background(), bytes.NewReader(data), contracts, params)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -373,7 +370,7 @@ func TestRefreshUploaders(t *testing.T) {
 	// remove the host from the second contract
 	c2 := contracts[1]
 	delete(hm.hosts, c2.HostKey)
-	delete(cs.locks, c2.ID)
+	delete(cs.contracts, c2.ID)
 
 	// add a new host/contract
 	hNew := w.addHost()
@@ -389,7 +386,7 @@ func TestRefreshUploaders(t *testing.T) {
 	var added, renewed int
 	for _, ul := range ul.uploaders {
 		switch ul.ContractID() {
-		case hNew.c.metadata.ID:
+		case hNew.metadata.ID:
 			added++
 		case c1Renewed.metadata.ID:
 			renewed++
@@ -410,7 +407,7 @@ func TestRefreshUploaders(t *testing.T) {
 	// manually add a request to the queue of one of the uploaders we're about to expire
 	responseChan := make(chan sectorUploadResp, 1)
 	for _, ul := range ul.uploaders {
-		if ul.fcid == hNew.c.metadata.ID {
+		if ul.fcid == hNew.metadata.ID {
 			ul.mu.Lock()
 			ul.queue = append(ul.queue, &sectorUploadReq{responseChan: responseChan, sector: &sectorUpload{ctx: context.Background()}})
 			ul.mu.Unlock()
@@ -436,17 +433,17 @@ func TestRefreshUploaders(t *testing.T) {
 }
 
 func TestUploadRegression(t *testing.T) {
-	// mock worker
-	w := newMockWorker()
+	// create test worker
+	w := newTestWorker(t)
 
 	// add hosts to worker
 	w.addHosts(testRedundancySettings.TotalShards)
 
 	// convenience variables
-	mm := w.mm
 	os := w.os
-	ul := w.ul
-	dl := w.dl
+	mm := w.ulmm
+	ul := w.uploadManager
+	dl := w.downloadManager
 
 	// create test data
 	data := make([]byte, 128)
diff --git a/worker/uploader.go b/worker/uploader.go
index dcff27eaf..3791f8b27 100644
--- a/worker/uploader.go
+++ b/worker/uploader.go
@@ -27,6 +27,7 @@ type (
 	uploader struct {
 		os     ObjectStore
 		cs     ContractStore
+		cl     ContractLocker
 		hm     HostManager
 		logger *zap.SugaredLogger
 
@@ -200,13 +201,13 @@ func (u *uploader) execute(req *sectorUploadReq) (types.Hash256, time.Duration,
 	u.mu.Unlock()
 
 	// acquire contract lock
-	lockID, err := u.cs.AcquireContract(req.sector.ctx, fcid, req.contractLockPriority, req.contractLockDuration)
+	lockID, err := u.cl.AcquireContract(req.sector.ctx, fcid, req.contractLockPriority, req.contractLockDuration)
 	if err != nil {
 		return types.Hash256{}, 0, err
 	}
 
 	// defer the release
-	lock := newContractLock(u.shutdownCtx, fcid, lockID, req.contractLockDuration, u.cs, u.logger)
+	lock := newContractLock(u.shutdownCtx, fcid, lockID, req.contractLockDuration, u.cl, u.logger)
 	defer func() {
 		ctx, cancel := context.WithTimeout(u.shutdownCtx, 10*time.Second)
 		lock.Release(ctx)
diff --git a/worker/uploader_test.go b/worker/uploader_test.go
index 7217cbaab..514d17aab 100644
--- a/worker/uploader_test.go
+++ b/worker/uploader_test.go
@@ -8,11 +8,13 @@ import (
 )
 
 func TestUploaderStopped(t *testing.T) {
-	w := newMockWorker()
-	w.addHost()
-	w.ul.refreshUploaders(w.contracts(), 1)
+	w := newTestWorker(t)
+	w.addHosts(1)
 
-	ul := w.ul.uploaders[0]
+	um := w.uploadManager
+	um.refreshUploaders(w.contracts(), 1)
+
+	ul := um.uploaders[0]
 	ul.Stop(errors.New("test"))
 
 	req := sectorUploadReq{
diff --git a/worker/worker.go b/worker/worker.go
index 17faca7cb..a1a7dbca7 100644
--- a/worker/worker.go
+++ b/worker/worker.go
@@ -69,44 +69,21 @@ func NewClient(address, password string) *Client {
 type (
 	Bus interface {
 		alerts.Alerter
-		consensusState
+		ConsensusState
 		webhooks.Broadcaster
 
 		AccountStore
+		ContractLocker
 		ContractStore
+		HostStore
 		ObjectStore
+		SettingStore
 
-		BroadcastTransaction(ctx context.Context, txns []types.Transaction) error
-		SyncerPeers(ctx context.Context) (resp []string, err error)
-
-		Contract(ctx context.Context, id types.FileContractID) (api.ContractMetadata, error)
-		ContractSize(ctx context.Context, id types.FileContractID) (api.ContractSize, error)
-		ContractRoots(ctx context.Context, id types.FileContractID) ([]types.Hash256, []types.Hash256, error)
-		Contracts(ctx context.Context, opts api.ContractsOpts) ([]api.ContractMetadata, error)
-
-		RecordHostScans(ctx context.Context, scans []hostdb.HostScan) error
-		RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error
-		RecordContractSpending(ctx context.Context, records []api.ContractSpendingRecord) error
-
-		Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error)
-
-		GougingParams(ctx context.Context) (api.GougingParams, error)
-		UploadParams(ctx context.Context) (api.UploadParams, error)
-
-		Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (api.ObjectsResponse, error)
-		DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) error
-		MultipartUpload(ctx context.Context, uploadID string) (resp api.MultipartUpload, err error)
-		PackedSlabsForUpload(ctx context.Context, lockingDuration time.Duration, minShards, totalShards uint8, set string, limit int) ([]api.PackedSlab, error)
-
-		WalletDiscard(ctx context.Context, txn types.Transaction) error
-		WalletFund(ctx context.Context, txn *types.Transaction, amount types.Currency, useUnconfirmedTxns bool) ([]types.Hash256, []types.Transaction, error)
-		WalletPrepareForm(ctx context.Context, renterAddress types.Address, renterKey types.PublicKey, renterFunds, hostCollateral types.Currency, hostKey types.PublicKey, hostSettings rhpv2.HostSettings, endHeight uint64) (txns []types.Transaction, err error)
-		WalletPrepareRenew(ctx context.Context, revision types.FileContractRevision, hostAddress, renterAddress types.Address, renterKey types.PrivateKey, renterFunds, minNewCollateral types.Currency, pt rhpv3.HostPriceTable, endHeight, windowSize, expectedStorage uint64) (api.WalletPrepareRenewResponse, error)
-		WalletSign(ctx context.Context, txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) error
-
-		Bucket(_ context.Context, bucket string) (api.Bucket, error)
+		Syncer
+		Wallet
 	}
 
+	// An AccountStore manages ephemeral accounts state.
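+	// It is implemented by the bus.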
AccountStore interface { Accounts(ctx context.Context) ([]api.Account, error) AddBalance(ctx context.Context, id rhpv3.Account, hk types.PublicKey, amt *big.Int) error @@ -120,11 +97,21 @@ type ( } ContractStore interface { - ContractLocker - + Contract(ctx context.Context, id types.FileContractID) (api.ContractMetadata, error) + ContractSize(ctx context.Context, id types.FileContractID) (api.ContractSize, error) + ContractRoots(ctx context.Context, id types.FileContractID) ([]types.Hash256, []types.Hash256, error) + Contracts(ctx context.Context, opts api.ContractsOpts) ([]api.ContractMetadata, error) RenewedContract(ctx context.Context, renewedFrom types.FileContractID) (api.ContractMetadata, error) } + HostStore interface { + RecordHostScans(ctx context.Context, scans []hostdb.HostScan) error + RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error + RecordContractSpending(ctx context.Context, records []api.ContractSpendingRecord) error + + Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) + } + ObjectStore interface { // NOTE: used for download DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) error @@ -140,9 +127,34 @@ type ( MarkPackedSlabsUploaded(ctx context.Context, slabs []api.UploadedPackedSlab) error TrackUpload(ctx context.Context, uID api.UploadID) error UpdateSlab(ctx context.Context, s object.Slab, contractSet string) error + + // NOTE: used by worker + Bucket(_ context.Context, bucket string) (api.Bucket, error) + Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (api.ObjectsResponse, error) + DeleteObject(ctx context.Context, bucket, path string, opts api.DeleteObjectOptions) error + MultipartUpload(ctx context.Context, uploadID string) (resp api.MultipartUpload, err error) + PackedSlabsForUpload(ctx context.Context, lockingDuration time.Duration, minShards, totalShards uint8, set string, limit int) ([]api.PackedSlab, error) } - consensusState interface { + SettingStore interface { + GougingParams(ctx context.Context) (api.GougingParams, error) + UploadParams(ctx context.Context) (api.UploadParams, error) + } + + Syncer interface { + BroadcastTransaction(ctx context.Context, txns []types.Transaction) error + SyncerPeers(ctx context.Context) (resp []string, err error) + } + + Wallet interface { + WalletDiscard(ctx context.Context, txn types.Transaction) error + WalletFund(ctx context.Context, txn *types.Transaction, amount types.Currency, useUnconfirmedTxns bool) ([]types.Hash256, []types.Transaction, error) + WalletPrepareForm(ctx context.Context, renterAddress types.Address, renterKey types.PublicKey, renterFunds, hostCollateral types.Currency, hostKey types.PublicKey, hostSettings rhpv2.HostSettings, endHeight uint64) (txns []types.Transaction, err error) + WalletPrepareRenew(ctx context.Context, revision types.FileContractRevision, hostAddress, renterAddress types.Address, renterKey types.PrivateKey, renterFunds, minNewCollateral types.Currency, pt rhpv3.HostPriceTable, endHeight, windowSize, expectedStorage uint64) (api.WalletPrepareRenewResponse, error) + WalletSign(ctx context.Context, txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) error + } + + ConsensusState interface { ConsensusState(ctx context.Context) (api.ConsensusState, error) } ) @@ -183,7 +195,8 @@ func (w *worker) deriveRenterKey(hostKey types.PublicKey) types.PrivateKey { // A worker talks to Sia hosts to perform contract and storage operations within // a renterd 
system. type worker struct { - alerts alerts.Alerter + alerts alerts.Alerter + allowPrivateIPs bool id string bus Bus @@ -1274,7 +1287,7 @@ func (w *worker) stateHandlerGET(jc jape.Context) { } // New returns an HTTP handler that serves the worker API. -func New(masterKey [32]byte, id string, b Bus, contractLockingDuration, busFlushInterval, downloadOverdriveTimeout, uploadOverdriveTimeout time.Duration, downloadMaxOverdrive, downloadMaxMemory, uploadMaxMemory, uploadMaxOverdrive uint64, allowPrivateIPs bool, l *zap.Logger) (*worker, error) { +func New(masterKey [32]byte, id string, b Bus, contractLockingDuration, busFlushInterval, downloadOverdriveTimeout, uploadOverdriveTimeout time.Duration, downloadMaxOverdrive, uploadMaxOverdrive, downloadMaxMemory, uploadMaxMemory uint64, allowPrivateIPs bool, l *zap.Logger) (*worker, error) { if contractLockingDuration == 0 { return nil, errors.New("contract lock duration must be positive") } @@ -1294,6 +1307,7 @@ func New(masterKey [32]byte, id string, b Bus, contractLockingDuration, busFlush return nil, errors.New("uploadMaxMemory cannot be 0") } + l = l.Named("worker").Named(id) ctx, cancel := context.WithCancel(context.Background()) w := &worker{ alerts: alerts.WithOrigin(b, fmt.Sprintf("worker.%s", id)), @@ -1302,7 +1316,7 @@ func New(masterKey [32]byte, id string, b Bus, contractLockingDuration, busFlush id: id, bus: b, masterKey: masterKey, - logger: l.Sugar().Named("worker").Named(id), + logger: l.Sugar(), startTime: time.Now(), uploadingPackedSlabs: make(map[string]bool), shutdownCtx: ctx, @@ -1313,8 +1327,8 @@ func New(masterKey [32]byte, id string, b Bus, contractLockingDuration, busFlush w.initPriceTables() w.initTransportPool() - w.initDownloadManager(downloadMaxMemory, downloadMaxOverdrive, downloadOverdriveTimeout, l.Sugar().Named("downloadmanager")) - w.initUploadManager(uploadMaxMemory, uploadMaxOverdrive, uploadOverdriveTimeout, l.Sugar().Named("uploadmanager")) + w.initDownloadManager(downloadMaxMemory, downloadMaxOverdrive, downloadOverdriveTimeout, l.Named("downloadmanager").Sugar()) + w.initUploadManager(uploadMaxMemory, uploadMaxOverdrive, uploadOverdriveTimeout, l.Named("uploadmanager").Sugar()) w.initContractSpendingRecorder(busFlushInterval) return w, nil diff --git a/worker/worker_test.go b/worker/worker_test.go new file mode 100644 index 000000000..baa47db5b --- /dev/null +++ b/worker/worker_test.go @@ -0,0 +1,109 @@ +package worker + +import ( + "context" + "testing" + "time" + + rhpv2 "go.sia.tech/core/rhp/v2" + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" + "go.uber.org/zap" + "golang.org/x/crypto/blake2b" + "lukechampine.com/frand" +) + +type ( + testWorker struct { + t *testing.T + *worker + + cs *contractStoreMock + os *objectStoreMock + hs *hostStoreMock + + dlmm *memoryManagerMock + ulmm *memoryManagerMock + + hm *testHostManager + } +) + +func newTestWorker(t *testing.T) *testWorker { + // create bus dependencies + cs := newContractStoreMock() + os := newObjectStoreMock(testBucket) + hs := newHostStoreMock() + + // create worker dependencies + b := newBusMock(cs, hs, os) + dlmm := &memoryManagerMock{} + ulmm := &memoryManagerMock{} + + // create worker + w, err := New(blake2b.Sum256([]byte("testwork")), "test", b, time.Second, time.Second, time.Second, time.Second, 0, 0, 1, 1, false, zap.NewNop()) + if err != nil { + t.Fatal(err) + } + + // override managers + hm := newTestHostManager(t) + w.priceTables.hm = hm + w.downloadManager.hm = hm + w.downloadManager.mm = dlmm + w.uploadManager.hm = hm + 
w.uploadManager.mm = ulmm + + return &testWorker{ + t, + w, + cs, + os, + hs, + ulmm, + dlmm, + hm, + } +} + +func (w *testWorker) addHosts(n int) (added []*testHost) { + for i := 0; i < n; i++ { + added = append(added, w.addHost()) + } + return +} + +func (w *testWorker) addHost() *testHost { + h := w.hs.addHost() + c := w.cs.addContract(h.hk) + host := newTestHost(h, c) + w.hm.addHost(host) + return host +} + +func (w *testWorker) contracts() []api.ContractMetadata { + metadatas, err := w.cs.Contracts(context.Background(), api.ContractsOpts{}) + if err != nil { + w.t.Fatal(err) + } + return metadatas +} + +func (w *testWorker) renewContract(hk types.PublicKey) *contractMock { + h := w.hm.hosts[hk] + if h == nil { + w.t.Fatal("host not found") + } + + renewal, err := w.cs.renewContract(hk) + if err != nil { + w.t.Fatal(err) + } + return renewal +} + +func newTestSector() (*[rhpv2.SectorSize]byte, types.Hash256) { + var sector [rhpv2.SectorSize]byte + frand.Read(sector[:]) + return §or, rhpv2.SectorRoot(§or) +} From 8baca5b4406c002301c3dc57a4f912a7fe758ec2 Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 26 Feb 2024 15:13:37 +0100 Subject: [PATCH 105/172] worker: fix lint --- worker/mocks_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/worker/mocks_test.go b/worker/mocks_test.go index e38bd64c0..80c1d5443 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -55,6 +55,9 @@ var _ alerts.Alerter = (*alerterMock)(nil) type alerterMock struct{} +func (*alerterMock) Alerts(_ context.Context, opts alerts.AlertsOpts) (resp alerts.AlertsResponse, err error) { + return alerts.AlertsResponse{}, nil +} func (*alerterMock) RegisterAlert(context.Context, alerts.Alert) error { return nil } func (*alerterMock) DismissAlerts(context.Context, ...types.Hash256) error { return nil } From 145b505e20b46c8ee55bde8392f14e8ec703a2ff Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 26 Feb 2024 15:30:01 +0100 Subject: [PATCH 106/172] worker: extend test worker with a way to block uploads --- worker/memory.go | 1 - worker/mocks_test.go | 12 ++++++++---- worker/upload_test.go | 10 ++++------ worker/worker_test.go | 18 +++++++++++++++--- 4 files changed, 27 insertions(+), 14 deletions(-) diff --git a/worker/memory.go b/worker/memory.go index 8b1c7cb5e..1dbd680ec 100644 --- a/worker/memory.go +++ b/worker/memory.go @@ -151,7 +151,6 @@ func (lmm *limitMemoryManager) AcquireMemory(ctx context.Context, amt uint64) Me childMem.Release() return nil } - return &limitAcquiredMemory{ child: childMem, parent: parentMem, diff --git a/worker/mocks_test.go b/worker/mocks_test.go index 80c1d5443..0720565db 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -322,19 +322,23 @@ type ( memoryManagerMock struct{ memBlockChan chan struct{} } ) +func newMemoryManagerMock() *memoryManagerMock { + mm := &memoryManagerMock{memBlockChan: make(chan struct{})} + close(mm.memBlockChan) + return mm +} + func (m *memoryMock) Release() {} func (m *memoryMock) ReleaseSome(uint64) {} func (mm *memoryManagerMock) Limit(amt uint64) (MemoryManager, error) { - return &memoryManagerMock{}, nil + return mm, nil } func (mm *memoryManagerMock) Status() api.MemoryStatus { return api.MemoryStatus{} } func (mm *memoryManagerMock) AcquireMemory(ctx context.Context, amt uint64) Memory { - if mm.memBlockChan != nil { - <-mm.memBlockChan - } + <-mm.memBlockChan return &memoryMock{} } diff --git a/worker/upload_test.go b/worker/upload_test.go index e8953db37..cc0996519 100644 --- a/worker/upload_test.go +++ b/worker/upload_test.go @@ 
-441,8 +441,6 @@ func TestUploadRegression(t *testing.T) { // convenience variables os := w.os - mm := w.ulmm - ul := w.uploadManager dl := w.downloadManager // create test data @@ -455,21 +453,21 @@ func TestUploadRegression(t *testing.T) { params := testParameters(t.Name()) // make sure the memory manager blocks - mm.memBlockChan = make(chan struct{}) + unblock := w.blockUploads() // upload data ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - _, _, err := ul.Upload(ctx, bytes.NewReader(data), w.contracts(), params, lockingPriorityUpload) + _, err := w.upload(ctx, bytes.NewReader(data), w.contracts(), params) if !errors.Is(err, errUploadInterrupted) { t.Fatal(err) } // unblock the memory manager - close(mm.memBlockChan) + unblock() // upload data - _, _, err = ul.Upload(context.Background(), bytes.NewReader(data), w.contracts(), params, lockingPriorityUpload) + _, err = w.upload(context.Background(), bytes.NewReader(data), w.contracts(), params) if err != nil { t.Fatal(err) } diff --git a/worker/worker_test.go b/worker/worker_test.go index baa47db5b..c1a19aed1 100644 --- a/worker/worker_test.go +++ b/worker/worker_test.go @@ -37,8 +37,8 @@ func newTestWorker(t *testing.T) *testWorker { // create worker dependencies b := newBusMock(cs, hs, os) - dlmm := &memoryManagerMock{} - ulmm := &memoryManagerMock{} + dlmm := newMemoryManagerMock() + ulmm := newMemoryManagerMock() // create worker w, err := New(blake2b.Sum256([]byte("testwork")), "test", b, time.Second, time.Second, time.Second, time.Second, 0, 0, 1, 1, false, zap.NewNop()) @@ -60,8 +60,8 @@ func newTestWorker(t *testing.T) *testWorker { cs, os, hs, - ulmm, dlmm, + ulmm, hm, } } @@ -81,6 +81,18 @@ func (w *testWorker) addHost() *testHost { return host } +func (w *testWorker) blockUploads() func() { + select { + case <-w.ulmm.memBlockChan: + case <-time.After(time.Second): + w.t.Fatal("already blocking") + } + + blockChan := make(chan struct{}) + w.ulmm.memBlockChan = blockChan + return func() { close(blockChan) } +} + func (w *testWorker) contracts() []api.ContractMetadata { metadatas, err := w.cs.Contracts(context.Background(), api.ContractsOpts{}) if err != nil { From 0ef1757537003c419df3909b925d9f1d76f064a9 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 27 Feb 2024 10:55:58 +0100 Subject: [PATCH 107/172] bus: filter by alert severity --- alerts/alerts.go | 55 +++++++++++++++++++++----------- bus/bus.go | 9 +++++- bus/client/alerts.go | 3 ++ internal/testing/cluster_test.go | 38 ++++++++++++++++++++++ 4 files changed, 86 insertions(+), 19 deletions(-) diff --git a/alerts/alerts.go b/alerts/alerts.go index f11004dbe..a14d460b6 100644 --- a/alerts/alerts.go +++ b/alerts/alerts.go @@ -66,14 +66,19 @@ type ( } AlertsOpts struct { - Offset int - Limit int + Offset int + Limit int + Severity Severity } AlertsResponse struct { - Alerts []Alert `json:"alerts"` - HasMore bool `json:"hasMore"` - Total int `json:"total"` + Alerts []Alert `json:"alerts"` + HasMore bool `json:"hasMore"` + Total int `json:"total"` + TotalInfo int `json:"totalInfo"` + TotalWarning int `json:"totalWarning"` + TotalError int `json:"totalError"` + TotalCritical int `json:"totalCritical"` } ) @@ -93,15 +98,8 @@ func (s Severity) String() string { } } -// MarshalJSON implements the json.Marshaler interface. -func (s Severity) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`%q`, s.String())), nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (s *Severity) UnmarshalJSON(b []byte) error { - status := strings.Trim(string(b), `"`) - switch status { +func (s *Severity) LoadString(str string) error { + switch str { case severityInfoStr: *s = SeverityInfo case severityWarningStr: @@ -111,11 +109,21 @@ func (s *Severity) UnmarshalJSON(b []byte) error { case severityCriticalStr: *s = SeverityCritical default: - return fmt.Errorf("unrecognized severity: %v", status) + return fmt.Errorf("unrecognized severity: %v", str) } return nil } +// MarshalJSON implements the json.Marshaler interface. +func (s Severity) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`%q`, s.String())), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (s *Severity) UnmarshalJSON(b []byte) error { + return s.LoadString(strings.Trim(string(b), `"`)) +} + // RegisterAlert implements the Alerter interface. func (m *Manager) RegisterAlert(ctx context.Context, alert Alert) error { if alert.ID == (types.Hash256{}) { @@ -176,9 +184,7 @@ func (m *Manager) Alerts(_ context.Context, opts AlertsOpts) (AlertsResponse, er defer m.mu.Unlock() offset, limit := opts.Offset, opts.Limit - resp := AlertsResponse{ - Total: len(m.alerts), - } + resp := AlertsResponse{} if offset >= len(m.alerts) { return resp, nil @@ -188,6 +194,19 @@ func (m *Manager) Alerts(_ context.Context, opts AlertsOpts) (AlertsResponse, er alerts := make([]Alert, 0, len(m.alerts)) for _, a := range m.alerts { + resp.Total++ + if a.Severity == SeverityInfo { + resp.TotalInfo++ + } else if a.Severity == SeverityWarning { + resp.TotalWarning++ + } else if a.Severity == SeverityError { + resp.TotalError++ + } else if a.Severity == SeverityCritical { + resp.TotalCritical++ + } + if opts.Severity != 0 && a.Severity != opts.Severity { + continue // filter by severity + } alerts = append(alerts, a) } sort.Slice(alerts, func(i, j int) bool { diff --git a/bus/bus.go b/bus/bus.go index e7e6ddaac..4106bc231 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -1739,6 +1739,7 @@ func (b *bus) handleGETAlerts(jc jape.Context) { return } offset, limit := 0, -1 + var severity alerts.Severity if jc.DecodeForm("offset", &offset) != nil { return } else if jc.DecodeForm("limit", &limit) != nil { @@ -1746,8 +1747,14 @@ func (b *bus) handleGETAlerts(jc jape.Context) { } else if offset < 0 { jc.Error(errors.New("offset must be non-negative"), http.StatusBadRequest) return + } else if jc.DecodeForm("severity", &severity) != nil { + return } - ar, err := b.alertMgr.Alerts(jc.Request.Context(), alerts.AlertsOpts{Offset: offset, Limit: limit}) + ar, err := b.alertMgr.Alerts(jc.Request.Context(), alerts.AlertsOpts{ + Offset: offset, + Limit: limit, + Severity: severity, + }) if jc.Check("failed to fetch alerts", err) != nil { return } diff --git a/bus/client/alerts.go b/bus/client/alerts.go index 7eceaeaed..28c3b9a84 100644 --- a/bus/client/alerts.go +++ b/bus/client/alerts.go @@ -16,6 +16,9 @@ func (c *Client) Alerts(ctx context.Context, opts alerts.AlertsOpts) (resp alert if opts.Limit != 0 { values.Set("limit", fmt.Sprint(opts.Limit)) } + if opts.Severity != 0 { + values.Set("severity", opts.Severity.String()) + } err = c.c.WithContext(ctx).GET("/alerts?"+values.Encode(), &resp) return } diff --git a/internal/testing/cluster_test.go b/internal/testing/cluster_test.go index f30a0906a..69b318f66 100644 --- a/internal/testing/cluster_test.go +++ b/internal/testing/cluster_test.go @@ -1974,6 +1974,44 @@ func TestAlerts(t *testing.T) { if len(foundAlerts) != 1 || foundAlerts[0].ID != alert2.ID { 
t.Fatal("wrong alert") } + + // register more alerts + for severity := alerts.SeverityInfo; severity <= alerts.SeverityCritical; severity++ { + for j := 0; j < 3*int(severity); j++ { + tt.OK(b.RegisterAlert(context.Background(), alerts.Alert{ + ID: frand.Entropy256(), + Severity: severity, + Message: "test", + Data: map[string]interface{}{ + "origin": "test", + }, + Timestamp: time.Now(), + })) + } + } + for severity := alerts.SeverityInfo; severity <= alerts.SeverityCritical; severity++ { + ar, err = b.Alerts(context.Background(), alerts.AlertsOpts{Severity: severity}) + tt.OK(err) + if ar.Total != 32 { + t.Fatal("expected 32 alerts", ar.Total) + } else if ar.TotalInfo != 3 { + t.Fatal("expected 3 info alerts", ar.TotalInfo) + } else if ar.TotalWarning != 6 { + t.Fatal("expected 6 warning alerts", ar.TotalWarning) + } else if ar.TotalError != 9 { + t.Fatal("expected 9 error alerts", ar.TotalError) + } else if ar.TotalCritical != 14 { + t.Fatal("expected 14 critical alerts", ar.TotalCritical) + } else if severity == alerts.SeverityInfo && len(ar.Alerts) != ar.TotalInfo { + t.Fatalf("expected %v info alerts, got %v", ar.TotalInfo, len(ar.Alerts)) + } else if severity == alerts.SeverityWarning && len(ar.Alerts) != ar.TotalWarning { + t.Fatalf("expected %v warning alerts, got %v", ar.TotalWarning, len(ar.Alerts)) + } else if severity == alerts.SeverityError && len(ar.Alerts) != ar.TotalError { + t.Fatalf("expected %v error alerts, got %v", ar.TotalError, len(ar.Alerts)) + } else if severity == alerts.SeverityCritical && len(ar.Alerts) != ar.TotalCritical { + t.Fatalf("expected %v critical alerts, got %v", ar.TotalCritical, len(ar.Alerts)) + } + } } func TestMultipartUploads(t *testing.T) { From b7e14e8b3353cc8c4928b9a677c22a69a8cc6015 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 27 Feb 2024 11:00:00 +0100 Subject: [PATCH 108/172] stores: check for exact value in TestDeleteHostSector --- stores/metadata_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 0b785f6b6..16e104695 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -3584,8 +3584,8 @@ func TestDeleteHostSector(t *testing.T) { // Prune the sector from hk1. if n, err := ss.DeleteHostSector(context.Background(), hk1, root); err != nil { t.Fatal(err) - } else if n == 0 { - t.Fatal("no sectors were pruned") + } else if n != 2 { + t.Fatal("no sectors were pruned", n) } // Make sure 2 contractSector entries exist. 
From 716091ba82e81dfe74449e926b2386fd445d2128 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 27 Feb 2024 12:21:54 +0100 Subject: [PATCH 109/172] worker: extend TestUploadPackedSlab to test sync and async packed slab uploads --- worker/mocks_test.go | 45 ++++++++++++++---- worker/upload.go | 13 +++-- worker/upload_test.go | 108 ++++++++++++++++++++++++++++++++---------- worker/worker.go | 4 +- worker/worker_test.go | 15 ++++++ 5 files changed, 143 insertions(+), 42 deletions(-) diff --git a/worker/mocks_test.go b/worker/mocks_test.go index 0720565db..aefcf65ce 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "math" "math/big" "sync" "time" @@ -346,10 +347,11 @@ var _ ObjectStore = (*objectStoreMock)(nil) type ( objectStoreMock struct { - mu sync.Mutex - objects map[string]map[string]object.Object - partials map[string]packedSlabMock - bufferIDCntr uint // allows marking packed slabs as uploaded + mu sync.Mutex + objects map[string]map[string]object.Object + partials map[string]*packedSlabMock + slabBufferMaxSizeSoft int + bufferIDCntr uint // allows marking packed slabs as uploaded } packedSlabMock struct { @@ -357,13 +359,15 @@ type ( bufferID uint slabKey object.EncryptionKey data []byte + lockedUntil time.Time } ) func newObjectStoreMock(bucket string) *objectStoreMock { os := &objectStoreMock{ - objects: make(map[string]map[string]object.Object), - partials: make(map[string]packedSlabMock), + objects: make(map[string]map[string]object.Object), + partials: make(map[string]*packedSlabMock), + slabBufferMaxSizeSoft: math.MaxInt64, } os.objects[bucket] = make(map[string]object.Object) return os @@ -421,7 +425,7 @@ func (os *objectStoreMock) AddPartialSlab(ctx context.Context, data []byte, minS } // update store - os.partials[ec.String()] = packedSlabMock{ + os.partials[ec.String()] = &packedSlabMock{ parameterKey: fmt.Sprintf("%d-%d-%v", minShards, totalShards, contractSet), bufferID: os.bufferIDCntr, slabKey: ec, @@ -429,7 +433,7 @@ func (os *objectStoreMock) AddPartialSlab(ctx context.Context, data []byte, minS } os.bufferIDCntr++ - return []object.SlabSlice{ss}, false, nil + return []object.SlabSlice{ss}, os.totalSlabBufferSize() > os.slabBufferMaxSizeSoft, nil } func (os *objectStoreMock) Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (api.ObjectsResponse, error) { @@ -511,14 +515,22 @@ func (os *objectStoreMock) PackedSlabsForUpload(ctx context.Context, lockingDura os.mu.Lock() defer os.mu.Unlock() + if limit == -1 { + limit = math.MaxInt + } + parameterKey := fmt.Sprintf("%d-%d-%v", minShards, totalShards, set) for _, ps := range os.partials { - if ps.parameterKey == parameterKey { + if ps.parameterKey == parameterKey && time.Now().After(ps.lockedUntil) { + ps.lockedUntil = time.Now().Add(lockingDuration) pss = append(pss, api.PackedSlab{ BufferID: ps.bufferID, Data: ps.data, Key: ps.slabKey, }) + if len(pss) == limit { + break + } } } return @@ -557,6 +569,21 @@ func (os *objectStoreMock) MultipartUpload(ctx context.Context, uploadID string) return api.MultipartUpload{}, nil } +func (os *objectStoreMock) totalSlabBufferSize() (total int) { + for _, p := range os.partials { + if time.Now().After(p.lockedUntil) { + total += len(p.data) + } + } + return +} + +func (os *objectStoreMock) setSlabBufferMaxSizeSoft(n int) { + os.mu.Lock() + defer os.mu.Unlock() + os.slabBufferMaxSizeSoft = n +} + func (os *objectStoreMock) forEachObject(fn func(bucket, path string, o object.Object)) { for bucket, 
objects := range os.objects {
for path, object := range objects {
diff --git a/worker/upload.go b/worker/upload.go
index 75ade3eec..43ba54e16 100644
--- a/worker/upload.go
+++ b/worker/upload.go
@@ -203,29 +203,28 @@ func (w *worker) upload(ctx context.Context, r io.Reader, contracts []api.Contra
}
}
}
- }
- // make sure there's a goroutine uploading the remainder of the packed slabs
- go w.threadedUploadPackedSlabs(up.rs, up.contractSet, lockingPriorityBackgroundUpload)
+ // make sure there's a goroutine uploading the remainder of the packed slabs
+ go w.threadedUploadPackedSlabs(up.rs, up.contractSet, lockingPriorityBackgroundUpload)
+ }
return eTag, nil
}
func (w *worker) threadedUploadPackedSlabs(rs api.RedundancySettings, contractSet string, lockPriority int) {
key := fmt.Sprintf("%d-%d_%s", rs.MinShards, rs.TotalShards, contractSet)
-
w.uploadsMu.Lock()
- if w.uploadingPackedSlabs[key] {
+ if _, ok := w.uploadingPackedSlabs[key]; ok {
w.uploadsMu.Unlock()
return
}
- w.uploadingPackedSlabs[key] = true
+ w.uploadingPackedSlabs[key] = struct{}{}
w.uploadsMu.Unlock()
// make sure we mark uploading packed slabs as false when we're done
defer func() {
w.uploadsMu.Lock()
- w.uploadingPackedSlabs[key] = false
+ delete(w.uploadingPackedSlabs, key)
w.uploadsMu.Unlock()
}()
diff --git a/worker/upload_test.go b/worker/upload_test.go
index e510d64dd..2f60b6728 100644
--- a/worker/upload_test.go
+++ b/worker/upload_test.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"errors"
+ "math"
"testing"
"time"
@@ -33,10 +34,7 @@ func TestUpload(t *testing.T) {
ul := w.uploadManager
// create test data
- data := make([]byte, 128)
- if _, err := frand.Read(data); err != nil {
- t.Fatal(err)
- }
+ data := testData(128)
// create upload params
params := testParameters(t.Name())
@@ -130,7 +128,7 @@ func TestUploadPackedSlab(t *testing.T) {
w := newTestWorker(t)
// add hosts to worker
- w.addHosts(testRedundancySettings.TotalShards * 2)
+ w.addHosts(testRedundancySettings.TotalShards)
// convenience variables
os := w.os
@@ -138,16 +136,16 @@ func TestUploadPackedSlab(t *testing.T) {
dl := w.downloadManager
ul := w.uploadManager
- // create test data
- data := make([]byte, 128)
- if _, err := frand.Read(data); err != nil {
- t.Fatal(err)
- }
-
// create upload params
params := testParameters(t.Name())
params.packing = true
+ // block async packed slab uploads
+ w.blockAsyncPackedSlabUploads(params)
+
+ // create test data
+ data := testData(128)
+
// upload data
_, _, err := ul.Upload(context.Background(), bytes.NewReader(data), w.contracts(), params, lockingPriorityUpload)
if err != nil {
@@ -182,9 +180,9 @@
t.Fatal("expected 1 packed slab")
}
ps := pss[0]
- mem := mm.AcquireMemory(context.Background(), params.rs.SlabSize())
// upload the packed slab
+ mem := mm.AcquireMemory(context.Background(), params.rs.SlabSize())
err = ul.UploadPackedSlab(context.Background(), params.rs, ps, mem, w.contracts(), 0, lockingPriorityUpload)
if err != nil {
t.Fatal(err)
@@ -209,6 +207,69 @@
} else if !bytes.Equal(data, buf.Bytes()) {
t.Fatal("data mismatch")
}
+
+ // configure max buffer size
+ os.setSlabBufferMaxSizeSoft(128)
+
+ // upload 2x64 bytes using the worker
+ params.path = t.Name() + "2"
+ _, err = w.upload(context.Background(), bytes.NewReader(testData(64)), w.contracts(), params)
+ if err != nil {
+ t.Fatal(err)
+ }
+ params.path = t.Name() + "3"
+ _, err = w.upload(context.Background(), bytes.NewReader(testData(64)), w.contracts(),
params)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // assert we still have two packed slabs (buffer limit not reached)
+ pss, err = os.PackedSlabsForUpload(context.Background(), 0, uint8(params.rs.MinShards), uint8(params.rs.TotalShards), testContractSet, math.MaxInt)
+ if err != nil {
+ t.Fatal(err)
+ } else if len(pss) != 2 {
+ t.Fatal("expected 2 packed slabs")
+ }
+
+ // upload one more byte (buffer limit reached)
+ params.path = t.Name() + "4"
+ _, err = w.upload(context.Background(), bytes.NewReader(testData(1)), w.contracts(), params)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // assert we still have two packed slabs (one got uploaded synchronously)
+ pss, err = os.PackedSlabsForUpload(context.Background(), 0, uint8(params.rs.MinShards), uint8(params.rs.TotalShards), testContractSet, math.MaxInt)
+ if err != nil {
+ t.Fatal(err)
+ } else if len(pss) != 2 {
+ t.Fatal("expected 2 packed slabs")
+ }
+
+ // allow some time for the background thread to realise we blocked async
+ // packed slab uploads
+ time.Sleep(time.Second)
+
+ // unblock asynchronous uploads
+ w.unblockAsyncPackedSlabUploads(params)
+
+ // upload 129 bytes using the worker
+ params.path = t.Name() + "5"
+ _, err = w.upload(context.Background(), bytes.NewReader(testData(129)), w.contracts(), params)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // allow some time for the thread to pick up the packed slabs
+ time.Sleep(time.Second)
+
+ // assert we uploaded all packed slabs
+ pss, err = os.PackedSlabsForUpload(context.Background(), 0, uint8(params.rs.MinShards), uint8(params.rs.TotalShards), testContractSet, 1)
+ if err != nil {
+ t.Fatal(err)
+ } else if len(pss) != 0 {
+ t.Fatal("expected 0 packed slabs")
+ }
}
func TestUploadShards(t *testing.T) {
@@ -225,10 +286,7 @@
func TestUploadShards(t *testing.T) {
ul := w.uploadManager
// create test data
- data := make([]byte, 128)
- if _, err := frand.Read(data); err != nil {
- t.Fatal(err)
- }
+ data := testData(128)
// create upload params
params := testParameters(t.Name())
@@ -343,10 +401,7 @@ func TestRefreshUploaders(t *testing.T) {
hm := w.hm
// create test data
- data := make([]byte, 128)
- if _, err := frand.Read(data); err != nil {
- t.Fatal(err)
- }
+ data := testData(128)
// create upload params
params := testParameters(t.Name())
@@ -444,10 +499,7 @@ func TestUploadRegression(t *testing.T) {
dl := w.downloadManager
// create test data
- data := make([]byte, 128)
- if _, err := frand.Read(data); err != nil {
- t.Fatal(err)
- }
+ data := testData(128)
// create upload params
params := testParameters(t.Name())
@@ -500,3 +552,11 @@ func testParameters(path string) uploadParameters {
rs: testRedundancySettings,
}
}
+
+func testData(n int) []byte {
+ data := make([]byte, n)
+ if _, err := frand.Read(data); err != nil {
+ panic(err)
+ }
+ return data
+}
diff --git a/worker/worker.go b/worker/worker.go
index 56f853083..89f7ecc24 100644
--- a/worker/worker.go
+++ b/worker/worker.go
@@ -211,7 +211,7 @@ type worker struct {
transportPoolV3 *transportPoolV3
uploadsMu sync.Mutex
- uploadingPackedSlabs map[string]bool
+ uploadingPackedSlabs map[string]struct{}
contractSpendingRecorder ContractSpendingRecorder
contractLockingDuration time.Duration
@@ -1327,7 +1327,7 @@ func New(masterKey [32]byte, id string, b Bus, contractLockingDuration, busFlush
masterKey: masterKey,
logger: l.Sugar(),
startTime: time.Now(),
- uploadingPackedSlabs: make(map[string]bool),
+ uploadingPackedSlabs: make(map[string]struct{}),
shutdownCtx: ctx,
shutdownCtxCancel: cancel,
}
diff --git a/worker/worker_test.go
b/worker/worker_test.go index c1a19aed1..a61039c5f 100644 --- a/worker/worker_test.go +++ b/worker/worker_test.go @@ -2,6 +2,7 @@ package worker import ( "context" + "fmt" "testing" "time" @@ -93,6 +94,20 @@ func (w *testWorker) blockUploads() func() { return func() { close(blockChan) } } +func (w *testWorker) blockAsyncPackedSlabUploads(up uploadParameters) { + w.uploadsMu.Lock() + defer w.uploadsMu.Unlock() + key := fmt.Sprintf("%d-%d_%s", up.rs.MinShards, up.rs.TotalShards, up.contractSet) + w.uploadingPackedSlabs[key] = struct{}{} +} + +func (w *testWorker) unblockAsyncPackedSlabUploads(up uploadParameters) { + w.uploadsMu.Lock() + defer w.uploadsMu.Unlock() + key := fmt.Sprintf("%d-%d_%s", up.rs.MinShards, up.rs.TotalShards, up.contractSet) + delete(w.uploadingPackedSlabs, key) +} + func (w *testWorker) contracts() []api.ContractMetadata { metadatas, err := w.cs.Contracts(context.Background(), api.ContractsOpts{}) if err != nil { From 7c5b3c91efab2acf8f4b90588dace1240d00c0f7 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 27 Feb 2024 13:56:29 +0100 Subject: [PATCH 110/172] bus: breakdown totals --- alerts/alerts.go | 28 ++++++++++++++----------- internal/testing/cluster_test.go | 36 ++++++++++++++++---------------- 2 files changed, 34 insertions(+), 30 deletions(-) diff --git a/alerts/alerts.go b/alerts/alerts.go index a14d460b6..1ebebbc80 100644 --- a/alerts/alerts.go +++ b/alerts/alerts.go @@ -72,16 +72,21 @@ type ( } AlertsResponse struct { - Alerts []Alert `json:"alerts"` - HasMore bool `json:"hasMore"` - Total int `json:"total"` - TotalInfo int `json:"totalInfo"` - TotalWarning int `json:"totalWarning"` - TotalError int `json:"totalError"` - TotalCritical int `json:"totalCritical"` + Alerts []Alert `json:"alerts"` + HasMore bool `json:"hasMore"` + Totals struct { + Info int `json:"info"` + Warning int `json:"warning"` + Error int `json:"error"` + Critical int `json:"critical"` + } `json:"total"` } ) +func (ar AlertsResponse) Total() int { + return ar.Totals.Info + ar.Totals.Warning + ar.Totals.Error + ar.Totals.Critical +} + // String implements the fmt.Stringer interface. 
func (s Severity) String() string { switch s { @@ -194,15 +199,14 @@ func (m *Manager) Alerts(_ context.Context, opts AlertsOpts) (AlertsResponse, er alerts := make([]Alert, 0, len(m.alerts)) for _, a := range m.alerts { - resp.Total++ if a.Severity == SeverityInfo { - resp.TotalInfo++ + resp.Totals.Info++ } else if a.Severity == SeverityWarning { - resp.TotalWarning++ + resp.Totals.Warning++ } else if a.Severity == SeverityError { - resp.TotalError++ + resp.Totals.Error++ } else if a.Severity == SeverityCritical { - resp.TotalCritical++ + resp.Totals.Critical++ } if opts.Severity != 0 && a.Severity != opts.Severity { continue // filter by severity diff --git a/internal/testing/cluster_test.go b/internal/testing/cluster_test.go index 69b318f66..6b5f88769 100644 --- a/internal/testing/cluster_test.go +++ b/internal/testing/cluster_test.go @@ -1992,24 +1992,24 @@ func TestAlerts(t *testing.T) { for severity := alerts.SeverityInfo; severity <= alerts.SeverityCritical; severity++ { ar, err = b.Alerts(context.Background(), alerts.AlertsOpts{Severity: severity}) tt.OK(err) - if ar.Total != 32 { - t.Fatal("expected 32 alerts", ar.Total) - } else if ar.TotalInfo != 3 { - t.Fatal("expected 3 info alerts", ar.TotalInfo) - } else if ar.TotalWarning != 6 { - t.Fatal("expected 6 warning alerts", ar.TotalWarning) - } else if ar.TotalError != 9 { - t.Fatal("expected 9 error alerts", ar.TotalError) - } else if ar.TotalCritical != 14 { - t.Fatal("expected 14 critical alerts", ar.TotalCritical) - } else if severity == alerts.SeverityInfo && len(ar.Alerts) != ar.TotalInfo { - t.Fatalf("expected %v info alerts, got %v", ar.TotalInfo, len(ar.Alerts)) - } else if severity == alerts.SeverityWarning && len(ar.Alerts) != ar.TotalWarning { - t.Fatalf("expected %v warning alerts, got %v", ar.TotalWarning, len(ar.Alerts)) - } else if severity == alerts.SeverityError && len(ar.Alerts) != ar.TotalError { - t.Fatalf("expected %v error alerts, got %v", ar.TotalError, len(ar.Alerts)) - } else if severity == alerts.SeverityCritical && len(ar.Alerts) != ar.TotalCritical { - t.Fatalf("expected %v critical alerts, got %v", ar.TotalCritical, len(ar.Alerts)) + if ar.Total() != 32 { + t.Fatal("expected 32 alerts", ar.Total()) + } else if ar.Totals.Info != 3 { + t.Fatal("expected 3 info alerts", ar.Totals.Info) + } else if ar.Totals.Warning != 6 { + t.Fatal("expected 6 warning alerts", ar.Totals.Warning) + } else if ar.Totals.Error != 9 { + t.Fatal("expected 9 error alerts", ar.Totals.Error) + } else if ar.Totals.Critical != 14 { + t.Fatal("expected 14 critical alerts", ar.Totals.Critical) + } else if severity == alerts.SeverityInfo && len(ar.Alerts) != ar.Totals.Info { + t.Fatalf("expected %v info alerts, got %v", ar.Totals.Info, len(ar.Alerts)) + } else if severity == alerts.SeverityWarning && len(ar.Alerts) != ar.Totals.Warning { + t.Fatalf("expected %v warning alerts, got %v", ar.Totals.Warning, len(ar.Alerts)) + } else if severity == alerts.SeverityError && len(ar.Alerts) != ar.Totals.Error { + t.Fatalf("expected %v error alerts, got %v", ar.Totals.Error, len(ar.Alerts)) + } else if severity == alerts.SeverityCritical && len(ar.Alerts) != ar.Totals.Critical { + t.Fatalf("expected %v critical alerts, got %v", ar.Totals.Critical, len(ar.Alerts)) } } } From 6db1e38cf0f415f330c6ba54a24682af69d1dc16 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 27 Feb 2024 13:58:47 +0100 Subject: [PATCH 111/172] Update alerts/alerts.go Co-authored-by: Peter-Jan Brone --- alerts/alerts.go | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/alerts/alerts.go b/alerts/alerts.go index 1ebebbc80..6b009360d 100644 --- a/alerts/alerts.go +++ b/alerts/alerts.go @@ -79,7 +79,7 @@ type ( Warning int `json:"warning"` Error int `json:"error"` Critical int `json:"critical"` - } `json:"total"` + } `json:"totals"` } ) From 0fceed12cbba2304bc17f75c287e9272aaa8c08e Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 27 Feb 2024 14:54:11 +0100 Subject: [PATCH 112/172] api: fix Object response type --- api/object.go | 2 +- object/object.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/api/object.go b/api/object.go index 35df9b636..cef672a97 100644 --- a/api/object.go +++ b/api/object.go @@ -54,7 +54,7 @@ type ( Object struct { Metadata ObjectUserMetadata `json:"metadata,omitempty"` ObjectMetadata - *object.Object `json:"omitempty"` + *object.Object } // ObjectMetadata contains various metadata about an object. diff --git a/object/object.go b/object/object.go index 2331f6251..a545a4935 100644 --- a/object/object.go +++ b/object/object.go @@ -115,8 +115,8 @@ func GenerateEncryptionKey() EncryptionKey { // An Object is a unit of data that has been stored on a host. type Object struct { - Key EncryptionKey `json:"key"` - Slabs []SlabSlice `json:"slabs"` + Key EncryptionKey `json:"key,omitempty"` + Slabs []SlabSlice `json:"slabs,omitempty"` } // NewObject returns a new Object with a random key. From f29534aef0b996e95fd36cbd1b914f72a6c3fa04 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 27 Feb 2024 15:17:50 +0100 Subject: [PATCH 113/172] object: add docstring to Object type --- object/object.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/object/object.go b/object/object.go index a545a4935..965ebce2a 100644 --- a/object/object.go +++ b/object/object.go @@ -114,6 +114,9 @@ func GenerateEncryptionKey() EncryptionKey { } // An Object is a unit of data that has been stored on a host. +// NOTE: Object is embedded in the API's Object type, so all fields should be +// tagged omitempty to make sure responses where no object is returned remain +// clean. 
type Object struct { Key EncryptionKey `json:"key,omitempty"` Slabs []SlabSlice `json:"slabs,omitempty"` From a704e9fbae284b892651e047152890819d9856eb Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 27 Feb 2024 15:27:34 +0100 Subject: [PATCH 114/172] testing: add testutils package --- internal/testing/cluster.go | 67 ++------------------ internal/testing/cluster_test.go | 4 +- testutils/tt.go | 101 +++++++++++++++++++++++++++++++ worker/host_test.go | 9 +-- worker/worker_test.go | 19 +++--- 5 files changed, 121 insertions(+), 79 deletions(-) create mode 100644 testutils/tt.go diff --git a/internal/testing/cluster.go b/internal/testing/cluster.go index d55539cd7..d36f143be 100644 --- a/internal/testing/cluster.go +++ b/internal/testing/cluster.go @@ -9,7 +9,6 @@ import ( "net/http" "os" "path/filepath" - "strings" "sync" "testing" "time" @@ -27,6 +26,7 @@ import ( "go.sia.tech/renterd/internal/node" "go.sia.tech/renterd/s3" "go.sia.tech/renterd/stores" + "go.sia.tech/renterd/testutils" "go.uber.org/zap" "go.uber.org/zap/zapcore" "gorm.io/gorm" @@ -98,47 +98,6 @@ var ( testS3Credentials = credentials.NewStaticV4(testS3AccessKeyID, testS3SecretAccessKey, "") ) -type TT struct { - *testing.T -} - -func (t TT) AssertContains(err error, target string) { - t.Helper() - if err == nil || !strings.Contains(err.Error(), target) { - t.Fatalf("err: %v != target: %v", err, target) - } -} - -func (t TT) AssertIs(err, target error) { - t.Helper() - t.AssertContains(err, target.Error()) -} - -func (t TT) OK(err error) { - t.Helper() - if err != nil { - t.Fatal(err) - } -} - -func (t TT) OKAll(vs ...interface{}) { - t.Helper() - for _, v := range vs { - if err, ok := v.(error); ok && err != nil { - t.Fatal(err) - } - } -} - -func (t TT) FailAll(vs ...interface{}) { - t.Helper() - for _, v := range vs { - if err, ok := v.(error); ok && err == nil { - t.Fatal("should've failed") - } - } -} - // TestCluster is a helper type that allows for easily creating a number of // nodes connected to each other and ready for testing. type TestCluster struct { @@ -161,7 +120,7 @@ type TestCluster struct { dbName string dir string logger *zap.Logger - tt *TT + tt testutils.TT wk types.PrivateKey wg sync.WaitGroup } @@ -203,33 +162,17 @@ func randomPassword() string { return hex.EncodeToString(frand.Bytes(32)) } -// Retry will call 'fn' 'tries' times, waiting 'durationBetweenAttempts' -// between each attempt, returning 'nil' the first time that 'fn' returns nil. -// If 'nil' is never returned, then the final error returned by 'fn' is -// returned. -func (tt *TT) Retry(tries int, durationBetweenAttempts time.Duration, fn func() error) { - tt.Helper() - for i := 1; i < tries; i++ { - err := fn() - if err == nil { - return - } - time.Sleep(durationBetweenAttempts) - } - tt.OK(fn()) -} - // Reboot simulates a reboot of the cluster by calling Shutdown and creating a // new cluster using the same settings as the previous one. // NOTE: Simulating a reboot means that the hosts stay active and are not // restarted. 
-func (c *TestCluster) Reboot(ctx context.Context) *TestCluster { +func (c *TestCluster) Reboot(t *testing.T) *TestCluster { c.tt.Helper() hosts := c.hosts c.hosts = nil c.Shutdown() - newCluster := newTestCluster(c.tt.T, testClusterOptions{ + newCluster := newTestCluster(t, testClusterOptions{ dir: c.dir, dbName: c.dbName, logger: c.logger, @@ -302,7 +245,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { if testing.Short() { t.SkipNow() } - tt := &TT{t} + tt := testutils.New(t) // Ensure we don't hang ctx, cancel := context.WithTimeout(context.Background(), time.Minute) diff --git a/internal/testing/cluster_test.go b/internal/testing/cluster_test.go index f30a0906a..d7c458d8b 100644 --- a/internal/testing/cluster_test.go +++ b/internal/testing/cluster_test.go @@ -1052,7 +1052,7 @@ func TestEphemeralAccounts(t *testing.T) { } // Reboot cluster. - cluster2 := cluster.Reboot(context.Background()) + cluster2 := cluster.Reboot(t) defer cluster2.Shutdown() // Check that accounts were loaded from the bus. @@ -1246,7 +1246,7 @@ func TestEphemeralAccountSync(t *testing.T) { } // Restart cluster to have worker fetch the account from the bus again. - cluster2 := cluster.Reboot(context.Background()) + cluster2 := cluster.Reboot(t) defer cluster2.Shutdown() // Account should need a sync. diff --git a/testutils/tt.go b/testutils/tt.go new file mode 100644 index 000000000..9345a2fd7 --- /dev/null +++ b/testutils/tt.go @@ -0,0 +1,101 @@ +package testutils + +import ( + "strings" + "time" +) + +type ( + TT interface { + TestingCommon + + AssertContains(err error, target string) + AssertIs(err, target error) + FailAll(vs ...interface{}) + OK(err error) + OKAll(vs ...interface{}) + + // Retry will call 'fn' 'tries' times, waiting 'durationBetweenAttempts' + // between each attempt, returning 'nil' the first time that 'fn' + // returns nil. If 'nil' is never returned, then the final error + // returned by 'fn' is returned. + Retry(tries int, durationBetweenAttempts time.Duration, fn func() error) + } + + // TestingCommon is an interface that describes the common methods of + // testing.T and testing.B ensuring this testutil can be used in both + // contexts. 
+ TestingCommon interface { + Log(args ...any) + Logf(format string, args ...any) + Error(args ...any) + Errorf(format string, args ...any) + Fatal(args ...any) + Fatalf(format string, args ...any) + Skip(args ...any) + Skipf(format string, args ...any) + SkipNow() + Skipped() bool + Helper() + Cleanup(f func()) + TempDir() string + Setenv(key, value string) + } + + impl struct { + TestingCommon + } +) + +func New(tc TestingCommon) TT { + return &impl{TestingCommon: tc} +} + +func (t impl) AssertContains(err error, target string) { + t.Helper() + if err == nil || !strings.Contains(err.Error(), target) { + t.Fatalf("err: %v != target: %v", err, target) + } +} + +func (t impl) AssertIs(err, target error) { + t.Helper() + t.AssertContains(err, target.Error()) +} + +func (t impl) FailAll(vs ...interface{}) { + t.Helper() + for _, v := range vs { + if err, ok := v.(error); ok && err == nil { + t.Fatal("should've failed") + } + } +} + +func (t impl) OK(err error) { + t.Helper() + if err != nil { + t.Fatal(err) + } +} + +func (t impl) OKAll(vs ...interface{}) { + t.Helper() + for _, v := range vs { + if err, ok := v.(error); ok && err != nil { + t.Fatal(err) + } + } +} + +func (t impl) Retry(tries int, durationBetweenAttempts time.Duration, fn func() error) { + t.Helper() + for i := 1; i < tries; i++ { + err := fn() + if err == nil { + return + } + time.Sleep(durationBetweenAttempts) + } + t.OK(fn()) +} diff --git a/worker/host_test.go b/worker/host_test.go index 8dd0567ff..80096888c 100644 --- a/worker/host_test.go +++ b/worker/host_test.go @@ -14,6 +14,7 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/testutils" "lukechampine.com/frand" ) @@ -25,15 +26,15 @@ type ( } testHostManager struct { - t test + tt testutils.TT mu sync.Mutex hosts map[types.PublicKey]*testHost } ) -func newTestHostManager(t test) *testHostManager { - return &testHostManager{t: t, hosts: make(map[types.PublicKey]*testHost)} +func newTestHostManager(t testutils.TestingCommon) *testHostManager { + return &testHostManager{tt: testutils.New(t), hosts: make(map[types.PublicKey]*testHost)} } func (hm *testHostManager) Host(hk types.PublicKey, fcid types.FileContractID, siamuxAddr string) Host { @@ -41,7 +42,7 @@ func (hm *testHostManager) Host(hk types.PublicKey, fcid types.FileContractID, s defer hm.mu.Unlock() if _, ok := hm.hosts[hk]; !ok { - hm.t.Fatal("host not found") + hm.tt.Fatal("host not found") } return hm.hosts[hk] } diff --git a/worker/worker_test.go b/worker/worker_test.go index 7b781bf0e..8e973a378 100644 --- a/worker/worker_test.go +++ b/worker/worker_test.go @@ -7,18 +7,15 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/testutils" "go.uber.org/zap" "golang.org/x/crypto/blake2b" "lukechampine.com/frand" ) type ( - test interface { - Fatal(...any) - } - testWorker struct { - t test + tt testutils.TT *worker cs *contractStoreMock @@ -32,7 +29,7 @@ type ( } ) -func newTestWorker(t test) *testWorker { +func newTestWorker(t testutils.TestingCommon) *testWorker { // create bus dependencies cs := newContractStoreMock() os := newObjectStoreMock(testBucket) @@ -58,7 +55,7 @@ func newTestWorker(t test) *testWorker { w.uploadManager.mm = ulmm return &testWorker{ - t, + testutils.New(t), w, cs, os, @@ -88,7 +85,7 @@ func (w *testWorker) blockUploads() func() { select { case <-w.ulmm.memBlockChan: case <-time.After(time.Second): - w.t.Fatal("already blocking") + w.tt.Fatal("already 
blocking") } blockChan := make(chan struct{}) @@ -99,7 +96,7 @@ func (w *testWorker) blockUploads() func() { func (w *testWorker) contracts() []api.ContractMetadata { metadatas, err := w.cs.Contracts(context.Background(), api.ContractsOpts{}) if err != nil { - w.t.Fatal(err) + w.tt.Fatal(err) } return metadatas } @@ -107,12 +104,12 @@ func (w *testWorker) contracts() []api.ContractMetadata { func (w *testWorker) renewContract(hk types.PublicKey) *contractMock { h := w.hm.hosts[hk] if h == nil { - w.t.Fatal("host not found") + w.tt.Fatal("host not found") } renewal, err := w.cs.renewContract(hk) if err != nil { - w.t.Fatal(err) + w.tt.Fatal(err) } return renewal } From 3a15760bbcb8ebaf60a60ffad4da35f33a33eb4f Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 27 Feb 2024 15:29:19 +0100 Subject: [PATCH 115/172] worker: expose methods --- worker/bench_test.go | 14 +++++------ worker/downloader_test.go | 4 +-- worker/upload_test.go | 52 +++++++++++++++++++-------------------- worker/uploader_test.go | 4 +-- worker/worker_test.go | 12 ++++----- 5 files changed, 43 insertions(+), 43 deletions(-) diff --git a/worker/bench_test.go b/worker/bench_test.go index d6750c8f8..cc0034415 100644 --- a/worker/bench_test.go +++ b/worker/bench_test.go @@ -30,10 +30,10 @@ func BenchmarkDownloaderSingleObject(b *testing.B) { up.rs.MinShards = 10 up.rs.TotalShards = 30 up.packing = false - w.addHosts(up.rs.TotalShards) + w.AddHosts(up.rs.TotalShards) data := bytes.NewReader(frand.Bytes(int(up.rs.SlabSizeNoRedundancy()))) - _, _, err := w.uploadManager.Upload(context.Background(), data, w.contracts(), up, lockingPriorityUpload) + _, _, err := w.uploadManager.Upload(context.Background(), data, w.Contracts(), up, lockingPriorityUpload) if err != nil { b.Fatal(err) } @@ -45,7 +45,7 @@ func BenchmarkDownloaderSingleObject(b *testing.B) { b.SetBytes(o.Object.Size) b.ResetTimer() for i := 0; i < b.N; i++ { - err = w.downloadManager.DownloadObject(context.Background(), io.Discard, *o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + err = w.downloadManager.DownloadObject(context.Background(), io.Discard, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) if err != nil { b.Fatal(err) } @@ -63,13 +63,13 @@ func BenchmarkUploaderSingleObject(b *testing.B) { up.rs.MinShards = 10 up.rs.TotalShards = 30 up.packing = false - w.addHosts(up.rs.TotalShards) + w.AddHosts(up.rs.TotalShards) data := io.LimitReader(&zeroReader{}, int64(b.N*rhpv2.SectorSize*up.rs.MinShards)) b.SetBytes(int64(rhpv2.SectorSize * up.rs.MinShards)) b.ResetTimer() - _, _, err := w.uploadManager.Upload(context.Background(), data, w.contracts(), up, lockingPriorityUpload) + _, _, err := w.uploadManager.Upload(context.Background(), data, w.Contracts(), up, lockingPriorityUpload) if err != nil { b.Fatal(err) } @@ -86,14 +86,14 @@ func BenchmarkUploaderMultiObject(b *testing.B) { up.rs.MinShards = 10 up.rs.TotalShards = 30 up.packing = false - w.addHosts(up.rs.TotalShards) + w.AddHosts(up.rs.TotalShards) b.SetBytes(int64(rhpv2.SectorSize * up.rs.MinShards)) b.ResetTimer() for i := 0; i < b.N; i++ { data := io.LimitReader(&zeroReader{}, int64(rhpv2.SectorSize*up.rs.MinShards)) - _, _, err := w.uploadManager.Upload(context.Background(), data, w.contracts(), up, lockingPriorityUpload) + _, _, err := w.uploadManager.Upload(context.Background(), data, w.Contracts(), up, lockingPriorityUpload) if err != nil { b.Fatal(err) } diff --git a/worker/downloader_test.go b/worker/downloader_test.go index cbb48132c..8097b8304 100644 --- a/worker/downloader_test.go 
+++ b/worker/downloader_test.go @@ -8,13 +8,13 @@ import ( func TestDownloaderStopped(t *testing.T) { w := newTestWorker(t) - hosts := w.addHosts(1) + hosts := w.AddHosts(1) // convenience variables dm := w.downloadManager h := hosts[0] - dm.refreshDownloaders(w.contracts()) + dm.refreshDownloaders(w.Contracts()) dl := w.downloadManager.downloaders[h.PublicKey()] dl.Stop() diff --git a/worker/upload_test.go b/worker/upload_test.go index cc0996519..9192e040f 100644 --- a/worker/upload_test.go +++ b/worker/upload_test.go @@ -25,7 +25,7 @@ func TestUpload(t *testing.T) { w := newTestWorker(t) // add hosts to worker - w.addHosts(testRedundancySettings.TotalShards * 2) + w.AddHosts(testRedundancySettings.TotalShards * 2) // convenience variables os := w.os @@ -42,7 +42,7 @@ func TestUpload(t *testing.T) { params := testParameters(t.Name()) // upload data - _, _, err := ul.Upload(context.Background(), bytes.NewReader(data), w.contracts(), params, lockingPriorityUpload) + _, _, err := ul.Upload(context.Background(), bytes.NewReader(data), w.Contracts(), params, lockingPriorityUpload) if err != nil { t.Fatal(err) } @@ -61,7 +61,7 @@ func TestUpload(t *testing.T) { // download the data and assert it matches var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -71,7 +71,7 @@ func TestUpload(t *testing.T) { // filter contracts to have (at most) min shards used contracts var n int var filtered []api.ContractMetadata - for _, md := range w.contracts() { + for _, md := range w.Contracts() { // add unused contracts if _, used := used[md.HostKey]; !used { filtered = append(filtered, md) @@ -111,7 +111,7 @@ func TestUpload(t *testing.T) { // try and upload into a bucket that does not exist params.bucket = "doesnotexist" - _, _, err = ul.Upload(context.Background(), bytes.NewReader(data), w.contracts(), params, lockingPriorityUpload) + _, _, err = ul.Upload(context.Background(), bytes.NewReader(data), w.Contracts(), params, lockingPriorityUpload) if !errors.Is(err, api.ErrBucketNotFound) { t.Fatal("expected bucket not found error", err) } @@ -119,7 +119,7 @@ func TestUpload(t *testing.T) { // upload data using a cancelled context - assert we don't hang ctx, cancel := context.WithCancel(context.Background()) cancel() - _, _, err = ul.Upload(ctx, bytes.NewReader(data), w.contracts(), params, lockingPriorityUpload) + _, _, err = ul.Upload(ctx, bytes.NewReader(data), w.Contracts(), params, lockingPriorityUpload) if err == nil || !errors.Is(err, errUploadInterrupted) { t.Fatal(err) } @@ -130,7 +130,7 @@ func TestUploadPackedSlab(t *testing.T) { w := newTestWorker(t) // add hosts to worker - w.addHosts(testRedundancySettings.TotalShards * 2) + w.AddHosts(testRedundancySettings.TotalShards * 2) // convenience variables os := w.os @@ -149,7 +149,7 @@ func TestUploadPackedSlab(t *testing.T) { params.packing = true // upload data - _, _, err := ul.Upload(context.Background(), bytes.NewReader(data), w.contracts(), params, lockingPriorityUpload) + _, _, err := ul.Upload(context.Background(), bytes.NewReader(data), w.Contracts(), params, lockingPriorityUpload) if err != nil { t.Fatal(err) } @@ -167,7 +167,7 @@ func TestUploadPackedSlab(t *testing.T) { // download the data and assert it matches var buf bytes.Buffer - err = 
dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -185,7 +185,7 @@ func TestUploadPackedSlab(t *testing.T) { mem := mm.AcquireMemory(context.Background(), uint64(params.rs.TotalShards*rhpv2.SectorSize)) // upload the packed slab - err = ul.UploadPackedSlab(context.Background(), params.rs, ps, mem, w.contracts(), 0, lockingPriorityUpload) + err = ul.UploadPackedSlab(context.Background(), params.rs, ps, mem, w.Contracts(), 0, lockingPriorityUpload) if err != nil { t.Fatal(err) } @@ -203,7 +203,7 @@ func TestUploadPackedSlab(t *testing.T) { // download the data again and assert it matches buf.Reset() - err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -216,7 +216,7 @@ func TestUploadShards(t *testing.T) { w := newTestWorker(t) // add hosts to worker - w.addHosts(testRedundancySettings.TotalShards * 2) + w.AddHosts(testRedundancySettings.TotalShards * 2) // convenience variables os := w.os @@ -234,7 +234,7 @@ func TestUploadShards(t *testing.T) { params := testParameters(t.Name()) // upload data - _, _, err := ul.Upload(context.Background(), bytes.NewReader(data), w.contracts(), params, lockingPriorityUpload) + _, _, err := ul.Upload(context.Background(), bytes.NewReader(data), w.Contracts(), params, lockingPriorityUpload) if err != nil { t.Fatal(err) } @@ -265,7 +265,7 @@ func TestUploadShards(t *testing.T) { } // download the slab - shards, _, err := dl.DownloadSlab(context.Background(), slab.Slab, w.contracts()) + shards, _, err := dl.DownloadSlab(context.Background(), slab.Slab, w.Contracts()) if err != nil { t.Fatal(err) } @@ -281,7 +281,7 @@ func TestUploadShards(t *testing.T) { // recreate upload contracts contracts := make([]api.ContractMetadata, 0) - for _, c := range w.contracts() { + for _, c := range w.Contracts() { _, used := usedHosts[c.HostKey] _, bad := badHosts[c.HostKey] if !used && !bad { @@ -314,7 +314,7 @@ func TestUploadShards(t *testing.T) { // create download contracts contracts = contracts[:0] - for _, c := range w.contracts() { + for _, c := range w.Contracts() { if _, bad := badHosts[c.HostKey]; !bad { contracts = append(contracts, c) } @@ -335,7 +335,7 @@ func TestRefreshUploaders(t *testing.T) { w := newTestWorker(t) // add hosts to worker - w.addHosts(testRedundancySettings.TotalShards) + w.AddHosts(testRedundancySettings.TotalShards) // convenience variables ul := w.uploadManager @@ -352,7 +352,7 @@ func TestRefreshUploaders(t *testing.T) { params := testParameters(t.Name()) // upload data - contracts := w.contracts() + contracts := w.Contracts() _, err := w.upload(context.Background(), bytes.NewReader(data), contracts, params) if err != nil { t.Fatal(err) @@ -365,7 +365,7 @@ func TestRefreshUploaders(t *testing.T) { // renew the first contract c1 := contracts[0] - c1Renewed := w.renewContract(c1.HostKey) + c1Renewed := w.RenewContract(c1.HostKey) // remove the host from the second contract c2 := contracts[1] @@ -373,10 +373,10 @@ func TestRefreshUploaders(t *testing.T) { delete(cs.contracts, c2.ID) // add a new host/contract - hNew := w.addHost() + hNew := w.AddHost() // upload 
data - contracts = w.contracts() + contracts = w.Contracts() _, _, err = ul.Upload(context.Background(), bytes.NewReader(data), contracts, params, lockingPriorityUpload) if err != nil { t.Fatal(err) @@ -437,7 +437,7 @@ func TestUploadRegression(t *testing.T) { w := newTestWorker(t) // add hosts to worker - w.addHosts(testRedundancySettings.TotalShards) + w.AddHosts(testRedundancySettings.TotalShards) // convenience variables os := w.os @@ -453,12 +453,12 @@ func TestUploadRegression(t *testing.T) { params := testParameters(t.Name()) // make sure the memory manager blocks - unblock := w.blockUploads() + unblock := w.BlockUploads() // upload data ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - _, err := w.upload(ctx, bytes.NewReader(data), w.contracts(), params) + _, err := w.upload(ctx, bytes.NewReader(data), w.Contracts(), params) if !errors.Is(err, errUploadInterrupted) { t.Fatal(err) } @@ -467,7 +467,7 @@ func TestUploadRegression(t *testing.T) { unblock() // upload data - _, err = w.upload(context.Background(), bytes.NewReader(data), w.contracts(), params) + _, err = w.upload(context.Background(), bytes.NewReader(data), w.Contracts(), params) if err != nil { t.Fatal(err) } @@ -480,7 +480,7 @@ func TestUploadRegression(t *testing.T) { // download data for good measure var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.Contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { diff --git a/worker/uploader_test.go b/worker/uploader_test.go index 514d17aab..b203827a5 100644 --- a/worker/uploader_test.go +++ b/worker/uploader_test.go @@ -9,10 +9,10 @@ import ( func TestUploaderStopped(t *testing.T) { w := newTestWorker(t) - w.addHosts(1) + w.AddHosts(1) um := w.uploadManager - um.refreshUploaders(w.contracts(), 1) + um.refreshUploaders(w.Contracts(), 1) ul := um.uploaders[0] ul.Stop(errors.New("test")) diff --git a/worker/worker_test.go b/worker/worker_test.go index 8e973a378..eda968b47 100644 --- a/worker/worker_test.go +++ b/worker/worker_test.go @@ -66,14 +66,14 @@ func newTestWorker(t testutils.TestingCommon) *testWorker { } } -func (w *testWorker) addHosts(n int) (added []*testHost) { +func (w *testWorker) AddHosts(n int) (added []*testHost) { for i := 0; i < n; i++ { - added = append(added, w.addHost()) + added = append(added, w.AddHost()) } return } -func (w *testWorker) addHost() *testHost { +func (w *testWorker) AddHost() *testHost { h := w.hs.addHost() c := w.cs.addContract(h.hk) host := newTestHost(h, c) @@ -81,7 +81,7 @@ func (w *testWorker) addHost() *testHost { return host } -func (w *testWorker) blockUploads() func() { +func (w *testWorker) BlockUploads() func() { select { case <-w.ulmm.memBlockChan: case <-time.After(time.Second): @@ -93,7 +93,7 @@ func (w *testWorker) blockUploads() func() { return func() { close(blockChan) } } -func (w *testWorker) contracts() []api.ContractMetadata { +func (w *testWorker) Contracts() []api.ContractMetadata { metadatas, err := w.cs.Contracts(context.Background(), api.ContractsOpts{}) if err != nil { w.tt.Fatal(err) @@ -101,7 +101,7 @@ func (w *testWorker) contracts() []api.ContractMetadata { return metadatas } -func (w *testWorker) renewContract(hk types.PublicKey) *contractMock { +func (w *testWorker) RenewContract(hk types.PublicKey) *contractMock { h := w.hm.hosts[hk] if h == nil { 
w.tt.Fatal("host not found") From 31198abe40359ed2edbc1c83cd896594b328618d Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 27 Feb 2024 16:38:07 +0100 Subject: [PATCH 116/172] testing: move tt.go --- internal/{testing => test/e2e}/blocklist_test.go | 2 +- internal/{testing => test/e2e}/cluster.go | 8 ++++---- internal/{testing => test/e2e}/cluster_test.go | 2 +- internal/{testing => test/e2e}/gouging_test.go | 2 +- internal/{testing => test/e2e}/host.go | 2 +- internal/{testing => test/e2e}/interactions_test.go | 2 +- internal/{testing => test/e2e}/metadata_test.go | 2 +- internal/{testing => test/e2e}/metrics_test.go | 2 +- internal/{testing => test/e2e}/migrations_test.go | 2 +- internal/{testing => test/e2e}/pruning_test.go | 2 +- internal/{testing => test/e2e}/s3_test.go | 2 +- internal/{testing => test/e2e}/uploads_test.go | 2 +- {testutils => internal/test}/tt.go | 4 ++-- worker/host_test.go | 8 ++++---- worker/worker_test.go | 8 ++++---- 15 files changed, 25 insertions(+), 25 deletions(-) rename internal/{testing => test/e2e}/blocklist_test.go (99%) rename internal/{testing => test/e2e}/cluster.go (99%) rename internal/{testing => test/e2e}/cluster_test.go (99%) rename internal/{testing => test/e2e}/gouging_test.go (99%) rename internal/{testing => test/e2e}/host.go (99%) rename internal/{testing => test/e2e}/interactions_test.go (99%) rename internal/{testing => test/e2e}/metadata_test.go (99%) rename internal/{testing => test/e2e}/metrics_test.go (99%) rename internal/{testing => test/e2e}/migrations_test.go (99%) rename internal/{testing => test/e2e}/pruning_test.go (99%) rename internal/{testing => test/e2e}/s3_test.go (99%) rename internal/{testing => test/e2e}/uploads_test.go (99%) rename {testutils => internal/test}/tt.go (97%) diff --git a/internal/testing/blocklist_test.go b/internal/test/e2e/blocklist_test.go similarity index 99% rename from internal/testing/blocklist_test.go rename to internal/test/e2e/blocklist_test.go index 9d9a12605..48358b854 100644 --- a/internal/testing/blocklist_test.go +++ b/internal/test/e2e/blocklist_test.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "context" diff --git a/internal/testing/cluster.go b/internal/test/e2e/cluster.go similarity index 99% rename from internal/testing/cluster.go rename to internal/test/e2e/cluster.go index d36f143be..962aa11ef 100644 --- a/internal/testing/cluster.go +++ b/internal/test/e2e/cluster.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "context" @@ -24,9 +24,9 @@ import ( "go.sia.tech/renterd/bus" "go.sia.tech/renterd/config" "go.sia.tech/renterd/internal/node" + "go.sia.tech/renterd/internal/test" "go.sia.tech/renterd/s3" "go.sia.tech/renterd/stores" - "go.sia.tech/renterd/testutils" "go.uber.org/zap" "go.uber.org/zap/zapcore" "gorm.io/gorm" @@ -120,7 +120,7 @@ type TestCluster struct { dbName string dir string logger *zap.Logger - tt testutils.TT + tt test.TT wk types.PrivateKey wg sync.WaitGroup } @@ -245,7 +245,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { if testing.Short() { t.SkipNow() } - tt := testutils.New(t) + tt := test.NewTest(t) // Ensure we don't hang ctx, cancel := context.WithTimeout(context.Background(), time.Minute) diff --git a/internal/testing/cluster_test.go b/internal/test/e2e/cluster_test.go similarity index 99% rename from internal/testing/cluster_test.go rename to internal/test/e2e/cluster_test.go index d7c458d8b..18d12fa48 100644 --- a/internal/testing/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -1,4 +1,4 @@ 
-package testing +package e2e import ( "bytes" diff --git a/internal/testing/gouging_test.go b/internal/test/e2e/gouging_test.go similarity index 99% rename from internal/testing/gouging_test.go rename to internal/test/e2e/gouging_test.go index 7a812354f..423733ab6 100644 --- a/internal/testing/gouging_test.go +++ b/internal/test/e2e/gouging_test.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "bytes" diff --git a/internal/testing/host.go b/internal/test/e2e/host.go similarity index 99% rename from internal/testing/host.go rename to internal/test/e2e/host.go index e7943a7d3..6100adad5 100644 --- a/internal/testing/host.go +++ b/internal/test/e2e/host.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "context" diff --git a/internal/testing/interactions_test.go b/internal/test/e2e/interactions_test.go similarity index 99% rename from internal/testing/interactions_test.go rename to internal/test/e2e/interactions_test.go index 686003e02..021d75cb6 100644 --- a/internal/testing/interactions_test.go +++ b/internal/test/e2e/interactions_test.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "context" diff --git a/internal/testing/metadata_test.go b/internal/test/e2e/metadata_test.go similarity index 99% rename from internal/testing/metadata_test.go rename to internal/test/e2e/metadata_test.go index c88c8650d..47fb6a2b1 100644 --- a/internal/testing/metadata_test.go +++ b/internal/test/e2e/metadata_test.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "bytes" diff --git a/internal/testing/metrics_test.go b/internal/test/e2e/metrics_test.go similarity index 99% rename from internal/testing/metrics_test.go rename to internal/test/e2e/metrics_test.go index 7dd0195f5..ed432c3c2 100644 --- a/internal/testing/metrics_test.go +++ b/internal/test/e2e/metrics_test.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "bytes" diff --git a/internal/testing/migrations_test.go b/internal/test/e2e/migrations_test.go similarity index 99% rename from internal/testing/migrations_test.go rename to internal/test/e2e/migrations_test.go index 2afbcebb6..66325cc15 100644 --- a/internal/testing/migrations_test.go +++ b/internal/test/e2e/migrations_test.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "bytes" diff --git a/internal/testing/pruning_test.go b/internal/test/e2e/pruning_test.go similarity index 99% rename from internal/testing/pruning_test.go rename to internal/test/e2e/pruning_test.go index 80e6ab29d..bd1bb822d 100644 --- a/internal/testing/pruning_test.go +++ b/internal/test/e2e/pruning_test.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "bytes" diff --git a/internal/testing/s3_test.go b/internal/test/e2e/s3_test.go similarity index 99% rename from internal/testing/s3_test.go rename to internal/test/e2e/s3_test.go index ced1fbcc0..92b37af04 100644 --- a/internal/testing/s3_test.go +++ b/internal/test/e2e/s3_test.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "bytes" diff --git a/internal/testing/uploads_test.go b/internal/test/e2e/uploads_test.go similarity index 99% rename from internal/testing/uploads_test.go rename to internal/test/e2e/uploads_test.go index e3e938120..96daae8c8 100644 --- a/internal/testing/uploads_test.go +++ b/internal/test/e2e/uploads_test.go @@ -1,4 +1,4 @@ -package testing +package e2e import ( "bytes" diff --git a/testutils/tt.go b/internal/test/tt.go similarity index 97% rename from testutils/tt.go rename to internal/test/tt.go index 9345a2fd7..c48e2616b 100644 --- a/testutils/tt.go +++ b/internal/test/tt.go @@ 
-1,4 +1,4 @@ -package testutils +package test import ( "strings" @@ -47,7 +47,7 @@ type ( } ) -func New(tc TestingCommon) TT { +func NewTest(tc TestingCommon) TT { return &impl{TestingCommon: tc} } diff --git a/worker/host_test.go b/worker/host_test.go index 80096888c..af4052507 100644 --- a/worker/host_test.go +++ b/worker/host_test.go @@ -14,7 +14,7 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" - "go.sia.tech/renterd/testutils" + "go.sia.tech/renterd/internal/test" "lukechampine.com/frand" ) @@ -26,15 +26,15 @@ type ( } testHostManager struct { - tt testutils.TT + tt test.TT mu sync.Mutex hosts map[types.PublicKey]*testHost } ) -func newTestHostManager(t testutils.TestingCommon) *testHostManager { - return &testHostManager{tt: testutils.New(t), hosts: make(map[types.PublicKey]*testHost)} +func newTestHostManager(t test.TestingCommon) *testHostManager { + return &testHostManager{tt: test.NewTest(t), hosts: make(map[types.PublicKey]*testHost)} } func (hm *testHostManager) Host(hk types.PublicKey, fcid types.FileContractID, siamuxAddr string) Host { diff --git a/worker/worker_test.go b/worker/worker_test.go index eda968b47..bef0214ac 100644 --- a/worker/worker_test.go +++ b/worker/worker_test.go @@ -7,7 +7,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/testutils" + "go.sia.tech/renterd/internal/test" "go.uber.org/zap" "golang.org/x/crypto/blake2b" "lukechampine.com/frand" @@ -15,7 +15,7 @@ import ( type ( testWorker struct { - tt testutils.TT + tt test.TT *worker cs *contractStoreMock @@ -29,7 +29,7 @@ type ( } ) -func newTestWorker(t testutils.TestingCommon) *testWorker { +func newTestWorker(t test.TestingCommon) *testWorker { // create bus dependencies cs := newContractStoreMock() os := newObjectStoreMock(testBucket) @@ -55,7 +55,7 @@ func newTestWorker(t testutils.TestingCommon) *testWorker { w.uploadManager.mm = ulmm return &testWorker{ - testutils.New(t), + test.NewTest(t), w, cs, os, From 30ced3e5a8decd1c074b9ea834ce63340b32a128 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 27 Feb 2024 16:41:47 +0100 Subject: [PATCH 117/172] ci: update test.yml --- .github/workflows/test.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index e8a32e5ec..b3c794242 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -60,7 +60,7 @@ jobs: - name: Test Integration uses: n8maninger/action-golang-test@v1 with: - package: "./internal/testing/..." + package: "./internal/test/..." args: "-failfast;-race;-tags=testing;-timeout=30m" - name: Test Integration - MySQL if: matrix.os == 'ubuntu-latest' @@ -70,7 +70,7 @@ jobs: RENTERD_DB_USER: root RENTERD_DB_PASSWORD: test with: - package: "./internal/testing/..." + package: "./internal/test/..." args: "-failfast;-race;-tags=testing;-timeout=30m" - name: Build run: go build -o bin/ ./cmd/renterd From dfc7850c3e8cf5a5a2088d7db6451a8beb9336a8 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 27 Feb 2024 16:47:27 +0100 Subject: [PATCH 118/172] ci: fix e2e path --- .github/workflows/test.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index b3c794242..bb56bbdb2 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -60,7 +60,7 @@ jobs: - name: Test Integration uses: n8maninger/action-golang-test@v1 with: - package: "./internal/test/..." 
+ package: "./internal/test/e2e/..." args: "-failfast;-race;-tags=testing;-timeout=30m" - name: Test Integration - MySQL if: matrix.os == 'ubuntu-latest' @@ -70,7 +70,7 @@ jobs: RENTERD_DB_USER: root RENTERD_DB_PASSWORD: test with: - package: "./internal/test/..." + package: "./internal/test/e2e/..." args: "-failfast;-race;-tags=testing;-timeout=30m" - name: Build run: go build -o bin/ ./cmd/renterd From 990feac6ea56041c265790f65c7d3fe9dd84bf75 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 27 Feb 2024 16:48:34 +0100 Subject: [PATCH 119/172] internal: rename TT constr --- internal/test/e2e/cluster.go | 2 +- internal/test/tt.go | 2 +- worker/host_test.go | 2 +- worker/worker_test.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 962aa11ef..142bedf47 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -245,7 +245,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { if testing.Short() { t.SkipNow() } - tt := test.NewTest(t) + tt := test.NewTT(t) // Ensure we don't hang ctx, cancel := context.WithTimeout(context.Background(), time.Minute) diff --git a/internal/test/tt.go b/internal/test/tt.go index c48e2616b..22bcff223 100644 --- a/internal/test/tt.go +++ b/internal/test/tt.go @@ -47,7 +47,7 @@ type ( } ) -func NewTest(tc TestingCommon) TT { +func NewTT(tc TestingCommon) TT { return &impl{TestingCommon: tc} } diff --git a/worker/host_test.go b/worker/host_test.go index af4052507..618c4cfb3 100644 --- a/worker/host_test.go +++ b/worker/host_test.go @@ -34,7 +34,7 @@ type ( ) func newTestHostManager(t test.TestingCommon) *testHostManager { - return &testHostManager{tt: test.NewTest(t), hosts: make(map[types.PublicKey]*testHost)} + return &testHostManager{tt: test.NewTT(t), hosts: make(map[types.PublicKey]*testHost)} } func (hm *testHostManager) Host(hk types.PublicKey, fcid types.FileContractID, siamuxAddr string) Host { diff --git a/worker/worker_test.go b/worker/worker_test.go index bef0214ac..5e6a1554f 100644 --- a/worker/worker_test.go +++ b/worker/worker_test.go @@ -55,7 +55,7 @@ func newTestWorker(t test.TestingCommon) *testWorker { w.uploadManager.mm = ulmm return &testWorker{ - test.NewTest(t), + test.NewTT(t), w, cs, os, From 9798d6410613c61475926480b43e3c99ff2f879a Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 27 Feb 2024 16:55:44 +0100 Subject: [PATCH 120/172] testing: add config.go --- internal/test/config.go | 64 ++++++++++++++++++++++ internal/test/e2e/blocklist_test.go | 9 ++-- internal/test/e2e/cluster.go | 80 +++++----------------------- internal/test/e2e/cluster_test.go | 67 +++++++++++------------ internal/test/e2e/gouging_test.go | 5 +- internal/test/e2e/metadata_test.go | 3 +- internal/test/e2e/metrics_test.go | 5 +- internal/test/e2e/migrations_test.go | 11 ++-- internal/test/e2e/pruning_test.go | 5 +- internal/test/e2e/s3_test.go | 17 +++--- internal/test/e2e/uploads_test.go | 5 +- 11 files changed, 145 insertions(+), 126 deletions(-) create mode 100644 internal/test/config.go diff --git a/internal/test/config.go b/internal/test/config.go new file mode 100644 index 000000000..7553fa16d --- /dev/null +++ b/internal/test/config.go @@ -0,0 +1,64 @@ +package test + +import ( + "time" + + "github.com/minio/minio-go/v7/pkg/credentials" + rhpv2 "go.sia.tech/core/rhp/v2" + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" +) + +var ( + // AutopilotConfig is the autopilot used for testing unless a different + // one is explicitly set. 
+ AutopilotConfig = api.AutopilotConfig{ + Contracts: api.ContractsConfig{ + Allowance: types.Siacoins(1).Mul64(1e3), + Amount: 3, + Period: 144, + RenewWindow: 72, + + Download: rhpv2.SectorSize * 500, + Upload: rhpv2.SectorSize * 500, + Storage: rhpv2.SectorSize * 5e3, + + Set: ContractSet, + Prune: false, + }, + Hosts: api.HostsConfig{ + MaxDowntimeHours: 10, + MinRecentScanFailures: 10, + AllowRedundantIPs: true, // allow for integration tests by default + }, + } + + ContractSet = "testset" + ContractSetSettings = api.ContractSetSetting{ + Default: ContractSet, + } + + GougingSettings = api.GougingSettings{ + MinMaxCollateral: types.Siacoins(10), // at least up to 10 SC per contract + MaxRPCPrice: types.Siacoins(1).Div64(1000), // 1mS per RPC + MaxContractPrice: types.Siacoins(10), // 10 SC per contract + MaxDownloadPrice: types.Siacoins(1).Mul64(1000), // 1000 SC per 1 TiB + MaxUploadPrice: types.Siacoins(1).Mul64(1000), // 1000 SC per 1 TiB + MaxStoragePrice: types.Siacoins(1000).Div64(144 * 30), // 1000 SC per month + + HostBlockHeightLeeway: 240, // amount of leeway given to host block height + + MinPriceTableValidity: 10 * time.Second, // minimum value for price table validity + MinAccountExpiry: time.Hour, // minimum value for account expiry + MinMaxEphemeralAccountBalance: types.Siacoins(1), // 1SC + } + + RedundancySettings = api.RedundancySettings{ + MinShards: 2, + TotalShards: 3, + } + + S3AccessKeyID = "TESTINGYNHUWCPKOPSYQ" + S3SecretAccessKey = "Rh30BNyj+qNI4ftYRteoZbHJ3X4Ln71QtZkRXzJ9" + S3Credentials = credentials.NewStaticV4(S3AccessKeyID, S3SecretAccessKey, "") +) diff --git a/internal/test/e2e/blocklist_test.go b/internal/test/e2e/blocklist_test.go index 48358b854..64acc2fba 100644 --- a/internal/test/e2e/blocklist_test.go +++ b/internal/test/e2e/blocklist_test.go @@ -8,6 +8,7 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" ) func TestBlocklist(t *testing.T) { @@ -26,7 +27,7 @@ func TestBlocklist(t *testing.T) { tt := cluster.tt // fetch contracts - contracts, err := b.Contracts(ctx, api.ContractsOpts{ContractSet: testAutopilotConfig.Contracts.Set}) + contracts, err := b.Contracts(ctx, api.ContractsOpts{ContractSet: test.AutopilotConfig.Contracts.Set}) tt.OK(err) if len(contracts) != 3 { t.Fatalf("unexpected number of contracts, %v != 3", len(contracts)) @@ -40,7 +41,7 @@ func TestBlocklist(t *testing.T) { // assert h3 is no longer in the contract set tt.Retry(5, time.Second, func() error { - contracts, err := b.Contracts(ctx, api.ContractsOpts{ContractSet: testAutopilotConfig.Contracts.Set}) + contracts, err := b.Contracts(ctx, api.ContractsOpts{ContractSet: test.AutopilotConfig.Contracts.Set}) tt.OK(err) if len(contracts) != 2 { return fmt.Errorf("unexpected number of contracts, %v != 2", len(contracts)) @@ -60,7 +61,7 @@ func TestBlocklist(t *testing.T) { // assert h1 is no longer in the contract set tt.Retry(5, time.Second, func() error { - contracts, err := b.Contracts(ctx, api.ContractsOpts{ContractSet: testAutopilotConfig.Contracts.Set}) + contracts, err := b.Contracts(ctx, api.ContractsOpts{ContractSet: test.AutopilotConfig.Contracts.Set}) tt.OK(err) if len(contracts) != 1 { return fmt.Errorf("unexpected number of contracts, %v != 1", len(contracts)) @@ -77,7 +78,7 @@ func TestBlocklist(t *testing.T) { tt.OK(b.UpdateHostAllowlist(ctx, nil, []types.PublicKey{hk1, hk2}, false)) tt.OK(b.UpdateHostBlocklist(ctx, nil, []string{h1.NetAddress}, false)) tt.Retry(5, time.Second, func() error { - contracts, err := 
b.Contracts(ctx, api.ContractsOpts{ContractSet: testAutopilotConfig.Contracts.Set}) + contracts, err := b.Contracts(ctx, api.ContractsOpts{ContractSet: test.AutopilotConfig.Contracts.Set}) tt.OK(err) if len(contracts) != 3 { return fmt.Errorf("unexpected number of contracts, %v != 3", len(contracts)) diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 142bedf47..b9776d598 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -14,9 +14,7 @@ import ( "time" "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" "go.sia.tech/core/consensus" - rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/jape" "go.sia.tech/renterd/api" @@ -36,66 +34,14 @@ import ( ) const ( - testBusFlushInterval = 100 * time.Millisecond - testContractSet = "testset" - testPersistInterval = 2 * time.Second - latestHardforkHeight = 50 // foundation hardfork height in testing + testBusFlushInterval = 100 * time.Millisecond + testBusPersistInterval = 2 * time.Second + latestHardforkHeight = 50 // foundation hardfork height in testing ) var ( clusterOptsDefault = testClusterOptions{} clusterOptNoFunding = false - - // testAutopilotConfig is the autopilot used for testing unless a different - // one is explicitly set. - testAutopilotConfig = api.AutopilotConfig{ - Contracts: api.ContractsConfig{ - Allowance: types.Siacoins(1).Mul64(1e3), - Amount: 3, - Period: 144, - RenewWindow: 72, - - Download: rhpv2.SectorSize * 500, - Upload: rhpv2.SectorSize * 500, - Storage: rhpv2.SectorSize * 5e3, - - Set: testContractSet, - Prune: false, - }, - Hosts: api.HostsConfig{ - MaxDowntimeHours: 10, - MinRecentScanFailures: 10, - AllowRedundantIPs: true, // allow for integration tests by default - }, - } - - testContractSetSettings = api.ContractSetSetting{ - Default: testContractSet, - } - - testGougingSettings = api.GougingSettings{ - MinMaxCollateral: types.Siacoins(10), // at least up to 10 SC per contract - MaxRPCPrice: types.Siacoins(1).Div64(1000), // 1mS per RPC - MaxContractPrice: types.Siacoins(10), // 10 SC per contract - MaxDownloadPrice: types.Siacoins(1).Mul64(1000), // 1000 SC per 1 TiB - MaxUploadPrice: types.Siacoins(1).Mul64(1000), // 1000 SC per 1 TiB - MaxStoragePrice: types.Siacoins(1000).Div64(144 * 30), // 1000 SC per month - - HostBlockHeightLeeway: 240, // amount of leeway given to host block height - - MinPriceTableValidity: 10 * time.Second, // minimum value for price table validity - MinAccountExpiry: time.Hour, // minimum value for account expiry - MinMaxEphemeralAccountBalance: types.Siacoins(1), // 1SC - } - - testRedundancySettings = api.RedundancySettings{ - MinShards: 2, - TotalShards: 3, - } - - testS3AccessKeyID = "TESTINGYNHUWCPKOPSYQ" - testS3SecretAccessKey = "Rh30BNyj+qNI4ftYRteoZbHJ3X4Ln71QtZkRXzJ9" - testS3Credentials = credentials.NewStaticV4(testS3AccessKeyID, testS3SecretAccessKey, "") ) // TestCluster is a helper type that allows for easily creating a number of @@ -287,7 +233,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { if opts.uploadPacking { enableUploadPacking = opts.uploadPacking } - apSettings := testAutopilotConfig + apSettings := test.AutopilotConfig if opts.autopilotSettings != nil { apSettings = *opts.autopilotSettings } @@ -340,14 +286,14 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { busClient := bus.NewClient(busAddr, busPassword) workerClient := worker.NewClient(workerAddr, workerPassword) s3Client, err := minio.New(s3Addr, 
&minio.Options{ - Creds: testS3Credentials, + Creds: test.S3Credentials, Secure: false, }) tt.OK(err) url := s3Client.EndpointURL() s3Core, err := minio.NewCore(url.Host+url.Path, &minio.Options{ - Creds: testS3Credentials, + Creds: test.S3Credentials, }) tt.OK(err) @@ -455,7 +401,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { // Set the test contract set to make sure we can add objects at the // beginning of a test right away. - tt.OK(busClient.SetContractSet(ctx, testContractSet, []types.FileContractID{})) + tt.OK(busClient.SetContractSet(ctx, test.ContractSet, []types.FileContractID{})) // Update the autopilot to use test settings if !opts.skipSettingAutopilot { @@ -466,11 +412,11 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { } // Update the bus settings. - tt.OK(busClient.UpdateSetting(ctx, api.SettingGouging, testGougingSettings)) - tt.OK(busClient.UpdateSetting(ctx, api.SettingRedundancy, testRedundancySettings)) - tt.OK(busClient.UpdateSetting(ctx, api.SettingContractSet, testContractSetSettings)) + tt.OK(busClient.UpdateSetting(ctx, api.SettingGouging, test.GougingSettings)) + tt.OK(busClient.UpdateSetting(ctx, api.SettingRedundancy, test.RedundancySettings)) + tt.OK(busClient.UpdateSetting(ctx, api.SettingContractSet, test.ContractSetSettings)) tt.OK(busClient.UpdateSetting(ctx, api.SettingS3Authentication, api.S3AuthenticationSettings{ - V4Keypairs: map[string]string{testS3AccessKeyID: testS3SecretAccessKey}, + V4Keypairs: map[string]string{test.S3AccessKeyID: test.S3SecretAccessKey}, })) tt.OK(busClient.UpdateSetting(ctx, api.SettingUploadPacking, api.UploadPackingSettings{Enabled: enableUploadPacking})) @@ -501,7 +447,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { if nHosts > 0 { cluster.AddHostsBlocking(nHosts) cluster.WaitForContracts() - cluster.WaitForContractSet(testContractSet, nHosts) + cluster.WaitForContractSet(test.ContractSet, nHosts) _ = cluster.WaitForAccounts() } @@ -926,7 +872,7 @@ func testBusCfg() node.BusConfig { AnnouncementMaxAgeHours: 24 * 7 * 52, // 1 year Bootstrap: false, GatewayAddr: "127.0.0.1:0", - PersistInterval: testPersistInterval, + PersistInterval: testBusPersistInterval, UsedUTXOExpiry: time.Minute, SlabBufferCompletionThreshold: 0, }, diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 5e9f440db..c546937eb 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -24,6 +24,7 @@ import ( "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/internal/test" "go.sia.tech/renterd/object" "go.sia.tech/renterd/wallet" "go.uber.org/zap" @@ -264,7 +265,7 @@ func TestObjectEntries(t *testing.T) { // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() @@ -435,7 +436,7 @@ func TestObjectsRename(t *testing.T) { // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() @@ -491,7 +492,7 @@ func TestUploadDownloadEmpty(t *testing.T) { // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() @@ -519,13 +520,13 @@ func TestUploadDownloadBasic(t 
*testing.T) { } // sanity check the default settings - if testAutopilotConfig.Contracts.Amount < uint64(testRedundancySettings.MinShards) { + if test.AutopilotConfig.Contracts.Amount < uint64(test.RedundancySettings.MinShards) { t.Fatal("too few hosts to support the redundancy settings") } // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() @@ -546,8 +547,8 @@ func TestUploadDownloadBasic(t *testing.T) { for _, slab := range resp.Object.Slabs { hosts := make(map[types.PublicKey]struct{}) roots := make(map[types.Hash256]struct{}) - if len(slab.Shards) != testRedundancySettings.TotalShards { - t.Fatal("wrong amount of shards", len(slab.Shards), testRedundancySettings.TotalShards) + if len(slab.Shards) != test.RedundancySettings.TotalShards { + t.Fatal("wrong amount of shards", len(slab.Shards), test.RedundancySettings.TotalShards) } for _, shard := range slab.Shards { if shard.LatestHost == (types.PublicKey{}) { @@ -631,13 +632,13 @@ func TestUploadDownloadExtended(t *testing.T) { } // sanity check the default settings - if testAutopilotConfig.Contracts.Amount < uint64(testRedundancySettings.MinShards) { + if test.AutopilotConfig.Contracts.Amount < uint64(test.RedundancySettings.MinShards) { t.Fatal("too few hosts to support the redundancy settings") } // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() @@ -771,18 +772,18 @@ func TestUploadDownloadExtended(t *testing.T) { // and download spending metrics are tracked properly. func TestUploadDownloadSpending(t *testing.T) { // sanity check the default settings - if testAutopilotConfig.Contracts.Amount < uint64(testRedundancySettings.MinShards) { + if test.AutopilotConfig.Contracts.Amount < uint64(test.RedundancySettings.MinShards) { t.Fatal("too few hosts to support the redundancy settings") } // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() w := cluster.Worker - rs := testRedundancySettings + rs := test.RedundancySettings tt := cluster.tt // check that the funding was recorded @@ -891,7 +892,7 @@ func TestUploadDownloadSpending(t *testing.T) { } // fetch contract set contracts - contracts, err := cluster.Bus.Contracts(context.Background(), api.ContractsOpts{ContractSet: testAutopilotConfig.Contracts.Set}) + contracts, err := cluster.Bus.Contracts(context.Background(), api.ContractsOpts{ContractSet: test.AutopilotConfig.Contracts.Set}) tt.OK(err) currentSet := make(map[types.FileContractID]struct{}) for _, c := range contracts { @@ -1090,7 +1091,7 @@ func TestParallelUpload(t *testing.T) { // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() @@ -1168,7 +1169,7 @@ func TestParallelDownload(t *testing.T) { // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() @@ -1285,7 +1286,7 @@ func TestUploadDownloadSameHost(t *testing.T) { // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: 
test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() tt := cluster.tt @@ -1316,7 +1317,7 @@ func TestUploadDownloadSameHost(t *testing.T) { // build a frankenstein object constructed with all sectors on the same host res.Object.Slabs[0].Shards = shards[res.Object.Slabs[0].Shards[0].LatestHost] - tt.OK(b.AddObject(context.Background(), api.DefaultBucketName, "frankenstein", testContractSet, *res.Object.Object, api.AddObjectOptions{})) + tt.OK(b.AddObject(context.Background(), api.DefaultBucketName, "frankenstein", test.ContractSet, *res.Object.Object, api.AddObjectOptions{})) // assert we can download this object tt.OK(w.DownloadObject(context.Background(), io.Discard, api.DefaultBucketName, "frankenstein", api.DownloadObjectOptions{})) @@ -1524,20 +1525,20 @@ func TestUploadPacking(t *testing.T) { } // sanity check the default settings - if testAutopilotConfig.Contracts.Amount < uint64(testRedundancySettings.MinShards) { + if test.AutopilotConfig.Contracts.Amount < uint64(test.RedundancySettings.MinShards) { t.Fatal("too few hosts to support the redundancy settings") } // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, uploadPacking: true, }) defer cluster.Shutdown() b := cluster.Bus w := cluster.Worker - rs := testRedundancySettings + rs := test.RedundancySettings tt := cluster.tt // prepare 3 files which are all smaller than a slab but together make up @@ -1772,7 +1773,7 @@ func TestSlabBufferStats(t *testing.T) { } // sanity check the default settings - if testAutopilotConfig.Contracts.Amount < uint64(testRedundancySettings.MinShards) { + if test.AutopilotConfig.Contracts.Amount < uint64(test.RedundancySettings.MinShards) { t.Fatal("too few hosts to support the redundancy settings") } @@ -1782,14 +1783,14 @@ func TestSlabBufferStats(t *testing.T) { busCfg.SlabBufferCompletionThreshold = int64(threshold) cluster := newTestCluster(t, testClusterOptions{ busCfg: &busCfg, - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, uploadPacking: true, }) defer cluster.Shutdown() b := cluster.Bus w := cluster.Worker - rs := testRedundancySettings + rs := test.RedundancySettings tt := cluster.tt // prepare 3 files which are all smaller than a slab but together make up @@ -1838,8 +1839,8 @@ func TestSlabBufferStats(t *testing.T) { if len(buffers) != 1 { t.Fatal("expected 1 slab buffer, got", len(buffers)) } - if buffers[0].ContractSet != testContractSet { - t.Fatalf("expected slab buffer contract set of %v, got %v", testContractSet, buffers[0].ContractSet) + if buffers[0].ContractSet != test.ContractSet { + t.Fatalf("expected slab buffer contract set of %v, got %v", test.ContractSet, buffers[0].ContractSet) } if buffers[0].Size != int64(len(data1)) { t.Fatalf("expected slab buffer size of %v, got %v", len(data1), buffers[0].Size) @@ -2020,7 +2021,7 @@ func TestMultipartUploads(t *testing.T) { } cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, uploadPacking: true, }) defer cluster.Shutdown() @@ -2266,7 +2267,7 @@ func TestWalletFormUnconfirmed(t *testing.T) { } // Enable autopilot by setting it. - cluster.UpdateAutopilotConfig(context.Background(), testAutopilotConfig) + cluster.UpdateAutopilotConfig(context.Background(), test.AutopilotConfig) // Wait for a contract to form. 
contractsFormed := cluster.WaitForContracts() @@ -2300,8 +2301,8 @@ func TestBusRecordedMetrics(t *testing.T) { for _, m := range csMetrics { if m.Contracts != 1 { t.Fatalf("expected 1 contract, got %v", m.Contracts) - } else if m.Name != testContractSet { - t.Fatalf("expected contract set %v, got %v", testContractSet, m.Name) + } else if m.Name != test.ContractSet { + t.Fatalf("expected contract set %v, got %v", test.ContractSet, m.Name) } else if m.Timestamp.Std().Before(startTime) { t.Fatalf("expected time to be after start time %v, got %v", startTime, m.Timestamp.Std()) } @@ -2317,8 +2318,8 @@ func TestBusRecordedMetrics(t *testing.T) { t.Fatalf("expected added churn, got %v", m.Direction) } else if m.ContractID == (types.FileContractID{}) { t.Fatal("expected non-zero FCID") - } else if m.Name != testContractSet { - t.Fatalf("expected contract set %v, got %v", testContractSet, m.Name) + } else if m.Name != test.ContractSet { + t.Fatalf("expected contract set %v, got %v", test.ContractSet, m.Name) } else if m.Timestamp.Std().Before(startTime) { t.Fatalf("expected time to be after start time %v, got %v", startTime, m.Timestamp.Std()) } @@ -2377,14 +2378,14 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) { } cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, uploadPacking: true, }) defer cluster.Shutdown() defer cluster.Shutdown() b := cluster.Bus w := cluster.Worker - slabSize := testRedundancySettings.SlabSizeNoRedundancy() + slabSize := test.RedundancySettings.SlabSizeNoRedundancy() tt := cluster.tt // start a new multipart upload. We upload the parts in reverse order diff --git a/internal/test/e2e/gouging_test.go b/internal/test/e2e/gouging_test.go index 423733ab6..60e2f9b5e 100644 --- a/internal/test/e2e/gouging_test.go +++ b/internal/test/e2e/gouging_test.go @@ -10,6 +10,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" "go.uber.org/zap/zapcore" "lukechampine.com/frand" ) @@ -21,12 +22,12 @@ func TestGouging(t *testing.T) { // create a new test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: int(testAutopilotConfig.Contracts.Amount), + hosts: int(test.AutopilotConfig.Contracts.Amount), logger: newTestLoggerCustom(zapcore.ErrorLevel), }) defer cluster.Shutdown() - cfg := testAutopilotConfig.Contracts + cfg := test.AutopilotConfig.Contracts b := cluster.Bus w := cluster.Worker tt := cluster.tt diff --git a/internal/test/e2e/metadata_test.go b/internal/test/e2e/metadata_test.go index 47fb6a2b1..b71078eef 100644 --- a/internal/test/e2e/metadata_test.go +++ b/internal/test/e2e/metadata_test.go @@ -7,6 +7,7 @@ import ( "testing" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" "go.uber.org/zap" ) @@ -17,7 +18,7 @@ func TestObjectMetadata(t *testing.T) { // create cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, logger: zap.NewNop(), }) defer cluster.Shutdown() diff --git a/internal/test/e2e/metrics_test.go b/internal/test/e2e/metrics_test.go index ed432c3c2..aaa139102 100644 --- a/internal/test/e2e/metrics_test.go +++ b/internal/test/e2e/metrics_test.go @@ -10,6 +10,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" "lukechampine.com/frand" ) @@ -22,12 +23,12 @@ func TestMetrics(t *testing.T) { start := time.Now() 
// enable pruning - apCfg := testAutopilotConfig + apCfg := test.AutopilotConfig apCfg.Contracts.Prune = true // create a test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, autopilotSettings: &apCfg, }) defer cluster.Shutdown() diff --git a/internal/test/e2e/migrations_test.go b/internal/test/e2e/migrations_test.go index 66325cc15..91bcc20b7 100644 --- a/internal/test/e2e/migrations_test.go +++ b/internal/test/e2e/migrations_test.go @@ -10,6 +10,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" "lukechampine.com/frand" ) @@ -19,13 +20,13 @@ func TestMigrations(t *testing.T) { } // create a new test cluster - cfg := testAutopilotConfig - cfg.Contracts.Amount = uint64(testRedundancySettings.TotalShards) + 1 + cfg := test.AutopilotConfig + cfg.Contracts.Amount = uint64(test.RedundancySettings.TotalShards) + 1 cluster := newTestCluster(t, testClusterOptions{ // configure the cluster to use 1 more host than the total shards in the // redundancy settings. autopilotSettings: &cfg, - hosts: int(testRedundancySettings.TotalShards) + 1, + hosts: int(test.RedundancySettings.TotalShards) + 1, }) defer cluster.Shutdown() @@ -60,8 +61,8 @@ func TestMigrations(t *testing.T) { // assert amount of hosts used used := usedHosts(path) - if len(used) != testRedundancySettings.TotalShards { - t.Fatal("unexpected amount of hosts used", len(used), testRedundancySettings.TotalShards) + if len(used) != test.RedundancySettings.TotalShards { + t.Fatal("unexpected amount of hosts used", len(used), test.RedundancySettings.TotalShards) } // select one host to remove diff --git a/internal/test/e2e/pruning_test.go b/internal/test/e2e/pruning_test.go index bd1bb822d..2a9f91000 100644 --- a/internal/test/e2e/pruning_test.go +++ b/internal/test/e2e/pruning_test.go @@ -13,6 +13,7 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/internal/test" ) func TestHostPruning(t *testing.T) { @@ -136,8 +137,8 @@ func TestSectorPruning(t *testing.T) { } // convenience variables - cfg := testAutopilotConfig - rs := testRedundancySettings + cfg := test.AutopilotConfig + rs := test.RedundancySettings w := cluster.Worker b := cluster.Bus tt := cluster.tt diff --git a/internal/test/e2e/s3_test.go b/internal/test/e2e/s3_test.go index 92b37af04..b25e11871 100644 --- a/internal/test/e2e/s3_test.go +++ b/internal/test/e2e/s3_test.go @@ -15,6 +15,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/gofakes3" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" "go.uber.org/zap" "lukechampine.com/frand" ) @@ -30,7 +31,7 @@ func TestS3Basic(t *testing.T) { start := time.Now() cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() @@ -176,7 +177,7 @@ func TestS3ObjectMetadata(t *testing.T) { // create cluster opts := testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, logger: zap.NewNop(), } cluster := newTestCluster(t, opts) @@ -288,7 +289,7 @@ func TestS3Authentication(t *testing.T) { // Create client with credentials and try again.. 
s3Authenticated, err := minio.NewCore(url, &minio.Options{ - Creds: testS3Credentials, + Creds: test.S3Credentials, }) tt.OK(err) @@ -328,7 +329,7 @@ func TestS3Authentication(t *testing.T) { func TestS3List(t *testing.T) { cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, uploadPacking: true, }) defer cluster.Shutdown() @@ -463,7 +464,7 @@ func TestS3MultipartUploads(t *testing.T) { } cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, uploadPacking: true, }) defer cluster.Shutdown() @@ -594,7 +595,7 @@ func TestS3MultipartPruneSlabs(t *testing.T) { } cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, uploadPacking: true, }) defer cluster.Shutdown() @@ -623,7 +624,7 @@ func TestS3MultipartPruneSlabs(t *testing.T) { // Upload 1 regular object. It will share the same packed slab, cause the // packed slab to be complete and start a new one. - data = frand.Bytes(testRedundancySettings.MinShards*rhpv2.SectorSize - 1) + data = frand.Bytes(test.RedundancySettings.MinShards*rhpv2.SectorSize - 1) tt.OKAll(s3.PutObject(context.Background(), bucket, "bar", bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{})) // Block until the buffer is uploaded. @@ -648,7 +649,7 @@ func TestS3SpecialChars(t *testing.T) { } cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, uploadPacking: true, }) defer cluster.Shutdown() diff --git a/internal/test/e2e/uploads_test.go b/internal/test/e2e/uploads_test.go index 96daae8c8..3f83fd7e4 100644 --- a/internal/test/e2e/uploads_test.go +++ b/internal/test/e2e/uploads_test.go @@ -10,6 +10,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" "lukechampine.com/frand" ) @@ -49,12 +50,12 @@ func TestUploadingSectorsCache(t *testing.T) { } cluster := newTestCluster(t, testClusterOptions{ - hosts: testRedundancySettings.TotalShards, + hosts: test.RedundancySettings.TotalShards, }) defer cluster.Shutdown() w := cluster.Worker b := cluster.Bus - rs := testRedundancySettings + rs := test.RedundancySettings tt := cluster.tt // generate some random data From c9561824888b78480e58be3a686a0a11d509be59 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 27 Feb 2024 17:01:07 +0100 Subject: [PATCH 121/172] worker: use frand.Bytes --- worker/upload_test.go | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/worker/upload_test.go b/worker/upload_test.go index 88a9e4f75..044827799 100644 --- a/worker/upload_test.go +++ b/worker/upload_test.go @@ -34,7 +34,7 @@ func TestUpload(t *testing.T) { ul := w.uploadManager // create test data - data := testData(128) + data := frand.Bytes(128) // create upload params params := testParameters(t.Name()) @@ -144,7 +144,7 @@ func TestUploadPackedSlab(t *testing.T) { w.BlockAsyncPackedSlabUploads(params) // create test data - data := testData(128) + data := frand.Bytes(128) // upload data _, _, err := ul.Upload(context.Background(), bytes.NewReader(data), w.Contracts(), params, lockingPriorityUpload) @@ -213,12 +213,12 @@ func TestUploadPackedSlab(t *testing.T) { // upload 2x64 bytes using the worker params.path = t.Name() + "2" - _, err = w.upload(context.Background(), 
bytes.NewReader(testData(64)), w.Contracts(), params) + _, err = w.upload(context.Background(), bytes.NewReader(frand.Bytes(64)), w.Contracts(), params) if err != nil { t.Fatal(err) } params.path = t.Name() + "3" - _, err = w.upload(context.Background(), bytes.NewReader(testData(64)), w.Contracts(), params) + _, err = w.upload(context.Background(), bytes.NewReader(frand.Bytes(64)), w.Contracts(), params) if err != nil { t.Fatal(err) } @@ -233,7 +233,7 @@ func TestUploadPackedSlab(t *testing.T) { // upload one more byte (buffer limit reached) params.path = t.Name() + "4" - _, err = w.upload(context.Background(), bytes.NewReader(testData(1)), w.Contracts(), params) + _, err = w.upload(context.Background(), bytes.NewReader(frand.Bytes(1)), w.Contracts(), params) if err != nil { t.Fatal(err) } @@ -255,7 +255,7 @@ func TestUploadPackedSlab(t *testing.T) { // upload 1 byte using the worker params.path = t.Name() + "5" - _, err = w.upload(context.Background(), bytes.NewReader(testData(129)), w.Contracts(), params) + _, err = w.upload(context.Background(), bytes.NewReader(frand.Bytes(129)), w.Contracts(), params) if err != nil { t.Fatal(err) } @@ -286,7 +286,7 @@ func TestUploadShards(t *testing.T) { ul := w.uploadManager // create test data - data := testData(128) + data := frand.Bytes(128) // create upload params params := testParameters(t.Name()) @@ -401,7 +401,7 @@ func TestRefreshUploaders(t *testing.T) { hm := w.hm // create test data - data := testData(128) + data := frand.Bytes(128) // create upload params params := testParameters(t.Name()) @@ -499,7 +499,7 @@ func TestUploadRegression(t *testing.T) { dl := w.downloadManager // create test data - data := testData(128) + data := frand.Bytes(128) // create upload params params := testParameters(t.Name()) @@ -552,11 +552,3 @@ func testParameters(path string) uploadParameters { rs: testRedundancySettings, } } - -func testData(n int) []byte { - data := make([]byte, n) - if _, err := frand.Read(data); err != nil { - panic(err) - } - return data -} From 448fd4f2c1f851799f83e24fa330db4aab329aa7 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 27 Feb 2024 18:31:47 +0100 Subject: [PATCH 122/172] stores: update findAggregatedContractPeriods to fetch individual contracts --- stores/metrics.go | 64 ++++++++++----------- stores/migrations/mysql/metrics/schema.sql | 1 + stores/migrations/sqlite/metrics/schema.sql | 1 + 3 files changed, 34 insertions(+), 32 deletions(-) diff --git a/stores/metrics.go b/stores/metrics.go index 8816d1729..7b59a40a5 100644 --- a/stores/metrics.go +++ b/stores/metrics.go @@ -522,43 +522,43 @@ func (s *SQLStore) findAggregatedContractPeriods(start time.Time, n uint64, inte return nil, api.ErrMaxIntervalsExceeded } end := start.Add(time.Duration(n) * interval) - var metricsWithPeriod []struct { + + type metricWithPeriod struct { Metric dbContractMetric `gorm:"embedded"` Period int64 } - err := s.dbMetrics.Raw(` - WITH RECURSIVE periods AS ( - SELECT ? AS period_start - UNION ALL - SELECT period_start + ? - FROM periods - WHERE period_start < ? - ? - ) - SELECT contracts.*, i.Period FROM contracts - INNER JOIN ( - SELECT - p.period_start as Period, - MIN(c.id) AS id - FROM - periods p - INNER JOIN - contracts c ON c.timestamp >= p.period_start AND c.timestamp < p.period_start + ? - GROUP BY - p.period_start, c.fcid - ORDER BY - p.period_start ASC - ) i ON contracts.id = i.id - `, unixTimeMS(start), - interval.Milliseconds(), - unixTimeMS(end), - interval.Milliseconds(), - interval.Milliseconds(), - ). 
- Scan(&metricsWithPeriod). - Error + var metricsWithPeriod []metricWithPeriod + + err := s.dbMetrics.Transaction(func(tx *gorm.DB) error { + var fcids []fileContractID + if err := tx.Raw("SELECT DISTINCT fcid FROM contracts WHERE contracts.timestamp >= ? AND contracts.timestamp < ?", unixTimeMS(start), unixTimeMS(end)). + Scan(&fcids).Error; err != nil { + return fmt.Errorf("failed to fetch distinct contract ids: %w", err) + } + + for intervalStart := start; intervalStart.Before(end); intervalStart = intervalStart.Add(interval) { + intervalEnd := intervalStart.Add(interval) + for _, fcid := range fcids { + var metrics []dbContractMetric + err := tx.Raw("SELECT * FROM contracts WHERE contracts.timestamp >= ? AND contracts.timestamp < ? AND contracts.fcid = ? LIMIT 1", unixTimeMS(intervalStart), unixTimeMS(intervalEnd), fileContractID(fcid)). + Scan(&metrics).Error + if err != nil { + return fmt.Errorf("failed to fetch contract metrics: %w", err) + } else if len(metrics) == 0 { + continue + } + metricsWithPeriod = append(metricsWithPeriod, metricWithPeriod{ + Metric: metrics[0], + Period: intervalStart.UnixMilli(), + }) + } + } + return nil + }) if err != nil { - return nil, fmt.Errorf("failed to fetch aggregate metrics: %w", err) + return nil, err } + currentPeriod := int64(math.MinInt64) var metrics []dbContractMetric for _, m := range metricsWithPeriod { diff --git a/stores/migrations/mysql/metrics/schema.sql b/stores/migrations/mysql/metrics/schema.sql index 6d993f0cb..4b36e6b99 100644 --- a/stores/migrations/mysql/metrics/schema.sql +++ b/stores/migrations/mysql/metrics/schema.sql @@ -83,6 +83,7 @@ CREATE TABLE `contracts` ( KEY `idx_remaining_funds` (`remaining_funds_lo`,`remaining_funds_hi`), KEY `idx_delete_spending` (`delete_spending_lo`,`delete_spending_hi`), KEY `idx_list_spending` (`list_spending_lo`,`list_spending_hi`) + KEY `idx_contracts_fcid_timestamp` (`fcid`,`timestamp`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -- dbPerformanceMetric diff --git a/stores/migrations/sqlite/metrics/schema.sql b/stores/migrations/sqlite/metrics/schema.sql index 4aa174209..63dae7d65 100644 --- a/stores/migrations/sqlite/metrics/schema.sql +++ b/stores/migrations/sqlite/metrics/schema.sql @@ -11,6 +11,7 @@ CREATE INDEX `idx_download_spending` ON `contracts`(`download_spending_lo`,`down CREATE INDEX `idx_upload_spending` ON `contracts`(`upload_spending_lo`,`upload_spending_hi`); CREATE INDEX `idx_contracts_revision_number` ON `contracts`(`revision_number`); CREATE INDEX `idx_remaining_funds` ON `contracts`(`remaining_funds_lo`,`remaining_funds_hi`); +CREATE INDEX `idx_contracts_fcid_timestamp` ON `contracts`(`fcid`,`timestamp`); -- dbContractPruneMetric CREATE TABLE `contract_prunes` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`timestamp` BIGINT NOT NULL,`fcid` blob NOT NULL,`host` blob NOT NULL,`host_version` text,`pruned` BIGINT NOT NULL,`remaining` BIGINT NOT NULL,`duration` integer NOT NULL); From a32cd3befbcdac2dbcfac1007ea112cf6c7ea6c6 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 28 Feb 2024 08:54:39 +0100 Subject: [PATCH 123/172] stores: add migrations --- .../migration_00001_idx_contracts_fcid_timestamp.sql | 1 + .../migration_00001_idx_contracts_fcid_timestamp.sql | 1 + stores/migrations_metrics.go | 12 +++++++++--- 3 files changed, 11 insertions(+), 3 deletions(-) create mode 100644 stores/migrations/mysql/metrics/migration_00001_idx_contracts_fcid_timestamp.sql create mode 100644 
stores/migrations/sqlite/metrics/migration_00001_idx_contracts_fcid_timestamp.sql diff --git a/stores/migrations/mysql/metrics/migration_00001_idx_contracts_fcid_timestamp.sql b/stores/migrations/mysql/metrics/migration_00001_idx_contracts_fcid_timestamp.sql new file mode 100644 index 000000000..5276a3083 --- /dev/null +++ b/stores/migrations/mysql/metrics/migration_00001_idx_contracts_fcid_timestamp.sql @@ -0,0 +1 @@ +CREATE INDEX `idx_contracts_fcid_timestamp` ON `contracts`(`fcid`,`timestamp`); diff --git a/stores/migrations/sqlite/metrics/migration_00001_idx_contracts_fcid_timestamp.sql b/stores/migrations/sqlite/metrics/migration_00001_idx_contracts_fcid_timestamp.sql new file mode 100644 index 000000000..5276a3083 --- /dev/null +++ b/stores/migrations/sqlite/metrics/migration_00001_idx_contracts_fcid_timestamp.sql @@ -0,0 +1 @@ +CREATE INDEX `idx_contracts_fcid_timestamp` ON `contracts`(`fcid`,`timestamp`); diff --git a/stores/migrations_metrics.go b/stores/migrations_metrics.go index 60c62c476..fc3164bee 100644 --- a/stores/migrations_metrics.go +++ b/stores/migrations_metrics.go @@ -8,20 +8,26 @@ import ( "gorm.io/gorm" ) -func performMetricsMigrations(tx *gorm.DB, logger *zap.SugaredLogger) error { +func performMetricsMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { dbIdentifier := "metrics" migrations := []*gormigrate.Migration{ { ID: "00001_init", Migrate: func(tx *gorm.DB) error { return errRunV072 }, }, + { + ID: "00001_idx_contracts_fcid_timestamp", + Migrate: func(tx *gorm.DB) error { + return performMigration(tx, dbIdentifier, "00001_idx_contracts_fcid_timestamp", logger) + }, + }, } // Create migrator. - m := gormigrate.New(tx, gormigrate.DefaultOptions, migrations) + m := gormigrate.New(db, gormigrate.DefaultOptions, migrations) // Set init function. - m.InitSchema(initSchema(tx, dbIdentifier, logger)) + m.InitSchema(initSchema(db, dbIdentifier, logger)) // Perform migrations. if err := m.Migrate(); err != nil { From 82f1f5d153815f7a855b8e6375aff19b714dd18c Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 28 Feb 2024 09:25:00 +0100 Subject: [PATCH 124/172] stores: add contractMetricGranularity --- stores/metrics.go | 19 +++++++++++++++++++ stores/metrics_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/stores/metrics.go b/stores/metrics.go index 7b59a40a5..4137f3f65 100644 --- a/stores/metrics.go +++ b/stores/metrics.go @@ -14,6 +14,10 @@ import ( "gorm.io/gorm/clause" ) +const ( + contractMetricGranularity = 5 * time.Minute +) + type ( // dbContractMetric tracks information about a contract's funds. It is // supposed to be reported by a worker every time a contract is revised. @@ -246,6 +250,21 @@ func (s *SQLStore) RecordContractMetric(ctx context.Context, metrics ...api.Cont } } return s.dbMetrics.Transaction(func(tx *gorm.DB) error { + // delete any existing metric for the same contract that has happened + // within the same 30 seconds window by diving the timestamp by 30 seconds and use integer division. + for _, metric := range metrics { + intervalStart := metric.Timestamp.Std().Truncate(contractMetricGranularity) + intervalEnd := intervalStart.Add(contractMetricGranularity) + err := tx. + Where("timestamp >= ?", unixTimeMS(intervalStart)). + Where("timestamp < ?", unixTimeMS(intervalEnd)). + Where("fcid", fileContractID(metric.ContractID)). + Delete(&dbContractMetric{}). 
+				Error
+			if err != nil {
+				return err
+			}
+		}
 		return tx.Create(&dbMetrics).Error
 	})
 }
diff --git a/stores/metrics_test.go b/stores/metrics_test.go
index f71d985bd..ec97099ba 100644
--- a/stores/metrics_test.go
+++ b/stores/metrics_test.go
@@ -488,6 +488,30 @@ func TestContractMetrics(t *testing.T) {
 	} else if len(metrics) != 1 {
 		t.Fatalf("expected 1 metric, got %v", len(metrics))
 	}
+
+	// Drop all metrics.
+	if err := ss.dbMetrics.Where("TRUE").Delete(&dbContractMetric{}).Error; err != nil {
+		t.Fatal(err)
+	}
+
+	// Record multiple metrics for the same contract - one per second over 10 minutes.
+	for i := int64(0); i < 600; i++ {
+		err := ss.RecordContractMetric(context.Background(), api.ContractMetric{
+			ContractID: types.FileContractID{1},
+			Timestamp:  api.TimeRFC3339(time.Unix(i, 0)),
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// Check how many metrics were recorded.
+	var n int64
+	if err := ss.dbMetrics.Model(&dbContractMetric{}).Count(&n).Error; err != nil {
+		t.Fatal(err)
+	} else if n != 2 {
+		t.Fatalf("expected 2 metrics, got %v", n)
+	}
 }
 
 func TestWalletMetrics(t *testing.T) {

From 7db0f926ee3f30bab346a37e95bc14193ac0d618 Mon Sep 17 00:00:00 2001
From: Chris Schinnerl
Date: Wed, 28 Feb 2024 10:03:07 +0100
Subject: [PATCH 125/172] stores: fix syntax error

---
 stores/migrations/mysql/metrics/schema.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stores/migrations/mysql/metrics/schema.sql b/stores/migrations/mysql/metrics/schema.sql
index 4b36e6b99..da4db5a6e 100644
--- a/stores/migrations/mysql/metrics/schema.sql
+++ b/stores/migrations/mysql/metrics/schema.sql
@@ -82,7 +82,7 @@ CREATE TABLE `contracts` (
   KEY `idx_contracts_timestamp` (`timestamp`),
   KEY `idx_remaining_funds` (`remaining_funds_lo`,`remaining_funds_hi`),
   KEY `idx_delete_spending` (`delete_spending_lo`,`delete_spending_hi`),
-  KEY `idx_list_spending` (`list_spending_lo`,`list_spending_hi`)
+  KEY `idx_list_spending` (`list_spending_lo`,`list_spending_hi`),
   KEY `idx_contracts_fcid_timestamp` (`fcid`,`timestamp`)
 ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;

From 017c775c583c83de495d67e6398201cf2092bb77 Mon Sep 17 00:00:00 2001
From: PJ
Date: Wed, 28 Feb 2024 10:23:31 +0100
Subject: [PATCH 126/172] autopilot: fix json key casing

---
 autopilot/churn.go | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/autopilot/churn.go b/autopilot/churn.go
index fdc1a0f54..31a1073cf 100644
--- a/autopilot/churn.go
+++ b/autopilot/churn.go
@@ -32,10 +32,10 @@ func (c *accumulatedChurn) Alert(name string) alerts.Alert {
 		Severity: alerts.SeverityInfo,
 		Message:  "Contract set changed",
 		Data: map[string]any{
-			"name":          name,
-			"set_additions": c.additions,
-			"set_removals":  c.removals,
-			"hint":          hint,
+			"name":         name,
+			"setAdditions": c.additions,
+			"setRemovals":  c.removals,
+			"hint":         hint,
 		},
 		Timestamp: time.Now(),
 	}

From 91ab508b67b9e6448eb640fb458549f362db3e33 Mon Sep 17 00:00:00 2001
From: Chris Schinnerl
Date: Wed, 28 Feb 2024 11:25:07 +0100
Subject: [PATCH 127/172] worker: ignore errUploadInterrupted when registering upload failure alerts

---
 worker/worker.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/worker/worker.go b/worker/worker.go
index 17faca7cb..3812b7e84 100644
--- a/worker/worker.go
+++ b/worker/worker.go
@@ -1041,7 +1041,7 @@ func (w *worker) objectsHandlerPUT(jc jape.Context) {
 	if err := jc.Check("couldn't upload object", err); err != nil {
 		if err != nil {
 			w.logger.Error(err)
-			if !errors.Is(err, ErrShuttingDown) {
+			if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errUploadInterrupted) {
 				w.registerAlert(newUploadFailedAlert(bucket, path, up.ContractSet, mimeType, rs.MinShards, rs.TotalShards, len(contracts), up.UploadPacking, false, err))
 			}
 		}
@@ -1179,7 +1179,7 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) {
 	if jc.Check("couldn't upload object", err) != nil {
 		if err != nil {
 			w.logger.Error(err)
-			if !errors.Is(err, ErrShuttingDown) {
+			if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errUploadInterrupted) {
 				w.registerAlert(newUploadFailedAlert(bucket, path, up.ContractSet, "", rs.MinShards, rs.TotalShards, len(contracts), up.UploadPacking, true, err))
 			}
 		}
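
The deduplication added in patch 124 keys every metric to the start of a 5-minute window via time.Truncate, which is why its new test expects exactly 2 surviving metrics out of 600 recorded one second apart (patch 128 below brings the comment in line with that behavior). A minimal, self-contained sketch of the bucketing follows; this is illustrative standalone code, not part of any patch, and the granularity constant merely mirrors contractMetricGranularity from stores/metrics.go:

package main

import (
	"fmt"
	"time"
)

// granularity mirrors contractMetricGranularity (5 minutes) from stores/metrics.go.
const granularity = 5 * time.Minute

func main() {
	// Bucket 600 per-second timestamps (10 minutes' worth) by window start,
	// the same truncation RecordContractMetric applies before deduplicating.
	windows := make(map[time.Time]int)
	for i := int64(0); i < 600; i++ {
		ts := time.Unix(i, 0).UTC()
		windows[ts.Truncate(granularity)]++
	}
	fmt.Println(len(windows)) // prints 2: one window per 5-minute span
}

Since at most one metric per contract survives in each window, the 600 recorded samples collapse to 2 rows, matching the assertion in TestContractMetrics.
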
!errors.Is(err, ErrShuttingDown) && !errors.Is(err, errUploadInterrupted) { w.registerAlert(newUploadFailedAlert(bucket, path, up.ContractSet, mimeType, rs.MinShards, rs.TotalShards, len(contracts), up.UploadPacking, false, err)) } } @@ -1179,7 +1179,7 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { if jc.Check("couldn't upload object", err) != nil { if err != nil { w.logger.Error(err) - if !errors.Is(err, ErrShuttingDown) { + if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errUploadInterrupted) { w.registerAlert(newUploadFailedAlert(bucket, path, up.ContractSet, "", rs.MinShards, rs.TotalShards, len(contracts), up.UploadPacking, true, err)) } } From 75afd5624357b09ea1b685353b0686354fed16ce Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 29 Feb 2024 09:16:15 +0100 Subject: [PATCH 128/172] Update stores/metrics.go Co-authored-by: Peter-Jan Brone --- stores/metrics.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stores/metrics.go b/stores/metrics.go index 4137f3f65..333ed8a42 100644 --- a/stores/metrics.go +++ b/stores/metrics.go @@ -251,7 +251,7 @@ func (s *SQLStore) RecordContractMetric(ctx context.Context, metrics ...api.Cont } return s.dbMetrics.Transaction(func(tx *gorm.DB) error { // delete any existing metric for the same contract that has happened - // within the same 30 seconds window by diving the timestamp by 30 seconds and use integer division. + // within the same 5' window by diving the timestamp by 5' and use integer division. for _, metric := range metrics { intervalStart := metric.Timestamp.Std().Truncate(contractMetricGranularity) intervalEnd := intervalStart.Add(contractMetricGranularity) From 5e4aaab0ceeed6359a7866ae76cf903d24851339 Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 29 Feb 2024 09:58:08 +0100 Subject: [PATCH 129/172] autopilot: reduce logger noise --- autopilot/autopilot.go | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index e5ddd8411..f0d81f434 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -202,6 +202,13 @@ func (ap *Autopilot) Run() error { var forceScan bool var launchAccountRefillsOnce sync.Once for { + // check for shutdown right before starting a new iteration + select { + case <-ap.shutdownCtx.Done(): + return nil + default: + } + ap.logger.Info("autopilot iteration starting") tickerFired := make(chan struct{}) ap.workers.withWorker(func(w Worker) { @@ -220,7 +227,7 @@ func (ap *Autopilot) Run() error { close(tickerFired) return } - ap.logger.Error("autopilot stopped before consensus was synced") + ap.logger.Info("autopilot stopped before consensus was synced") return } else if blocked { if scanning, _ := ap.s.Status(); !scanning { @@ -234,7 +241,7 @@ func (ap *Autopilot) Run() error { close(tickerFired) return } - ap.logger.Error("autopilot stopped before it was able to confirm it was configured in the bus") + ap.logger.Info("autopilot stopped before it was able to confirm it was configured in the bus") return } @@ -463,11 +470,17 @@ func (ap *Autopilot) blockUntilSynced(interrupt <-chan time.Time) (synced, block } func (ap *Autopilot) tryScheduleTriggerWhenFunded() error { - ctx, cancel := context.WithTimeout(ap.shutdownCtx, 30*time.Second) - wallet, err := ap.bus.Wallet(ctx) - cancel() + // no need to schedule a trigger if we're stopped + if ap.isStopped() { + return nil + } + + // apply sane timeout + ctx, cancel := context.WithTimeout(ap.shutdownCtx, time.Minute) + defer 
cancel() // no need to schedule a trigger if the wallet is already funded + wallet, err := ap.bus.Wallet(ctx) if err != nil { return err } else if !wallet.Confirmed.Add(wallet.Unconfirmed).IsZero() { From 54cd3f928742c2fa011693be52e4234f59cc5662 Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 29 Feb 2024 10:42:29 +0100 Subject: [PATCH 130/172] autoilot: implement MR remarks --- autopilot/autopilot.go | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index f0d81f434..da7105d06 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -194,21 +194,14 @@ func (ap *Autopilot) Run() error { } // schedule a trigger when the wallet receives its first deposit - if err := ap.tryScheduleTriggerWhenFunded(); err != nil { + if err := ap.tryScheduleTriggerWhenFunded(); err != nil && !isErr(err, context.Canceled) { ap.logger.Error(err) return nil } var forceScan bool var launchAccountRefillsOnce sync.Once - for { - // check for shutdown right before starting a new iteration - select { - case <-ap.shutdownCtx.Done(): - return nil - default: - } - + for !ap.isStopped() { ap.logger.Info("autopilot iteration starting") tickerFired := make(chan struct{}) ap.workers.withWorker(func(w Worker) { @@ -315,6 +308,7 @@ func (ap *Autopilot) Run() error { case <-tickerFired: } } + return nil } // Shutdown shuts down the autopilot. @@ -470,11 +464,6 @@ func (ap *Autopilot) blockUntilSynced(interrupt <-chan time.Time) (synced, block } func (ap *Autopilot) tryScheduleTriggerWhenFunded() error { - // no need to schedule a trigger if we're stopped - if ap.isStopped() { - return nil - } - // apply sane timeout ctx, cancel := context.WithTimeout(ap.shutdownCtx, time.Minute) defer cancel() From f0cbd63ff20ee317cfab3659f738014a7392f564 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 29 Feb 2024 10:43:30 +0100 Subject: [PATCH 131/172] Create codeql.yml --- .github/workflows/codeql.yml | 84 ++++++++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100644 .github/workflows/codeql.yml diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 000000000..8e3029333 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,84 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: [ "dev", "master" ] + pull_request: + branches: [ "dev", "master" ] + schedule: + - cron: '22 22 * * 4' + +jobs: + analyze: + name: Analyze + # Runner size impacts CodeQL analysis time. To learn more, please see: + # - https://gh.io/recommended-hardware-resources-for-running-codeql + # - https://gh.io/supported-runners-and-hardware-resources + # - https://gh.io/using-larger-runners + # Consider using larger runners for possible analysis time improvements. 
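+    # The expression below picks macos-latest only for a 'swift' matrix
+    # entry; this workflow's matrix is Go-only, so it always resolves to
+    # ubuntu-latest (and, likewise, to the 360-minute timeout).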
+ runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} + timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }} + permissions: + # required for all workflows + security-events: write + + # only required for workflows in private repositories + actions: read + contents: read + + strategy: + fail-fast: false + matrix: + language: [ 'go' ] + # CodeQL supports [ 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' ] + # Use only 'java-kotlin' to analyze code written in Java, Kotlin or both + # Use only 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both + # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + + # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality + + + # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v3 + + # ℹ️ Command-line programs to run using the OS shell. + # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + + # If the Autobuild fails above, remove it and uncomment the following three lines. + # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. + + # - run: | + # echo "Run, Build Application using script" + # ./location_of_script_within_repo/buildscript.sh + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{matrix.language}}" From c947309da95d1c8b43becde6d699297b9b82a45d Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 28 Feb 2024 14:11:39 +0100 Subject: [PATCH 132/172] stores: fix deadlock in RefreshHealth --- stores/metadata.go | 57 ++++++++++++++++++---------------------------- 1 file changed, 22 insertions(+), 35 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index c281c9800..7db497be6 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1961,8 +1961,8 @@ func (s *SQLStore) RefreshHealth(ctx context.Context) error { // Update slab health in batches. now := time.Now() - for { - healthQuery := s.db.Raw(` + // build health query + healthQuery := s.db.Raw(` SELECT slabs.id, slabs.db_contract_set_id, CASE WHEN (slabs.min_shards = slabs.total_shards) THEN CASE WHEN (COUNT(DISTINCT(CASE WHEN cs.name IS NULL THEN NULL ELSE c.host_id END)) < slabs.min_shards) @@ -1981,50 +1981,37 @@ WHERE slabs.health_valid_until <= ? GROUP BY slabs.id LIMIT ? 
`, now.Unix(), refreshHealthBatchSize) + + for { var rowsAffected int64 err := s.retryTransaction(func(tx *gorm.DB) error { - // create temp table from the health query since we will reuse it - if err := tx.Exec("DROP TABLE IF EXISTS src").Error; err != nil { - return err - } else if err = tx.Exec("CREATE TEMPORARY TABLE src AS ?", healthQuery).Error; err != nil { - return err - } else if err = tx.Exec("CREATE INDEX src_id ON src (id)").Error; err != nil { - return err - } - var res *gorm.DB if isSQLite(s.db) { - res = tx.Exec("UPDATE slabs SET health = src.health, health_valid_until = (?) FROM src WHERE slabs.id=src.id", sqlRandomTimestamp(s.db, now, refreshHealthMinHealthValidity, refreshHealthMaxHealthValidity)) + res = tx.Exec("UPDATE slabs SET health = inner.health, health_valid_until = (?) FROM (?) AS inner WHERE slabs.id=inner.id", sqlRandomTimestamp(s.db, now, refreshHealthMinHealthValidity, refreshHealthMaxHealthValidity), healthQuery) } else { - res = tx.Exec("UPDATE slabs sla INNER JOIN src h ON sla.id = h.id SET sla.health = h.health, health_valid_until = (?)", sqlRandomTimestamp(s.db, now, refreshHealthMinHealthValidity, refreshHealthMaxHealthValidity)) + res = tx.Exec("UPDATE slabs sla INNER JOIN (?) h ON sla.id = h.id SET sla.health = h.health, health_valid_until = (?)", healthQuery, sqlRandomTimestamp(s.db, now, refreshHealthMinHealthValidity, refreshHealthMaxHealthValidity)) } if res.Error != nil { return res.Error } rowsAffected = res.RowsAffected - // Update the health of the objects associated with the updated slabs. - if isSQLite(s.db) { - return tx.Exec(`UPDATE objects SET health = i.health FROM ( - SELECT slices.db_object_id, MIN(s.health) AS health - FROM slices - INNER JOIN src s ON s.id = slices.db_slab_id - INNER JOIN objects o ON o.id = slices.db_object_id - GROUP BY slices.db_object_id - ) i - WHERE i.db_object_id = objects.id AND objects.health != i.health`).Error - } else { - return tx.Exec(`UPDATE objects - INNER JOIN ( - SELECT slices.db_object_id, MIN(s.health) as health - FROM slices - INNER JOIN src s ON s.id = slices.db_slab_id - GROUP BY slices.db_object_id - ) i ON objects.id = i.db_object_id - SET objects.health = i.health - WHERE objects.health != i.health - `).Error - } + // Update the health of objects with outdated health. + return tx.Exec(` + UPDATE objects + SET health = ( + SELECT MIN(slabs.health) + FROM slabs + INNER JOIN slices ON slices.db_slab_id = slabs.id + INNER JOIN objects ON slices.db_object_id = objects.id + ) + WHERE EXISTS ( + SELECT 1 FROM slabs + INNER JOIN slices ON slices.db_slab_id = slabs.id + INNER JOIN objects ON slices.db_object_id = objects.id + WHERE slabs.health < objects.health + ) + `).Error }) if err != nil { return err From 8aa3fe3f3a5f7b0c6e247ef554c4685e5ea4cd43 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 28 Feb 2024 14:41:21 +0100 Subject: [PATCH 133/172] stores: separate query for MySQL --- stores/metadata.go | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/stores/metadata.go b/stores/metadata.go index 7db497be6..618ab41d9 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1997,7 +1997,9 @@ LIMIT ? rowsAffected = res.RowsAffected // Update the health of objects with outdated health. - return tx.Exec(` + var err error + if isSQLite(tx) { + err = tx.Exec(` UPDATE objects SET health = ( SELECT MIN(slabs.health) @@ -2012,6 +2014,25 @@ LIMIT ? 
WHERE slabs.health < objects.health ) `).Error + } else { + err = tx.Exec(` + UPDATE objects + JOIN ( + SELECT slices.db_object_id, MIN(slabs.health) AS min_health + FROM slabs + INNER JOIN slices ON slices.db_slab_id = slabs.id + GROUP BY slices.db_object_id + ) AS min_healths ON objects.id = min_healths.db_object_id + SET objects.health = min_healths.min_health + WHERE objects.health > ( + SELECT MIN(slabs.health) + FROM slabs + INNER JOIN slices ON slices.db_slab_id = slabs.id + WHERE slices.db_object_id = objects.id + ); + `).Error + } + return err }) if err != nil { return err From d320125bd33b8caf6e5c3cd2d6746032e01a78e5 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 28 Feb 2024 18:15:46 +0100 Subject: [PATCH 134/172] stores: change UpdateSlab to update slab first --- stores/metadata.go | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 618ab41d9..901c0f73b 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1845,13 +1845,19 @@ func (ss *SQLStore) UpdateSlab(ctx context.Context, s object.Slab, contractSet s // Update slab. return ss.retryTransaction(func(tx *gorm.DB) (err error) { - // fetch contract set - var cs dbContractSet - if err := tx.Take(&cs, "name = ?", contractSet).Error; err != nil { + // update slab + if err := tx.Model(&dbSlab{}). + Where("key", key). + Updates(map[string]interface{}{ + "db_contract_set_id": gorm.Expr("(SELECT id FROM contract_sets WHERE name = ?)", contractSet), + "health_valid_until": time.Now().Unix(), + "health": 1, + }). + Error; err != nil { return err } - // find all contracts of that shard + // find all used contracts contracts, err := fetchUsedContracts(tx, usedContracts) if err != nil { return err @@ -1885,18 +1891,6 @@ func (ss *SQLStore) UpdateSlab(ctx context.Context, s object.Slab, contractSet s } } - // update fields - if err := tx.Model(&slab). - Where(&slab). - Updates(map[string]interface{}{ - "db_contract_set_id": cs.ID, - "health_valid_until": time.Now().Unix(), - "health": 1, - }). 
- Error; err != nil { - return err - } - // prepare sectors to update sectors := make([]dbSector, len(s.Shards)) for i := range s.Shards { From 96bb6ef800fa4b56ab62d54b5d93488dfcd7614e Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 29 Feb 2024 10:44:09 +0100 Subject: [PATCH 135/172] autopilot: fix error check --- autopilot/autopilot.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index da7105d06..a7cb225ba 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -194,8 +194,10 @@ func (ap *Autopilot) Run() error { } // schedule a trigger when the wallet receives its first deposit - if err := ap.tryScheduleTriggerWhenFunded(); err != nil && !isErr(err, context.Canceled) { - ap.logger.Error(err) + if err := ap.tryScheduleTriggerWhenFunded(); err != nil { + if !errors.Is(err, context.Canceled) { + ap.logger.Error(err) + } return nil } From 445e33b9a8573405f9286c35f7547e46c16638df Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 23 Feb 2024 11:45:22 +0100 Subject: [PATCH 136/172] autopilot: add endpoint for evaluating autopilot configuration --- api/autopilot.go | 25 ++++++++++++++-- autopilot/autopilot.go | 66 ++++++++++++++++++++++++++++++++++++++++++ autopilot/client.go | 11 +++++++ 3 files changed, 100 insertions(+), 2 deletions(-) diff --git a/api/autopilot.go b/api/autopilot.go index 6283f64f3..7033bbfd4 100644 --- a/api/autopilot.go +++ b/api/autopilot.go @@ -91,9 +91,30 @@ type ( StartTime TimeRFC3339 `json:"startTime"` BuildState } -) -type ( + ConfigEvaluationRequest struct { + AutopilotConfig AutopilotConfig `json:"autopilotConfig"` + GougingSettings GougingSettings `json:"gougingSettings"` + RedundancySettings RedundancySettings `json:"redundancySettings"` + } + + // ConfigEvaluationResponse is the response type for /evaluate + ConfigEvaluationResponse struct { + Usable uint64 `json:"usable"` + Total uint64 `json:"total"` + Blocked uint64 `json:"blocked"` + Gouging struct { + Contract uint64 `json:"contract"` + Download uint64 `json:"download"` + Gouging uint64 `json:"gouging"` + Pruning uint64 `json:"pruning"` + Upload uint64 `json:"upload"` + } + NotAcceptingContracts uint64 `json:"notAcceptingContracts"` + NotScanned uint64 `json:"notScanned"` + Other uint64 `json:"other"` + } + // HostHandlerResponse is the response type for the /host/:hostkey endpoint. 
HostHandlerResponse struct { Host hostdb.Host `json:"host"` diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index e5ddd8411..2f087a7cf 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -21,6 +21,7 @@ import ( "go.sia.tech/renterd/object" "go.sia.tech/renterd/wallet" "go.sia.tech/renterd/webhooks" + "go.sia.tech/renterd/worker" "go.uber.org/zap" ) @@ -166,6 +167,7 @@ func (ap *Autopilot) Handler() http.Handler { return jape.Mux(map[string]jape.Handler{ "GET /config": ap.configHandlerGET, "PUT /config": ap.configHandlerPUT, + "POST /config": ap.configHandlerPOST, "POST /hosts": ap.hostsHandlerPOST, "GET /host/:hostKey": ap.hostHandlerGET, "GET /state": ap.stateHandlerGET, @@ -173,6 +175,70 @@ func (ap *Autopilot) Handler() http.Handler { }) } +func (ap *Autopilot) configHandlerPOST(jc jape.Context) { + ctx := jc.Request.Context() + + // decode request + var req api.ConfigEvaluationRequest + if jc.Decode(&req) != nil { + return + } + + // fetch necessary information + apCfg := req.AutopilotConfig + gs := req.GougingSettings + rs := req.RedundancySettings + cs, err := ap.bus.ConsensusState(ctx) + if jc.Check("failed to get consensus state", err) != nil { + return + } + gc := worker.NewGougingChecker(gs, cs, ap.State().fee, ap.State().period, apCfg.Contracts.RenewWindow) + + // fetch hosts + hosts, err := ap.bus.Hosts(ctx, api.GetHostsOptions{}) + if jc.Check("failed to get hosts", err) != nil { + return + } + + var resp api.ConfigEvaluationResponse + for _, host := range hosts { + usable, usableBreakdown := isUsableHost(apCfg, rs, gc, host, 0, 0) + if usable { + resp.Usable++ + continue + } + resp.Total++ + if usableBreakdown.blocked > 0 { + resp.Blocked++ + } + if usableBreakdown.notacceptingcontracts > 0 { + resp.NotAcceptingContracts++ + } + if usableBreakdown.notcompletingscan > 0 { + resp.NotScanned++ + } + if usableBreakdown.unknown > 0 { + resp.Other++ + } + if usableBreakdown.gougingBreakdown.ContractErr != "" { + resp.Gouging.Contract++ + } + if usableBreakdown.gougingBreakdown.DownloadErr != "" { + resp.Gouging.Download++ + } + if usableBreakdown.gougingBreakdown.GougingErr != "" { + resp.Gouging.Gouging++ + } + if usableBreakdown.gougingBreakdown.PruneErr != "" { + resp.Gouging.Pruning++ + } + if usableBreakdown.gougingBreakdown.UploadErr != "" { + resp.Gouging.Upload++ + } + } + jc.Encode(resp) +} + func (ap *Autopilot) Run() error { ap.startStopMu.Lock() if ap.isRunning() { diff --git a/autopilot/client.go b/autopilot/client.go index 35e3981aa..5c4dee064 100644 --- a/autopilot/client.go +++ b/autopilot/client.go @@ -64,3 +64,14 @@ func (c *Client) Trigger(forceScan bool) (_ bool, err error) { err = c.c.POST("/trigger", api.AutopilotTriggerRequest{ForceScan: forceScan}, &resp) return resp.Triggered, err } + +// EvalutateConfig evaluates a autopilot config using the given gouging and +// redundancy settings. 
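+//
+// A minimal usage sketch (assuming the package's usual NewClient
+// constructor and a running autopilot API; address and password are
+// placeholders):
+//
+//	ap := autopilot.NewClient("http://localhost:9980/api/autopilot", "mypassword")
+//	resp, err := ap.EvaluateConfig(ctx, cfg, gs, rs)
+//	if err == nil && resp.Recommendation != nil {
+//		// resp.Recommendation carries loosened gouging settings
+//	}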
+func (c *Client) EvaluateConfig(ctx context.Context, cfg api.AutopilotConfig, gs api.GougingSettings, rs api.RedundancySettings) (resp api.ConfigEvaluationResponse, err error) { + err = c.c.WithContext(ctx).POST("/config", api.ConfigEvaluationRequest{ + AutopilotConfig: cfg, + GougingSettings: gs, + RedundancySettings: rs, + }, &resp) + return +} From 4a0060452bb3d86ad9b393d4d57183425bd3838a Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 29 Feb 2024 14:37:20 +0100 Subject: [PATCH 137/172] autopilot: optmiseGougingSetting --- api/autopilot.go | 7 +- autopilot/autopilot.go | 134 ++++++++++++++++++++++++++++-------- autopilot/autopilot_test.go | 110 +++++++++++++++++++++++++++++ 3 files changed, 220 insertions(+), 31 deletions(-) create mode 100644 autopilot/autopilot_test.go diff --git a/api/autopilot.go b/api/autopilot.go index 7033bbfd4..ff8d05e2e 100644 --- a/api/autopilot.go +++ b/api/autopilot.go @@ -110,9 +110,10 @@ type ( Pruning uint64 `json:"pruning"` Upload uint64 `json:"upload"` } - NotAcceptingContracts uint64 `json:"notAcceptingContracts"` - NotScanned uint64 `json:"notScanned"` - Other uint64 `json:"other"` + NotAcceptingContracts uint64 `json:"notAcceptingContracts"` + NotScanned uint64 `json:"notScanned"` + Other uint64 `json:"other"` + Recommendation *AutopilotConfig `json:"recommendation,omitempty"` } // HostHandlerResponse is the response type for the /host/:hostkey endpoint. diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 2f087a7cf..52e647687 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -175,34 +175,10 @@ func (ap *Autopilot) Handler() http.Handler { }) } -func (ap *Autopilot) configHandlerPOST(jc jape.Context) { - ctx := jc.Request.Context() - - // decode request - var req api.ConfigEvaluationRequest - if jc.Decode(&req) != nil { - return - } - - // fetch necessary information - apCfg := req.AutopilotConfig - gs := req.GougingSettings - rs := req.RedundancySettings - cs, err := ap.bus.ConsensusState(ctx) - if jc.Check("failed to get consensus state", err) != nil { - return - } - gc := worker.NewGougingChecker(gs, cs, ap.State().fee, ap.State().period, apCfg.Contracts.RenewWindow) - - // fetch hosts - hosts, err := ap.bus.Hosts(ctx, api.GetHostsOptions{}) - if jc.Check("failed to get hosts", err) != nil { - return - } - - var resp api.ConfigEvaluationResponse +func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (resp api.ConfigEvaluationResponse) { + gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) for _, host := range hosts { - usable, usableBreakdown := isUsableHost(apCfg, rs, gc, host, 0, 0) + usable, usableBreakdown := isUsableHost(cfg, rs, gc, host, 0, 0) if usable { resp.Usable++ continue @@ -236,7 +212,109 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { resp.Gouging.Upload++ } } - jc.Encode(resp) + + if resp.Usable >= cfg.Contracts.Amount { + return // no recommendation needed + } + + // optimise gouging settings + //recGS := gs + + //gsForOptimisation := func() api.GougingSettings { + // return api.GougingSettings{ + // MinMaxCollateral: types.ZeroCurrency, + // MaxRPCPrice: types.MaxCurrency, + // } + //} + + // optimise upload price + // if optimiseGougingSetting(&gs, cfg, &gs.MaxUploadPrice, cs, fee, currentPeriod, rs, hosts){ + // + // } + // MinMaxCollateral types.Currency + // MaxRPCPrice types.Currency + // MaxContractPrice 
types.Currency + // MaxDownloadPrice types.Currency + // MaxUploadPrice types.Currency + // MaxStoragePrice types.Currency + + return +} + +func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (usables uint64) { + gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) + for _, host := range hosts { + usable, _ := isUsableHost(cfg, rs, gc, host, 0, 0) + if usable { + usables++ + } + } + return +} + +func optimiseGougingSetting(gs *api.GougingSettings, cfg api.AutopilotConfig, field *types.Currency, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []hostdb.Host) bool { + if cfg.Contracts.Amount == 0 { + return true // nothing to do + } + stepSize := []uint64{200, 150, 125, 110, 105} + maxSteps := 12 + + stepIdx := 0 + nSteps := 1 + prevVal := *field // to keep accurate value + for { + nUsable := countUsableHosts(cfg, cs, fee, currentPeriod, rs, *gs, hosts) + targetHit := nUsable >= cfg.Contracts.Amount + + if targetHit && stepIdx == len(stepSize)-1 { + return true + } else if targetHit { + // move one step back and decrease step size + stepIdx++ + nSteps-- + *field = prevVal + } else if nSteps >= maxSteps { + return false + } + + prevVal = *field + newValue, overflow := prevVal.Mul64WithOverflow(stepSize[stepIdx]) + if overflow { + return false + } + newValue = newValue.Div64(100) + *field = newValue + nSteps++ + } +} + +func (ap *Autopilot) configHandlerPOST(jc jape.Context) { + ctx := jc.Request.Context() + + // decode request + var req api.ConfigEvaluationRequest + if jc.Decode(&req) != nil { + return + } + + // fetch necessary information + cfg := req.AutopilotConfig + gs := req.GougingSettings + rs := req.RedundancySettings + cs, err := ap.bus.ConsensusState(ctx) + if jc.Check("failed to get consensus state", err) != nil { + return + } + state := ap.State() + + // fetch hosts + hosts, err := ap.bus.Hosts(ctx, api.GetHostsOptions{}) + if jc.Check("failed to get hosts", err) != nil { + return + } + + // evaluate the config + jc.Encode(evaluateConfig(cfg, cs, state.fee, state.period, rs, gs, hosts)) } func (ap *Autopilot) Run() error { diff --git a/autopilot/autopilot_test.go b/autopilot/autopilot_test.go new file mode 100644 index 000000000..387c85d62 --- /dev/null +++ b/autopilot/autopilot_test.go @@ -0,0 +1,110 @@ +package autopilot + +import ( + "math" + "testing" + "time" + + rhpv2 "go.sia.tech/core/rhp/v2" + rhpv3 "go.sia.tech/core/rhp/v3" + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/hostdb" +) + +func TestOptimiseGougingSetting(t *testing.T) { + // create 10 hosts that should all be usable + var hosts []hostdb.Host + for i := 0; i < 10; i++ { + hosts = append(hosts, hostdb.Host{ + PriceTable: hostdb.HostPriceTable{ + HostPriceTable: rhpv3.HostPriceTable{ + MaxCollateral: types.Siacoins(1000), + }, + }, + Settings: rhpv2.HostSettings{ + AcceptingContracts: true, + MaxCollateral: types.Siacoins(1000), + }, + Interactions: hostdb.Interactions{ + LastScan: time.Now(), + LastScanSuccess: true, + SecondToLastScanSuccess: true, + TotalScans: 100, + }, + LastAnnouncement: time.Now(), + Scanned: true, + }) + } + + // prepare settings that result in all hosts being usable + cfg := api.AutopilotConfig{ + Contracts: api.ContractsConfig{ + Allowance: types.Siacoins(100000), + Amount: 10, + }, + Hosts: api.HostsConfig{}, + } + cs := api.ConsensusState{ + 
BlockHeight: 100, + LastBlockTime: api.TimeNow(), + Synced: true, + } + fee := types.ZeroCurrency + rs := api.RedundancySettings{MinShards: 10, TotalShards: 30} + gs := api.GougingSettings{ + MaxRPCPrice: types.Siacoins(1), + MaxContractPrice: types.Siacoins(1), + MaxDownloadPrice: types.Siacoins(1), + MaxUploadPrice: types.Siacoins(1), + MaxStoragePrice: types.Siacoins(1), + HostBlockHeightLeeway: math.MaxInt32, + } + + // confirm all hosts are usable + assertUsable := func(n int) { + t.Helper() + nUsable := countUsableHosts(cfg, cs, fee, 0, rs, gs, hosts) + if nUsable != uint64(n) { + t.Fatalf("expected %v usable hosts, got %v", len(hosts), nUsable) + } + } + assertUsable(len(hosts)) + + // Case1: test optimising a field which gets us back to a full set of hosts + for i := range hosts { + hosts[i].Settings.StoragePrice = types.Siacoins(uint32(i + 1)) + } + assertUsable(1) + if !optimiseGougingSetting(&gs, cfg, &gs.MaxStoragePrice, cs, fee, 0, rs, hosts) { + t.Fatal("optimising failed") + } + assertUsable(len(hosts)) + if gs.MaxStoragePrice.ExactString() != "10164000000000000000000000" { // 10.164 SC + t.Fatal("unexpected storage price", gs.MaxStoragePrice.ExactString()) + } + + // Case2: test optimising a field where we can't get back to a full set of + // hosts + hosts[0].Settings.StoragePrice = types.Siacoins(10000) + assertUsable(9) + if optimiseGougingSetting(&gs, cfg, &gs.MaxStoragePrice, cs, fee, 0, rs, hosts) { + t.Fatal("optimising succeeded") + } + if gs.MaxStoragePrice.ExactString() != "9757440000000000000000000000" { // ~9.757 KS + t.Fatal("unexpected storage price", gs.MaxStoragePrice.ExactString()) + } + + // Case3: force overflow + for i := range hosts { + hosts[i].Settings.StoragePrice = types.MaxCurrency + } + gs.MaxStoragePrice = types.MaxCurrency.Sub(types.Siacoins(1)) + assertUsable(0) + if optimiseGougingSetting(&gs, cfg, &gs.MaxStoragePrice, cs, fee, 0, rs, hosts) { + t.Fatal("optimising succeeded") + } + if gs.MaxStoragePrice.ExactString() != "340282366920937463463374607431768211455" { // ~340.3 TS + t.Fatal("unexpected storage price", gs.MaxStoragePrice.ExactString()) + } +} From ec4329d350e9227456a70c81f70830b4bae954fe Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 29 Feb 2024 15:21:17 +0100 Subject: [PATCH 138/172] autopilot: return recommendation for config --- api/autopilot.go | 12 +- autopilot/autopilot.go | 276 +++++++++++++++++++++--------------- autopilot/autopilot_test.go | 10 +- 3 files changed, 176 insertions(+), 122 deletions(-) diff --git a/api/autopilot.go b/api/autopilot.go index ff8d05e2e..846c490ed 100644 --- a/api/autopilot.go +++ b/api/autopilot.go @@ -98,6 +98,10 @@ type ( RedundancySettings RedundancySettings `json:"redundancySettings"` } + ConfigRecommendation struct { + GougingSettings GougingSettings `json:"gougingSettings,omitempty"` + } + // ConfigEvaluationResponse is the response type for /evaluate ConfigEvaluationResponse struct { Usable uint64 `json:"usable"` @@ -110,10 +114,10 @@ type ( Pruning uint64 `json:"pruning"` Upload uint64 `json:"upload"` } - NotAcceptingContracts uint64 `json:"notAcceptingContracts"` - NotScanned uint64 `json:"notScanned"` - Other uint64 `json:"other"` - Recommendation *AutopilotConfig `json:"recommendation,omitempty"` + NotAcceptingContracts uint64 `json:"notAcceptingContracts"` + NotScanned uint64 `json:"notScanned"` + Other uint64 `json:"other"` + Recommendation *ConfigRecommendation `json:"recommendation,omitempty"` } // HostHandlerResponse is the response type for the /host/:hostkey endpoint. 
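Between the API types above and the autopilot changes below, the pieces compose as follows: when fewer than cfg.Contracts.Amount hosts are usable, evaluateConfig loosens each price cap independently, multiplying it upward by a factor of 2.00 and then refining with 1.50, 1.25, 1.10 and 1.05 once the target host count is reached, capped at 12 steps; any successful loosening is surfaced in the Recommendation field. A caller can then adopt it, roughly like this (a sketch; b is a bus client and resp an evaluation response, mirroring the e2e test later in this series):

	if resp.Recommendation != nil {
		// apply the loosened gouging settings so enough hosts become usable
		if err := b.UpdateSetting(ctx, api.SettingGouging, resp.Recommendation.GougingSettings); err != nil {
			log.Fatal(err)
		}
	}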
diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 52e647687..7cf56deb7 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -175,119 +175,6 @@ func (ap *Autopilot) Handler() http.Handler { }) } -func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (resp api.ConfigEvaluationResponse) { - gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) - for _, host := range hosts { - usable, usableBreakdown := isUsableHost(cfg, rs, gc, host, 0, 0) - if usable { - resp.Usable++ - continue - } - resp.Total++ - if usableBreakdown.blocked > 0 { - resp.Blocked++ - } - if usableBreakdown.notacceptingcontracts > 0 { - resp.NotAcceptingContracts++ - } - if usableBreakdown.notcompletingscan > 0 { - resp.NotScanned++ - } - if usableBreakdown.unknown > 0 { - resp.Other++ - } - if usableBreakdown.gougingBreakdown.ContractErr != "" { - resp.Gouging.Contract++ - } - if usableBreakdown.gougingBreakdown.DownloadErr != "" { - resp.Gouging.Download++ - } - if usableBreakdown.gougingBreakdown.GougingErr != "" { - resp.Gouging.Gouging++ - } - if usableBreakdown.gougingBreakdown.PruneErr != "" { - resp.Gouging.Pruning++ - } - if usableBreakdown.gougingBreakdown.UploadErr != "" { - resp.Gouging.Upload++ - } - } - - if resp.Usable >= cfg.Contracts.Amount { - return // no recommendation needed - } - - // optimise gouging settings - //recGS := gs - - //gsForOptimisation := func() api.GougingSettings { - // return api.GougingSettings{ - // MinMaxCollateral: types.ZeroCurrency, - // MaxRPCPrice: types.MaxCurrency, - // } - //} - - // optimise upload price - // if optimiseGougingSetting(&gs, cfg, &gs.MaxUploadPrice, cs, fee, currentPeriod, rs, hosts){ - // - // } - // MinMaxCollateral types.Currency - // MaxRPCPrice types.Currency - // MaxContractPrice types.Currency - // MaxDownloadPrice types.Currency - // MaxUploadPrice types.Currency - // MaxStoragePrice types.Currency - - return -} - -func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (usables uint64) { - gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) - for _, host := range hosts { - usable, _ := isUsableHost(cfg, rs, gc, host, 0, 0) - if usable { - usables++ - } - } - return -} - -func optimiseGougingSetting(gs *api.GougingSettings, cfg api.AutopilotConfig, field *types.Currency, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []hostdb.Host) bool { - if cfg.Contracts.Amount == 0 { - return true // nothing to do - } - stepSize := []uint64{200, 150, 125, 110, 105} - maxSteps := 12 - - stepIdx := 0 - nSteps := 1 - prevVal := *field // to keep accurate value - for { - nUsable := countUsableHosts(cfg, cs, fee, currentPeriod, rs, *gs, hosts) - targetHit := nUsable >= cfg.Contracts.Amount - - if targetHit && stepIdx == len(stepSize)-1 { - return true - } else if targetHit { - // move one step back and decrease step size - stepIdx++ - nSteps-- - *field = prevVal - } else if nSteps >= maxSteps { - return false - } - - prevVal = *field - newValue, overflow := prevVal.Mul64WithOverflow(stepSize[stepIdx]) - if overflow { - return false - } - newValue = newValue.Div64(100) - *field = newValue - nSteps++ - } -} - func (ap *Autopilot) configHandlerPOST(jc jape.Context) { ctx := 
jc.Request.Context() @@ -842,3 +729,166 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { } jc.Encode(hosts) } + +func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (usables uint64) { + gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) + for _, host := range hosts { + usable, _ := isUsableHost(cfg, rs, gc, host, 0, 0) + if usable { + usables++ + } + } + return +} + +// evaluateConfig evaluates the given configuration and if the gouging settings +// are too strict for the number of contracts required by 'cfg', it will provide +// a recommendation on how to loosen it. +func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (resp api.ConfigEvaluationResponse) { + gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) + for _, host := range hosts { + usable, usableBreakdown := isUsableHost(cfg, rs, gc, host, 0, 0) + if usable { + resp.Usable++ + continue + } + resp.Total++ + if usableBreakdown.blocked > 0 { + resp.Blocked++ + } + if usableBreakdown.notacceptingcontracts > 0 { + resp.NotAcceptingContracts++ + } + if usableBreakdown.notcompletingscan > 0 { + resp.NotScanned++ + } + if usableBreakdown.unknown > 0 { + resp.Other++ + } + if usableBreakdown.gougingBreakdown.ContractErr != "" { + resp.Gouging.Contract++ + } + if usableBreakdown.gougingBreakdown.DownloadErr != "" { + resp.Gouging.Download++ + } + if usableBreakdown.gougingBreakdown.GougingErr != "" { + resp.Gouging.Gouging++ + } + if usableBreakdown.gougingBreakdown.PruneErr != "" { + resp.Gouging.Pruning++ + } + if usableBreakdown.gougingBreakdown.UploadErr != "" { + resp.Gouging.Upload++ + } + } + + if resp.Usable >= cfg.Contracts.Amount { + return // no recommendation needed + } + + // optimise gouging settings + maxGS := func() api.GougingSettings { + return api.GougingSettings{ + // these are the fields we optimise one-by-one + MaxRPCPrice: types.MaxCurrency, + MaxContractPrice: types.MaxCurrency, + MaxDownloadPrice: types.MaxCurrency, + MaxUploadPrice: types.MaxCurrency, + MaxStoragePrice: types.MaxCurrency, + + // these are not optimised, so we keep the same values as the user + // provided + MinMaxCollateral: gs.MinMaxCollateral, + HostBlockHeightLeeway: gs.HostBlockHeightLeeway, + MinPriceTableValidity: gs.MinPriceTableValidity, + MinAccountExpiry: gs.MinAccountExpiry, + MinMaxEphemeralAccountBalance: gs.MinMaxEphemeralAccountBalance, + MigrationSurchargeMultiplier: gs.MigrationSurchargeMultiplier, + } + } + + // use the input gouging settings as the starting point and try to optimise + // each field independent of the other fields we want to optimise + optimisedGS := gs + success := false + + // MaxRPCPrice + tmpGS := maxGS() + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxRPCPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxRPCPrice = tmpGS.MaxRPCPrice + success = true + } + // MaxContractPrice + tmpGS = maxGS() + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxContractPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxContractPrice = tmpGS.MaxContractPrice + success = true + } + // MaxDownloadPrice + tmpGS = maxGS() + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxDownloadPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxDownloadPrice = 
tmpGS.MaxDownloadPrice + success = true + } + // MaxUploadPrice + tmpGS = maxGS() + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxUploadPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxUploadPrice = tmpGS.MaxUploadPrice + success = true + } + // MaxStoragePrice + tmpGS = maxGS() + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxStoragePrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxStoragePrice = tmpGS.MaxStoragePrice + success = true + } + // If one of the optimisations was successful, we return the optimised + // gouging settings + if success { + resp.Recommendation = &api.ConfigRecommendation{ + GougingSettings: optimisedGS, + } + } + return +} + +// optimiseGougingSetting tries to optimise one field of the gouging settings to +// try and hit the target number of contracts. +func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []hostdb.Host) bool { + if cfg.Contracts.Amount == 0 { + return true // nothing to do + } + stepSize := []uint64{200, 150, 125, 110, 105} + maxSteps := 12 + + stepIdx := 0 + nSteps := 0 + prevVal := *field // to keep accurate value + for { + nUsable := countUsableHosts(cfg, cs, fee, currentPeriod, rs, *gs, hosts) + targetHit := nUsable >= cfg.Contracts.Amount + + if targetHit && nSteps == 0 { + return true // target already hit without optimising + } else if targetHit && stepIdx == len(stepSize)-1 { + return true // target hit after optimising + } else if targetHit { + // move one step back and decrease step size + stepIdx++ + nSteps-- + *field = prevVal + } else if nSteps >= maxSteps { + return false // ran out of steps + } + + // apply next step + prevVal = *field + newValue, overflow := prevVal.Mul64WithOverflow(stepSize[stepIdx]) + if overflow { + return false + } + newValue = newValue.Div64(100) + *field = newValue + nSteps++ + } +} diff --git a/autopilot/autopilot_test.go b/autopilot/autopilot_test.go index 387c85d62..211a49a33 100644 --- a/autopilot/autopilot_test.go +++ b/autopilot/autopilot_test.go @@ -76,7 +76,7 @@ func TestOptimiseGougingSetting(t *testing.T) { hosts[i].Settings.StoragePrice = types.Siacoins(uint32(i + 1)) } assertUsable(1) - if !optimiseGougingSetting(&gs, cfg, &gs.MaxStoragePrice, cs, fee, 0, rs, hosts) { + if !optimiseGougingSetting(&gs, &gs.MaxStoragePrice, cfg, cs, fee, 0, rs, hosts) { t.Fatal("optimising failed") } assertUsable(len(hosts)) @@ -86,12 +86,12 @@ func TestOptimiseGougingSetting(t *testing.T) { // Case2: test optimising a field where we can't get back to a full set of // hosts - hosts[0].Settings.StoragePrice = types.Siacoins(10000) + hosts[0].Settings.StoragePrice = types.Siacoins(100000) assertUsable(9) - if optimiseGougingSetting(&gs, cfg, &gs.MaxStoragePrice, cs, fee, 0, rs, hosts) { + if optimiseGougingSetting(&gs, &gs.MaxStoragePrice, cfg, cs, fee, 0, rs, hosts) { t.Fatal("optimising succeeded") } - if gs.MaxStoragePrice.ExactString() != "9757440000000000000000000000" { // ~9.757 KS + if gs.MaxStoragePrice.ExactString() != "41631744000000000000000000000" { // ~41.63 KS t.Fatal("unexpected storage price", gs.MaxStoragePrice.ExactString()) } @@ -101,7 +101,7 @@ func TestOptimiseGougingSetting(t *testing.T) { } gs.MaxStoragePrice = types.MaxCurrency.Sub(types.Siacoins(1)) assertUsable(0) - if optimiseGougingSetting(&gs, cfg, &gs.MaxStoragePrice, cs, fee, 0, rs, hosts) { + if optimiseGougingSetting(&gs, &gs.MaxStoragePrice, cfg, cs, fee, 0, rs, 
hosts) { t.Fatal("optimising succeeded") } if gs.MaxStoragePrice.ExactString() != "340282366920937463463374607431768211455" { // ~340.3 TS From 59f11ad8be5a50599c4e3268b6077b52b1b1bbb6 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 29 Feb 2024 15:28:47 +0100 Subject: [PATCH 139/172] e2e: extend TestGouging --- internal/test/e2e/gouging_test.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/internal/test/e2e/gouging_test.go b/internal/test/e2e/gouging_test.go index 60e2f9b5e..28bf51069 100644 --- a/internal/test/e2e/gouging_test.go +++ b/internal/test/e2e/gouging_test.go @@ -91,4 +91,19 @@ func TestGouging(t *testing.T) { if err := w.DownloadObject(context.Background(), &buffer, api.DefaultBucketName, path, api.DownloadObjectOptions{}); err == nil { t.Fatal("expected download to fail", err) } + + // try optimising gouging settings + resp, err := cluster.Autopilot.EvaluateConfig(context.Background(), test.AutopilotConfig, test.GougingSettings, test.RedundancySettings) + tt.OK(err) + if resp.Recommendation == nil { + t.Fatal("expected recommendation") + } + + // set optimised settings + tt.OK(b.UpdateSetting(context.Background(), api.SettingGouging, resp.Recommendation.GougingSettings)) + + // renter should recover and be able to upload again + + // upload some data - should fail + tt.FailAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, path, api.UploadObjectOptions{})) } From 051f3864ec3ec9980bd472e1f9c4ae30f6893b5a Mon Sep 17 00:00:00 2001 From: alexfreska Date: Sat, 2 Mar 2024 21:48:04 +0000 Subject: [PATCH 140/172] ui: v0.49.0 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a000468a7..ec74374ef 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 go.sia.tech/mux v1.2.0 go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca - go.sia.tech/web/renterd v0.46.0 + go.sia.tech/web/renterd v0.49.0 go.uber.org/zap v1.27.0 golang.org/x/crypto v0.19.0 golang.org/x/term v0.17.0 diff --git a/go.sum b/go.sum index cbcb15f69..9e8f9d428 100644 --- a/go.sum +++ b/go.sum @@ -255,8 +255,8 @@ go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca h1:aZMg2AKevn7jKx+wlusWQf go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca/go.mod h1:h/1afFwpxzff6/gG5i1XdAgPK7dEY6FaibhK7N5F86Y= go.sia.tech/web v0.0.0-20231213145933-3f175a86abff h1:/nE7nhewDRxzEdtSKT4SkiUwtjPSiy7Xz7CHEW3MaGQ= go.sia.tech/web v0.0.0-20231213145933-3f175a86abff/go.mod h1:RKODSdOmR3VtObPAcGwQqm4qnqntDVFylbvOBbWYYBU= -go.sia.tech/web/renterd v0.46.0 h1:BMVg4i7LxSlc8wZ4T0EG1k3EK4JxVIzCfD3/cjmwH0k= -go.sia.tech/web/renterd v0.46.0/go.mod h1:FgXrdmAnu591a3h96RB/15pMZ74xO9457g902uE06BM= +go.sia.tech/web/renterd v0.49.0 h1:z9iDr3gIJ60zqiydDZ2MUbhANm6GwdvRf4k67+Zrj14= +go.sia.tech/web/renterd v0.49.0/go.mod h1:FgXrdmAnu591a3h96RB/15pMZ74xO9457g902uE06BM= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= From 1d120394e566ee55a34c84a1288b0a7f74dfa3fe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 02:01:17 +0000 Subject: [PATCH 141/172] build(deps): bump golang.org/x/crypto from 0.19.0 to 0.20.0 Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.19.0 to 0.20.0. 
- [Commits](https://github.com/golang/crypto/compare/v0.19.0...v0.20.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index a000468a7..fb7f59ceb 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca go.sia.tech/web/renterd v0.46.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.19.0 + golang.org/x/crypto v0.20.0 golang.org/x/term v0.17.0 gopkg.in/yaml.v3 v3.0.1 gorm.io/driver/mysql v1.5.4 @@ -75,7 +75,7 @@ require ( gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213 // indirect go.sia.tech/web v0.0.0-20231213145933-3f175a86abff // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.20.0 // indirect + golang.org/x/net v0.21.0 // indirect golang.org/x/sys v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect diff --git a/go.sum b/go.sum index cbcb15f69..c2012bb77 100644 --- a/go.sum +++ b/go.sum @@ -277,8 +277,8 @@ golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg= +golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= @@ -303,8 +303,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From 69523674c09c95227f02b500acb094a442192dcd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 02:01:26 +0000 Subject: [PATCH 142/172] build(deps): bump github.com/minio/minio-go/v7 from 7.0.67 to 7.0.68 Bumps 
[github.com/minio/minio-go/v7](https://github.com/minio/minio-go) from 7.0.67 to 7.0.68. - [Release notes](https://github.com/minio/minio-go/releases) - [Commits](https://github.com/minio/minio-go/compare/v7.0.67...v7.0.68) --- updated-dependencies: - dependency-name: github.com/minio/minio-go/v7 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 9 ++++----- go.sum | 21 ++++++++------------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index a000468a7..06a197da1 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/gotd/contrib v0.19.0 github.com/klauspost/reedsolomon v1.12.1 - github.com/minio/minio-go/v7 v7.0.67 + github.com/minio/minio-go/v7 v7.0.68 github.com/montanaflynn/stats v0.7.1 gitlab.com/NebulousLabs/encoding v0.0.0-20200604091946-456c3dc907fe go.sia.tech/core v0.2.1 @@ -38,7 +38,7 @@ require ( github.com/go-sql-driver/mysql v1.7.1 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/uuid v1.5.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.1 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.5 // indirect @@ -48,7 +48,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect - github.com/klauspost/compress v1.17.4 // indirect + github.com/klauspost/compress v1.17.6 // indirect github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/mattn/go-sqlite3 v1.14.18 // indirect github.com/minio/md5-simd v1.1.2 // indirect @@ -59,7 +59,6 @@ require ( github.com/rs/xid v1.5.0 // indirect github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df // indirect - github.com/sirupsen/logrus v1.9.3 // indirect gitlab.com/NebulousLabs/bolt v1.4.4 // indirect gitlab.com/NebulousLabs/demotemutex v0.0.0-20151003192217-235395f71c40 // indirect gitlab.com/NebulousLabs/entropy-mnemonics v0.0.0-20181018051301-7532f67e3500 // indirect @@ -75,7 +74,7 @@ require ( gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213 // indirect go.sia.tech/web v0.0.0-20231213145933-3f175a86abff // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.20.0 // indirect + golang.org/x/net v0.21.0 // indirect golang.org/x/sys v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect diff --git a/go.sum b/go.sum index cbcb15f69..1ae8b562b 100644 --- a/go.sum +++ b/go.sum @@ -64,8 +64,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.4.0/go.mod 
h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= @@ -106,8 +106,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= +github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid v1.2.2/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= @@ -135,8 +135,8 @@ github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.67 h1:BeBvZWAS+kRJm1vGTMJYVjKUNoo0FoEt/wUWdUtfmh8= -github.com/minio/minio-go/v7 v7.0.67/go.mod h1:+UXocnUeZ3wHvVh5s95gcrA4YjMIbccT6ubB+1m054A= +github.com/minio/minio-go/v7 v7.0.68 h1:hTqSIfLlpXaKuNy4baAp4Jjy2sqZEN9hRxD0M4aOfrQ= +github.com/minio/minio-go/v7 v7.0.68/go.mod h1:XAvOPJQ5Xlzk5o3o/ArO2NMbhSGkimC+bpW/ngRKDmQ= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -178,8 +178,6 @@ github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIG github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df/go.mod h1:dcuzJZ83w/SqN9k4eQqwKYMgmKWzg/KzJAURBhRL1tc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= @@ -193,7 +191,6 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -303,8 +300,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -327,7 +324,6 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -388,7 +384,6 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/mysql v1.5.4 h1:igQmHfKcbaTVyAIHNhhB888vvxh8EdQ2uSUT0LPcBso= From cce9d22b1aed0b8d823e7ef6bc448f60b7c97f43 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 4 Mar 2024 09:40:13 +0100 Subject: [PATCH 143/172] stores: improve locking for deleting multipart uploads in parallel --- stores/multipart.go | 44 +++++++++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/stores/multipart.go b/stores/multipart.go index 3a5bcd54a..2983f86d1 100644 --- a/stores/multipart.go +++ b/stores/multipart.go @@ -274,26 +274,32 @@ func (s *SQLStore) MultipartUploadParts(ctx context.Context, bucket, object stri func (s *SQLStore) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) error { return s.retryTransaction(func(tx *gorm.DB) error { - // Find multipart 
upload. - var mu dbMultipartUpload - err := tx.Where("upload_id = ?", uploadID). - Preload("Parts"). + // delete multipart upload optimistically + res := tx.Where("upload_id", uploadID). + Where("object_id", path). + Where("DBBucket.name", bucket). Joins("DBBucket"). - Take(&mu). - Error - if err != nil { - return fmt.Errorf("failed to fetch multipart upload: %w", err) - } - if mu.ObjectID != path { - // Check object id. - return fmt.Errorf("object id mismatch: %v != %v: %w", mu.ObjectID, path, api.ErrObjectNotFound) - } else if mu.DBBucket.Name != bucket { - // Check bucket name. - return fmt.Errorf("bucket name mismatch: %v != %v: %w", mu.DBBucket.Name, bucket, api.ErrBucketNotFound) - } - err = tx.Delete(&mu).Error - if err != nil { - return fmt.Errorf("failed to delete multipart upload: %w", err) + Delete(&dbMultipartUpload{}) + if res.Error != nil { + return fmt.Errorf("failed to fetch multipart upload: %w", res.Error) + } + // if the upload wasn't found, find out why + if res.RowsAffected == 0 { + var mu dbMultipartUpload + err := tx.Where("upload_id = ?", uploadID). + Joins("DBBucket"). + Take(&mu). + Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrMultipartUploadNotFound + } else if err != nil { + return fmt.Errorf("failed to fetch multipart upload: %w", err) + } else if mu.ObjectID != path { + return fmt.Errorf("object id mismatch: %v != %v: %w", mu.ObjectID, path, api.ErrObjectNotFound) + } else if mu.DBBucket.Name != bucket { + return fmt.Errorf("bucket name mismatch: %v != %v: %w", mu.DBBucket.Name, bucket, api.ErrBucketNotFound) + } + return errors.New("failed to delete multipart upload for unknown reason") } // Prune the slabs. if err := pruneSlabs(tx); err != nil { From 3c7e42f453f14b2cfd06c3998b0b400eaea12099 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 4 Mar 2024 10:31:29 +0100 Subject: [PATCH 144/172] stores: fix TestS3MultipartUploads --- stores/multipart.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stores/multipart.go b/stores/multipart.go index 2983f86d1..d180ea97b 100644 --- a/stores/multipart.go +++ b/stores/multipart.go @@ -275,10 +275,10 @@ func (s *SQLStore) MultipartUploadParts(ctx context.Context, bucket, object stri func (s *SQLStore) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) error { return s.retryTransaction(func(tx *gorm.DB) error { // delete multipart upload optimistically - res := tx.Where("upload_id", uploadID). + res := tx. + Where("upload_id", uploadID). Where("object_id", path). - Where("DBBucket.name", bucket). - Joins("DBBucket"). + Where("db_bucket_id = (SELECT id FROM buckets WHERE buckets.name = ?)", bucket). 
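Aside on the pattern in this hunk: the rewrite deletes first and only falls back to a read when RowsAffected is zero, so the common case costs a single round-trip and the slow path exists purely to classify the failure for the caller. A minimal sketch of the same shape, assuming GORM; the model stub and error names here are illustrative, not the codebase's own:

package stores_sketch

import (
	"errors"
	"fmt"

	"gorm.io/gorm"
)

// dbMultipartUpload loosely mirrors the model used in the patch.
type dbMultipartUpload struct {
	ID       uint
	UploadID string
}

var errUploadNotFound = errors.New("multipart upload not found")

// deleteUploadOptimistic deletes first and only diagnoses on a miss,
// saving a read round-trip in the common case and tolerating races
// between parallel aborts.
func deleteUploadOptimistic(tx *gorm.DB, uploadID string) error {
	res := tx.Where("upload_id = ?", uploadID).Delete(&dbMultipartUpload{})
	if res.Error != nil {
		return res.Error
	} else if res.RowsAffected > 0 {
		return nil // common case: one statement, done
	}
	// The delete matched nothing; a follow-up read tells us why.
	var mu dbMultipartUpload
	err := tx.Where("upload_id = ?", uploadID).Take(&mu).Error
	if errors.Is(err, gorm.ErrRecordNotFound) {
		return errUploadNotFound
	} else if err != nil {
		return fmt.Errorf("failed to fetch multipart upload: %w", err)
	}
	return errors.New("upload exists but predicates did not match")
}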
Delete(&dbMultipartUpload{}) if res.Error != nil { return fmt.Errorf("failed to fetch multipart upload: %w", res.Error) From 798dd313915ad4f15787fe59c0f0fd0b612df956 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 4 Mar 2024 11:40:36 +0100 Subject: [PATCH 145/172] main: print address when generating new seed to match hostd behaviour --- cmd/renterd/main.go | 21 ++++--- wallet/seed.go | 149 -------------------------------------------- 2 files changed, 14 insertions(+), 156 deletions(-) delete mode 100644 wallet/seed.go diff --git a/cmd/renterd/main.go b/cmd/renterd/main.go index 79d1e31b4..98e075d92 100644 --- a/cmd/renterd/main.go +++ b/cmd/renterd/main.go @@ -16,6 +16,7 @@ import ( "time" "go.sia.tech/core/types" + "go.sia.tech/coreutils/wallet" "go.sia.tech/jape" "go.sia.tech/renterd/api" "go.sia.tech/renterd/autopilot" @@ -25,7 +26,6 @@ import ( "go.sia.tech/renterd/internal/node" "go.sia.tech/renterd/s3" "go.sia.tech/renterd/stores" - "go.sia.tech/renterd/wallet" "go.sia.tech/renterd/worker" "go.sia.tech/web/renterd" "go.uber.org/zap" @@ -160,11 +160,11 @@ func getSeed() types.PrivateKey { fmt.Println() phrase = string(pw) } - key, err := wallet.KeyFromPhrase(phrase) - if err != nil { - log.Fatal(err) + var rawSeed [32]byte + if err := wallet.SeedFromPhrase(&rawSeed, phrase); err != nil { + panic(err) } - seed = key + seed = wallet.KeyFromSeed(&rawSeed, 0) } return seed } @@ -315,8 +315,15 @@ func main() { log.Println("Build Date:", build.BuildTime()) return } else if flag.Arg(0) == "seed" { - log.Println("Seed phrase:") - fmt.Println(wallet.NewSeedPhrase()) + var seed [32]byte + phrase := wallet.NewSeedPhrase() + if err := wallet.SeedFromPhrase(&seed, phrase); err != nil { + println(err.Error()) + os.Exit(1) + } + key := wallet.KeyFromSeed(&seed, 0) + fmt.Println("Recovery Phrase:", phrase) + fmt.Println("Address", types.StandardUnlockHash(key.PublicKey())) return } else if flag.Arg(0) == "config" { cmdBuildConfig() diff --git a/wallet/seed.go b/wallet/seed.go deleted file mode 100644 index afe9e2abf..000000000 --- a/wallet/seed.go +++ /dev/null @@ -1,149 +0,0 @@ -package wallet - -import ( - "crypto/rand" - "crypto/sha256" - "encoding/binary" - "errors" - "fmt" - "strings" - - "go.sia.tech/core/types" - "golang.org/x/crypto/blake2b" -) - -// NOTE: This is not a full implementation of BIP39; only 12-word phrases (128 -// bits of entropy) are supported. - -func memclr(p []byte) { - for i := range p { - p[i] = 0 - } -} - -// NewSeedPhrase returns a random seed phrase. -func NewSeedPhrase() string { - var entropy [16]byte - if _, err := rand.Read(entropy[:]); err != nil { - panic("insufficient system entropy") - } - return encodeBIP39Phrase(&entropy) -} - -// KeyFromPhrase returns the Ed25519 key derived from the supplied seed phrase. 
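For context on the removal that follows: the deleted wallet/seed.go implemented a 12-word subset of BIP39, where 12 words x 11 bits = 132 bits carry 128 bits of entropy plus a 4-bit checksum taken from the top nibble of SHA-256(entropy); the replacement defers all of that to go.sia.tech/coreutils/wallet. A condensed sketch of the new derivation flow, pieced together from the two call sites in this patch (the wrapper function name is illustrative):

package main_sketch

import (
	"fmt"

	"go.sia.tech/core/types"
	"go.sia.tech/coreutils/wallet"
)

// printSeedAndAddress reproduces the new "seed" subcommand flow:
// generate a phrase, derive the 32-byte seed, derive key 0, and print
// the standard address alongside the phrase.
func printSeedAndAddress() error {
	phrase := wallet.NewSeedPhrase()
	var seed [32]byte
	if err := wallet.SeedFromPhrase(&seed, phrase); err != nil {
		return err
	}
	key := wallet.KeyFromSeed(&seed, 0)
	fmt.Println("Recovery Phrase:", phrase)
	fmt.Println("Address:", types.StandardUnlockHash(key.PublicKey()))
	return nil
}

Printing the derived address up front lets a user confirm the phrase resolves to the same wallet hostd would derive, which is the stated goal of the commit.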
-func KeyFromPhrase(phrase string) (types.PrivateKey, error) { - entropy, err := decodeBIP39Phrase(phrase) - if err != nil { - return nil, err - } - h := blake2b.Sum256(entropy[:]) - memclr(entropy[:]) - buf := make([]byte, 32+8) - copy(buf[:32], h[:]) - memclr(h[:]) - binary.LittleEndian.PutUint64(buf[32:], 0) - seed := blake2b.Sum256(buf) - key := types.NewPrivateKeyFromSeed(seed[:]) - memclr(seed[:]) - return key, nil -} - -func bip39checksum(entropy *[16]byte) uint64 { - hash := sha256.Sum256(entropy[:]) - return uint64((hash[0] & 0xF0) >> 4) -} - -func encodeBIP39Phrase(entropy *[16]byte) string { - // convert entropy to a 128-bit integer - hi := binary.BigEndian.Uint64(entropy[:8]) - lo := binary.BigEndian.Uint64(entropy[8:]) - - // convert each group of 11 bits into a word - words := make([]string, 12) - // last word is special: 4 bits are checksum - w := ((lo & 0x7F) << 4) | bip39checksum(entropy) - words[len(words)-1] = bip39EnglishWordList[w] - lo = lo>>7 | hi<<(64-7) - hi >>= 7 - for i := len(words) - 2; i >= 0; i-- { - words[i] = bip39EnglishWordList[lo&0x7FF] - lo = lo>>11 | hi<<(64-11) - hi >>= 11 - } - - return strings.Join(words, " ") -} - -func decodeBIP39Phrase(phrase string) (*[16]byte, error) { - // validate that the phrase is well formed and only contains words that - // are present in the word list - words := strings.Fields(phrase) - if n := len(words); n != 12 { - return nil, errors.New("wrong number of words in seed phrase") - } - for _, word := range words { - if _, ok := wordMap[word]; !ok { - return nil, fmt.Errorf("unrecognized word %q in seed phrase", word) - } - } - - // convert words to 128 bits, 11 bits at a time - var lo, hi uint64 - for _, v := range words[:len(words)-1] { - hi = hi<<11 | lo>>(64-11) - lo = lo<<11 | wordMap[v] - } - // last word is special: least-significant 4 bits are checksum, so shift - // them off and only add the remaining 7 bits - w := wordMap[words[len(words)-1]] - checksum := w & 0xF - hi = hi<<7 | lo>>(64-7) - lo = lo<<7 | w>>4 - - // convert to big-endian byte slice - var entropy [16]byte - binary.BigEndian.PutUint64(entropy[:8], hi) - binary.BigEndian.PutUint64(entropy[8:], lo) - - // validate checksum - if bip39checksum(&entropy) != checksum { - return nil, errors.New("invalid checksum") - } - return &entropy, nil -} - -var wordMap = func() map[string]uint64 { - m := make(map[string]uint64, len(bip39EnglishWordList)) - for i, v := range bip39EnglishWordList { - m[v] = uint64(i) - } - return m -}() - -var bip39EnglishWordList = []string{ - "abandon", "ability", "able", "about", "above", "absent", "absorb", "abstract", "absurd", "abuse", "access", "accident", "account", "accuse", "achieve", "acid", "acoustic", "acquire", "across", "act", "action", "actor", "actress", "actual", "adapt", "add", "addict", "address", "adjust", "admit", "adult", "advance", "advice", "aerobic", "affair", "afford", "afraid", "again", "age", "agent", "agree", "ahead", "aim", "air", "airport", "aisle", "alarm", "album", "alcohol", "alert", "alien", "all", "alley", "allow", "almost", "alone", "alpha", "already", "also", "alter", "always", "amateur", "amazing", "among", "amount", "amused", "analyst", "anchor", "ancient", "anger", "angle", "angry", "animal", "ankle", "announce", "annual", "another", "answer", "antenna", "antique", "anxiety", "any", "apart", "apology", "appear", "apple", "approve", "april", "arch", "arctic", "area", "arena", "argue", "arm", "armed", "armor", "army", "around", "arrange", "arrest", "arrive", "arrow", "art", "artefact", 
"artist", "artwork", "ask", "aspect", "assault", "asset", "assist", "assume", "asthma", "athlete", "atom", "attack", "attend", "attitude", "attract", "auction", "audit", "august", "aunt", "author", "auto", "autumn", "average", "avocado", "avoid", "awake", "aware", "away", "awesome", "awful", "awkward", "axis", - "baby", "bachelor", "bacon", "badge", "bag", "balance", "balcony", "ball", "bamboo", "banana", "banner", "bar", "barely", "bargain", "barrel", "base", "basic", "basket", "battle", "beach", "bean", "beauty", "because", "become", "beef", "before", "begin", "behave", "behind", "believe", "below", "belt", "bench", "benefit", "best", "betray", "better", "between", "beyond", "bicycle", "bid", "bike", "bind", "biology", "bird", "birth", "bitter", "black", "blade", "blame", "blanket", "blast", "bleak", "bless", "blind", "blood", "blossom", "blouse", "blue", "blur", "blush", "board", "boat", "body", "boil", "bomb", "bone", "bonus", "book", "boost", "border", "boring", "borrow", "boss", "bottom", "bounce", "box", "boy", "bracket", "brain", "brand", "brass", "brave", "bread", "breeze", "brick", "bridge", "brief", "bright", "bring", "brisk", "broccoli", "broken", "bronze", "broom", "brother", "brown", "brush", "bubble", "buddy", "budget", "buffalo", "build", "bulb", "bulk", "bullet", "bundle", "bunker", "burden", "burger", "burst", "bus", "business", "busy", "butter", "buyer", "buzz", - "cabbage", "cabin", "cable", "cactus", "cage", "cake", "call", "calm", "camera", "camp", "can", "canal", "cancel", "candy", "cannon", "canoe", "canvas", "canyon", "capable", "capital", "captain", "car", "carbon", "card", "cargo", "carpet", "carry", "cart", "case", "cash", "casino", "castle", "casual", "cat", "catalog", "catch", "category", "cattle", "caught", "cause", "caution", "cave", "ceiling", "celery", "cement", "census", "century", "cereal", "certain", "chair", "chalk", "champion", "change", "chaos", "chapter", "charge", "chase", "chat", "cheap", "check", "cheese", "chef", "cherry", "chest", "chicken", "chief", "child", "chimney", "choice", "choose", "chronic", "chuckle", "chunk", "churn", "cigar", "cinnamon", "circle", "citizen", "city", "civil", "claim", "clap", "clarify", "claw", "clay", "clean", "clerk", "clever", "click", "client", "cliff", "climb", "clinic", "clip", "clock", "clog", "close", "cloth", "cloud", "clown", "club", "clump", "cluster", "clutch", "coach", "coast", "coconut", "code", "coffee", "coil", "coin", "collect", "color", "column", "combine", "come", "comfort", "comic", "common", "company", "concert", "conduct", "confirm", "congress", "connect", "consider", "control", "convince", "cook", "cool", "copper", "copy", "coral", "core", "corn", "correct", "cost", "cotton", "couch", "country", "couple", "course", "cousin", "cover", "coyote", "crack", "cradle", "craft", "cram", "crane", "crash", "crater", "crawl", "crazy", "cream", "credit", "creek", "crew", "cricket", "crime", "crisp", "critic", "crop", "cross", "crouch", "crowd", "crucial", "cruel", "cruise", "crumble", "crunch", "crush", "cry", "crystal", "cube", "culture", "cup", "cupboard", "curious", "current", "curtain", "curve", "cushion", "custom", "cute", "cycle", - "dad", "damage", "damp", "dance", "danger", "daring", "dash", "daughter", "dawn", "day", "deal", "debate", "debris", "decade", "december", "decide", "decline", "decorate", "decrease", "deer", "defense", "define", "defy", "degree", "delay", "deliver", "demand", "demise", "denial", "dentist", "deny", "depart", "depend", "deposit", "depth", "deputy", "derive", "describe", 
"desert", "design", "desk", "despair", "destroy", "detail", "detect", "develop", "device", "devote", "diagram", "dial", "diamond", "diary", "dice", "diesel", "diet", "differ", "digital", "dignity", "dilemma", "dinner", "dinosaur", "direct", "dirt", "disagree", "discover", "disease", "dish", "dismiss", "disorder", "display", "distance", "divert", "divide", "divorce", "dizzy", "doctor", "document", "dog", "doll", "dolphin", "domain", "donate", "donkey", "donor", "door", "dose", "double", "dove", "draft", "dragon", "drama", "drastic", "draw", "dream", "dress", "drift", "drill", "drink", "drip", "drive", "drop", "drum", "dry", "duck", "dumb", "dune", "during", "dust", "dutch", "duty", "dwarf", "dynamic", - "eager", "eagle", "early", "earn", "earth", "easily", "east", "easy", "echo", "ecology", "economy", "edge", "edit", "educate", "effort", "egg", "eight", "either", "elbow", "elder", "electric", "elegant", "element", "elephant", "elevator", "elite", "else", "embark", "embody", "embrace", "emerge", "emotion", "employ", "empower", "empty", "enable", "enact", "end", "endless", "endorse", "enemy", "energy", "enforce", "engage", "engine", "enhance", "enjoy", "enlist", "enough", "enrich", "enroll", "ensure", "enter", "entire", "entry", "envelope", "episode", "equal", "equip", "era", "erase", "erode", "erosion", "error", "erupt", "escape", "essay", "essence", "estate", "eternal", "ethics", "evidence", "evil", "evoke", "evolve", "exact", "example", "excess", "exchange", "excite", "exclude", "excuse", "execute", "exercise", "exhaust", "exhibit", "exile", "exist", "exit", "exotic", "expand", "expect", "expire", "explain", "expose", "express", "extend", "extra", "eye", "eyebrow", - "fabric", "face", "faculty", "fade", "faint", "faith", "fall", "false", "fame", "family", "famous", "fan", "fancy", "fantasy", "farm", "fashion", "fat", "fatal", "father", "fatigue", "fault", "favorite", "feature", "february", "federal", "fee", "feed", "feel", "female", "fence", "festival", "fetch", "fever", "few", "fiber", "fiction", "field", "figure", "file", "film", "filter", "final", "find", "fine", "finger", "finish", "fire", "firm", "first", "fiscal", "fish", "fit", "fitness", "fix", "flag", "flame", "flash", "flat", "flavor", "flee", "flight", "flip", "float", "flock", "floor", "flower", "fluid", "flush", "fly", "foam", "focus", "fog", "foil", "fold", "follow", "food", "foot", "force", "forest", "forget", "fork", "fortune", "forum", "forward", "fossil", "foster", "found", "fox", "fragile", "frame", "frequent", "fresh", "friend", "fringe", "frog", "front", "frost", "frown", "frozen", "fruit", "fuel", "fun", "funny", "furnace", "fury", "future", - "gadget", "gain", "galaxy", "gallery", "game", "gap", "garage", "garbage", "garden", "garlic", "garment", "gas", "gasp", "gate", "gather", "gauge", "gaze", "general", "genius", "genre", "gentle", "genuine", "gesture", "ghost", "giant", "gift", "giggle", "ginger", "giraffe", "girl", "give", "glad", "glance", "glare", "glass", "glide", "glimpse", "globe", "gloom", "glory", "glove", "glow", "glue", "goat", "goddess", "gold", "good", "goose", "gorilla", "gospel", "gossip", "govern", "gown", "grab", "grace", "grain", "grant", "grape", "grass", "gravity", "great", "green", "grid", "grief", "grit", "grocery", "group", "grow", "grunt", "guard", "guess", "guide", "guilt", "guitar", "gun", "gym", "habit", - "hair", "half", "hammer", "hamster", "hand", "happy", "harbor", "hard", "harsh", "harvest", "hat", "have", "hawk", "hazard", "head", "health", "heart", "heavy", "hedgehog", "height", 
"hello", "helmet", "help", "hen", "hero", "hidden", "high", "hill", "hint", "hip", "hire", "history", "hobby", "hockey", "hold", "hole", "holiday", "hollow", "home", "honey", "hood", "hope", "horn", "horror", "horse", "hospital", "host", "hotel", "hour", "hover", "hub", "huge", "human", "humble", "humor", "hundred", "hungry", "hunt", "hurdle", "hurry", "hurt", "husband", "hybrid", - "ice", "icon", "idea", "identify", "idle", "ignore", "ill", "illegal", "illness", "image", "imitate", "immense", "immune", "impact", "impose", "improve", "impulse", "inch", "include", "income", "increase", "index", "indicate", "indoor", "industry", "infant", "inflict", "inform", "inhale", "inherit", "initial", "inject", "injury", "inmate", "inner", "innocent", "input", "inquiry", "insane", "insect", "inside", "inspire", "install", "intact", "interest", "into", "invest", "invite", "involve", "iron", "island", "isolate", "issue", "item", "ivory", - "jacket", "jaguar", "jar", "jazz", "jealous", "jeans", "jelly", "jewel", "job", "join", "joke", "journey", "joy", "judge", "juice", "jump", "jungle", "junior", "junk", "just", - "kangaroo", "keen", "keep", "ketchup", "key", "kick", "kid", "kidney", "kind", "kingdom", "kiss", "kit", "kitchen", "kite", "kitten", "kiwi", "knee", "knife", "knock", "know", - "lab", "label", "labor", "ladder", "lady", "lake", "lamp", "language", "laptop", "large", "later", "latin", "laugh", "laundry", "lava", "law", "lawn", "lawsuit", "layer", "lazy", "leader", "leaf", "learn", "leave", "lecture", "left", "leg", "legal", "legend", "leisure", "lemon", "lend", "length", "lens", "leopard", "lesson", "letter", "level", "liar", "liberty", "library", "license", "life", "lift", "light", "like", "limb", "limit", "link", "lion", "liquid", "list", "little", "live", "lizard", "load", "loan", "lobster", "local", "lock", "logic", "lonely", "long", "loop", "lottery", "loud", "lounge", "love", "loyal", "lucky", "luggage", "lumber", "lunar", "lunch", "luxury", "lyrics", - "machine", "mad", "magic", "magnet", "maid", "mail", "main", "major", "make", "mammal", "man", "manage", "mandate", "mango", "mansion", "manual", "maple", "marble", "march", "margin", "marine", "market", "marriage", "mask", "mass", "master", "match", "material", "math", "matrix", "matter", "maximum", "maze", "meadow", "mean", "measure", "meat", "mechanic", "medal", "media", "melody", "melt", "member", "memory", "mention", "menu", "mercy", "merge", "merit", "merry", "mesh", "message", "metal", "method", "middle", "midnight", "milk", "million", "mimic", "mind", "minimum", "minor", "minute", "miracle", "mirror", "misery", "miss", "mistake", "mix", "mixed", "mixture", "mobile", "model", "modify", "mom", "moment", "monitor", "monkey", "monster", "month", "moon", "moral", "more", "morning", "mosquito", "mother", "motion", "motor", "mountain", "mouse", "move", "movie", "much", "muffin", "mule", "multiply", "muscle", "museum", "mushroom", "music", "must", "mutual", "myself", "mystery", "myth", - "naive", "name", "napkin", "narrow", "nasty", "nation", "nature", "near", "neck", "need", "negative", "neglect", "neither", "nephew", "nerve", "nest", "net", "network", "neutral", "never", "news", "next", "nice", "night", "noble", "noise", "nominee", "noodle", "normal", "north", "nose", "notable", "note", "nothing", "notice", "novel", "now", "nuclear", "number", "nurse", "nut", - "oak", "obey", "object", "oblige", "obscure", "observe", "obtain", "obvious", "occur", "ocean", "october", "odor", "off", "offer", "office", "often", "oil", "okay", "old", 
"olive", "olympic", "omit", "once", "one", "onion", "online", "only", "open", "opera", "opinion", "oppose", "option", "orange", "orbit", "orchard", "order", "ordinary", "organ", "orient", "original", "orphan", "ostrich", "other", "outdoor", "outer", "output", "outside", "oval", "oven", "over", "own", "owner", "oxygen", "oyster", "ozone", - "pact", "paddle", "page", "pair", "palace", "palm", "panda", "panel", "panic", "panther", "paper", "parade", "parent", "park", "parrot", "party", "pass", "patch", "path", "patient", "patrol", "pattern", "pause", "pave", "payment", "peace", "peanut", "pear", "peasant", "pelican", "pen", "penalty", "pencil", "people", "pepper", "perfect", "permit", "person", "pet", "phone", "photo", "phrase", "physical", "piano", "picnic", "picture", "piece", "pig", "pigeon", "pill", "pilot", "pink", "pioneer", "pipe", "pistol", "pitch", "pizza", "place", "planet", "plastic", "plate", "play", "please", "pledge", "pluck", "plug", "plunge", "poem", "poet", "point", "polar", "pole", "police", "pond", "pony", "pool", "popular", "portion", "position", "possible", "post", "potato", "pottery", "poverty", "powder", "power", "practice", "praise", "predict", "prefer", "prepare", "present", "pretty", "prevent", "price", "pride", "primary", "print", "priority", "prison", "private", "prize", "problem", "process", "produce", "profit", "program", "project", "promote", "proof", "property", "prosper", "protect", "proud", "provide", "public", "pudding", "pull", "pulp", "pulse", "pumpkin", "punch", "pupil", "puppy", "purchase", "purity", "purpose", "purse", "push", "put", "puzzle", "pyramid", - "quality", "quantum", "quarter", "question", "quick", "quit", "quiz", "quote", - "rabbit", "raccoon", "race", "rack", "radar", "radio", "rail", "rain", "raise", "rally", "ramp", "ranch", "random", "range", "rapid", "rare", "rate", "rather", "raven", "raw", "razor", "ready", "real", "reason", "rebel", "rebuild", "recall", "receive", "recipe", "record", "recycle", "reduce", "reflect", "reform", "refuse", "region", "regret", "regular", "reject", "relax", "release", "relief", "rely", "remain", "remember", "remind", "remove", "render", "renew", "rent", "reopen", "repair", "repeat", "replace", "report", "require", "rescue", "resemble", "resist", "resource", "response", "result", "retire", "retreat", "return", "reunion", "reveal", "review", "reward", "rhythm", "rib", "ribbon", "rice", "rich", "ride", "ridge", "rifle", "right", "rigid", "ring", "riot", "ripple", "risk", "ritual", "rival", "river", "road", "roast", "robot", "robust", "rocket", "romance", "roof", "rookie", "room", "rose", "rotate", "rough", "round", "route", "royal", "rubber", "rude", "rug", "rule", "run", "runway", "rural", - "sad", "saddle", "sadness", "safe", "sail", "salad", "salmon", "salon", "salt", "salute", "same", "sample", "sand", "satisfy", "satoshi", "sauce", "sausage", "save", "say", "scale", "scan", "scare", "scatter", "scene", "scheme", "school", "science", "scissors", "scorpion", "scout", "scrap", "screen", "script", "scrub", "sea", "search", "season", "seat", "second", "secret", "section", "security", "seed", "seek", "segment", "select", "sell", "seminar", "senior", "sense", "sentence", "series", "service", "session", "settle", "setup", "seven", "shadow", "shaft", "shallow", "share", "shed", "shell", "sheriff", "shield", "shift", "shine", "ship", "shiver", "shock", "shoe", "shoot", "shop", "short", "shoulder", "shove", "shrimp", "shrug", "shuffle", "shy", "sibling", "sick", "side", "siege", "sight", "sign", "silent", "silk", 
"silly", "silver", "similar", "simple", "since", "sing", "siren", "sister", "situate", "six", "size", "skate", "sketch", "ski", "skill", "skin", "skirt", "skull", "slab", "slam", "sleep", "slender", "slice", "slide", "slight", "slim", "slogan", "slot", "slow", "slush", "small", "smart", "smile", "smoke", "smooth", "snack", "snake", "snap", "sniff", "snow", "soap", "soccer", "social", "sock", "soda", "soft", "solar", "soldier", "solid", "solution", "solve", "someone", "song", "soon", "sorry", "sort", "soul", "sound", "soup", "source", "south", "space", "spare", "spatial", "spawn", "speak", "special", "speed", "spell", "spend", "sphere", "spice", "spider", "spike", "spin", "spirit", "split", "spoil", "sponsor", "spoon", "sport", "spot", "spray", "spread", "spring", "spy", "square", "squeeze", "squirrel", "stable", "stadium", "staff", "stage", "stairs", "stamp", "stand", "start", "state", "stay", "steak", "steel", "stem", "step", "stereo", "stick", "still", "sting", "stock", "stomach", "stone", "stool", "story", "stove", "strategy", "street", "strike", "strong", "struggle", "student", "stuff", "stumble", "style", "subject", "submit", "subway", "success", "such", "sudden", "suffer", "sugar", "suggest", "suit", "summer", "sun", "sunny", "sunset", "super", "supply", "supreme", "sure", "surface", "surge", "surprise", "surround", "survey", "suspect", "sustain", "swallow", "swamp", "swap", "swarm", "swear", "sweet", "swift", "swim", "swing", "switch", "sword", "symbol", "symptom", "syrup", "system", - "table", "tackle", "tag", "tail", "talent", "talk", "tank", "tape", "target", "task", "taste", "tattoo", "taxi", "teach", "team", "tell", "ten", "tenant", "tennis", "tent", "term", "test", "text", "thank", "that", "theme", "then", "theory", "there", "they", "thing", "this", "thought", "three", "thrive", "throw", "thumb", "thunder", "ticket", "tide", "tiger", "tilt", "timber", "time", "tiny", "tip", "tired", "tissue", "title", "toast", "tobacco", "today", "toddler", "toe", "together", "toilet", "token", "tomato", "tomorrow", "tone", "tongue", "tonight", "tool", "tooth", "top", "topic", "topple", "torch", "tornado", "tortoise", "toss", "total", "tourist", "toward", "tower", "town", "toy", "track", "trade", "traffic", "tragic", "train", "transfer", "trap", "trash", "travel", "tray", "treat", "tree", "trend", "trial", "tribe", "trick", "trigger", "trim", "trip", "trophy", "trouble", "truck", "true", "truly", "trumpet", "trust", "truth", "try", "tube", "tuition", "tumble", "tuna", "tunnel", "turkey", "turn", "turtle", "twelve", "twenty", "twice", "twin", "twist", "two", "type", "typical", - "ugly", "umbrella", "unable", "unaware", "uncle", "uncover", "under", "undo", "unfair", "unfold", "unhappy", "uniform", "unique", "unit", "universe", "unknown", "unlock", "until", "unusual", "unveil", "update", "upgrade", "uphold", "upon", "upper", "upset", "urban", "urge", "usage", "use", "used", "useful", "useless", "usual", "utility", - "vacant", "vacuum", "vague", "valid", "valley", "valve", "van", "vanish", "vapor", "various", "vast", "vault", "vehicle", "velvet", "vendor", "venture", "venue", "verb", "verify", "version", "very", "vessel", "veteran", "viable", "vibrant", "vicious", "victory", "video", "view", "village", "vintage", "violin", "virtual", "virus", "visa", "visit", "visual", "vital", "vivid", "vocal", "voice", "void", "volcano", "volume", "vote", "voyage", - "wage", "wagon", "wait", "walk", "wall", "walnut", "want", "warfare", "warm", "warrior", "wash", "wasp", "waste", "water", "wave", "way", "wealth", 
"weapon", "wear", "weasel", "weather", "web", "wedding", "weekend", "weird", "welcome", "west", "wet", "whale", "what", "wheat", "wheel", "when", "where", "whip", "whisper", "wide", "width", "wife", "wild", "will", "win", "window", "wine", "wing", "wink", "winner", "winter", "wire", "wisdom", "wise", "wish", "witness", "wolf", "woman", "wonder", "wood", "wool", "word", "work", "world", "worry", "worth", "wrap", "wreck", "wrestle", "wrist", "write", "wrong", - "yard", "year", "yellow", "you", "young", "youth", - "zebra", "zero", "zone", "zoo", -} From ba9e7fc09e97bedbcda8fa0cf39f8f8871b8edc5 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 4 Mar 2024 13:59:11 +0100 Subject: [PATCH 146/172] stores: fix MultipartUploads order of returned uploads --- stores/multipart.go | 22 +++++---- stores/multipart_test.go | 98 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 111 insertions(+), 9 deletions(-) diff --git a/stores/multipart.go b/stores/multipart.go index 3a5bcd54a..3b2f09c29 100644 --- a/stores/multipart.go +++ b/stores/multipart.go @@ -187,17 +187,18 @@ func (s *SQLStore) MultipartUploads(ctx context.Context, bucket, prefix, keyMark limit++ } - prefixExpr := exprTRUE - if prefix != "" { - prefixExpr = gorm.Expr("SUBSTR(object_id, 1, ?) = ?", utf8.RuneCountInString(prefix), prefix) + // both markers must be used together + if (keyMarker == "" && uploadIDMarker != "") || (keyMarker != "" && uploadIDMarker == "") { + return api.MultipartListUploadsResponse{}, errors.New("both keyMarker and uploadIDMarker must be set or neither") } - keyMarkerExpr := exprTRUE + markerExpr := exprTRUE if keyMarker != "" { - keyMarkerExpr = gorm.Expr("object_id > ?", keyMarker) + markerExpr = gorm.Expr("object_id > ? OR (object_id = ? AND upload_id > ?)", keyMarker, keyMarker, uploadIDMarker) } - uploadIDMarkerExpr := exprTRUE - if uploadIDMarker != "" { - uploadIDMarkerExpr = gorm.Expr("upload_id > ?", keyMarker) + + prefixExpr := exprTRUE + if prefix != "" { + prefixExpr = gorm.Expr("SUBSTR(object_id, 1, ?) = ?", utf8.RuneCountInString(prefix), prefix) } err = s.retryTransaction(func(tx *gorm.DB) error { @@ -205,7 +206,10 @@ func (s *SQLStore) MultipartUploads(ctx context.Context, bucket, prefix, keyMark err := tx. Model(&dbMultipartUpload{}). Joins("DBBucket"). - Where("? AND ? AND ? AND DBBucket.name = ?", prefixExpr, keyMarkerExpr, uploadIDMarkerExpr, bucket). + Where("DBBucket.name", bucket). + Where("?", markerExpr). + Where("?", prefixExpr). + Order("object_id ASC, upload_id ASC"). Limit(limit). Find(&dbUploads). 
Error diff --git a/stores/multipart_test.go b/stores/multipart_test.go index eeda43229..37b294418 100644 --- a/stores/multipart_test.go +++ b/stores/multipart_test.go @@ -4,6 +4,8 @@ import ( "context" "encoding/hex" "reflect" + "sort" + "strings" "testing" "time" @@ -168,3 +170,99 @@ func TestMultipartUploadWithUploadPackingRegression(t *testing.T) { t.Fatalf("expected object total size to be %v, got %v", totalSize, obj.TotalSize()) } } + +func TestMultipartUploads(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + // create 3 multipart uploads, the first 2 have the same path + resp1, err := ss.CreateMultipartUpload(context.Background(), api.DefaultBucketName, "/foo", object.NoOpKey, testMimeType, testMetadata) + if err != nil { + t.Fatal(err) + } + resp2, err := ss.CreateMultipartUpload(context.Background(), api.DefaultBucketName, "/foo", object.NoOpKey, testMimeType, testMetadata) + if err != nil { + t.Fatal(err) + } + resp3, err := ss.CreateMultipartUpload(context.Background(), api.DefaultBucketName, "/foo2", object.NoOpKey, testMimeType, testMetadata) + if err != nil { + t.Fatal(err) + } + + // prepare the expected order of uploads returned by MultipartUploads + orderedUploads := []struct { + uploadID string + objectID string + }{ + {uploadID: resp1.UploadID, objectID: "/foo"}, + {uploadID: resp2.UploadID, objectID: "/foo"}, + {uploadID: resp3.UploadID, objectID: "/foo2"}, + } + sort.Slice(orderedUploads, func(i, j int) bool { + if orderedUploads[i].objectID != orderedUploads[j].objectID { + return strings.Compare(orderedUploads[i].objectID, orderedUploads[j].objectID) < 0 + } + return strings.Compare(orderedUploads[i].uploadID, orderedUploads[j].uploadID) < 0 + }) + + // fetch uploads + mur, err := ss.MultipartUploads(context.Background(), api.DefaultBucketName, "", "", "", 3) + if err != nil { + t.Fatal(err) + } else if len(mur.Uploads) != 3 { + t.Fatal("expected 3 uploads") + } else if mur.Uploads[0].UploadID != orderedUploads[0].uploadID { + t.Fatal("unexpected upload id") + } else if mur.Uploads[1].UploadID != orderedUploads[1].uploadID { + t.Fatal("unexpected upload id") + } else if mur.Uploads[2].UploadID != orderedUploads[2].uploadID { + t.Fatal("unexpected upload id") + } + + // fetch uploads with prefix + mur, err = ss.MultipartUploads(context.Background(), api.DefaultBucketName, "/foo", "", "", 3) + if err != nil { + t.Fatal(err) + } else if len(mur.Uploads) != 3 { + t.Fatal("expected 3 uploads") + } else if mur.Uploads[0].UploadID != orderedUploads[0].uploadID { + t.Fatal("unexpected upload id") + } else if mur.Uploads[1].UploadID != orderedUploads[1].uploadID { + t.Fatal("unexpected upload id") + } else if mur.Uploads[2].UploadID != orderedUploads[2].uploadID { + t.Fatal("unexpected upload id") + } + mur, err = ss.MultipartUploads(context.Background(), api.DefaultBucketName, "/foo2", "", "", 3) + if err != nil { + t.Fatal(err) + } else if len(mur.Uploads) != 1 { + t.Fatal("expected 1 upload") + } else if mur.Uploads[0].UploadID != orderedUploads[2].uploadID { + t.Fatal("unexpected upload id") + } + + // paginate through them one-by-one + keyMarker := "" + uploadIDMarker := "" + hasMore := true + for hasMore { + mur, err = ss.MultipartUploads(context.Background(), api.DefaultBucketName, "", keyMarker, uploadIDMarker, 1) + if err != nil { + t.Fatal(err) + } else if len(mur.Uploads) != 1 { + t.Fatal("expected 1 upload") + } else if mur.Uploads[0].UploadID != orderedUploads[0].uploadID { + 
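The marker logic in the hunk above is standard keyset pagination over a compound key: the WHERE predicate must mirror the ORDER BY ("object_id ASC, upload_id ASC") exactly, or pages can skip or repeat uploads that share a path. Restated as a minimal sketch (GORM, same expression as the patch):

// (keyMarker, uploadIDMarker) identify the last row of the previous page;
// the predicate selects everything strictly after it in the sort order.
markerExpr := gorm.Expr(
	"object_id > ? OR (object_id = ? AND upload_id > ?)",
	keyMarker, keyMarker, uploadIDMarker,
)
q := tx.Where("?", markerExpr).Order("object_id ASC, upload_id ASC").Limit(limit)

Requiring both markers to be set together, as the patch now does, rules out the ambiguous half-specified case that the old independent marker expressions silently accepted.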
t.Fatalf("unexpected upload id: %v != %v", mur.Uploads[0].UploadID, orderedUploads[0].uploadID) + } + orderedUploads = orderedUploads[1:] + keyMarker = mur.NextPathMarker + uploadIDMarker = mur.NextUploadIDMarker + hasMore = mur.HasMore + } + if len(orderedUploads) != 0 { + t.Fatal("expected 3 iterations") + } +} From bc5251d08418b6bcbe2b6bb8d295a43d02d20fc4 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 4 Mar 2024 14:43:22 +0100 Subject: [PATCH 147/172] autopilot: address review comments --- api/autopilot.go | 30 ++++++++++++++++-------------- autopilot/autopilot.go | 21 +++++++++++---------- autopilot/client.go | 2 +- autopilot/hostfilter.go | 2 +- 4 files changed, 29 insertions(+), 26 deletions(-) diff --git a/api/autopilot.go b/api/autopilot.go index 846c490ed..fdd6c4942 100644 --- a/api/autopilot.go +++ b/api/autopilot.go @@ -99,25 +99,27 @@ type ( } ConfigRecommendation struct { - GougingSettings GougingSettings `json:"gougingSettings,omitempty"` + GougingSettings GougingSettings `json:"gougingSettings"` } // ConfigEvaluationResponse is the response type for /evaluate ConfigEvaluationResponse struct { - Usable uint64 `json:"usable"` - Total uint64 `json:"total"` - Blocked uint64 `json:"blocked"` - Gouging struct { - Contract uint64 `json:"contract"` - Download uint64 `json:"download"` - Gouging uint64 `json:"gouging"` - Pruning uint64 `json:"pruning"` - Upload uint64 `json:"upload"` + Hosts uint64 `json:"hosts"` + Usable uint64 `json:"usable"` + Unusable struct { + Blocked uint64 `json:"blocked"` + Gouging struct { + Contract uint64 `json:"contract"` + Download uint64 `json:"download"` + Gouging uint64 `json:"gouging"` + Pruning uint64 `json:"pruning"` + Upload uint64 `json:"upload"` + } `json:"gouging"` + NotAcceptingContracts uint64 `json:"notAcceptingContracts"` + NotScanned uint64 `json:"notScanned"` + Unknown uint64 `json:"unknown"` } - NotAcceptingContracts uint64 `json:"notAcceptingContracts"` - NotScanned uint64 `json:"notScanned"` - Other uint64 `json:"other"` - Recommendation *ConfigRecommendation `json:"recommendation,omitempty"` + Recommendation *ConfigRecommendation `json:"recommendation,omitempty"` } // HostHandlerResponse is the response type for the /host/:hostkey endpoint. diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 7cf56deb7..5983cc49e 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -746,39 +746,40 @@ func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types. // a recommendation on how to loosen it. 
func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (resp api.ConfigEvaluationResponse) { gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) + + resp.Hosts = uint64(len(hosts)) for _, host := range hosts { usable, usableBreakdown := isUsableHost(cfg, rs, gc, host, 0, 0) if usable { resp.Usable++ continue } - resp.Total++ if usableBreakdown.blocked > 0 { - resp.Blocked++ + resp.Unusable.Blocked++ } if usableBreakdown.notacceptingcontracts > 0 { - resp.NotAcceptingContracts++ + resp.Unusable.NotAcceptingContracts++ } if usableBreakdown.notcompletingscan > 0 { - resp.NotScanned++ + resp.Unusable.NotScanned++ } if usableBreakdown.unknown > 0 { - resp.Other++ + resp.Unusable.Unknown++ } if usableBreakdown.gougingBreakdown.ContractErr != "" { - resp.Gouging.Contract++ + resp.Unusable.Gouging.Contract++ } if usableBreakdown.gougingBreakdown.DownloadErr != "" { - resp.Gouging.Download++ + resp.Unusable.Gouging.Download++ } if usableBreakdown.gougingBreakdown.GougingErr != "" { - resp.Gouging.Gouging++ + resp.Unusable.Gouging.Gouging++ } if usableBreakdown.gougingBreakdown.PruneErr != "" { - resp.Gouging.Pruning++ + resp.Unusable.Gouging.Pruning++ } if usableBreakdown.gougingBreakdown.UploadErr != "" { - resp.Gouging.Upload++ + resp.Unusable.Gouging.Upload++ } } diff --git a/autopilot/client.go b/autopilot/client.go index 5c4dee064..ba16754a5 100644 --- a/autopilot/client.go +++ b/autopilot/client.go @@ -65,7 +65,7 @@ func (c *Client) Trigger(forceScan bool) (_ bool, err error) { return resp.Triggered, err } -// EvalutateConfig evaluates a autopilot config using the given gouging and +// EvalutateConfig evaluates an autopilot config using the given gouging and // redundancy settings. func (c *Client) EvaluateConfig(ctx context.Context, cfg api.AutopilotConfig, gs api.GougingSettings, rs api.RedundancySettings) (resp api.ConfigEvaluationResponse, err error) { err = c.c.WithContext(ctx).POST("/config", api.ConfigEvaluationRequest{ diff --git a/autopilot/hostfilter.go b/autopilot/hostfilter.go index 2ebc81f38..574862a97 100644 --- a/autopilot/hostfilter.go +++ b/autopilot/hostfilter.go @@ -204,7 +204,7 @@ func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker. gougingBreakdown = gc.Check(&h.Settings, &h.PriceTable.HostPriceTable) if gougingBreakdown.Gouging() { errs = append(errs, fmt.Errorf("%w: %v", errHostPriceGouging, gougingBreakdown)) - } else { + } else if minScore > 0 { // perform scoring checks // // NOTE: only perform these scoring checks if we know the host is From fa4e7966a8f1efbc369009ea7b8e70a63ec8b96f Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 4 Mar 2024 16:31:37 +0100 Subject: [PATCH 148/172] stores: update RefreshHealth query and add unit test --- stores/metadata.go | 46 ++++------------ stores/metadata_test.go | 117 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 127 insertions(+), 36 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 901c0f73b..529d7ec89 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1991,42 +1991,16 @@ LIMIT ? rowsAffected = res.RowsAffected // Update the health of objects with outdated health. 
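A note on the query consolidation that follows: the dialect-specific UPDATE pair collapses into one statement because a correlated subquery in both the SET and the WHERE clause is valid on SQLite and MySQL alike, and the WHERE clause keeps the write set to rows whose health actually changed. One subtlety worth knowing: objects without any slabs are left untouched, since MIN over an empty set yields NULL and "health != NULL" never evaluates to true. A sanity-check query under the same assumptions (illustrative, not from the patch):

// Counts the objects the refresh would rewrite; useful when eyeballing
// whether the UPDATE's WHERE clause prunes as expected.
var stale int64
err := tx.Raw(`
SELECT COUNT(*) FROM objects
WHERE health != (
	SELECT MIN(slabs.health)
	FROM slabs
	INNER JOIN slices ON slices.db_slab_id = slabs.id AND slices.db_object_id = objects.id
)`).Scan(&stale).Error
if err != nil {
	return err
}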
- var err error - if isSQLite(tx) { - err = tx.Exec(` - UPDATE objects - SET health = ( - SELECT MIN(slabs.health) - FROM slabs - INNER JOIN slices ON slices.db_slab_id = slabs.id - INNER JOIN objects ON slices.db_object_id = objects.id - ) - WHERE EXISTS ( - SELECT 1 FROM slabs - INNER JOIN slices ON slices.db_slab_id = slabs.id - INNER JOIN objects ON slices.db_object_id = objects.id - WHERE slabs.health < objects.health - ) - `).Error - } else { - err = tx.Exec(` - UPDATE objects - JOIN ( - SELECT slices.db_object_id, MIN(slabs.health) AS min_health - FROM slabs - INNER JOIN slices ON slices.db_slab_id = slabs.id - GROUP BY slices.db_object_id - ) AS min_healths ON objects.id = min_healths.db_object_id - SET objects.health = min_healths.min_health - WHERE objects.health > ( - SELECT MIN(slabs.health) - FROM slabs - INNER JOIN slices ON slices.db_slab_id = slabs.id - WHERE slices.db_object_id = objects.id - ); - `).Error - } - return err + return tx.Exec(` +UPDATE objects SET health = ( + SELECT MIN(slabs.health) + FROM slabs + INNER JOIN slices ON slices.db_slab_id = slabs.id AND slices.db_object_id = objects.id +) WHERE health != ( + SELECT MIN(slabs.health) + FROM slabs + INNER JOIN slices ON slices.db_slab_id = slabs.id AND slices.db_object_id = objects.id +)`).Error }) if err != nil { return err diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 16e104695..a5fc76b05 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -3879,6 +3879,123 @@ func TestSlabHealthInvalidation(t *testing.T) { } } +func TestRefreshHealth(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + // define a helper function to return an object's health + health := func(name string) float64 { + t.Helper() + o, err := ss.Object(context.Background(), api.DefaultBucketName, name) + if err != nil { + t.Fatal(err) + } + return o.Health + } + + // add test hosts + hks, err := ss.addTestHosts(2) + if err != nil { + t.Fatal(err) + } + + // add test contract & set it as contract set + fcids, _, err := ss.addTestContracts(hks) + if err != nil { + t.Fatal(err) + } + err = ss.SetContractSet(context.Background(), testContractSet, fcids) + if err != nil { + t.Fatal(err) + } + + // add two test objects + o1 := t.Name() + "1" + s1 := object.GenerateEncryptionKey() + if added, err := ss.addTestObject(o1, object.Object{ + Key: object.GenerateEncryptionKey(), + Slabs: []object.SlabSlice{{Slab: object.Slab{ + Key: s1, + Shards: []object.Sector{ + newTestShard(hks[0], fcids[0], types.Hash256{0}), + newTestShard(hks[1], fcids[1], types.Hash256{1}), + }, + }}}, + }); err != nil { + t.Fatal(err) + } else if added.Health != 1 { + t.Fatal("expected health to be 1, got", added.Health) + } + + o2 := t.Name() + "2" + s2 := object.GenerateEncryptionKey() + if added, err := ss.addTestObject(o2, object.Object{ + Key: object.GenerateEncryptionKey(), + Slabs: []object.SlabSlice{{Slab: object.Slab{ + Key: s2, + Shards: []object.Sector{ + newTestShard(hks[0], fcids[0], types.Hash256{2}), + newTestShard(hks[1], fcids[1], types.Hash256{3}), + }, + }}}, + }); err != nil { + t.Fatal(err) + } else if added.Health != 1 { + t.Fatal("expected health to be 1, got", added.Health) + } + + // update contract set and refresh health, assert health is .5 + err = ss.SetContractSet(context.Background(), testContractSet, fcids[:1]) + if err != nil { + t.Fatal(err) + } + err = ss.RefreshHealth(context.Background()) + if err != nil { + t.Fatal(err) + } + if health(o1) != .5 { + t.Fatal("expected 
health to be .5, got", health(o1)) + } else if health(o2) != .5 { + t.Fatal("expected health to be .5, got", health(o2)) + } + + // set the health of s1 to be lower than .5 + s1b, _ := s1.MarshalBinary() + err = ss.db.Exec("UPDATE slabs SET health = 0.4 WHERE key = ?", secretKey(s1b)).Error + if err != nil { + t.Fatal(err) + } + + // refresh health and assert only object 1's health got updated + err = ss.RefreshHealth(context.Background()) + if err != nil { + t.Fatal(err) + } + if health(o1) != .4 { + t.Fatal("expected health to be .4, got", health(o1)) + } else if health(o2) != .5 { + t.Fatal("expected health to be .5, got", health(o2)) + } + + // set the health of s2 to be higher than .5 + s2b, _ := s2.MarshalBinary() + err = ss.db.Exec("UPDATE slabs SET health = 0.6 WHERE key = ?", secretKey(s2b)).Error + if err != nil { + t.Fatal(err) + } + + // refresh health and assert only object 2's health got updated + err = ss.RefreshHealth(context.Background()) + if err != nil { + t.Fatal(err) + } + if health(o1) != .4 { + t.Fatal("expected health to be .4, got", health(o1)) + } else if health(o2) != .6 { + t.Fatal("expected health to be .6, got", health(o2)) + } +} + func TestSlabCleanupTrigger(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() From cb73ef0803116c4ec065dd628517882dbadd244b Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 4 Mar 2024 16:50:36 +0100 Subject: [PATCH 149/172] stores: fix TestRefreshHealth for MySQL test suite --- stores/metadata_test.go | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index a5fc76b05..27aa26a13 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -3911,11 +3911,10 @@ func TestRefreshHealth(t *testing.T) { // add two test objects o1 := t.Name() + "1" - s1 := object.GenerateEncryptionKey() if added, err := ss.addTestObject(o1, object.Object{ Key: object.GenerateEncryptionKey(), Slabs: []object.SlabSlice{{Slab: object.Slab{ - Key: s1, + Key: object.GenerateEncryptionKey(), Shards: []object.Sector{ newTestShard(hks[0], fcids[0], types.Hash256{0}), newTestShard(hks[1], fcids[1], types.Hash256{1}), @@ -3928,11 +3927,10 @@ func TestRefreshHealth(t *testing.T) { } o2 := t.Name() + "2" - s2 := object.GenerateEncryptionKey() if added, err := ss.addTestObject(o2, object.Object{ Key: object.GenerateEncryptionKey(), Slabs: []object.SlabSlice{{Slab: object.Slab{ - Key: s2, + Key: object.GenerateEncryptionKey(), Shards: []object.Sector{ newTestShard(hks[0], fcids[0], types.Hash256{2}), newTestShard(hks[1], fcids[1], types.Hash256{3}), @@ -3960,8 +3958,7 @@ func TestRefreshHealth(t *testing.T) { } // set the health of s1 to be lower than .5 - s1b, _ := s1.MarshalBinary() - err = ss.db.Exec("UPDATE slabs SET health = 0.4 WHERE key = ?", secretKey(s1b)).Error + err = ss.overrideSlabHealth(o1, 0.4) if err != nil { t.Fatal(err) } @@ -3978,8 +3975,7 @@ func TestRefreshHealth(t *testing.T) { } // set the health of s2 to be higher than .5 - s2b, _ := s2.MarshalBinary() - err = ss.db.Exec("UPDATE slabs SET health = 0.6 WHERE key = ?", secretKey(s2b)).Error + err = ss.overrideSlabHealth(o2, 0.6) if err != nil { t.Fatal(err) } From a6b50491862da83fa35ceb24b76a5ebb83127f0b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 4 Mar 2024 16:58:53 +0100 Subject: [PATCH 150/172] worker: fix host height gouging when price table has expiry far in the future --- worker/gouging.go | 11 +++++++++ worker/host_test.go | 2 +- worker/mocks_test.go | 6 +++-- 
worker/pricetables.go | 16 ++++++++++++- worker/pricetables_test.go | 49 ++++++++++++++++++++++++++++++++++---- 5 files changed, 75 insertions(+), 9 deletions(-) diff --git a/worker/gouging.go b/worker/gouging.go index 19ae177aa..a7b2078a1 100644 --- a/worker/gouging.go +++ b/worker/gouging.go @@ -39,6 +39,7 @@ var ( type ( GougingChecker interface { Check(_ *rhpv2.HostSettings, _ *rhpv3.HostPriceTable) api.HostGougingBreakdown + BlocksUntilBlockHeightGouging(hostHeight uint64) int64 } gougingChecker struct { @@ -107,6 +108,16 @@ func NewGougingChecker(gs api.GougingSettings, cs api.ConsensusState, txnFee typ } } +func (gc gougingChecker) BlocksUntilBlockHeightGouging(hostHeight uint64) int64 { + blockHeight := gc.consensusState.BlockHeight + leeway := gc.settings.HostBlockHeightLeeway + var min uint64 + if blockHeight >= uint64(leeway) { + min = blockHeight - uint64(leeway) + } + return int64(hostHeight) - int64(min) +} + func (gc gougingChecker) Check(hs *rhpv2.HostSettings, pt *rhpv3.HostPriceTable) api.HostGougingBreakdown { if hs == nil && pt == nil { panic("gouging checker needs to be provided with at least host settings or a price table") // developer error diff --git a/worker/host_test.go b/worker/host_test.go index 618c4cfb3..a993c12e1 100644 --- a/worker/host_test.go +++ b/worker/host_test.go @@ -70,7 +70,7 @@ func newTestHostPriceTable() hostdb.HostPriceTable { frand.Read(uid[:]) return hostdb.HostPriceTable{ - HostPriceTable: rhpv3.HostPriceTable{UID: uid, Validity: time.Minute}, + HostPriceTable: rhpv3.HostPriceTable{UID: uid, HostBlockHeight: 100, Validity: time.Minute}, Expiry: time.Now().Add(time.Minute), } } diff --git a/worker/mocks_test.go b/worker/mocks_test.go index aefcf65ce..4f7c24b8f 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -64,10 +64,12 @@ func (*alerterMock) DismissAlerts(context.Context, ...types.Hash256) error { ret var _ ConsensusState = (*chainMock)(nil) -type chainMock struct{} +type chainMock struct { + cs api.ConsensusState +} func (c *chainMock) ConsensusState(ctx context.Context) (api.ConsensusState, error) { - return api.ConsensusState{}, nil + return c.cs, nil } var _ Bus = (*busMock)(nil) diff --git a/worker/pricetables.go b/worker/pricetables.go index 3f7683b2a..1bc2ee009 100644 --- a/worker/pricetables.go +++ b/worker/pricetables.go @@ -19,6 +19,11 @@ const ( // for use, we essentially add 30 seconds to the current time when checking // whether we are still before a pricetable's expiry time priceTableValidityLeeway = 30 * time.Second + + // priceTableBlockHeightLeeway is the amount of blocks before a price table + // is considered gouging on the block height when we renew it even if it is + // still valid + priceTableBlockHeightLeeway = 2 ) var ( @@ -106,10 +111,19 @@ func (p *priceTable) fetch(ctx context.Context, rev *types.FileContractRevision) hpt = p.hpt p.mu.Unlock() + // get gouging checker to figure out how many blocks we have left before the + // current price table is considered to gouge on the block height + gc, err := GougingCheckerFromContext(ctx, false) + if err != nil { + return hostdb.HostPriceTable{}, err + } + // figure out whether we should update the price table, if not we can return if hpt.UID != (rhpv3.SettingsID{}) { randomUpdateLeeway := frand.Intn(int(math.Floor(hpt.HostPriceTable.Validity.Seconds() * 0.1))) - if time.Now().Add(priceTableValidityLeeway).Add(time.Duration(randomUpdateLeeway) * time.Second).Before(hpt.Expiry) { + closeToGouging := gc.BlocksUntilBlockHeightGouging(hpt.HostBlockHeight) <= 
priceTableBlockHeightLeeway + closeToExpiring := time.Now().Add(priceTableValidityLeeway).Add(time.Duration(randomUpdateLeeway) * time.Second).After(hpt.Expiry) + if !closeToExpiring && !closeToGouging { return } } diff --git a/worker/pricetables_test.go b/worker/pricetables_test.go index 5e0616092..55b0f7057 100644 --- a/worker/pricetables_test.go +++ b/worker/pricetables_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" ) @@ -22,6 +23,20 @@ func TestPriceTables(t *testing.T) { h := hs.addHost() c := cs.addContract(h.hk) + cm := &chainMock{ + cs: api.ConsensusState{ + BlockHeight: 1, + }, + } + + blockHeightLeeway := 10 + gCtx := WithGougingChecker(context.Background(), cm, api.GougingParams{ + ConsensusState: cm.cs, + GougingSettings: api.GougingSettings{ + HostBlockHeightLeeway: blockHeightLeeway, + }, + }) + // expire its price table expiredPT := newTestHostPriceTable() expiredPT.Expiry = time.Now() @@ -36,13 +51,13 @@ func TestPriceTables(t *testing.T) { })) // trigger a fetch to make it block - go pts.fetch(context.Background(), h.hk, nil) + go pts.fetch(gCtx, h.hk, nil) time.Sleep(50 * time.Millisecond) // fetch it again but with a canceled context to avoid blocking // indefinitely, the error will indicate we were blocking on a price table // update - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(gCtx) cancel() _, err := pts.fetch(ctx, h.hk, nil) if !errors.Is(err, errPriceTableUpdateTimedOut) { @@ -51,7 +66,7 @@ func TestPriceTables(t *testing.T) { // unblock and assert we receive a valid price table close(fetchPTBlockChan) - update, err := pts.fetch(context.Background(), h.hk, nil) + update, err := pts.fetch(gCtx, h.hk, nil) if err != nil { t.Fatal(err) } else if update.UID != validPT.UID { @@ -61,7 +76,7 @@ func TestPriceTables(t *testing.T) { // refresh the price table on the host, update again, assert we receive the // same price table as it hasn't expired yet h.hi.PriceTable = newTestHostPriceTable() - update, err = pts.fetch(context.Background(), h.hk, nil) + update, err = pts.fetch(gCtx, h.hk, nil) if err != nil { t.Fatal(err) } else if update.UID != validPT.UID { @@ -72,7 +87,31 @@ func TestPriceTables(t *testing.T) { pts.priceTables[h.hk].hpt.Expiry = time.Now() // fetch it again and assert we updated the price table - update, err = pts.fetch(context.Background(), h.hk, nil) + update, err = pts.fetch(gCtx, h.hk, nil) + if err != nil { + t.Fatal(err) + } else if update.UID != h.hi.PriceTable.UID { + t.Fatal("price table mismatch") + } + + // refresh the price table on the host and make sure fetching doesn't update + // the price table since it's not expired + validPT = h.hi.PriceTable + h.hi.PriceTable = newTestHostPriceTable() + update, err = pts.fetch(gCtx, h.hk, nil) + if err != nil { + t.Fatal(err) + } else if update.UID != validPT.UID { + t.Fatal("price table mismatch") + } + + // increase the current block height to be exactly + // 'priceTableBlockHeightLeeway' blocks before the leeway of the gouging + // settings + cm.cs.BlockHeight = validPT.HostBlockHeight + uint64(blockHeightLeeway) - priceTableBlockHeightLeeway + + // fetch it again and assert we updated the price table + update, err = pts.fetch(gCtx, h.hk, nil) if err != nil { t.Fatal(err) } else if update.UID != h.hi.PriceTable.UID { From 1bed43aa53faf2735b0a15b4a5737194be4580e2 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 5 Mar 2024 09:49:54 +0100 Subject: [PATCH 151/172] autopilot: introduce 
smallestValidScore --- autopilot/autopilot.go | 2 +- autopilot/autopilot_test.go | 9 +++++++-- autopilot/contractor.go | 6 +++--- autopilot/hostscore.go | 2 ++ 4 files changed, 13 insertions(+), 6 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 5983cc49e..b546eada9 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -733,7 +733,7 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (usables uint64) { gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) for _, host := range hosts { - usable, _ := isUsableHost(cfg, rs, gc, host, 0, 0) + usable, _ := isUsableHost(cfg, rs, gc, host, smallestValidScore, 0) if usable { usables++ } diff --git a/autopilot/autopilot_test.go b/autopilot/autopilot_test.go index 211a49a33..f818c312b 100644 --- a/autopilot/autopilot_test.go +++ b/autopilot/autopilot_test.go @@ -17,22 +17,27 @@ func TestOptimiseGougingSetting(t *testing.T) { var hosts []hostdb.Host for i := 0; i < 10; i++ { hosts = append(hosts, hostdb.Host{ + KnownSince: time.Unix(0, 0), PriceTable: hostdb.HostPriceTable{ HostPriceTable: rhpv3.HostPriceTable{ - MaxCollateral: types.Siacoins(1000), + CollateralCost: types.Siacoins(1), + MaxCollateral: types.Siacoins(1000), }, }, Settings: rhpv2.HostSettings{ AcceptingContracts: true, + Collateral: types.Siacoins(1), MaxCollateral: types.Siacoins(1000), + Version: "1.6.0", }, Interactions: hostdb.Interactions{ + Uptime: time.Hour * 1000, LastScan: time.Now(), LastScanSuccess: true, SecondToLastScanSuccess: true, TotalScans: 100, }, - LastAnnouncement: time.Now(), + LastAnnouncement: time.Unix(0, 0), Scanned: true, }) } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 9e2b52cca..188b55661 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -267,7 +267,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // fetch candidate hosts - candidates, unusableHosts, err := c.candidateHosts(ctx, hosts, usedHosts, hostData, math.SmallestNonzeroFloat64) // avoid 0 score hosts + candidates, unusableHosts, err := c.candidateHosts(ctx, hosts, usedHosts, hostData, smallestValidScore) // avoid 0 score hosts if err != nil { return false, err } @@ -1249,7 +1249,7 @@ func (c *contractor) calculateMinScore(ctx context.Context, candidates []scoredH // return early if there's no hosts if len(candidates) == 0 { c.logger.Warn("min host score is set to the smallest non-zero float because there are no candidate hosts") - return math.SmallestNonzeroFloat64 + return smallestValidScore } // determine the number of random hosts we fetch per iteration when @@ -1283,7 +1283,7 @@ func (c *contractor) calculateMinScore(ctx context.Context, candidates []scoredH return candidates[i].score > candidates[j].score }) if len(candidates) < int(numContracts) { - return math.SmallestNonzeroFloat64 + return smallestValidScore } else if cutoff := candidates[numContracts-1].score; minScore > cutoff { minScore = cutoff } diff --git a/autopilot/hostscore.go b/autopilot/hostscore.go index f0f103c6c..b15857d19 100644 --- a/autopilot/hostscore.go +++ b/autopilot/hostscore.go @@ -13,6 +13,8 @@ import ( "go.sia.tech/siad/build" ) +const smallestValidScore = math.SmallestNonzeroFloat64 + func hostScore(cfg api.AutopilotConfig, h hostdb.Host, storedData uint64, 
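Two things tie this commit together: the bare math.SmallestNonzeroFloat64 literals scattered across the contractor are replaced by the named constant, making the intent ("any strictly positive score passes") visible at each call site, and countUsableHosts now passes smallestValidScore instead of 0. The latter matters because the earlier hostfilter change gates the scoring checks behind minScore > 0; with a literal 0, those checks were silently skipped during config evaluation.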
expectedRedundancy float64) api.HostScoreBreakdown { // idealDataPerHost is the amount of data that we would have to put on each // host assuming that our storage requirements were spread evenly across From 7bbafb029f1ed0e162036632ac9a7c1d20d53c9d Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 5 Mar 2024 10:58:53 +0100 Subject: [PATCH 152/172] e2e: fix TestGouging --- autopilot/autopilot.go | 5 +++++ internal/test/e2e/gouging_test.go | 16 ++++++++++------ 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index b546eada9..c6ce42884 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -815,30 +815,35 @@ func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Cu // MaxRPCPrice tmpGS := maxGS() + tmpGS.MaxRPCPrice = gs.MaxRPCPrice if optimiseGougingSetting(&tmpGS, &tmpGS.MaxRPCPrice, cfg, cs, fee, currentPeriod, rs, hosts) { optimisedGS.MaxRPCPrice = tmpGS.MaxRPCPrice success = true } // MaxContractPrice tmpGS = maxGS() + tmpGS.MaxContractPrice = gs.MaxContractPrice if optimiseGougingSetting(&tmpGS, &tmpGS.MaxContractPrice, cfg, cs, fee, currentPeriod, rs, hosts) { optimisedGS.MaxContractPrice = tmpGS.MaxContractPrice success = true } // MaxDownloadPrice tmpGS = maxGS() + tmpGS.MaxDownloadPrice = gs.MaxDownloadPrice if optimiseGougingSetting(&tmpGS, &tmpGS.MaxDownloadPrice, cfg, cs, fee, currentPeriod, rs, hosts) { optimisedGS.MaxDownloadPrice = tmpGS.MaxDownloadPrice success = true } // MaxUploadPrice tmpGS = maxGS() + tmpGS.MaxUploadPrice = gs.MaxUploadPrice if optimiseGougingSetting(&tmpGS, &tmpGS.MaxUploadPrice, cfg, cs, fee, currentPeriod, rs, hosts) { optimisedGS.MaxUploadPrice = tmpGS.MaxUploadPrice success = true } // MaxStoragePrice tmpGS = maxGS() + tmpGS.MaxStoragePrice = gs.MaxStoragePrice if optimiseGougingSetting(&tmpGS, &tmpGS.MaxStoragePrice, cfg, cs, fee, currentPeriod, rs, hosts) { optimisedGS.MaxStoragePrice = tmpGS.MaxStoragePrice success = true diff --git a/internal/test/e2e/gouging_test.go b/internal/test/e2e/gouging_test.go index 28bf51069..a08fadd3f 100644 --- a/internal/test/e2e/gouging_test.go +++ b/internal/test/e2e/gouging_test.go @@ -53,17 +53,21 @@ func TestGouging(t *testing.T) { t.Fatal("unexpected data") } + // update the gouging settings to limit the max storage price to 100H + gs := test.GougingSettings + gs.MaxStoragePrice = types.NewCurrency64(100) + if err := b.UpdateSetting(context.Background(), api.SettingGouging, gs); err != nil { + t.Fatal(err) + } // fetch current contract set contracts, err := b.Contracts(context.Background(), api.ContractsOpts{ContractSet: cfg.Set}) tt.OK(err) - // update the host settings so it's gouging + // update one host's settings so it's gouging hk := contracts[0].HostKey host := hostsMap[hk.String()] settings := host.settings.Settings() - settings.IngressPrice = types.Siacoins(1) - settings.EgressPrice = types.Siacoins(1) - settings.ContractPrice = types.Siacoins(11) + settings.StoragePrice = types.NewCurrency64(101) // gouging tt.OK(host.UpdateSettings(settings)) // make sure the price table expires so the worker is forced to fetch it @@ -76,7 +80,7 @@ func TestGouging(t *testing.T) { // update all host settings so they're gouging for _, h := range cluster.hosts { settings := h.settings.Settings() - settings.EgressPrice = types.Siacoins(1) + settings.StoragePrice = types.NewCurrency64(101) if err := h.UpdateSettings(settings); err != nil { t.Fatal(err) } @@ -93,7 +97,7 @@ func TestGouging(t *testing.T) { } // try 
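The seeding pattern in the autopilot.go hunk above appears designed so each gouging dimension is evaluated in isolation: every field is first relaxed via maxGS(), then only the field under test is pinned to the user's current value before optimiseGougingSetting searches from it. Presumably, without the seeding the search started from an already-maximal value, so the recommendation could not improve on the user's actual setting. The per-field step, restated (maxGS and optimiseGougingSetting are the patch's own helpers, shown here only to isolate the pattern):

tmpGS := maxGS()                           // relax every other dimension
tmpGS.MaxStoragePrice = gs.MaxStoragePrice // pin the one under test
if optimiseGougingSetting(&tmpGS, &tmpGS.MaxStoragePrice, cfg, cs, fee, currentPeriod, rs, hosts) {
	optimisedGS.MaxStoragePrice = tmpGS.MaxStoragePrice
	success = true
}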
optimising gouging settings - resp, err := cluster.Autopilot.EvaluateConfig(context.Background(), test.AutopilotConfig, test.GougingSettings, test.RedundancySettings) + resp, err := cluster.Autopilot.EvaluateConfig(context.Background(), test.AutopilotConfig, gs, test.RedundancySettings) tt.OK(err) if resp.Recommendation == nil { t.Fatal("expected recommendation") From e09f7ca24cab20773b64f95c7b4f57324b5cbe19 Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 29 Feb 2024 10:22:49 +0100 Subject: [PATCH 153/172] worker: fix TestUploadPackedSlab NDF --- internal/test/tt.go | 12 ++++-- worker/upload.go | 23 +++------- worker/upload_test.go | 99 ++++++++++++++++++++----------------------- 3 files changed, 59 insertions(+), 75 deletions(-) diff --git a/internal/test/tt.go b/internal/test/tt.go index 22bcff223..ca53dc904 100644 --- a/internal/test/tt.go +++ b/internal/test/tt.go @@ -90,12 +90,16 @@ func (t impl) OKAll(vs ...interface{}) { func (t impl) Retry(tries int, durationBetweenAttempts time.Duration, fn func() error) { t.Helper() - for i := 1; i < tries; i++ { - err := fn() + t.OK(Retry(tries, durationBetweenAttempts, fn)) +} + +func Retry(tries int, durationBetweenAttempts time.Duration, fn func() error) (err error) { + for i := 0; i < tries; i++ { + err = fn() if err == nil { - return + return nil } time.Sleep(durationBetweenAttempts) } - t.OK(fn()) + return } diff --git a/worker/upload.go b/worker/upload.go index d95c9db9e..c5e86a166 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -198,7 +198,7 @@ func (w *worker) upload(ctx context.Context, r io.Reader, contracts []api.Contra // upload packed slab if len(packedSlabs) > 0 { if err := w.tryUploadPackedSlab(ctx, mem, packedSlabs[0], up.rs, up.contractSet, lockingPriorityBlockedUpload); err != nil { - w.logger.Errorf("couldn't upload packed slabs, err: %v", err) + w.logger.Error(err) } } } @@ -227,10 +227,6 @@ func (w *worker) threadedUploadPackedSlabs(rs api.RedundancySettings, contractSe w.uploadsMu.Unlock() }() - // upload packed slabs - var mu sync.Mutex - var errs error - // derive a context that we can use as an interrupt in case of an error or shutdown. 
 	interruptCtx, interruptCancel := context.WithCancel(w.shutdownCtx)
 	defer interruptCancel()
@@ -246,9 +242,9 @@ func (w *worker) threadedUploadPackedSlabs(rs api.RedundancySettings, contractSe
 		// fetch packed slab to upload
 		packedSlabs, err := w.bus.PackedSlabsForUpload(interruptCtx, defaultPackedSlabsLockDuration, uint8(rs.MinShards), uint8(rs.TotalShards), contractSet, 1)
 		if err != nil {
-			mu.Lock()
-			errs = errors.Join(errs, fmt.Errorf("couldn't fetch packed slabs from bus: %v", err))
-			mu.Unlock()
+			w.logger.Errorf("couldn't fetch packed slabs from bus: %v", err)
+			mem.Release()
+			break
 		}

 		// no more packed slabs to upload
@@ -270,9 +266,7 @@ func (w *worker) threadedUploadPackedSlabs(rs api.RedundancySettings, contractSe

 			// try to upload a packed slab, if there were no packed slabs left to upload ok is false
 			if err := w.tryUploadPackedSlab(ctx, mem, ps, rs, contractSet, lockPriority); err != nil {
-				mu.Lock()
-				errs = errors.Join(errs, err)
-				mu.Unlock()
+				w.logger.Error(err)
 				interruptCancel() // prevent new uploads from being launched
 			}
 		}(packedSlabs[0])
@@ -280,11 +274,6 @@ func (w *worker) threadedUploadPackedSlabs(rs api.RedundancySettings, contractSe

 	// wait for all threads to finish
 	wg.Wait()
-
-	// log errors
-	if err := errors.Join(errs); err != nil {
-		w.logger.Errorf("couldn't upload packed slabs, err: %v", err)
-	}
 	return
 }
@@ -890,7 +879,7 @@ loop:
 	for slab.numInflight > 0 && !done {
 		select {
 		case <-u.shutdownCtx.Done():
-			return nil, 0, 0, errors.New("upload stopped")
+			return nil, 0, 0, ErrShuttingDown
 		case <-ctx.Done():
 			return nil, 0, 0, ctx.Err()
 		case resp := <-respChan:
diff --git a/worker/upload_test.go b/worker/upload_test.go
index 044827799..1d441693f 100644
--- a/worker/upload_test.go
+++ b/worker/upload_test.go
@@ -4,13 +4,14 @@ import (
 	"bytes"
 	"context"
 	"errors"
-	"math"
+	"fmt"
 	"testing"
 	"time"

 	rhpv2 "go.sia.tech/core/rhp/v2"
 	"go.sia.tech/core/types"
 	"go.sia.tech/renterd/api"
+	"go.sia.tech/renterd/internal/test"
 	"go.sia.tech/renterd/object"
 	"lukechampine.com/frand"
 )
@@ -128,7 +129,7 @@ func TestUploadPackedSlab(t *testing.T) {
 	w := newTestWorker(t)

 	// add hosts to worker
-	w.AddHosts(testRedundancySettings.TotalShards * 2)
+	w.AddHosts(testRedundancySettings.TotalShards)

 	// convenience variables
 	os := w.os
@@ -140,9 +141,6 @@ func TestUploadPackedSlab(t *testing.T) {
 	params := testParameters(t.Name())
 	params.packing = true

-	// block aysnc packed slab uploads
-	w.BlockAsyncPackedSlabUploads(params)
-
 	// create test data
 	data := frand.Bytes(128)
@@ -208,67 +206,60 @@ func TestUploadPackedSlab(t *testing.T) {
 		t.Fatal("data mismatch")
 	}

-	// configure max buffer size
-	os.setSlabBufferMaxSizeSoft(128)
-
-	// upload 2x64 bytes using the worker
-	params.path = t.Name() + "2"
-	_, err = w.upload(context.Background(), bytes.NewReader(frand.Bytes(64)), w.Contracts(), params)
-	if err != nil {
-		t.Fatal(err)
-	}
-	params.path = t.Name() + "3"
-	_, err = w.upload(context.Background(), bytes.NewReader(frand.Bytes(64)), w.Contracts(), params)
-	if err != nil {
-		t.Fatal(err)
+	// define a helper that counts packed slabs
+	packedSlabsCount := func() int {
+		t.Helper()
+		os.mu.Lock()
+		cnt := len(os.partials)
+		os.mu.Unlock()
+		return cnt
 	}

-	// assert we still have two packed slabs (buffer limit not reached)
-	pss, err = os.PackedSlabsForUpload(context.Background(), 0, uint8(params.rs.MinShards), uint8(params.rs.TotalShards), testContractSet, math.MaxInt)
-	if err != nil {
-		t.Fatal(err)
-	} else if len(pss) != 2 {
-		t.Fatal("expected 2 packed slab")
+	// define a helper that uploads data using the worker
+	var c int
+	uploadBytes := func(n int) {
+		t.Helper()
+		params.path = fmt.Sprintf("%s_%d", t.Name(), c)
+		_, err := w.upload(context.Background(), bytes.NewReader(frand.Bytes(n)), w.Contracts(), params)
+		if err != nil {
+			t.Fatal(err)
+		}
+		c++
 	}

-	// upload one more byte (buffer limit reached)
-	params.path = t.Name() + "4"
-	_, err = w.upload(context.Background(), bytes.NewReader(frand.Bytes(1)), w.Contracts(), params)
-	if err != nil {
-		t.Fatal(err)
-	}
+	// block async packed slab uploads
+	w.BlockAsyncPackedSlabUploads(params)

-	// assert we still have two packed slabs (one got uploaded synchronously)
-	pss, err = os.PackedSlabsForUpload(context.Background(), 0, uint8(params.rs.MinShards), uint8(params.rs.TotalShards), testContractSet, math.MaxInt)
-	if err != nil {
-		t.Fatal(err)
-	} else if len(pss) != 2 {
-		t.Fatal("expected 2 packed slab")
+	// configure max buffer size
+	os.setSlabBufferMaxSizeSoft(128)
+
+	// upload 2x64 bytes using the worker and assert we still have two packed
+	// slabs (buffer limit not reached)
+	uploadBytes(64)
+	uploadBytes(64)
+	if packedSlabsCount() != 2 {
+		t.Fatal("expected 2 packed slabs")
 	}

-	// allow some time for the background thread to realise we blocked async
-	// packed slab uploads
-	time.Sleep(time.Second)
+	// upload one more byte and assert we still have two packed slabs (one got
+	// uploaded synchronously because buffer limit was reached)
+	uploadBytes(1)
+	if packedSlabsCount() != 2 {
+		t.Fatal("expected 2 packed slabs")
+	}

 	// unblock asynchronous uploads
 	w.UnblockAsyncPackedSlabUploads(params)
+	uploadBytes(129) // ensure background thread is running

-	// upload 1 byte using the worker
-	params.path = t.Name() + "5"
-	_, err = w.upload(context.Background(), bytes.NewReader(frand.Bytes(129)), w.Contracts(), params)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// allow some time for the thread to pick up the packed slabs
-	time.Sleep(time.Second)
-
-	// assert we uploaded all packed slabs
-	pss, err = os.PackedSlabsForUpload(context.Background(), 0, uint8(params.rs.MinShards), uint8(params.rs.TotalShards), testContractSet, 1)
-	if err != nil {
+	// assert packed slabs get uploaded asynchronously
+	if err := test.Retry(100, 100*time.Millisecond, func() error {
+		if packedSlabsCount() != 0 {
+			return errors.New("expected 0 packed slabs")
+		}
+		return nil
+	}); err != nil {
 		t.Fatal(err)
-	} else if len(pss) != 0 {
-		t.Fatal("expected 0 packed slab")
 	}
 }

From e3204944a9204232d20e823977cfc2298c0685e6 Mon Sep 17 00:00:00 2001
From: PJ
Date: Tue, 5 Mar 2024 14:41:01 +0100
Subject: [PATCH 154/172] testing: remove named return var

---
 internal/test/tt.go | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/internal/test/tt.go b/internal/test/tt.go
index ca53dc904..d44152eda 100644
--- a/internal/test/tt.go
+++ b/internal/test/tt.go
@@ -93,13 +93,14 @@ func (t impl) Retry(tries int, durationBetweenAttempts time.Duration, fn func()
 	t.OK(Retry(tries, durationBetweenAttempts, fn))
 }

-func Retry(tries int, durationBetweenAttempts time.Duration, fn func() error) (err error) {
+func Retry(tries int, durationBetweenAttempts time.Duration, fn func() error) error {
+	var err error
 	for i := 0; i < tries; i++ {
 		err = fn()
 		if err == nil {
-			return nil
+			break
 		}
 		time.Sleep(durationBetweenAttempts)
 	}
-	return
+	return err
 }

From 64a27884726e4173020143a0d1c642c00c3f6bf1 Mon Sep 17 00:00:00 2001
From: Chris Schinnerl
Date: Tue, 5 Mar 2024 15:06:51 +0100
Subject: [PATCH 155/172] worker: improve logging when scanning hosts

---
 worker/worker.go | 62 ++++++++++++++++++++++++++++++++++------------
 1 file changed, 47 insertions(+), 15 deletions(-)

diff --git a/worker/worker.go b/worker/worker.go
index b335a5f6c..79ad1b1b0 100644
--- a/worker/worker.go
+++ b/worker/worker.go
@@ -9,6 +9,7 @@ import (
 	"math/big"
 	"net"
 	"net/http"
+	"os"
 	"runtime"
 	"sort"
 	"strings"
@@ -52,6 +53,8 @@ const (
 )

 var (
+	errHostOnPrivateNetwork = errors.New("host is on a private network")
+
 	ErrShuttingDown = errors.New("worker is shutting down")
 )
@@ -259,13 +262,6 @@ func (w *worker) rhpScanHandler(jc jape.Context) {
 		return
 	}

-	// apply the timeout
-	if rsr.Timeout > 0 {
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, time.Duration(rsr.Timeout))
-		defer cancel()
-	}
-
 	// only scan hosts if we are online
 	peers, err := w.bus.SyncerPeers(ctx)
 	if jc.Check("failed to fetch peers from bus", err) != nil {
@@ -278,7 +274,7 @@ func (w *worker) rhpScanHandler(jc jape.Context) {

 	// scan host
 	var errStr string
-	settings, priceTable, elapsed, err := w.scanHost(ctx, rsr.HostKey, rsr.HostIP)
+	settings, priceTable, elapsed, err := w.scanHost(time.Duration(rsr.Timeout), rsr.HostKey, rsr.HostIP)
 	if err != nil {
 		errStr = err.Error()
 	}
@@ -1390,9 +1386,18 @@ func (w *worker) Shutdown(ctx context.Context) error {
 	return nil
 }

-func (w *worker) scanHost(ctx context.Context, hostKey types.PublicKey, hostIP string) (rhpv2.HostSettings, rhpv3.HostPriceTable, time.Duration, error) {
+func (w *worker) scanHost(timeout time.Duration, hostKey types.PublicKey, hostIP string) (rhpv2.HostSettings, rhpv3.HostPriceTable, time.Duration, error) {
+	logger := w.logger.With("host", hostKey).With("hostIP", hostIP).With("timeout", timeout)
+
 	// prepare a helper for scanning
 	scan := func() (rhpv2.HostSettings, rhpv3.HostPriceTable, time.Duration, error) {
+		// apply timeout
+		ctx := w.shutdownCtx
+		var cancel context.CancelFunc
+		if timeout > 0 {
+			ctx, cancel = context.WithTimeout(w.shutdownCtx, timeout)
+			defer cancel()
+		}
 		// resolve hostIP. We don't want to scan hosts on private networks.
 		if !w.allowPrivateIPs {
 			host, _, err := net.SplitHostPort(hostIP)
@@ -1405,7 +1410,7 @@ func (w *worker) scanHost(ctx context.Context, hostKey types.PublicKey, hostIP s
 			}
 			for _, addr := range addrs {
 				if isPrivateIP(addr.IP) {
-					return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, errors.New("host is on a private network")
+					return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, errHostOnPrivateNetwork
 				}
 			}
 		}
@@ -1418,8 +1423,9 @@ func (w *worker) scanHost(ctx context.Context, hostKey types.PublicKey, hostIP s
 			// NOTE: we overwrite the NetAddress with the host address here since we
 			// just used it to dial the host we know it's valid
 			settings.NetAddress = hostIP
+			return nil
 		}
-		return err
+		return fmt.Errorf("failed to fetch host settings: %w", err)
 	})
 	elapsed := time.Since(start)
 	if err != nil {
@@ -1430,7 +1436,7 @@ func (w *worker) scanHost(ctx context.Context, hostKey types.PublicKey, hostIP s
 	var pt rhpv3.HostPriceTable
 	err = w.transportPoolV3.withTransportV3(ctx, hostKey, settings.SiamuxAddr(), func(ctx context.Context, t *transportV3) error {
 		if hpt, err := RPCPriceTable(ctx, t, func(pt rhpv3.HostPriceTable) (rhpv3.PaymentMethod, error) { return nil, nil }); err != nil {
-			return err
+			return fmt.Errorf("failed to fetch host price table: %w", err)
 		} else {
 			pt = hpt.HostPriceTable
 			return nil
@@ -1444,12 +1450,17 @@ func (w *worker) scanHost(ctx context.Context, hostKey types.PublicKey, hostIP s
 	if err != nil {
 		// scan: second try
 		select {
-		case <-ctx.Done():
+		case <-w.shutdownCtx.Done():
+			return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, w.shutdownCtx.Err()
 		case <-time.After(time.Second):
 		}
 		settings, pt, duration, err = scan()
+
+		logger = logger.With("elapsed", duration)
 		if err == nil {
-			w.logger.Debugf("successfully scanned host %v after retry", hostKey)
+			logger.Debugf("successfully scanned host on second try")
+		} else if !isErrHostUnreachable(err) {
+			logger.Debugw("failed to scan host", zap.Error(err))
 		}
 	}
@@ -1477,7 +1488,7 @@ func (w *worker) scanHost(ctx context.Context, hostKey types.PublicKey, hostIP s
 		},
 	})
 	if scanErr != nil {
-		w.logger.Errorf("failed to record host scan: %v", scanErr)
+		logger.Errorw("failed to record host scan", zap.Error(scanErr))
 	}
 	return settings, pt, duration, err
 }
@@ -1494,6 +1505,27 @@ func discardTxnOnErr(ctx context.Context, bus Bus, l *zap.SugaredLogger, txn typ
 	cancel()
 }

+func isErrHostUnreachable(err error) bool {
+	if isError(err, os.ErrDeadlineExceeded) {
+		return true
+	} else if isError(err, context.DeadlineExceeded) {
+		return true
+	} else if isError(err, errHostOnPrivateNetwork) {
+		return true
+	} else if isError(err, errors.New("no route to host")) {
+		return true
+	} else if isError(err, errors.New("no such host")) {
+		return true
+	} else if isError(err, errors.New("connection refused")) {
+		return true
+	} else if isError(err, errors.New("unknown port")) {
+		return true
+	} else if isError(err, errors.New("cannot assign requested address")) {
+		return true
+	}
+	return false
+}
+
 func isErrDuplicateTransactionSet(err error) bool {
 	return err != nil && strings.Contains(err.Error(), modules.ErrDuplicateTransactionSet.Error())
 }

From f51fc9822b8bcc646d2e10cc6d3802864f95d8bd Mon Sep 17 00:00:00 2001
From: PJ
Date: Tue, 5 Mar 2024 18:22:25 +0100
Subject: [PATCH 156/172] worker: add HEAD object endpoint

---
 api/object.go                      | 66 ++++++++++++++++++++++++++-
 internal/test/e2e/metadata_test.go | 35 ++++++++++----
 worker/client/client.go            | 73 ++++++++++++++++++------------
 worker/serve.go                    |  8 ++--
 worker/worker.go                   | 41 +++++++++++++++++
 5 files changed, 178 insertions(+), 45 deletions(-)

diff --git a/api/object.go b/api/object.go
index cef672a97..f53dbb5ac 100644
--- a/api/object.go
+++ b/api/object.go
@@ -83,7 +83,7 @@ type (
 		Object *Object `json:"object,omitempty"`
 	}

-	// GetObjectResponse is the response type for the /worker/object endpoint.
+	// GetObjectResponse is the response type for the GET /worker/object endpoint.
 	GetObjectResponse struct {
 		Content      io.ReadCloser `json:"content"`
 		ContentType  string        `json:"contentType"`
@@ -91,6 +91,16 @@ type (
 		Range        *DownloadRange     `json:"range,omitempty"`
 		Size         int64              `json:"size"`
 		Metadata     ObjectUserMetadata `json:"metadata"`
+		// NOTE: keep HeadObjectResponse in sync with this type
+	}
+
+	// HeadObjectResponse is the response type for the HEAD /worker/object endpoint.
+	HeadObjectResponse struct {
+		ContentType  string             `json:"contentType"`
+		LastModified string             `json:"lastModified"`
+		Range        *DownloadRange     `json:"range,omitempty"`
+		Size         int64              `json:"size"`
+		Metadata     ObjectUserMetadata `json:"metadata"`
 	}

 	// ObjectsDeleteRequest is the request type for the /bus/objects/list endpoint.
@@ -135,6 +145,46 @@ type (
 	}
 )

+func ParseObjectHeadResponseFrom(header http.Header) (HeadObjectResponse, error) {
+	// parse size
+	var size int64
+	_, err := fmt.Sscan(header.Get("Content-Length"), &size)
+	if err != nil {
+		return HeadObjectResponse{}, err
+	}
+
+	// parse range
+	var r *DownloadRange
+	if cr := header.Get("Content-Range"); cr != "" {
+		dr, err := ParseDownloadRange(cr)
+		if err != nil {
+			return HeadObjectResponse{}, err
+		}
+		r = &dr
+
+		// if a range is set, the size is the size extracted from the range
+		// since Content-Length will then only be the length of the returned
+		// range.
+		size = dr.Size
+	}
+
+	// parse headers
+	headers := make(map[string]string)
+	for k, v := range header {
+		if len(v) > 0 {
+			headers[k] = v[0]
+		}
+	}
+
+	return HeadObjectResponse{
+		ContentType:  header.Get("Content-Type"),
+		LastModified: header.Get("Last-Modified"),
+		Range:        r,
+		Size:         size,
+		Metadata:     ExtractObjectUserMetadataFrom(headers),
+	}, nil
+}
+
 func ExtractObjectUserMetadataFrom(metadata map[string]string) ObjectUserMetadata {
 	oum := make(map[string]string)
 	for k, v := range metadata {
@@ -206,6 +256,10 @@ type (
 		Batch bool
 	}

+	HeadObjectOptions struct {
+		Range DownloadRange
+	}
+
 	DownloadObjectOptions struct {
 		GetObjectOptions
 		Range DownloadRange
@@ -301,6 +355,16 @@ func (opts DeleteObjectOptions) Apply(values url.Values) {
 	}
 }

+func (opts HeadObjectOptions) ApplyHeaders(h http.Header) {
+	if opts.Range != (DownloadRange{}) {
+		if opts.Range.Length == -1 {
+			h.Set("Range", fmt.Sprintf("bytes=%v-", opts.Range.Offset))
+		} else {
+			h.Set("Range", fmt.Sprintf("bytes=%v-%v", opts.Range.Offset, opts.Range.Offset+opts.Range.Length-1))
+		}
+	}
+}
+
 func (opts GetObjectOptions) Apply(values url.Values) {
 	if opts.Prefix != "" {
 		values.Set("prefix", opts.Prefix)
diff --git a/internal/test/e2e/metadata_test.go b/internal/test/e2e/metadata_test.go
index b71078eef..d11f6ba4e 100644
--- a/internal/test/e2e/metadata_test.go
+++ b/internal/test/e2e/metadata_test.go
@@ -33,27 +33,42 @@ func TestObjectMetadata(t *testing.T) {
 	}

 	// upload the object
-	_, err := w.UploadObject(context.Background(), bytes.NewReader([]byte(t.Name())), api.DefaultBucketName, t.Name(), opts)
+	data := []byte(t.Name())
+	_, err := w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, t.Name(), opts)
 	if err != nil {
 		t.Fatal(err)
 	}

 	// get the object from the bus and assert it has the metadata
-	ress, err := b.Object(context.Background(), api.DefaultBucketName, t.Name(), api.GetObjectOptions{})
+	or, err := b.Object(context.Background(), api.DefaultBucketName, t.Name(), api.GetObjectOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
-	if !reflect.DeepEqual(ress.Object.Metadata, opts.Metadata) {
-		t.Fatal("metadata mismatch", ress.Object.Metadata)
+	if !reflect.DeepEqual(or.Object.Metadata, opts.Metadata) {
+		t.Fatal("metadata mismatch", or.Object.Metadata)
 	}

 	// get the object from the worker and assert it has the metadata
-	res, err := w.GetObject(context.Background(), api.DefaultBucketName, t.Name(), api.DownloadObjectOptions{})
+	gor, err := w.GetObject(context.Background(), api.DefaultBucketName, t.Name(), api.DownloadObjectOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
-	if !reflect.DeepEqual(res.Metadata, opts.Metadata) {
-		t.Fatal("metadata mismatch", res.Metadata)
+	if !reflect.DeepEqual(gor.Metadata, opts.Metadata) {
+		t.Fatal("metadata mismatch", gor.Metadata)
+	}
+
+	// perform a HEAD request and assert the headers are all present
+	hor, err := w.HeadObject(context.Background(), api.DefaultBucketName, t.Name(), api.HeadObjectOptions{Range: api.DownloadRange{Offset: 1, Length: 1}})
+	if err != nil {
+		t.Fatal(err)
+	} else if !reflect.DeepEqual(hor, &api.HeadObjectResponse{
+		ContentType:  or.Object.ContentType(),
+		LastModified: or.Object.LastModified(),
+		Range:        &api.DownloadRange{Offset: 1, Length: 1, Size: int64(len(data))},
+		Size:         int64(len(data)),
+		Metadata:     gor.Metadata,
+	}) {
+		t.Fatalf("unexpected response: %+v", hor)
 	}

 	// re-upload the object
@@ -63,11 +78,11 @@ func TestObjectMetadata(t *testing.T) {
 	}

 	// assert metadata was removed
-	res, err = w.GetObject(context.Background(), api.DefaultBucketName, t.Name(), api.DownloadObjectOptions{})
+	gor, err = w.GetObject(context.Background(), api.DefaultBucketName, t.Name(), api.DownloadObjectOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
-	if len(res.Metadata) > 0 {
-		t.Fatal("unexpected metadata", res.Metadata)
+	if len(gor.Metadata) > 0 {
+		t.Fatal("unexpected metadata", gor.Metadata)
 	}
 }
diff --git a/worker/client/client.go b/worker/client/client.go
index f45789093..ff8625541 100644
--- a/worker/client/client.go
+++ b/worker/client/client.go
@@ -77,13 +77,49 @@ func (c *Client) DownloadStats() (resp api.DownloadStatsResponse, err error) {
 	return
 }

+// HeadObject returns the metadata of the object at the given path.
+func (c *Client) HeadObject(ctx context.Context, bucket, path string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) {
+	c.c.Custom("HEAD", fmt.Sprintf("/objects/%s", path), nil, nil)
+
+	if strings.HasSuffix(path, "/") {
+		return nil, errors.New("the given path is a directory, HEAD can only be performed on objects")
+	}
+
+	values := url.Values{}
+	values.Set("bucket", url.QueryEscape(bucket))
+	path += "?" + values.Encode()
+
+	// TODO: support HEAD in jape client
+	req, err := http.NewRequestWithContext(ctx, "HEAD", fmt.Sprintf("%s/objects/%s", c.c.BaseURL, path), nil)
+	if err != nil {
+		panic(err)
+	}
+	req.SetBasicAuth("", c.c.WithContext(ctx).Password)
+	opts.ApplyHeaders(req.Header)
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if resp.StatusCode != 200 && resp.StatusCode != 206 {
+		err, _ := io.ReadAll(resp.Body)
+		_ = resp.Body.Close()
+		return nil, errors.New(string(err))
+	}
+
+	head, err := api.ParseObjectHeadResponseFrom(resp.Header)
+	if err != nil {
+		return nil, err
+	}
+	return &head, nil
+}
+
 // GetObject returns the object at given path alongside its metadata.
 func (c *Client) GetObject(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) {
 	if strings.HasSuffix(path, "/") {
 		return nil, errors.New("the given path is a directory, use ObjectEntries instead")
 	}

-	// Start download.
 	path = api.ObjectPathEscape(path)
 	body, header, err := c.object(ctx, bucket, path, opts)
 	if err != nil {
@@ -96,41 +132,18 @@ func (c *Client) GetObject(ctx context.Context, bucket, path string, opts api.Do
 		}
 	}()

-	// Parse header.
-	var size int64
-	_, err = fmt.Sscan(header.Get("Content-Length"), &size)
+	head, err := api.ParseObjectHeadResponseFrom(header)
 	if err != nil {
 		return nil, err
 	}
-	var r *api.DownloadRange
-	if cr := header.Get("Content-Range"); cr != "" {
-		dr, err := api.ParseDownloadRange(cr)
-		if err != nil {
-			return nil, err
-		}
-		r = &dr
-
-		// If a range is set, the size is the size extracted from the range
-		// since Content-Length will then only be the length of the returned
-		// range.
-		size = dr.Size
-	}
-
-	// Parse headers.
-	headers := make(map[string]string)
-	for k, v := range header {
-		if len(v) > 0 {
-			headers[k] = v[0]
-		}
-	}

 	return &api.GetObjectResponse{
 		Content:      body,
-		ContentType:  header.Get("Content-Type"),
-		LastModified: header.Get("Last-Modified"),
-		Range:        r,
-		Size:         size,
-		Metadata:     api.ExtractObjectUserMetadataFrom(headers),
+		ContentType:  head.ContentType,
+		LastModified: head.LastModified,
+		Range:        head.Range,
+		Size:         head.Size,
+		Metadata:     head.Metadata,
 	}, nil
 }
diff --git a/worker/serve.go b/worker/serve.go
index 76c1fb2d5..25d0c0412 100644
--- a/worker/serve.go
+++ b/worker/serve.go
@@ -76,9 +76,6 @@ func serveContent(rw http.ResponseWriter, req *http.Request, obj api.Object, dow
 		}
 	}()

-	// create a content reader
-	rs := newContentReader(pr, obj, offset)
-
 	// fetch the content type, if not set and we can't infer it from object's
 	// name we default to application/octet-stream, that is important because we
 	// have to avoid http.ServeContent to sniff the content type as it would
@@ -87,17 +84,20 @@ func serveContent(rw http.ResponseWriter, req *http.Request, obj api.Object, dow
 	if contentType == "" {
 		contentType = "application/octet-stream"
 	}
+	rw.Header().Set("Content-Type", contentType)

 	// set the response headers, no need to set Last-Modified header as
 	// serveContent does that for us
 	rw.Header().Set("ETag", api.FormatETag(obj.ETag))
-	rw.Header().Set("Content-Type", contentType)

 	// set the user metadata headers
 	for k, v := range obj.Metadata {
 		rw.Header().Set(fmt.Sprintf("%s%s", api.ObjectMetadataPrefix, k), v)
 	}

+	// create a content reader
+	rs := newContentReader(pr, obj, offset)
+
 	http.ServeContent(rw, req, obj.Name, obj.ModTime.Std(), rs)
 	return http.StatusOK, nil
 }
diff --git a/worker/worker.go b/worker/worker.go
index b335a5f6c..91786c481 100644
--- a/worker/worker.go
+++ b/worker/worker.go
@@ -860,6 +860,46 @@ func (w *worker) uploadsStatsHandlerGET(jc jape.Context) {
 	})
 }

+func (w *worker) objectsHandlerHEAD(jc jape.Context) {
+	// parse bucket
+	bucket := api.DefaultBucketName
+	if jc.DecodeForm("bucket", &bucket) != nil {
+		return
+	}
+
+	// parse path
+	path := jc.PathParam("path")
+	if path == "" || strings.HasSuffix(path, "/") {
+		jc.Error(fmt.Errorf("directories are not accepted"), http.StatusBadRequest)
+		return
+	}
+
+	// fetch object metadata
+	res, err := w.bus.Object(jc.Request.Context(), bucket, path, api.GetObjectOptions{
+		OnlyMetadata: true,
+	})
+	if errors.Is(err, api.ErrObjectNotFound) {
+		jc.Error(err, http.StatusNotFound)
+		return
+	} else if err != nil {
+		jc.Error(err, http.StatusInternalServerError)
+		return
+	} else if res.Object == nil {
+		jc.Error(api.ErrObjectNotFound, http.StatusInternalServerError) // should never happen but checking because we deref. later
+		return
+	}
+
+	// serve the content
+	status, err := serveContent(jc.ResponseWriter, jc.Request, *res.Object, func(io.Writer, int64, int64) error { return nil })
+	if errors.Is(err, http_range.ErrInvalid) || errors.Is(err, errMultiRangeNotSupported) {
+		jc.Error(err, http.StatusBadRequest)
+	} else if errors.Is(err, http_range.ErrNoOverlap) {
+		jc.Error(err, http.StatusRequestedRangeNotSatisfiable)
+	} else if err != nil {
+		jc.Error(err, status)
+	}
+}
+
 func (w *worker) objectsHandlerGET(jc jape.Context) {
 	jc.Custom(nil, []api.ObjectMetadata{})
@@ -1366,6 +1406,7 @@ func (w *worker) Handler() http.Handler {
 		"GET    /stats/uploads": w.uploadsStatsHandlerGET,
 		"POST   /slab/migrate":  w.slabMigrateHandler,

+		"HEAD   /objects/*path": w.objectsHandlerHEAD,
 		"GET    /objects/*path": w.objectsHandlerGET,
 		"PUT    /objects/*path": w.objectsHandlerPUT,
 		"DELETE /objects/*path": w.objectsHandlerDELETE,

From 906bcc8295ed6f788c2e28c6f4eaf0c2b026654e Mon Sep 17 00:00:00 2001
From: PJ
Date: Tue, 5 Mar 2024 18:43:35 +0100
Subject: [PATCH 157/172] worker: update error message

---
 worker/worker.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/worker/worker.go b/worker/worker.go
index 91786c481..729c544c7 100644
--- a/worker/worker.go
+++ b/worker/worker.go
@@ -870,7 +870,7 @@ func (w *worker) objectsHandlerHEAD(jc jape.Context) {
 	// parse path
 	path := jc.PathParam("path")
 	if path == "" || strings.HasSuffix(path, "/") {
-		jc.Error(fmt.Errorf("directories are not accepted"), http.StatusBadRequest)
+		jc.Error(errors.New("HEAD requests can only be performed on objects, not directories"), http.StatusBadRequest)
 		return
 	}

From 991d89dfdad6686bd47682e6f9275be14e1dabd5 Mon Sep 17 00:00:00 2001
From: Chris Schinnerl
Date: Wed, 6 Mar 2024 10:22:34 +0100
Subject: [PATCH 158/172] e2e: address review comment

---
 internal/test/e2e/gouging_test.go | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/internal/test/e2e/gouging_test.go b/internal/test/e2e/gouging_test.go
index a08fadd3f..68dc264eb 100644
--- a/internal/test/e2e/gouging_test.go
+++ b/internal/test/e2e/gouging_test.go
@@ -106,8 +106,9 @@ func TestGouging(t *testing.T) {
 	// set optimised settings
 	tt.OK(b.UpdateSetting(context.Background(), api.SettingGouging, resp.Recommendation.GougingSettings))

-	// renter should recover and be able to upload again
-
-	// upload some data - should fail
-	tt.FailAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, path, api.UploadObjectOptions{}))
+	// upload some data - should work now once contract maintenance is done
+	tt.Retry(30, time.Second, func() error {
+		_, err := w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, path, api.UploadObjectOptions{})
+		return err
+	})
 }

From 49c725cd6f6fc949fb9374dbe20414fae14bfa07 Mon Sep 17 00:00:00 2001
From: PJ
Date: Wed, 6 Mar 2024 10:31:00 +0100
Subject: [PATCH 159/172] bus: do not broadcast empty txn sets

---
 bus/bus.go                        |  5 ++++
 internal/test/e2e/cluster_test.go | 44 ++++++++++++++++++++++++++++++-
 2 files changed, 48 insertions(+), 1 deletion(-)

diff --git a/bus/bus.go b/bus/bus.go
index fbda894d7..045b8e82a 100644
--- a/bus/bus.go
+++ b/bus/bus.go
@@ -606,6 +606,11 @@ func (b *bus) walletRedistributeHandler(jc jape.Context) {
 	}

 	var ids []types.TransactionID
+	if len(txns) == 0 {
+		jc.Encode(ids)
+		return
+	}
+
 	for i := 0; i < len(txns); i++ {
 		err = b.w.SignTransaction(cs, &txns[i], toSign, types.CoveredFields{WholeTransaction: true})
 		if jc.Check("couldn't sign the transaction", err) != nil {
diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go
index c546937eb..d234e0f19 100644
--- a/internal/test/e2e/cluster_test.go
+++ b/internal/test/e2e/cluster_test.go
@@ -2382,7 +2382,7 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) {
 		uploadPacking: true,
 	})
 	defer cluster.Shutdown()
-	defer cluster.Shutdown()
+
 	b := cluster.Bus
 	w := cluster.Worker
 	slabSize := test.RedundancySettings.SlabSizeNoRedundancy()
@@ -2448,3 +2448,45 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) {
 		t.Fatal("unexpected data")
 	}
 }
+
+func TestWalletRedistribute(t *testing.T) {
+	if testing.Short() {
+		t.SkipNow()
+	}
+
+	cluster := newTestCluster(t, testClusterOptions{
+		hosts:         test.RedundancySettings.TotalShards,
+		uploadPacking: true,
+	})
+	defer cluster.Shutdown()
+
+	// redistribute into 5 outputs
+	_, err := cluster.Bus.WalletRedistribute(context.Background(), 5, types.Siacoins(10))
+	if err != nil {
+		t.Fatal(err)
+	}
+	cluster.MineBlocks(1)
+
+	// assert we have 5 outputs with 10 SC
+	outputs, err := cluster.Bus.WalletOutputs(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var cnt int
+	for _, output := range outputs {
+		if output.Value.Cmp(types.Siacoins(10)) == 0 {
+			cnt++
+		}
+	}
+	if cnt != 5 {
+		t.Fatalf("expected 5 outputs with 10 SC, got %v", cnt)
+	}
+
+	// assert redistributing into 3 outputs succeeds, used to fail because we
+	// were broadcasting an empty transaction set
+	_, err = cluster.Bus.WalletRedistribute(context.Background(), 3, types.Siacoins(10))
+	if err != nil {
+		t.Fatal(err)
+	}
+}

From 7f0cf5ccf82df6734755f939e30b004f4bd8843d Mon Sep 17 00:00:00 2001
From: Chris Schinnerl
Date: Wed, 6 Mar 2024 10:36:20 +0100
Subject: [PATCH 160/172] worker: address comments

---
 api/worker.go    |  4 ++++
 worker/worker.go | 62 +++++++++++++++++++-----------------------
 2 files changed, 29 insertions(+), 37 deletions(-)

diff --git a/api/worker.go b/api/worker.go
index 7ee2800f4..39f075718 100644
--- a/api/worker.go
+++ b/api/worker.go
@@ -19,6 +19,10 @@ var (
 	// ErrContractSetNotSpecified is returned by the worker API by endpoints that
 	// need a contract set to be able to upload data.
 	ErrContractSetNotSpecified = errors.New("contract set is not specified")
+
+	// ErrHostOnPrivateNetwork is returned by the worker API when a host can't
+	// be scanned since it is on a private network.
+	ErrHostOnPrivateNetwork = errors.New("host is on a private network")
 )

 type (
diff --git a/worker/worker.go b/worker/worker.go
index 79ad1b1b0..2fe9d334a 100644
--- a/worker/worker.go
+++ b/worker/worker.go
@@ -53,8 +53,6 @@ const (
 )

 var (
-	errHostOnPrivateNetwork = errors.New("host is on a private network")
-
 	ErrShuttingDown = errors.New("worker is shutting down")
 )
@@ -274,7 +272,7 @@ func (w *worker) rhpScanHandler(jc jape.Context) {

 	// scan host
 	var errStr string
-	settings, priceTable, elapsed, err := w.scanHost(time.Duration(rsr.Timeout), rsr.HostKey, rsr.HostIP)
+	settings, priceTable, elapsed, err := w.scanHost(ctx, time.Duration(rsr.Timeout), rsr.HostKey, rsr.HostIP)
 	if err != nil {
 		errStr = err.Error()
 	}
@@ -1386,16 +1384,15 @@ func (w *worker) Shutdown(ctx context.Context) error {
 	return nil
 }

-func (w *worker) scanHost(timeout time.Duration, hostKey types.PublicKey, hostIP string) (rhpv2.HostSettings, rhpv3.HostPriceTable, time.Duration, error) {
+func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey types.PublicKey, hostIP string) (rhpv2.HostSettings, rhpv3.HostPriceTable, time.Duration, error) {
 	logger := w.logger.With("host", hostKey).With("hostIP", hostIP).With("timeout", timeout)

 	// prepare a helper for scanning
 	scan := func() (rhpv2.HostSettings, rhpv3.HostPriceTable, time.Duration, error) {
 		// apply timeout
-		ctx := w.shutdownCtx
 		var cancel context.CancelFunc
 		if timeout > 0 {
-			ctx, cancel = context.WithTimeout(w.shutdownCtx, timeout)
+			ctx, cancel = context.WithTimeout(ctx, timeout)
 			defer cancel()
 		}
 		// resolve hostIP. We don't want to scan hosts on private networks.
@@ -1410,7 +1407,7 @@ func (w *worker) scanHost(timeout time.Duration, hostKey types.PublicKey, hostIP
 			}
 			for _, addr := range addrs {
 				if isPrivateIP(addr.IP) {
-					return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, errHostOnPrivateNetwork
+					return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, api.ErrHostOnPrivateNetwork
 				}
 			}
 		}
@@ -1418,14 +1415,15 @@ func (w *worker) scanHost(timeout time.Duration, hostKey types.PublicKey, hostIP
 		// fetch the host settings
 		start := time.Now()
 		var settings rhpv2.HostSettings
-		err := w.withTransportV2(ctx, hostKey, hostIP, func(t *rhpv2.Transport) (err error) {
-			if settings, err = RPCSettings(ctx, t); err == nil {
-				// NOTE: we overwrite the NetAddress with the host address here since we
-				// just used it to dial the host we know it's valid
-				settings.NetAddress = hostIP
-				return nil
+		err := w.withTransportV2(ctx, hostKey, hostIP, func(t *rhpv2.Transport) error {
+			var err error
+			if settings, err = RPCSettings(ctx, t); err != nil {
+				return fmt.Errorf("failed to fetch host settings: %w", err)
 			}
-			return fmt.Errorf("failed to fetch host settings: %w", err)
+			// NOTE: we overwrite the NetAddress with the host address here
+			// since we just used it to dial the host we know it's valid
+			settings.NetAddress = hostIP
+			return nil
 		})
 		elapsed := time.Since(start)
 		if err != nil {
@@ -1450,15 +1448,15 @@ func (w *worker) scanHost(timeout time.Duration, hostKey types.PublicKey, hostIP
 	if err != nil {
 		// scan: second try
 		select {
-		case <-w.shutdownCtx.Done():
-			return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, w.shutdownCtx.Err()
+		case <-ctx.Done():
+			return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, ctx.Err()
 		case <-time.After(time.Second):
 		}
 		settings, pt, duration, err = scan()

 		logger = logger.With("elapsed", duration)
 		if err == nil {
-			logger.Debugf("successfully scanned host on second try")
+			logger.Debug("successfully scanned host on second try")
 		} else if !isErrHostUnreachable(err) {
 			logger.Debugw("failed to scan host", zap.Error(err))
 		}
@@ -1468,8 +1466,8 @@ func (w *worker) scanHost(timeout time.Duration, hostKey types.PublicKey, hostIP
 	// just in case since recording a failed scan might have serious
 	// repercussions
 	select {
-	case <-w.shutdownCtx.Done():
-		return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, w.shutdownCtx.Err()
+	case <-ctx.Done():
+		return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, ctx.Err()
 	default:
 	}
@@ -1506,24 +1504,14 @@ func discardTxnOnErr(ctx context.Context, bus Bus, l *zap.SugaredLogger, txn typ
 }

 func isErrHostUnreachable(err error) bool {
-	if isError(err, os.ErrDeadlineExceeded) {
-		return true
-	} else if isError(err, context.DeadlineExceeded) {
-		return true
-	} else if isError(err, api.ErrHostOnPrivateNetwork) {
-		return true
-	} else if isError(err, errors.New("no route to host")) {
-		return true
-	} else if isError(err, errors.New("no such host")) {
-		return true
-	} else if isError(err, errors.New("connection refused")) {
-		return true
-	} else if isError(err, errors.New("unknown port")) {
-		return true
-	} else if isError(err, errors.New("cannot assign requested address")) {
-		return true
-	}
-	return false
+	return isError(err, os.ErrDeadlineExceeded) ||
+		isError(err, context.DeadlineExceeded) ||
+		isError(err, api.ErrHostOnPrivateNetwork) ||
+		isError(err, errors.New("no route to host")) ||
+		isError(err, errors.New("no such host")) ||
+		isError(err, errors.New("connection refused")) ||
+		isError(err, errors.New("unknown port")) ||
+		isError(err, errors.New("cannot assign requested address"))
 }

 func isErrDuplicateTransactionSet(err error) bool {

From dd8cb8a2ccea8551ac1918377c224f89346f080d Mon Sep 17 00:00:00 2001
From: Chris Schinnerl
Date: Wed, 6 Mar 2024 10:47:18 +0100
Subject: [PATCH 161/172] e2e: fix TestBlocklist

---
 worker/worker.go | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/worker/worker.go b/worker/worker.go
index 2fe9d334a..fcf68387f 100644
--- a/worker/worker.go
+++ b/worker/worker.go
@@ -1390,9 +1390,10 @@ func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey ty
 	// prepare a helper for scanning
 	scan := func() (rhpv2.HostSettings, rhpv3.HostPriceTable, time.Duration, error) {
 		// apply timeout
+		scanCtx := ctx
 		var cancel context.CancelFunc
 		if timeout > 0 {
-			ctx, cancel = context.WithTimeout(ctx, timeout)
+			scanCtx, cancel = context.WithTimeout(scanCtx, timeout)
 			defer cancel()
 		}
 		// resolve hostIP. We don't want to scan hosts on private networks.
@@ -1401,7 +1402,7 @@ func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey ty
 			if err != nil {
 				return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, err
 			}
-			addrs, err := (&net.Resolver{}).LookupIPAddr(ctx, host)
+			addrs, err := (&net.Resolver{}).LookupIPAddr(scanCtx, host)
 			if err != nil {
 				return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, err
 			}
@@ -1415,9 +1416,9 @@ func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey ty
 		// fetch the host settings
 		start := time.Now()
 		var settings rhpv2.HostSettings
-		err := w.withTransportV2(ctx, hostKey, hostIP, func(t *rhpv2.Transport) error {
+		err := w.withTransportV2(scanCtx, hostKey, hostIP, func(t *rhpv2.Transport) error {
 			var err error
-			if settings, err = RPCSettings(ctx, t); err != nil {
+			if settings, err = RPCSettings(scanCtx, t); err != nil {
 				return fmt.Errorf("failed to fetch host settings: %w", err)
 			}
 			// NOTE: we overwrite the NetAddress with the host address here
@@ -1432,7 +1433,7 @@ func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey ty
 		// fetch the host pricetable
 		var pt rhpv3.HostPriceTable
-		err = w.transportPoolV3.withTransportV3(ctx, hostKey, settings.SiamuxAddr(), func(ctx context.Context, t *transportV3) error {
+		err = w.transportPoolV3.withTransportV3(scanCtx, hostKey, settings.SiamuxAddr(), func(ctx context.Context, t *transportV3) error {
 			if hpt, err := RPCPriceTable(ctx, t, func(pt rhpv3.HostPriceTable) (rhpv3.PaymentMethod, error) { return nil, nil }); err != nil {
 				return fmt.Errorf("failed to fetch host price table: %w", err)
 			} else {
 				pt = hpt.HostPriceTable
 				return nil

From 31cad9f33846dfced016700f3166902fee068cb0 Mon Sep 17 00:00:00 2001
From: Chris Schinnerl
Date: Wed, 6 Mar 2024 11:13:29 +0100
Subject: [PATCH 162/172] stores: migration to update object health to 1 when size is 0

---
 .../mysql/main/migration_00005_zero_size_object_health.sql  | 1 +
 .../sqlite/main/migration_00005_zero_size_object_health.sql | 1 +
 2 files changed, 2 insertions(+)
 create mode 100644 stores/migrations/mysql/main/migration_00005_zero_size_object_health.sql
 create mode 100644 stores/migrations/sqlite/main/migration_00005_zero_size_object_health.sql

diff --git a/stores/migrations/mysql/main/migration_00005_zero_size_object_health.sql b/stores/migrations/mysql/main/migration_00005_zero_size_object_health.sql
new file mode 100644
index 000000000..1a0799394
--- /dev/null
+++ b/stores/migrations/mysql/main/migration_00005_zero_size_object_health.sql
@@ -0,0 +1 @@
+UPDATE objects SET health = 1 WHERE size = 0;
diff --git a/stores/migrations/sqlite/main/migration_00005_zero_size_object_health.sql b/stores/migrations/sqlite/main/migration_00005_zero_size_object_health.sql
new file mode 100644
index 000000000..1a0799394
--- /dev/null
+++ b/stores/migrations/sqlite/main/migration_00005_zero_size_object_health.sql
@@ -0,0 +1 @@
+UPDATE objects SET health = 1 WHERE size = 0;

From 1c496f7fa581770d4f4b6683ca5dbf41faa767fa Mon Sep 17 00:00:00 2001
From: Chris Schinnerl
Date: Wed, 6 Mar 2024 11:15:55 +0100
Subject: [PATCH 163/172] stores: extend TestRefreshHealth

---
 stores/metadata_test.go | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/stores/metadata_test.go b/stores/metadata_test.go
index 27aa26a13..f5461147c 100644
--- a/stores/metadata_test.go
+++ b/stores/metadata_test.go
@@ -3990,6 +3990,34 @@ func TestRefreshHealth(t *testing.T) {
 	} else if health(o2) != .6 {
 		t.Fatal("expected health to be .6, got", health(o2))
 	}
+
+	// add another object that is empty
+	o3 := t.Name() + "3"
+	if added, err := ss.addTestObject(o3, object.Object{
+		Key: object.GenerateEncryptionKey(),
+	}); err != nil {
+		t.Fatal(err)
+	} else if added.Health != 1 {
+		t.Fatal("expected health to be 1, got", added.Health)
+	}
+
+	// update its health to .1
+	if err := ss.db.
+		Model(&dbObject{}).
+		Where("object_id", o3).
+		Update("health", 0.1).
+		Error; err != nil {
+		t.Fatal(err)
+	} else if health(o3) != .1 {
+		t.Fatalf("expected health to be .1, got %v", health(o3))
+	}
+
+	// a refresh should not update its health
+	if err := ss.RefreshHealth(context.Background()); err != nil {
+		t.Fatal(err)
+	} else if health(o3) != .1 {
+		t.Fatalf("expected health to be .1, got %v", health(o3))
+	}
 }

 func TestSlabCleanupTrigger(t *testing.T) {

From e4e5a8eed6d2b17222aa28e9b442366840435dde Mon Sep 17 00:00:00 2001
From: Chris Schinnerl
Date: Wed, 6 Mar 2024 11:17:34 +0100
Subject: [PATCH 164/172] stores: add migration to performMigrations

---
 stores/migrations.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/stores/migrations.go b/stores/migrations.go
index d89be7ab5..cb0a38b18 100644
--- a/stores/migrations.go
+++ b/stores/migrations.go
@@ -50,6 +50,12 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error {
 				return performMigration(tx, dbIdentifier, "00004_prune_slabs_cascade", logger)
 			},
 		},
+		{
+			ID: "00005_zero_size_object_health",
+			Migrate: func(tx *gorm.DB) error {
+				return performMigration(tx, dbIdentifier, "00005_zero_size_object_health", logger)
+			},
+		},
 	}

 	// Create migrator.

From 2a39168d9a637504378689381f6c6e10df823f2b Mon Sep 17 00:00:00 2001
From: omahs <73983677+omahs@users.noreply.github.com>
Date: Wed, 6 Mar 2024 11:24:15 +0100
Subject: [PATCH 165/172] fix typos

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index bcaca045a..a4ccc8681 100644
--- a/README.md
+++ b/README.md
@@ -214,14 +214,14 @@ setting does not have a default value, it can be updated using the settings API:

 In most cases the default set should match the set from your autopilot
 configuration in order for migrations to work properly. The contract set can be
-overriden by passing it as a query string parameter to the worker's upload and
+overridden by passing it as a query string parameter to the worker's upload and
 migrate endpoints.

 - `PUT /api/worker/objects/foo?contractset=foo`

 ### Redundancy

-The default redundancy on mainnet is 30-10, on testnet it is 6-2. The redunancy
+The default redundancy on mainnet is 30-10, on testnet it is 6-2. The redundancy
 can be updated using the settings API:

 - `GET /api/bus/setting/redundancy`

From 362b45c46f1bbeed2884c4db00b0cb84898e2ae5 Mon Sep 17 00:00:00 2001
From: omahs <73983677+omahs@users.noreply.github.com>
Date: Wed, 6 Mar 2024 11:26:48 +0100
Subject: [PATCH 166/172] fix typo

---
 worker/rhpv3.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/worker/rhpv3.go b/worker/rhpv3.go
index 45a3610e2..9c280f2bd 100644
--- a/worker/rhpv3.go
+++ b/worker/rhpv3.go
@@ -471,7 +471,7 @@ func (a *account) WithWithdrawal(ctx context.Context, amtFn func() (types.Curren
 func (a *accounts) deriveAccountKey(hostKey types.PublicKey) types.PrivateKey {
 	index := byte(0) // not used yet but can be used to derive more than 1 account per host

-	// Append the the host for which to create it and the index to the
+	// Append the host for which to create it and the index to the
 	// corresponding sub-key.
 	subKey := a.key
 	data := append(subKey, hostKey[:]...)
From 1b0bd3bb02c9713174bf0814b7f3eee8499f2bc6 Mon Sep 17 00:00:00 2001
From: Chris Schinnerl
Date: Wed, 6 Mar 2024 14:47:04 +0100
Subject: [PATCH 167/172] e2e: TestHostScan

---
 api/host.go                       |  2 +-
 autopilot/contractor.go           | 13 +++--
 internal/test/e2e/cluster.go      | 13 +++--
 internal/test/e2e/cluster_test.go | 96 +++++++++++++++++++++++++++++++
 worker/worker.go                  |  1 -
 5 files changed, 113 insertions(+), 12 deletions(-)

diff --git a/api/host.go b/api/host.go
index 0ad52e8ef..aea80a9fe 100644
--- a/api/host.go
+++ b/api/host.go
@@ -112,6 +112,6 @@ func (opts HostsForScanningOptions) Apply(values url.Values) {
 		values.Set("limit", fmt.Sprint(opts.Limit))
 	}
 	if !opts.MaxLastScan.IsZero() {
-		values.Set("maxLastScan", fmt.Sprint(TimeRFC3339(opts.MaxLastScan)))
+		values.Set("lastScan", fmt.Sprint(TimeRFC3339(opts.MaxLastScan)))
 	}
 }
diff --git a/autopilot/contractor.go b/autopilot/contractor.go
index 188b55661..4e5e8c842 100644
--- a/autopilot/contractor.go
+++ b/autopilot/contractor.go
@@ -752,11 +752,14 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts
 			continue
 		}

-		// if the host doesn't have a valid pricetable, update it
-		var invalidPT bool
-		if err := refreshPriceTable(ctx, w, &host.Host); err != nil {
-			c.logger.Errorf("could not fetch price table for host %v: %v", host.PublicKey, err)
-			invalidPT = true
+		// if the host doesn't have a valid pricetable, update it if we were
+		// able to obtain a revision
+		invalidPT := contract.Revision == nil
+		if contract.Revision != nil {
+			if err := refreshPriceTable(ctx, w, &host.Host); err != nil {
+				c.logger.Errorf("could not fetch price table for host %v: %v", host.PublicKey, err)
+				invalidPT = true
+			}
 		}

 		// refresh the consensus state
diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go
index b9776d598..16b3acbfd 100644
--- a/internal/test/e2e/cluster.go
+++ b/internal/test/e2e/cluster.go
@@ -154,6 +154,7 @@ type testClusterOptions struct {
 	logger               *zap.Logger
 	uploadPacking        bool
 	skipSettingAutopilot bool
+	skipRunningAutopilot bool
 	walletKey            *types.PrivateKey

 	autopilotCfg *node.AutopilotConfig
@@ -393,11 +394,13 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster {
 		_ = autopilotServer.Serve(autopilotListener)
 		cluster.wg.Done()
 	}()
-	cluster.wg.Add(1)
-	go func() {
-		_ = aStartFn()
-		cluster.wg.Done()
-	}()
+	if !opts.skipRunningAutopilot {
+		cluster.wg.Add(1)
+		go func() {
+			_ = aStartFn()
+			cluster.wg.Done()
+		}()
+	}

 	// Set the test contract set to make sure we can add objects at the
 	// beginning of a test right away.
diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go
index c546937eb..f2ef53767 100644
--- a/internal/test/e2e/cluster_test.go
+++ b/internal/test/e2e/cluster_test.go
@@ -2448,3 +2448,99 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) {
 		t.Fatal("unexpected data")
 	}
 }
+
+func TestHostScan(t *testing.T) {
+	// New cluster with autopilot disabled
+	cfg := clusterOptsDefault
+	cfg.skipRunningAutopilot = true
+	cluster := newTestCluster(t, cfg)
+	defer cluster.Shutdown()
+
+	b := cluster.Bus
+	w := cluster.Worker
+	tt := cluster.tt
+
+	// Add a host.
+	hosts := cluster.AddHosts(2)
+	host := hosts[0]
+
+	settings, err := host.RHPv2Settings()
+	tt.OK(err)
+
+	hk := host.PublicKey()
+	hostIP := settings.NetAddress
+
+	assertHost := func(ls time.Time, lss, slss bool, ts uint64) {
+		t.Helper()
+
+		hi, err := b.Host(context.Background(), host.PublicKey())
+		tt.OK(err)
+
+		if ls.IsZero() && !hi.Interactions.LastScan.IsZero() {
+			t.Fatal("expected last scan to be zero")
+		} else if !ls.IsZero() && !hi.Interactions.LastScan.After(ls) {
+			t.Fatal("expected last scan to be after", ls)
+		} else if hi.Interactions.LastScanSuccess != lss {
+			t.Fatalf("expected last scan success to be %v, got %v", lss, hi.Interactions.LastScanSuccess)
+		} else if hi.Interactions.SecondToLastScanSuccess != slss {
+			t.Fatalf("expected second to last scan success to be %v, got %v", slss, hi.Interactions.SecondToLastScanSuccess)
+		} else if hi.Interactions.TotalScans != ts {
+			t.Fatalf("expected total scans to be %v, got %v", ts, hi.Interactions.TotalScans)
+		}
+	}
+
+	scanHost := func() error {
+		resp, err := w.RHPScan(context.Background(), hk, hostIP, 10*time.Second)
+		tt.OK(err)
+		if resp.ScanError != "" {
+			return errors.New(resp.ScanError)
+		}
+		return nil
+	}
+
+	assertHost(time.Time{}, false, false, 0)
+
+	// scan the host the first time
+	ls := time.Now()
+	if err := scanHost(); err != nil {
+		t.Fatal(err)
+	}
+	assertHost(ls, true, false, 1)
+
+	// scan the host the second time
+	ls = time.Now()
+	if err := scanHost(); err != nil {
+		t.Fatal(err)
+	}
+	assertHost(ls, true, true, 2)
+
+	// close the host to make scans fail
+	tt.OK(host.Close())
+
+	// scan the host a third time
+	ls = time.Now()
+	if err := scanHost(); err == nil {
+		t.Fatal("expected scan error")
+	}
+	assertHost(ls, false, true, 3)
+
+	// fetch hosts for scanning with maxLastScan set to now which should return
+	// all hosts
+	toScan, err := b.HostsForScanning(context.Background(), api.HostsForScanningOptions{
+		MaxLastScan: api.TimeRFC3339(time.Now()),
+	})
+	tt.OK(err)
+	if len(toScan) != 2 {
+		t.Fatalf("expected 2 hosts, got %v", len(toScan))
+	}
+
+	// fetch hosts again with the unix epoch timestamp which should only return
+	// 1 host since that one hasn't been scanned yet
+	toScan, err = b.HostsForScanning(context.Background(), api.HostsForScanningOptions{
+		MaxLastScan: api.TimeRFC3339(time.Unix(0, 1)),
+	})
+	tt.OK(err)
+	if len(toScan) != 1 {
+		t.Fatalf("expected 1 hosts, got %v", len(toScan))
+	}
+}
diff --git a/worker/worker.go b/worker/worker.go
index fcf68387f..9f4ef90ed 100644
--- a/worker/worker.go
+++ b/worker/worker.go
@@ -1386,7 +1386,6 @@ func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey ty
 func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey types.PublicKey, hostIP string) (rhpv2.HostSettings, rhpv3.HostPriceTable, time.Duration, error) {
 	logger := w.logger.With("host", hostKey).With("hostIP", hostIP).With("timeout", timeout)
-
 	// prepare a helper for scanning

From ee4d925a3055219a5850eadd4c3cdae080a4cd75 Mon Sep 17 00:00:00 2001
From: Chris Schinnerl
Date: Wed, 6 Mar 2024 15:19:34 +0100
Subject: [PATCH 168/172] e2e: update comment in TestHostScan

---
 internal/test/e2e/cluster_test.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go
index f2ef53767..25426848b 100644
--- a/internal/test/e2e/cluster_test.go
+++ b/internal/test/e2e/cluster_test.go
@@ -2460,7 +2460,8 @@ func TestHostScan(t *testing.T) {
 	w := cluster.Worker
 	tt := cluster.tt

-	// Add a host.
+	// add 2 hosts to the cluster, 1 to scan and 1 to make sure we always have 1
+	// peer and consider ourselves connected to the internet
 	hosts := cluster.AddHosts(2)
 	host := hosts[0]

From 78769d232dcfaa790662b0af1110b8471875ee44 Mon Sep 17 00:00:00 2001
From: Chris Schinnerl
Date: Wed, 6 Mar 2024 15:35:12 +0100
Subject: [PATCH 169/172] e2e: run check in retry

---
 internal/test/e2e/cluster_test.go | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go
index 25426848b..9f9699b7d 100644
--- a/internal/test/e2e/cluster_test.go
+++ b/internal/test/e2e/cluster_test.go
@@ -2527,17 +2527,20 @@ func TestHostScan(t *testing.T) {
 	// fetch hosts for scanning with maxLastScan set to now which should return
 	// all hosts
-	toScan, err := b.HostsForScanning(context.Background(), api.HostsForScanningOptions{
-		MaxLastScan: api.TimeRFC3339(time.Now()),
+	tt.Retry(100, 100*time.Millisecond, func() error {
+		toScan, err := b.HostsForScanning(context.Background(), api.HostsForScanningOptions{
+			MaxLastScan: api.TimeRFC3339(time.Now()),
+		})
+		tt.OK(err)
+		if len(toScan) != 2 {
+			return fmt.Errorf("expected 2 hosts, got %v", len(toScan))
+		}
+		return nil
 	})
-	tt.OK(err)
-	if len(toScan) != 2 {
-		t.Fatalf("expected 2 hosts, got %v", len(toScan))
-	}

 	// fetch hosts again with the unix epoch timestamp which should only return
 	// 1 host since that one hasn't been scanned yet
-	toScan, err = b.HostsForScanning(context.Background(), api.HostsForScanningOptions{
+	toScan, err := b.HostsForScanning(context.Background(), api.HostsForScanningOptions{
 		MaxLastScan: api.TimeRFC3339(time.Unix(0, 1)),
 	})
 	tt.OK(err)

From e4ab18af4f3f55ff5d32e3e9bbfff02d48a8ed63 Mon Sep 17 00:00:00 2001
From: Chris Schinnerl
Date: Wed, 6 Mar 2024 16:00:55 +0100
Subject: [PATCH 170/172] e2e: sleep before scan

---
 internal/test/e2e/cluster_test.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go
index 9f9699b7d..4c96cc989 100644
--- a/internal/test/e2e/cluster_test.go
+++ b/internal/test/e2e/cluster_test.go
@@ -2491,6 +2491,10 @@ func TestHostScan(t *testing.T) {
 	}

 	scanHost := func() error {
+		// timing on the CI can be weird, wait a bit to make sure time passes
+		// between scans
+		time.Sleep(time.Millisecond)
+
 		resp, err := w.RHPScan(context.Background(), hk, hostIP, 10*time.Second)
 		tt.OK(err)
 		if resp.ScanError != "" {

From e9ba938876e020ad5094cd51ae09ba80356a9f40 Mon Sep 17 00:00:00 2001
From: PJ
Date: Wed, 6 Mar 2024 16:56:23 +0100
Subject: [PATCH 171/172] worker: implement CR remarks

---
 api/object.go           | 49 ++------------------------------------
 worker/client/client.go | 52 ++++++++++++++++++++++++++++++++-------
 worker/worker.go        |  3 ++-
 3 files changed, 48 insertions(+), 56 deletions(-)

diff --git a/api/object.go b/api/object.go
index f53dbb5ac..4b1993341 100644
--- a/api/object.go
+++ b/api/object.go
@@ -85,13 +85,8 @@ type (
 	// GetObjectResponse is the response type for the GET /worker/object endpoint.
 	GetObjectResponse struct {
-		Content      io.ReadCloser      `json:"content"`
-		ContentType  string             `json:"contentType"`
-		LastModified string             `json:"lastModified"`
-		Range        *DownloadRange     `json:"range,omitempty"`
-		Size         int64              `json:"size"`
-		Metadata     ObjectUserMetadata `json:"metadata"`
-		// NOTE: keep HeadObjectResponse in sync with this type
+		Content io.ReadCloser `json:"content"`
+		HeadObjectResponse
 	}

 	// HeadObjectResponse is the response type for the HEAD /worker/object endpoint.
@@ -145,46 +140,6 @@ type (
 	}
 )

-func ParseObjectHeadResponseFrom(header http.Header) (HeadObjectResponse, error) {
-	// parse size
-	var size int64
-	_, err := fmt.Sscan(header.Get("Content-Length"), &size)
-	if err != nil {
-		return HeadObjectResponse{}, err
-	}
-
-	// parse range
-	var r *DownloadRange
-	if cr := header.Get("Content-Range"); cr != "" {
-		dr, err := ParseDownloadRange(cr)
-		if err != nil {
-			return HeadObjectResponse{}, err
-		}
-		r = &dr
-
-		// if a range is set, the size is the size extracted from the range
-		// since Content-Length will then only be the length of the returned
-		// range.
-		size = dr.Size
-	}
-
-	// parse headers
-	headers := make(map[string]string)
-	for k, v := range header {
-		if len(v) > 0 {
-			headers[k] = v[0]
-		}
-	}
-
-	return HeadObjectResponse{
-		ContentType:  header.Get("Content-Type"),
-		LastModified: header.Get("Last-Modified"),
-		Range:        r,
-		Size:         size,
-		Metadata:     ExtractObjectUserMetadataFrom(headers),
-	}, nil
-}
-
 func ExtractObjectUserMetadataFrom(metadata map[string]string) ObjectUserMetadata {
 	oum := make(map[string]string)
 	for k, v := range metadata {
diff --git a/worker/client/client.go b/worker/client/client.go
index ff8625541..410e4c66e 100644
--- a/worker/client/client.go
+++ b/worker/client/client.go
@@ -107,7 +107,7 @@ func (c *Client) HeadObject(ctx context.Context, bucket, path string, opts api.H
 		return nil, errors.New(string(err))
 	}

-	head, err := api.ParseObjectHeadResponseFrom(resp.Header)
+	head, err := parseObjectResponseHeaders(resp.Header)
 	if err != nil {
 		return nil, err
 	}
@@ -132,18 +132,14 @@ func (c *Client) GetObject(ctx context.Context, bucket, path string, opts api.Do
 		}
 	}()

-	head, err := api.ParseObjectHeadResponseFrom(header)
+	head, err := parseObjectResponseHeaders(header)
 	if err != nil {
 		return nil, err
 	}

 	return &api.GetObjectResponse{
-		Content:      body,
-		ContentType:  head.ContentType,
-		LastModified: head.LastModified,
-		Range:        head.Range,
-		Size:         head.Size,
-		Metadata:     head.Metadata,
+		Content:            body,
+		HeadObjectResponse: head,
 	}, nil
 }
@@ -296,6 +292,46 @@ func (c *Client) object(ctx context.Context, bucket, path string, opts api.Downl
 	return resp.Body, resp.Header, err
 }

+func parseObjectResponseHeaders(header http.Header) (api.HeadObjectResponse, error) {
+	// parse size
+	var size int64
+	_, err := fmt.Sscan(header.Get("Content-Length"), &size)
+	if err != nil {
+		return api.HeadObjectResponse{}, err
+	}
+
+	// parse range
+	var r *api.DownloadRange
+	if cr := header.Get("Content-Range"); cr != "" {
+		dr, err := api.ParseDownloadRange(cr)
+		if err != nil {
+			return api.HeadObjectResponse{}, err
+		}
+		r = &dr
+
+		// if a range is set, the size is the size extracted from the range
+		// since Content-Length will then only be the length of the returned
+		// range.
+		size = dr.Size
+	}
+
+	// parse headers
+	headers := make(map[string]string)
+	for k, v := range header {
+		if len(v) > 0 {
+			headers[k] = v[0]
+		}
+	}
+
+	return api.HeadObjectResponse{
+		ContentType:  header.Get("Content-Type"),
+		LastModified: header.Get("Last-Modified"),
+		Range:        r,
+		Size:         size,
+		Metadata:     api.ExtractObjectUserMetadataFrom(headers),
+	}, nil
+}
+
 func sizeFromSeeker(r io.Reader) (int64, error) {
 	s, ok := r.(io.Seeker)
 	if !ok {
diff --git a/worker/worker.go b/worker/worker.go
index 729c544c7..498269bf4 100644
--- a/worker/worker.go
+++ b/worker/worker.go
@@ -889,7 +889,8 @@ func (w *worker) objectsHandlerHEAD(jc jape.Context) {
 		return
 	}

-	// serve the content
+	// serve the content to ensure we're setting the exact same headers as we
+	// would for a GET request
 	status, err := serveContent(jc.ResponseWriter, jc.Request, *res.Object, func(io.Writer, int64, int64) error { return nil })
 	if errors.Is(err, http_range.ErrInvalid) || errors.Is(err, errMultiRangeNotSupported) {
 		jc.Error(err, http.StatusBadRequest)

From 968dc1c3626f30f601a37b58a3b6afba89ded88a Mon Sep 17 00:00:00 2001
From: Chris Schinnerl
Date: Thu, 7 Mar 2024 13:19:19 +0100
Subject: [PATCH 172/172] e2e: gofmt

---
 internal/test/e2e/cluster_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go
index 7fe59dbb0..5ca7141d5 100644
--- a/internal/test/e2e/cluster_test.go
+++ b/internal/test/e2e/cluster_test.go
@@ -2488,7 +2488,7 @@ func TestWalletRedistribute(t *testing.T) {
 	// assert redistributing into 3 outputs succeeds, used to fail because we
 	// were broadcasting an empty transaction set
 	_, err = cluster.Bus.WalletRedistribute(context.Background(), 3, types.Siacoins(10))
 	if err != nil {
 		t.Fatal(err)
-	} 
+	}
 }
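To close, a usage sketch for the HEAD object endpoint introduced in patch 156 and reworked in patch 171. Only HeadObject, HeadObjectOptions, DownloadRange, and the response fields appear in the patches above; the client.New constructor, the worker address, and the password are assumptions for illustration and should be adjusted to the actual deployment.

package main

import (
	"context"
	"fmt"
	"log"

	"go.sia.tech/renterd/api"
	"go.sia.tech/renterd/worker/client"
)

func main() {
	// assumed constructor and address; adjust to your deployment
	w := client.New("http://localhost:9980/api/worker", "my-api-password")

	// request the first byte of the object; with a range set, the response's
	// Size is taken from Content-Range rather than Content-Length, so it still
	// reflects the full object size
	hor, err := w.HeadObject(context.Background(), api.DefaultBucketName, "foo",
		api.HeadObjectOptions{Range: api.DownloadRange{Offset: 0, Length: 1}})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hor.ContentType, hor.Size, hor.Metadata)
}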