diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1a62354e2..12771a888 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,12 +24,32 @@ The following emojis are used to highlight certain changes:
 
 ### Security
 
+## [v0.22.0]
+
+### Changed
+
+- `go-libp2p` dependency updated to [v0.36 (release notes)](https://github.com/libp2p/go-libp2p/releases/tag/v0.36.1)
+- `bitswap/server` minor memory use and performance improvements
+- `bitswap` logger names unified to the uniform `bitswap/path/pkgname` format
+- `gateway` now always returns meaningful cache-control headers for generated HTML listings of UnixFS directories
+- random test data is now generated using `ipfs/go-test` instead of internal util code
+
+### Removed
+
+- `util` logic for generating random test data moved to [`ipfs/go-test/random`](https://github.com/ipfs/go-test)
+
+### Fixed
+
+- `boxo/gateway` now correctly returns 404 Not Found instead of 500 Internal Server Error when the requested content cannot be found because the exchange is offline, the gateway is running in no-fetch (non-recursive) mode, or a similar restriction limits it to a specific set of CIDs.
+- `bitswap/client` fixed a memory leak in BlockPresenceManager caused by unbounded map growth.
+- `bitswap/network` fixed a race condition that occurred when a timeout fired before hole punching completed while establishing a first-time stream to a peer behind a NAT.
+
 ## [v0.21.0]
 
 ### Changed
 
 - `boxo/gateway` is now tested against [gateway-conformance v6](https://github.com/ipfs/gateway-conformance/releases/tag/v0.6.0)
-- `bitswap/client` supports additional tracing
+- `bitswap/client` supports additional tracing
 
 ### Removed
 
@@ -39,6 +59,7 @@ The following emojis are used to highlight certain changes:
 
 - `routing/http`: the `FindPeer` now returns `routing.ErrNotFound` when no addresses are found
 - `routing/http`: the `FindProvidersAsync` no longer causes a goroutine buildup
+- `bitswap`: wantlist overflow handling now cancels existing entries to make room for newer entries. This fix prevents the wantlist from filling up with CIDs that the server does not have.
 
 ## [v0.20.0]
 
diff --git a/README.md b/README.md
index bd824f512..ed8ac109f 100644
--- a/README.md
+++ b/README.md
@@ -85,7 +85,7 @@ Boxo includes high-quality components useful for interacting with IPFS protocols
 - Interacting with public and private IPFS networks
 - Working with content-addressed data
 
-Boxo aims to provide a cohesive interface into these components. Note that not all of the underlying components necessarily reside in this respository.
+Boxo aims to provide a cohesive interface into these components. Note that not all of the underlying components necessarily reside in this repository.
 
 ### Does Boxo == IPFS?
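The test changes below replace the internal `bitswap/internal/testutil` helpers with `github.com/ipfs/go-test/random`. A minimal sketch of how the replacement helpers are used, with names and argument types taken from the call sites in this diff (note that `BlocksOfSize` takes a plain `int` byte size, hence the `int(blockSize)` conversions below):

```go
package random_test

import (
	"testing"

	"github.com/ipfs/go-test/random"
)

func TestRandomTestData(t *testing.T) {
	// random.Peers returns n random peer.IDs, replacing testutil.GeneratePeers.
	peers := random.Peers(2)
	// random.Cids returns n random CIDs, replacing testutil.GenerateCids.
	cids := random.Cids(3)
	// random.BlocksOfSize returns n blocks of the given byte size,
	// replacing testutil.GenerateBlocksOfSize.
	blks := random.BlocksOfSize(4, 1024)

	if len(peers) != 2 || len(cids) != 3 || len(blks) != 4 {
		t.Fatal("unexpected number of generated test fixtures")
	}
}
```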
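The BlockPresenceManager leak fix below rests on three changes: the presence map is allocated lazily on the first `ReceiveFrom`, it is reset to `nil` whenever it empties, and a new `RemovePeer` purges a disconnected peer from every CID entry. Since the package is internal, here is a self-contained sketch of that allocation pattern only, using plain strings in place of `cid.Cid` and `peer.ID` (all names are illustrative, not the package's API):

```go
package example

import "sync"

// presenceMap sketches the lazy-allocate / nil-on-empty pattern used by
// BlockPresenceManager: the outer map exists only while it has entries,
// so an idle tracker holds no memory.
type presenceMap struct {
	sync.RWMutex
	presence map[string]map[string]bool // cid -> peer -> has block
}

func (pm *presenceMap) receiveFrom(peer, cid string, has bool) {
	pm.Lock()
	defer pm.Unlock()
	if pm.presence == nil { // lazily allocate on first write
		pm.presence = make(map[string]map[string]bool)
	}
	peers, ok := pm.presence[cid]
	if !ok {
		peers = make(map[string]bool)
		pm.presence[cid] = peers
	}
	peers[peer] = has
}

// removePeer deletes the peer from every cid entry, drops entries that
// become empty, and frees the outer map once nothing is tracked.
func (pm *presenceMap) removePeer(peer string) {
	pm.Lock()
	defer pm.Unlock()
	for cid, peers := range pm.presence {
		delete(peers, peer)
		if len(peers) == 0 {
			delete(pm.presence, cid)
		}
	}
	if len(pm.presence) == 0 {
		pm.presence = nil // release memory; the next write re-allocates
	}
}
```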
diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index 80eb373ab..bd8f342ea 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -12,8 +12,8 @@ import ( "testing" "time" - "github.com/ipfs/boxo/bitswap/internal/testutil" blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-test/random" protocol "github.com/libp2p/go-libp2p/core/protocol" "github.com/ipfs/boxo/bitswap" @@ -169,8 +169,8 @@ func BenchmarkFetchFromOldBitswap(b *testing.B) { testinstance.ConnectInstances(instances) // Generate blocks, with a smaller root block - rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) - blocks := testutil.GenerateBlocksOfSize(bch.blockCount, stdBlockSize) + rootBlock := random.BlocksOfSize(1, rootBlockSize) + blocks := random.BlocksOfSize(bch.blockCount, stdBlockSize) blocks[0] = rootBlock[0] // Run the distribution @@ -300,7 +300,7 @@ func BenchmarkDatacenterMultiLeechMultiSeed(b *testing.B) { defer ig.Close() instances := ig.Instances(numnodes) - blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) + blocks := random.BlocksOfSize(numblks, int(blockSize)) runDistributionMulti(b, instances[:3], instances[3:], blocks, bstoreLatency, df, ff) } }) @@ -317,8 +317,8 @@ func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, b ig := testinstance.NewTestInstanceGenerator(net, nil, nil) instances := ig.Instances(numnodes) - rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) - blocks := testutil.GenerateBlocksOfSize(numblks, stdBlockSize) + rootBlock := random.BlocksOfSize(1, rootBlockSize) + blocks := random.BlocksOfSize(numblks, stdBlockSize) blocks[0] = rootBlock[0] runDistribution(b, instances, blocks, bstoreLatency, df, ff) ig.Close() @@ -333,8 +333,8 @@ func subtestDistributeAndFetchRateLimited(b *testing.B, numnodes, numblks int, d defer ig.Close() instances := ig.Instances(numnodes) - rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) - blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) + rootBlock := random.BlocksOfSize(1, rootBlockSize) + blocks := random.BlocksOfSize(numblks, int(blockSize)) blocks[0] = rootBlock[0] runDistribution(b, instances, blocks, bstoreLatency, df, ff) } diff --git a/bitswap/client/client.go b/bitswap/client/client.go index aa9ab78fa..0a5bdeb9e 100644 --- a/bitswap/client/client.go +++ b/bitswap/client/client.go @@ -38,7 +38,7 @@ import ( "go.opentelemetry.io/otel/trace" ) -var log = logging.Logger("bitswap-client") +var log = logging.Logger("bitswap/client") // Option defines the functional option type that can be used to configure // bitswap instances diff --git a/bitswap/client/internal/blockpresencemanager/blockpresencemanager.go b/bitswap/client/internal/blockpresencemanager/blockpresencemanager.go index 1b76acc5b..685981381 100644 --- a/bitswap/client/internal/blockpresencemanager/blockpresencemanager.go +++ b/bitswap/client/internal/blockpresencemanager/blockpresencemanager.go @@ -15,9 +15,7 @@ type BlockPresenceManager struct { } func New() *BlockPresenceManager { - return &BlockPresenceManager{ - presence: make(map[cid.Cid]map[peer.ID]bool), - } + return &BlockPresenceManager{} } // ReceiveFrom is called when a peer sends us information about which blocks @@ -26,6 +24,10 @@ func (bpm *BlockPresenceManager) ReceiveFrom(p peer.ID, haves []cid.Cid, dontHav bpm.Lock() defer bpm.Unlock() + if bpm.presence == nil { + bpm.presence = make(map[cid.Cid]map[peer.ID]bool) + } + for _, c := range haves { bpm.updateBlockPresence(p, c, true) } @@ -75,6 +77,10 
@@ func (bpm *BlockPresenceManager) AllPeersDoNotHaveBlock(peers []peer.ID, ks []ci bpm.RLock() defer bpm.RUnlock() + if len(bpm.presence) == 0 { + return nil + } + var res []cid.Cid for _, c := range ks { if bpm.allDontHave(peers, c) { @@ -90,6 +96,9 @@ func (bpm *BlockPresenceManager) allDontHave(peers []peer.ID, c cid.Cid) bool { if !cok { return false } + if len(ps) == 0 { + return false + } // Check if we explicitly know that all the given peers do not have the cid for _, p := range peers { @@ -108,6 +117,25 @@ func (bpm *BlockPresenceManager) RemoveKeys(ks []cid.Cid) { for _, c := range ks { delete(bpm.presence, c) } + if len(bpm.presence) == 0 { + bpm.presence = nil + } +} + +// RemovePeer removes the given peer from every cid key in the presence map. +func (bpm *BlockPresenceManager) RemovePeer(p peer.ID) { + bpm.Lock() + defer bpm.Unlock() + + for c, pm := range bpm.presence { + delete(pm, p) + if len(pm) == 0 { + delete(bpm.presence, c) + } + } + if len(bpm.presence) == 0 { + bpm.presence = nil + } } // HasKey indicates whether the BlockPresenceManager is tracking the given key diff --git a/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go b/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go index b977c28ff..bde71676a 100644 --- a/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go +++ b/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go @@ -3,9 +3,10 @@ package blockpresencemanager import ( "testing" - "github.com/ipfs/boxo/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" peer "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" ) const ( @@ -18,90 +19,72 @@ const ( func TestBlockPresenceManager(t *testing.T) { bpm := New() - p := testutil.GeneratePeers(1)[0] - cids := testutil.GenerateCids(2) + p := random.Peers(1)[0] + cids := random.Cids(2) c0 := cids[0] c1 := cids[1] // Nothing stored yet, both PeerHasBlock and PeerDoesNotHaveBlock should // return false - if bpm.PeerHasBlock(p, c0) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p, c0) { - t.Fatal(expDoesNotHaveFalseMsg) - } + require.False(t, bpm.PeerHasBlock(p, c0), expHasFalseMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p, c0), expDoesNotHaveFalseMsg) // HAVE cid0 / DONT_HAVE cid1 bpm.ReceiveFrom(p, []cid.Cid{c0}, []cid.Cid{c1}) // Peer has received HAVE for cid0 - if !bpm.PeerHasBlock(p, c0) { - t.Fatal(expHasTrueMsg) - } - if bpm.PeerDoesNotHaveBlock(p, c0) { - t.Fatal(expDoesNotHaveFalseMsg) - } + require.True(t, bpm.PeerHasBlock(p, c0), expHasTrueMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p, c0), expDoesNotHaveFalseMsg) // Peer has received DONT_HAVE for cid1 - if !bpm.PeerDoesNotHaveBlock(p, c1) { - t.Fatal(expDoesNotHaveTrueMsg) - } - if bpm.PeerHasBlock(p, c1) { - t.Fatal(expHasFalseMsg) - } + require.True(t, bpm.PeerDoesNotHaveBlock(p, c1), expDoesNotHaveTrueMsg) + require.False(t, bpm.PeerHasBlock(p, c1), expHasFalseMsg) // HAVE cid1 / DONT_HAVE cid0 bpm.ReceiveFrom(p, []cid.Cid{c1}, []cid.Cid{c0}) // DONT_HAVE cid0 should NOT over-write earlier HAVE cid0 - if bpm.PeerDoesNotHaveBlock(p, c0) { - t.Fatal(expDoesNotHaveFalseMsg) - } - if !bpm.PeerHasBlock(p, c0) { - t.Fatal(expHasTrueMsg) - } + require.False(t, bpm.PeerDoesNotHaveBlock(p, c0), expDoesNotHaveFalseMsg) + require.True(t, bpm.PeerHasBlock(p, c0), expHasTrueMsg) // HAVE cid1 should over-write earlier DONT_HAVE cid1 - if !bpm.PeerHasBlock(p, c1) { - t.Fatal(expHasTrueMsg) 
- } - if bpm.PeerDoesNotHaveBlock(p, c1) { - t.Fatal(expDoesNotHaveFalseMsg) - } + require.True(t, bpm.PeerHasBlock(p, c1), expHasTrueMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p, c1), expDoesNotHaveFalseMsg) // Remove cid0 bpm.RemoveKeys([]cid.Cid{c0}) // Nothing stored, both PeerHasBlock and PeerDoesNotHaveBlock should // return false - if bpm.PeerHasBlock(p, c0) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p, c0) { - t.Fatal(expDoesNotHaveFalseMsg) - } + require.False(t, bpm.PeerHasBlock(p, c0), expHasFalseMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p, c0), expDoesNotHaveFalseMsg) // Remove cid1 bpm.RemoveKeys([]cid.Cid{c1}) // Nothing stored, both PeerHasBlock and PeerDoesNotHaveBlock should // return false - if bpm.PeerHasBlock(p, c1) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p, c1) { - t.Fatal(expDoesNotHaveFalseMsg) - } + require.False(t, bpm.PeerHasBlock(p, c1), expHasFalseMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p, c1), expDoesNotHaveFalseMsg) + + bpm.ReceiveFrom(p, []cid.Cid{c0}, []cid.Cid{c1}) + require.True(t, bpm.PeerHasBlock(p, c0), expHasTrueMsg) + require.True(t, bpm.PeerDoesNotHaveBlock(p, c1), expDoesNotHaveTrueMsg) + + bpm.RemovePeer(p) + require.False(t, bpm.PeerHasBlock(p, c0), expHasFalseMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p, c0), expDoesNotHaveFalseMsg) + require.False(t, bpm.PeerHasBlock(p, c1), expHasFalseMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p, c1), expDoesNotHaveFalseMsg) } func TestAddRemoveMulti(t *testing.T) { bpm := New() - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) p0 := peers[0] p1 := peers[1] - cids := testutil.GenerateCids(3) + cids := random.Cids(3) c0 := cids[0] c1 := cids[1] c2 := cids[2] @@ -115,78 +98,46 @@ func TestAddRemoveMulti(t *testing.T) { // - HAVE cid0 // - HAVE cid1 // - DONT_HAVE cid2 - if !bpm.PeerHasBlock(p0, c0) { - t.Fatal(expHasTrueMsg) - } - if !bpm.PeerHasBlock(p0, c1) { - t.Fatal(expHasTrueMsg) - } - if !bpm.PeerDoesNotHaveBlock(p0, c2) { - t.Fatal(expDoesNotHaveTrueMsg) - } + require.True(t, bpm.PeerHasBlock(p0, c0), expHasTrueMsg) + require.True(t, bpm.PeerHasBlock(p0, c1), expHasTrueMsg) + require.True(t, bpm.PeerDoesNotHaveBlock(p0, c2), expDoesNotHaveTrueMsg) // Peer 1 should end up with // - HAVE cid1 // - HAVE cid2 // - DONT_HAVE cid0 - if !bpm.PeerHasBlock(p1, c1) { - t.Fatal(expHasTrueMsg) - } - if !bpm.PeerHasBlock(p1, c2) { - t.Fatal(expHasTrueMsg) - } - if !bpm.PeerDoesNotHaveBlock(p1, c0) { - t.Fatal(expDoesNotHaveTrueMsg) - } + require.True(t, bpm.PeerHasBlock(p1, c1), expHasTrueMsg) + require.True(t, bpm.PeerHasBlock(p1, c2), expHasTrueMsg) + require.True(t, bpm.PeerDoesNotHaveBlock(p1, c0), expDoesNotHaveTrueMsg) // Remove cid1 and cid2. 
Should end up with // Peer 0: HAVE cid0 // Peer 1: DONT_HAVE cid0 bpm.RemoveKeys([]cid.Cid{c1, c2}) - if !bpm.PeerHasBlock(p0, c0) { - t.Fatal(expHasTrueMsg) - } - if !bpm.PeerDoesNotHaveBlock(p1, c0) { - t.Fatal(expDoesNotHaveTrueMsg) - } + require.True(t, bpm.PeerHasBlock(p0, c0), expHasTrueMsg) + require.True(t, bpm.PeerDoesNotHaveBlock(p1, c0), expDoesNotHaveTrueMsg) // The other keys should have been cleared, so both HasBlock() and // DoesNotHaveBlock() should return false - if bpm.PeerHasBlock(p0, c1) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p0, c1) { - t.Fatal(expDoesNotHaveFalseMsg) - } - if bpm.PeerHasBlock(p0, c2) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p0, c2) { - t.Fatal(expDoesNotHaveFalseMsg) - } - if bpm.PeerHasBlock(p1, c1) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p1, c1) { - t.Fatal(expDoesNotHaveFalseMsg) - } - if bpm.PeerHasBlock(p1, c2) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p1, c2) { - t.Fatal(expDoesNotHaveFalseMsg) - } + require.False(t, bpm.PeerHasBlock(p0, c1), expHasFalseMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p0, c1), expDoesNotHaveFalseMsg) + require.False(t, bpm.PeerHasBlock(p0, c2), expHasFalseMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p0, c2), expDoesNotHaveFalseMsg) + require.False(t, bpm.PeerHasBlock(p1, c1), expHasFalseMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p1, c1), expDoesNotHaveFalseMsg) + require.False(t, bpm.PeerHasBlock(p1, c2), expHasFalseMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p1, c2), expDoesNotHaveFalseMsg) } func TestAllPeersDoNotHaveBlock(t *testing.T) { bpm := New() - peers := testutil.GeneratePeers(3) + peers := random.Peers(3) p0 := peers[0] p1 := peers[1] p2 := peers[2] - cids := testutil.GenerateCids(3) + cids := random.Cids(3) c0 := cids[0] c1 := cids[1] c2 := cids[2] @@ -227,11 +178,7 @@ func TestAllPeersDoNotHaveBlock(t *testing.T) { } for i, tc := range testcases { - if !testutil.MatchKeysIgnoreOrder( - bpm.AllPeersDoNotHaveBlock(tc.peers, tc.ks), - tc.exp, - ) { - t.Fatalf("test case %d failed: expected matching keys", i) - } + require.ElementsMatchf(t, bpm.AllPeersDoNotHaveBlock(tc.peers, tc.ks), tc.exp, + "test case %d failed: expected matching keys", i) } } diff --git a/bitswap/client/internal/getter/getter.go b/bitswap/client/internal/getter/getter.go index 822d319b7..c03b2aecc 100644 --- a/bitswap/client/internal/getter/getter.go +++ b/bitswap/client/internal/getter/getter.go @@ -13,7 +13,7 @@ import ( ipld "github.com/ipfs/go-ipld-format" ) -var log = logging.Logger("bitswap") +var log = logging.Logger("bitswap/client/getter") // GetBlocksFunc is any function that can take an array of CIDs and return a // channel of incoming blocks. diff --git a/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go b/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go index 038213c25..ee478e605 100644 --- a/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go +++ b/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go @@ -8,8 +8,8 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/ipfs/boxo/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" "github.com/libp2p/go-libp2p/p2p/protocol/ping" ) @@ -73,8 +73,8 @@ func (tr *timeoutRecorder) clear() { } func TestDontHaveTimeoutMgrTimeout(t *testing.T) { - firstks := testutil.GenerateCids(2) - secondks := append(firstks, testutil.GenerateCids(3)...) 
+ firstks := random.Cids(2) + secondks := append(firstks, random.Cids(3)...) latency := time.Millisecond * 20 latMultiplier := 2 expProcessTime := 5 * time.Millisecond @@ -129,7 +129,7 @@ func TestDontHaveTimeoutMgrTimeout(t *testing.T) { } func TestDontHaveTimeoutMgrCancel(t *testing.T) { - ks := testutil.GenerateCids(3) + ks := random.Cids(3) latency := time.Millisecond * 10 latMultiplier := 1 expProcessTime := time.Duration(0) @@ -165,7 +165,7 @@ func TestDontHaveTimeoutMgrCancel(t *testing.T) { } func TestDontHaveTimeoutWantCancelWant(t *testing.T) { - ks := testutil.GenerateCids(3) + ks := random.Cids(3) latency := time.Millisecond * 20 latMultiplier := 1 expProcessTime := time.Duration(0) @@ -218,7 +218,7 @@ func TestDontHaveTimeoutWantCancelWant(t *testing.T) { } func TestDontHaveTimeoutRepeatedAddPending(t *testing.T) { - ks := testutil.GenerateCids(10) + ks := random.Cids(10) latency := time.Millisecond * 5 latMultiplier := 1 expProcessTime := time.Duration(0) @@ -251,7 +251,7 @@ func TestDontHaveTimeoutRepeatedAddPending(t *testing.T) { } func TestDontHaveTimeoutMgrMessageLatency(t *testing.T) { - ks := testutil.GenerateCids(2) + ks := random.Cids(2) latency := time.Millisecond * 40 latMultiplier := 1 expProcessTime := time.Duration(0) @@ -300,7 +300,7 @@ func TestDontHaveTimeoutMgrMessageLatency(t *testing.T) { } func TestDontHaveTimeoutMgrMessageLatencyMax(t *testing.T) { - ks := testutil.GenerateCids(2) + ks := random.Cids(2) clock := clock.NewMock() pinged := make(chan struct{}) pc := &mockPeerConn{latency: time.Second, clock: clock, pinged: pinged} @@ -333,7 +333,7 @@ func TestDontHaveTimeoutMgrMessageLatencyMax(t *testing.T) { } func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfPingError(t *testing.T) { - ks := testutil.GenerateCids(2) + ks := random.Cids(2) latency := time.Millisecond * 1 latMultiplier := 2 expProcessTime := 2 * time.Millisecond @@ -374,7 +374,7 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfPingError(t *testing.T) { } func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { - ks := testutil.GenerateCids(2) + ks := random.Cids(2) latency := time.Millisecond * 200 latMultiplier := 1 expProcessTime := time.Duration(0) @@ -414,7 +414,7 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { } func TestDontHaveTimeoutNoTimeoutAfterShutdown(t *testing.T) { - ks := testutil.GenerateCids(2) + ks := random.Cids(2) latency := time.Millisecond * 10 latMultiplier := 1 expProcessTime := time.Duration(0) diff --git a/bitswap/client/internal/messagequeue/messagequeue.go b/bitswap/client/internal/messagequeue/messagequeue.go index 4f90f239b..fac72f7cd 100644 --- a/bitswap/client/internal/messagequeue/messagequeue.go +++ b/bitswap/client/internal/messagequeue/messagequeue.go @@ -19,7 +19,7 @@ import ( ) var ( - log = logging.Logger("bitswap") + log = logging.Logger("bitswap/client/msgq") sflog = log.Desugar() ) diff --git a/bitswap/client/internal/messagequeue/messagequeue_test.go b/bitswap/client/internal/messagequeue/messagequeue_test.go index 4d361c5d5..3a9c21309 100644 --- a/bitswap/client/internal/messagequeue/messagequeue_test.go +++ b/bitswap/client/internal/messagequeue/messagequeue_test.go @@ -10,16 +10,17 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/ipfs/boxo/bitswap/internal/testutil" bsmsg "github.com/ipfs/boxo/bitswap/message" pb "github.com/ipfs/boxo/bitswap/message/pb" bsnet "github.com/ipfs/boxo/bitswap/network" - "github.com/ipfs/boxo/internal/test" cid "github.com/ipfs/go-cid" + 
"github.com/ipfs/go-test/random" peer "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/p2p/protocol/ping" ) +const collectTimeout = 200 * time.Millisecond + type fakeMessageNetwork struct { connectError error messageSenderError error @@ -166,13 +167,13 @@ func TestStartupAndShutdown(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - bcstwh := testutil.GenerateCids(10) + bcstwh := random.Cids(10) messageQueue.Startup() messageQueue.AddBroadcastWantHaves(bcstwh) - messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + messages := collectMessages(ctx, t, messagesSent, collectTimeout) if len(messages) != 1 { t.Fatal("wrong number of messages were sent for broadcast want-haves") } @@ -204,15 +205,15 @@ func TestSendingMessagesDeduped(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - wantHaves := testutil.GenerateCids(10) - wantBlocks := testutil.GenerateCids(10) + wantHaves := random.Cids(10) + wantBlocks := random.Cids(10) messageQueue.Startup() messageQueue.AddWants(wantBlocks, wantHaves) messageQueue.AddWants(wantBlocks, wantHaves) - messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + messages := collectMessages(ctx, t, messagesSent, collectTimeout) if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { t.Fatal("Messages were not deduped") @@ -220,22 +221,20 @@ func TestSendingMessagesDeduped(t *testing.T) { } func TestSendingMessagesPartialDupe(t *testing.T) { - test.Flaky(t) - ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - wantHaves := testutil.GenerateCids(10) - wantBlocks := testutil.GenerateCids(10) + wantHaves := random.Cids(10) + wantBlocks := random.Cids(10) messageQueue.Startup() messageQueue.AddWants(wantBlocks[:8], wantHaves[:8]) messageQueue.AddWants(wantBlocks[3:], wantHaves[3:]) - messages := collectMessages(ctx, t, messagesSent, 20*time.Millisecond) + messages := collectMessages(ctx, t, messagesSent, 5*collectTimeout) if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { t.Fatal("messages were not correctly deduped") @@ -243,26 +242,24 @@ func TestSendingMessagesPartialDupe(t *testing.T) { } func TestSendingMessagesPriority(t *testing.T) { - test.Flaky(t) - ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - wantHaves1 := testutil.GenerateCids(5) - wantHaves2 := testutil.GenerateCids(5) + wantHaves1 := random.Cids(5) + wantHaves2 := random.Cids(5) wantHaves := append(wantHaves1, wantHaves2...) 
- wantBlocks1 := testutil.GenerateCids(5) - wantBlocks2 := testutil.GenerateCids(5) + wantBlocks1 := random.Cids(5) + wantBlocks2 := random.Cids(5) wantBlocks := append(wantBlocks1, wantBlocks2...) messageQueue.Startup() messageQueue.AddWants(wantBlocks1, wantHaves1) messageQueue.AddWants(wantBlocks2, wantHaves2) - messages := collectMessages(ctx, t, messagesSent, 20*time.Millisecond) + messages := collectMessages(ctx, t, messagesSent, 5*collectTimeout) if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { t.Fatal("wrong number of wants") @@ -317,17 +314,17 @@ func TestCancelOverridesPendingWants(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - wantHaves := testutil.GenerateCids(2) - wantBlocks := testutil.GenerateCids(2) + wantHaves := random.Cids(2) + wantBlocks := random.Cids(2) cancels := []cid.Cid{wantBlocks[0], wantHaves[0]} messageQueue.Startup() messageQueue.AddWants(wantBlocks, wantHaves) messageQueue.AddCancels(cancels) - messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + messages := collectMessages(ctx, t, messagesSent, collectTimeout) if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks)-len(cancels) { t.Fatal("Wrong message count") @@ -351,7 +348,7 @@ func TestCancelOverridesPendingWants(t *testing.T) { // Cancel the remaining want-blocks and want-haves cancels = append(wantHaves, wantBlocks...) messageQueue.AddCancels(cancels) - messages = collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + messages = collectMessages(ctx, t, messagesSent, collectTimeout) // The remaining 2 cancels should be sent to the network as they are for // wants that were sent to the network @@ -367,10 +364,10 @@ func TestWantOverridesPendingCancels(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - cids := testutil.GenerateCids(3) + cids := random.Cids(3) wantBlocks := cids[:1] wantHaves := cids[1:] @@ -379,7 +376,7 @@ func TestWantOverridesPendingCancels(t *testing.T) { // Add 1 want-block and 2 want-haves messageQueue.AddWants(wantBlocks, wantHaves) - messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + messages := collectMessages(ctx, t, messagesSent, collectTimeout) if totalEntriesLength(messages) != len(wantBlocks)+len(wantHaves) { t.Fatal("Wrong message count", totalEntriesLength(messages)) } @@ -389,7 +386,7 @@ func TestWantOverridesPendingCancels(t *testing.T) { // Override one cancel with a want-block (before cancel is sent to network) messageQueue.AddWants(cids[:1], []cid.Cid{}) - messages = collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + messages = collectMessages(ctx, t, messagesSent, collectTimeout) if totalEntriesLength(messages) != 3 { t.Fatal("Wrong message count", totalEntriesLength(messages)) } @@ -413,14 +410,14 @@ func TestWantlistRebroadcast(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] dhtm := 
&fakeDontHaveTimeoutMgr{} clock := clock.NewMock() events := make(chan messageEvent) messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock, events) - bcstwh := testutil.GenerateCids(10) - wantHaves := testutil.GenerateCids(10) - wantBlocks := testutil.GenerateCids(10) + bcstwh := random.Cids(10) + wantHaves := random.Cids(10) + wantBlocks := random.Cids(10) // Add some broadcast want-haves messageQueue.Startup() @@ -522,16 +519,16 @@ func TestSendingLargeMessages(t *testing.T) { fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} dhtm := &fakeDontHaveTimeoutMgr{} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] - wantBlocks := testutil.GenerateCids(10) + wantBlocks := random.Cids(10) entrySize := 44 maxMsgSize := entrySize * 3 // 3 wants messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMsgSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) messageQueue.Startup() messageQueue.AddWants(wantBlocks, []cid.Cid{}) - messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + messages := collectMessages(ctx, t, messagesSent, 5*collectTimeout) // want-block has size 44, so with maxMsgSize 44 * 3 (3 want-blocks), then if // we send 10 want-blocks we should expect 4 messages: @@ -550,7 +547,7 @@ func TestSendToPeerThatDoesntSupportHave(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, false) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) messageQueue.Startup() @@ -561,9 +558,9 @@ func TestSendToPeerThatDoesntSupportHave(t *testing.T) { // - broadcast want-haves should be sent as want-blocks // Check broadcast want-haves - bcwh := testutil.GenerateCids(10) + bcwh := random.Cids(10) messageQueue.AddBroadcastWantHaves(bcwh) - messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + messages := collectMessages(ctx, t, messagesSent, collectTimeout) if len(messages) != 1 { t.Fatal("wrong number of messages were sent", len(messages)) @@ -579,10 +576,10 @@ func TestSendToPeerThatDoesntSupportHave(t *testing.T) { } // Check regular want-haves and want-blocks - wbs := testutil.GenerateCids(10) - whs := testutil.GenerateCids(10) + wbs := random.Cids(10) + whs := random.Cids(10) messageQueue.AddWants(wbs, whs) - messages = collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + messages = collectMessages(ctx, t, messagesSent, collectTimeout) if len(messages) != 1 { t.Fatal("wrong number of messages were sent", len(messages)) @@ -604,15 +601,15 @@ func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, false) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] dhtm := &fakeDontHaveTimeoutMgr{} messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) messageQueue.Startup() - wbs := testutil.GenerateCids(10) + wbs := random.Cids(10) messageQueue.AddWants(wbs, nil) - collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + collectMessages(ctx, t, messagesSent, collectTimeout) // Check want-blocks are added to DontHaveTimeoutMgr if dhtm.pendingCount() != 
len(wbs) { @@ -621,7 +618,7 @@ func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { cancelCount := 2 messageQueue.AddCancels(wbs[:cancelCount]) - collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + collectMessages(ctx, t, messagesSent, collectTimeout) // Check want-blocks are removed from DontHaveTimeoutMgr if dhtm.pendingCount() != len(wbs)-cancelCount { @@ -635,7 +632,7 @@ func TestResponseReceived(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, false) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] dhtm := &fakeDontHaveTimeoutMgr{} clock := clock.NewMock() @@ -643,7 +640,7 @@ func TestResponseReceived(t *testing.T) { messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock, events) messageQueue.Startup() - cids := testutil.GenerateCids(10) + cids := random.Cids(10) // Add some wants messageQueue.AddWants(cids[:5], nil) @@ -684,17 +681,17 @@ func TestResponseReceivedAppliesForFirstResponseOnly(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, false) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] dhtm := &fakeDontHaveTimeoutMgr{} messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) messageQueue.Startup() - cids := testutil.GenerateCids(2) + cids := random.Cids(2) - // Add some wants and wait 10ms + // Add some wants and wait messageQueue.AddWants(cids, nil) - collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + collectMessages(ctx, t, messagesSent, collectTimeout) // Receive a response for the wants messageQueue.ResponseReceived(cids) @@ -728,7 +725,7 @@ func TestResponseReceivedDiscardsOutliers(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, false) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] maxValLatency := 30 * time.Millisecond dhtm := &fakeDontHaveTimeoutMgr{} @@ -737,7 +734,7 @@ func TestResponseReceivedDiscardsOutliers(t *testing.T) { messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValLatency, dhtm, clock, events) messageQueue.Startup() - cids := testutil.GenerateCids(4) + cids := random.Cids(4) // Add some wants and wait 20ms messageQueue.AddWants(cids[:2], nil) @@ -799,7 +796,7 @@ func BenchmarkMessageQueue(b *testing.B) { fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} dhtm := &fakeDontHaveTimeoutMgr{} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) messageQueue.Startup() @@ -835,10 +832,10 @@ func BenchmarkMessageQueue(b *testing.B) { // Alternately add either a few wants or a lot of broadcast wants if rand.Intn(2) == 0 { - wants := testutil.GenerateCids(10) + wants := random.Cids(10) qs[i].AddWants(wants[:2], wants[2:]) } else { - wants := testutil.GenerateCids(60) + wants := random.Cids(60) qs[i].AddBroadcastWantHaves(wants) } } diff --git a/bitswap/client/internal/peermanager/peermanager.go 
b/bitswap/client/internal/peermanager/peermanager.go index f26b8fbec..25cdd605f 100644 --- a/bitswap/client/internal/peermanager/peermanager.go +++ b/bitswap/client/internal/peermanager/peermanager.go @@ -11,7 +11,7 @@ import ( peer "github.com/libp2p/go-libp2p/core/peer" ) -var log = logging.Logger("bs:peermgr") +var log = logging.Logger("bitswap/client/peermgr") // PeerQueue provides a queue of messages to be sent for a single peer. type PeerQueue interface { diff --git a/bitswap/client/internal/peermanager/peermanager_test.go b/bitswap/client/internal/peermanager/peermanager_test.go index d3c712704..b778c46e3 100644 --- a/bitswap/client/internal/peermanager/peermanager_test.go +++ b/bitswap/client/internal/peermanager/peermanager_test.go @@ -3,11 +3,12 @@ package peermanager import ( "context" "math/rand" + "slices" "testing" "time" - "github.com/ipfs/boxo/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" "github.com/libp2p/go-libp2p/core/peer" ) @@ -83,7 +84,7 @@ func TestAddingAndRemovingPeers(t *testing.T) { msgs := make(chan msg, 16) peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(6) + tp := random.Peers(6) self, peer1, peer2, peer3, peer4, peer5 := tp[0], tp[1], tp[2], tp[3], tp[4], tp[5] peerManager := New(ctx, peerQueueFactory, self) @@ -93,14 +94,14 @@ func TestAddingAndRemovingPeers(t *testing.T) { connectedPeers := peerManager.ConnectedPeers() - if !testutil.ContainsPeer(connectedPeers, peer1) || - !testutil.ContainsPeer(connectedPeers, peer2) || - !testutil.ContainsPeer(connectedPeers, peer3) { + if !slices.Contains(connectedPeers, peer1) || + !slices.Contains(connectedPeers, peer2) || + !slices.Contains(connectedPeers, peer3) { t.Fatal("Peers not connected that should be connected") } - if testutil.ContainsPeer(connectedPeers, peer4) || - testutil.ContainsPeer(connectedPeers, peer5) { + if slices.Contains(connectedPeers, peer4) || + slices.Contains(connectedPeers, peer5) { t.Fatal("Peers connected that shouldn't be connected") } @@ -108,7 +109,7 @@ func TestAddingAndRemovingPeers(t *testing.T) { peerManager.Disconnected(peer1) connectedPeers = peerManager.ConnectedPeers() - if testutil.ContainsPeer(connectedPeers, peer1) { + if slices.Contains(connectedPeers, peer1) { t.Fatal("Peer should have been disconnected but was not") } @@ -116,7 +117,7 @@ func TestAddingAndRemovingPeers(t *testing.T) { peerManager.Connected(peer1) connectedPeers = peerManager.ConnectedPeers() - if !testutil.ContainsPeer(connectedPeers, peer1) { + if !slices.Contains(connectedPeers, peer1) { t.Fatal("Peer should have been connected but was not") } } @@ -126,11 +127,11 @@ func TestBroadcastOnConnect(t *testing.T) { defer cancel() msgs := make(chan msg, 16) peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(2) + tp := random.Peers(2) self, peer1 := tp[0], tp[1] peerManager := New(ctx, peerQueueFactory, self) - cids := testutil.GenerateCids(2) + cids := random.Cids(2) peerManager.BroadcastWantHaves(ctx, cids) // Connect with two broadcast wants for first peer @@ -147,11 +148,11 @@ func TestBroadcastWantHaves(t *testing.T) { defer cancel() msgs := make(chan msg, 16) peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(3) + tp := random.Peers(3) self, peer1, peer2 := tp[0], tp[1], tp[2] peerManager := New(ctx, peerQueueFactory, self) - cids := testutil.GenerateCids(3) + cids := random.Cids(3) // Broadcast the first two. 
peerManager.BroadcastWantHaves(ctx, cids[:2]) @@ -188,10 +189,10 @@ func TestSendWants(t *testing.T) { defer cancel() msgs := make(chan msg, 16) peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(2) + tp := random.Peers(2) self, peer1 := tp[0], tp[1] peerManager := New(ctx, peerQueueFactory, self) - cids := testutil.GenerateCids(4) + cids := random.Cids(4) peerManager.Connected(peer1) peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0]}, []cid.Cid{cids[2]}) @@ -222,10 +223,10 @@ func TestSendCancels(t *testing.T) { defer cancel() msgs := make(chan msg, 16) peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(3) + tp := random.Peers(3) self, peer1, peer2 := tp[0], tp[1], tp[2] peerManager := New(ctx, peerQueueFactory, self) - cids := testutil.GenerateCids(4) + cids := random.Cids(4) // Connect to peer1 and peer2 peerManager.Connected(peer1) @@ -283,7 +284,7 @@ func TestSessionRegistration(t *testing.T) { msgs := make(chan msg, 16) peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(3) + tp := random.Peers(3) self, p1, p2 := tp[0], tp[1], tp[2] peerManager := New(ctx, peerQueueFactory, self) @@ -343,8 +344,8 @@ func BenchmarkPeerManager(b *testing.B) { return &benchPeerQueue{} } - self := testutil.GeneratePeers(1)[0] - peers := testutil.GeneratePeers(500) + self := random.Peers(1)[0] + peers := random.Peers(500) peerManager := New(ctx, peerQueueFactory, self) // Create a bunch of connections @@ -364,11 +365,11 @@ func BenchmarkPeerManager(b *testing.B) { // Alternately add either a few wants or many broadcast wants r := rand.Intn(8) if r == 0 { - wants := testutil.GenerateCids(10) + wants := random.Cids(10) peerManager.SendWants(ctx, peers[i], wants[:2], wants[2:]) wanted = append(wanted, wants...) } else if r == 1 { - wants := testutil.GenerateCids(30) + wants := random.Cids(30) peerManager.BroadcastWantHaves(ctx, wants) wanted = append(wanted, wants...) 
} else { diff --git a/bitswap/client/internal/peermanager/peerwantmanager_test.go b/bitswap/client/internal/peermanager/peerwantmanager_test.go index 505fbea1a..bfe0c626d 100644 --- a/bitswap/client/internal/peermanager/peerwantmanager_test.go +++ b/bitswap/client/internal/peermanager/peerwantmanager_test.go @@ -3,9 +3,10 @@ package peermanager import ( "testing" - "github.com/ipfs/boxo/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" peer "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" ) type gauge struct { @@ -61,43 +62,32 @@ func clearSent(pqs map[peer.ID]PeerQueue) { func TestEmpty(t *testing.T) { pwm := newPeerWantManager(&gauge{}, &gauge{}) - - if len(pwm.getWantBlocks()) > 0 { - t.Fatal("Expected GetWantBlocks() to have length 0") - } - if len(pwm.getWantHaves()) > 0 { - t.Fatal("Expected GetWantHaves() to have length 0") - } + require.Empty(t, pwm.getWantBlocks()) + require.Empty(t, pwm.getWantHaves()) } func TestPWMBroadcastWantHaves(t *testing.T) { pwm := newPeerWantManager(&gauge{}, &gauge{}) - peers := testutil.GeneratePeers(3) - cids := testutil.GenerateCids(2) - cids2 := testutil.GenerateCids(2) - cids3 := testutil.GenerateCids(2) + peers := random.Peers(3) + cids := random.Cids(2) + cids2 := random.Cids(2) + cids3 := random.Cids(2) peerQueues := make(map[peer.ID]PeerQueue) for _, p := range peers[:2] { pq := &mockPQ{} peerQueues[p] = pq pwm.addPeer(pq, p) - if len(pq.bcst) > 0 { - t.Errorf("expected no broadcast wants") - } + require.Empty(t, pq.bcst, "expected no broadcast wants") } // Broadcast 2 cids to 2 peers pwm.broadcastWantHaves(cids) for _, pqi := range peerQueues { pq := pqi.(*mockPQ) - if len(pq.bcst) != 2 { - t.Fatal("Expected 2 want-haves") - } - if !testutil.MatchKeysIgnoreOrder(pq.bcst, cids) { - t.Fatal("Expected all cids to be broadcast") - } + require.Len(t, pq.bcst, 2, "Expected 2 want-haves") + require.ElementsMatch(t, pq.bcst, cids, "Expected all cids to be broadcast") } // Broadcasting same cids should have no effect @@ -105,9 +95,7 @@ func TestPWMBroadcastWantHaves(t *testing.T) { pwm.broadcastWantHaves(cids) for _, pqi := range peerQueues { pq := pqi.(*mockPQ) - if len(pq.bcst) != 0 { - t.Fatal("Expected 0 want-haves") - } + require.Len(t, pq.bcst, 0, "Expected 0 want-haves") } // Broadcast 2 other cids @@ -115,12 +103,8 @@ func TestPWMBroadcastWantHaves(t *testing.T) { pwm.broadcastWantHaves(cids2) for _, pqi := range peerQueues { pq := pqi.(*mockPQ) - if len(pq.bcst) != 2 { - t.Fatal("Expected 2 want-haves") - } - if !testutil.MatchKeysIgnoreOrder(pq.bcst, cids2) { - t.Fatal("Expected all new cids to be broadcast") - } + require.Len(t, pq.bcst, 2, "Expected 2 want-haves") + require.ElementsMatch(t, pq.bcst, cids2, "Expected all new cids to be broadcast") } // Broadcast mix of old and new cids @@ -128,18 +112,14 @@ func TestPWMBroadcastWantHaves(t *testing.T) { pwm.broadcastWantHaves(append(cids, cids3...)) for _, pqi := range peerQueues { pq := pqi.(*mockPQ) - if len(pq.bcst) != 2 { - t.Fatal("Expected 2 want-haves") - } + require.Len(t, pq.bcst, 2, "Expected 2 want-haves") // Only new cids should be broadcast - if !testutil.MatchKeysIgnoreOrder(pq.bcst, cids3) { - t.Fatal("Expected all new cids to be broadcast") - } + require.ElementsMatch(t, pq.bcst, cids3, "Expected all new cids to be broadcast") } // Sending want-block for a cid should prevent broadcast to that peer clearSent(peerQueues) - cids4 := testutil.GenerateCids(4) + cids4 := random.Cids(4) wantBlocks := 
[]cid.Cid{cids4[0], cids4[2]} p0 := peers[0] p1 := peers[1] @@ -147,19 +127,13 @@ func TestPWMBroadcastWantHaves(t *testing.T) { pwm.broadcastWantHaves(cids4) pq0 := peerQueues[p0].(*mockPQ) - if len(pq0.bcst) != 2 { // only broadcast 2 / 4 want-haves - t.Fatal("Expected 2 want-haves") - } - if !testutil.MatchKeysIgnoreOrder(pq0.bcst, []cid.Cid{cids4[1], cids4[3]}) { - t.Fatalf("Expected unsent cids to be broadcast") - } + // only broadcast 2 / 4 want-haves + require.Len(t, pq0.bcst, 2, "Expected 2 want-haves") + require.ElementsMatch(t, pq0.bcst, []cid.Cid{cids4[1], cids4[3]}, "Expected unsent cids to be broadcast") pq1 := peerQueues[p1].(*mockPQ) - if len(pq1.bcst) != 4 { // broadcast all 4 want-haves - t.Fatal("Expected 4 want-haves") - } - if !testutil.MatchKeysIgnoreOrder(pq1.bcst, cids4) { - t.Fatal("Expected all cids to be broadcast") - } + // broadcast all 4 want-haves + require.Len(t, pq1.bcst, 4, "Expected 4 want-haves") + require.ElementsMatch(t, pq1.bcst, cids4, "Expected all cids to be broadcast") allCids := cids allCids = append(allCids, cids2...) @@ -171,25 +145,21 @@ func TestPWMBroadcastWantHaves(t *testing.T) { pq2 := &mockPQ{} peerQueues[peer2] = pq2 pwm.addPeer(pq2, peer2) - if !testutil.MatchKeysIgnoreOrder(pq2.bcst, allCids) { - t.Fatalf("Expected all cids to be broadcast.") - } + require.ElementsMatch(t, pq2.bcst, allCids, "Expected all cids to be broadcast") clearSent(peerQueues) pwm.broadcastWantHaves(allCids) - if len(pq2.bcst) != 0 { - t.Errorf("did not expect to have CIDs to broadcast") - } + require.Empty(t, pq2.bcst, "did not expect to have CIDs to broadcast") } func TestPWMSendWants(t *testing.T) { pwm := newPeerWantManager(&gauge{}, &gauge{}) - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) p0 := peers[0] p1 := peers[1] - cids := testutil.GenerateCids(2) - cids2 := testutil.GenerateCids(2) + cids := random.Cids(2) + cids2 := random.Cids(2) peerQueues := make(map[peer.ID]PeerQueue) for _, p := range peers[:2] { @@ -203,75 +173,55 @@ func TestPWMSendWants(t *testing.T) { // Send 2 want-blocks and 2 want-haves to p0 clearSent(peerQueues) pwm.sendWants(p0, cids, cids2) - if !testutil.MatchKeysIgnoreOrder(pq0.wbs, cids) { - t.Fatal("Expected 2 want-blocks") - } - if !testutil.MatchKeysIgnoreOrder(pq0.whs, cids2) { - t.Fatal("Expected 2 want-haves") - } + require.ElementsMatch(t, pq0.wbs, cids, "Expected 2 want-blocks") + require.ElementsMatch(t, pq0.whs, cids2, "Expected 2 want-haves") // Send to p0 // - 1 old want-block and 2 new want-blocks // - 1 old want-have and 2 new want-haves clearSent(peerQueues) - cids3 := testutil.GenerateCids(2) - cids4 := testutil.GenerateCids(2) + cids3 := random.Cids(2) + cids4 := random.Cids(2) pwm.sendWants(p0, append(cids3, cids[0]), append(cids4, cids2[0])) - if !testutil.MatchKeysIgnoreOrder(pq0.wbs, cids3) { - t.Fatal("Expected 2 want-blocks") - } - if !testutil.MatchKeysIgnoreOrder(pq0.whs, cids4) { - t.Fatal("Expected 2 want-haves") - } + require.ElementsMatch(t, pq0.wbs, cids3, "Expected 2 want-blocks") + require.ElementsMatch(t, pq0.whs, cids4, "Expected 2 want-haves") // Send to p0 as want-blocks: 1 new want-block, 1 old want-have clearSent(peerQueues) - cids5 := testutil.GenerateCids(1) + cids5 := random.Cids(1) newWantBlockOldWantHave := append(cids5, cids2[0]) pwm.sendWants(p0, newWantBlockOldWantHave, []cid.Cid{}) // If a want was sent as a want-have, it should be ok to now send it as a // want-block - if !testutil.MatchKeysIgnoreOrder(pq0.wbs, newWantBlockOldWantHave) { - t.Fatal("Expected 2 
want-blocks") - } - if len(pq0.whs) != 0 { - t.Fatal("Expected 0 want-haves") - } + require.ElementsMatch(t, pq0.wbs, newWantBlockOldWantHave, "Expected 2 want-blocks") + require.Empty(t, pq0.whs, "Expected 0 want-haves") // Send to p0 as want-haves: 1 new want-have, 1 old want-block clearSent(peerQueues) - cids6 := testutil.GenerateCids(1) + cids6 := random.Cids(1) newWantHaveOldWantBlock := append(cids6, cids[0]) pwm.sendWants(p0, []cid.Cid{}, newWantHaveOldWantBlock) // If a want was previously sent as a want-block, it should not be // possible to now send it as a want-have - if !testutil.MatchKeysIgnoreOrder(pq0.whs, cids6) { - t.Fatal("Expected 1 want-have") - } - if len(pq0.wbs) != 0 { - t.Fatal("Expected 0 want-blocks") - } + require.ElementsMatch(t, pq0.whs, cids6, "Expected 1 want-have") + require.Empty(t, pq0.wbs, "Expected 0 want-blocks") // Send 2 want-blocks and 2 want-haves to p1 pwm.sendWants(p1, cids, cids2) - if !testutil.MatchKeysIgnoreOrder(pq1.wbs, cids) { - t.Fatal("Expected 2 want-blocks") - } - if !testutil.MatchKeysIgnoreOrder(pq1.whs, cids2) { - t.Fatal("Expected 2 want-haves") - } + require.ElementsMatch(t, pq1.wbs, cids, "Expected 2 want-blocks") + require.ElementsMatch(t, pq1.whs, cids2, "Expected 2 want-haves") } func TestPWMSendCancels(t *testing.T) { pwm := newPeerWantManager(&gauge{}, &gauge{}) - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) p0 := peers[0] p1 := peers[1] - wb1 := testutil.GenerateCids(2) - wh1 := testutil.GenerateCids(2) - wb2 := testutil.GenerateCids(2) - wh2 := testutil.GenerateCids(2) + wb1 := random.Cids(2) + wh1 := random.Cids(2) + wb2 := random.Cids(2) + wh2 := random.Cids(2) allwb := append(wb1, wb2...) allwh := append(wh1, wh2...) @@ -290,54 +240,32 @@ func TestPWMSendCancels(t *testing.T) { // (1 overlapping want-block / want-have with p0) pwm.sendWants(p1, append(wb2, wb1[1]), append(wh2, wh1[1])) - if !testutil.MatchKeysIgnoreOrder(pwm.getWantBlocks(), allwb) { - t.Fatal("Expected 4 cids to be wanted") - } - if !testutil.MatchKeysIgnoreOrder(pwm.getWantHaves(), allwh) { - t.Fatal("Expected 4 cids to be wanted") - } + require.ElementsMatch(t, pwm.getWantBlocks(), allwb, "Expected 4 cids to be wanted") + require.ElementsMatch(t, pwm.getWantHaves(), allwh, "Expected 4 cids to be wanted") // Cancel 1 want-block and 1 want-have that were sent to p0 clearSent(peerQueues) pwm.sendCancels([]cid.Cid{wb1[0], wh1[0]}) // Should cancel the want-block and want-have - if len(pq1.cancels) != 0 { - t.Fatal("Expected no cancels sent to p1") - } - if !testutil.MatchKeysIgnoreOrder(pq0.cancels, []cid.Cid{wb1[0], wh1[0]}) { - t.Fatal("Expected 2 cids to be cancelled") - } - if !testutil.MatchKeysIgnoreOrder(pwm.getWantBlocks(), append(wb2, wb1[1])) { - t.Fatal("Expected 3 want-blocks") - } - if !testutil.MatchKeysIgnoreOrder(pwm.getWantHaves(), append(wh2, wh1[1])) { - t.Fatal("Expected 3 want-haves") - } + require.Empty(t, pq1.cancels, "Expected no cancels sent to p1") + require.ElementsMatch(t, pq0.cancels, []cid.Cid{wb1[0], wh1[0]}, "Expected 2 cids to be cancelled") + require.ElementsMatch(t, pwm.getWantBlocks(), append(wb2, wb1[1]), "Expected 3 want-blocks") + require.ElementsMatch(t, pwm.getWantHaves(), append(wh2, wh1[1]), "Expected 3 want-haves") // Cancel everything clearSent(peerQueues) allCids := append(allwb, allwh...) 
pwm.sendCancels(allCids) // Should cancel the remaining want-blocks and want-haves for p0 - if !testutil.MatchKeysIgnoreOrder(pq0.cancels, []cid.Cid{wb1[1], wh1[1]}) { - t.Fatal("Expected un-cancelled cids to be cancelled") - } + require.ElementsMatch(t, pq0.cancels, []cid.Cid{wb1[1], wh1[1]}, "Expected un-cancelled cids to be cancelled") // Should cancel the remaining want-blocks and want-haves for p1 remainingP1 := append(wb2, wh2...) remainingP1 = append(remainingP1, wb1[1], wh1[1]) - if len(pq1.cancels) != len(remainingP1) { - t.Fatal("mismatch", len(pq1.cancels), len(remainingP1)) - } - if !testutil.MatchKeysIgnoreOrder(pq1.cancels, remainingP1) { - t.Fatal("Expected un-cancelled cids to be cancelled") - } - if len(pwm.getWantBlocks()) != 0 { - t.Fatal("Expected 0 want-blocks") - } - if len(pwm.getWantHaves()) != 0 { - t.Fatal("Expected 0 want-haves") - } + require.Equal(t, len(pq1.cancels), len(remainingP1), "mismatch", len(pq1.cancels), len(remainingP1)) + require.ElementsMatch(t, pq1.cancels, remainingP1, "Expected un-cancelled cids to be cancelled") + require.Empty(t, pwm.getWantBlocks(), "Expected 0 want-blocks") + require.Empty(t, pwm.getWantHaves(), "Expected 0 want-haves") } func TestStats(t *testing.T) { @@ -345,11 +273,11 @@ func TestStats(t *testing.T) { wbg := &gauge{} pwm := newPeerWantManager(g, wbg) - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) p0 := peers[0] p1 := peers[1] - cids := testutil.GenerateCids(2) - cids2 := testutil.GenerateCids(2) + cids := random.Cids(2) + cids2 := random.Cids(2) peerQueues := make(map[peer.ID]PeerQueue) pq := &mockPQ{} @@ -359,86 +287,54 @@ func TestStats(t *testing.T) { // Send 2 want-blocks and 2 want-haves to p0 pwm.sendWants(p0, cids, cids2) - if g.count != 4 { - t.Fatal("Expected 4 wants") - } - if wbg.count != 2 { - t.Fatal("Expected 2 want-blocks") - } + require.Equal(t, 4, g.count, "Expected 4 wants") + require.Equal(t, 2, wbg.count, "Expected 2 want-blocks") // Send 1 old want-block and 2 new want-blocks to p0 - cids3 := testutil.GenerateCids(2) + cids3 := random.Cids(2) pwm.sendWants(p0, append(cids3, cids[0]), []cid.Cid{}) - if g.count != 6 { - t.Fatal("Expected 6 wants") - } - if wbg.count != 4 { - t.Fatal("Expected 4 want-blocks") - } + require.Equal(t, 6, g.count, "Expected 6 wants") + require.Equal(t, 4, wbg.count, "Expected 4 want-blocks") // Broadcast 1 old want-have and 2 new want-haves - cids4 := testutil.GenerateCids(2) + cids4 := random.Cids(2) pwm.broadcastWantHaves(append(cids4, cids2[0])) - if g.count != 8 { - t.Fatal("Expected 8 wants") - } - if wbg.count != 4 { - t.Fatal("Expected 4 want-blocks") - } + require.Equal(t, 8, g.count, "Expected 8 wants") + require.Equal(t, 4, wbg.count, "Expected 4 want-blocks") // Add a second peer pwm.addPeer(pq, p1) - if g.count != 8 { - t.Fatal("Expected 8 wants") - } - if wbg.count != 4 { - t.Fatal("Expected 4 want-blocks") - } + require.Equal(t, 8, g.count, "Expected 8 wants") + require.Equal(t, 4, wbg.count, "Expected 4 want-blocks") // Cancel 1 want-block that was sent to p0 // and 1 want-block that was not sent - cids5 := testutil.GenerateCids(1) + cids5 := random.Cids(1) pwm.sendCancels(append(cids5, cids[0])) - if g.count != 7 { - t.Fatal("Expected 7 wants") - } - if wbg.count != 3 { - t.Fatal("Expected 3 want-blocks") - } + require.Equal(t, 7, g.count, "Expected 7 wants") + require.Equal(t, 3, wbg.count, "Expected 3 want-blocks") // Remove first peer pwm.removePeer(p0) // Should still have 3 broadcast wants - if g.count != 3 { - t.Fatal("Expected 3 
wants") - } - if wbg.count != 0 { - t.Fatal("Expected all want-blocks to be removed") - } + require.Equal(t, 3, g.count, "Expected 3 wants") + require.Zero(t, wbg.count, "Expected all want-blocks to be removed") // Remove second peer pwm.removePeer(p1) // Should still have 3 broadcast wants - if g.count != 3 { - t.Fatal("Expected 3 wants") - } - if wbg.count != 0 { - t.Fatal("Expected 0 want-blocks") - } + require.Equal(t, 3, g.count, "Expected 3 wants") + require.Zero(t, wbg.count, "Expected 0 want-blocks") // Cancel one remaining broadcast want-have pwm.sendCancels(cids2[:1]) - if g.count != 2 { - t.Fatal("Expected 2 wants") - } - if wbg.count != 0 { - t.Fatal("Expected 0 want-blocks") - } + require.Equal(t, 2, g.count, "Expected 2 wants") + require.Zero(t, wbg.count, "Expected 0 want-blocks") } func TestStatsOverlappingWantBlockWantHave(t *testing.T) { @@ -446,11 +342,11 @@ func TestStatsOverlappingWantBlockWantHave(t *testing.T) { wbg := &gauge{} pwm := newPeerWantManager(g, wbg) - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) p0 := peers[0] p1 := peers[1] - cids := testutil.GenerateCids(2) - cids2 := testutil.GenerateCids(2) + cids := random.Cids(2) + cids2 := random.Cids(2) pwm.addPeer(&mockPQ{}, p0) pwm.addPeer(&mockPQ{}, p1) @@ -462,22 +358,14 @@ func TestStatsOverlappingWantBlockWantHave(t *testing.T) { // 2 want-haves and 2 want-blocks to p1 pwm.sendWants(p1, cids2, cids) - if g.count != 4 { - t.Fatal("Expected 4 wants") - } - if wbg.count != 4 { - t.Fatal("Expected 4 want-blocks") - } + require.Equal(t, 4, g.count, "Expected 4 wants") + require.Equal(t, 4, wbg.count, "Expected 4 want-blocks") // Cancel 1 of each group of cids pwm.sendCancels([]cid.Cid{cids[0], cids2[0]}) - if g.count != 2 { - t.Fatal("Expected 2 wants") - } - if wbg.count != 2 { - t.Fatal("Expected 2 want-blocks") - } + require.Equal(t, 2, g.count, "Expected 2 wants") + require.Equal(t, 2, wbg.count, "Expected 2 want-blocks") } func TestStatsRemovePeerOverlappingWantBlockWantHave(t *testing.T) { @@ -485,11 +373,11 @@ func TestStatsRemovePeerOverlappingWantBlockWantHave(t *testing.T) { wbg := &gauge{} pwm := newPeerWantManager(g, wbg) - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) p0 := peers[0] p1 := peers[1] - cids := testutil.GenerateCids(2) - cids2 := testutil.GenerateCids(2) + cids := random.Cids(2) + cids2 := random.Cids(2) pwm.addPeer(&mockPQ{}, p0) pwm.addPeer(&mockPQ{}, p1) @@ -501,20 +389,12 @@ func TestStatsRemovePeerOverlappingWantBlockWantHave(t *testing.T) { // 2 want-haves and 2 want-blocks to p1 pwm.sendWants(p1, cids2, cids) - if g.count != 4 { - t.Fatal("Expected 4 wants") - } - if wbg.count != 4 { - t.Fatal("Expected 4 want-blocks") - } + require.Equal(t, 4, g.count, "Expected 4 wants") + require.Equal(t, 4, wbg.count, "Expected 4 want-blocks") // Remove p0 pwm.removePeer(p0) - if g.count != 4 { - t.Fatal("Expected 4 wants") - } - if wbg.count != 2 { - t.Fatal("Expected 2 want-blocks") - } + require.Equal(t, 4, g.count, "Expected 4 wants") + require.Equal(t, 2, wbg.count, "Expected 2 want-blocks") } diff --git a/bitswap/client/internal/providerquerymanager/providerquerymanager.go b/bitswap/client/internal/providerquerymanager/providerquerymanager.go index 9bba5211f..ea10a40e5 100644 --- a/bitswap/client/internal/providerquerymanager/providerquerymanager.go +++ b/bitswap/client/internal/providerquerymanager/providerquerymanager.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/trace" ) -var log = logging.Logger("bitswap") +var log = 
logging.Logger("bitswap/client/provqrymgr") const ( maxProviders = 10 diff --git a/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go b/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go index 52447e2c1..9deb77f99 100644 --- a/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go +++ b/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go @@ -8,8 +8,8 @@ import ( "testing" "time" - "github.com/ipfs/boxo/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" "github.com/libp2p/go-libp2p/core/peer" ) @@ -58,7 +58,7 @@ func (fpn *fakeProviderNetwork) FindProvidersAsync(ctx context.Context, k cid.Ci } func TestNormalSimultaneousFetch(t *testing.T) { - peers := testutil.GeneratePeers(10) + peers := random.Peers(10) fpn := &fakeProviderNetwork{ peersFound: peers, delay: 1 * time.Millisecond, @@ -66,7 +66,7 @@ func TestNormalSimultaneousFetch(t *testing.T) { ctx := context.Background() providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() - keys := testutil.GenerateCids(2) + keys := random.Cids(2) sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() @@ -95,7 +95,7 @@ func TestNormalSimultaneousFetch(t *testing.T) { } func TestDedupingProviderRequests(t *testing.T) { - peers := testutil.GeneratePeers(10) + peers := random.Peers(10) fpn := &fakeProviderNetwork{ peersFound: peers, delay: 1 * time.Millisecond, @@ -103,7 +103,7 @@ func TestDedupingProviderRequests(t *testing.T) { ctx := context.Background() providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() - key := testutil.GenerateCids(1)[0] + key := random.Cids(1)[0] sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() @@ -135,7 +135,7 @@ func TestDedupingProviderRequests(t *testing.T) { } func TestCancelOneRequestDoesNotTerminateAnother(t *testing.T) { - peers := testutil.GeneratePeers(10) + peers := random.Peers(10) fpn := &fakeProviderNetwork{ peersFound: peers, delay: 1 * time.Millisecond, @@ -144,7 +144,7 @@ func TestCancelOneRequestDoesNotTerminateAnother(t *testing.T) { providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() - key := testutil.GenerateCids(1)[0] + key := random.Cids(1)[0] // first session will cancel before done firstSessionCtx, firstCancel := context.WithTimeout(ctx, 3*time.Millisecond) @@ -179,7 +179,7 @@ func TestCancelOneRequestDoesNotTerminateAnother(t *testing.T) { } func TestCancelManagerExitsGracefully(t *testing.T) { - peers := testutil.GeneratePeers(10) + peers := random.Peers(10) fpn := &fakeProviderNetwork{ peersFound: peers, delay: 1 * time.Millisecond, @@ -190,7 +190,7 @@ func TestCancelManagerExitsGracefully(t *testing.T) { providerQueryManager := New(managerCtx, fpn) providerQueryManager.Startup() - key := testutil.GenerateCids(1)[0] + key := random.Cids(1)[0] sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) defer cancel() @@ -214,7 +214,7 @@ func TestCancelManagerExitsGracefully(t *testing.T) { } func TestPeersWithConnectionErrorsNotAddedToPeerList(t *testing.T) { - peers := testutil.GeneratePeers(10) + peers := random.Peers(10) fpn := &fakeProviderNetwork{ peersFound: peers, connectError: errors.New("not able to connect"), @@ -224,7 +224,7 @@ func TestPeersWithConnectionErrorsNotAddedToPeerList(t *testing.T) { providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() - key := testutil.GenerateCids(1)[0] + key := random.Cids(1)[0] sessionCtx, cancel := 
context.WithTimeout(ctx, 20*time.Millisecond) defer cancel() @@ -247,7 +247,7 @@ func TestPeersWithConnectionErrorsNotAddedToPeerList(t *testing.T) { } func TestRateLimitingRequests(t *testing.T) { - peers := testutil.GeneratePeers(10) + peers := random.Peers(10) fpn := &fakeProviderNetwork{ peersFound: peers, delay: 5 * time.Millisecond, @@ -258,7 +258,7 @@ func TestRateLimitingRequests(t *testing.T) { providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() - keys := testutil.GenerateCids(maxInProcessRequests + 1) + keys := random.Cids(maxInProcessRequests + 1) sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() var requestChannels []<-chan peer.ID @@ -286,7 +286,7 @@ func TestRateLimitingRequests(t *testing.T) { } func TestFindProviderTimeout(t *testing.T) { - peers := testutil.GeneratePeers(10) + peers := random.Peers(10) fpn := &fakeProviderNetwork{ peersFound: peers, delay: 10 * time.Millisecond, @@ -295,7 +295,7 @@ func TestFindProviderTimeout(t *testing.T) { providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() providerQueryManager.SetFindProviderTimeout(2 * time.Millisecond) - keys := testutil.GenerateCids(1) + keys := random.Cids(1) sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() @@ -310,7 +310,7 @@ func TestFindProviderTimeout(t *testing.T) { } func TestFindProviderPreCanceled(t *testing.T) { - peers := testutil.GeneratePeers(10) + peers := random.Peers(10) fpn := &fakeProviderNetwork{ peersFound: peers, delay: 1 * time.Millisecond, @@ -319,7 +319,7 @@ func TestFindProviderPreCanceled(t *testing.T) { providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() providerQueryManager.SetFindProviderTimeout(100 * time.Millisecond) - keys := testutil.GenerateCids(1) + keys := random.Cids(1) sessionCtx, cancel := context.WithCancel(ctx) cancel() @@ -335,7 +335,7 @@ func TestFindProviderPreCanceled(t *testing.T) { } func TestCancelFindProvidersAfterCompletion(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) fpn := &fakeProviderNetwork{ peersFound: peers, delay: 1 * time.Millisecond, @@ -344,7 +344,7 @@ func TestCancelFindProvidersAfterCompletion(t *testing.T) { providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() providerQueryManager.SetFindProviderTimeout(100 * time.Millisecond) - keys := testutil.GenerateCids(1) + keys := random.Cids(1) sessionCtx, cancel := context.WithCancel(ctx) firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) diff --git a/bitswap/client/internal/session/peerresponsetracker_test.go b/bitswap/client/internal/session/peerresponsetracker_test.go index bdcd80daa..0bf5fe3fe 100644 --- a/bitswap/client/internal/session/peerresponsetracker_test.go +++ b/bitswap/client/internal/session/peerresponsetracker_test.go @@ -4,12 +4,12 @@ import ( "math" "testing" - "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/go-test/random" peer "github.com/libp2p/go-libp2p/core/peer" ) func TestPeerResponseTrackerInit(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) prt := newPeerResponseTracker() if prt.choose([]peer.ID{}) != "" { @@ -25,7 +25,7 @@ func TestPeerResponseTrackerInit(t *testing.T) { } func TestPeerResponseTrackerProbabilityUnknownPeers(t *testing.T) { - peers := testutil.GeneratePeers(4) + peers := random.Peers(4) prt := newPeerResponseTracker() choices := []int{0, 0, 0, 0} @@ -54,7 +54,7 @@ func TestPeerResponseTrackerProbabilityUnknownPeers(t *testing.T) { } 
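For reference, the `ipfs/go-test/random` helpers used throughout these test hunks are drop-in replacements for the deleted `bitswap/internal/testutil` generators. A minimal usage sketch follows; the signatures are inferred from how the calls appear in this diff and should be read as illustrative, not authoritative:

	// Hypothetical standalone example, not part of this diff: exercises the
	// go-test/random helpers that replace bitswap/internal/testutil.
	package main

	import (
		"fmt"

		"github.com/ipfs/go-test/random"
	)

	func main() {
		peers := random.Peers(2)              // was testutil.GeneratePeers(2); random peer.IDs
		cids := random.Cids(2)                // was testutil.GenerateCids(2); random CIDs
		blks := random.BlocksOfSize(10, 1024) // was testutil.GenerateBlocksOfSize(10, 1024); ten random 1 KiB blocks
		id := random.SequenceNext()           // was testutil.GenerateSessionID(); next value of a shared counter
		fmt.Println(len(peers), len(cids), len(blks), id)
	}
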
func TestPeerResponseTrackerProbabilityOneKnownOneUnknownPeer(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) prt := newPeerResponseTracker() prt.receivedBlockFrom(peers[0]) @@ -79,7 +79,7 @@ func TestPeerResponseTrackerProbabilityOneKnownOneUnknownPeer(t *testing.T) { } func TestPeerResponseTrackerProbabilityProportional(t *testing.T) { - peers := testutil.GeneratePeers(3) + peers := random.Peers(3) prt := newPeerResponseTracker() probabilities := []float64{0.1, 0.6, 0.3} diff --git a/bitswap/client/internal/session/sentwantblockstracker_test.go b/bitswap/client/internal/session/sentwantblockstracker_test.go index c74e8c5f8..7e4435fd8 100644 --- a/bitswap/client/internal/session/sentwantblockstracker_test.go +++ b/bitswap/client/internal/session/sentwantblockstracker_test.go @@ -3,12 +3,12 @@ package session import ( "testing" - "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/go-test/random" ) func TestSendWantBlocksTracker(t *testing.T) { - peers := testutil.GeneratePeers(2) - cids := testutil.GenerateCids(2) + peers := random.Peers(2) + cids := random.Cids(2) swbt := newSentWantBlocksTracker() if swbt.haveSentWantBlockTo(peers[0], cids[0]) { diff --git a/bitswap/client/internal/session/session.go b/bitswap/client/internal/session/session.go index b77a82283..6f99dec0e 100644 --- a/bitswap/client/internal/session/session.go +++ b/bitswap/client/internal/session/session.go @@ -20,7 +20,7 @@ import ( ) var ( - log = logging.Logger("bs:sess") + log = logging.Logger("bitswap/session") sflog = log.Desugar() ) diff --git a/bitswap/client/internal/session/session_test.go b/bitswap/client/internal/session/session_test.go index 2eb166f90..1c40b64e1 100644 --- a/bitswap/client/internal/session/session_test.go +++ b/bitswap/client/internal/session/session_test.go @@ -2,6 +2,7 @@ package session import ( "context" + "slices" "sync" "testing" "time" @@ -11,12 +12,14 @@ import ( bspm "github.com/ipfs/boxo/bitswap/client/internal/peermanager" bssim "github.com/ipfs/boxo/bitswap/client/internal/sessioninterestmanager" bsspm "github.com/ipfs/boxo/bitswap/client/internal/sessionpeermanager" - "github.com/ipfs/boxo/bitswap/internal/testutil" "github.com/ipfs/boxo/internal/test" + blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" delay "github.com/ipfs/go-ipfs-delay" + "github.com/ipfs/go-test/random" peer "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" ) type mockSessionMgr struct { @@ -159,7 +162,7 @@ func TestSessionGetBlocks(t *testing.T) { bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() - id := testutil.GenerateSessionID() + id := random.SequenceNext() sm := newMockSessionMgr() session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() @@ -179,34 +182,29 @@ func TestSessionGetBlocks(t *testing.T) { // Should have registered session's interest in blocks intSes := sim.FilterSessionInterested(id, cids) - if !testutil.MatchKeysIgnoreOrder(intSes[0], cids) { - t.Fatal("did not register session interest in blocks") - } + require.ElementsMatch(t, intSes[0], cids, "did not register session interest in blocks") // Should have sent out broadcast request for wants - if len(receivedWantReq.cids) != broadcastLiveWantsLimit { - t.Fatal("did not enqueue correct initial number of wants") - } + require.Len(t, receivedWantReq.cids, broadcastLiveWantsLimit, 
"did not enqueue correct initial number of wants") // Simulate receiving HAVEs from several peers - peers := testutil.GeneratePeers(5) + peers := random.Peers(5) for i, p := range peers { - blk := blks[testutil.IndexOf(blks, receivedWantReq.cids[i])] + blkIndex := slices.IndexFunc(blks, func(blk blocks.Block) bool { + return blk.Cid() == receivedWantReq.cids[i] + }) + blk := blks[blkIndex] session.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{blk.Cid()}, []cid.Cid{}) } time.Sleep(10 * time.Millisecond) // Verify new peers were recorded - if !testutil.MatchPeersIgnoreOrder(fspm.Peers(), peers) { - t.Fatal("peers not recorded by the peer manager") - } + require.ElementsMatch(t, fspm.Peers(), peers, "peers not recorded by the peer manager") // Verify session still wants received blocks _, unwanted := sim.SplitWantedUnwanted(blks) - if len(unwanted) > 0 { - t.Fatal("all blocks should still be wanted") - } + require.Empty(t, unwanted, "all blocks should still be wanted") // Simulate receiving DONT_HAVE for a CID session.ReceiveFrom(peers[0], []cid.Cid{}, []cid.Cid{}, []cid.Cid{blks[0].Cid()}) @@ -215,9 +213,7 @@ func TestSessionGetBlocks(t *testing.T) { // Verify session still wants received blocks _, unwanted = sim.SplitWantedUnwanted(blks) - if len(unwanted) > 0 { - t.Fatal("all blocks should still be wanted") - } + require.Empty(t, unwanted, "all blocks should still be wanted") // Simulate receiving block for a CID session.ReceiveFrom(peers[1], []cid.Cid{blks[0].Cid()}, []cid.Cid{}, []cid.Cid{}) @@ -226,12 +222,9 @@ func TestSessionGetBlocks(t *testing.T) { // Verify session no longer wants received block wanted, unwanted := sim.SplitWantedUnwanted(blks) - if len(unwanted) != 1 || !unwanted[0].Cid().Equals(blks[0].Cid()) { - t.Fatal("session wants block that has already been received") - } - if len(wanted) != len(blks)-1 { - t.Fatal("session wants incorrect number of blocks") - } + require.Len(t, unwanted, 1) + require.True(t, unwanted[0].Cid().Equals(blks[0].Cid()), "session wants block that has already been received") + require.Len(t, wanted, len(blks)-1, "session wants incorrect number of blocks") // Shut down session cancel() @@ -239,9 +232,7 @@ func TestSessionGetBlocks(t *testing.T) { time.Sleep(10 * time.Millisecond) // Verify session was removed - if !sm.removeSessionCalled() { - t.Fatal("expected session to be removed") - } + require.True(t, sm.removeSessionCalled(), "expected session to be removed") } func TestSessionFindMorePeers(t *testing.T) { @@ -254,7 +245,7 @@ func TestSessionFindMorePeers(t *testing.T) { bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() - id := testutil.GenerateSessionID() + id := random.SequenceNext() sm := newMockSessionMgr() session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") session.SetBaseTickDelay(200 * time.Microsecond) @@ -265,9 +256,7 @@ func TestSessionFindMorePeers(t *testing.T) { cids = append(cids, block.Cid()) } _, err := session.GetBlocks(ctx, cids) - if err != nil { - t.Fatal("error getting blocks") - } + require.NoError(t, err, "error getting blocks") // The session should initially broadcast want-haves select { @@ -280,7 +269,7 @@ func TestSessionFindMorePeers(t *testing.T) { time.Sleep(20 * time.Millisecond) // need to make sure some latency registers // or there will be no tick set -- time precision on Windows in go is in the // millisecond range - p := testutil.GeneratePeers(1)[0] + p := random.Peers(1)[0] blk := blks[0] session.ReceiveFrom(p, []cid.Cid{blk.Cid()}, 
[]cid.Cid{}, []cid.Cid{}) @@ -302,9 +291,7 @@ func TestSessionFindMorePeers(t *testing.T) { // Make sure the first block is not included because it has already // been received for _, c := range receivedWantReq.cids { - if c.Equals(cids[0]) { - t.Fatal("should not braodcast block that was already received") - } + require.False(t, c.Equals(cids[0]), "should not broadcast block that was already received") } case <-ctx.Done(): t.Fatal("Never rebroadcast want list") @@ -329,7 +316,7 @@ func TestSessionOnPeersExhausted(t *testing.T) { bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() - id := testutil.GenerateSessionID() + id := random.SequenceNext() sm := newMockSessionMgr() session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() @@ -339,17 +326,13 @@ func TestSessionOnPeersExhausted(t *testing.T) { cids = append(cids, block.Cid()) } _, err := session.GetBlocks(ctx, cids) - if err != nil { - t.Fatal("error getting blocks") - } + require.NoError(t, err, "error getting blocks") // Wait for initial want request receivedWantReq := <-fpm.wantReqs // Should have sent out broadcast request for wants - if len(receivedWantReq.cids) != broadcastLiveWantsLimit { - t.Fatal("did not enqueue correct initial number of wants") - } + require.Len(t, receivedWantReq.cids, broadcastLiveWantsLimit, "did not enqueue correct initial number of wants") // Signal that all peers have send DONT_HAVE for two of the wants session.onPeersExhausted(cids[len(cids)-2:]) @@ -358,9 +341,7 @@ receivedWantReq = <-fpm.wantReqs // Should have sent out broadcast request for wants - if len(receivedWantReq.cids) != 2 { - t.Fatal("did not enqueue correct initial number of wants") - } + require.Len(t, receivedWantReq.cids, 2, "did not enqueue correct initial number of wants") } func TestSessionFailingToGetFirstBlock(t *testing.T) { @@ -375,7 +356,7 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() - id := testutil.GenerateSessionID() + id := random.SequenceNext() sm := newMockSessionMgr() session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, 10*time.Millisecond, delay.Fixed(100*time.Millisecond), "") blockGenerator := blocksutil.NewBlockGenerator() @@ -386,9 +367,7 @@ } startTick := time.Now() _, err := session.GetBlocks(ctx, cids) - if err != nil { - t.Fatal("error getting blocks") - } + require.NoError(t, err, "error getting blocks") // The session should initially broadcast want-haves select { @@ -410,7 +389,9 @@ // Wait for a request to find more peers to occur select { case k := <-fpf.findMorePeersRequested: - if testutil.IndexOf(blks, k) == -1 { + if !slices.ContainsFunc(blks, func(blk blocks.Block) bool { + return blk.Cid() == k + }) { t.Fatal("did not rebroadcast an active want") } case <-ctx.Done(): @@ -472,7 +453,9 @@ // Wait for rebroadcast to occur select { case k := <-fpf.findMorePeersRequested: - if testutil.IndexOf(blks, k) == -1 { + if !slices.ContainsFunc(blks, func(blk blocks.Block) bool { + return blk.Cid() == k + }) { t.Fatal("did not rebroadcast an active want") } case <-ctx.Done(): @@ -488,7 +471,7 @@ func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { bpm := bsbpm.New() notif :=
notifications.New() defer notif.Shutdown() - id := testutil.GenerateSessionID() + id := random.SequenceNext() sm := newMockSessionMgr() // Create a new session with its own context @@ -505,9 +488,7 @@ func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { defer getcancel() getBlocksCh, err := session.GetBlocks(getctx, []cid.Cid{blks[0].Cid()}) - if err != nil { - t.Fatal("error getting blocks") - } + require.NoError(t, err, "error getting blocks") // Cancel the session context sesscancel() @@ -515,9 +496,7 @@ func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { // Expect the GetBlocks() channel to be closed select { case _, ok := <-getBlocksCh: - if ok { - t.Fatal("expected channel to be closed but was not closed") - } + require.False(t, ok, "expected channel to be closed but was not closed") case <-timerCtx.Done(): t.Fatal("expected channel to be closed before timeout") } @@ -525,9 +504,7 @@ func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { time.Sleep(10 * time.Millisecond) // Expect RemoveSession to be called - if !sm.removeSessionCalled() { - t.Fatal("expected onShutdown to be called") - } + require.True(t, sm.removeSessionCalled(), "expected onShutdown to be called") } func TestSessionOnShutdownCalled(t *testing.T) { @@ -538,7 +515,7 @@ func TestSessionOnShutdownCalled(t *testing.T) { bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() - id := testutil.GenerateSessionID() + id := random.SequenceNext() sm := newMockSessionMgr() // Create a new session with its own context @@ -552,9 +529,7 @@ func TestSessionOnShutdownCalled(t *testing.T) { time.Sleep(10 * time.Millisecond) // Expect RemoveSession to be called - if !sm.removeSessionCalled() { - t.Fatal("expected onShutdown to be called") - } + require.True(t, sm.removeSessionCalled(), "expected onShutdown to be called") } func TestSessionReceiveMessageAfterCtxCancel(t *testing.T) { @@ -567,7 +542,7 @@ func TestSessionReceiveMessageAfterCtxCancel(t *testing.T) { bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() - id := testutil.GenerateSessionID() + id := random.SequenceNext() sm := newMockSessionMgr() session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() @@ -575,9 +550,7 @@ func TestSessionReceiveMessageAfterCtxCancel(t *testing.T) { cids := []cid.Cid{blks[0].Cid(), blks[1].Cid()} _, err := session.GetBlocks(ctx, cids) - if err != nil { - t.Fatal("error getting blocks") - } + require.NoError(t, err, "error getting blocks") // Wait for initial want request <-fpm.wantReqs @@ -586,7 +559,7 @@ func TestSessionReceiveMessageAfterCtxCancel(t *testing.T) { cancelCtx() // Simulate receiving block for a CID - peer := testutil.GeneratePeers(1)[0] + peer := random.Peers(1)[0] session.ReceiveFrom(peer, []cid.Cid{blks[0].Cid()}, []cid.Cid{}, []cid.Cid{}) time.Sleep(5 * time.Millisecond) diff --git a/bitswap/client/internal/session/sessionwants_test.go b/bitswap/client/internal/session/sessionwants_test.go index f940ac14f..77430135f 100644 --- a/bitswap/client/internal/session/sessionwants_test.go +++ b/bitswap/client/internal/session/sessionwants_test.go @@ -3,8 +3,8 @@ package session import ( "testing" - "github.com/ipfs/boxo/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" ) func TestEmptySessionWants(t *testing.T) { @@ -30,8 +30,8 @@ func TestEmptySessionWants(t *testing.T) { func TestSessionWants(t *testing.T) { sw := 
newSessionWants(5) - cids := testutil.GenerateCids(10) - others := testutil.GenerateCids(1) + cids := random.Cids(10) + others := random.Cids(1) // Add 10 new wants // toFetch Live @@ -111,7 +111,7 @@ func TestSessionWants(t *testing.T) { func TestPrepareBroadcast(t *testing.T) { sw := newSessionWants(3) - cids := testutil.GenerateCids(10) + cids := random.Cids(10) // Add 6 new wants // toFetch Live @@ -171,7 +171,7 @@ func TestPrepareBroadcast(t *testing.T) { // Test that even after GC broadcast returns correct wants func TestPrepareBroadcastAfterGC(t *testing.T) { sw := newSessionWants(5) - cids := testutil.GenerateCids(liveWantsOrderGCLimit * 2) + cids := random.Cids(liveWantsOrderGCLimit * 2) sw.BlocksRequested(cids) diff --git a/bitswap/client/internal/session/sessionwantsender.go b/bitswap/client/internal/session/sessionwantsender.go index 390fdf29d..1beefeb94 100644 --- a/bitswap/client/internal/session/sessionwantsender.go +++ b/bitswap/client/internal/session/sessionwantsender.go @@ -455,6 +455,7 @@ func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { go func() { for p := range prunePeers { // Peer doesn't have anything we want, so remove it + sws.bpm.RemovePeer(p) log.Infof("peer %s sent too many dont haves, removing from session %d", p, sws.ID()) sws.SignalAvailability(p, false) } diff --git a/bitswap/client/internal/session/sessionwantsender_test.go b/bitswap/client/internal/session/sessionwantsender_test.go index 86a930f61..ac094ac06 100644 --- a/bitswap/client/internal/session/sessionwantsender_test.go +++ b/bitswap/client/internal/session/sessionwantsender_test.go @@ -9,9 +9,10 @@ import ( bsbpm "github.com/ipfs/boxo/bitswap/client/internal/blockpresencemanager" bspm "github.com/ipfs/boxo/bitswap/client/internal/peermanager" bsspm "github.com/ipfs/boxo/bitswap/client/internal/sessionpeermanager" - "github.com/ipfs/boxo/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" peer "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" ) type sentWants struct { @@ -98,7 +99,8 @@ func (pm *mockPeerManager) waitNextWants() map[peer.ID]*sentWants { pm.lk.Lock() defer pm.lk.Unlock() - nw := make(map[peer.ID]*sentWants) + + nw := make(map[peer.ID]*sentWants, len(pm.peerSends)) for p, sentWants := range pm.peerSends { nw[p] = sentWants } @@ -108,10 +110,7 @@ func (pm *mockPeerManager) waitNextWants() map[peer.ID]*sentWants { func (pm *mockPeerManager) clearWants() { pm.lk.Lock() defer pm.lk.Unlock() - - for p := range pm.peerSends { - delete(pm.peerSends, p) - } + clear(pm.peerSends) } type exhaustedPeers struct { @@ -141,10 +140,10 @@ func (ep *exhaustedPeers) exhausted() []cid.Cid { } func TestSendWants(t *testing.T) { - cids := testutil.GenerateCids(4) - peers := testutil.GeneratePeers(1) + cids := random.Cids(4) + peers := random.Peers(1) peerA := peers[0] - sid := uint64(1) + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -168,23 +167,17 @@ func TestSendWants(t *testing.T) { // Should have sent // peerA: want-block cid0, cid1 sw, ok := peerSends[peerA] - if !ok { - t.Fatal("Nothing sent to peer") - } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), blkCids0) { - t.Fatal("Wrong keys") - } - if len(sw.wantHavesKeys()) > 0 { - t.Fatal("Expecting no want-haves") - } + require.True(t, ok, "Nothing sent to peer") + require.ElementsMatch(t, sw.wantBlocksKeys(), blkCids0, "Wrong keys") + require.Empty(t, sw.wantHavesKeys(), 
"Expecting no want-haves") } func TestSendsWantBlockToOnePeerOnly(t *testing.T) { - cids := testutil.GenerateCids(4) - peers := testutil.GeneratePeers(2) + cids := random.Cids(4) + peers := random.Peers(2) peerA := peers[0] peerB := peers[1] - sid := uint64(1) + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -208,12 +201,8 @@ func TestSendsWantBlockToOnePeerOnly(t *testing.T) { // Should have sent // peerA: want-block cid0, cid1 sw, ok := peerSends[peerA] - if !ok { - t.Fatal("Nothing sent to peer") - } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), blkCids0) { - t.Fatal("Wrong keys") - } + require.True(t, ok, "Nothing sent to peer") + require.ElementsMatch(t, sw.wantBlocksKeys(), blkCids0, "Wrong keys") // Clear wants (makes keeping track of what's been sent easier) pm.clearWants() @@ -228,23 +217,17 @@ func TestSendsWantBlockToOnePeerOnly(t *testing.T) { // peerB. Should have sent // peerB: want-have cid0, cid1 sw, ok = peerSends[peerB] - if !ok { - t.Fatal("Nothing sent to peer") - } - if sw.wantBlocks.Len() > 0 { - t.Fatal("Expecting no want-blocks") - } - if !testutil.MatchKeysIgnoreOrder(sw.wantHavesKeys(), blkCids0) { - t.Fatal("Wrong keys") - } + require.True(t, ok, "Nothing sent to peer") + require.Zero(t, sw.wantBlocks.Len(), "Expecting no want-blocks") + require.ElementsMatch(t, sw.wantHavesKeys(), blkCids0, "Wrong keys") } func TestReceiveBlock(t *testing.T) { - cids := testutil.GenerateCids(2) - peers := testutil.GeneratePeers(2) + cids := random.Cids(2) + peers := random.Peers(2) peerA := peers[0] peerB := peers[1] - sid := uint64(1) + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -267,12 +250,8 @@ func TestReceiveBlock(t *testing.T) { // Should have sent // peerA: want-block cid0, cid1 sw, ok := peerSends[peerA] - if !ok { - t.Fatal("Nothing sent to peer") - } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), cids) { - t.Fatal("Wrong keys") - } + require.True(t, ok, "Nothing sent to peer") + require.ElementsMatch(t, sw.wantBlocksKeys(), cids, "Wrong keys") // Clear wants (makes keeping track of what's been sent easier) pm.clearWants() @@ -292,9 +271,7 @@ func TestReceiveBlock(t *testing.T) { // (should not have sent want-block for cid0 because block0 has already // been received) sw, ok = peerSends[peerB] - if !ok { - t.Fatal("Nothing sent to peer") - } + require.True(t, ok, "Nothing sent to peer") wb := sw.wantBlocksKeys() if len(wb) != 1 || !wb[0].Equals(cids[1]) { t.Fatal("Wrong keys", wb) @@ -302,8 +279,8 @@ func TestReceiveBlock(t *testing.T) { } func TestCancelWants(t *testing.T) { - cids := testutil.GenerateCids(4) - sid := uint64(1) + cids := random.Cids(4) + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -330,17 +307,15 @@ func TestCancelWants(t *testing.T) { // Should have sent cancels for cid0, cid2 sent := swc.cancelled() - if !testutil.MatchKeysIgnoreOrder(sent, cancelCids) { - t.Fatal("Wrong keys") - } + require.ElementsMatch(t, sent, cancelCids, "Wrong keys") } func TestRegisterSessionWithPeerManager(t *testing.T) { - cids := testutil.GenerateCids(2) - peers := testutil.GeneratePeers(2) + cids := random.Cids(2) + peers := random.Peers(2) peerA := peers[0] peerB := peers[1] - sid := uint64(1) + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -359,9 +334,7 @@ func 
TestRegisterSessionWithPeerManager(t *testing.T) { time.Sleep(10 * time.Millisecond) // Expect session to have been registered with PeerManager - if !pm.has(peerA, sid) { - t.Fatal("Expected HAVE to register session with PeerManager") - } + require.True(t, pm.has(peerA, sid), "Expected HAVE to register session with PeerManager") // peerB: block cid1 spm.Update(peerB, cids[1:], nil, nil) @@ -370,18 +343,16 @@ func TestRegisterSessionWithPeerManager(t *testing.T) { time.Sleep(10 * time.Millisecond) // Expect session to have been registered with PeerManager - if !pm.has(peerB, sid) { - t.Fatal("Expected HAVE to register session with PeerManager") - } + require.True(t, pm.has(peerB, sid), "Expected HAVE to register session with PeerManager") } func TestProtectConnFirstPeerToSendWantedBlock(t *testing.T) { - cids := testutil.GenerateCids(2) - peers := testutil.GeneratePeers(3) + cids := random.Cids(2) + peers := random.Peers(3) peerA := peers[0] peerB := peers[1] peerC := peers[2] - sid := uint64(1) + const sid = uint64(1) pm := newMockPeerManager() fpt := newFakePeerTagger() fpm := bsspm.New(1, fpt) @@ -432,11 +403,11 @@ func TestProtectConnFirstPeerToSendWantedBlock(t *testing.T) { } func TestPeerUnavailable(t *testing.T) { - cids := testutil.GenerateCids(2) - peers := testutil.GeneratePeers(2) + cids := random.Cids(2) + peers := random.Peers(2) peerA := peers[0] peerB := peers[1] - sid := uint64(1) + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -459,12 +430,8 @@ func TestPeerUnavailable(t *testing.T) { // Should have sent // peerA: want-block cid0, cid1 sw, ok := peerSends[peerA] - if !ok { - t.Fatal("Nothing sent to peer") - } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), cids) { - t.Fatal("Wrong keys") - } + require.True(t, ok, "Nothing sent to peer") + require.ElementsMatch(t, sw.wantBlocksKeys(), cids, "Wrong keys") // Clear wants (makes keeping track of what's been sent easier) pm.clearWants() @@ -490,20 +457,16 @@ func TestPeerUnavailable(t *testing.T) { // Should now have sent want-block cid0, cid1 to peerB sw, ok = peerSends[peerB] - if !ok { - t.Fatal("Nothing sent to peer") - } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), cids) { - t.Fatal("Wrong keys") - } + require.True(t, ok, "Nothing sent to peer") + require.ElementsMatch(t, sw.wantBlocksKeys(), cids, "Wrong keys") } func TestPeersExhausted(t *testing.T) { - cids := testutil.GenerateCids(3) - peers := testutil.GeneratePeers(2) + cids := random.Cids(3) + peers := random.Peers(2) peerA := peers[0] peerB := peers[1] - sid := uint64(1) + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -531,9 +494,7 @@ func TestPeersExhausted(t *testing.T) { // All available peers (peer A) have sent us a DONT_HAVE for cid1, // so expect that onPeersExhausted() will be called with cid1 - if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[1]}) { - t.Fatal("Wrong keys") - } + require.ElementsMatch(t, ep.exhausted(), []cid.Cid{cids[1]}, "Wrong keys") // Clear exhausted cids ep.clear() @@ -566,9 +527,7 @@ func TestPeersExhausted(t *testing.T) { // All available peers (peer A and peer B) have sent us a DONT_HAVE for // cid2, so expect that onPeersExhausted() will be called with cid2 - if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[2]}) { - t.Fatal("Wrong keys") - } + require.ElementsMatch(t, ep.exhausted(), []cid.Cid{cids[2]}, "Wrong keys") } // Tests that when @@ 
-576,11 +535,11 @@ func TestPeersExhausted(t *testing.T) { // - the remaining peer becomes unavailable // onPeersExhausted should be sent for that CID func TestPeersExhaustedLastWaitingPeerUnavailable(t *testing.T) { - cids := testutil.GenerateCids(2) - peers := testutil.GeneratePeers(2) + cids := random.Cids(2) + peers := random.Peers(2) peerA := peers[0] peerB := peers[1] - sid := uint64(1) + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -617,19 +576,17 @@ func TestPeersExhaustedLastWaitingPeerUnavailable(t *testing.T) { // All remaining peers (peer A) have sent us a DONT_HAVE for cid1, // so expect that onPeersExhausted() will be called with cid1 - if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[1]}) { - t.Fatal("Wrong keys") - } + require.ElementsMatch(t, ep.exhausted(), []cid.Cid{cids[1]}, "Wrong keys") } // Tests that when all the peers are removed from the session // onPeersExhausted should be called with all outstanding CIDs func TestPeersExhaustedAllPeersUnavailable(t *testing.T) { - cids := testutil.GenerateCids(3) - peers := testutil.GeneratePeers(2) + cids := random.Cids(3) + peers := random.Peers(2) peerA := peers[0] peerB := peers[1] - sid := uint64(1) + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -661,15 +618,13 @@ func TestPeersExhaustedAllPeersUnavailable(t *testing.T) { // Expect that onPeersExhausted() will be called with all cids for blocks // that have not been received - if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[1], cids[2]}) { - t.Fatal("Wrong keys") - } + require.ElementsMatch(t, ep.exhausted(), []cid.Cid{cids[1], cids[2]}, "Wrong keys") } func TestConsecutiveDontHaveLimit(t *testing.T) { - cids := testutil.GenerateCids(peerDontHaveLimit + 10) - p := testutil.GeneratePeers(1)[0] - sid := uint64(1) + cids := random.Cids(peerDontHaveLimit + 10) + p := random.Peers(1)[0] + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -691,9 +646,7 @@ func TestConsecutiveDontHaveLimit(t *testing.T) { time.Sleep(10 * time.Millisecond) // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } + require.True(t, fpm.HasPeer(p), "Expected peer to be available") // Receive DONT_HAVEs from peer that do not exceed limit for _, c := range cids[1:peerDontHaveLimit] { @@ -705,9 +658,7 @@ func TestConsecutiveDontHaveLimit(t *testing.T) { time.Sleep(20 * time.Millisecond) // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } + require.True(t, fpm.HasPeer(p), "Expected peer to be available") // Receive DONT_HAVEs from peer that exceed limit for _, c := range cids[peerDontHaveLimit:] { @@ -719,15 +670,13 @@ func TestConsecutiveDontHaveLimit(t *testing.T) { time.Sleep(20 * time.Millisecond) // Session should remove peer - if has := fpm.HasPeer(p); has { - t.Fatal("Expected peer not to be available") - } + require.False(t, fpm.HasPeer(p), "Expected peer not to be available") } func TestConsecutiveDontHaveLimitInterrupted(t *testing.T) { - cids := testutil.GenerateCids(peerDontHaveLimit + 10) - p := testutil.GeneratePeers(1)[0] - sid := uint64(1) + cids := random.Cids(peerDontHaveLimit + 10) + p := random.Peers(1)[0] + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -749,9 +698,7 @@ func 
TestConsecutiveDontHaveLimitInterrupted(t *testing.T) { time.Sleep(5 * time.Millisecond) // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } + require.True(t, fpm.HasPeer(p), "Expected peer to be available") // Receive DONT_HAVE then HAVE then DONT_HAVE from peer, // where consecutive DONT_HAVEs would have exceeded limit @@ -776,15 +723,13 @@ func TestConsecutiveDontHaveLimitInterrupted(t *testing.T) { time.Sleep(5 * time.Millisecond) // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } + require.True(t, fpm.HasPeer(p), "Expected peer to be available") } func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { - cids := testutil.GenerateCids(peerDontHaveLimit + 10) - p := testutil.GeneratePeers(1)[0] - sid := uint64(1) + cids := random.Cids(peerDontHaveLimit + 10) + p := random.Peers(1)[0] + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -806,9 +751,7 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { time.Sleep(5 * time.Millisecond) // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } + require.True(t, fpm.HasPeer(p), "Expected peer to be available") // Receive DONT_HAVEs from peer that exceed limit for _, c := range cids[1 : peerDontHaveLimit+2] { @@ -820,9 +763,7 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { time.Sleep(10 * time.Millisecond) // Session should remove peer - if has := fpm.HasPeer(p); has { - t.Fatal("Expected peer not to be available") - } + require.False(t, fpm.HasPeer(p), "Expected peer not to be available") // Receive a HAVE from peer (adds it back into the session) bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) @@ -832,11 +773,9 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { time.Sleep(10 * time.Millisecond) // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } + require.True(t, fpm.HasPeer(p), "Expected peer to be available") - cids2 := testutil.GenerateCids(peerDontHaveLimit + 10) + cids2 := random.Cids(peerDontHaveLimit + 10) // Receive DONT_HAVEs from peer that don't exceed limit for _, c := range cids2[1:peerDontHaveLimit] { @@ -848,9 +787,7 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { time.Sleep(10 * time.Millisecond) // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } + require.True(t, fpm.HasPeer(p), "Expected peer to be available") // Receive DONT_HAVEs from peer that exceed limit for _, c := range cids2[peerDontHaveLimit:] { @@ -862,15 +799,13 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { time.Sleep(10 * time.Millisecond) // Session should remove peer - if has := fpm.HasPeer(p); has { - t.Fatal("Expected peer not to be available") - } + require.False(t, fpm.HasPeer(p), "Expected peer not to be available") } func TestConsecutiveDontHaveDontRemoveIfHasWantedBlock(t *testing.T) { - cids := testutil.GenerateCids(peerDontHaveLimit + 10) - p := testutil.GeneratePeers(1)[0] - sid := uint64(1) + cids := random.Cids(peerDontHaveLimit + 10) + p := random.Peers(1)[0] + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -893,9 +828,7 @@ func TestConsecutiveDontHaveDontRemoveIfHasWantedBlock(t *testing.T) { time.Sleep(10 * time.Millisecond) // Peer 
should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } + require.True(t, fpm.HasPeer(p), "Expected peer to be available") // Receive DONT_HAVEs from peer that exceed limit for _, c := range cids[1 : peerDontHaveLimit+5] { @@ -908,7 +841,5 @@ func TestConsecutiveDontHaveDontRemoveIfHasWantedBlock(t *testing.T) { // Peer should still be available because it has a block that we want. // (We received a HAVE for cid 0 but didn't yet receive the block) - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } + require.True(t, fpm.HasPeer(p), "Expected peer to be available") } diff --git a/bitswap/client/internal/session/wantinfo_test.go b/bitswap/client/internal/session/wantinfo_test.go index ad42b174d..a21670d8d 100644 --- a/bitswap/client/internal/session/wantinfo_test.go +++ b/bitswap/client/internal/session/wantinfo_test.go @@ -3,7 +3,7 @@ package session import ( "testing" - "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/go-test/random" ) func TestEmptyWantInfo(t *testing.T) { @@ -15,7 +15,7 @@ func TestEmptyWantInfo(t *testing.T) { } func TestSetPeerBlockPresence(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) wp := newWantInfo(newPeerResponseTracker()) wp.setPeerBlockPresence(peers[0], BPUnknown) @@ -35,7 +35,7 @@ func TestSetPeerBlockPresence(t *testing.T) { } func TestSetPeerBlockPresenceBestLower(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) wp := newWantInfo(newPeerResponseTracker()) wp.setPeerBlockPresence(peers[0], BPHave) @@ -55,7 +55,7 @@ func TestSetPeerBlockPresenceBestLower(t *testing.T) { } func TestRemoveThenSetDontHave(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) wp := newWantInfo(newPeerResponseTracker()) wp.setPeerBlockPresence(peers[0], BPUnknown) diff --git a/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go b/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go index f2b4d8aa0..a9779e297 100644 --- a/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go +++ b/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go @@ -3,15 +3,15 @@ package sessioninterestmanager import ( "testing" - "github.com/ipfs/boxo/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" ) func TestEmpty(t *testing.T) { sim := New() - ses := uint64(1) - cids := testutil.GenerateCids(2) + const ses = 1 + cids := random.Cids(2) res := sim.FilterSessionInterested(ses, cids) if len(res) != 1 || len(res[0]) > 0 { t.Fatal("Expected no interest") @@ -24,10 +24,10 @@ func TestEmpty(t *testing.T) { func TestBasic(t *testing.T) { sim := New() - ses1 := uint64(1) - ses2 := uint64(2) - cids1 := testutil.GenerateCids(2) - cids2 := append(testutil.GenerateCids(1), cids1[1]) + const ses1 = 1 + const ses2 = 2 + cids1 := random.Cids(2) + cids2 := append(random.Cids(1), cids1[1]) sim.RecordSessionInterest(ses1, cids1) res := sim.FilterSessionInterested(ses1, cids1) @@ -59,8 +59,8 @@ func TestBasic(t *testing.T) { func TestInterestedSessions(t *testing.T) { sim := New() - ses := uint64(1) - cids := testutil.GenerateCids(3) + const ses = 1 + cids := random.Cids(3) sim.RecordSessionInterest(ses, cids[0:2]) if len(sim.InterestedSessions(cids, []cid.Cid{}, []cid.Cid{})) != 1 { @@ -86,10 +86,10 @@ func TestInterestedSessions(t *testing.T) { func TestRemoveSession(t *testing.T) { sim := New() - ses1 
:= uint64(1) - ses2 := uint64(2) - cids1 := testutil.GenerateCids(2) - cids2 := append(testutil.GenerateCids(1), cids1[1]) + const ses1 = 1 + const ses2 = 2 + cids1 := random.Cids(2) + cids2 := append(random.Cids(1), cids1[1]) sim.RecordSessionInterest(ses1, cids1) sim.RecordSessionInterest(ses2, cids2) sim.RemoveSession(ses1) @@ -114,10 +114,10 @@ func TestRemoveSession(t *testing.T) { func TestRemoveSessionInterested(t *testing.T) { sim := New() - ses1 := uint64(1) - ses2 := uint64(2) - cids1 := testutil.GenerateCids(2) - cids2 := append(testutil.GenerateCids(1), cids1[1]) + const ses1 = uint64(1) + const ses2 = uint64(2) + cids1 := random.Cids(2) + cids2 := append(random.Cids(1), cids1[1]) sim.RecordSessionInterest(ses1, cids1) sim.RecordSessionInterest(ses2, cids2) @@ -148,10 +148,10 @@ func TestRemoveSessionInterested(t *testing.T) { } func TestSplitWantedUnwanted(t *testing.T) { - blks := testutil.GenerateBlocksOfSize(3, 1024) + blks := random.BlocksOfSize(3, 1024) sim := New() - ses1 := uint64(1) - ses2 := uint64(2) + const ses1 = 1 + const ses2 = 2 var cids []cid.Cid for _, b := range blks { diff --git a/bitswap/client/internal/sessionmanager/sessionmanager_test.go b/bitswap/client/internal/sessionmanager/sessionmanager_test.go index 8b6c4207c..e8259b1d8 100644 --- a/bitswap/client/internal/sessionmanager/sessionmanager_test.go +++ b/bitswap/client/internal/sessionmanager/sessionmanager_test.go @@ -12,11 +12,11 @@ import ( bspm "github.com/ipfs/boxo/bitswap/client/internal/peermanager" bssession "github.com/ipfs/boxo/bitswap/client/internal/session" bssim "github.com/ipfs/boxo/bitswap/client/internal/sessioninterestmanager" - "github.com/ipfs/boxo/bitswap/internal/testutil" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" peer "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" ) type fakeSession struct { @@ -153,9 +153,7 @@ func TestReceiveFrom(t *testing.T) { t.Fatal("should have received want-haves but didn't") } - if len(pm.cancelled()) != 1 { - t.Fatal("should have sent cancel for received blocks") - } + require.Len(t, pm.cancelled(), 1, "should have sent cancel for received blocks") } func TestReceiveBlocksWhenManagerShutdown(t *testing.T) { @@ -246,19 +244,13 @@ func TestShutdown(t *testing.T) { sim.RecordSessionInterest(firstSession.ID(), cids) sm.ReceiveFrom(ctx, p, []cid.Cid{}, []cid.Cid{}, cids) - if !bpm.HasKey(block.Cid()) { - t.Fatal("expected cid to be added to block presence manager") - } + require.True(t, bpm.HasKey(block.Cid()), "expected cid to be added to block presence manager") sm.Shutdown() // wait for cleanup time.Sleep(10 * time.Millisecond) - if bpm.HasKey(block.Cid()) { - t.Fatal("expected cid to be removed from block presence manager") - } - if !testutil.MatchKeysIgnoreOrder(pm.cancelled(), cids) { - t.Fatal("expected cancels to be sent") - } + require.False(t, bpm.HasKey(block.Cid()), "expected cid to be removed from block presence manager") + require.ElementsMatch(t, pm.cancelled(), cids, "expected cancels to be sent") } diff --git a/bitswap/client/internal/sessionpeermanager/sessionpeermanager.go b/bitswap/client/internal/sessionpeermanager/sessionpeermanager.go index f15be86b4..1832e9c7f 100644 --- a/bitswap/client/internal/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/client/internal/sessionpeermanager/sessionpeermanager.go @@ -9,7 +9,7 @@ import ( peer "github.com/libp2p/go-libp2p/core/peer" ) -var log = logging.Logger("bs:sprmgr") +var log = 
logging.Logger("bitswap/client/sesspeermgr") const ( // Connection Manager tag value for session peers. Indicates to connection diff --git a/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go b/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go index 0d9275579..ce7a24872 100644 --- a/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go @@ -4,7 +4,7 @@ import ( "sync" "testing" - "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/go-test/random" peer "github.com/libp2p/go-libp2p/core/peer" ) @@ -78,7 +78,7 @@ func (fpt *fakePeerTagger) isProtected(p peer.ID) bool { } func TestAddPeers(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) spm := New(1, &fakePeerTagger{}) isNew := spm.AddPeer(peers[0]) @@ -98,7 +98,7 @@ func TestAddPeers(t *testing.T) { } func TestRemovePeers(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) spm := New(1, &fakePeerTagger{}) existed := spm.RemovePeer(peers[0]) @@ -124,7 +124,7 @@ func TestRemovePeers(t *testing.T) { } func TestHasPeers(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) spm := New(1, &fakePeerTagger{}) if spm.HasPeers() { @@ -153,7 +153,7 @@ func TestHasPeers(t *testing.T) { } func TestHasPeer(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) spm := New(1, &fakePeerTagger{}) if spm.HasPeer(peers[0]) { @@ -181,7 +181,7 @@ func TestHasPeer(t *testing.T) { } func TestPeers(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) spm := New(1, &fakePeerTagger{}) if len(spm.Peers()) > 0 { @@ -205,7 +205,7 @@ func TestPeers(t *testing.T) { } func TestPeersDiscovered(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) spm := New(1, &fakePeerTagger{}) if spm.PeersDiscovered() { @@ -224,7 +224,7 @@ func TestPeersDiscovered(t *testing.T) { } func TestPeerTagging(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) fpt := &fakePeerTagger{} spm := New(1, fpt) @@ -250,7 +250,7 @@ func TestPeerTagging(t *testing.T) { } func TestProtectConnection(t *testing.T) { - peers := testutil.GeneratePeers(1) + peers := random.Peers(1) peerA := peers[0] fpt := newFakePeerTagger() spm := New(1, fpt) @@ -276,7 +276,7 @@ func TestProtectConnection(t *testing.T) { } func TestShutdown(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) fpt := newFakePeerTagger() spm := New(1, fpt) diff --git a/bitswap/internal/testutil/testutil.go b/bitswap/internal/testutil/testutil.go deleted file mode 100644 index 3fc2a8e0a..000000000 --- a/bitswap/internal/testutil/testutil.go +++ /dev/null @@ -1,142 +0,0 @@ -package testutil - -import ( - "crypto/rand" - "strconv" - - "github.com/ipfs/boxo/bitswap/client/wantlist" - bsmsg "github.com/ipfs/boxo/bitswap/message" - blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" - blocksutil "github.com/ipfs/go-ipfs-blocksutil" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -var ( - blockGenerator = blocksutil.NewBlockGenerator() - prioritySeq int32 -) - -// GenerateBlocksOfSize generates a series of blocks of the given byte size -func GenerateBlocksOfSize(n int, size int64) []blocks.Block { - generatedBlocks := make([]blocks.Block, 0, n) - for i := 0; i < n; i++ { - // rand.Read never errors - buf := make([]byte, size) - rand.Read(buf) - b := blocks.NewBlock(buf) - 
generatedBlocks = append(generatedBlocks, b) - - } - return generatedBlocks -} - -// GenerateCids produces n content identifiers. -func GenerateCids(n int) []cid.Cid { - cids := make([]cid.Cid, 0, n) - for i := 0; i < n; i++ { - c := blockGenerator.Next().Cid() - cids = append(cids, c) - } - return cids -} - -// GenerateMessageEntries makes fake bitswap message entries. -func GenerateMessageEntries(n int, isCancel bool) []bsmsg.Entry { - bsmsgs := make([]bsmsg.Entry, 0, n) - for i := 0; i < n; i++ { - prioritySeq++ - msg := bsmsg.Entry{ - Entry: wantlist.NewRefEntry(blockGenerator.Next().Cid(), prioritySeq), - Cancel: isCancel, - } - bsmsgs = append(bsmsgs, msg) - } - return bsmsgs -} - -var peerSeq int - -// GeneratePeers creates n peer ids. -func GeneratePeers(n int) []peer.ID { - peerIds := make([]peer.ID, 0, n) - for i := 0; i < n; i++ { - peerSeq++ - p := peer.ID(strconv.Itoa(peerSeq)) - peerIds = append(peerIds, p) - } - return peerIds -} - -var nextSession uint64 - -// GenerateSessionID make a unit session identifier. -func GenerateSessionID() uint64 { - nextSession++ - return uint64(nextSession) -} - -// ContainsPeer returns true if a peer is found n a list of peers. -func ContainsPeer(peers []peer.ID, p peer.ID) bool { - for _, n := range peers { - if p == n { - return true - } - } - return false -} - -// IndexOf returns the index of a given cid in an array of blocks -func IndexOf(blks []blocks.Block, c cid.Cid) int { - for i, n := range blks { - if n.Cid() == c { - return i - } - } - return -1 -} - -// ContainsBlock returns true if a block is found n a list of blocks -func ContainsBlock(blks []blocks.Block, block blocks.Block) bool { - return IndexOf(blks, block.Cid()) != -1 -} - -// ContainsKey returns true if a key is found n a list of CIDs. 
-func ContainsKey(ks []cid.Cid, c cid.Cid) bool { - for _, k := range ks { - if c == k { - return true - } - } - return false -} - -// MatchKeysIgnoreOrder returns true if the lists of CIDs match (even if -// they're in a different order) -func MatchKeysIgnoreOrder(ks1 []cid.Cid, ks2 []cid.Cid) bool { - if len(ks1) != len(ks2) { - return false - } - - for _, k := range ks1 { - if !ContainsKey(ks2, k) { - return false - } - } - return true -} - -// MatchPeersIgnoreOrder returns true if the lists of peers match (even if -// they're in a different order) -func MatchPeersIgnoreOrder(ps1 []peer.ID, ps2 []peer.ID) bool { - if len(ps1) != len(ps2) { - return false - } - - for _, p := range ps1 { - if !ContainsPeer(ps2, p) { - return false - } - } - return true -} diff --git a/bitswap/internal/testutil/testutil_test.go b/bitswap/internal/testutil/testutil_test.go deleted file mode 100644 index c4dc1af15..000000000 --- a/bitswap/internal/testutil/testutil_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package testutil - -import ( - "testing" - - blocks "github.com/ipfs/go-block-format" -) - -func TestGenerateBlocksOfSize(t *testing.T) { - for _, b1 := range GenerateBlocksOfSize(10, 100) { - b2 := blocks.NewBlock(b1.RawData()) - if b2.Cid() != b1.Cid() { - t.Fatal("block CIDs mismatch") - } - } -} diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 6b9d787e7..a0a45970b 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -182,15 +182,9 @@ func (m *impl) Clone() BitSwapMessage { // Reset the values in the message back to defaults, so it can be reused func (m *impl) Reset(full bool) { m.full = full - for k := range m.wantlist { - delete(m.wantlist, k) - } - for k := range m.blocks { - delete(m.blocks, k) - } - for k := range m.blockPresences { - delete(m.blockPresences, k) - } + clear(m.wantlist) + clear(m.blocks) + clear(m.blockPresences) m.pendingBytes = 0 } @@ -253,25 +247,31 @@ func (m *impl) Empty() bool { } func (m *impl) Wantlist() []Entry { - out := make([]Entry, 0, len(m.wantlist)) + out := make([]Entry, len(m.wantlist)) + var i int for _, e := range m.wantlist { - out = append(out, *e) + out[i] = *e + i++ } return out } func (m *impl) Blocks() []blocks.Block { - bs := make([]blocks.Block, 0, len(m.blocks)) + bs := make([]blocks.Block, len(m.blocks)) + var i int for _, block := range m.blocks { - bs = append(bs, block) + bs[i] = block + i++ } return bs } func (m *impl) BlockPresences() []BlockPresence { - bps := make([]BlockPresence, 0, len(m.blockPresences)) + bps := make([]BlockPresence, len(m.blockPresences)) + var i int for c, t := range m.blockPresences { - bps = append(bps, BlockPresence{c, t}) + bps[i] = BlockPresence{c, t} + i++ } return bps } diff --git a/bitswap/network/connecteventmanager_test.go b/bitswap/network/connecteventmanager_test.go index bb3c52266..3107efbcf 100644 --- a/bitswap/network/connecteventmanager_test.go +++ b/bitswap/network/connecteventmanager_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/go-test/random" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" ) @@ -46,7 +46,7 @@ func wait(t *testing.T, c *connectEventManager) { func TestConnectEventManagerConnectDisconnect(t *testing.T) { connListener := newMockConnListener() - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) cem := newConnectEventManager(connListener) cem.Start() t.Cleanup(cem.Stop) @@ -85,7 +85,7 @@ func TestConnectEventManagerConnectDisconnect(t 
*testing.T) { func TestConnectEventManagerMarkUnresponsive(t *testing.T) { connListener := newMockConnListener() - p := testutil.GeneratePeers(1)[0] + p := random.Peers(1)[0] cem := newConnectEventManager(connListener) cem.Start() t.Cleanup(cem.Stop) @@ -134,7 +134,7 @@ func TestConnectEventManagerMarkUnresponsive(t *testing.T) { func TestConnectEventManagerDisconnectAfterMarkUnresponsive(t *testing.T) { connListener := newMockConnListener() - p := testutil.GeneratePeers(1)[0] + p := random.Peers(1)[0] cem := newConnectEventManager(connListener) cem.Start() t.Cleanup(cem.Stop) diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 962bc2588..6ea0fc525 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -19,7 +19,7 @@ var ( ProtocolBitswapNoVers = internal.ProtocolBitswapNoVers // ProtocolBitswapOneZero is the prefix for the legacy bitswap protocol ProtocolBitswapOneZero = internal.ProtocolBitswapOneZero - // ProtocolBitswapOneOne is the the prefix for version 1.1.0 + // ProtocolBitswapOneOne is the prefix for version 1.1.0 ProtocolBitswapOneOne = internal.ProtocolBitswapOneOne // ProtocolBitswap is the current version of the bitswap protocol: 1.2.0 ProtocolBitswap = internal.ProtocolBitswap diff --git a/bitswap/network/internal/default.go b/bitswap/network/internal/default.go index 13f4936a8..ee5a8974e 100644 --- a/bitswap/network/internal/default.go +++ b/bitswap/network/internal/default.go @@ -9,7 +9,7 @@ var ( ProtocolBitswapNoVers protocol.ID = "/ipfs/bitswap" // ProtocolBitswapOneZero is the prefix for the legacy bitswap protocol ProtocolBitswapOneZero protocol.ID = "/ipfs/bitswap/1.0.0" - // ProtocolBitswapOneOne is the the prefix for version 1.1.0 + // ProtocolBitswapOneOne is the prefix for version 1.1.0 ProtocolBitswapOneOne protocol.ID = "/ipfs/bitswap/1.1.0" // ProtocolBitswap is the current version of the bitswap protocol: 1.2.0 ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.2.0" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 422249952..7acc3abcc 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -26,9 +26,7 @@ import ( "github.com/multiformats/go-multistream" ) -var log = logging.Logger("bitswap_network") - -var connectTimeout = time.Second * 5 +var log = logging.Logger("bitswap/network") var ( maxSendTimeout = 2 * time.Minute @@ -323,10 +321,7 @@ func (bsnet *impl) SendMessage( p peer.ID, outgoing bsmsg.BitSwapMessage, ) error { - tctx, cancel := context.WithTimeout(ctx, connectTimeout) - defer cancel() - - s, err := bsnet.newStreamToPeer(tctx, p) + s, err := bsnet.newStreamToPeer(ctx, p) if err != nil { return err } diff --git a/bitswap/server/internal/decision/blockstoremanager_test.go b/bitswap/server/internal/decision/blockstoremanager_test.go index 1192873f3..f65c88e83 100644 --- a/bitswap/server/internal/decision/blockstoremanager_test.go +++ b/bitswap/server/internal/decision/blockstoremanager_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - "github.com/ipfs/boxo/bitswap/internal/testutil" blockstore "github.com/ipfs/boxo/blockstore" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" @@ -16,6 +15,7 @@ import ( ds_sync "github.com/ipfs/go-datastore/sync" delay "github.com/ipfs/go-ipfs-delay" "github.com/ipfs/go-metrics-interface" + "github.com/ipfs/go-test/random" ) func newBlockstoreManagerForTesting( @@ -40,7 +40,7 @@ func TestBlockstoreManagerNotFoundKey(t *testing.T) { bsm := newBlockstoreManagerForTesting(t, ctx, bstore, 5) - cids 
:= testutil.GenerateCids(4) + cids := random.Cids(4) sizes, err := bsm.getBlockSizes(ctx, cids) if err != nil { t.Fatal(err) @@ -158,11 +158,11 @@ func TestBlockstoreManagerConcurrency(t *testing.T) { dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) - workerCount := 5 + const workerCount = 5 bsm := newBlockstoreManagerForTesting(t, ctx, bstore, workerCount) - blkSize := int64(8 * 1024) - blks := testutil.GenerateBlocksOfSize(32, blkSize) + const blkSize = 8 * 1024 + blks := random.BlocksOfSize(32, blkSize) var ks []cid.Cid for _, b := range blks { ks = append(ks, b.Cid()) @@ -195,14 +195,14 @@ func TestBlockstoreManagerConcurrency(t *testing.T) { func TestBlockstoreManagerClose(t *testing.T) { ctx := context.Background() - delayTime := 20 * time.Millisecond + const delayTime = 20 * time.Millisecond bsdelay := delay.Fixed(delayTime) dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) bsm := newBlockstoreManagerForTesting(t, ctx, bstore, 3) - blks := testutil.GenerateBlocksOfSize(10, 1024) + blks := random.BlocksOfSize(10, 1024) var ks []cid.Cid for _, b := range blks { ks = append(ks, b.Cid()) @@ -227,7 +227,7 @@ func TestBlockstoreManagerClose(t *testing.T) { } func TestBlockstoreManagerCtxDone(t *testing.T) { - delayTime := 20 * time.Millisecond + const delayTime = 20 * time.Millisecond bsdelay := delay.Fixed(delayTime) underlyingDstore := ds_sync.MutexWrap(ds.NewMapDatastore()) @@ -238,7 +238,7 @@ func TestBlockstoreManagerCtxDone(t *testing.T) { ctx := context.Background() bsm := newBlockstoreManagerForTesting(t, ctx, bstore, 3) - blks := testutil.GenerateBlocksOfSize(100, 128) + blks := random.BlocksOfSize(100, 128) var ks []cid.Cid for _, b := range blks { ks = append(ks, b.Cid()) diff --git a/bitswap/server/internal/decision/engine.go b/bitswap/server/internal/decision/engine.go index 234c1c510..a40345d8f 100644 --- a/bitswap/server/internal/decision/engine.go +++ b/bitswap/server/internal/decision/engine.go @@ -2,14 +2,15 @@ package decision import ( + "cmp" "context" + "errors" "fmt" - "math/bits" + "slices" "sync" "time" "github.com/google/uuid" - wl "github.com/ipfs/boxo/bitswap/client/wantlist" "github.com/ipfs/boxo/bitswap/internal/defaults" bsmsg "github.com/ipfs/boxo/bitswap/message" @@ -60,7 +61,7 @@ import ( // whatever it sees fit to produce desired outcomes (get wanted keys // quickly, maintain good relationships with peers, etc). -var log = logging.Logger("engine") +var log = logging.Logger("bitswap/server/decision") const ( // outboxChanBuffer must be 0 to prevent stale messages from being sent @@ -132,9 +133,11 @@ type PeerEntry struct { // PeerLedger is an external ledger dealing with peers and their want lists. type PeerLedger interface { // Wants informs the ledger that [peer.ID] wants [wl.Entry]. - Wants(p peer.ID, e wl.Entry) + // If the peer ledger exceeds its internal limit, then the entry is not added + // and false is returned. + Wants(p peer.ID, e wl.Entry) bool - // CancelWant returns true if the [cid.Cid] is present in the wantlist of [peer.ID]. + // CancelWant returns true if the [cid.Cid] was removed from the wantlist of [peer.ID]. // CancelWantWithType will not cancel WantBlock if we sent a HAVE message.
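The contract change just above, where `Wants` now reports whether the entry was actually added, is what lets the server detect per-peer wantlist overflow instead of silently truncating. A minimal sketch of a caller honoring that contract, mirroring the engine code later in this diff (hypothetical helper; assumes the `PeerLedger`, `peer.ID`, and `bsmsg.Entry` types shown here):

	// splitByLedgerCapacity illustrates the new PeerLedger.Wants contract:
	// entries the ledger rejects (Wants returns false once the per-peer limit
	// is reached) are collected as overflow rather than silently dropped.
	func splitByLedgerCapacity(ledger PeerLedger, p peer.ID, wants []bsmsg.Entry) (accepted, overflow []bsmsg.Entry) {
		accepted = wants[:0] // shift accepted entries in place, reusing the backing array
		for _, entry := range wants {
			if !ledger.Wants(p, entry.Entry) {
				// Per-peer limit reached: set this entry aside for overflow handling.
				overflow = append(overflow, entry)
				continue
			}
			accepted = append(accepted, entry)
		}
		return accepted, overflow
	}
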
@@ -315,8 +318,11 @@ func WithMaxOutstandingBytesPerPeer(count int) Option { } } -// WithMaxQueuedWantlistEntriesPerPeer limits how much individual entries each peer is allowed to send. -// If a peer send us more than this we will truncate newest entries. +// WithMaxQueuedWantlistEntriesPerPeer limits how many individual entries each +// peer is allowed to send. If a peer sends more than this, then the lowest +// priority entries are truncated to this limit. If there is insufficient space +// to enqueue new entries, then older existing wants with no associated blocks, +// and lower priority wants, are canceled to make room for the new wants. func WithMaxQueuedWantlistEntriesPerPeer(count uint) Option { return func(e *Engine) { e.maxQueuedWantlistEntriesPerPeer = count @@ -402,7 +408,6 @@ func newEngine( taskWorkerCount: defaults.BitswapEngineTaskWorkerCount, sendDontHaves: true, self: self, - peerLedger: NewDefaultPeerLedger(), pendingGauge: bmetrics.PendingEngineGauge(ctx), activeGauge: bmetrics.ActiveEngineGauge(ctx), targetMessageSize: defaultTargetMessageSize, @@ -416,6 +421,11 @@ func newEngine( opt(e) } + // If peerLedger was not set by option, then create a default instance. + if e.peerLedger == nil { + e.peerLedger = NewDefaultPeerLedger(e.maxQueuedWantlistEntriesPerPeer) + } + e.bsm = newBlockstoreManager(bs, e.bstoreWorkerCount, bmetrics.PendingBlocksGauge(ctx), bmetrics.ActiveBlocksGauge(ctx)) // default peer task queue options @@ -668,37 +678,20 @@ func (e *Engine) Peers() []peer.ID { // MessageReceived is called when a message is received from a remote peer. // For each item in the wantlist, add a want-have or want-block entry to the -// request queue (this is later popped off by the workerTasks) -func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) (mustKillConnection bool) { - entries := m.Wantlist() - - if len(entries) > 0 { - log.Debugw("Bitswap engine <- msg", "local", e.self, "from", p, "entryCount", len(entries)) - for _, et := range entries { - if !et.Cancel { - if et.WantType == pb.Message_Wantlist_Have { - log.Debugw("Bitswap engine <- want-have", "local", e.self, "from", p, "cid", et.Cid) - } else { - log.Debugw("Bitswap engine <- want-block", "local", e.self, "from", p, "cid", et.Cid) - } - } - } - } - +// request queue (this is later popped off by the workerTasks). Returns true +// if the connection to the peer must be closed. +func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) bool { if m.Empty() { log.Infof("received empty message from %s", p) + return false } - newWorkExists := false - defer func() { - if newWorkExists { - e.signalNewWork() - } - }() - - // Dispatch entries - wants, cancels := e.splitWantsCancels(entries) - wants, denials := e.splitWantsDenials(p, wants) + wants, cancels, denials, err := e.splitWantsCancelsDenials(p, m) + if err != nil { + // This is a truly broken client, let's kill the connection.
+ log.Warnw(err.Error(), "local", e.self, "remote", p) + return true + } // Get block sizes wantKs := cid.NewSet() @@ -708,7 +701,7 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap blockSizes, err := e.bsm.getBlockSizes(ctx, wantKs.Keys()) if err != nil { log.Info("aborting message processing", err) - return + return false } e.lock.Lock() @@ -717,56 +710,35 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap e.peerLedger.ClearPeerWantlist(p) } - s := uint(e.peerLedger.WantlistSizeForPeer(p)) - if wouldBe := s + uint(len(wants)); wouldBe > e.maxQueuedWantlistEntriesPerPeer { - log.Debugw("wantlist overflow", "local", e.self, "remote", p, "would be", wouldBe) - // truncate wantlist to avoid overflow - available, o := bits.Sub(e.maxQueuedWantlistEntriesPerPeer, s, 0) - if o != 0 { - available = 0 + var overflow []bsmsg.Entry + if len(wants) != 0 { + filteredWants := wants[:0] // shift inplace + for _, entry := range wants { + if !e.peerLedger.Wants(p, entry.Entry) { + // Cannot add entry because it would exceed size limit. + overflow = append(overflow, entry) + continue + } + filteredWants = append(filteredWants, entry) } - wants = wants[:available] + // Clear truncated entries - early GC. + clear(wants[len(filteredWants):]) + wants = filteredWants } - filteredWants := wants[:0] // shift inplace - - for _, entry := range wants { - if entry.Cid.Prefix().MhType == mh.IDENTITY { - // This is a truely broken client, let's kill the connection. - e.lock.Unlock() - log.Warnw("peer wants an identity CID", "local", e.self, "remote", p) - return true - } - if e.maxCidSize != 0 && uint(entry.Cid.ByteLen()) > e.maxCidSize { - // Ignore requests about CIDs that big. - continue - } - - e.peerLedger.Wants(p, entry.Entry) - filteredWants = append(filteredWants, entry) - } - clear := wants[len(filteredWants):] - for i := range clear { - clear[i] = bsmsg.Entry{} // early GC + if len(overflow) != 0 { + log.Infow("handling wantlist overflow", "local", e.self, "from", p, "wantlistSize", len(wants), "overflowSize", len(overflow)) + wants = e.handleOverflow(ctx, p, overflow, wants) } - wants = filteredWants - for _, entry := range cancels { - if entry.Cid.Prefix().MhType == mh.IDENTITY { - // This is a truely broken client, let's kill the connection. - e.lock.Unlock() - log.Warnw("peer canceled an identity CID", "local", e.self, "remote", p) - return true - } - if e.maxCidSize != 0 && uint(entry.Cid.ByteLen()) > e.maxCidSize { - // Ignore requests about CIDs that big. 
- continue - } - log.Debugw("Bitswap engine <- cancel", "local", e.self, "from", p, "cid", entry.Cid) - if e.peerLedger.CancelWant(p, entry.Cid) { - e.peerRequestQueue.Remove(entry.Cid, p) + for _, entry := range cancels { + c := entry.Cid + log.Debugw("Bitswap engine <- cancel", "local", e.self, "from", p, "cid", c) + if e.peerLedger.CancelWant(p, c) { + e.peerRequestQueue.Remove(c, p) } } + e.lock.Unlock() var activeEntries []peertask.Task @@ -776,13 +748,6 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap // Only add the task to the queue if the requester wants a DONT_HAVE if e.sendDontHaves && entry.SendDontHave { c := entry.Cid - - newWorkExists = true - isWantBlock := false - if entry.WantType == pb.Message_Wantlist_Block { - isWantBlock = true - } - activeEntries = append(activeEntries, peertask.Task{ Topic: c, Priority: int(entry.Priority), @@ -790,7 +755,7 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap Data: &taskData{ BlockSize: 0, HaveBlock: false, - IsWantBlock: isWantBlock, + IsWantBlock: entry.WantType == pb.Message_Wantlist_Block, SendDontHave: entry.SendDontHave, }, }) @@ -806,82 +771,177 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap // For each want-have / want-block for _, entry := range wants { c := entry.Cid - blockSize, found := blockSizes[entry.Cid] + blockSize, found := blockSizes[c] // If the block was not found if !found { - log.Debugw("Bitswap engine: block not found", "local", e.self, "from", p, "cid", entry.Cid, "sendDontHave", entry.SendDontHave) + log.Debugw("Bitswap engine: block not found", "local", e.self, "from", p, "cid", c, "sendDontHave", entry.SendDontHave) sendDontHave(entry) - } else { - // The block was found, add it to the queue - newWorkExists = true - - isWantBlock := e.sendAsBlock(entry.WantType, blockSize) - - log.Debugw("Bitswap engine: block found", "local", e.self, "from", p, "cid", entry.Cid, "isWantBlock", isWantBlock) - - // entrySize is the amount of space the entry takes up in the - // message we send to the recipient. If we're sending a block, the - // entrySize is the size of the block. Otherwise it's the size of - // a block presence entry. - entrySize := blockSize - if !isWantBlock { - entrySize = bsmsg.BlockPresenceSize(c) - } - activeEntries = append(activeEntries, peertask.Task{ - Topic: c, - Priority: int(entry.Priority), - Work: entrySize, - Data: &taskData{ - BlockSize: blockSize, - HaveBlock: true, - IsWantBlock: isWantBlock, - SendDontHave: entry.SendDontHave, - }, - }) + continue + } + // The block was found, add it to the queue + isWantBlock := e.sendAsBlock(entry.WantType, blockSize) + + log.Debugw("Bitswap engine: block found", "local", e.self, "from", p, "cid", c, "isWantBlock", isWantBlock) + + // entrySize is the amount of space the entry takes up in the + // message we send to the recipient. If we're sending a block, the + // entrySize is the size of the block. Otherwise it's the size of + // a block presence entry. + entrySize := blockSize + if !isWantBlock { + entrySize = bsmsg.BlockPresenceSize(c) } + activeEntries = append(activeEntries, peertask.Task{ + Topic: c, + Priority: int(entry.Priority), + Work: entrySize, + Data: &taskData{ + BlockSize: blockSize, + HaveBlock: true, + IsWantBlock: isWantBlock, + SendDontHave: entry.SendDontHave, + }, + }) } - // Push entries onto the request queue - if len(activeEntries) > 0 { + // Push entries onto the request queue and signal network that new work is ready. 
+ if len(activeEntries) != 0 { e.peerRequestQueue.PushTasksTruncated(e.maxQueuedWantlistEntriesPerPeer, p, activeEntries...) e.updateMetrics() + e.signalNewWork() } return false } -// Split the want-have / want-block entries from the cancel entries -func (e *Engine) splitWantsCancels(es []bsmsg.Entry) ([]bsmsg.Entry, []bsmsg.Entry) { - wants := make([]bsmsg.Entry, 0, len(es)) - cancels := make([]bsmsg.Entry, 0, len(es)) - for _, et := range es { - if et.Cancel { - cancels = append(cancels, et) - } else { - wants = append(wants, et) +// handleOverflow processes incoming wants that could not be added to the peer +// ledger without exceeding the peer want limit. These are handled by trying to +// make room by canceling existing wants for which there is no block. If this +// does not make sufficient room, then any lower priority wants that have +// blocks are canceled. +// +// Important: handleOverflow must be called while e.lock is held. +func (e *Engine) handleOverflow(ctx context.Context, p peer.ID, overflow, wants []bsmsg.Entry) []bsmsg.Entry { + // Sort overflow from most to least important. + slices.SortFunc(overflow, func(a, b bsmsg.Entry) int { + return cmp.Compare(b.Entry.Priority, a.Entry.Priority) + }) + // Sort existing wants from least to most important, to try to replace + // lowest priority items first. + existingWants := e.peerLedger.WantlistForPeer(p) + slices.SortFunc(existingWants, func(a, b wl.Entry) int { + return cmp.Compare(a.Priority, b.Priority) + }) + + queuedWantKs := cid.NewSet() + for _, entry := range existingWants { + queuedWantKs.Add(entry.Cid) + } + queuedBlockSizes, err := e.bsm.getBlockSizes(ctx, queuedWantKs.Keys()) + if err != nil { + log.Info("aborting overflow processing", err) + return wants + } + + // Remove entries for blocks that are not present to make room for overflow. + var removed []int + for i, w := range existingWants { + if _, found := queuedBlockSizes[w.Cid]; !found { + // Cancel lowest priority dont-have. + if e.peerLedger.CancelWant(p, w.Cid) { + e.peerRequestQueue.Remove(w.Cid, p) + } + removed = append(removed, i) + // Pop highest priority overflow. + firstOver := overflow[0] + overflow = overflow[1:] + // Add highest priority overflow to wants. + e.peerLedger.Wants(p, firstOver.Entry) + wants = append(wants, firstOver) + if len(overflow) == 0 { + return wants + } + } + } - return wants, cancels + + // Replace existing entries that have a lower priority with overflow + // entries. + var replace int + for _, overflowEnt := range overflow { + // Skip existingWants entries that were already removed above. + for len(removed) != 0 && replace == removed[0] { + replace++ + removed = removed[1:] + } + if overflowEnt.Entry.Priority < existingWants[replace].Priority { + // All overflow entries have too low a priority to replace any + // existing wants. + break + } + entCid := existingWants[replace].Cid + replace++ + if e.peerLedger.CancelWant(p, entCid) { + e.peerRequestQueue.Remove(entCid, p) + } + e.peerLedger.Wants(p, overflowEnt.Entry) + wants = append(wants, overflowEnt) + } + + return wants } -// Split the want-have / want-block entries from the block that will be denied access -func (e *Engine) splitWantsDenials(p peer.ID, allWants []bsmsg.Entry) ([]bsmsg.Entry, []bsmsg.Entry) { - if e.peerBlockRequestFilter == nil { - return allWants, nil +// Split the want-have / want-block entries from the cancel and deny entries.
+func (e *Engine) splitWantsCancelsDenials(p peer.ID, m bsmsg.BitSwapMessage) ([]bsmsg.Entry, []bsmsg.Entry, []bsmsg.Entry, error) { + entries := m.Wantlist() // creates copy; safe to modify + if len(entries) == 0 { + return nil, nil, nil, nil } - wants := make([]bsmsg.Entry, 0, len(allWants)) - denied := make([]bsmsg.Entry, 0, len(allWants)) + log.Debugw("Bitswap engine <- msg", "local", e.self, "from", p, "entryCount", len(entries)) - for _, et := range allWants { - if e.peerBlockRequestFilter(p, et.Cid) { - wants = append(wants, et) + wants := entries[:0] // shift in-place + var cancels, denials []bsmsg.Entry + + for _, et := range entries { + c := et.Cid + if e.maxCidSize != 0 && uint(c.ByteLen()) > e.maxCidSize { + // Ignore requests about CIDs that big. + continue + } + if c.Prefix().MhType == mh.IDENTITY { + return nil, nil, nil, errors.New("peer canceled an identity CID") + } + + if et.Cancel { + cancels = append(cancels, et) + continue + } + + if et.WantType == pb.Message_Wantlist_Have { + log.Debugw("Bitswap engine <- want-have", "local", e.self, "from", p, "cid", c) } else { - denied = append(denied, et) + log.Debugw("Bitswap engine <- want-block", "local", e.self, "from", p, "cid", c) + } + + if e.peerBlockRequestFilter != nil && !e.peerBlockRequestFilter(p, c) { + denials = append(denials, et) + continue + } + + // Do not take more wants than can be handled. + if len(wants) < int(e.maxQueuedWantlistEntriesPerPeer) { + wants = append(wants, et) + } } - return wants, denied + if len(wants) == 0 { + wants = nil + } + + // Clear truncated entries. + clear(entries[len(wants):]) + + return wants, cancels, denials, nil } // ReceivedBlocks is called when new blocks are received from the network. diff --git a/bitswap/server/internal/decision/engine_test.go b/bitswap/server/internal/decision/engine_test.go index c25e3508d..593bbde0f 100644 --- a/bitswap/server/internal/decision/engine_test.go +++ b/bitswap/server/internal/decision/engine_test.go @@ -14,7 +14,7 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/ipfs/boxo/bitswap/internal/testutil" + wl "github.com/ipfs/boxo/bitswap/client/wantlist" message "github.com/ipfs/boxo/bitswap/message" pb "github.com/ipfs/boxo/bitswap/message/pb" blockstore "github.com/ipfs/boxo/blockstore" @@ -22,6 +22,7 @@ import ( "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" + "github.com/ipfs/go-test/random" process "github.com/jbenet/goprocess" peer "github.com/libp2p/go-libp2p/core/peer" libp2ptest "github.com/libp2p/go-libp2p/core/test" @@ -221,8 +222,8 @@ func TestOutboxClosedWhenEngineClosed(t *testing.T) { } func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { - alphabet := "abcdefghijklmnopqrstuvwxyz" - vowels := "aeiou" + const alphabet = "abcdefghijklmnopqrstuvwxyz" + const vowels = "aeiou" bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) for _, letter := range strings.Split(alphabet, "") { @@ -561,7 +562,7 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { } func TestPartnerWantHaveWantBlockActive(t *testing.T) { - alphabet := "abcdefghijklmnopqrstuvwxyz" + const alphabet = "abcdefghijklmnopqrstuvwxyz" bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) for _, letter := range strings.Split(alphabet, "") { @@ -903,7 +904,7 @@ func TestSendReceivedBlocksToPeersThatWantThem(t *testing.T) { e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())),
WithBlockstoreWorkerCount(4)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) - blks := testutil.GenerateBlocksOfSize(4, 8*1024) + blks := random.BlocksOfSize(4, 8*1024) msg := message.New(false) msg.AddEntry(blks[0].Cid(), 4, pb.Message_Wantlist_Have, false) msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, false) @@ -949,7 +950,7 @@ func TestSendDontHave(t *testing.T) { e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) - blks := testutil.GenerateBlocksOfSize(4, 8*1024) + blks := random.BlocksOfSize(4, 8*1024) msg := message.New(false) msg.AddEntry(blks[0].Cid(), 4, pb.Message_Wantlist_Have, false) msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, true) @@ -1015,7 +1016,7 @@ func TestWantlistForPeer(t *testing.T) { e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) - blks := testutil.GenerateBlocksOfSize(4, 8*1024) + blks := random.BlocksOfSize(4, 8*1024) msg := message.New(false) msg.AddEntry(blks[0].Cid(), 2, pb.Message_Wantlist_Have, false) msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, false) @@ -1454,7 +1455,7 @@ func TestTaggingPeers(t *testing.T) { } func TestTaggingUseful(t *testing.T) { - peerSampleIntervalHalf := 10 * time.Millisecond + const peerSampleIntervalHalf = 10 * time.Millisecond ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -1733,3 +1734,235 @@ func TestKillConnectionForInlineCid(t *testing.T) { t.Fatal("connection was not killed when receiving inline in cancel") } } + +func TestWantlistBlocked(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const limit = 32 + + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + + // Generate a set of blocks that the server has. + haveCids := make([]cid.Cid, limit) + var blockNum int + for blockNum < limit { + block := blocks.NewBlock([]byte(fmt.Sprint(blockNum))) + if blockNum != 0 { // do not put first block in blockstore. + if err := bs.Put(context.Background(), block); err != nil { + t.Fatal(err) + } + } + haveCids[blockNum] = block.Cid() + blockNum++ + } + + fpt := &fakePeerTagger{} + e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4), WithMaxQueuedWantlistEntriesPerPeer(limit)) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + warsaw := engineSet{ + Peer: peer.ID("warsaw"), + PeerTagger: fpt, + Blockstore: bs, + Engine: e, + } + riga := newTestEngine(ctx, "riga") + if warsaw.Peer == riga.Peer { + t.Fatal("Sanity Check: Peers have same Key!") + } + + m := message.New(false) + dontHaveCids := make([]cid.Cid, limit) + for i := 0; i < limit; i++ { + c := blocks.NewBlock([]byte(fmt.Sprint(blockNum))).Cid() + blockNum++ + m.AddEntry(c, 1, pb.Message_Wantlist_Block, true) + dontHaveCids[i] = c + } + warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + wl := warsaw.Engine.WantlistForPeer(riga.Peer) + // Check that all the dontHave wants are on the wantlist. 
+ for _, c := range dontHaveCids { + if !findCid(c, wl) { + t.Fatal("Expected all dontHaveCids to be on wantlist") + } + } + t.Log("All", len(wl), "dont-have CIDs are on wantlist") + + m = message.New(false) + for _, c := range haveCids { + m.AddEntry(c, 1, pb.Message_Wantlist_Block, true) + } + warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + wl = warsaw.Engine.WantlistForPeer(riga.Peer) + // Check that all the have wants are on the wantlist. + for _, c := range haveCids { + if !findCid(c, wl) { + t.Fatal("Missing expected want. Expected all haveCids to be on wantlist") + } + } + t.Log("All", len(wl), "new have CIDs are now on wantlist") + + m = message.New(false) + for i := 0; i < limit; i++ { + c := blocks.NewBlock([]byte(fmt.Sprint(blockNum))).Cid() + blockNum++ + m.AddEntry(c, 1, pb.Message_Wantlist_Block, true) + dontHaveCids[i] = c + } + warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + // Check that all the new dontHave wants are not on the wantlist. + for _, c := range dontHaveCids { + if findCid(c, wl) { + t.Fatal("No new dontHaveCids should be on wantlist") + } + } + t.Log("All", len(wl), "new dont-have CIDs are not on wantlist") +} + +func TestWantlistOverflow(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const limit = 32 + + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + + origCids := make([]cid.Cid, limit) + var blockNum int + m := message.New(false) + for blockNum < limit { + block := blocks.NewBlock([]byte(fmt.Sprint(blockNum))) + if blockNum != 0 { // do not put first block in blockstore. + if err := bs.Put(context.Background(), block); err != nil { + t.Fatal(err) + } + } + m.AddEntry(block.Cid(), 1, pb.Message_Wantlist_Block, true) + origCids[blockNum] = block.Cid() + blockNum++ + } + + fpt := &fakePeerTagger{} + e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4), WithMaxQueuedWantlistEntriesPerPeer(limit)) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + warsaw := engineSet{ + Peer: peer.ID("warsaw"), + PeerTagger: fpt, + Blockstore: bs, + Engine: e, + } + riga := newTestEngine(ctx, "riga") + if warsaw.Peer == riga.Peer { + t.Fatal("Sanity Check: Peers have same Key!") + } + + warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + // Check that the wantlist is at the size limit. + wl := warsaw.Engine.WantlistForPeer(riga.Peer) + if len(wl) != limit { + t.Fatal("wantlist size", len(wl), "does not match limit", limit) + } + t.Log("Sent message with", limit, "medium-priority wants and", limit-1, "have blocks present") + + m = message.New(false) + lowPrioCids := make([]cid.Cid, 5) + for i := 0; i < cap(lowPrioCids); i++ { + c := blocks.NewBlock([]byte(fmt.Sprint(blockNum))).Cid() + blockNum++ + m.AddEntry(c, 0, pb.Message_Wantlist_Block, true) + lowPrioCids[i] = c + } + warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + wl = warsaw.Engine.WantlistForPeer(riga.Peer) + if len(wl) != limit { + t.Fatal("wantlist size", len(wl), "does not match limit", limit) + } + // Check that one low priority entry is on the wantlist, since there is one + // existing entry without a block and none at a lower priority. + var count int + for _, c := range lowPrioCids { + if findCid(c, wl) { + count++ + } + } + if count != 1 { + t.Fatal("Expected 1 low priority entry on wantlist, found", count) + } + t.Log("Sent message with", len(lowPrioCids), "low-priority wants.
One accepted as replacement for existing want without block.") + + m = message.New(false) + highPrioCids := make([]cid.Cid, 5) + for i := 0; i < cap(highPrioCids); i++ { + c := blocks.NewBlock([]byte(fmt.Sprint(blockNum))).Cid() + blockNum++ + m.AddEntry(c, 10, pb.Message_Wantlist_Block, true) + highPrioCids[i] = c + } + warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + wl = warsaw.Engine.WantlistForPeer(riga.Peer) + if len(wl) != limit { + t.Fatal("wantlist size", len(wl), "does not match limit", limit) + } + // Check that all the high priority entries are on the wantlist, since there + // were existing entries with lower priority. + for _, c := range highPrioCids { + if !findCid(c, wl) { + t.Fatal("expected high priority entry on wantlist") + } + } + t.Log("Sent message with", len(highPrioCids), "high-priority wants. All accepted replacing wants without block or low priority.") + + // These new wants should overflow and some of them should replace existing + // wants that do not have blocks (the high-priority wants from the + // previous message). + m = message.New(false) + blockCids := make([]cid.Cid, len(highPrioCids)+2) + for i := 0; i < cap(blockCids); i++ { + c := blocks.NewBlock([]byte(fmt.Sprint(blockNum))).Cid() + blockNum++ + m.AddEntry(c, 0, pb.Message_Wantlist_Block, true) + blockCids[i] = c + } + warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + wl = warsaw.Engine.WantlistForPeer(riga.Peer) + if len(wl) != limit { + t.Fatal("wantlist size", len(wl), "does not match limit", limit) + } + + count = 0 + for _, c := range blockCids { + if findCid(c, wl) { + count++ + } + } + if count != len(highPrioCids) { + t.Fatal("expected", len(highPrioCids), "of the new blocks, found", count) + } + t.Log("Sent message with", len(blockCids), "low-priority wants.", count, "accepted replacing wants without blocks from previous message") + + // Send the original wants. Some should replace the existing wants that do + // not have blocks associated, and the rest should overwrite the existing + // ones. + m = message.New(false) + for _, c := range origCids { + m.AddEntry(c, 0, pb.Message_Wantlist_Block, true) + } + warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + wl = warsaw.Engine.WantlistForPeer(riga.Peer) + for _, c := range origCids { + if !findCid(c, wl) { + t.Fatal("missing low-priority original wants to overwrite existing") + } + } + t.Log("Sent message with", len(origCids), "original wants at low priority. All accepted overwriting existing wants.") +} + +func findCid(c cid.Cid, wantList []wl.Entry) bool { + for i := range wantList { + if wantList[i].Cid == c { + return true + } + } + return false +} diff --git a/bitswap/server/internal/decision/peer_ledger.go b/bitswap/server/internal/decision/peer_ledger.go index b79db226d..227e50de1 100644 --- a/bitswap/server/internal/decision/peer_ledger.go +++ b/bitswap/server/internal/decision/peer_ledger.go @@ -12,20 +12,31 @@ type DefaultPeerLedger struct { // these two maps are inversions of each other peers map[peer.ID]map[cid.Cid]entry cids map[cid.Cid]map[peer.ID]entry + // a value of 0 means no limit + maxEntriesPerPeer int } -func NewDefaultPeerLedger() *DefaultPeerLedger { +func NewDefaultPeerLedger(maxEntriesPerPeer uint) *DefaultPeerLedger { return &DefaultPeerLedger{ peers: make(map[peer.ID]map[cid.Cid]entry), cids: make(map[cid.Cid]map[peer.ID]entry), + + maxEntriesPerPeer: int(maxEntriesPerPeer), } } -func (l *DefaultPeerLedger) Wants(p peer.ID, e wl.Entry) { +// Wants adds an entry to the peer ledger.
If adding the entry would make the +// peer ledger exceed the maxEntriesPerPeer limit, then the entry is not added +// and false is returned. +func (l *DefaultPeerLedger) Wants(p peer.ID, e wl.Entry) bool { cids, ok := l.peers[p] if !ok { cids = make(map[cid.Cid]entry) l.peers[p] = cids + } else if l.maxEntriesPerPeer != 0 && len(cids) == l.maxEntriesPerPeer { + if _, ok = cids[e.Cid]; !ok { + return false // cannot add to peer ledger + } } cids[e.Cid] = entry{e.Priority, e.WantType} @@ -35,6 +46,8 @@ func (l *DefaultPeerLedger) Wants(p peer.ID, e wl.Entry) { l.cids[e.Cid] = m } m[p] = entry{e.Priority, e.WantType} + + return true } func (l *DefaultPeerLedger) CancelWant(p peer.ID, k cid.Cid) bool { @@ -42,13 +55,14 @@ func (l *DefaultPeerLedger) CancelWant(p peer.ID, k cid.Cid) bool { if !ok { return false } + _, had := wants[k] delete(wants, k) if len(wants) == 0 { delete(l.peers, p) } l.removePeerFromCid(p, k) - return true + return had } func (l *DefaultPeerLedger) CancelWantWithType(p peer.ID, k cid.Cid, typ pb.Message_Wantlist_WantType) { diff --git a/bitswap/server/internal/decision/taskmerger_test.go b/bitswap/server/internal/decision/taskmerger_test.go index ae3b0384d..e1b58d056 100644 --- a/bitswap/server/internal/decision/taskmerger_test.go +++ b/bitswap/server/internal/decision/taskmerger_test.go @@ -3,13 +3,13 @@ package decision import ( "testing" - "github.com/ipfs/boxo/bitswap/internal/testutil" "github.com/ipfs/go-peertaskqueue" "github.com/ipfs/go-peertaskqueue/peertask" + "github.com/ipfs/go-test/random" ) func TestPushHaveVsBlock(t *testing.T) { - partner := testutil.GeneratePeers(1)[0] + partner := random.Peers(1)[0] wantHave := peertask.Task{ Topic: "1", @@ -61,7 +61,7 @@ func TestPushHaveVsBlock(t *testing.T) { } func TestPushSizeInfo(t *testing.T) { - partner := testutil.GeneratePeers(1)[0] + partner := random.Peers(1)[0] wantBlockBlockSize := 10 wantBlockDontHaveBlockSize := 0 @@ -131,8 +131,8 @@ func TestPushSizeInfo(t *testing.T) { } } - isWantBlock := true - isWantHave := false + const isWantBlock = true + const isWantHave = false // want-block (DONT_HAVE) should have no effect on existing want-block (DONT_HAVE) runTestCase([]peertask.Task{wantBlockDontHave, wantBlockDontHave}, wantBlockDontHave.Work, wantBlockDontHaveBlockSize, isWantBlock) @@ -173,7 +173,7 @@ func TestPushSizeInfo(t *testing.T) { } func TestPushHaveVsBlockActive(t *testing.T) { - partner := testutil.GeneratePeers(1)[0] + partner := random.Peers(1)[0] wantBlock := peertask.Task{ Topic: "1", @@ -227,7 +227,7 @@ func TestPushHaveVsBlockActive(t *testing.T) { } func TestPushSizeInfoActive(t *testing.T) { - partner := testutil.GeneratePeers(1)[0] + partner := random.Peers(1)[0] wantBlock := peertask.Task{ Topic: "1", diff --git a/bitswap/server/server.go b/bitswap/server/server.go index 1e723ddce..85651a5ef 100644 --- a/bitswap/server/server.go +++ b/bitswap/server/server.go @@ -29,7 +29,7 @@ import ( var provideKeysBufferSize = 2048 var ( - log = logging.Logger("bitswap-server") + log = logging.Logger("bitswap/server") sflog = log.Desugar() ) diff --git a/chunker/buzhash_test.go b/chunker/buzhash_test.go index 2eaf5ae32..0c334d54f 100644 --- a/chunker/buzhash_test.go +++ b/chunker/buzhash_test.go @@ -5,13 +5,13 @@ import ( "io" "testing" - util "github.com/ipfs/boxo/util" + random "github.com/ipfs/go-test/random" ) func testBuzhashChunking(t *testing.T, buf []byte) (chunkCount int) { t.Parallel() - n, err := util.NewTimeSeededRand().Read(buf) + n, err := random.NewRand().Read(buf) if n < len(buf) 
{ t.Fatalf("expected %d bytes, got %d", len(buf), n) } diff --git a/chunker/rabin_test.go b/chunker/rabin_test.go index 31f3464ee..98939d36d 100644 --- a/chunker/rabin_test.go +++ b/chunker/rabin_test.go @@ -6,15 +6,15 @@ import ( "io" "testing" - util "github.com/ipfs/boxo/util" blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-test/random" ) func TestRabinChunking(t *testing.T) { t.Parallel() data := make([]byte, 1024*1024*16) - n, err := util.NewTimeSeededRand().Read(data) + n, err := random.NewRand().Read(data) if n < len(data) { t.Fatalf("expected %d bytes, got %d", len(data), n) } @@ -72,7 +72,7 @@ func testReuse(t *testing.T, cr newSplitter) { t.Parallel() data := make([]byte, 1024*1024*16) - n, err := util.NewTimeSeededRand().Read(data) + n, err := random.NewRand().Read(data) if n < len(data) { t.Fatalf("expected %d bytes, got %d", len(data), n) } diff --git a/chunker/splitting_test.go b/chunker/splitting_test.go index c6712446a..23170ee37 100644 --- a/chunker/splitting_test.go +++ b/chunker/splitting_test.go @@ -5,12 +5,12 @@ import ( "io" "testing" - u "github.com/ipfs/boxo/util" + "github.com/ipfs/go-test/random" ) func randBuf(t *testing.T, size int) []byte { buf := make([]byte, size) - if _, err := u.NewTimeSeededRand().Read(buf); err != nil { + if _, err := random.NewRand().Read(buf); err != nil { t.Fatal("failed to read enough randomness") } return buf @@ -25,7 +25,7 @@ func copyBuf(buf []byte) []byte { func TestSizeSplitterOverAllocate(t *testing.T) { t.Parallel() - max := 1000 + const max = 1000 r := bytes.NewReader(randBuf(t, max)) chunksize := int64(1024 * 256) splitter := NewSizeSplitter(r, chunksize) @@ -80,10 +80,10 @@ func TestSizeSplitterFillsChunks(t *testing.T) { } t.Parallel() - max := 10000000 + const max = 10000000 b := randBuf(t, max) r := &clipReader{r: bytes.NewReader(b), size: 4000} - chunksize := int64(1024 * 256) + const chunksize = 1024 * 256 c, _ := Chan(NewSizeSplitter(r, chunksize)) sofar := 0 @@ -98,7 +98,7 @@ func TestSizeSplitterFillsChunks(t *testing.T) { copy(whole[sofar:], chunk) sofar += len(chunk) - if sofar != max && len(chunk) < int(chunksize) { + if sofar != max && len(chunk) < chunksize { t.Fatal("sizesplitter split at a smaller size") } } diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index 7781c410c..94fb077e5 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -3,7 +3,7 @@ # requested to review draft pull requests. # Deafult -* @ipfs/kubo-maintainers +* @ipfs/kubo-maintainers # HTTP Gateway -gateway/ @lidel @hacdias +gateway/ @lidel diff --git a/docs/tracing.md b/docs/tracing.md index c43c10aed..868b68d95 100644 --- a/docs/tracing.md +++ b/docs/tracing.md @@ -9,7 +9,7 @@ for the `OTEL_TRACES_EXPORTER` environment variables. Therefore, we provide some helper functions under [`boxo/tracing`](../tracing/) to support these. In this document, we document the quirks of our custom support for the `OTEL_TRACES_EXPORTER`, -as well as examples on how to use tracing, create traceable headers, and how +as well as examples of how to use tracing, create traceable headers, and how to use the Jaeger UI. The [Gateway examples](../examples/gateway/) fully support Tracing. 
- [Environment Variables](#environment-variables) diff --git a/examples/car-file-fetcher/README.md b/examples/car-file-fetcher/README.md index d44b9cb59..97894251d 100644 --- a/examples/car-file-fetcher/README.md +++ b/examples/car-file-fetcher/README.md @@ -2,7 +2,7 @@ This example shows how to download a UnixFS file or directory from a gateway that implements [application/vnd.ipld.car](https://www.iana.org/assignments/media-types/application/vnd.ipld.car) -responses of the [Trustles Gateway](https://specs.ipfs.tech/http-gateways/trustless-gateway/) +responses of the [Trustless Gateway](https://specs.ipfs.tech/http-gateways/trustless-gateway/) specification, in a trustless, verifiable manner. It relies on [IPIP-402](https://specs.ipfs.tech/ipips/ipip-0402/) to retrieve diff --git a/examples/go.mod b/examples/go.mod index 6b223fa1d..615b39356 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -9,9 +9,9 @@ require ( github.com/ipfs/go-datastore v0.6.0 github.com/ipld/go-car/v2 v2.13.1 github.com/ipld/go-ipld-prime v0.21.0 - github.com/libp2p/go-libp2p v0.35.1 + github.com/libp2p/go-libp2p v0.36.1 github.com/libp2p/go-libp2p-routing-helpers v0.7.3 - github.com/multiformats/go-multiaddr v0.12.4 + github.com/multiformats/go-multiaddr v0.13.0 github.com/multiformats/go-multicodec v0.9.0 github.com/prometheus/client_golang v1.19.1 github.com/stretchr/testify v1.9.0 @@ -48,7 +48,7 @@ require ( github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20240618054019-d3b898a103f8 // indirect + github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect @@ -113,36 +113,37 @@ require ( github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-multistream v0.5.0 // indirect github.com/multiformats/go-varint v0.0.7 // indirect - github.com/onsi/ginkgo/v2 v2.19.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/onsi/ginkgo/v2 v2.19.1 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go v0.4.3 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect - github.com/pion/datachannel v1.5.6 // indirect - github.com/pion/dtls/v2 v2.2.11 // indirect - github.com/pion/ice/v2 v2.3.25 // indirect + github.com/pion/datachannel v1.5.8 // indirect + github.com/pion/dtls/v2 v2.2.12 // indirect + github.com/pion/ice/v2 v2.3.32 // indirect github.com/pion/interceptor v0.1.29 // indirect github.com/pion/logging v0.2.2 // indirect github.com/pion/mdns v0.0.12 // indirect github.com/pion/randutil v0.1.0 // indirect github.com/pion/rtcp v1.2.14 // indirect - github.com/pion/rtp v1.8.6 // indirect - github.com/pion/sctp v1.8.16 // indirect + github.com/pion/rtp v1.8.8 // indirect + github.com/pion/sctp v1.8.20 // indirect github.com/pion/sdp/v3 v3.0.9 // indirect - github.com/pion/srtp/v2 v2.0.18 // indirect + github.com/pion/srtp/v2 v2.0.20 // indirect github.com/pion/stun v0.6.1 // indirect - github.com/pion/transport/v2 v2.2.5 // indirect + github.com/pion/transport/v2 v2.2.9 // indirect github.com/pion/turn/v2 v2.1.6 // indirect - github.com/pion/webrtc/v3 v3.2.42 // 
indirect + github.com/pion/webrtc/v3 v3.2.50 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.54.0 // indirect + github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/quic-go v0.45.0 // indirect + github.com/quic-go/quic-go v0.45.2 // indirect github.com/quic-go/webtransport-go v0.8.0 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/samber/lo v1.39.0 // indirect @@ -153,6 +154,7 @@ require ( github.com/whyrusleeping/cbor-gen v0.1.2 // indirect github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect + github.com/wlynxg/anet v0.0.3 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/propagators/aws v1.21.1 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.21.1 // indirect @@ -168,18 +170,18 @@ require ( go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.17.1 // indirect - go.uber.org/fx v1.22.0 // indirect + go.uber.org/fx v1.22.1 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.24.0 // indirect - golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect - golang.org/x/mod v0.18.0 // indirect - golang.org/x/net v0.26.0 // indirect + golang.org/x/crypto v0.25.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/mod v0.19.0 // indirect + golang.org/x/net v0.27.0 // indirect golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.21.0 // indirect + golang.org/x/sys v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect - golang.org/x/tools v0.22.0 // indirect + golang.org/x/tools v0.23.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect gonum.org/v1/gonum v0.15.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect diff --git a/examples/go.sum b/examples/go.sum index 0846f18ba..67b7b18ea 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -129,8 +129,8 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20240618054019-d3b898a103f8 h1:ASJ/LAqdCHOyMYI+dwNxn7Rd8FscNkMyTr1KZU1JI/M= -github.com/google/pprof v0.0.0-20240618054019-d3b898a103f8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -215,6 +215,8 @@ 
github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fG github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVztekOF0pg= github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHjatZ2GA8XD+KbPU= +github.com/ipfs/go-test v0.0.4 h1:DKT66T6GBB6PsDFLoO56QZPrOmzJkqU1FZH5C9ySkew= +github.com/ipfs/go-test v0.0.4/go.mod h1:qhIM1EluEfElKKM6fnWxGn822/z9knUGM1+I/OAQNKI= github.com/ipfs/go-unixfs v0.4.5 h1:wj8JhxvV1G6CD7swACwSKYa+NgtdWC1RUit+gFnymDU= github.com/ipfs/go-unixfs v0.4.5/go.mod h1:BIznJNvt/gEx/ooRMI4Us9K8+qeGO7vx1ohnbk8gjFg= github.com/ipfs/go-unixfsnode v1.9.0 h1:ubEhQhr22sPAKO2DNsyVBW7YB/zA8Zkif25aBvz8rc8= @@ -269,8 +271,8 @@ github.com/libp2p/go-doh-resolver v0.4.0 h1:gUBa1f1XsPwtpE1du0O+nnZCUqtG7oYi7Bb+ github.com/libp2p/go-doh-resolver v0.4.0/go.mod h1:v1/jwsFusgsWIGX/c6vCRrnJ60x7bhTiq/fs2qt0cAg= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.35.1 h1:Hm7Ub2BF+GCb14ojcsEK6WAy5it5smPDK02iXSZLl50= -github.com/libp2p/go-libp2p v0.35.1/go.mod h1:Dnkgba5hsfSv5dvvXC8nfqk44hH0gIKKno+HOMU0fdc= +github.com/libp2p/go-libp2p v0.36.1 h1:piAHesy0/8ifBEBUS8HF2m7ywR5vnktUFv00dTsVKcs= +github.com/libp2p/go-libp2p v0.36.1/go.mod h1:vHzel3CpRB+vS11fIjZSJAU4ALvieKV9VZHC9VerHj8= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-kad-dht v0.25.2 h1:FOIk9gHoe4YRWXTu8SY9Z1d0RILol0TrtApsMDPjAVQ= @@ -326,8 +328,8 @@ github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9 github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.12.4 h1:rrKqpY9h+n80EwhhC/kkcunCZZ7URIF8yN1WEUt2Hvc= -github.com/multiformats/go-multiaddr v0.12.4/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= +github.com/multiformats/go-multiaddr v0.13.0 h1:BCBzs61E3AGHcYYTv8dqRH43ZfyrqM8RXVPT8t13tLQ= +github.com/multiformats/go-multiaddr v0.13.0/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= github.com/multiformats/go-multiaddr-dns v0.3.0/go.mod h1:mNzQ4eTGDg0ll1N9jKPOUogZPoJ30W8a7zk66FQPpdQ= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= @@ -345,12 +347,14 @@ github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dy github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/neelance/astrewrite 
v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= -github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= -github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= -github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/onsi/ginkgo/v2 v2.19.1 h1:QXgq3Z8Crl5EL1WBAC98A5sEBHARrAJNzAmMxzLcRF0= +github.com/onsi/ginkgo/v2 v2.19.1/go.mod h1:O3DtEWQkPa/F7fBMgmZQKKsluAy8pd3rEQdrjkPb9zA= +github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= +github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -363,13 +367,13 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2D github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= -github.com/pion/datachannel v1.5.6 h1:1IxKJntfSlYkpUj8LlYRSWpYiTTC02nUrOE8T3DqGeg= -github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNIVb/NfGW4= +github.com/pion/datachannel v1.5.8 h1:ph1P1NsGkazkjrvyMfhRBUAWMxugJjq2HfQifaOoSNo= +github.com/pion/datachannel v1.5.8/go.mod h1:PgmdpoaNBLX9HNzNClmdki4DYW5JtI7Yibu8QzbL3tI= github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= -github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks= -github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 v2.3.25 h1:M5rJA07dqhi3nobJIg+uPtcVjFECTrhcR3n0ns8kDZs= -github.com/pion/ice/v2 v2.3.25/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= +github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= +github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= +github.com/pion/ice/v2 v2.3.32 h1:VwE/uEeqiMm0zUWpdt1DJtnqEkj3UjEbhX92/CurtWI= +github.com/pion/ice/v2 v2.3.32/go.mod h1:8fac0+qftclGy1tYd/nfwfHC729BLaxtVqMdMVCAVPU= github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= @@ -382,31 +386,30 @@ github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9 github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE= github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/rtp v1.8.6 h1:MTmn/b0aWWsAzux2AmP8WGllusBVw4NPYPVFFd7jUPw= -github.com/pion/rtp v1.8.6/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/sctp v1.8.13/go.mod 
h1:YKSgO/bO/6aOMP9LCie1DuD7m+GamiK2yIiPM6vH+GA= -github.com/pion/sctp v1.8.16 h1:PKrMs+o9EMLRvFfXq59WFsC+V8mN1wnKzqrv+3D/gYY= -github.com/pion/sctp v1.8.16/go.mod h1:P6PbDVA++OJMrVNg2AL3XtYHV4uD6dvfyOovCgMs0PE= +github.com/pion/rtp v1.8.8 h1:EtYFHI0rpUEjT/RMnGfb1vdJhbYmPG77szD72uUnSxs= +github.com/pion/rtp v1.8.8/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/sctp v1.8.20 h1:sOc3lkV/tQaP57ZUEXIMdM2V92IIB2ia5v/ygnBxaEg= +github.com/pion/sctp v1.8.20/go.mod h1:oTxw8i5m+WbDHZJL/xUpe6CPIn1Y0GIKKwTLF4h53H8= github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= -github.com/pion/srtp/v2 v2.0.18 h1:vKpAXfawO9RtTRKZJbG4y0v1b11NZxQnxRl85kGuUlo= -github.com/pion/srtp/v2 v2.0.18/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= +github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk= +github.com/pion/srtp/v2 v2.0.20/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= -github.com/pion/transport/v2 v2.2.2/go.mod h1:OJg3ojoBJopjEeECq2yJdXH9YVrUJ1uQ++NjXLOUorc= github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= -github.com/pion/transport/v2 v2.2.5 h1:iyi25i/21gQck4hfRhomF6SktmUQjRsRW4WJdhfc3Kc= -github.com/pion/transport/v2 v2.2.5/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.8/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= +github.com/pion/transport/v2 v2.2.9 h1:WEDygVovkJlV2CCunM9KS2kds+kcl7zdIefQA5y/nkE= +github.com/pion/transport/v2 v2.2.9/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= -github.com/pion/transport/v3 v3.0.2 h1:r+40RJR25S9w3jbA6/5uEPTzcdn7ncyU44RWCbHkLg4= -github.com/pion/transport/v3 v3.0.2/go.mod h1:nIToODoOlb5If2jF9y2Igfx3PFYWfuXi37m0IlWa/D0= +github.com/pion/transport/v3 v3.0.6 h1:k1mQU06bmmX143qSWgXFqSH1KUJceQvIUuVH/K5ELWw= +github.com/pion/transport/v3 v3.0.6/go.mod h1:HvJr2N/JwNJAfipsRleqwFoR3t/pWyHeZUs89v3+t5s= github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= -github.com/pion/webrtc/v3 v3.2.42 h1:WN/ZuMjtpQOoGRCZUg/zFG+JHEvYLVyDKOxU6H1qWlE= -github.com/pion/webrtc/v3 v3.2.42/go.mod h1:M1RAe3TNTD1tzyvqHrbVODfwdPGSXOUo/OgpoGGJqFY= +github.com/pion/webrtc/v3 v3.2.50 h1:C/rwL2mBfCxHv6tlLzDAO3krJpQXfVx8A8WHnGJ2j34= +github.com/pion/webrtc/v3 v3.2.50/go.mod h1:dytYYoSBy7ZUWhJMbndx9UckgYvzNAfL7xgVnrIKxqo= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -422,15 +425,15 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod 
h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= -github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/quic-go v0.45.0 h1:OHmkQGM37luZITyTSu6ff03HP/2IrwDX1ZFiNEhSFUE= -github.com/quic-go/quic-go v0.45.0/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= +github.com/quic-go/quic-go v0.45.2 h1:DfqBmqjb4ExSdxRIb/+qXhPC+7k6+DUNZha4oeiC9fY= +github.com/quic-go/quic-go v0.45.2/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg= github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= @@ -514,6 +517,8 @@ github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/wlynxg/anet v0.0.3 h1:PvR53psxFXstc12jelG6f1Lv4MWqE0tI76/hHGjh9rg= +github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -559,8 +564,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.22.0 h1:pApUK7yL0OUHMd8vkunWSlLxZVFFk70jR2nKde8X2NM= -go.uber.org/fx v1.22.0/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= +go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys= +go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -588,16 +593,13 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto 
v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= -golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -611,8 +613,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -634,13 +636,10 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net 
v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -682,30 +681,23 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= @@ -733,8 +725,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f 
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/gateway/errors.go b/gateway/errors.go index c245ae4c1..5ea15ff18 100644 --- a/gateway/errors.go +++ b/gateway/errors.go @@ -13,6 +13,7 @@ import ( "github.com/ipfs/boxo/path" "github.com/ipfs/boxo/path/resolver" "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" "github.com/ipld/go-ipld-prime/datamodel" "github.com/ipld/go-ipld-prime/schema" ) @@ -185,10 +186,10 @@ func webError(w http.ResponseWriter, r *http.Request, c *Config, err error, defa switch { case errors.Is(err, &cid.ErrInvalidCid{}): code = http.StatusBadRequest - case isErrNotFound(err): - code = http.StatusNotFound case isErrContentBlocked(err): code = http.StatusGone + case isErrNotFound(err): + code = http.StatusNotFound case errors.Is(err, context.DeadlineExceeded): code = http.StatusGatewayTimeout } @@ -226,6 +227,10 @@ func isErrNotFound(err error) bool { return true } + if ipld.IsNotFound(err) { + return true + } + // Checks if err is of a type that does not implement the .Is interface and // cannot be directly compared to. Therefore, errors.Is cannot be used. 
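 // Instead, the loop that follows walks the chain by hand: it matches err
 // by type assertion against concrete not-found types (such as
 // resolver.ErrNoLink, alongside the datamodel and schema errors imported
 // above) and repeats on errors.Unwrap(err) until the chain is exhausted.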
for { diff --git a/gateway/gateway_test.go b/gateway/gateway_test.go index d48334b92..3c1ab3f72 100644 --- a/gateway/gateway_test.go +++ b/gateway/gateway_test.go @@ -130,9 +130,9 @@ func TestHeaders(t *testing.T) { path string cacheControl string }{ - {"/ipns/example.net/", "public, max-age=30"}, // As generated directory listing - {"/ipns/example.com/", "public, max-age=55"}, // As generated directory listing (different) - {"/ipns/unknown.com/", ""}, // As generated directory listing (unknown) + {"/ipns/example.net/", "public, max-age=30, stale-while-revalidate=2678400"}, // As generated directory listing + {"/ipns/example.com/", "public, max-age=55, stale-while-revalidate=2678400"}, // As generated directory listing (different) + {"/ipns/unknown.com/", ""}, // As generated directory listing (unknown TTL) {"/ipns/example.net/foo/", "public, max-age=30"}, // As index.html directory listing {"/ipns/example.net/foo/index.html", "public, max-age=30"}, // As deserialized UnixFS file {"/ipns/example.net/?format=raw", "public, max-age=30"}, // As Raw block @@ -924,7 +924,7 @@ func TestErrorBubblingFromBackend(t *testing.T) { }) } - testError("500 Not Found from IPLD", &ipld.ErrNotFound{}, http.StatusInternalServerError) + testError("404 Not Found from IPLD", &ipld.ErrNotFound{}, http.StatusNotFound) testError("404 Not Found from path resolver", &resolver.ErrNoLink{}, http.StatusNotFound) testError("502 Bad Gateway", ErrBadGateway, http.StatusBadGateway) testError("504 Gateway Timeout", ErrGatewayTimeout, http.StatusGatewayTimeout) diff --git a/gateway/handler_unixfs_dir.go b/gateway/handler_unixfs_dir.go index 7a49dcafc..6f9f856d1 100644 --- a/gateway/handler_unixfs_dir.go +++ b/gateway/handler_unixfs_dir.go @@ -136,9 +136,14 @@ func (i *handler) serveDirectory(ctx context.Context, w http.ResponseWriter, r * dirEtag := getDirListingEtag(resolvedPath.RootCid()) w.Header().Set("Etag", dirEtag) - // Add TTL if known. 
+ // Set Cache-Control if rq.ttl > 0 { - w.Header().Set("Cache-Control", fmt.Sprintf("public, max-age=%d", int(rq.ttl.Seconds()))) + // Use known TTL from IPNS Record or DNSLink TXT Record + w.Header().Set("Cache-Control", fmt.Sprintf("public, max-age=%d, stale-while-revalidate=2678400", int(rq.ttl.Seconds()))) + } else if !rq.contentPath.Mutable() { + // Cache for 1 week, serve stale cache for up to a month + // (style of generated HTML may change, should not be cached forever) + w.Header().Set("Cache-Control", "public, max-age=604800, stale-while-revalidate=2678400") } if r.Method == http.MethodHead { diff --git a/go.mod b/go.mod index f40b0b66e..ed882bbee 100644 --- a/go.mod +++ b/go.mod @@ -30,6 +30,7 @@ require ( github.com/ipfs/go-log/v2 v2.5.1 github.com/ipfs/go-metrics-interface v0.0.1 github.com/ipfs/go-peertaskqueue v0.8.1 + github.com/ipfs/go-test v0.0.4 github.com/ipfs/go-unixfsnode v1.9.0 github.com/ipld/go-car v0.6.2 github.com/ipld/go-car/v2 v2.13.1 @@ -38,7 +39,7 @@ require ( github.com/jbenet/goprocess v0.1.4 github.com/libp2p/go-buffer-pool v0.1.0 github.com/libp2p/go-doh-resolver v0.4.0 - github.com/libp2p/go-libp2p v0.35.1 + github.com/libp2p/go-libp2p v0.36.1 github.com/libp2p/go-libp2p-kad-dht v0.25.2 github.com/libp2p/go-libp2p-record v0.2.0 github.com/libp2p/go-libp2p-routing-helpers v0.7.3 @@ -47,7 +48,7 @@ require ( github.com/miekg/dns v1.1.61 github.com/mr-tron/base58 v1.2.0 github.com/multiformats/go-base32 v0.1.0 - github.com/multiformats/go-multiaddr v0.12.4 + github.com/multiformats/go-multiaddr v0.13.0 github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multicodec v0.9.0 @@ -75,7 +76,7 @@ require ( go.uber.org/zap v1.27.0 golang.org/x/oauth2 v0.21.0 golang.org/x/sync v0.7.0 - golang.org/x/sys v0.21.0 + golang.org/x/sys v0.22.0 google.golang.org/protobuf v1.34.2 ) @@ -99,7 +100,7 @@ require ( github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20240618054019-d3b898a103f8 // indirect + github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect @@ -138,34 +139,35 @@ require ( github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-varint v0.0.7 // indirect - github.com/onsi/ginkgo/v2 v2.19.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/onsi/ginkgo/v2 v2.19.1 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go v0.4.3 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect - github.com/pion/datachannel v1.5.6 // indirect - github.com/pion/dtls/v2 v2.2.11 // indirect - github.com/pion/ice/v2 v2.3.25 // indirect + github.com/pion/datachannel v1.5.8 // indirect + github.com/pion/dtls/v2 v2.2.12 // indirect + github.com/pion/ice/v2 v2.3.32 // indirect github.com/pion/interceptor v0.1.29 // indirect github.com/pion/logging v0.2.2 // indirect github.com/pion/mdns v0.0.12 // indirect github.com/pion/randutil v0.1.0 // 
indirect github.com/pion/rtcp v1.2.14 // indirect - github.com/pion/rtp v1.8.6 // indirect - github.com/pion/sctp v1.8.16 // indirect + github.com/pion/rtp v1.8.8 // indirect + github.com/pion/sctp v1.8.20 // indirect github.com/pion/sdp/v3 v3.0.9 // indirect - github.com/pion/srtp/v2 v2.0.18 // indirect + github.com/pion/srtp/v2 v2.0.20 // indirect github.com/pion/stun v0.6.1 // indirect - github.com/pion/transport/v2 v2.2.5 // indirect + github.com/pion/transport/v2 v2.2.9 // indirect github.com/pion/turn/v2 v2.1.6 // indirect - github.com/pion/webrtc/v3 v3.2.42 // indirect + github.com/pion/webrtc/v3 v3.2.50 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.54.0 // indirect + github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/quic-go v0.45.0 // indirect + github.com/quic-go/quic-go v0.45.2 // indirect github.com/quic-go/webtransport-go v0.8.0 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/stretchr/objx v0.5.2 // indirect @@ -173,18 +175,19 @@ require ( github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect github.com/whyrusleeping/cbor-gen v0.1.2 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect + github.com/wlynxg/anet v0.0.3 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect go.opentelemetry.io/otel/metric v1.27.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/dig v1.17.1 // indirect - go.uber.org/fx v1.22.0 // indirect + go.uber.org/fx v1.22.1 // indirect go.uber.org/mock v0.4.0 // indirect - golang.org/x/crypto v0.24.0 // indirect - golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect - golang.org/x/mod v0.18.0 // indirect - golang.org/x/net v0.26.0 // indirect + golang.org/x/crypto v0.25.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/mod v0.19.0 // indirect + golang.org/x/net v0.27.0 // indirect golang.org/x/text v0.16.0 // indirect - golang.org/x/tools v0.22.0 // indirect + golang.org/x/tools v0.23.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect gonum.org/v1/gonum v0.15.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect @@ -193,3 +196,5 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.3.0 // indirect ) + +replace github.com/libp2p/go-libp2p => github.com/libp2p/go-libp2p v0.35.1-0.20240804142423-e2e0d2917f55 diff --git a/go.sum b/go.sum index f8ca0bf1f..59626e503 100644 --- a/go.sum +++ b/go.sum @@ -129,8 +129,8 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20240618054019-d3b898a103f8 h1:ASJ/LAqdCHOyMYI+dwNxn7Rd8FscNkMyTr1KZU1JI/M= -github.com/google/pprof v0.0.0-20240618054019-d3b898a103f8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= +github.com/google/pprof 
v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -222,6 +222,8 @@ github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fG github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVztekOF0pg= github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHjatZ2GA8XD+KbPU= +github.com/ipfs/go-test v0.0.4 h1:DKT66T6GBB6PsDFLoO56QZPrOmzJkqU1FZH5C9ySkew= +github.com/ipfs/go-test v0.0.4/go.mod h1:qhIM1EluEfElKKM6fnWxGn822/z9knUGM1+I/OAQNKI= github.com/ipfs/go-unixfs v0.4.5 h1:wj8JhxvV1G6CD7swACwSKYa+NgtdWC1RUit+gFnymDU= github.com/ipfs/go-unixfs v0.4.5/go.mod h1:BIznJNvt/gEx/ooRMI4Us9K8+qeGO7vx1ohnbk8gjFg= github.com/ipfs/go-unixfsnode v1.9.0 h1:ubEhQhr22sPAKO2DNsyVBW7YB/zA8Zkif25aBvz8rc8= @@ -277,8 +279,8 @@ github.com/libp2p/go-doh-resolver v0.4.0 h1:gUBa1f1XsPwtpE1du0O+nnZCUqtG7oYi7Bb+ github.com/libp2p/go-doh-resolver v0.4.0/go.mod h1:v1/jwsFusgsWIGX/c6vCRrnJ60x7bhTiq/fs2qt0cAg= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.35.1 h1:Hm7Ub2BF+GCb14ojcsEK6WAy5it5smPDK02iXSZLl50= -github.com/libp2p/go-libp2p v0.35.1/go.mod h1:Dnkgba5hsfSv5dvvXC8nfqk44hH0gIKKno+HOMU0fdc= +github.com/libp2p/go-libp2p v0.35.1-0.20240804142423-e2e0d2917f55 h1:/iBsYYCzlVCiMMUfXWiHzgWpTFzZwes3cTlamdzXv6g= +github.com/libp2p/go-libp2p v0.35.1-0.20240804142423-e2e0d2917f55/go.mod h1:mdtNGqy0AQuiYJuO1bXPdFOyFeyMTMSVZ03OBi/XLS4= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-kad-dht v0.25.2 h1:FOIk9gHoe4YRWXTu8SY9Z1d0RILol0TrtApsMDPjAVQ= @@ -337,8 +339,8 @@ github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9 github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.12.4 h1:rrKqpY9h+n80EwhhC/kkcunCZZ7URIF8yN1WEUt2Hvc= -github.com/multiformats/go-multiaddr v0.12.4/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= +github.com/multiformats/go-multiaddr v0.13.0 h1:BCBzs61E3AGHcYYTv8dqRH43ZfyrqM8RXVPT8t13tLQ= +github.com/multiformats/go-multiaddr v0.13.0/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= github.com/multiformats/go-multiaddr-dns v0.3.0/go.mod h1:mNzQ4eTGDg0ll1N9jKPOUogZPoJ30W8a7zk66FQPpdQ= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= @@ -358,12 +360,14 @@ github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dy github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint 
v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= -github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= -github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= -github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/onsi/ginkgo/v2 v2.19.1 h1:QXgq3Z8Crl5EL1WBAC98A5sEBHARrAJNzAmMxzLcRF0= +github.com/onsi/ginkgo/v2 v2.19.1/go.mod h1:O3DtEWQkPa/F7fBMgmZQKKsluAy8pd3rEQdrjkPb9zA= +github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= +github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -376,13 +380,13 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2D github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= -github.com/pion/datachannel v1.5.6 h1:1IxKJntfSlYkpUj8LlYRSWpYiTTC02nUrOE8T3DqGeg= -github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNIVb/NfGW4= +github.com/pion/datachannel v1.5.8 h1:ph1P1NsGkazkjrvyMfhRBUAWMxugJjq2HfQifaOoSNo= +github.com/pion/datachannel v1.5.8/go.mod h1:PgmdpoaNBLX9HNzNClmdki4DYW5JtI7Yibu8QzbL3tI= github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= -github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks= -github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 v2.3.25 h1:M5rJA07dqhi3nobJIg+uPtcVjFECTrhcR3n0ns8kDZs= -github.com/pion/ice/v2 v2.3.25/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= +github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= +github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= +github.com/pion/ice/v2 v2.3.32 h1:VwE/uEeqiMm0zUWpdt1DJtnqEkj3UjEbhX92/CurtWI= +github.com/pion/ice/v2 v2.3.32/go.mod h1:8fac0+qftclGy1tYd/nfwfHC729BLaxtVqMdMVCAVPU= github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= @@ -395,31 +399,30 @@ github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9 github.com/pion/rtcp v1.2.14 
h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE= github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/rtp v1.8.6 h1:MTmn/b0aWWsAzux2AmP8WGllusBVw4NPYPVFFd7jUPw= -github.com/pion/rtp v1.8.6/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/sctp v1.8.13/go.mod h1:YKSgO/bO/6aOMP9LCie1DuD7m+GamiK2yIiPM6vH+GA= -github.com/pion/sctp v1.8.16 h1:PKrMs+o9EMLRvFfXq59WFsC+V8mN1wnKzqrv+3D/gYY= -github.com/pion/sctp v1.8.16/go.mod h1:P6PbDVA++OJMrVNg2AL3XtYHV4uD6dvfyOovCgMs0PE= +github.com/pion/rtp v1.8.8 h1:EtYFHI0rpUEjT/RMnGfb1vdJhbYmPG77szD72uUnSxs= +github.com/pion/rtp v1.8.8/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/sctp v1.8.20 h1:sOc3lkV/tQaP57ZUEXIMdM2V92IIB2ia5v/ygnBxaEg= +github.com/pion/sctp v1.8.20/go.mod h1:oTxw8i5m+WbDHZJL/xUpe6CPIn1Y0GIKKwTLF4h53H8= github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= -github.com/pion/srtp/v2 v2.0.18 h1:vKpAXfawO9RtTRKZJbG4y0v1b11NZxQnxRl85kGuUlo= -github.com/pion/srtp/v2 v2.0.18/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= +github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk= +github.com/pion/srtp/v2 v2.0.20/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= -github.com/pion/transport/v2 v2.2.2/go.mod h1:OJg3ojoBJopjEeECq2yJdXH9YVrUJ1uQ++NjXLOUorc= github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= -github.com/pion/transport/v2 v2.2.5 h1:iyi25i/21gQck4hfRhomF6SktmUQjRsRW4WJdhfc3Kc= -github.com/pion/transport/v2 v2.2.5/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.8/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= +github.com/pion/transport/v2 v2.2.9 h1:WEDygVovkJlV2CCunM9KS2kds+kcl7zdIefQA5y/nkE= +github.com/pion/transport/v2 v2.2.9/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= -github.com/pion/transport/v3 v3.0.2 h1:r+40RJR25S9w3jbA6/5uEPTzcdn7ncyU44RWCbHkLg4= -github.com/pion/transport/v3 v3.0.2/go.mod h1:nIToODoOlb5If2jF9y2Igfx3PFYWfuXi37m0IlWa/D0= +github.com/pion/transport/v3 v3.0.6 h1:k1mQU06bmmX143qSWgXFqSH1KUJceQvIUuVH/K5ELWw= +github.com/pion/transport/v3 v3.0.6/go.mod h1:HvJr2N/JwNJAfipsRleqwFoR3t/pWyHeZUs89v3+t5s= github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= -github.com/pion/webrtc/v3 v3.2.42 h1:WN/ZuMjtpQOoGRCZUg/zFG+JHEvYLVyDKOxU6H1qWlE= -github.com/pion/webrtc/v3 v3.2.42/go.mod h1:M1RAe3TNTD1tzyvqHrbVODfwdPGSXOUo/OgpoGGJqFY= +github.com/pion/webrtc/v3 v3.2.50 h1:C/rwL2mBfCxHv6tlLzDAO3krJpQXfVx8A8WHnGJ2j34= +github.com/pion/webrtc/v3 v3.2.50/go.mod h1:dytYYoSBy7ZUWhJMbndx9UckgYvzNAfL7xgVnrIKxqo= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -435,15 +438,15 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= -github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/quic-go v0.45.0 h1:OHmkQGM37luZITyTSu6ff03HP/2IrwDX1ZFiNEhSFUE= -github.com/quic-go/quic-go v0.45.0/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= +github.com/quic-go/quic-go v0.45.2 h1:DfqBmqjb4ExSdxRIb/+qXhPC+7k6+DUNZha4oeiC9fY= +github.com/quic-go/quic-go v0.45.2/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg= github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= @@ -527,6 +530,8 @@ github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/wlynxg/anet v0.0.3 h1:PvR53psxFXstc12jelG6f1Lv4MWqE0tI76/hHGjh9rg= +github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -562,8 +567,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.22.0 h1:pApUK7yL0OUHMd8vkunWSlLxZVFFk70jR2nKde8X2NM= -go.uber.org/fx v1.22.0/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= +go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys= +go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= 
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -592,16 +597,13 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= -golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -615,8 +617,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -638,13 +640,10 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod 
h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -689,30 +688,23 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.11.0/go.mod 
h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= @@ -740,8 +732,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/ipld/merkledag/merkledag_test.go b/ipld/merkledag/merkledag_test.go index ffe4946ca..004d462aa 100644 --- a/ipld/merkledag/merkledag_test.go +++ b/ipld/merkledag/merkledag_test.go @@ -23,10 +23,10 @@ import ( bserv "github.com/ipfs/boxo/blockservice" bstest "github.com/ipfs/boxo/blockservice/test" offline "github.com/ipfs/boxo/exchange/offline" - u "github.com/ipfs/boxo/util" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-test/random" prime "github.com/ipld/go-ipld-prime" mh "github.com/multiformats/go-multihash" ) @@ -353,7 +353,7 @@ func (devZero) Read(b []byte) (int, error) { } func TestBatchFetch(t *testing.T) { - read := io.LimitReader(u.NewTimeSeededRand(), 1024*32) + read := io.LimitReader(random.NewRand(), 1024*32) runBatchFetchTest(t, read) } @@ -513,7 +513,7 @@ func TestFetchGraph(t *testing.T) { dservs = append(dservs, NewDAGService(bsi)) } - read := io.LimitReader(u.NewTimeSeededRand(), 1024*32) + read := io.LimitReader(random.NewRand(), 1024*32) root := makeTestDAG(t, read, dservs[0]) err := FetchGraph(context.TODO(), root.Cid(), dservs[1]) @@ -595,7 +595,7 @@ func TestWalk(t *testing.T) { bsi := bstest.Mocks(1) ds := NewDAGService(bsi[0]) - read := io.LimitReader(u.NewTimeSeededRand(), 1024*1024) + read := io.LimitReader(random.NewRand(), 1024*1024) root := makeTestDAG(t, read, ds) set := cid.NewSet() diff --git a/ipld/unixfs/hamt/hamt_stress_test.go b/ipld/unixfs/hamt/hamt_stress_test.go index 89c3c69b4..de8575d22 100644 --- a/ipld/unixfs/hamt/hamt_stress_test.go +++ b/ipld/unixfs/hamt/hamt_stress_test.go @@ -33,14 +33,6 @@ type testOp struct { Val string } -func stringArrToSet(arr []string) map[string]bool { - out := make(map[string]bool) - for _, s := range arr { - out[s] = true - } - return out -} - // generate two different random sets of operations to result in the same // ending directory (same set of entries at the end) and execute each of them // in turn, then compare to ensure the output is the same on each. 
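// The genOpSet hunk below replaces the removed stringArrToSet helper with an
// inline map[string]struct{}, the idiomatic Go set type: the empty struct
// occupies no storage and membership is read with the comma-ok form. A
// minimal, self-contained sketch of the idiom (identifiers here are
// illustrative only, not taken from this change):
func exampleStringSet(items []string) bool {
	set := make(map[string]struct{}, len(items)) // pre-sized to avoid rehashing
	for _, s := range items {
		set[s] = struct{}{} // zero-byte value; presence is all that matters
	}
	_, ok := set["foo"] // membership test, no throwaway bool stored
	return ok
}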
@@ -147,7 +139,10 @@ func executeOpSet(t *testing.T, ds ipld.DAGService, width int, ops []testOp) (*S } func genOpSet(seed int64, keep, temp []string) []testOp { - tempset := stringArrToSet(temp) + tempset := make(map[string]struct{}, len(temp)) + for _, s := range temp { + tempset[s] = struct{}{} + } allnames := append(keep, temp...) shuffle(seed, allnames) @@ -172,7 +167,7 @@ func genOpSet(seed int64, keep, temp []string) []testOp { Val: next, }) - if tempset[next] { + if _, ok := tempset[next]; ok { todel = append(todel, next) } } else { diff --git a/ipld/unixfs/importer/balanced/balanced_test.go b/ipld/unixfs/importer/balanced/balanced_test.go index 17afbb232..5a5dcf9ad 100644 --- a/ipld/unixfs/importer/balanced/balanced_test.go +++ b/ipld/unixfs/importer/balanced/balanced_test.go @@ -14,8 +14,8 @@ import ( chunker "github.com/ipfs/boxo/chunker" dag "github.com/ipfs/boxo/ipld/merkledag" mdtest "github.com/ipfs/boxo/ipld/merkledag/test" - u "github.com/ipfs/boxo/util" ipld "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-test/random" ) // TODO: extract these tests and more as a generic layout test suite @@ -41,7 +41,7 @@ func buildTestDag(ds ipld.DAGService, spl chunker.Splitter) (*dag.ProtoNode, err func getTestDag(t *testing.T, ds ipld.DAGService, size int64, blksize int64) (*dag.ProtoNode, []byte) { data := make([]byte, size) - u.NewTimeSeededRand().Read(data) + random.NewRand().Read(data) r := bytes.NewReader(data) nd, err := buildTestDag(ds, chunker.NewSizeSplitter(r, blksize)) diff --git a/ipld/unixfs/importer/importer_test.go b/ipld/unixfs/importer/importer_test.go index 85028257d..1d20525ea 100644 --- a/ipld/unixfs/importer/importer_test.go +++ b/ipld/unixfs/importer/importer_test.go @@ -10,14 +10,14 @@ import ( chunker "github.com/ipfs/boxo/chunker" mdtest "github.com/ipfs/boxo/ipld/merkledag/test" - u "github.com/ipfs/boxo/util" cid "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-test/random" ) func getBalancedDag(t testing.TB, size int64, blksize int64) (ipld.Node, ipld.DAGService) { ds := mdtest.Mock() - r := io.LimitReader(u.NewTimeSeededRand(), size) + r := io.LimitReader(random.NewRand(), size) nd, err := BuildDagFromReader(ds, chunker.NewSizeSplitter(r, blksize)) if err != nil { t.Fatal(err) @@ -27,7 +27,7 @@ func getBalancedDag(t testing.TB, size int64, blksize int64) (ipld.Node, ipld.DA func getTrickleDag(t testing.TB, size int64, blksize int64) (ipld.Node, ipld.DAGService) { ds := mdtest.Mock() - r := io.LimitReader(u.NewTimeSeededRand(), size) + r := io.LimitReader(random.NewRand(), size) nd, err := BuildTrickleDagFromReader(ds, chunker.NewSizeSplitter(r, blksize)) if err != nil { t.Fatal(err) @@ -38,7 +38,7 @@ func getTrickleDag(t testing.TB, size int64, blksize int64) (ipld.Node, ipld.DAG func TestStableCid(t *testing.T) { ds := mdtest.Mock() buf := make([]byte, 10*1024*1024) - u.NewSeededRand(0xdeadbeef).Read(buf) + random.NewSeededRand(0xdeadbeef).Read(buf) r := bytes.NewReader(buf) nd, err := BuildDagFromReader(ds, chunker.DefaultSplitter(r)) @@ -46,7 +46,7 @@ func TestStableCid(t *testing.T) { t.Fatal(err) } - expected, err := cid.Decode("QmZN1qquw84zhV4j6vT56tCcmFxaDaySL1ezTXFvMdNmrK") + expected, err := cid.Decode("QmPu94p2EkpSpgKdyz8eWomA7edAQN6maztoBycMZFixyz") if err != nil { t.Fatal(err) } @@ -72,7 +72,7 @@ func TestStableCid(t *testing.T) { func TestBalancedDag(t *testing.T) { ds := mdtest.Mock() buf := make([]byte, 10000) - u.NewTimeSeededRand().Read(buf) + random.NewRand().Read(buf) r := bytes.NewReader(buf) 
nd, err := BuildDagFromReader(ds, chunker.DefaultSplitter(r)) diff --git a/ipld/unixfs/importer/trickle/trickle_test.go b/ipld/unixfs/importer/trickle/trickle_test.go index e525cd9e8..9078fdc02 100644 --- a/ipld/unixfs/importer/trickle/trickle_test.go +++ b/ipld/unixfs/importer/trickle/trickle_test.go @@ -15,8 +15,8 @@ import ( chunker "github.com/ipfs/boxo/chunker" merkledag "github.com/ipfs/boxo/ipld/merkledag" mdtest "github.com/ipfs/boxo/ipld/merkledag/test" - u "github.com/ipfs/boxo/util" ipld "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-test/random" ) type UseRawLeaves bool @@ -90,7 +90,7 @@ func dup(b []byte) []byte { func testFileConsistency(t *testing.T, bs chunker.SplitterGen, nbytes int, rawLeaves UseRawLeaves) { should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() @@ -120,9 +120,10 @@ func TestBuilderConsistency(t *testing.T) { } func testBuilderConsistency(t *testing.T, rawLeaves UseRawLeaves) { - nbytes := 100000 + const nbytes = 100000 buf := new(bytes.Buffer) - io.CopyN(buf, u.NewTimeSeededRand(), int64(nbytes)) + io.CopyN(buf, random.NewRand(), int64(nbytes)) + should := dup(buf.Bytes()) dagserv := mdtest.Mock() nd, err := buildTestDag(dagserv, chunker.DefaultSplitter(buf), rawLeaves) @@ -163,9 +164,9 @@ func TestIndirectBlocks(t *testing.T) { func testIndirectBlocks(t *testing.T, rawLeaves UseRawLeaves) { splitter := chunker.SizeSplitterGen(512) - nbytes := 1024 * 1024 + const nbytes = 1024 * 1024 buf := make([]byte, nbytes) - u.NewTimeSeededRand().Read(buf) + random.NewRand().Read(buf) read := bytes.NewReader(buf) @@ -195,9 +196,9 @@ func TestSeekingBasic(t *testing.T) { } func testSeekingBasic(t *testing.T, rawLeaves UseRawLeaves) { - nbytes := int64(10 * 1024) + const nbytes = 10 * 1024 should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() @@ -236,9 +237,9 @@ func TestSeekToBegin(t *testing.T) { } func testSeekToBegin(t *testing.T, rawLeaves UseRawLeaves) { - nbytes := int64(10 * 1024) + const nbytes = 10 * 1024 should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() @@ -284,9 +285,9 @@ func TestSeekToAlmostBegin(t *testing.T) { } func testSeekToAlmostBegin(t *testing.T, rawLeaves UseRawLeaves) { - nbytes := int64(10 * 1024) + const nbytes = 10 * 1024 should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() @@ -332,9 +333,9 @@ func TestSeekEnd(t *testing.T) { } func testSeekEnd(t *testing.T, rawLeaves UseRawLeaves) { - nbytes := int64(50 * 1024) + const nbytes = 50 * 1024 should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() @@ -362,9 +363,9 @@ func TestSeekEndSingleBlockFile(t *testing.T) { } func testSeekEndSingleBlockFile(t *testing.T, rawLeaves UseRawLeaves) { - nbytes := int64(100) + const nbytes = 100 should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() @@ -392,9 +393,9 @@ func TestSeekingStress(t *testing.T) { } func testSeekingStress(t *testing.T, rawLeaves UseRawLeaves) { - nbytes := int64(1024 * 1024) + const nbytes = 1024 * 1024 should := make([]byte, nbytes) - 
u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() @@ -440,9 +441,9 @@ func TestSeekingConsistency(t *testing.T) { } func testSeekingConsistency(t *testing.T, rawLeaves UseRawLeaves) { - nbytes := int64(128 * 1024) + const nbytes = 128 * 1024 should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() @@ -458,7 +459,7 @@ func testSeekingConsistency(t *testing.T, rawLeaves UseRawLeaves) { out := make([]byte, nbytes) - for coff := nbytes - 4096; coff >= 0; coff -= 4096 { + for coff := int64(nbytes - 4096); coff >= 0; coff -= 4096 { t.Log(coff) n, err := rs.Seek(coff, io.SeekStart) if err != nil { @@ -487,9 +488,9 @@ func TestAppend(t *testing.T) { } func testAppend(t *testing.T, rawLeaves UseRawLeaves) { - nbytes := int64(128 * 1024) + const nbytes = 128 * 1024 should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) // Reader for half the bytes read := bytes.NewReader(should[:nbytes/2]) @@ -554,9 +555,9 @@ func testMultipleAppends(t *testing.T, rawLeaves UseRawLeaves) { ds := mdtest.Mock() // TODO: fix small size appends and make this number bigger - nbytes := int64(1000) + const nbytes = 1000 should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) read := bytes.NewReader(nil) nd, err := buildTestDag(ds, chunker.NewSizeSplitter(read, 500), rawLeaves) diff --git a/ipld/unixfs/mod/dagmodifier_test.go b/ipld/unixfs/mod/dagmodifier_test.go index fab7a125b..ebbc98b10 100644 --- a/ipld/unixfs/mod/dagmodifier_test.go +++ b/ipld/unixfs/mod/dagmodifier_test.go @@ -7,19 +7,17 @@ import ( "testing" dag "github.com/ipfs/boxo/ipld/merkledag" + "github.com/ipfs/boxo/ipld/unixfs" h "github.com/ipfs/boxo/ipld/unixfs/importer/helpers" trickle "github.com/ipfs/boxo/ipld/unixfs/importer/trickle" uio "github.com/ipfs/boxo/ipld/unixfs/io" testu "github.com/ipfs/boxo/ipld/unixfs/test" - - "github.com/ipfs/boxo/ipld/unixfs" - u "github.com/ipfs/boxo/util" + "github.com/ipfs/go-test/random" ) func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier, opts testu.NodeOpts) []byte { newdata := make([]byte, size) - r := u.NewTimeSeededRand() - r.Read(newdata) + random.NewRand().Read(newdata) if size+beg > uint64(len(orig)) { orig = append(orig, make([]byte, (size+beg)-uint64(len(orig)))...) 
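 // When the write window extends past the end of orig, the append above
 // zero-pads orig out to size+beg bytes, so the region being written
 // exists before newdata is overlaid on it.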
@@ -135,7 +133,7 @@ func testDagModifierBasic(t *testing.T, opts testu.NodeOpts) { t.Fatal(err) } - expected := uint64(50000 + 3500 + 3000) + const expected = uint64(50000 + 3500 + 3000) if size != expected { t.Fatalf("Final reported size is incorrect [%d != %d]", size, expected) } @@ -161,7 +159,7 @@ func testMultiWrite(t *testing.T, opts testu.NodeOpts) { } data := make([]byte, 4000) - u.NewTimeSeededRand().Read(data) + random.NewRand().Read(data) for i := 0; i < len(data); i++ { n, err := dagmod.WriteAt(data[i:i+1], int64(i)) @@ -205,7 +203,7 @@ func testMultiWriteAndFlush(t *testing.T, opts testu.NodeOpts) { } data := make([]byte, 20) - u.NewTimeSeededRand().Read(data) + random.NewRand().Read(data) for i := 0; i < len(data); i++ { n, err := dagmod.WriteAt(data[i:i+1], int64(i)) @@ -244,7 +242,7 @@ func testWriteNewFile(t *testing.T, opts testu.NodeOpts) { } towrite := make([]byte, 2000) - u.NewTimeSeededRand().Read(towrite) + random.NewRand().Read(towrite) nw, err := dagmod.Write(towrite) if err != nil { @@ -277,7 +275,7 @@ func testMultiWriteCoal(t *testing.T, opts testu.NodeOpts) { } data := make([]byte, 1000) - u.NewTimeSeededRand().Read(data) + random.NewRand().Read(data) for i := 0; i < len(data); i++ { n, err := dagmod.WriteAt(data[:i+1], 0) @@ -313,11 +311,11 @@ func testLargeWriteChunks(t *testing.T, opts testu.NodeOpts) { dagmod.RawLeaves = true } - wrsize := 1000 - datasize := 10000000 + const wrsize = 1000 + const datasize = 10000000 data := make([]byte, datasize) - u.NewTimeSeededRand().Read(data) + random.NewRand().Read(data) for i := 0; i < datasize/wrsize; i++ { n, err := dagmod.WriteAt(data[i*wrsize:(i+1)*wrsize], int64(i*wrsize)) @@ -532,7 +530,7 @@ func testSparseWrite(t *testing.T, opts testu.NodeOpts) { } buf := make([]byte, 5000) - u.NewTimeSeededRand().Read(buf[2500:]) + random.NewRand().Read(buf[2500:]) wrote, err := dagmod.WriteAt(buf[2500:], 2500) if err != nil { @@ -577,7 +575,7 @@ func testSeekPastEndWrite(t *testing.T, opts testu.NodeOpts) { } buf := make([]byte, 5000) - u.NewTimeSeededRand().Read(buf[2500:]) + random.NewRand().Read(buf[2500:]) nseek, err := dagmod.Seek(2500, io.SeekStart) if err != nil { @@ -841,7 +839,7 @@ func BenchmarkDagmodWrite(b *testing.B) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - wrsize := 4096 + const wrsize = 4096 dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512)) if err != nil { @@ -849,7 +847,7 @@ func BenchmarkDagmodWrite(b *testing.B) { } buf := make([]byte, b.N*wrsize) - u.NewTimeSeededRand().Read(buf) + random.NewRand().Read(buf) b.StartTimer() b.SetBytes(int64(wrsize)) for i := 0; i < b.N; i++ { diff --git a/ipld/unixfs/test/utils.go b/ipld/unixfs/test/utils.go index 4df8c1675..465808ca2 100644 --- a/ipld/unixfs/test/utils.go +++ b/ipld/unixfs/test/utils.go @@ -14,9 +14,9 @@ import ( chunker "github.com/ipfs/boxo/chunker" mdag "github.com/ipfs/boxo/ipld/merkledag" mdagmock "github.com/ipfs/boxo/ipld/merkledag/test" - u "github.com/ipfs/boxo/util" cid "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-test/random" mh "github.com/multiformats/go-multihash" ) @@ -85,14 +85,8 @@ func GetEmptyNode(t testing.TB, dserv ipld.DAGService, opts NodeOpts) ipld.Node // GetRandomNode returns a random unixfs file node. 
func GetRandomNode(t testing.TB, dserv ipld.DAGService, size int64, opts NodeOpts) ([]byte, ipld.Node) { - in := io.LimitReader(u.NewTimeSeededRand(), size) - buf, err := io.ReadAll(in) - if err != nil { - t.Fatal(err) - } - - node := GetNode(t, dserv, buf, opts) - return buf, node + buf := random.Bytes(int(size)) + return buf, GetNode(t, dserv, buf, opts) } // ArrComp checks if two byte slices are the same.
diff --git a/mfs/mfs_test.go b/mfs/mfs_test.go index 2375c4f05..eb5585a64 100644 --- a/mfs/mfs_test.go +++ b/mfs/mfs_test.go @@ -25,7 +25,7 @@ import ( ft "github.com/ipfs/boxo/ipld/unixfs" importer "github.com/ipfs/boxo/ipld/unixfs/importer" uio "github.com/ipfs/boxo/ipld/unixfs/io" - u "github.com/ipfs/boxo/util" + "github.com/ipfs/go-test/random" cid "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" @@ -46,7 +46,7 @@ func getDagserv(t testing.TB) ipld.DAGService { } func getRandFile(t *testing.T, ds ipld.DAGService, size int64) ipld.Node { - r := io.LimitReader(u.NewTimeSeededRand(), size) + r := io.LimitReader(random.NewRand(), size) return fileNodeFromReader(t, ds, r) }
diff --git a/pinning/pinner/dspinner/pin_test.go b/pinning/pinner/dspinner/pin_test.go index 54e308cba..cbc1e6c34 100644 --- a/pinning/pinner/dspinner/pin_test.go +++ b/pinning/pinner/dspinner/pin_test.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "io" "path" "testing" "time" @@ -22,13 +21,11 @@ import ( blockstore "github.com/ipfs/boxo/blockstore" offline "github.com/ipfs/boxo/exchange/offline" - util "github.com/ipfs/boxo/util" + "github.com/ipfs/go-test/random" ipfspin "github.com/ipfs/boxo/pinning/pinner" ) -var rand = util.NewTimeSeededRand() - type fakeLogger struct { logging.StandardLogger lastError error } func (f *fakeLogger) Errorf(format string, args ...interface{}) { f.lastError = args[len(args)-1].(error) } func randNode() (*mdag.ProtoNode, cid.Cid) { nd := new(mdag.ProtoNode) - nd.SetData(make([]byte, 32)) - _, err := io.ReadFull(rand, nd.Data()) - if err != nil { - panic(err) - } - k := nd.Cid() - return nd, k + nd.SetData(random.Bytes(32)) + return nd, nd.Cid() } func assertPinned(t *testing.T, p ipfspin.Pinner, c cid.Cid, failmsg string) {
diff --git a/util/util.go b/util/util.go index 7a96ae393..3a76dd238 100644 --- a/util/util.go +++ b/util/util.go @@ -54,33 +54,20 @@ func ExpandPathnames(paths []string) ([]string, error) { return out, nil } -type randGen struct { - rand.Rand -} - // NewTimeSeededRand returns a random bytes reader // which has been initialized with the current time. +// +// Deprecated: use github.com/ipfs/go-test/random instead. func NewTimeSeededRand() io.Reader { - src := rand.NewSource(time.Now().UnixNano()) - return &randGen{ - Rand: *rand.New(src), - } + return NewSeededRand(time.Now().UnixNano()) } // NewSeededRand returns a random bytes reader // initialized with the given seed. +// +// Deprecated: use github.com/ipfs/go-test/random instead.
func NewSeededRand(seed int64) io.Reader { - src := rand.NewSource(seed) - return &randGen{ - Rand: *rand.New(src), - } -} - -func (r *randGen) Read(p []byte) (n int, err error) { - for i := 0; i < len(p); i++ { - p[i] = byte(r.Rand.Intn(255)) - } - return len(p), nil + return rand.New(rand.NewSource(seed)) } // GetenvBool is the way to check an env var as a boolean diff --git a/util/util_test.go b/util/util_test.go index 70747ad90..c884d4614 100644 --- a/util/util_test.go +++ b/util/util_test.go @@ -3,6 +3,8 @@ package util import ( "bytes" "testing" + + "github.com/ipfs/go-test/random" ) func TestXOR(t *testing.T) { @@ -33,9 +35,9 @@ func TestXOR(t *testing.T) { } func BenchmarkHash256K(b *testing.B) { - buf := make([]byte, 256*1024) - NewTimeSeededRand().Read(buf) - b.SetBytes(int64(256 * 1024)) + const size = 256 * 1024 + buf := random.Bytes(size) + b.SetBytes(size) b.ResetTimer() for i := 0; i < b.N; i++ { Hash(buf) @@ -43,9 +45,9 @@ func BenchmarkHash256K(b *testing.B) { } func BenchmarkHash512K(b *testing.B) { - buf := make([]byte, 512*1024) - NewTimeSeededRand().Read(buf) - b.SetBytes(int64(512 * 1024)) + const size = 512 * 1024 + buf := random.Bytes(size) + b.SetBytes(size) b.ResetTimer() for i := 0; i < b.N; i++ { Hash(buf) @@ -53,9 +55,9 @@ func BenchmarkHash512K(b *testing.B) { } func BenchmarkHash1M(b *testing.B) { - buf := make([]byte, 1024*1024) - NewTimeSeededRand().Read(buf) - b.SetBytes(int64(1024 * 1024)) + const size = 1024 * 1024 + buf := random.Bytes(size) + b.SetBytes(size) b.ResetTimer() for i := 0; i < b.N; i++ { Hash(buf) diff --git a/version.json b/version.json index c6a304fe7..6578f1967 100644 --- a/version.json +++ b/version.json @@ -1,3 +1,3 @@ { - "version": "v0.21.0" + "version": "v0.22.0" }
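// For reference: the test migrations in this diff all converge on a small
// surface of github.com/ipfs/go-test/random. A minimal sketch of the three
// calls the changes above rely on (only the random.* calls appear in the
// diff; the surrounding program is illustrative):
package main

import (
	"io"

	"github.com/ipfs/go-test/random"
)

func main() {
	// Random fixture bytes, as used by pin_test.go and the unixfs test utils.
	buf := random.Bytes(32)

	// Time-seeded reader, the drop-in for the deprecated util.NewTimeSeededRand().
	r := io.LimitReader(random.NewRand(), 1024)
	data := make([]byte, 1024)
	_, _ = r.Read(data)

	// Deterministic reader for stable fixtures, the drop-in for
	// util.NewSeededRand(seed); compare TestStableCid above.
	det := random.NewSeededRand(0xdeadbeef)
	_, _ = det.Read(buf)
}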