diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index f8770a29bc9..b02398bb950 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -234,9 +234,8 @@ jobs:
       fail-fast: false
       matrix:
         rust-version: [
-          # 1.72.0, # current stable
-          # beta,
-          nightly-2023-09-10
+          1.75.0, # current stable
+          beta,
         ]
     steps:
       - uses: actions/checkout@v4
diff --git a/muxers/mplex/benches/split_send_size.rs b/muxers/mplex/benches/split_send_size.rs
index 9a9814d2f2a..0125d49dcef 100644
--- a/muxers/mplex/benches/split_send_size.rs
+++ b/muxers/mplex/benches/split_send_size.rs
@@ -101,7 +101,7 @@ fn prepare(c: &mut Criterion) {
 fn run(
     receiver_trans: &mut BenchTransport,
     sender_trans: &mut BenchTransport,
-    payload: &Vec<u8>,
+    payload: &[u8],
     listen_addr: &Multiaddr,
 ) {
     receiver_trans
diff --git a/muxers/mplex/src/io.rs b/muxers/mplex/src/io.rs
index 753294a7845..50fc0fc1d3f 100644
--- a/muxers/mplex/src/io.rs
+++ b/muxers/mplex/src/io.rs
@@ -224,7 +224,7 @@ where
             // yield to give the current task a chance to read
             // from the respective substreams.
             if num_buffered == self.config.max_buffer_len {
-                cx.waker().clone().wake();
+                cx.waker().wake_by_ref();
                 return Poll::Pending;
             }
 
@@ -456,7 +456,7 @@ where
             // next frame for `id`, yield to give the current task
             // a chance to read from the other substream(s).
             if num_buffered == self.config.max_buffer_len {
-                cx.waker().clone().wake();
+                cx.waker().wake_by_ref();
                 return Poll::Pending;
             }
 
@@ -663,7 +663,7 @@ where
                     connection=%self.id,
                     "No task to read from blocked stream. Waking current task."
                 );
-                cx.waker().clone().wake();
+                cx.waker().wake_by_ref();
             } else if let Some(id) = stream_id {
                 // We woke some other task, but are still interested in
                 // reading `Data` frames from the current stream when unblocked.
diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs
index cde4fbb8536..f7e2fb3a032 100644
--- a/protocols/kad/src/behaviour.rs
+++ b/protocols/kad/src/behaviour.rs
@@ -519,7 +519,7 @@ where
         };
         let key = kbucket::Key::from(*peer);
         match self.kbuckets.entry(&key) {
-            kbucket::Entry::Present(mut entry, _) => {
+            Some(kbucket::Entry::Present(mut entry, _)) => {
                 if entry.value().insert(address) {
                     self.queued_events
                         .push_back(ToSwarm::GenerateEvent(Event::RoutingUpdated {
@@ -536,11 +536,11 @@
                 }
                 RoutingUpdate::Success
             }
-            kbucket::Entry::Pending(mut entry, _) => {
+            Some(kbucket::Entry::Pending(mut entry, _)) => {
                 entry.value().insert(address);
                 RoutingUpdate::Pending
             }
-            kbucket::Entry::Absent(entry) => {
+            Some(kbucket::Entry::Absent(entry)) => {
                 let addresses = Addresses::new(address);
                 let status = if self.connected_peers.contains(peer) {
                     NodeStatus::Connected
@@ -578,7 +578,7 @@ where
                     }
                 }
             }
-            kbucket::Entry::SelfEntry => RoutingUpdate::Failed,
+            None => RoutingUpdate::Failed,
         }
     }
 
@@ -599,7 +599,7 @@ where
     ) -> Option<kbucket::EntryView<kbucket::Key<PeerId>, Addresses>> {
         let address = &address.to_owned().with_p2p(*peer).ok()?;
         let key = kbucket::Key::from(*peer);
-        match self.kbuckets.entry(&key) {
+        match self.kbuckets.entry(&key)? {
             kbucket::Entry::Present(mut entry, _) => {
                 if entry.value().remove(address).is_err() {
                     Some(entry.remove()) // it is the last address, thus remove the peer.
@@ -614,7 +614,7 @@ where
                     None
                 }
             }
-            kbucket::Entry::Absent(..) | kbucket::Entry::SelfEntry => None,
+            kbucket::Entry::Absent(..) => None,
         }
     }
 
@@ -627,10 +627,10 @@ where
         peer: &PeerId,
     ) -> Option<kbucket::EntryView<kbucket::Key<PeerId>, Addresses>> {
         let key = kbucket::Key::from(*peer);
-        match self.kbuckets.entry(&key) {
+        match self.kbuckets.entry(&key)? {
             kbucket::Entry::Present(entry, _) => Some(entry.remove()),
             kbucket::Entry::Pending(entry, _) => Some(entry.remove()),
-            kbucket::Entry::Absent(..) | kbucket::Entry::SelfEntry => None,
+            kbucket::Entry::Absent(..) => None,
         }
     }
 
@@ -1164,7 +1164,8 @@ where
                 let key = kbucket::Key::from(node_id);
                 kbuckets
                     .entry(&key)
-                    .view()
+                    .as_mut()
+                    .and_then(|e| e.view())
                     .map(|e| e.node.value.clone().into_vec())
             }
         } else {
@@ -1220,7 +1221,7 @@
     ) {
         let key = kbucket::Key::from(peer);
         match self.kbuckets.entry(&key) {
-            kbucket::Entry::Present(mut entry, old_status) => {
+            Some(kbucket::Entry::Present(mut entry, old_status)) => {
                 if old_status != new_status {
                     entry.update(new_status)
                 }
@@ -1243,7 +1244,7 @@
                 }
             }
 
-            kbucket::Entry::Pending(mut entry, old_status) => {
+            Some(kbucket::Entry::Pending(mut entry, old_status)) => {
                 if let Some(address) = address {
                     entry.value().insert(address);
                 }
@@ -1252,7 +1253,7 @@
                 }
             }
 
-            kbucket::Entry::Absent(entry) => {
+            Some(kbucket::Entry::Absent(entry)) => {
                 // Only connected nodes with a known address are newly inserted.
                 if new_status != NodeStatus::Connected {
                     return;
@@ -1863,7 +1864,7 @@ where
 
     fn address_failed(&mut self, peer_id: PeerId, address: &Multiaddr) {
         let key = kbucket::Key::from(peer_id);
-        if let Some(addrs) = self.kbuckets.entry(&key).value() {
+        if let Some(addrs) = self.kbuckets.entry(&key).as_mut().and_then(|e| e.value()) {
             // TODO: Ideally, the address should only be removed if the error can
             // be classified as "permanent" but since `err` is currently a borrowed
             // trait object without a `'static` bound, even downcasting for inspection
@@ -1931,7 +1932,12 @@ where
         let (old, new) = (old.get_remote_address(), new.get_remote_address());
 
         // Update routing table.
-        if let Some(addrs) = self.kbuckets.entry(&kbucket::Key::from(peer)).value() {
+        if let Some(addrs) = self
+            .kbuckets
+            .entry(&kbucket::Key::from(peer))
+            .as_mut()
+            .and_then(|e| e.value())
+        {
             if addrs.replace(old, new) {
                 tracing::debug!(
                     %peer,
@@ -2132,7 +2138,7 @@ where
         // the addresses of that peer in the k-buckets.
         let key = kbucket::Key::from(peer_id);
         let mut peer_addrs =
-            if let kbucket::Entry::Present(mut entry, _) = self.kbuckets.entry(&key) {
+            if let Some(kbucket::Entry::Present(mut entry, _)) = self.kbuckets.entry(&key) {
                 let addrs = entry.value().iter().cloned().collect::<Vec<_>>();
                 debug_assert!(!addrs.is_empty(), "Empty peer addresses in routing table.");
                 addrs
diff --git a/protocols/kad/src/kbucket.rs b/protocols/kad/src/kbucket.rs
index b42806fcf3c..a0d272c31f1 100644
--- a/protocols/kad/src/kbucket.rs
+++ b/protocols/kad/src/kbucket.rs
@@ -172,17 +172,16 @@ where
 
     /// Returns an `Entry` for the given key, representing the state of the entry
     /// in the routing table.
-    pub(crate) fn entry<'a>(&'a mut self, key: &'a TKey) -> Entry<'a, TKey, TVal> {
-        let index = BucketIndex::new(&self.local_key.as_ref().distance(key));
-        if let Some(i) = index {
-            let bucket = &mut self.buckets[i.get()];
-            if let Some(applied) = bucket.apply_pending() {
-                self.applied_pending.push_back(applied)
-            }
-            Entry::new(bucket, key)
-        } else {
-            Entry::SelfEntry
+    ///
+    /// Returns `None` in case the key points to the local node.
+    pub(crate) fn entry<'a>(&'a mut self, key: &'a TKey) -> Option<Entry<'a, TKey, TVal>> {
+        let index = BucketIndex::new(&self.local_key.as_ref().distance(key))?;
+
+        let bucket = &mut self.buckets[index.get()];
+        if let Some(applied) = bucket.apply_pending() {
+            self.applied_pending.push_back(applied)
         }
+        Some(Entry::new(bucket, key))
     }
 
     /// Returns an iterator over all buckets.
@@ -627,7 +626,7 @@ mod tests {
         let other_id = Key::from(PeerId::random());
         let mut table = KBucketsTable::<_, ()>::new(local_key, Duration::from_secs(5));
-        if let Entry::Absent(entry) = table.entry(&other_id) {
+        if let Some(Entry::Absent(entry)) = table.entry(&other_id) {
             match entry.insert((), NodeStatus::Connected) {
                 InsertResult::Inserted => (),
                 _ => panic!(),
             }
@@ -645,10 +644,8 @@
     fn entry_self() {
         let local_key = Key::from(PeerId::random());
         let mut table = KBucketsTable::<_, ()>::new(local_key.clone(), Duration::from_secs(5));
-        match table.entry(&local_key) {
-            Entry::SelfEntry => (),
-            _ => panic!(),
-        }
+
+        assert!(table.entry(&local_key).is_none())
     }
 
     #[test]
@@ -661,7 +658,7 @@
                 break;
             }
             let key = Key::from(PeerId::random());
-            if let Entry::Absent(e) = table.entry(&key) {
+            if let Some(Entry::Absent(e)) = table.entry(&key) {
                 match e.insert((), NodeStatus::Connected) {
                     InsertResult::Inserted => count += 1,
                     _ => continue,
@@ -694,10 +691,10 @@
         let full_bucket_index;
         loop {
             let key = Key::from(PeerId::random());
-            if let Entry::Absent(e) = table.entry(&key) {
+            if let Some(Entry::Absent(e)) = table.entry(&key) {
                 match e.insert((), NodeStatus::Disconnected) {
                     InsertResult::Full => {
-                        if let Entry::Absent(e) = table.entry(&key) {
+                        if let Some(Entry::Absent(e)) = table.entry(&key) {
                             match e.insert((), NodeStatus::Connected) {
                                 InsertResult::Pending { disconnected } => {
                                     expected_applied = AppliedPending {
@@ -732,12 +729,12 @@
         full_bucket.pending_mut().unwrap().set_ready_at(elapsed);
 
         match table.entry(&expected_applied.inserted.key) {
-            Entry::Present(_, NodeStatus::Connected) => {}
+            Some(Entry::Present(_, NodeStatus::Connected)) => {}
             x => panic!("Unexpected entry: {x:?}"),
         }
 
         match table.entry(&expected_applied.evicted.as_ref().unwrap().key) {
-            Entry::Absent(_) => {}
+            Some(Entry::Absent(_)) => {}
             x => panic!("Unexpected entry: {x:?}"),
         }
 
diff --git a/protocols/kad/src/kbucket/entry.rs b/protocols/kad/src/kbucket/entry.rs
index c38aac4a483..02c90fdfcc7 100644
--- a/protocols/kad/src/kbucket/entry.rs
+++ b/protocols/kad/src/kbucket/entry.rs
@@ -81,8 +81,6 @@ pub(crate) enum Entry<'a, TPeerId, TVal> {
     Pending(PendingEntry<'a, TPeerId, TVal>, NodeStatus),
     /// The entry is absent and may be inserted.
     Absent(AbsentEntry<'a, TPeerId, TVal>),
-    /// The entry represents the local node.
-    SelfEntry,
 }
 
 /// The internal representation of the different states of an `Entry`,
@@ -144,7 +142,6 @@ where
             Entry::Present(entry, _) => Some(entry.value()),
             Entry::Pending(entry, _) => Some(entry.value()),
             Entry::Absent(_) => None,
-            Entry::SelfEntry => None,
         }
     }
 }
diff --git a/protocols/relay/src/behaviour/rate_limiter.rs b/protocols/relay/src/behaviour/rate_limiter.rs
index a4a127e1253..9c4f67d04a8 100644
--- a/protocols/relay/src/behaviour/rate_limiter.rs
+++ b/protocols/relay/src/behaviour/rate_limiter.rs
@@ -124,7 +124,7 @@ impl GenericRateLimiter {
         // Note when used with a high number of buckets: This loop refills all the to-be-refilled
         // buckets at once, thus potentially delaying the parent call to `try_next`.
         loop {
-            match self.refill_schedule.get(0) {
+            match self.refill_schedule.front() {
                 // Only continue if (a) there is a bucket and (b) the bucket has not already been
                 // refilled recently.
                 Some((last_refill, _)) if now.duration_since(*last_refill) >= self.interval => {}
diff --git a/transports/dns/src/lib.rs b/transports/dns/src/lib.rs
index 3aeac4e4154..be847e568ed 100644
--- a/transports/dns/src/lib.rs
+++ b/transports/dns/src/lib.rs
@@ -416,6 +416,7 @@ pub enum Error<TErr> {
     /// The underlying transport encountered an error.
     Transport(TErr),
     /// DNS resolution failed.
+    #[allow(clippy::enum_variant_names)]
     ResolveError(ResolveError),
     /// DNS resolution was successful, but the underlying transport refused the resolved address.
     MultiaddrNotSupported(Multiaddr),
diff --git a/transports/noise/src/lib.rs b/transports/noise/src/lib.rs
index 70fae9d7ee6..2557e76e276 100644
--- a/transports/noise/src/lib.rs
+++ b/transports/noise/src/lib.rs
@@ -240,6 +240,7 @@ pub enum Error {
     #[error("failed to decode protobuf ")]
     InvalidPayload(#[from] DecodeError),
     #[error(transparent)]
+    #[allow(clippy::enum_variant_names)]
     SigningError(#[from] libp2p_identity::SigningError),
     #[error("Expected WebTransport certhashes ({}) are not a subset of received ones ({})", certhashes_to_string(.0), certhashes_to_string(.1))]
     UnknownWebTransportCerthashes(HashSet<Multihash<64>>, HashSet<Multihash<64>>),
diff --git a/transports/webtransport-websys/src/error.rs b/transports/webtransport-websys/src/error.rs
index ad85cab7537..704cfffb1fd 100644
--- a/transports/webtransport-websys/src/error.rs
+++ b/transports/webtransport-websys/src/error.rs
@@ -11,6 +11,7 @@ pub enum Error {
     Noise(#[from] libp2p_noise::Error),
 
     #[error("JavaScript error: {0}")]
+    #[allow(clippy::enum_variant_names)]
     JsError(String),
 
     #[error("JavaScript typecasting failed")]
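Not part of the diff above, but a minimal sketch of the API shape the kad changes adopt: `KBucketsTable::entry` now returns `Option<Entry<…>>`, with `None` taking over the role of the removed `Entry::SelfEntry` variant, so call sites either match on `Some(...)` or short-circuit with `?`. The types below (`Table`, `Entry`) are simplified, hypothetical stand-ins, not the crate's real, crate-private definitions.

```rust
use std::collections::HashMap;

// Hypothetical stand-ins for the crate-private `KBucketsTable` / `Entry` types;
// only the `Option`-returning shape of `entry()` mirrors the diff.
enum Entry<'a> {
    Present(&'a mut Vec<String>), // peer already occupies a bucket slot with addresses
    Absent,                       // peer could be inserted
}

struct Table {
    local_key: u64,
    buckets: HashMap<u64, Vec<String>>,
}

impl Table {
    // Returns `None` when `key` is the local node, mirroring the new contract
    // that was previously expressed via a dedicated `Entry::SelfEntry` arm.
    fn entry(&mut self, key: u64) -> Option<Entry<'_>> {
        if key == self.local_key {
            return None;
        }
        Some(match self.buckets.get_mut(&key) {
            Some(addrs) => Entry::Present(addrs),
            None => Entry::Absent,
        })
    }

    // Call sites can short-circuit with `?` instead of matching a self-entry
    // variant, analogous to `remove_address`/`remove_peer` in the diff.
    fn first_address(&mut self, key: u64) -> Option<String> {
        match self.entry(key)? {
            Entry::Present(addrs) => addrs.first().cloned(),
            Entry::Absent => None,
        }
    }
}

fn main() {
    let mut table = Table {
        local_key: 0,
        buckets: HashMap::from([(1, vec!["/ip4/127.0.0.1/tcp/4001".to_owned()])]),
    };

    // The local key yields `None`, matching the updated `entry_self` test.
    assert!(table.entry(0).is_none());
    assert_eq!(table.first_address(1).as_deref(), Some("/ip4/127.0.0.1/tcp/4001"));
    assert!(matches!(table.entry(2), Some(Entry::Absent)));
}
```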