mod test;

use crate::addresses::Addresses;
use crate::handler::{Handler, HandlerEvent, HandlerIn, RequestId};
use crate::kbucket::{self, Distance, KBucketConfig, KBucketsTable, NodeStatus};
use crate::protocol::{ConnectionType, KadPeer, ProtocolConfig};
use crate::query::{Query, QueryConfig, QueryId, QueryPool, QueryPoolState};
use crate::record::{
    self,
    store::{self, RecordStore},
    ProviderRecord, Record,
};
use crate::{bootstrap, K_VALUE};
use crate::{jobs::*, protocol};
use fnv::FnvHashSet;
use libp2p_core::{transport::PortUse, ConnectedPoint, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
use libp2p_swarm::behaviour::{
    AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm,
};
use libp2p_swarm::{
    dial_opts::{self, DialOpts},
    ConnectionDenied, ConnectionHandler, ConnectionId, DialError, ExternalAddresses,
    ListenAddresses, NetworkBehaviour, NotifyHandler, StreamProtocol, THandler, THandlerInEvent,
    THandlerOutEvent, ToSwarm,
};
use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
use std::fmt;
use std::num::NonZeroUsize;
use std::task::{Context, Poll, Waker};
use std::time::Duration;
use std::vec;
use thiserror::Error;
use tracing::Level;
use web_time::Instant;

pub use crate::query::QueryStats;

/// `Behaviour` is a `NetworkBehaviour` that implements the libp2p
/// Kademlia protocol.
pub struct Behaviour<TStore> {
    /// The Kademlia routing table.
    kbuckets: KBucketsTable<kbucket::Key<PeerId>, Addresses>,

    /// The k-bucket insertion strategy.
    kbucket_inserts: BucketInserts,

    /// Configuration of the wire protocol.
    protocol_config: ProtocolConfig,

    /// Configuration of [`RecordStore`] filtering.
    record_filtering: StoreInserts,

    /// The currently active (i.e. in-progress) queries.
    queries: QueryPool,

    /// The currently connected peers.
    ///
    /// This is a superset of the connected peers currently in the routing table.
    connected_peers: FnvHashSet<PeerId>,

    /// Periodic job for re-publication of provider records for keys
    /// provided by the local node.
    add_provider_job: Option<AddProviderJob>,

    /// Periodic job for (re-)replication and (re-)publishing of
    /// regular (value-)records.
    put_record_job: Option<PutRecordJob>,

    /// The TTL of regular (value-)records.
    record_ttl: Option<Duration>,

    /// The TTL of provider records.
    provider_record_ttl: Option<Duration>,

    /// Queued events to return when the behaviour is being polled.
    queued_events: VecDeque<ToSwarm<Event, HandlerIn>>,

    listen_addresses: ListenAddresses,

    external_addresses: ExternalAddresses,

    connections: HashMap<ConnectionId, PeerId>,

    /// See [`Config::set_caching`].
    caching: Caching,

    local_peer_id: PeerId,

    mode: Mode,
    auto_mode: bool,
    no_events_waker: Option<Waker>,

    /// The record storage.
    store: TStore,

    /// Tracks the status of the current bootstrap.
    bootstrap_status: bootstrap::Status,
}

/// The configurable strategies for the insertion of peers
/// and their addresses into the k-buckets of the Kademlia
/// routing table.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum BucketInserts {
    /// Whenever a connection to a peer is established as a
    /// result of a dialing attempt and that peer is not yet
    /// in the routing table, it is inserted as long as there
    /// is a free slot in the corresponding k-bucket. If the
    /// k-bucket is full but still has a free pending slot,
    /// it may be inserted into the routing table at a later time
    /// if an unresponsive disconnected peer is evicted from the bucket.
    OnConnected,
    /// New peers and addresses are only added to the routing table via
    /// explicit calls to [`Behaviour::add_address`].
    ///
    /// > **Note**: Even though peers can only get into the
    /// > routing table as a result of [`Behaviour::add_address`],
    /// > routing table entries are still updated as peers
    /// > connect and disconnect (i.e. the order of the entries
    /// > as well as the network addresses).
    Manual,
}

/// The configurable filtering strategies for the acceptance of
/// incoming records.
///
/// This can be used for e.g. signature verification or validating
/// the accompanying [`Key`].
///
/// [`Key`]: crate::record::Key
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum StoreInserts {
    /// Whenever a (provider) record is received,
    /// the record is forwarded immediately to the [`RecordStore`].
    Unfiltered,
    /// Whenever a (provider) record is received, an event is emitted.
    /// Provider records generate an [`InboundRequest::AddProvider`] under
    /// [`Event::InboundRequest`], normal records generate an
    /// [`InboundRequest::PutRecord`] under [`Event::InboundRequest`].
    ///
    /// When deemed valid, a (provider) record needs to be explicitly stored in
    /// the [`RecordStore`] via [`RecordStore::put`] or [`RecordStore::add_provider`],
    /// whichever is applicable. A mutable reference to the [`RecordStore`] can
    /// be retrieved via [`Behaviour::store_mut`].
    FilterBoth,
}

/// The configuration for the `Kademlia` behaviour.
///
/// The configuration is consumed by [`Behaviour::new`].
#[derive(Debug, Clone)]
pub struct Config {
    kbucket_config: KBucketConfig,
    query_config: QueryConfig,
    protocol_config: ProtocolConfig,
    record_ttl: Option<Duration>,
    record_replication_interval: Option<Duration>,
    record_publication_interval: Option<Duration>,
    record_filtering: StoreInserts,
    provider_record_ttl: Option<Duration>,
    provider_publication_interval: Option<Duration>,
    kbucket_inserts: BucketInserts,
    caching: Caching,
    periodic_bootstrap_interval: Option<Duration>,
    automatic_bootstrap_throttle: Option<Duration>,
}

impl Default for Config {
    /// Returns the default configuration.
    ///
    /// Note: The default configuration is subject to change.
    fn default() -> Self {
        Self::new(protocol::DEFAULT_PROTO_NAME)
    }
}

/// The configuration for Kademlia "write-back" caching after successful
/// lookups via [`Behaviour::get_record`].
#[derive(Debug, Clone)]
pub enum Caching {
    /// Caching is disabled and the peers closest to records being looked up
    /// that do not return a record are not tracked, i.e.
    /// [`GetRecordOk::FinishedWithNoAdditionalRecord`] is always empty.
    Disabled,
    /// Up to `max_peers` peers not returning a record that are closest to the
    /// key being looked up are tracked and returned in
    /// [`GetRecordOk::FinishedWithNoAdditionalRecord`]. The write-back
    /// operation must be performed explicitly, if desired and after choosing a
    /// record from the results, via [`Behaviour::put_record_to`].
    Enabled { max_peers: u16 },
}

impl Config {
    /// Builds a new `Config` with the given protocol name.
    pub fn new(protocol_name: StreamProtocol) -> Self {
        Config {
            kbucket_config: KBucketConfig::default(),
            query_config: QueryConfig::default(),
            protocol_config: ProtocolConfig::new(protocol_name),
            record_ttl: Some(Duration::from_secs(48 * 60 * 60)),
            record_replication_interval: Some(Duration::from_secs(60 * 60)),
            record_publication_interval: Some(Duration::from_secs(22 * 60 * 60)),
            record_filtering: StoreInserts::Unfiltered,
            provider_publication_interval: Some(Duration::from_secs(12 * 60 * 60)),
            provider_record_ttl: Some(Duration::from_secs(48 * 60 * 60)),
            kbucket_inserts: BucketInserts::OnConnected,
            caching: Caching::Enabled { max_peers: 1 },
            periodic_bootstrap_interval: Some(Duration::from_secs(5 * 60)),
            automatic_bootstrap_throttle: Some(bootstrap::DEFAULT_AUTOMATIC_THROTTLE),
        }
    }
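
    // A minimal configuration sketch; the protocol name below is a
    // hypothetical example (it must start with `/`):
    //
    //     let mut config = Config::new(StreamProtocol::new("/my-app/kad/1.0.0"));
    //     config.set_query_timeout(Duration::from_secs(120));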

    /// Returns the default configuration.
    #[deprecated(note = "Use `Config::new` instead")]
    #[allow(clippy::should_implement_trait)]
    pub fn default() -> Self {
        Default::default()
    }

    /// Sets custom protocol names.
    ///
    /// Kademlia nodes only communicate with other nodes using the same
    /// protocol name. Using custom name(s) therefore allows to segregate the
    /// DHT from others, if that is desired.
    ///
    /// More than one protocol name can be supplied. In this case the node will
    /// be able to talk to other nodes supporting any of the provided names.
    /// Multiple names must be used with caution to avoid network partitioning.
    #[deprecated(note = "Use `Config::new` instead")]
    #[allow(deprecated)]
    pub fn set_protocol_names(&mut self, names: Vec<StreamProtocol>) -> &mut Self {
        self.protocol_config.set_protocol_names(names);
        self
    }

    /// Sets the timeout for a single query.
    ///
    /// > **Note**: A single query usually comprises at least as many requests
    /// > as the replication factor, i.e. this is not a request timeout.
    ///
    /// The default is 60 seconds.
    pub fn set_query_timeout(&mut self, timeout: Duration) -> &mut Self {
        self.query_config.timeout = timeout;
        self
    }

    /// Sets the replication factor to use.
    ///
    /// The replication factor determines to how many closest peers
    /// a record is replicated. The default is [`K_VALUE`].
    pub fn set_replication_factor(&mut self, replication_factor: NonZeroUsize) -> &mut Self {
        self.query_config.replication_factor = replication_factor;
        self
    }

    /// Sets the allowed level of parallelism for iterative queries.
    ///
    /// The `α` parameter in the Kademlia paper. The maximum number of peers
    /// that an iterative query is allowed to wait for in parallel while
    /// iterating towards the closest nodes to a target. Defaults to
    /// `ALPHA_VALUE`.
    ///
    /// This only controls the level of parallelism of an iterative query, not
    /// the level of parallelism of a query to a fixed set of peers.
    ///
    /// When used with [`Config::disjoint_query_paths`] it equals
    /// the amount of disjoint paths used.
    pub fn set_parallelism(&mut self, parallelism: NonZeroUsize) -> &mut Self {
        self.query_config.parallelism = parallelism;
        self
    }

    /// Requires iterative queries to use disjoint paths for increased
    /// resiliency in the presence of potentially adversarial nodes.
    ///
    /// When enabled the number of disjoint paths used equals the configured
    /// parallelism.
    ///
    /// See the S/Kademlia paper for more information on the high level design
    /// as well as its security improvements.
    pub fn disjoint_query_paths(&mut self, enabled: bool) -> &mut Self {
        self.query_config.disjoint_query_paths = enabled;
        self
    }

    /// Sets the TTL for stored records.
    ///
    /// The TTL should be significantly longer than the (re-)publication
    /// interval, to avoid premature expiration of records. The default is
    /// 48 hours.
    ///
    /// `None` means records never expire.
    ///
    /// Does not apply to provider records.
    pub fn set_record_ttl(&mut self, record_ttl: Option<Duration>) -> &mut Self {
        self.record_ttl = record_ttl;
        self
    }

    /// Sets whether or not records should be filtered before being stored.
    ///
    /// See [`StoreInserts`] for the different values.
    /// Defaults to [`StoreInserts::Unfiltered`].
    pub fn set_record_filtering(&mut self, filtering: StoreInserts) -> &mut Self {
        self.record_filtering = filtering;
        self
    }

    /// Sets the (re-)replication interval for stored records.
    ///
    /// Periodic replication of stored records ensures that the records
    /// are always replicated to the available nodes closest to the key in the
    /// context of DHT topology changes (i.e. nodes joining and leaving), thus
    /// ensuring persistence until the record expires. Replication does not
    /// prolong the regular lifetime of a record (for otherwise it would live
    /// forever regardless of the configured TTL). The expiry of a record
    /// is only extended through re-publication.
    ///
    /// This interval should be significantly shorter than the publication
    /// interval, to ensure persistence between re-publications. The default
    /// is 1 hour.
    ///
    /// `None` means that stored records are never re-replicated.
    ///
    /// Does not apply to provider records.
    pub fn set_replication_interval(&mut self, interval: Option<Duration>) -> &mut Self {
        self.record_replication_interval = interval;
        self
    }

    /// Sets the (re-)publication interval of stored records.
    ///
    /// Records persist in the DHT until they expire. By default, published
    /// records are re-published in regular intervals for as long as the record
    /// exists in the local storage of the original publisher, thereby extending
    /// the record's lifetime.
    ///
    /// This interval should be significantly shorter than the record TTL, to
    /// ensure records do not expire prematurely. The default is 22 hours.
    ///
    /// `None` means that stored records are never automatically re-published.
    ///
    /// Does not apply to provider records.
    pub fn set_publication_interval(&mut self, interval: Option<Duration>) -> &mut Self {
        self.record_publication_interval = interval;
        self
    }

    /// Sets the TTL for provider records.
    ///
    /// `None` means that stored provider records never expire.
    ///
    /// Must be significantly larger than the provider publication interval.
    pub fn set_provider_record_ttl(&mut self, ttl: Option<Duration>) -> &mut Self {
        self.provider_record_ttl = ttl;
        self
    }

    /// Sets the interval at which provider records for keys provided
    /// by the local node are re-published.
    ///
    /// `None` means that stored provider records are never automatically
    /// re-published.
    ///
    /// Must be significantly less than the provider record TTL.
    pub fn set_provider_publication_interval(&mut self, interval: Option<Duration>) -> &mut Self {
        self.provider_publication_interval = interval;
        self
    }

    /// Modifies the maximum allowed size of individual Kademlia packets.
    ///
    /// It might be necessary to increase this value if trying to put large
    /// records.
    pub fn set_max_packet_size(&mut self, size: usize) -> &mut Self {
        self.protocol_config.set_max_packet_size(size);
        self
    }

    /// Sets the k-bucket insertion strategy for the Kademlia routing table.
    pub fn set_kbucket_inserts(&mut self, inserts: BucketInserts) -> &mut Self {
        self.kbucket_inserts = inserts;
        self
    }

    /// Sets the [`Caching`] strategy to use for successful lookups.
    ///
    /// The default is [`Caching::Enabled`] with a `max_peers` of 1.
    /// Hence, with default settings and a lookup quorum of 1, a successful
    /// lookup will result in the record being cached at the closest node to
    /// the key that did not return the record, i.e. the standard Kademlia
    /// behaviour.
    pub fn set_caching(&mut self, c: Caching) -> &mut Self {
        self.caching = c;
        self
    }

    /// Sets the interval on which [`Behaviour::bootstrap`] is called
    /// periodically.
    ///
    /// * Defaults to `5` minutes.
    /// * Set to `None` to disable periodic bootstrap.
    pub fn set_periodic_bootstrap_interval(&mut self, interval: Option<Duration>) -> &mut Self {
        self.periodic_bootstrap_interval = interval;
        self
    }

    /// Sets the size of the k-buckets, i.e. the maximum number of peers a
    /// bucket can hold.
    ///
    /// Defaults to [`K_VALUE`].
    pub fn set_kbucket_size(&mut self, size: NonZeroUsize) -> &mut Self {
        self.kbucket_config.set_bucket_size(size);
        self
    }

    /// Sets the timeout duration after creation of a pending entry, after
    /// which it becomes eligible for insertion into a full bucket, replacing
    /// the least-recently (dis)connected node.
    ///
    /// Defaults to `60` seconds.
    pub fn set_kbucket_pending_timeout(&mut self, timeout: Duration) -> &mut Self {
        self.kbucket_config.set_pending_timeout(timeout);
        self
    }

    /// Sets the time to wait before calling [`Behaviour::bootstrap`] after a
    /// new peer is inserted into the routing table. This prevents cascading
    /// bootstrap requests when multiple peers are inserted into the routing
    /// table "at the same time", and it also allows waiting a little for other
    /// peers to be inserted, giving more context to the eventual bootstrap
    /// request.
    ///
    /// * Set to `Some(Duration::ZERO)` to never wait before triggering a
    ///   bootstrap request when a new peer is inserted into the routing table.
    /// * Set to `None` to disable the automatic bootstrap trigger entirely.
    #[cfg(test)]
    pub(crate) fn set_automatic_bootstrap_throttle(
        &mut self,
        duration: Option<Duration>,
    ) -> &mut Self {
        self.automatic_bootstrap_throttle = duration;
        self
    }
}

impl<TStore> Behaviour<TStore>
where
    TStore: RecordStore + Send + 'static,
{
    /// Creates a new `Kademlia` network behaviour with a default configuration.
    pub fn new(id: PeerId, store: TStore) -> Self {
        Self::with_config(id, store, Default::default())
    }

    /// Returns the protocol names used by this Kademlia instance.
    pub fn protocol_names(&self) -> &[StreamProtocol] {
        self.protocol_config.protocol_names()
    }

    /// Creates a new `Kademlia` network behaviour with the given configuration.
    pub fn with_config(id: PeerId, store: TStore, config: Config) -> Self {
        let local_key = kbucket::Key::from(id);

        let put_record_job = config
            .record_replication_interval
            .or(config.record_publication_interval)
            .map(|interval| {
                PutRecordJob::new(
                    id,
                    interval,
                    config.record_publication_interval,
                    config.record_ttl,
                )
            });

        let add_provider_job = config
            .provider_publication_interval
            .map(AddProviderJob::new);

        Behaviour {
            store,
            caching: config.caching,
            kbuckets: KBucketsTable::new(local_key, config.kbucket_config),
            kbucket_inserts: config.kbucket_inserts,
            protocol_config: config.protocol_config,
            record_filtering: config.record_filtering,
            queued_events: VecDeque::with_capacity(config.query_config.replication_factor.get()),
            listen_addresses: Default::default(),
            queries: QueryPool::new(config.query_config),
            connected_peers: Default::default(),
            add_provider_job,
            put_record_job,
            record_ttl: config.record_ttl,
            provider_record_ttl: config.provider_record_ttl,
            external_addresses: Default::default(),
            local_peer_id: id,
            connections: Default::default(),
            mode: Mode::Client,
            auto_mode: true,
            no_events_waker: None,
            bootstrap_status: bootstrap::Status::new(
                config.periodic_bootstrap_interval,
                config.automatic_bootstrap_throttle,
            ),
        }
    }
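
    // A construction sketch, assuming the in-memory record store that this
    // crate exports as `store::MemoryStore`:
    //
    //     let local_peer_id = PeerId::random();
    //     let store = MemoryStore::new(local_peer_id);
    //     let kad = Behaviour::with_config(local_peer_id, store, Config::default());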

    /// Returns an iterator over all non-finished queries.
    pub fn iter_queries(&self) -> impl Iterator<Item = QueryRef<'_>> {
        self.queries.iter().filter_map(|query| {
            if !query.is_finished() {
                Some(QueryRef { query })
            } else {
                None
            }
        })
    }

    /// Returns a mutable iterator over all non-finished queries.
    pub fn iter_queries_mut(&mut self) -> impl Iterator<Item = QueryMut<'_>> {
        self.queries.iter_mut().filter_map(|query| {
            if !query.is_finished() {
                Some(QueryMut { query })
            } else {
                None
            }
        })
    }

    /// Returns a reference to the query with the given ID, if it is not finished.
    pub fn query(&self, id: &QueryId) -> Option<QueryRef<'_>> {
        self.queries.get(id).and_then(|query| {
            if !query.is_finished() {
                Some(QueryRef { query })
            } else {
                None
            }
        })
    }

    /// Returns a mutable reference to the query with the given ID, if it is not finished.
    pub fn query_mut<'a>(&'a mut self, id: &QueryId) -> Option<QueryMut<'a>> {
        self.queries.get_mut(id).and_then(|query| {
            if !query.is_finished() {
                Some(QueryMut { query })
            } else {
                None
            }
        })
    }

    /// Adds a known listen address of a peer participating in the DHT to the
    /// routing table.
    ///
    /// Explicitly adding addresses of peers serves two purposes:
    ///
    ///   1. In order for a node to join the DHT, it must know about at least
    ///      one other node of the DHT.
    ///
    ///   2. When a remote peer initiates a connection and that peer is not
    ///      yet in the routing table, the `Kademlia` behaviour must be
    ///      informed of an address on which that peer is listening for
    ///      connections before it can be added to the routing table.
    ///
    /// If the routing table has been updated as a result of this operation,
    /// a [`Event::RoutingUpdated`] event is emitted.
    pub fn add_address(&mut self, peer: &PeerId, address: Multiaddr) -> RoutingUpdate {
        // Ensure the address is a fully-qualified `/p2p` multiaddr.
        let Ok(address) = address.with_p2p(*peer) else {
            return RoutingUpdate::Failed;
        };
        let key = kbucket::Key::from(*peer);
        match self.kbuckets.entry(&key) {
            Some(kbucket::Entry::Present(mut entry, _)) => {
                if entry.value().insert(address) {
                    self.queued_events
                        .push_back(ToSwarm::GenerateEvent(Event::RoutingUpdated {
                            peer: *peer,
                            is_new_peer: false,
                            addresses: entry.value().clone(),
                            old_peer: None,
                            bucket_range: self
                                .kbuckets
                                .bucket(&key)
                                .map(|b| b.range())
                                .expect("Not kbucket::Entry::SelfEntry."),
                        }))
                }
                RoutingUpdate::Success
            }
            Some(kbucket::Entry::Pending(mut entry, _)) => {
                entry.value().insert(address);
                RoutingUpdate::Pending
            }
            Some(kbucket::Entry::Absent(entry)) => {
                let addresses = Addresses::new(address);
                let status = if self.connected_peers.contains(peer) {
                    NodeStatus::Connected
                } else {
                    NodeStatus::Disconnected
                };
                match entry.insert(addresses.clone(), status) {
                    kbucket::InsertResult::Inserted => {
                        self.bootstrap_on_low_peers();

                        self.queued_events.push_back(ToSwarm::GenerateEvent(
                            Event::RoutingUpdated {
                                peer: *peer,
                                is_new_peer: true,
                                addresses,
                                old_peer: None,
                                bucket_range: self
                                    .kbuckets
                                    .bucket(&key)
                                    .map(|b| b.range())
                                    .expect("Not kbucket::Entry::SelfEntry."),
                            },
                        ));
                        RoutingUpdate::Success
                    }
                    kbucket::InsertResult::Full => {
                        tracing::debug!(%peer, "Bucket full. Peer not added to routing table");
                        RoutingUpdate::Failed
                    }
                    kbucket::InsertResult::Pending { disconnected } => {
                        self.queued_events.push_back(ToSwarm::Dial {
                            opts: DialOpts::peer_id(disconnected.into_preimage()).build(),
                        });
                        RoutingUpdate::Pending
                    }
                }
            }
            None => RoutingUpdate::Failed,
        }
    }
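
    // A seeding sketch; the address is a placeholder from the documentation
    // IP range, and the returned `RoutingUpdate` reports whether the peer
    // actually entered the routing table:
    //
    //     let addr: Multiaddr = "/ip4/203.0.113.5/tcp/4001".parse().unwrap();
    //     match kad.add_address(&bootnode_peer_id, addr) {
    //         RoutingUpdate::Success | RoutingUpdate::Pending => {}
    //         RoutingUpdate::Failed => { /* bucket full or invalid address */ }
    //     }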

    /// Removes an address of a peer from the routing table.
    ///
    /// If the given address is the last address of the peer in the
    /// routing table, the peer is removed from the routing table
    /// and `Some` with a view of the removed entry is returned.
    /// Otherwise `None` is returned.
    pub fn remove_address(
        &mut self,
        peer: &PeerId,
        address: &Multiaddr,
    ) -> Option<kbucket::EntryView<kbucket::Key<PeerId>, Addresses>> {
        let address = &address.to_owned().with_p2p(*peer).ok()?;
        let key = kbucket::Key::from(*peer);
        match self.kbuckets.entry(&key)? {
            kbucket::Entry::Present(mut entry, _) => {
                if entry.value().remove(address).is_err() {
                    // It was the last address, so remove the peer.
                    Some(entry.remove())
                } else {
                    None
                }
            }
            kbucket::Entry::Pending(mut entry, _) => {
                if entry.value().remove(address).is_err() {
                    // It was the last address, so remove the peer.
                    Some(entry.remove())
                } else {
                    None
                }
            }
            kbucket::Entry::Absent(..) => None,
        }
    }

    /// Removes a peer from the routing table.
    ///
    /// Returns `None` if the peer was not in the routing table,
    /// not even pending insertion.
    pub fn remove_peer(
        &mut self,
        peer: &PeerId,
    ) -> Option<kbucket::EntryView<kbucket::Key<PeerId>, Addresses>> {
        let key = kbucket::Key::from(*peer);
        match self.kbuckets.entry(&key)? {
            kbucket::Entry::Present(entry, _) => Some(entry.remove()),
            kbucket::Entry::Pending(entry, _) => Some(entry.remove()),
            kbucket::Entry::Absent(..) => None,
        }
    }

    /// Returns an iterator over all non-empty buckets in the routing table.
    pub fn kbuckets(
        &mut self,
    ) -> impl Iterator<Item = kbucket::KBucketRef<'_, kbucket::Key<PeerId>, Addresses>> {
        self.kbuckets.iter().filter(|b| !b.is_empty())
    }

    /// Returns the k-bucket for the distance to the given key.
    ///
    /// Returns `None` if the given key refers to the local key.
    pub fn kbucket<K>(
        &mut self,
        key: K,
    ) -> Option<kbucket::KBucketRef<'_, kbucket::Key<PeerId>, Addresses>>
    where
        K: Into<kbucket::Key<K>> + Clone,
    {
        self.kbuckets.bucket(&key.into())
    }

    /// Initiates an iterative query for the closest peers to the given key.
    ///
    /// The result of the query is delivered in a
    /// [`Event::OutboundQueryProgressed{QueryResult::GetClosestPeers}`].
    pub fn get_closest_peers<K>(&mut self, key: K) -> QueryId
    where
        K: Into<kbucket::Key<K>> + Into<Vec<u8>> + Clone,
    {
        let target: kbucket::Key<K> = key.clone().into();
        let key: Vec<u8> = key.into();
        let info = QueryInfo::GetClosestPeers {
            key,
            step: ProgressStep::first(),
        };
        let peer_keys: Vec<kbucket::Key<PeerId>> = self.kbuckets.closest_keys(&target).collect();
        self.queries.add_iter_closest(target, peer_keys, info)
    }
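
    // A lookup sketch; a random `PeerId` works as a target because the query
    // only needs the key's position in the DHT keyspace:
    //
    //     let query_id = kad.get_closest_peers(PeerId::random());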

    /// Returns the closest peers to the given key, taken from the local
    /// routing table only.
    pub fn get_closest_local_peers<'a, K: Clone>(
        &'a mut self,
        key: &'a kbucket::Key<K>,
    ) -> impl Iterator<Item = kbucket::Key<PeerId>> + 'a {
        self.kbuckets.closest_keys(key)
    }

    /// Performs a lookup for a record in the DHT.
    ///
    /// The result of this operation is delivered in a
    /// [`Event::OutboundQueryProgressed{QueryResult::GetRecord}`].
    pub fn get_record(&mut self, key: record::Key) -> QueryId {
        let record = if let Some(record) = self.store.get(&key) {
            if record.is_expired(Instant::now()) {
                self.store.remove(&key);
                None
            } else {
                Some(PeerRecord {
                    peer: None,
                    record: record.into_owned(),
                })
            }
        } else {
            None
        };

        let step = ProgressStep::first();

        let target = kbucket::Key::new(key.clone());
        let info = if record.is_some() {
            QueryInfo::GetRecord {
                key,
                step: step.next(),
                found_a_record: true,
                cache_candidates: BTreeMap::new(),
            }
        } else {
            QueryInfo::GetRecord {
                key,
                step: step.clone(),
                found_a_record: false,
                cache_candidates: BTreeMap::new(),
            }
        };
        let peers = self.kbuckets.closest_keys(&target);
        let id = self.queries.add_iter_closest(target.clone(), peers, info);

        // No queries were actually done for the results yet.
        let stats = QueryStats::empty();

        if let Some(record) = record {
            self.queued_events
                .push_back(ToSwarm::GenerateEvent(Event::OutboundQueryProgressed {
                    id,
                    result: QueryResult::GetRecord(Ok(GetRecordOk::FoundRecord(record))),
                    step,
                    stats,
                }));
        }

        id
    }
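
    // A lookup sketch; keys are opaque byte strings, and `record::Key`
    // implements `From<Vec<u8>>`:
    //
    //     let query_id = kad.get_record(record::Key::from(b"my-key".to_vec()));
    //     // Results arrive as Event::OutboundQueryProgressed with
    //     // QueryResult::GetRecord(..), possibly over multiple steps.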

    /// Stores a record in the DHT, locally as well as at the nodes
    /// closest to the key as per the xor distance metric.
    ///
    /// Returns `Ok` if a record has been stored locally, providing the
    /// `QueryId` of the initial query that replicates the record in the DHT.
    /// The result of the query is eventually reported as a
    /// [`Event::OutboundQueryProgressed{QueryResult::PutRecord}`].
    ///
    /// The record is always stored locally with the given expiration. If the
    /// record's expiration is `None`, the common case, it does not expire in
    /// local storage but is still replicated with the configured record TTL.
    /// To remove the record locally and stop it from being re-published in the
    /// DHT, see [`Behaviour::remove_record`].
    ///
    /// After the initial publication of the record, it is subject to
    /// (re-)replication and (re-)publication as per the configured intervals.
    /// Periodic (re-)publication does not update the record's expiration in
    /// local storage, thus a given record with an explicit expiration will
    /// always expire at that instant and until then is subject to regular
    /// (re-)replication and (re-)publication.
    pub fn put_record(
        &mut self,
        mut record: Record,
        quorum: Quorum,
    ) -> Result<QueryId, store::Error> {
        record.publisher = Some(*self.kbuckets.local_key().preimage());
        self.store.put(record.clone())?;
        record.expires = record
            .expires
            .or_else(|| self.record_ttl.map(|ttl| Instant::now() + ttl));
        let quorum = quorum.eval(self.queries.config().replication_factor);
        let target = kbucket::Key::new(record.key.clone());
        let peers = self.kbuckets.closest_keys(&target);
        let context = PutRecordContext::Publish;
        let info = QueryInfo::PutRecord {
            context,
            record,
            quorum,
            phase: PutRecordPhase::GetClosestPeers,
        };
        Ok(self.queries.add_iter_closest(target.clone(), peers, info))
    }
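
    // A publishing sketch; key and value are arbitrary bytes, and the call
    // fails only if the local store rejects the record:
    //
    //     let record = Record::new(b"my-key".to_vec(), b"my-value".to_vec());
    //     let query_id = kad.put_record(record, Quorum::One)?;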

    /// Stores a record at specific peers, without storing it locally.
    ///
    /// The given [`Quorum`] is understood in the context of the total
    /// number of distinct peers given.
    ///
    /// If the record's expiration is `None`, the configured record TTL is used.
    ///
    /// > **Note**: This is not a regular Kademlia DHT operation. It may be
    /// > used to selectively update or store a record at specific peers
    /// > for the purpose of e.g. making sure these peers have the latest
    /// > "version" of a record, or to "cache" a record at further peers
    /// > to increase the lookup success rate on the DHT for other peers.
    /// >
    /// > In particular, this is the means by which the standard Kademlia
    /// > procedure of "caching" (i.e. storing) a found record at the closest
    /// > node to the key that _did not_ return it can be implemented.
    pub fn put_record_to<I>(&mut self, mut record: Record, peers: I, quorum: Quorum) -> QueryId
    where
        I: ExactSizeIterator<Item = PeerId>,
    {
        let quorum = if peers.len() > 0 {
            quorum.eval(NonZeroUsize::new(peers.len()).expect("> 0"))
        } else {
            // If no peers are given, the query fails immediately on quorum
            // grounds: the quorum must be at least one, so with zero peers
            // it can never be reached.
            NonZeroUsize::new(1).expect("1 > 0")
        };
        record.expires = record
            .expires
            .or_else(|| self.record_ttl.map(|ttl| Instant::now() + ttl));
        let context = PutRecordContext::Custom;
        let info = QueryInfo::PutRecord {
            context,
            record,
            quorum,
            phase: PutRecordPhase::PutRecord {
                success: Vec::new(),
                get_closest_peers_stats: QueryStats::empty(),
            },
        };
        self.queries.add_fixed(peers, info)
    }
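
    // A write-back caching sketch: after a lookup ends with
    // `GetRecordOk::FinishedWithNoAdditionalRecord { cache_candidates }`,
    // a record found earlier in that lookup can be cached at those peers:
    //
    //     kad.put_record_to(record, cache_candidates.into_values(), Quorum::One);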

    /// Removes the record with the given key from _local_ storage,
    /// if the local node is the publisher of the record.
    ///
    /// Has no effect if a record for the given key is stored locally but
    /// the local node is not a publisher of the record.
    ///
    /// This is a _local_ operation. However, it also has the effect that
    /// the record will no longer be periodically re-published, allowing the
    /// record to eventually expire throughout the DHT.
    pub fn remove_record(&mut self, key: &record::Key) {
        if let Some(r) = self.store.get(key) {
            if r.publisher.as_ref() == Some(self.kbuckets.local_key().preimage()) {
                self.store.remove(key)
            }
        }
    }

    /// Gets a mutable reference to the record store.
    pub fn store_mut(&mut self) -> &mut TStore {
        &mut self.store
    }

    /// Bootstraps the local node to join the DHT.
    ///
    /// Bootstrapping is a multi-step operation that starts with a lookup of
    /// the local node's own ID in the DHT. This introduces the local node to
    /// the other nodes in the DHT and populates its routing table with the
    /// closest neighbours.
    ///
    /// Subsequently, all buckets farther from the bucket of the closest
    /// neighbour are refreshed by initiating an additional bootstrapping
    /// lookup for each such bucket with random keys.
    ///
    /// Returns `Ok` if bootstrapping has been initiated with a self-lookup,
    /// providing the `QueryId` for the entire bootstrapping process. The
    /// progress of bootstrapping is reported via
    /// [`Event::OutboundQueryProgressed{QueryResult::Bootstrap}`] events,
    /// with one such event per bootstrapping query.
    ///
    /// Returns `Err` if bootstrapping is impossible due to an empty routing
    /// table.
    ///
    /// > **Note**: Bootstrapping requires at least one node of the DHT to be
    /// > known. See [`Behaviour::add_address`]. Bootstrap does not need to be
    /// > called manually: it is invoked periodically based on the configured
    /// > interval (see [`Config::set_periodic_bootstrap_interval`]) and also
    /// > automatically when a new peer is inserted into the routing table, to
    /// > keep the routing table healthy.
    pub fn bootstrap(&mut self) -> Result<QueryId, NoKnownPeers> {
        let local_key = *self.kbuckets.local_key();
        let info = QueryInfo::Bootstrap {
            peer: *local_key.preimage(),
            remaining: None,
            step: ProgressStep::first(),
        };
        let peers = self.kbuckets.closest_keys(&local_key).collect::<Vec<_>>();
        if peers.is_empty() {
            self.bootstrap_status.reset_timers();
            Err(NoKnownPeers())
        } else {
            self.bootstrap_status.on_started();
            Ok(self.queries.add_iter_closest(local_key, peers, info))
        }
    }
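
    // A joining sketch: seed the routing table with one known node, then
    // kick off the self-lookup (fails with `NoKnownPeers` on an empty table):
    //
    //     kad.add_address(&bootnode_peer_id, bootnode_addr);
    //     let query_id = kad.bootstrap()?;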

    /// Establishes the local node as a provider of a value for the given key.
    ///
    /// This operation publishes a provider record with the given key and
    /// identity of the local node to the peers closest to the key, thus
    /// establishing the local node as a provider.
    ///
    /// Returns `Ok` if a provider record has been stored locally, providing
    /// the `QueryId` of the initial query that announces the local node as a
    /// provider.
    ///
    /// The publication of the provider records is periodically repeated as per
    /// the configured interval, to renew the expiry and account for changes to
    /// the DHT topology. A provider record may be removed from local storage
    /// and thus no longer re-published by calling [`Behaviour::stop_providing`].
    ///
    /// In contrast to the standard Kademlia push-based model for content
    /// distribution implemented by [`Behaviour::put_record`], the provider API
    /// implements a pull-based model that may be used in addition to, or as an
    /// alternative. The means by which the actual value is obtained from a
    /// provider is out of scope of the libp2p Kademlia provider API.
    ///
    /// The results of the (repeated) provider announcements sent by this node
    /// are reported via
    /// [`Event::OutboundQueryProgressed{QueryResult::StartProviding}`].
    pub fn start_providing(&mut self, key: record::Key) -> Result<QueryId, store::Error> {
        // Note: We store our own provider records locally without local
        // addresses to avoid redundant storage and outdated addresses.
        // Instead these are acquired on demand when returning a
        // `ProviderRecord` for the local node.
        let local_addrs = Vec::new();
        let record = ProviderRecord::new(
            key.clone(),
            *self.kbuckets.local_key().preimage(),
            local_addrs,
        );
        self.store.add_provider(record)?;
        let target = kbucket::Key::new(key.clone());
        let peers = self.kbuckets.closest_keys(&target);
        let context = AddProviderContext::Publish;
        let info = QueryInfo::AddProvider {
            context,
            key,
            phase: AddProviderPhase::GetClosestPeers,
        };
        let id = self.queries.add_iter_closest(target.clone(), peers, info);
        Ok(id)
    }

    /// Stops the local node from announcing that it is a provider for the
    /// given key.
    ///
    /// This is a local operation. The local node will still be considered a
    /// provider for the key by other nodes until these provider records expire.
    pub fn stop_providing(&mut self, key: &record::Key) {
        self.store
            .remove_provider(key, self.kbuckets.local_key().preimage());
    }

    /// Performs a lookup for providers of a value for the given key.
    ///
    /// The result of this operation is delivered in a
    /// [`Event::OutboundQueryProgressed{QueryResult::GetProviders}`].
    pub fn get_providers(&mut self, key: record::Key) -> QueryId {
        let providers: HashSet<_> = self
            .store
            .providers(&key)
            .into_iter()
            .filter(|p| !p.is_expired(Instant::now()))
            .map(|p| p.provider)
            .collect();

        let step = ProgressStep::first();

        let info = QueryInfo::GetProviders {
            key: key.clone(),
            providers_found: providers.len(),
            step: if providers.is_empty() {
                step.clone()
            } else {
                step.next()
            },
        };

        let target = kbucket::Key::new(key.clone());
        let peers = self.kbuckets.closest_keys(&target);
        let id = self.queries.add_iter_closest(target.clone(), peers, info);

        // No queries were actually done for the results yet.
        let stats = QueryStats::empty();

        if !providers.is_empty() {
            self.queued_events
                .push_back(ToSwarm::GenerateEvent(Event::OutboundQueryProgressed {
                    id,
                    result: QueryResult::GetProviders(Ok(GetProvidersOk::FoundProviders {
                        key,
                        providers,
                    })),
                    step,
                    stats,
                }));
        }
        id
    }
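
    // A provider-API sketch: announce a key from this node, then look up who
    // else provides it (fetching the actual value from a provider is out of
    // scope of this API):
    //
    //     let key = record::Key::from(b"content-hash".to_vec());
    //     kad.start_providing(key.clone())?;
    //     let query_id = kad.get_providers(key);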

    /// Set the [`Mode`] in which we should operate.
    ///
    /// By default, we are in [`Mode::Client`] and will swap into
    /// [`Mode::Server`] as soon as we have a confirmed, external address via
    /// [`FromSwarm::ExternalAddrConfirmed`].
    ///
    /// Setting a mode via this function disables this automatic behaviour and
    /// unconditionally operates in the specified mode. To reactivate the
    /// automatic configuration, pass [`None`] instead.
    pub fn set_mode(&mut self, mode: Option<Mode>) {
        match mode {
            Some(mode) => {
                self.mode = mode;
                self.auto_mode = false;
                self.reconfigure_mode();
            }
            None => {
                self.auto_mode = true;
                self.determine_mode_from_external_addresses();
            }
        }

        if let Some(waker) = self.no_events_waker.take() {
            waker.wake();
        }
    }
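
    // A mode-override sketch: force server-mode for a node known to be
    // publicly reachable, then return to automatic detection later:
    //
    //     kad.set_mode(Some(Mode::Server));
    //     kad.set_mode(None); // back to auto-detection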

    fn reconfigure_mode(&mut self) {
        if self.connections.is_empty() {
            return;
        }

        let num_connections = self.connections.len();

        tracing::debug!(
            "Re-configuring {} established connection{}",
            num_connections,
            if num_connections > 1 { "s" } else { "" }
        );

        self.queued_events
            .extend(
                self.connections
                    .iter()
                    .map(|(conn_id, peer_id)| ToSwarm::NotifyHandler {
                        peer_id: *peer_id,
                        handler: NotifyHandler::One(*conn_id),
                        event: HandlerIn::ReconfigureMode {
                            new_mode: self.mode,
                        },
                    }),
            );
    }

    fn determine_mode_from_external_addresses(&mut self) {
        let old_mode = self.mode;

        self.mode = match (self.external_addresses.as_slice(), self.mode) {
            ([], Mode::Server) => {
                tracing::debug!("Switching to client-mode because we no longer have any confirmed external addresses");

                Mode::Client
            }
            ([], Mode::Client) => {
                // Previously client-mode, now also client-mode because no external addresses.

                Mode::Client
            }
            (confirmed_external_addresses, Mode::Client) => {
                if tracing::enabled!(Level::DEBUG) {
                    let confirmed_external_addresses =
                        to_comma_separated_list(confirmed_external_addresses);

                    tracing::debug!("Switching to server-mode assuming that one of [{confirmed_external_addresses}] is externally reachable");
                }

                Mode::Server
            }
            (confirmed_external_addresses, Mode::Server) => {
                debug_assert!(
                    !confirmed_external_addresses.is_empty(),
                    "Previous match arm handled empty list"
                );

                // Previously server-mode, now also server-mode because we still have external addresses.

                Mode::Server
            }
        };

        self.reconfigure_mode();

        if old_mode != self.mode {
            self.queued_events
                .push_back(ToSwarm::GenerateEvent(Event::ModeChanged {
                    new_mode: self.mode,
                }));
        }
    }

    /// Processes discovered peers from a successful request in an iterative `Query`.
    fn discovered<'a, I>(&'a mut self, query_id: &QueryId, source: &PeerId, peers: I)
    where
        I: Iterator<Item = &'a KadPeer> + Clone,
    {
        let local_id = self.kbuckets.local_key().preimage();
        let others_iter = peers.filter(|p| &p.node_id != local_id);
        if let Some(query) = self.queries.get_mut(query_id) {
            tracing::trace!(peer=%source, query=?query_id, "Request to peer in query succeeded");
            for peer in others_iter.clone() {
                tracing::trace!(
                    ?peer,
                    %source,
                    query=?query_id,
                    "Peer reported by source in query"
                );
                let addrs = peer.multiaddrs.iter().cloned().collect();
                query.peers.addresses.insert(peer.node_id, addrs);
            }
            query.on_success(source, others_iter.cloned().map(|kp| kp.node_id))
        }
    }

    /// Finds the closest peers to a `target` in the context of a request by
    /// the `source` peer, such that the `source` peer is never included in the
    /// result.
    fn find_closest<T: Clone>(
        &mut self,
        target: &kbucket::Key<T>,
        source: &PeerId,
    ) -> Vec<KadPeer> {
        self.kbuckets
            .closest(target)
            .filter(|e| e.node.key.preimage() != source)
            .take(self.queries.config().replication_factor.get())
            .map(KadPeer::from)
            .collect()
    }

    /// Collects all peers who are known to be providers of the value for a given key.
    fn provider_peers(&mut self, key: &record::Key, source: &PeerId) -> Vec<KadPeer> {
        let kbuckets = &mut self.kbuckets;
        let connected = &mut self.connected_peers;
        let listen_addresses = &self.listen_addresses;
        let external_addresses = &self.external_addresses;

        self.store
            .providers(key)
            .into_iter()
            .filter_map(move |p| {
                if &p.provider != source {
                    let node_id = p.provider;
                    let multiaddrs = p.addresses;
                    let connection_ty = if connected.contains(&node_id) {
                        ConnectionType::Connected
                    } else {
                        ConnectionType::NotConnected
                    };
                    if multiaddrs.is_empty() {
                        // The provider is either the local node and we fill in
                        // the local addresses on demand, or it is a legacy
                        // provider record without addresses, in which case we
                        // try to find addresses in the routing table, as was
                        // done before provider records were stored along with
                        // their addresses.
                        if &node_id == kbuckets.local_key().preimage() {
                            Some(
                                listen_addresses
                                    .iter()
                                    .chain(external_addresses.iter())
                                    .cloned()
                                    .collect::<Vec<_>>(),
                            )
                        } else {
                            let key = kbucket::Key::from(node_id);
                            kbuckets
                                .entry(&key)
                                .as_mut()
                                .and_then(|e| e.view())
                                .map(|e| e.node.value.clone().into_vec())
                        }
                    } else {
                        Some(multiaddrs)
                    }
                    .map(|multiaddrs| KadPeer {
                        node_id,
                        multiaddrs,
                        connection_ty,
                    })
                } else {
                    None
                }
            })
            .take(self.queries.config().replication_factor.get())
            .collect()
    }

    /// Starts an iterative `ADD_PROVIDER` query for the given key.
    fn start_add_provider(&mut self, key: record::Key, context: AddProviderContext) {
        let info = QueryInfo::AddProvider {
            context,
            key: key.clone(),
            phase: AddProviderPhase::GetClosestPeers,
        };
        let target = kbucket::Key::new(key);
        let peers = self.kbuckets.closest_keys(&target);
        self.queries.add_iter_closest(target.clone(), peers, info);
    }

    /// Starts an iterative `PUT_VALUE` query for the given record.
    fn start_put_record(&mut self, record: Record, quorum: Quorum, context: PutRecordContext) {
        let quorum = quorum.eval(self.queries.config().replication_factor);
        let target = kbucket::Key::new(record.key.clone());
        let peers = self.kbuckets.closest_keys(&target);
        let info = QueryInfo::PutRecord {
            record,
            quorum,
            context,
            phase: PutRecordPhase::GetClosestPeers,
        };
        self.queries.add_iter_closest(target.clone(), peers, info);
    }

    /// Updates the routing table with a new connection status and address of a peer.
    fn connection_updated(
        &mut self,
        peer: PeerId,
        address: Option<Multiaddr>,
        new_status: NodeStatus,
    ) {
        let key = kbucket::Key::from(peer);
        match self.kbuckets.entry(&key) {
            Some(kbucket::Entry::Present(mut entry, old_status)) => {
                if old_status != new_status {
                    entry.update(new_status)
                }
                if let Some(address) = address {
                    if entry.value().insert(address) {
                        self.queued_events.push_back(ToSwarm::GenerateEvent(
                            Event::RoutingUpdated {
                                peer,
                                is_new_peer: false,
                                addresses: entry.value().clone(),
                                old_peer: None,
                                bucket_range: self
                                    .kbuckets
                                    .bucket(&key)
                                    .map(|b| b.range())
                                    .expect("Not kbucket::Entry::SelfEntry."),
                            },
                        ))
                    }
                }
            }

            Some(kbucket::Entry::Pending(mut entry, old_status)) => {
                if let Some(address) = address {
                    entry.value().insert(address);
                }
                if old_status != new_status {
                    entry.update(new_status);
                }
            }

            Some(kbucket::Entry::Absent(entry)) => {
                // Only connected nodes with a known address are newly inserted.
                if new_status != NodeStatus::Connected {
                    return;
                }
                match (address, self.kbucket_inserts) {
                    (None, _) => {
                        self.queued_events
                            .push_back(ToSwarm::GenerateEvent(Event::UnroutablePeer { peer }));
                    }
                    (Some(a), BucketInserts::Manual) => {
                        self.queued_events
                            .push_back(ToSwarm::GenerateEvent(Event::RoutablePeer {
                                peer,
                                address: a,
                            }));
                    }
                    (Some(a), BucketInserts::OnConnected) => {
                        let addresses = Addresses::new(a);
                        match entry.insert(addresses.clone(), new_status) {
                            kbucket::InsertResult::Inserted => {
                                self.bootstrap_on_low_peers();

                                let event = Event::RoutingUpdated {
                                    peer,
                                    is_new_peer: true,
                                    addresses,
                                    old_peer: None,
                                    bucket_range: self
                                        .kbuckets
                                        .bucket(&key)
                                        .map(|b| b.range())
                                        .expect("Not kbucket::Entry::SelfEntry."),
                                };
                                self.queued_events.push_back(ToSwarm::GenerateEvent(event));
                            }
                            kbucket::InsertResult::Full => {
                                tracing::debug!(
                                    %peer,
                                    "Bucket full. Peer not added to routing table"
                                );
                                let address = addresses.first().clone();
                                self.queued_events.push_back(ToSwarm::GenerateEvent(
                                    Event::RoutablePeer { peer, address },
                                ));
                            }
                            kbucket::InsertResult::Pending { disconnected } => {
                                let address = addresses.first().clone();
                                self.queued_events.push_back(ToSwarm::GenerateEvent(
                                    Event::PendingRoutablePeer { peer, address },
                                ));

                                // `disconnected` might already be in the process of
                                // re-connecting. In other words, it might have
                                // re-connected but is not yet confirmed to support
                                // the Kademlia protocol via
                                // [`HandlerEvent::ProtocolConfirmed`].
                                //
                                // Only try dialing the peer if it is not currently
                                // connected.
                                if !self.connected_peers.contains(disconnected.preimage()) {
                                    self.queued_events.push_back(ToSwarm::Dial {
                                        opts: DialOpts::peer_id(disconnected.into_preimage())
                                            .build(),
                                    })
                                }
                            }
                        }
                    }
                }
            }
            _ => {}
        }
    }

    /// Checks whether the routing table is currently small (fewer than
    /// `K_VALUE` peers present) after a new peer has been inserted, and
    /// triggers a bootstrap query if so.
    fn bootstrap_on_low_peers(&mut self) {
        if self
            .kbuckets()
            .map(|kbucket| kbucket.num_entries())
            .sum::<usize>()
            < K_VALUE.get()
        {
            self.bootstrap_status.trigger();
        }
    }

    /// Handles a finished (i.e. successful) query.
    fn query_finished(&mut self, q: Query) -> Option<Event> {
        let query_id = q.id();
        tracing::trace!(query=?query_id, "Query finished");
        match q.info {
            QueryInfo::Bootstrap {
                peer,
                remaining,
                mut step,
            } => {
                let local_key = *self.kbuckets.local_key();
                let mut remaining = remaining.unwrap_or_else(|| {
                    debug_assert_eq!(&peer, local_key.preimage());
                    // The lookup for the local key finished. To complete the
                    // bootstrap process, a bucket refresh should be performed
                    // for every bucket farther away than the first non-empty
                    // bucket (which are most likely no more than the last
                    // few, i.e. farthest, buckets).
                    self.kbuckets
                        .iter()
                        .skip_while(|b| b.is_empty())
                        .skip(1)
                        .map(|b| {
                            // Try to find a key that falls into the bucket. While such keys can
                            // be generated fully deterministically, the current libp2p kademlia
                            // wire protocol requires transmission of the preimages of the actual
                            // keys in the DHT keyspace, hence for now this is just a "best effort"
                            // to find a key that hashes into a specific bucket. The probabilities
                            // of finding a key in the bucket `b` with at most 16 trials are as
                            // follows:
                            //
                            // Pr(bucket-255) = 1 - (1/2)^16   ~= 1
                            // Pr(bucket-254) = 1 - (3/4)^16   ~= 1
                            // Pr(bucket-253) = 1 - (7/8)^16   ~= 0.88
                            // Pr(bucket-252) = 1 - (15/16)^16 ~= 0.64
                            // ...
                            let mut target = kbucket::Key::from(PeerId::random());
                            for _ in 0..16 {
                                let d = local_key.distance(&target);
                                if b.contains(&d) {
                                    break;
                                }
                                target = kbucket::Key::from(PeerId::random());
                            }
                            target
                        })
                        .collect::<Vec<_>>()
                        .into_iter()
                });

                let num_remaining = remaining.len() as u32;

                if let Some(target) = remaining.next() {
                    let info = QueryInfo::Bootstrap {
                        peer: *target.preimage(),
                        remaining: Some(remaining),
                        step: step.next(),
                    };
                    let peers = self.kbuckets.closest_keys(&target);
                    self.queries
                        .continue_iter_closest(query_id, target, peers, info);
                } else {
                    step.last = true;
                    self.bootstrap_status.on_finish();
                };

                Some(Event::OutboundQueryProgressed {
                    id: query_id,
                    stats: q.stats,
                    result: QueryResult::Bootstrap(Ok(BootstrapOk {
                        peer,
                        num_remaining,
                    })),
                    step,
                })
            }

            QueryInfo::GetClosestPeers { key, mut step } => {
                step.last = true;

                Some(Event::OutboundQueryProgressed {
                    id: query_id,
                    stats: q.stats,
                    result: QueryResult::GetClosestPeers(Ok(GetClosestPeersOk {
                        key,
                        peers: q.peers.into_peerinfos_iter().collect(),
                    })),
                    step,
                })
            }

            QueryInfo::GetProviders { mut step, .. } => {
                step.last = true;

                Some(Event::OutboundQueryProgressed {
                    id: query_id,
                    stats: q.stats,
                    result: QueryResult::GetProviders(Ok(
                        GetProvidersOk::FinishedWithNoAdditionalRecord {
                            closest_peers: q.peers.into_peerids_iter().collect(),
                        },
                    )),
                    step,
                })
            }

            QueryInfo::AddProvider {
                context,
                key,
                phase: AddProviderPhase::GetClosestPeers,
            } => {
                let provider_id = self.local_peer_id;
                let external_addresses = self.external_addresses.iter().cloned().collect();
                let info = QueryInfo::AddProvider {
                    context,
                    key,
                    phase: AddProviderPhase::AddProvider {
                        provider_id,
                        external_addresses,
                        get_closest_peers_stats: q.stats,
                    },
                };
                self.queries
                    .continue_fixed(query_id, q.peers.into_peerids_iter(), info);
                None
            }

            QueryInfo::AddProvider {
                context,
                key,
                phase:
                    AddProviderPhase::AddProvider {
                        get_closest_peers_stats,
                        ..
                    },
            } => match context {
                AddProviderContext::Publish => Some(Event::OutboundQueryProgressed {
                    id: query_id,
                    stats: get_closest_peers_stats.merge(q.stats),
                    result: QueryResult::StartProviding(Ok(AddProviderOk { key })),
                    step: ProgressStep::first_and_last(),
                }),
                AddProviderContext::Republish => Some(Event::OutboundQueryProgressed {
                    id: query_id,
                    stats: get_closest_peers_stats.merge(q.stats),
                    result: QueryResult::RepublishProvider(Ok(AddProviderOk { key })),
                    step: ProgressStep::first_and_last(),
                }),
            },

            QueryInfo::GetRecord {
                key,
                mut step,
                found_a_record,
                cache_candidates,
            } => {
                step.last = true;

                let results = if found_a_record {
                    Ok(GetRecordOk::FinishedWithNoAdditionalRecord { cache_candidates })
                } else {
                    Err(GetRecordError::NotFound {
                        key,
                        closest_peers: q.peers.into_peerids_iter().collect(),
                    })
                };
                Some(Event::OutboundQueryProgressed {
                    id: query_id,
                    stats: q.stats,
                    result: QueryResult::GetRecord(results),
                    step,
                })
            }

            QueryInfo::PutRecord {
                context,
                record,
                quorum,
                phase: PutRecordPhase::GetClosestPeers,
            } => {
                let info = QueryInfo::PutRecord {
                    context,
                    record,
                    quorum,
                    phase: PutRecordPhase::PutRecord {
                        success: vec![],
                        get_closest_peers_stats: q.stats,
                    },
                };
                self.queries
                    .continue_fixed(query_id, q.peers.into_peerids_iter(), info);
                None
            }

            QueryInfo::PutRecord {
                context,
                record,
                quorum,
                phase:
                    PutRecordPhase::PutRecord {
                        success,
                        get_closest_peers_stats,
                    },
            } => {
                let mk_result = |key: record::Key| {
                    if success.len() >= quorum.get() {
                        Ok(PutRecordOk { key })
                    } else {
                        Err(PutRecordError::QuorumFailed {
                            key,
                            quorum,
                            success,
                        })
                    }
                };
                match context {
                    PutRecordContext::Publish | PutRecordContext::Custom => {
                        Some(Event::OutboundQueryProgressed {
                            id: query_id,
                            stats: get_closest_peers_stats.merge(q.stats),
                            result: QueryResult::PutRecord(mk_result(record.key)),
                            step: ProgressStep::first_and_last(),
                        })
                    }
                    PutRecordContext::Republish => Some(Event::OutboundQueryProgressed {
                        id: query_id,
                        stats: get_closest_peers_stats.merge(q.stats),
                        result: QueryResult::RepublishRecord(mk_result(record.key)),
                        step: ProgressStep::first_and_last(),
                    }),
                    PutRecordContext::Replicate => {
                        tracing::debug!(record=?record.key, "Record replicated");
                        None
                    }
                }
            }
        }
    }

    /// Handles a query that timed out.
    fn query_timeout(&mut self, query: Query) -> Option<Event> {
        let query_id = query.id();
        tracing::trace!(query=?query_id, "Query timed out");
        match query.info {
            QueryInfo::Bootstrap {
                peer,
                mut remaining,
                mut step,
            } => {
                let num_remaining = remaining.as_ref().map(|r| r.len().saturating_sub(1) as u32);

                // Continue with the next bootstrap query if `remaining` is not empty.
                if let Some((target, remaining)) =
                    remaining.take().and_then(|mut r| Some((r.next()?, r)))
                {
                    let info = QueryInfo::Bootstrap {
                        peer: target.into_preimage(),
                        remaining: Some(remaining),
                        step: step.next(),
                    };
                    let peers = self.kbuckets.closest_keys(&target);
                    self.queries
                        .continue_iter_closest(query_id, target, peers, info);
                } else {
                    step.last = true;
                    self.bootstrap_status.on_finish();
                }

                Some(Event::OutboundQueryProgressed {
                    id: query_id,
                    stats: query.stats,
                    result: QueryResult::Bootstrap(Err(BootstrapError::Timeout {
                        peer,
                        num_remaining,
                    })),
                    step,
                })
            }

            QueryInfo::AddProvider { context, key, .. } => Some(match context {
                AddProviderContext::Publish => Event::OutboundQueryProgressed {
                    id: query_id,
                    stats: query.stats,
                    result: QueryResult::StartProviding(Err(AddProviderError::Timeout { key })),
                    step: ProgressStep::first_and_last(),
                },
                AddProviderContext::Republish => Event::OutboundQueryProgressed {
                    id: query_id,
                    stats: query.stats,
                    result: QueryResult::RepublishProvider(Err(AddProviderError::Timeout { key })),
                    step: ProgressStep::first_and_last(),
                },
            }),

            QueryInfo::GetClosestPeers { key, mut step } => {
                step.last = true;
                Some(Event::OutboundQueryProgressed {
                    id: query_id,
                    stats: query.stats,
                    result: QueryResult::GetClosestPeers(Err(GetClosestPeersError::Timeout {
                        key,
                        peers: query.peers.into_peerinfos_iter().collect(),
                    })),
                    step,
                })
            }

            QueryInfo::PutRecord {
                record,
                quorum,
                context,
                phase,
            } => {
                let err = Err(PutRecordError::Timeout {
                    key: record.key,
                    quorum,
                    success: match phase {
                        PutRecordPhase::GetClosestPeers => vec![],
                        PutRecordPhase::PutRecord { ref success, .. } => success.clone(),
                    },
                });
                match context {
                    PutRecordContext::Publish | PutRecordContext::Custom => {
                        Some(Event::OutboundQueryProgressed {
                            id: query_id,
                            stats: query.stats,
                            result: QueryResult::PutRecord(err),
                            step: ProgressStep::first_and_last(),
                        })
                    }
                    PutRecordContext::Republish => Some(Event::OutboundQueryProgressed {
                        id: query_id,
                        stats: query.stats,
                        result: QueryResult::RepublishRecord(err),
                        step: ProgressStep::first_and_last(),
                    }),
                    PutRecordContext::Replicate => match phase {
                        PutRecordPhase::GetClosestPeers => {
                            tracing::warn!(
                                "Locating closest peers for replication failed: {:?}",
                                err
                            );
                            None
                        }
                        PutRecordPhase::PutRecord { .. } => {
                            tracing::debug!("Replicating record failed: {:?}", err);
                            None
                        }
                    },
                }
            }

            QueryInfo::GetRecord { key, mut step, .. } => {
                step.last = true;

                Some(Event::OutboundQueryProgressed {
                    id: query_id,
                    stats: query.stats,
                    result: QueryResult::GetRecord(Err(GetRecordError::Timeout { key })),
                    step,
                })
            }

            QueryInfo::GetProviders { key, mut step, .. } => {
                step.last = true;

                Some(Event::OutboundQueryProgressed {
                    id: query_id,
                    stats: query.stats,
                    result: QueryResult::GetProviders(Err(GetProvidersError::Timeout {
                        key,
                        closest_peers: query.peers.into_peerids_iter().collect(),
                    })),
                    step,
                })
            }
        }
    }

    /// Processes a record received from a peer.
    fn record_received(
        &mut self,
        source: PeerId,
        connection: ConnectionId,
        request_id: RequestId,
        mut record: Record,
    ) {
        if record.publisher.as_ref() == Some(self.kbuckets.local_key().preimage()) {
            // If the (alleged) publisher is the local node, do nothing. The
            // record of the original publisher should never be overwritten
            // as far as the publisher is concerned.
            self.queued_events.push_back(ToSwarm::NotifyHandler {
                peer_id: source,
                handler: NotifyHandler::One(connection),
                event: HandlerIn::PutRecordRes {
                    key: record.key,
                    value: record.value,
                    request_id,
                },
            });
            return;
        }

        let now = Instant::now();

        // Calculate the expiration exponentially inversely proportional to the
        // number of nodes between the local node and the closest node to the
        // key (beyond the replication factor). This avoids over-caching
        // outside of the k closest nodes to a key.
        let target = kbucket::Key::new(record.key.clone());
        let num_between = self.kbuckets.count_nodes_between(&target);
        let k = self.queries.config().replication_factor.get();
        let num_beyond_k = (usize::max(k, num_between) - k) as u32;
        let expiration = self
            .record_ttl
            .map(|ttl| now + exp_decrease(ttl, num_beyond_k));
        // The smaller expiration prevails. Only if neither TTL is set is the
        // record stored "forever".
        record.expires = record.expires.or(expiration).min(expiration);

        if let Some(job) = self.put_record_job.as_mut() {
            // Ignore the record in the next run of the replication job,
            // since the sender is usually one of the closest peers.
            job.skip(record.key.clone())
        }

        // While records received from a publisher, as well as records that do
        // not exist locally, are always (attempted to be) stored, there is a
        // choice here w.r.t. the handling of replicated records whose keys
        // refer to records that exist locally: the value and / or the
        // publisher may either be overridden or left unchanged. At the moment
        // and in the absence of a decisive argument for another option, both
        // are always overridden, as it avoids having to load the existing
        // record in the first place.

        if !record.is_expired(now) {
            // The record is cloned because of the libp2p protocol requirement
            // to send back the value in the response, although this is a
            // waste of resources.
            match self.record_filtering {
                StoreInserts::Unfiltered => match self.store.put(record.clone()) {
                    Ok(()) => {
                        tracing::debug!(
                            record=?record.key,
                            "Record stored: {} bytes",
                            record.value.len()
                        );
                        self.queued_events.push_back(ToSwarm::GenerateEvent(
                            Event::InboundRequest {
                                request: InboundRequest::PutRecord {
                                    source,
                                    connection,
                                    record: None,
                                },
                            },
                        ));
                    }
                    Err(e) => {
                        tracing::info!("Record not stored: {:?}", e);
                        self.queued_events.push_back(ToSwarm::NotifyHandler {
                            peer_id: source,
                            handler: NotifyHandler::One(connection),
                            event: HandlerIn::Reset(request_id),
                        });

                        return;
                    }
                },
                StoreInserts::FilterBoth => {
                    self.queued_events
                        .push_back(ToSwarm::GenerateEvent(Event::InboundRequest {
                            request: InboundRequest::PutRecord {
                                source,
                                connection,
                                record: Some(record.clone()),
                            },
                        }));
                }
            }
        }

        // The remote receives a [`HandlerIn::PutRecordRes`] even in the case
        // where the record is discarded due to being expired. Given that the
        // remote sent the local node a [`HandlerEvent::PutRecord`] request,
        // the remote perceives the local node as one of the k closest nodes
        // to the target. In addition, returning [`HandlerIn::PutRecordRes`]
        // does not reveal any internal information to a possibly malicious
        // remote node.
        self.queued_events.push_back(ToSwarm::NotifyHandler {
            peer_id: source,
            handler: NotifyHandler::One(connection),
            event: HandlerIn::PutRecordRes {
                key: record.key,
                value: record.value,
                request_id,
            },
        })
    }

    /// Processes a provider record received from a peer.
    fn provider_received(&mut self, key: record::Key, provider: KadPeer) {
        if &provider.node_id != self.kbuckets.local_key().preimage() {
            let record = ProviderRecord {
                key,
                provider: provider.node_id,
                expires: self.provider_record_ttl.map(|ttl| Instant::now() + ttl),
                addresses: provider.multiaddrs,
            };
            match self.record_filtering {
                StoreInserts::Unfiltered => {
                    if let Err(e) = self.store.add_provider(record) {
                        tracing::info!("Provider record not stored: {:?}", e);
                        return;
                    }

                    self.queued_events
                        .push_back(ToSwarm::GenerateEvent(Event::InboundRequest {
                            request: InboundRequest::AddProvider { record: None },
                        }));
                }
                StoreInserts::FilterBoth => {
                    self.queued_events
                        .push_back(ToSwarm::GenerateEvent(Event::InboundRequest {
                            request: InboundRequest::AddProvider {
                                record: Some(record),
                            },
                        }));
                }
            }
        }
    }

    fn address_failed(&mut self, peer_id: PeerId, address: &Multiaddr) {
        let key = kbucket::Key::from(peer_id);

        if let Some(addrs) = self.kbuckets.entry(&key).as_mut().and_then(|e| e.value()) {
            // Ideally, the address would only be removed if the error can be
            // classified as "permanent", but the error is not inspected here,
            // so the address is removed unconditionally as long as it is not
            // the last remaining address of the peer.
            if addrs.remove(address).is_ok() {
                tracing::debug!(
                    peer=%peer_id,
                    %address,
                    "Address removed from peer due to error."
                );
            } else {
                // Despite apparently having no reachable address (any longer),
                // the peer is kept in the routing table with the last address
                // to avoid (temporary) loss of network connectivity "flushing"
                // the routing table. Once in, a peer is only removed from the
                // routing table if it is the least recently connected peer,
                // currently disconnected and unreachable in the context of
                // another peer pending insertion into the same bucket. This is
                // handled transparently by the `KBucketsTable` and takes
                // effect through `KBucketsTable::take_applied_pending` within
                // `Behaviour::poll`.
                tracing::debug!(
                    peer=%peer_id,
                    %address,
                    "Last remaining address of peer is unreachable."
                );
            }
        }

        for query in self.queries.iter_mut() {
            if let Some(addrs) = query.peers.addresses.get_mut(&peer_id) {
                addrs.retain(|a| a != address);
            }
        }
    }

    fn on_connection_established(
        &mut self,
        ConnectionEstablished {
            peer_id,
            failed_addresses,
            other_established,
            ..
        }: ConnectionEstablished,
    ) {
        for addr in failed_addresses {
            self.address_failed(peer_id, addr);
        }

        // This is the peer's first connection.
        if other_established == 0 {
            self.connected_peers.insert(peer_id);
        }
    }

    fn on_address_change(
        &mut self,
        AddressChange {
            peer_id: peer,
            old,
            new,
            ..
        }: AddressChange,
    ) {
        let (old, new) = (old.get_remote_address(), new.get_remote_address());

        // Update the address in the routing table.
        if let Some(addrs) = self
            .kbuckets
            .entry(&kbucket::Key::from(peer))
            .as_mut()
            .and_then(|e| e.value())
        {
            if addrs.replace(old, new) {
                tracing::debug!(
                    %peer,
                    old_address=%old,
                    new_address=%new,
                    "Old address replaced with new address for peer."
                );
            } else {
                tracing::debug!(
                    %peer,
                    old_address=%old,
                    new_address=%new,
                    "Old address not replaced with new address for peer as old address wasn't present.",
                );
            }
        } else {
            tracing::debug!(
                %peer,
                old_address=%old,
                new_address=%new,
                "Old address not replaced with new address for peer as peer is not present in the \
                 routing table."
            );
        }

        // Update the address in the query address cache.
        //
        // Given two connected nodes: local node A and remote node B. Say node B
        // is not in node A's routing table. Additionally, node B is part of the
        // `Query::addresses` list of an ongoing query on node A. Say node B
        // triggers an address change and then disconnects. Later on, the
        // earlier mentioned query on node A would like to connect to node B.
        // Without replacing the address in the `Query::addresses` set, node A
        // would attempt to dial the old and not the new address.
        //
        // While upholding correctness, iterating through all discovered
        // addresses of a peer in all currently ongoing queries might have a
        // large performance impact. In that case one might consider only
        // updating the addresses in the routing table, as the worst-case
        // scenario is a connection failure during a query to a peer that moved.
        for query in self.queries.iter_mut() {
            if let Some(addrs) = query.peers.addresses.get_mut(&peer) {
                for addr in addrs.iter_mut() {
                    if addr == old {
                        *addr = new.clone();
                    }
                }
            }
        }
    }

    fn on_dial_failure(&mut self, DialFailure { peer_id, error, .. }: DialFailure) {
        let Some(peer_id) = peer_id else { return };

        match error {
            DialError::LocalPeerId { .. }
            | DialError::WrongPeerId { .. }
            | DialError::Aborted
            | DialError::Denied { .. }
            | DialError::Transport(_)
            | DialError::NoAddresses => {
                if let DialError::Transport(addresses) = error {
                    for (addr, _) in addresses {
                        self.address_failed(peer_id, addr)
                    }
                }

                for query in self.queries.iter_mut() {
                    query.on_failure(&peer_id);
                }
            }
            DialError::DialPeerConditionFalse(
                dial_opts::PeerCondition::Disconnected
                | dial_opts::PeerCondition::NotDialing
                | dial_opts::PeerCondition::DisconnectedAndNotDialing,
            ) => {
                // We might (still) be connected, or about to be connected,
                // thus do not report the failure to the queries.
            }
            DialError::DialPeerConditionFalse(dial_opts::PeerCondition::Always) => {
                unreachable!("DialPeerCondition::Always can not trigger DialPeerConditionFalse.");
            }
        }
    }

    fn on_connection_closed(
        &mut self,
        ConnectionClosed {
            peer_id,
            remaining_established,
            connection_id,
            ..
        }: ConnectionClosed,
    ) {
        self.connections.remove(&connection_id);

        if remaining_established == 0 {
            for query in self.queries.iter_mut() {
                query.on_failure(&peer_id);
            }
            self.connection_updated(peer_id, None, NodeStatus::Disconnected);
            self.connected_peers.remove(&peer_id);
        }
    }

    /// Preloads a new [`Handler`] with requests that are waiting
    /// to be sent to the newly connected peer.
    fn preload_new_handler(
        &mut self,
        handler: &mut Handler,
        connection_id: ConnectionId,
        peer: PeerId,
    ) {
        self.connections.insert(connection_id, peer);
        // Queue events for sending pending RPCs to the connected peer.
        // There can be only one pending RPC per peer and query, by definition.
        for (_peer_id, event) in self.queries.iter_mut().filter_map(|q| {
            q.pending_rpcs
                .iter()
                .position(|(p, _)| p == &peer)
                .map(|p| q.pending_rpcs.remove(p))
        }) {
            handler.on_behaviour_event(event)
        }
    }
}

/// Exponentially decreases the given duration (base 2).
fn exp_decrease(ttl: Duration, exp: u32) -> Duration {
    Duration::from_secs(ttl.as_secs().checked_shr(exp).unwrap_or(0))
}
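
// For instance, with the default record TTL of 48 hours:
//
//     exp_decrease(Duration::from_secs(48 * 60 * 60), 0) // 48h
//     exp_decrease(Duration::from_secs(48 * 60 * 60), 1) // 24h
//     exp_decrease(Duration::from_secs(48 * 60 * 60), 2) // 12h
//
// i.e. a record received at a node `exp` positions beyond the k closest
// nodes to its key expires twice as fast per additional position.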

impl<TStore> NetworkBehaviour for Behaviour<TStore>
where
    TStore: RecordStore + Send + 'static,
{
    type ConnectionHandler = Handler;
    type ToSwarm = Event;

    fn handle_established_inbound_connection(
        &mut self,
        connection_id: ConnectionId,
        peer: PeerId,
        local_addr: &Multiaddr,
        remote_addr: &Multiaddr,
    ) -> Result<THandler<Self>, ConnectionDenied> {
        let connected_point = ConnectedPoint::Listener {
            local_addr: local_addr.clone(),
            send_back_addr: remote_addr.clone(),
        };

        let mut handler = Handler::new(
            self.protocol_config.clone(),
            connected_point,
            peer,
            self.mode,
        );
        self.preload_new_handler(&mut handler, connection_id, peer);

        Ok(handler)
    }

    fn handle_established_outbound_connection(
        &mut self,
        connection_id: ConnectionId,
        peer: PeerId,
        addr: &Multiaddr,
        role_override: Endpoint,
        port_use: PortUse,
    ) -> Result<THandler<Self>, ConnectionDenied> {
        let connected_point = ConnectedPoint::Dialer {
            address: addr.clone(),
            role_override,
            port_use,
        };

        let mut handler = Handler::new(
            self.protocol_config.clone(),
            connected_point,
            peer,
            self.mode,
        );
        self.preload_new_handler(&mut handler, connection_id, peer);

        Ok(handler)
    }

    fn handle_pending_outbound_connection(
        &mut self,
        _connection_id: ConnectionId,
        maybe_peer: Option<PeerId>,
        _addresses: &[Multiaddr],
        _effective_role: Endpoint,
    ) -> Result<Vec<Multiaddr>, ConnectionDenied> {
        let peer_id = match maybe_peer {
            None => return Ok(vec![]),
            Some(peer) => peer,
        };

        // Addresses should be ordered by decreasing likelihood of
        // connectivity, so start with the addresses of that peer in the
        // k-buckets.
        let key = kbucket::Key::from(peer_id);
        let mut peer_addrs =
            if let Some(kbucket::Entry::Present(mut entry, _)) = self.kbuckets.entry(&key) {
                let addrs = entry.value().iter().cloned().collect::<Vec<_>>();
                debug_assert!(!addrs.is_empty(), "Empty peer addresses in routing table.");
                addrs
            } else {
                Vec::new()
            };

        // Add to those the addresses known from the ongoing queries.
        for query in self.queries.iter() {
            if let Some(addrs) = query.peers.addresses.get(&peer_id) {
                peer_addrs.extend(addrs.iter().cloned())
            }
        }

        Ok(peer_addrs)
    }

    fn on_connection_handler_event(
        &mut self,
        source: PeerId,
        connection: ConnectionId,
        event: THandlerOutEvent<Self>,
    ) {
        match event {
            HandlerEvent::ProtocolConfirmed { endpoint } => {
                debug_assert!(self.connected_peers.contains(&source));
                // The remote's address can only be put into the routing table,
                // and thus shared with other nodes, if the local node is the
                // dialer, since the remote address on an inbound connection
                // may be specific to that connection (e.g. typically the TCP
                // port numbers).
                let address = match endpoint {
                    ConnectedPoint::Dialer { address, .. } => Some(address),
                    ConnectedPoint::Listener { .. } => None,
                };

                self.connection_updated(source, address, NodeStatus::Connected);
            }

            HandlerEvent::ProtocolNotSupported { endpoint } => {
                let address = match endpoint {
                    ConnectedPoint::Dialer { address, .. } => Some(address),
                    ConnectedPoint::Listener { .. } => None,
                };
                self.connection_updated(source, address, NodeStatus::Disconnected);
            }

            HandlerEvent::FindNodeReq { key, request_id } => {
                let closer_peers = self.find_closest(&kbucket::Key::new(key), &source);

                self.queued_events
                    .push_back(ToSwarm::GenerateEvent(Event::InboundRequest {
                        request: InboundRequest::FindNode {
                            num_closer_peers: closer_peers.len(),
                        },
                    }));

                self.queued_events.push_back(ToSwarm::NotifyHandler {
                    peer_id: source,
                    handler: NotifyHandler::One(connection),
                    event: HandlerIn::FindNodeRes {
                        closer_peers,
                        request_id,
                    },
                });
            }

            HandlerEvent::FindNodeRes {
                closer_peers,
                query_id,
            } => {
                self.discovered(&query_id, &source, closer_peers.iter());
            }

            HandlerEvent::GetProvidersReq { key, request_id } => {
                let provider_peers = self.provider_peers(&key, &source);
                let closer_peers = self.find_closest(&kbucket::Key::new(key), &source);

                self.queued_events
                    .push_back(ToSwarm::GenerateEvent(Event::InboundRequest {
                        request: InboundRequest::GetProvider {
                            num_closer_peers: closer_peers.len(),
                            num_provider_peers: provider_peers.len(),
                        },
                    }));

                self.queued_events.push_back(ToSwarm::NotifyHandler {
                    peer_id: source,
                    handler: NotifyHandler::One(connection),
                    event: HandlerIn::GetProvidersRes {
                        closer_peers,
                        provider_peers,
                        request_id,
                    },
                });
            }

            HandlerEvent::GetProvidersRes {
                closer_peers,
                provider_peers,
                query_id,
            } => {
                let peers = closer_peers.iter().chain(provider_peers.iter());
                self.discovered(&query_id, &source, peers);
                if let Some(query) = self.queries.get_mut(&query_id) {
                    let stats = query.stats().clone();
                    if let QueryInfo::GetProviders {
                        ref key,
                        ref mut providers_found,
                        ref mut step,
                        ..
                    } = query.info
                    {
                        *providers_found += provider_peers.len();
                        let providers = provider_peers.iter().map(|p| p.node_id).collect();

                        self.queued_events.push_back(ToSwarm::GenerateEvent(
                            Event::OutboundQueryProgressed {
                                id: query_id,
                                result: QueryResult::GetProviders(Ok(
                                    GetProvidersOk::FoundProviders {
                                        key: key.clone(),
                                        providers,
                                    },
                                )),
                                step: step.clone(),
                                stats,
                            },
                        ));
                        *step = step.next();
                    }
                }
            }
            HandlerEvent::QueryError { query_id, error } => {
                tracing::debug!(
                    peer=%source,
                    query=?query_id,
                    "Request to peer in query failed with {:?}",
                    error
                );
                // If the query to which the error relates is still active,
                // signal the failure w.r.t. `source`.
                if let Some(query) = self.queries.get_mut(&query_id) {
                    query.on_failure(&source)
                }
            }

            HandlerEvent::AddProvider { key, provider } => {
                // Only accept a provider record from a legitimate peer.
                if provider.node_id != source {
                    return;
                }

                self.provider_received(key, provider);
            }

            HandlerEvent::GetRecord { key, request_id } => {
                // Lookup the record locally.
                let record = match self.store.get(&key) {
                    Some(record) => {
                        if record.is_expired(Instant::now()) {
                            self.store.remove(&key);
                            None
                        } else {
                            Some(record.into_owned())
                        }
                    }
                    None => None,
                };

                let closer_peers = self.find_closest(&kbucket::Key::new(key), &source);

                self.queued_events
                    .push_back(ToSwarm::GenerateEvent(Event::InboundRequest {
                        request: InboundRequest::GetRecord {
                            num_closer_peers: closer_peers.len(),
                            present_locally: record.is_some(),
                        },
                    }));

                self.queued_events.push_back(ToSwarm::NotifyHandler {
                    peer_id: source,
                    handler: NotifyHandler::One(connection),
                    event: HandlerIn::GetRecordRes {
                        record,
                        closer_peers,
                        request_id,
                    },
                });
            }
| |
|
| | HandlerEvent::GetRecordRes { |
| | record, |
| | closer_peers, |
| | query_id, |
| | } => { |
| | if let Some(query) = self.queries.get_mut(&query_id) { |
| | let stats = query.stats().clone(); |
| | if let QueryInfo::GetRecord { |
| | key, |
| | ref mut step, |
| | ref mut found_a_record, |
| | cache_candidates, |
| | } = &mut query.info |
| | { |
| | if let Some(record) = record { |
| | *found_a_record = true; |
| | let record = PeerRecord { |
| | peer: Some(source), |
| | record, |
| | }; |
| |
|
| | self.queued_events.push_back(ToSwarm::GenerateEvent( |
| | Event::OutboundQueryProgressed { |
| | id: query_id, |
| | result: QueryResult::GetRecord(Ok(GetRecordOk::FoundRecord( |
| | record, |
| | ))), |
| | step: step.clone(), |
| | stats, |
| | }, |
| | )); |
| |
|
| | *step = step.next(); |
| | } else { |
| | tracing::trace!(record=?key, %source, "Record not found at source"); |
| | if let Caching::Enabled { max_peers } = self.caching { |
| | let source_key = kbucket::Key::from(source); |
| | let target_key = kbucket::Key::from(key.clone()); |
| | let distance = source_key.distance(&target_key); |
| | cache_candidates.insert(distance, source); |
| | if cache_candidates.len() > max_peers as usize { |
| | // The cache candidate set is bounded by `max_peers`; evict the |
| | // candidate furthest from the record key. |
| | let last = |
| | *cache_candidates.keys().next_back().expect("len > 0"); |
| | cache_candidates.remove(&last); |
| | } |
| | } |
| | } |
| | } |
| | } |
| |
|
| | self.discovered(&query_id, &source, closer_peers.iter()); |
| | } |
| |
|
| | HandlerEvent::PutRecord { record, request_id } => { |
| | self.record_received(source, connection, request_id, record); |
| | } |
| |
|
| | HandlerEvent::PutRecordRes { query_id, .. } => { |
| | if let Some(query) = self.queries.get_mut(&query_id) { |
| | query.on_success(&source, vec![]); |
| | if let QueryInfo::PutRecord { |
| | phase: PutRecordPhase::PutRecord { success, .. }, |
| | quorum, |
| | .. |
| | } = &mut query.info |
| | { |
| | success.push(source); |
| |
|
| | let quorum = quorum.get(); |
| | if success.len() >= quorum { |
| | let peers = success.clone(); |
| | let finished = query.try_finish(peers.iter()); |
| | if !finished { |
| | tracing::debug!( |
| | peer=%source, |
| | query=?query_id, |
| | "PutRecord query reached quorum ({}/{}) with response \ |
| | from peer but could not yet finish.", |
| | peers.len(), |
| | quorum, |
| | ); |
| | } |
| | } |
| | } |
| | } |
| | } |
| | }; |
| | } |
| |
|
| | #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))] |
| | fn poll( |
| | &mut self, |
| | cx: &mut Context<'_>, |
| | ) -> Poll<ToSwarm<Self::ToSwarm, THandlerInEvent<Self>>> { |
| | let now = Instant::now(); |
| |
|
| | // Calculate the available capacity for queries triggered by background jobs. |
| | let mut jobs_query_capacity = JOBS_MAX_QUERIES.saturating_sub(self.queries.size()); |
| |
|
| | // Run the periodic provider announcement job. |
| | if let Some(mut job) = self.add_provider_job.take() { |
| | let num = usize::min(JOBS_MAX_NEW_QUERIES, jobs_query_capacity); |
| | for i in 0..num { |
| | if let Poll::Ready(r) = job.poll(cx, &mut self.store, now) { |
| | self.start_add_provider(r.key, AddProviderContext::Republish) |
| | } else { |
| | jobs_query_capacity -= i; |
| | break; |
| | } |
| | } |
| | self.add_provider_job = Some(job); |
| | } |
| |
|
| | // Run the periodic record replication / publication job. |
| | if let Some(mut job) = self.put_record_job.take() { |
| | let num = usize::min(JOBS_MAX_NEW_QUERIES, jobs_query_capacity); |
| | for _ in 0..num { |
| | if let Poll::Ready(r) = job.poll(cx, &mut self.store, now) { |
| | let context = |
| | if r.publisher.as_ref() == Some(self.kbuckets.local_key().preimage()) { |
| | PutRecordContext::Republish |
| | } else { |
| | PutRecordContext::Replicate |
| | }; |
| | self.start_put_record(r, Quorum::All, context) |
| | } else { |
| | break; |
| | } |
| | } |
| | self.put_record_job = Some(job); |
| | } |
| |
|
| | // Poll the automatic periodic bootstrap. |
| | if let Poll::Ready(()) = self.bootstrap_status.poll_next_bootstrap(cx) { |
| | if let Err(e) = self.bootstrap() { |
| | tracing::warn!("Failed to trigger bootstrap: {e}"); |
| | } |
| | } |
| |
|
| | loop { |
| | // Drain queued events first. |
| | if let Some(event) = self.queued_events.pop_front() { |
| | return Poll::Ready(event); |
| | } |
| |
|
| | // Drain applied pending entries from the routing table. |
| | if let Some(entry) = self.kbuckets.take_applied_pending() { |
| | let kbucket::Node { key, value } = entry.inserted; |
| | let event = Event::RoutingUpdated { |
| | bucket_range: self |
| | .kbuckets |
| | .bucket(&key) |
| | .map(|b| b.range()) |
| | .expect("Self to never be applied from pending."), |
| | peer: key.into_preimage(), |
| | is_new_peer: true, |
| | addresses: value, |
| | old_peer: entry.evicted.map(|n| n.key.into_preimage()), |
| | }; |
| | return Poll::Ready(ToSwarm::GenerateEvent(event)); |
| | } |
| |
|
| | // Look for a finished query. |
| | loop { |
| | match self.queries.poll(now) { |
| | QueryPoolState::Finished(q) => { |
| | if let Some(event) = self.query_finished(q) { |
| | return Poll::Ready(ToSwarm::GenerateEvent(event)); |
| | } |
| | } |
| | QueryPoolState::Timeout(q) => { |
| | if let Some(event) = self.query_timeout(q) { |
| | return Poll::Ready(ToSwarm::GenerateEvent(event)); |
| | } |
| | } |
| | QueryPoolState::Waiting(Some((query, peer_id))) => { |
| | let event = query.info.to_request(query.id()); |
| | // TODO: AddProvider requests yield no response, so the query completes |
| | // as soon as all requests have been sent. However, the handler should |
| | // better emit an event when the request has been sent (and report |
| | // an error if sending fails), instead of immediately reporting |
| | // "success" somewhat prematurely here. |
| | if let QueryInfo::AddProvider { |
| | phase: AddProviderPhase::AddProvider { .. }, |
| | .. |
| | } = &query.info |
| | { |
| | query.on_success(&peer_id, vec![]) |
| | } |
| |
|
| | if self.connected_peers.contains(&peer_id) { |
| | self.queued_events.push_back(ToSwarm::NotifyHandler { |
| | peer_id, |
| | event, |
| | handler: NotifyHandler::Any, |
| | }); |
| | } else if &peer_id != self.kbuckets.local_key().preimage() { |
| | query.pending_rpcs.push((peer_id, event)); |
| | self.queued_events.push_back(ToSwarm::Dial { |
| | opts: DialOpts::peer_id(peer_id).build(), |
| | }); |
| | } |
| | } |
| | QueryPoolState::Waiting(None) | QueryPoolState::Idle => break, |
| | } |
| | } |
| |
|
| | // No immediate event was produced as a result of a finished query. |
| | // If no new events have been queued either, signal `NotReady` to |
| | // be polled again later. |
| | if self.queued_events.is_empty() { |
| | self.no_events_waker = Some(cx.waker().clone()); |
| |
|
| | return Poll::Pending; |
| | } |
| | } |
| | } |
| |
|
| | fn on_swarm_event(&mut self, event: FromSwarm) { |
| | self.listen_addresses.on_swarm_event(&event); |
| | let external_addresses_changed = self.external_addresses.on_swarm_event(&event); |
| |
|
| | if self.auto_mode && external_addresses_changed { |
| | self.determine_mode_from_external_addresses(); |
| | } |
| |
|
| | match event { |
| | FromSwarm::ConnectionEstablished(connection_established) => { |
| | self.on_connection_established(connection_established) |
| | } |
| | FromSwarm::ConnectionClosed(connection_closed) => { |
| | self.on_connection_closed(connection_closed) |
| | } |
| | FromSwarm::DialFailure(dial_failure) => self.on_dial_failure(dial_failure), |
| | FromSwarm::AddressChange(address_change) => self.on_address_change(address_change), |
| | FromSwarm::NewListenAddr(_) if self.connected_peers.is_empty() => { |
| | // A new listen addr was just discovered and we have no connected peers, |
| | // it can mean that our network interfaces were not up but they are now, |
| | // so it might be a good idea to trigger a bootstrap. |
| | self.bootstrap_status.trigger(); |
| | } |
| | _ => {} |
| | } |
| | } |
| | } |
| |
|
| | /// Peer Info combines a Peer ID with a set of multiaddrs that the peer is listening on. |
| | #[derive(Debug, Clone, PartialEq, Eq)] |
| | pub struct PeerInfo { |
| | pub peer_id: PeerId, |
| | pub addrs: Vec<Multiaddr>, |
| | } |
| |
|
| | /// A quorum w.r.t. the configured replication factor specifies the minimum |
| | /// number of distinct nodes that must be successfully contacted in order |
| | /// for a query to succeed. |
| | #[derive(Debug, Copy, Clone, PartialEq, Eq)] |
| | pub enum Quorum { |
| | One, |
| | Majority, |
| | All, |
| | N(NonZeroUsize), |
| | } |
| |
|
| | impl Quorum { |
| | /// Evaluate the quorum w.r.t. a given total (number of peers). |
| | fn eval(&self, total: NonZeroUsize) -> NonZeroUsize { |
| | match self { |
| | Quorum::One => NonZeroUsize::new(1).expect("1 != 0"), |
| | Quorum::Majority => NonZeroUsize::new(total.get() / 2 + 1).expect("n + 1 != 0"), |
| | Quorum::All => total, |
| | Quorum::N(n) => NonZeroUsize::min(total, *n), |
| | } |
| | } |
| | } |
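| |
|
| | // Editor's note: the following test module is an illustrative sketch, not |
| | // upstream code. It shows how `eval` resolves each `Quorum` variant against |
| | // a total of 20 peers. |
| | #[cfg(test)] |
| | mod quorum_eval_example { |
| | use super::*; |
| |
|
| | #[test] |
| | fn eval_against_twenty_peers() { |
| | let total = NonZeroUsize::new(20).expect("20 != 0"); |
| | assert_eq!(Quorum::One.eval(total).get(), 1); |
| | assert_eq!(Quorum::Majority.eval(total).get(), 11); // 20 / 2 + 1 |
| | assert_eq!(Quorum::All.eval(total).get(), 20); |
| | // An explicit `N` is capped at the total. |
| | let n = NonZeroUsize::new(50).expect("50 != 0"); |
| | assert_eq!(Quorum::N(n).eval(total).get(), 20); |
| | } |
| | } |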
| |
|
| | /// A record either received by the given peer or retrieved from the local |
| | /// record store. |
| | #[derive(Debug, Clone, PartialEq, Eq)] |
| | pub struct PeerRecord { |
| | /// The peer from whom the record was received. `None` if the record was |
| | /// retrieved from local storage. |
| | pub peer: Option<PeerId>, |
| | pub record: Record, |
| | } |
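| |
|
| | // Editor's note: an illustrative sketch, not upstream code. `peer: None` |
| | // marks a record served from the local store rather than received from a |
| | // remote peer (assumes the `Record::new` and `record::Key::new` constructors |
| | // from this crate). |
| | #[cfg(test)] |
| | mod peer_record_example { |
| | use super::*; |
| |
|
| | #[test] |
| | fn local_record_has_no_source_peer() { |
| | let record = Record::new(record::Key::new(b"key"), b"value".to_vec()); |
| | let from_store = PeerRecord { peer: None, record }; |
| | assert!(from_store.peer.is_none()); |
| | } |
| | } |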
| |
|
| | ////////////////////////////////////////////////////////////////////////////// |
| | // Events |
| |
|
| | /// The events produced by the `Kademlia` behaviour. |
| | /// |
| | /// See [`NetworkBehaviour::poll`]. |
| | #[derive(Debug, Clone)] |
| | #[allow(clippy::large_enum_variant)] |
| | pub enum Event { |
| | /// An inbound request has been received and handled. |
| | // |
| | // Note on the difference between 'request' and 'query': A request is a |
| | // single request-response style exchange with a single remote peer. A query |
| | // usually involves multiple requests across multiple remote peers. |
| | InboundRequest { request: InboundRequest }, |
| |
|
| | /// An outbound query has made progress. |
| | OutboundQueryProgressed { |
| | /// The ID of the query that made progress. |
| | id: QueryId, |
| | /// The intermediate result of the query. |
| | result: QueryResult, |
| | /// Execution statistics from the query. |
| | stats: QueryStats, |
| | /// Indicates which event this is, if there are multiple responses for a single query. |
| | step: ProgressStep, |
| | }, |
| |
|
| | /// The routing table has been updated with a new peer and / or |
| | /// address, thereby possibly evicting another peer. |
| | RoutingUpdated { |
| | /// The ID of the peer that was added or updated. |
| | peer: PeerId, |
| | /// Whether this is a new peer and was thus just added to the routing |
| | /// table, or whether it is an existing peer whose addresses changed. |
| | is_new_peer: bool, |
| | /// The full list of known addresses of `peer`. |
| | addresses: Addresses, |
| | /// The minimum inclusive and maximum inclusive distance of |
| | /// the peer's bucket. |
| | bucket_range: (Distance, Distance), |
| | /// The ID of the peer that was evicted from the routing table to make |
| | /// room for the new peer, if any. |
| | old_peer: Option<PeerId>, |
| | }, |
| |
|
| | /// A peer has connected for whom no listen address is known. |
| | /// |
| | /// If the peer is to be added to the routing table, a known |
| | /// listen address for the peer must be provided via [`Behaviour::add_address`]. |
| | UnroutablePeer { peer: PeerId }, |
| |
|
| | /// A connection to a peer has been established for whom a listen address |
| | /// is known but the peer has not been added to the routing table either |
| | /// because [`BucketInserts::Manual`] is configured or because |
| | /// the corresponding bucket is full. |
| | /// |
| | /// If the peer is to be included in the routing table, it must |
| | /// be explicitly added via [`Behaviour::add_address`], possibly after |
| | /// removing another peer. |
| | /// |
| | /// See [`Behaviour::kbucket`] for insight into the contents of |
| | /// the k-bucket of `peer`. |
| | RoutablePeer { peer: PeerId, address: Multiaddr }, |
| |
|
| | /// A connection to a peer has been established for whom a listen address |
| | /// is known but the peer is only pending insertion into the routing table |
| | /// if the least-recently disconnected peer is unresponsive, i.e. the peer |
| | /// may not make it into the routing table. |
| | /// |
| | /// If the peer is to be unconditionally included in the routing table, |
| | /// it must be explicitly added via [`Behaviour::add_address`], possibly |
| | /// after removing another peer. |
| | /// |
| | /// See [`Behaviour::kbucket`] for insight into the contents of |
| | /// the k-bucket of `peer`. |
| | PendingRoutablePeer { peer: PeerId, address: Multiaddr }, |
| |
|
| | /// This peer's mode has been updated automatically. |
| | /// |
| | /// This happens in response to an external |
| | /// address being added or removed. |
| | ModeChanged { new_mode: Mode }, |
| | } |
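| |
|
| | // Editor's note: an illustrative sketch, not upstream code. A compact |
| | // classifier over `Event`, e.g. for metrics or logging. `PeerId::random()` |
| | // is assumed available (libp2p-identity with the `rand` feature enabled). |
| | #[cfg(test)] |
| | mod event_label_example { |
| | use super::*; |
| |
|
| | fn label(event: &Event) -> &'static str { |
| | match event { |
| | Event::InboundRequest { .. } => "inbound_request", |
| | Event::OutboundQueryProgressed { .. } => "outbound_query_progressed", |
| | Event::RoutingUpdated { .. } => "routing_updated", |
| | Event::UnroutablePeer { .. } => "unroutable_peer", |
| | Event::RoutablePeer { .. } => "routable_peer", |
| | Event::PendingRoutablePeer { .. } => "pending_routable_peer", |
| | Event::ModeChanged { .. } => "mode_changed", |
| | } |
| | } |
| |
|
| | #[test] |
| | fn labels_unroutable_peer() { |
| | let event = Event::UnroutablePeer { peer: PeerId::random() }; |
| | assert_eq!(label(&event), "unroutable_peer"); |
| | } |
| | } |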
| |
|
| | /// Information about progress events. |
| | #[derive(Debug, Clone)] |
| | pub struct ProgressStep { |
| | /// The index of this event within the series of progress events. |
| | pub count: NonZeroUsize, |
| | /// Is this the final event? |
| | pub last: bool, |
| | } |
| |
|
| | impl ProgressStep { |
| | fn first() -> Self { |
| | Self { |
| | count: NonZeroUsize::new(1).expect("1 to be greater than 0."), |
| | last: false, |
| | } |
| | } |
| |
|
| | fn first_and_last() -> Self { |
| | let mut first = ProgressStep::first(); |
| | first.last = true; |
| | first |
| | } |
| |
|
| | fn next(&self) -> Self { |
| | assert!(!self.last); |
| | let count = NonZeroUsize::new(self.count.get() + 1).expect("Adding 1 not to result in 0."); |
| |
|
| | Self { count, last: false } |
| | } |
| | } |
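| |
|
| | // Editor's note: an illustrative sketch, not upstream code. Steps count up |
| | // from 1 and only the final event of a query carries `last == true`. |
| | #[cfg(test)] |
| | mod progress_step_example { |
| | use super::*; |
| |
|
| | #[test] |
| | fn counts_up_from_one() { |
| | let first = ProgressStep::first(); |
| | assert_eq!(first.count.get(), 1); |
| | assert!(!first.last); |
| | assert_eq!(first.next().count.get(), 2); |
| | assert!(ProgressStep::first_and_last().last); |
| | } |
| | } |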
| |
|
| | /// Information about a received and handled inbound request. |
| | #[derive(Debug, Clone)] |
| | pub enum InboundRequest { |
| | /// Request for the list of nodes whose IDs are closest to `key`. |
| | FindNode { num_closer_peers: usize }, |
| | /// Same as `FindNode`, but should also return the entries of the local |
| | /// provider list for this key. |
| | GetProvider { |
| | num_closer_peers: usize, |
| | num_provider_peers: usize, |
| | }, |
| | /// A peer sent an add provider request. |
| | /// If filtering [`StoreInserts::FilterBoth`] is enabled, the [`ProviderRecord`] is |
| | /// included. |
| | /// |
| | /// See [`StoreInserts`] and [`Config::set_record_filtering`] for details. |
| | AddProvider { record: Option<ProviderRecord> }, |
| | /// Request to retrieve a record. |
| | GetRecord { |
| | num_closer_peers: usize, |
| | present_locally: bool, |
| | }, |
| | /// A peer sent a put record request. |
| | /// If filtering [`StoreInserts::FilterBoth`] is enabled, the [`Record`] is included. |
| | /// |
| | /// See [`StoreInserts`] and [`Config::set_record_filtering`] for details. |
| | PutRecord { |
| | source: PeerId, |
| | connection: ConnectionId, |
| | record: Option<Record>, |
| | }, |
| | } |
| |
|
| | /// The results of Kademlia queries. |
| | #[derive(Debug, Clone)] |
| | pub enum QueryResult { |
| | /// The result of [`Behaviour::bootstrap`]. |
| | Bootstrap(BootstrapResult), |
| |
|
| | /// The result of [`Behaviour::get_closest_peers`]. |
| | GetClosestPeers(GetClosestPeersResult), |
| |
|
| | /// The result of [`Behaviour::get_providers`]. |
| | GetProviders(GetProvidersResult), |
| |
|
| | /// The result of [`Behaviour::start_providing`]. |
| | StartProviding(AddProviderResult), |
| |
|
| | /// The result of an (automatic) republishing of a provider record. |
| | RepublishProvider(AddProviderResult), |
| |
|
| | /// The result of [`Behaviour::get_record`]. |
| | GetRecord(GetRecordResult), |
| |
|
| | /// The result of [`Behaviour::put_record`]. |
| | PutRecord(PutRecordResult), |
| |
|
| | /// The result of an (automatic) republishing of a (value-)record. |
| | RepublishRecord(PutRecordResult), |
| | } |
| |
|
| | /// The result of [`Behaviour::get_record`]. |
| | pub type GetRecordResult = Result<GetRecordOk, GetRecordError>; |
| |
|
| | /// The successful result of [`Behaviour::get_record`]. |
| | #[derive(Debug, Clone)] |
| | pub enum GetRecordOk { |
| | FoundRecord(PeerRecord), |
| | FinishedWithNoAdditionalRecord { |
| | /// If caching is enabled, these are the peers closest |
| | /// _to the record key_ (not the local node) that were queried but |
| | /// did not return the record, sorted by distance to the record key |
| | /// from closest to furthest. How many of these are tracked is configured |
| | /// by [`Config::set_caching`]. |
| | /// |
| | /// Writing back the cache at these peers is a manual operation, |
| | /// i.e. you may use [`Behaviour::put_record_to`] after selecting |
| | /// one of the returned peers. |
| | cache_candidates: BTreeMap<kbucket::Distance, PeerId>, |
| | }, |
| | } |
| |
|
| | /// The error result of [`Behaviour::get_record`]. |
| | #[derive(Debug, Clone, Error)] |
| | pub enum GetRecordError { |
| | #[error("the record was not found")] |
| | NotFound { |
| | key: record::Key, |
| | closest_peers: Vec<PeerId>, |
| | }, |
| | #[error("the quorum failed; needed {quorum} peers")] |
| | QuorumFailed { |
| | key: record::Key, |
| | records: Vec<PeerRecord>, |
| | quorum: NonZeroUsize, |
| | }, |
| | #[error("the request timed out")] |
| | Timeout { key: record::Key }, |
| | } |
| |
|
| | impl GetRecordError { |
| | /// Gets the key of the record for which the operation failed. |
| | pub fn key(&self) -> &record::Key { |
| | match self { |
| | GetRecordError::QuorumFailed { key, .. } => key, |
| | GetRecordError::Timeout { key, .. } => key, |
| | GetRecordError::NotFound { key, .. } => key, |
| | } |
| | } |
| |
|
| | /// Extracts the key of the record for which the operation failed, |
| | /// consuming the error. |
| | pub fn into_key(self) -> record::Key { |
| | match self { |
| | GetRecordError::QuorumFailed { key, .. } => key, |
| | GetRecordError::Timeout { key, .. } => key, |
| | GetRecordError::NotFound { key, .. } => key, |
| | } |
| | } |
| | } |
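| |
|
| | // Editor's note: an illustrative sketch, not upstream code. Callers often |
| | // branch on the error variant to decide whether a retry is worthwhile; |
| | // every variant exposes the record key. |
| | #[cfg(test)] |
| | mod get_record_error_example { |
| | use super::*; |
| |
|
| | fn is_retryable(e: &GetRecordError) -> bool { |
| | match e { |
| | GetRecordError::NotFound { .. } => false, |
| | GetRecordError::QuorumFailed { .. } | GetRecordError::Timeout { .. } => true, |
| | } |
| | } |
| |
|
| | #[test] |
| | fn timeout_is_retryable() { |
| | let e = GetRecordError::Timeout { key: record::Key::new(b"k") }; |
| | assert!(is_retryable(&e)); |
| | assert_eq!(e.key().to_vec(), b"k".to_vec()); |
| | } |
| | } |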
| |
|
| | /// The result of [`Behaviour::put_record`]. |
| | pub type PutRecordResult = Result<PutRecordOk, PutRecordError>; |
| |
|
| | /// The successful result of [`Behaviour::put_record`]. |
| | #[derive(Debug, Clone)] |
| | pub struct PutRecordOk { |
| | pub key: record::Key, |
| | } |
| |
|
| | /// The error result of [`Behaviour::put_record`]. |
| | #[derive(Debug, Clone, Error)] |
| | pub enum PutRecordError { |
| | #[error("the quorum failed; needed {quorum} peers")] |
| | QuorumFailed { |
| | key: record::Key, |
| | /// [`PeerId`]s of the peers the record was successfully stored on. |
| | success: Vec<PeerId>, |
| | quorum: NonZeroUsize, |
| | }, |
| | #[error("the request timed out")] |
| | Timeout { |
| | key: record::Key, |
| | /// [`PeerId`]s of the peers the record was successfully stored on. |
| | success: Vec<PeerId>, |
| | quorum: NonZeroUsize, |
| | }, |
| | } |
| |
|
| | impl PutRecordError { |
| | /// Gets the key of the record for which the operation failed. |
| | pub fn key(&self) -> &record::Key { |
| | match self { |
| | PutRecordError::QuorumFailed { key, .. } => key, |
| | PutRecordError::Timeout { key, .. } => key, |
| | } |
| | } |
| |
|
| | /// Extracts the key of the record for which the operation failed, |
| | /// consuming the error. |
| | pub fn into_key(self) -> record::Key { |
| | match self { |
| | PutRecordError::QuorumFailed { key, .. } => key, |
| | PutRecordError::Timeout { key, .. } => key, |
| | } |
| | } |
| | } |
| |
|
| | /// The result of [`Behaviour::bootstrap`]. |
| | pub type BootstrapResult = Result<BootstrapOk, BootstrapError>; |
| |
|
| | /// The successful result of [`Behaviour::bootstrap`]. |
| | #[derive(Debug, Clone)] |
| | pub struct BootstrapOk { |
| | pub peer: PeerId, |
| | pub num_remaining: u32, |
| | } |
| |
|
| | /// The error result of [`Behaviour::bootstrap`]. |
| | #[derive(Debug, Clone, Error)] |
| | pub enum BootstrapError { |
| | #[error("the request timed out")] |
| | Timeout { |
| | peer: PeerId, |
| | num_remaining: Option<u32>, |
| | }, |
| | } |
| |
|
| | /// The result of [`Behaviour::get_closest_peers`]. |
| | pub type GetClosestPeersResult = Result<GetClosestPeersOk, GetClosestPeersError>; |
| |
|
| | /// The successful result of [`Behaviour::get_closest_peers`]. |
| | #[derive(Debug, Clone)] |
| | pub struct GetClosestPeersOk { |
| | pub key: Vec<u8>, |
| | pub peers: Vec<PeerInfo>, |
| | } |
| |
|
| | /// The error result of [`Behaviour::get_closest_peers`]. |
| | #[derive(Debug, Clone, Error)] |
| | pub enum GetClosestPeersError { |
| | #[error("the request timed out")] |
| | Timeout { key: Vec<u8>, peers: Vec<PeerInfo> }, |
| | } |
| |
|
| | impl GetClosestPeersError { |
| | /// Gets the key for which the operation failed. |
| | pub fn key(&self) -> &Vec<u8> { |
| | match self { |
| | GetClosestPeersError::Timeout { key, .. } => key, |
| | } |
| | } |
| |
|
| | /// Extracts the key for which the operation failed, |
| | /// consuming the error. |
| | pub fn into_key(self) -> Vec<u8> { |
| | match self { |
| | GetClosestPeersError::Timeout { key, .. } => key, |
| | } |
| | } |
| | } |
| |
|
| | /// The result of [`Behaviour::get_providers`]. |
| | pub type GetProvidersResult = Result<GetProvidersOk, GetProvidersError>; |
| |
|
| | /// The successful result of [`Behaviour::get_providers`]. |
| | #[derive(Debug, Clone)] |
| | pub enum GetProvidersOk { |
| | FoundProviders { |
| | key: record::Key, |
| | /// The new set of providers discovered. |
| | providers: HashSet<PeerId>, |
| | }, |
| | FinishedWithNoAdditionalRecord { |
| | closest_peers: Vec<PeerId>, |
| | }, |
| | } |
| |
|
| | /// The error result of [`Behaviour::get_providers`]. |
| | #[derive(Debug, Clone, Error)] |
| | pub enum GetProvidersError { |
| | #[error("the request timed out")] |
| | Timeout { |
| | key: record::Key, |
| | closest_peers: Vec<PeerId>, |
| | }, |
| | } |
| |
|
| | impl GetProvidersError { |
| | /// Gets the key for which the operation failed. |
| | pub fn key(&self) -> &record::Key { |
| | match self { |
| | GetProvidersError::Timeout { key, .. } => key, |
| | } |
| | } |
| |
|
| | /// Extracts the key for which the operation failed, |
| | /// consuming the error. |
| | pub fn into_key(self) -> record::Key { |
| | match self { |
| | GetProvidersError::Timeout { key, .. } => key, |
| | } |
| | } |
| | } |
| |
|
| | /// The result of publishing a provider record. |
| | pub type AddProviderResult = Result<AddProviderOk, AddProviderError>; |
| |
|
| | /// The successful result of publishing a provider record. |
| | #[derive(Debug, Clone)] |
| | pub struct AddProviderOk { |
| | pub key: record::Key, |
| | } |
| |
|
| | /// The possible errors when publishing a provider record. |
| | #[derive(Debug, Clone, Error)] |
| | pub enum AddProviderError { |
| | #[error("the request timed out")] |
| | Timeout { key: record::Key }, |
| | } |
| |
|
| | impl AddProviderError { |
| | /// Gets the key for which the operation failed. |
| | pub fn key(&self) -> &record::Key { |
| | match self { |
| | AddProviderError::Timeout { key, .. } => key, |
| | } |
| | } |
| |
|
| | /// Extracts the key for which the operation failed. |
| | pub fn into_key(self) -> record::Key { |
| | match self { |
| | AddProviderError::Timeout { key, .. } => key, |
| | } |
| | } |
| | } |
| |
|
| | impl From<kbucket::EntryView<kbucket::Key<PeerId>, Addresses>> for KadPeer { |
| | fn from(e: kbucket::EntryView<kbucket::Key<PeerId>, Addresses>) -> KadPeer { |
| | KadPeer { |
| | node_id: e.node.key.into_preimage(), |
| | multiaddrs: e.node.value.into_vec(), |
| | connection_ty: match e.status { |
| | NodeStatus::Connected => ConnectionType::Connected, |
| | NodeStatus::Disconnected => ConnectionType::NotConnected, |
| | }, |
| | } |
| | } |
| | } |
| |
|
| | /// The execution context of a [`QueryInfo::AddProvider`] query. |
| | #[derive(Debug, Copy, Clone, PartialEq, Eq)] |
| | pub enum AddProviderContext { |
| | /// The context is a [`Behaviour::start_providing`] operation. |
| | Publish, |
| | /// The context is periodic republishing of provider announcements |
| | /// initiated earlier via [`Behaviour::start_providing`]. |
| | Republish, |
| | } |
| |
|
| | /// The execution context of a [`QueryInfo::PutRecord`] query. |
| | #[derive(Debug, Copy, Clone, PartialEq, Eq)] |
| | pub enum PutRecordContext { |
| | /// The context is a [`Behaviour::put_record`] operation. |
| | Publish, |
| | /// The context is periodic republishing of records stored |
| | /// earlier via [`Behaviour::put_record`]. |
| | Republish, |
| | /// The context is periodic replication (i.e. without extending |
| | /// the record TTL) of stored records received earlier from another peer. |
| | Replicate, |
| | /// The context is a custom record store operation targeting specific |
| | /// peers initiated by [`Behaviour::put_record_to`]. |
| | Custom, |
| | } |
| |
|
| | /// Information about a running query. |
| | #[derive(Debug, Clone)] |
| | pub enum QueryInfo { |
| | /// A query initiated by [`Behaviour::bootstrap`]. |
| | Bootstrap { |
| | /// The targeted peer of interest. |
| | peer: PeerId, |
| | /// The remaining random peer IDs to query, one per |
| | /// bucket that still needs refreshing. |
| | /// |
| | /// This is `None` if the initial self-lookup has not |
| | /// yet completed and `Some` with an exhausted iterator |
| | /// if bootstrapping is complete. |
| | remaining: Option<vec::IntoIter<kbucket::Key<PeerId>>>, |
| | step: ProgressStep, |
| | }, |
| |
|
| | /// A (repeated) query initiated by [`Behaviour::get_closest_peers`]. |
| | GetClosestPeers { |
| | /// The key being queried (the preimage). |
| | key: Vec<u8>, |
| | /// Current index of events. |
| | step: ProgressStep, |
| | }, |
| |
|
| | /// A (repeated) query initiated by [`Behaviour::get_providers`]. |
| | GetProviders { |
| | /// The key for which to search for providers. |
| | key: record::Key, |
| | /// The number of providers found so far. |
| | providers_found: usize, |
| | /// Current index of events. |
| | step: ProgressStep, |
| | }, |
| |
|
| | /// A query initiated by [`Behaviour::start_providing`]. |
| | AddProvider { |
| | /// The record key. |
| | key: record::Key, |
| | /// The current phase of the query. |
| | phase: AddProviderPhase, |
| | /// The execution context of the query. |
| | context: AddProviderContext, |
| | }, |
| |
|
| | /// A query initiated by [`Behaviour::put_record`]. |
| | PutRecord { |
| | record: Record, |
| | /// The expected quorum of responses w.r.t. the replication factor. |
| | quorum: NonZeroUsize, |
| | /// The current phase of the query. |
| | phase: PutRecordPhase, |
| | /// The execution context of the query. |
| | context: PutRecordContext, |
| | }, |
| |
|
| | /// A (repeated) query initiated by [`Behaviour::get_record`]. |
| | GetRecord { |
| | /// The key to look for. |
| | key: record::Key, |
| | /// Current index of events. |
| | step: ProgressStep, |
| | /// Did we find at least one record? |
| | found_a_record: bool, |
| | /// The peers closest to the `key` that were queried |
| | /// but did not return a record. |
| | cache_candidates: BTreeMap<kbucket::Distance, PeerId>, |
| | }, |
| | } |
| |
|
| | impl QueryInfo { |
| | /// Creates an event for a handler to issue an outgoing request in the |
| | /// context of a query. |
| | fn to_request(&self, query_id: QueryId) -> HandlerIn { |
| | match &self { |
| | QueryInfo::Bootstrap { peer, .. } => HandlerIn::FindNodeReq { |
| | key: peer.to_bytes(), |
| | query_id, |
| | }, |
| | QueryInfo::GetClosestPeers { key, .. } => HandlerIn::FindNodeReq { |
| | key: key.clone(), |
| | query_id, |
| | }, |
| | QueryInfo::GetProviders { key, .. } => HandlerIn::GetProvidersReq { |
| | key: key.clone(), |
| | query_id, |
| | }, |
| | QueryInfo::AddProvider { key, phase, .. } => match phase { |
| | AddProviderPhase::GetClosestPeers => HandlerIn::FindNodeReq { |
| | key: key.to_vec(), |
| | query_id, |
| | }, |
| | AddProviderPhase::AddProvider { |
| | provider_id, |
| | external_addresses, |
| | .. |
| | } => HandlerIn::AddProvider { |
| | key: key.clone(), |
| | provider: crate::protocol::KadPeer { |
| | node_id: *provider_id, |
| | multiaddrs: external_addresses.clone(), |
| | connection_ty: crate::protocol::ConnectionType::Connected, |
| | }, |
| | query_id, |
| | }, |
| | }, |
| | QueryInfo::GetRecord { key, .. } => HandlerIn::GetRecord { |
| | key: key.clone(), |
| | query_id, |
| | }, |
| | QueryInfo::PutRecord { record, phase, .. } => match phase { |
| | PutRecordPhase::GetClosestPeers => HandlerIn::FindNodeReq { |
| | key: record.key.to_vec(), |
| | query_id, |
| | }, |
| | PutRecordPhase::PutRecord { .. } => HandlerIn::PutRecord { |
| | record: record.clone(), |
| | query_id, |
| | }, |
| | }, |
| | } |
| | } |
| | } |
| |
|
| | /// The phases of a [`QueryInfo::AddProvider`] query. |
| | #[derive(Debug, Clone)] |
| | pub enum AddProviderPhase { |
| | /// The query is searching for the closest nodes to the record key. |
| | GetClosestPeers, |
| |
|
| | /// The query advertises the local node as a provider for the key to |
| | /// the closest nodes to the key. |
| | AddProvider { |
| | /// The local peer ID that is advertised as a provider. |
| | provider_id: PeerId, |
| | /// The external addresses of the provider being advertised. |
| | external_addresses: Vec<Multiaddr>, |
| | /// Query statistics from the finished `GetClosestPeers` phase. |
| | get_closest_peers_stats: QueryStats, |
| | }, |
| | } |
| |
|
| | /// The phases of a [`QueryInfo::PutRecord`] query. |
| | #[derive(Debug, Clone, PartialEq, Eq)] |
| | pub enum PutRecordPhase { |
| | /// The query is searching for the closest nodes to the record key. |
| | GetClosestPeers, |
| |
|
| | /// The query is replicating the record to the closest nodes to the key. |
| | PutRecord { |
| | /// A list of peers the given record has been successfully replicated to. |
| | success: Vec<PeerId>, |
| | /// Query statistics from the finished `GetClosestPeers` phase. |
| | get_closest_peers_stats: QueryStats, |
| | }, |
| | } |
| |
|
| | /// A mutable reference to a running query. |
| | pub struct QueryMut<'a> { |
| | query: &'a mut Query, |
| | } |
| |
|
| | impl<'a> QueryMut<'a> { |
| | pub fn id(&self) -> QueryId { |
| | self.query.id() |
| | } |
| |
|
| | /// Gets information about the type ("kind") of the query. |
| | pub fn info(&self) -> &QueryInfo { |
| | &self.query.info |
| | } |
| |
|
| | /// Gets execution statistics about the query. |
| | /// |
| | /// For a multi-phase query such as `put_record`, these are the |
| | /// statistics of the current phase. |
| | pub fn stats(&self) -> &QueryStats { |
| | self.query.stats() |
| | } |
| |
|
| | /// Finishes the query asap, without waiting for the |
| | /// regular query termination conditions. |
| | pub fn finish(&mut self) { |
| | self.query.finish() |
| | } |
| | } |
| |
|
| | /// An immutable reference to a running query. |
| | pub struct QueryRef<'a> { |
| | query: &'a Query, |
| | } |
| |
|
| | impl<'a> QueryRef<'a> { |
| | pub fn id(&self) -> QueryId { |
| | self.query.id() |
| | } |
| |
|
| | /// Gets information about the type ("kind") of the query. |
| | pub fn info(&self) -> &QueryInfo { |
| | &self.query.info |
| | } |
| |
|
| | /// Gets execution statistics about the query. |
| | /// |
| | /// For a multi-phase query such as `put_record`, these are the |
| | /// statistics of the current phase. |
| | pub fn stats(&self) -> &QueryStats { |
| | self.query.stats() |
| | } |
| | } |
| |
|
| | /// The error returned when a query cannot be started because the routing table is empty. |
| | #[derive(Debug, Clone)] |
| | pub struct NoKnownPeers(); |
| |
|
| | impl fmt::Display for NoKnownPeers { |
| | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
| | write!(f, "No known peers.") |
| | } |
| | } |
| |
|
| | impl std::error::Error for NoKnownPeers {} |
| |
|
| | /// The possible outcomes of [`Behaviour::add_address`]. |
| | #[derive(Debug, Clone, Copy, PartialEq, Eq)] |
| | pub enum RoutingUpdate { |
| | /// The given peer and address have been added to the |
| | /// routing table. |
| | Success, |
| | /// The peer and address are pending insertion into |
| | /// the routing table, if a disconnected peer fails |
| | /// to respond. If the given peer and address end up |
| | /// in the routing table, [`Event::RoutingUpdated`] |
| | /// is eventually emitted. |
| | Pending, |
| | /// The routing table update failed, either because the |
| | /// corresponding bucket for the peer is full and the |
| | /// pending slot(s) are occupied, or because the given |
| | /// peer ID is deemed invalid (e.g. refers to the local |
| | /// peer ID). |
| | Failed, |
| | } |
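| |
|
| | // Editor's note: an illustrative sketch, not upstream code. A caller might |
| | // map the outcome of `Behaviour::add_address` like so. |
| | #[cfg(test)] |
| | mod routing_update_example { |
| | use super::*; |
| |
|
| | fn describe(update: RoutingUpdate) -> &'static str { |
| | match update { |
| | RoutingUpdate::Success => "added to the routing table", |
| | RoutingUpdate::Pending => "pending eviction of an unresponsive peer", |
| | RoutingUpdate::Failed => "bucket full or invalid peer ID", |
| | } |
| | } |
| |
|
| | #[test] |
| | fn describes_outcomes() { |
| | assert_eq!(describe(RoutingUpdate::Success), "added to the routing table"); |
| | assert_eq!(describe(RoutingUpdate::Failed), "bucket full or invalid peer ID"); |
| | } |
| | } |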
| |
|
| | #[derive(PartialEq, Copy, Clone, Debug)] |
| | pub enum Mode { |
| | Client, |
| | Server, |
| | } |
| |
|
| | impl fmt::Display for Mode { |
| | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
| | match self { |
| | Mode::Client => write!(f, "client"), |
| | Mode::Server => write!(f, "server"), |
| | } |
| | } |
| | } |
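| |
|
| | // Editor's note: an illustrative sketch, not upstream code. The `Display` |
| | // impl yields the lowercase names used in log output. |
| | #[cfg(test)] |
| | mod mode_display_example { |
| | use super::*; |
| |
|
| | #[test] |
| | fn renders_lowercase() { |
| | assert_eq!(Mode::Client.to_string(), "client"); |
| | assert_eq!(Mode::Server.to_string(), "server"); |
| | } |
| | } |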
| |
|
| | fn to_comma_separated_list<T>(confirmed_external_addresses: &[T]) -> String |
| | where |
| | T: ToString, |
| | { |
| | confirmed_external_addresses |
| | .iter() |
| | .map(|addr| addr.to_string()) |
| | .collect::<Vec<_>>() |
| | .join(", ") |
| | } |
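| |
|
| | // Editor's note: an illustrative sketch, not upstream code. The helper joins |
| | // any `ToString` items with ", ", e.g. for logging confirmed external addresses. |
| | #[cfg(test)] |
| | mod comma_list_example { |
| | #[test] |
| | fn joins_with_comma_and_space() { |
| | assert_eq!(super::to_comma_separated_list(&["a", "b"]), "a, b"); |
| | assert_eq!(super::to_comma_separated_list::<String>(&[]), ""); |
| | } |
| | } |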
| |
|