commit 9dcfcd02a0
parent 51b509221c

    atomic routing domain editor fixes
@@ -6,7 +6,7 @@ impl RoutingTable {
         let inner = self.inner.read();
         out += "Routing Table Info:\n";
 
-        out += &format!(" Node Id: {}\n", inner.node_id.encode());
+        out += &format!(" Node Id: {}\n", self.unlocked_inner.node_id.encode());
         out += &format!(
             " Self Latency Stats Accounting: {:#?}\n\n",
             inner.self_latency_stats_accounting
@@ -212,7 +212,7 @@ impl RoutingTable {
         T: FnMut(DHTKey, Option<Arc<BucketEntry>>) -> O,
     {
         let inner = self.inner.read();
-        let self_node_id = inner.node_id;
+        let self_node_id = self.unlocked_inner.node_id;
 
         // collect all the nodes for sorting
         let mut nodes =
@@ -340,7 +340,7 @@ impl RoutingTable {
     {
         let cur_ts = intf::get_timestamp();
         let node_count = {
-            let c = self.config.get();
+            let c = self.unlocked_inner.config.get();
             c.network.dht.max_find_node_count as usize
         };
         let out = self.find_peers_with_sort_and_filter(
@@ -34,41 +34,53 @@ pub struct RecentPeersEntry {
 
 /// RoutingTable rwlock-internal data
 struct RoutingTableInner {
-    // The current node's public DHT key
-    node_id: DHTKey,
-    node_id_secret: DHTKeySecret, // The current node's DHT key secret
-    buckets: Vec<Bucket>, // Routing table buckets that hold entries
-    kick_queue: BTreeSet<usize>, // Buckets to kick on our next kick task
-    bucket_entry_count: usize, // A fast counter for the number of entries in the table, total
-    public_internet_routing_domain: PublicInternetRoutingDomainDetail, // The public internet
-    local_network_routing_domain: LocalInternetRoutingDomainDetail, // The dial info we use on the local network
-    self_latency_stats_accounting: LatencyStatsAccounting, // Interim accounting mechanism for this node's RPC latency to any other node
-    self_transfer_stats_accounting: TransferStatsAccounting, // Interim accounting mechanism for the total bandwidth to/from this node
-    self_transfer_stats: TransferStatsDownUp, // Statistics about the total bandwidth to/from this node
-    recent_peers: LruCache<DHTKey, RecentPeersEntry>, // Peers we have recently communicated with
+    /// Routing table buckets that hold entries
+    buckets: Vec<Bucket>,
+    /// A fast counter for the number of entries in the table, total
+    bucket_entry_count: usize,
+    /// The public internet routing domain
+    public_internet_routing_domain: PublicInternetRoutingDomainDetail,
+    /// The dial info we use on the local network
+    local_network_routing_domain: LocalInternetRoutingDomainDetail,
+    /// Interim accounting mechanism for this node's RPC latency to any other node
+    self_latency_stats_accounting: LatencyStatsAccounting,
+    /// Interim accounting mechanism for the total bandwidth to/from this node
+    self_transfer_stats_accounting: TransferStatsAccounting,
+    /// Statistics about the total bandwidth to/from this node
+    self_transfer_stats: TransferStatsDownUp,
+    /// Peers we have recently communicated with
+    recent_peers: LruCache<DHTKey, RecentPeersEntry>,
 }
 
 #[derive(Clone, Debug, Default)]
 pub struct RoutingTableHealth {
+    /// Number of reliable (responsive) entries in the routing table
     pub reliable_entry_count: usize,
+    /// Number of unreliable (occasionally unresponsive) entries in the routing table
     pub unreliable_entry_count: usize,
+    /// Number of dead (always unresponsive) entries in the routing table
     pub dead_entry_count: usize,
 }
 
 struct RoutingTableUnlockedInner {
+    // Accessors
+    config: VeilidConfig,
     network_manager: NetworkManager,
 
-    // Background processes
+    /// The current node's public DHT key
+    node_id: DHTKey,
+    /// The current node's DHT key secret
+    node_id_secret: DHTKeySecret,
+    /// Buckets to kick on our next kick task
+    kick_queue: Mutex<BTreeSet<usize>>,
+    /// Background process for computing statistics
     rolling_transfers_task: TickTask<EyreReport>,
+    /// Background process to purge dead routing table entries when necessary
    kick_buckets_task: TickTask<EyreReport>,
 }
 
 #[derive(Clone)]
 pub struct RoutingTable {
-    config: VeilidConfig,
     inner: Arc<RwLock<RoutingTableInner>>,
     unlocked_inner: Arc<RoutingTableUnlockedInner>,
 }
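The hunk above is the heart of the commit: everything that is immutable after construction (node_id, node_id_secret, config) moves out of the RwLock-guarded RoutingTableInner into RoutingTableUnlockedInner, and the kick queue gets its own Mutex so it can be updated without touching the table-wide lock. Below is a minimal sketch of the pattern with illustrative types of my own, not Veilid's; the unwrapped .read()/.lock() calls in the diff suggest parking_lot-style locks, which is what this sketch assumes:

```rust
use std::collections::BTreeSet;
use std::sync::Arc;

use parking_lot::{Mutex, RwLock};

/// Mutable state: access requires the table-wide lock.
struct Inner {
    buckets: Vec<String>, // placeholder element type
}

/// Immutable-after-construction state, plus small independently locked pieces.
struct UnlockedInner {
    node_id: u64,                       // readable with no lock at all
    kick_queue: Mutex<BTreeSet<usize>>, // fine-grained lock, independent of `Inner`
}

#[derive(Clone)]
struct Table {
    inner: Arc<RwLock<Inner>>,
    unlocked_inner: Arc<UnlockedInner>,
}

impl Table {
    /// No lock needed: the id never changes after construction.
    fn node_id(&self) -> u64 {
        self.unlocked_inner.node_id
    }

    /// Touches only the kick queue; never contends with `inner` users.
    fn queue_kick(&self, idx: usize) {
        self.unlocked_inner.kick_queue.lock().insert(idx);
    }
}

fn main() {
    let table = Table {
        inner: Arc::new(RwLock::new(Inner { buckets: Vec::new() })),
        unlocked_inner: Arc::new(UnlockedInner {
            node_id: 42,
            kick_queue: Mutex::new(BTreeSet::new()),
        }),
    };
    table.queue_kick(3);
    assert_eq!(table.node_id(), 42); // no lock taken
    assert!(table.inner.read().buckets.is_empty());
}
```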
@@ -76,10 +88,7 @@ pub struct RoutingTable {
 impl RoutingTable {
     fn new_inner() -> RoutingTableInner {
         RoutingTableInner {
-            node_id: DHTKey::default(),
-            node_id_secret: DHTKeySecret::default(),
             buckets: Vec::new(),
-            kick_queue: BTreeSet::default(),
             public_internet_routing_domain: PublicInternetRoutingDomainDetail::default(),
             local_network_routing_domain: LocalInternetRoutingDomainDetail::default(),
             bucket_entry_count: 0,
@@ -90,12 +99,16 @@ impl RoutingTable {
         }
     }
     fn new_unlocked_inner(
-        _config: VeilidConfig,
+        config: VeilidConfig,
         network_manager: NetworkManager,
     ) -> RoutingTableUnlockedInner {
-        //let c = config.get();
+        let c = config.get();
         RoutingTableUnlockedInner {
+            config: config.clone(),
             network_manager,
+            node_id: c.network.node_id,
+            node_id_secret: c.network.node_id_secret,
+            kick_queue: Mutex::new(BTreeSet::default()),
             rolling_transfers_task: TickTask::new(ROLLING_TRANSFERS_INTERVAL_SECS),
             kick_buckets_task: TickTask::new(1),
         }
@@ -103,7 +116,6 @@ impl RoutingTable {
     pub fn new(network_manager: NetworkManager) -> Self {
         let config = network_manager.config();
         let this = Self {
-            config: config.clone(),
             inner: Arc::new(RwLock::new(Self::new_inner())),
             unlocked_inner: Arc::new(Self::new_unlocked_inner(config, network_manager)),
         };
@@ -137,11 +149,11 @@ impl RoutingTable {
     }
 
     pub fn node_id(&self) -> DHTKey {
-        self.inner.read().node_id
+        self.unlocked_inner.node_id
     }
 
     pub fn node_id_secret(&self) -> DHTKeySecret {
-        self.inner.read().node_id_secret
+        self.unlocked_inner.node_id_secret
     }
 
     fn routing_domain_for_address_inner(
@@ -377,11 +389,6 @@ impl RoutingTable {
             inner.buckets.push(bucket);
         }
 
-        // make local copy of node id for easy access
-        let c = self.config.get();
-        inner.node_id = c.network.node_id;
-        inner.node_id_secret = c.network.node_id_secret;
-
         Ok(())
     }
 
@@ -482,8 +489,8 @@ impl RoutingTable {
         }
     }
 
-    fn find_bucket_index(inner: &RoutingTableInner, node_id: DHTKey) -> usize {
-        distance(&node_id, &inner.node_id)
+    fn find_bucket_index(&self, node_id: DHTKey) -> usize {
+        distance(&node_id, &self.unlocked_inner.node_id)
             .first_nonzero_bit()
             .unwrap()
     }
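For context on the unwrap() above: find_bucket_index maps a node id to a bucket via the Kademlia-style XOR metric against our own id, taking the position of the first set bit of the distance, so ids sharing a longer prefix with us land in higher-numbered buckets. The unwrap is safe only because callers never pass our own id (lookup_node_ref returns early on it, as a later hunk shows). Below is a sketch of the arithmetic over plain 32-byte keys, with first_nonzero_bit written out; Veilid's actual DHTKey and distance internals may differ:

```rust
/// XOR distance between two 256-bit keys, as in Kademlia.
fn distance(a: &[u8; 32], b: &[u8; 32]) -> [u8; 32] {
    let mut d = [0u8; 32];
    for i in 0..32 {
        d[i] = a[i] ^ b[i];
    }
    d
}

/// Index of the first set bit, counting from the most significant bit.
/// `None` for an all-zero distance, i.e. a key equal to our own.
fn first_nonzero_bit(d: &[u8; 32]) -> Option<usize> {
    d.iter()
        .enumerate()
        .find(|(_, byte)| **byte != 0)
        .map(|(i, byte)| i * 8 + byte.leading_zeros() as usize)
}

fn main() {
    let ours = [0u8; 32];
    let mut near = ours;
    near[31] = 1; // differs only in the last bit: shares a 255-bit prefix
    let mut far = ours;
    far[0] = 0x80; // differs in the first bit: shares no prefix
    assert_eq!(first_nonzero_bit(&distance(&ours, &near)), Some(255));
    assert_eq!(first_nonzero_bit(&distance(&ours, &far)), Some(0));
    assert_eq!(first_nonzero_bit(&distance(&ours, &ours)), None);
}
```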
@@ -598,9 +605,8 @@ impl RoutingTable {
     }
 
     fn queue_bucket_kick(&self, node_id: DHTKey) {
-        let mut inner = self.inner.write();
-        let idx = Self::find_bucket_index(&*inner, node_id);
-        inner.kick_queue.insert(idx);
+        let idx = self.find_bucket_index(node_id);
+        self.unlocked_inner.kick_queue.lock().insert(idx);
     }
 
     // Create a node reference, possibly creating a bucket entry
@@ -620,7 +626,7 @@ impl RoutingTable {
         let mut inner = self.inner.write();
 
         // Look up existing entry
-        let idx = Self::find_bucket_index(&*inner, node_id);
+        let idx = self.find_bucket_index(node_id);
         let noderef = {
             let bucket = &inner.buckets[idx];
             let entry = bucket.entry(&node_id);
@@ -641,7 +647,7 @@ impl RoutingTable {
             entry.with_mut(update_func);
 
         // Kick the bucket
-        inner.kick_queue.insert(idx);
+        self.unlocked_inner.kick_queue.lock().insert(idx);
         log_rtab!(debug "Routing table now has {} nodes, {} live", cnt, Self::get_entry_count_inner(&mut *inner, RoutingDomainSet::all(), BucketEntryState::Unreliable));
 
         nr
@@ -662,12 +668,12 @@ impl RoutingTable {
     }
 
     pub fn lookup_node_ref(&self, node_id: DHTKey) -> Option<NodeRef> {
-        let inner = self.inner.read();
-        if node_id == inner.node_id {
+        if node_id == self.unlocked_inner.node_id {
             log_rtab!(debug "can't look up own node id in routing table");
             return None;
         }
-        let idx = Self::find_bucket_index(&*inner, node_id);
+        let idx = self.find_bucket_index(node_id);
+        let inner = self.inner.read();
         let bucket = &inner.buckets[idx];
         bucket
             .entry(&node_id)
@@ -749,7 +755,7 @@ impl RoutingTable {
         self.unlocked_inner.rolling_transfers_task.tick().await?;
 
         // Kick buckets task
-        let kick_bucket_queue_count = { self.inner.read().kick_queue.len() };
+        let kick_bucket_queue_count = self.unlocked_inner.kick_queue.lock().len();
         if kick_bucket_queue_count > 0 {
             self.unlocked_inner.kick_buckets_task.tick().await?;
         }
@@ -72,10 +72,10 @@ impl RoutingDomainEditor {
     pub async fn commit(self) {
         let mut changed = false;
         {
+            let node_id = self.routing_table.node_id();
             let mut inner = self.routing_table.inner.write();
             let inner = &mut *inner;
-            let node_id = inner.node_id;
 
             RoutingTable::with_routing_domain_mut(inner, self.routing_domain, |detail| {
                 for change in self.changes {
                     match change {
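A detail worth noting in the commit() change above: the node id is now fetched through the lock-free node_id() accessor before the write lock on inner is taken. In the old layout the id lived inside inner, so it had to be read through the already-held write guard; calling an accessor that takes its own read lock from inside the write-locked region would deadlock a non-reentrant RwLock like parking_lot's. A hypothetical minimal illustration of that hazard, with names of my own rather than Veilid's:

```rust
use parking_lot::RwLock;

struct Table {
    inner: RwLock<u64>, // stands in for the routing table state
}

impl Table {
    /// Accessor that takes the read lock internally.
    fn value(&self) -> u64 {
        *self.inner.read()
    }

    fn commit(&self) {
        // Read everything we need *before* taking the write lock ...
        let v = self.value();
        let mut guard = self.inner.write();
        *guard += v;
        // ... because calling self.value() here instead would block forever:
        // parking_lot's RwLock is not reentrant.
    }
}

fn main() {
    let t = Table { inner: RwLock::new(21) };
    t.commit();
    assert_eq!(t.value(), 42);
}
```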
@@ -37,9 +37,10 @@ impl RoutingTable {
         _last_ts: u64,
         cur_ts: u64,
     ) -> EyreResult<()> {
+        let kick_queue: Vec<usize> = core::mem::take(&mut *self.unlocked_inner.kick_queue.lock())
+            .into_iter()
+            .collect();
         let mut inner = self.inner.write();
-        let kick_queue: Vec<usize> = inner.kick_queue.iter().map(|v| *v).collect();
-        inner.kick_queue.clear();
         for idx in kick_queue {
             Self::kick_bucket(&mut *inner, idx)
         }
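With the kick queue now behind its own Mutex rather than inner's write lock, the drain above has to be atomic on its own: core::mem::take swaps the set for an empty one inside a single lock acquisition, so the queue lock is never held while buckets are kicked, and kicks queued concurrently simply land in the fresh set for the next tick. A standalone sketch of the same drain (std's Mutex here, so lock() returns a Result, unlike the guard style in the diff):

```rust
use std::collections::BTreeSet;
use std::sync::Mutex;

/// Atomically empty the queue and return its contents in sorted order.
fn drain_queue(queue: &Mutex<BTreeSet<usize>>) -> Vec<usize> {
    // One lock acquisition: take ownership of the set, leaving an empty one.
    let taken = core::mem::take(&mut *queue.lock().unwrap());
    taken.into_iter().collect()
}

fn main() {
    let queue = Mutex::new(BTreeSet::from([3, 1, 2]));
    assert_eq!(drain_queue(&queue), vec![1, 2, 3]); // BTreeSet iterates in order
    assert!(queue.lock().unwrap().is_empty()); // ready for the next tick
}
```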