use super::*;
use core::sync::atomic::Ordering;
use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};
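
/// A single bucket of entries in the routing table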
pub struct Bucket {
    routing_table: RoutingTable,
    entries: BTreeMap<DHTKey, Arc<BucketEntry>>,
    newest_entry: Option<DHTKey>,
}

pub(super) type EntriesIter<'a> = alloc::collections::btree_map::Iter<'a, DHTKey, Arc<BucketEntry>>;
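
/// Serialized form of a single bucket entry, as stored by save_bucket()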
#[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
#[archive_attr(repr(C), derive(CheckBytes))]
struct BucketEntryData {
    key: DHTKey,
    value: Vec<u8>,
}
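
/// Serialized form of a whole bucket: its entries plus the newest-entry marker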
#[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
#[archive_attr(repr(C), derive(CheckBytes))]
struct BucketData {
    entries: Vec<BucketEntryData>,
    newest_entry: Option<DHTKey>,
}
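
/// Map an entry state to its kick priority: dead nodes sort first,
/// then unreliable nodes, then reliable ones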
|
2022-11-06 21:07:56 +00:00
|
|
|
|
2021-11-22 16:28:30 +00:00
|
|
|
fn state_ordering(state: BucketEntryState) -> usize {
|
|
|
|
match state {
|
|
|
|
BucketEntryState::Dead => 0,
|
|
|
|
BucketEntryState::Unreliable => 1,
|
|
|
|
BucketEntryState::Reliable => 2,
|
|
|
|
}
|
|
|
|
}

impl Bucket {
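    /// Create a new, empty bucket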
    pub fn new(routing_table: RoutingTable) -> Self {
        Self {
            routing_table,
            entries: BTreeMap::new(),
            newest_entry: None,
        }
    }
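
    /// Restore this bucket's entries and newest-entry marker from
    /// bytes previously produced by save_bucket()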
    pub(super) fn load_bucket(&mut self, data: Vec<u8>) -> EyreResult<()> {
        let bucket_data: BucketData = from_rkyv(data)?;

        for e in bucket_data.entries {
            let entryinner = from_rkyv(e.value).wrap_err("failed to deserialize bucket entry")?;
            self.entries
                .insert(e.key, Arc::new(BucketEntry::new_with_inner(entryinner)));
        }

        self.newest_entry = bucket_data.newest_entry;

        Ok(())
    }
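
    /// Serialize this bucket's entries and newest-entry marker with rkyv
    /// so they can be persisted and later reloaded with load_bucket()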
    pub(super) fn save_bucket(&self) -> EyreResult<Vec<u8>> {
        let mut entries = Vec::new();
        for (k, v) in &self.entries {
            let entry_bytes = v.with_inner(|e| to_rkyv(e))?;
            entries.push(BucketEntryData {
                key: *k,
                value: entry_bytes,
            });
        }
        let bucket_data = BucketData {
            entries,
            newest_entry: self.newest_entry.clone(),
        };
        let out = to_rkyv(&bucket_data)?;
        Ok(out)
    }
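
    /// Add a new entry for a node id to this bucket and return a noderef to it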
    pub(super) fn add_entry(&mut self, node_id: DHTKey) -> NodeRef {
        log_rtab!("Node added: {}", node_id.encode());

        // Add new entry
        self.entries.insert(node_id, Arc::new(BucketEntry::new()));

        // This is now the newest bucket entry
        self.newest_entry = Some(node_id);

        // Get a node ref to return
        let entry = self.entries.get(&node_id).unwrap().clone();
        NodeRef::new(self.routing_table.clone(), node_id, entry, None)
    }
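
    /// Remove a node id's entry from this bucket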
    pub(super) fn remove_entry(&mut self, node_id: &DHTKey) {
        log_rtab!("Node removed: {}", node_id);

        // Remove the entry
        self.entries.remove(node_id);

        // newest_entry is updated by kick()
    }
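
    /// Get the entry for a node id, if it exists in this bucket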
    pub(super) fn entry(&self, key: &DHTKey) -> Option<Arc<BucketEntry>> {
        self.entries.get(key).cloned()
    }
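
    /// Iterate over all entries in this bucket, in node id order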
    pub(super) fn entries(&self) -> EntriesIter {
        self.entries.iter()
    }
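
    /// Attempt to shrink this bucket to at most `bucket_depth` entries,
    /// evicting dead and unreliable nodes first and never evicting an entry
    /// that still has outstanding references. Also recomputes `newest_entry`.
    /// Returns the set of evicted node ids, or None if nothing was removed.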
    pub(super) fn kick(&mut self, bucket_depth: usize) -> Option<BTreeSet<DHTKey>> {
        // Get number of entries to attempt to purge from bucket
        let bucket_len = self.entries.len();

        // Don't bother kicking bucket unless it is full
        if bucket_len <= bucket_depth {
            return None;
        }

        // Try to purge the newest entries that overflow the bucket
        let mut dead_node_ids: BTreeSet<DHTKey> = BTreeSet::new();
        let mut extra_entries = bucket_len - bucket_depth;

        // Get the sorted list of entries by their kick order
        let mut sorted_entries: Vec<(DHTKey, Arc<BucketEntry>)> = self
            .entries
            .iter()
            .map(|(k, v)| (k.clone(), v.clone()))
            .collect();
        let cur_ts = intf::get_timestamp();
        sorted_entries.sort_by(|a, b| -> core::cmp::Ordering {
            if a.0 == b.0 {
                return core::cmp::Ordering::Equal;
            }
            a.1.with_inner(|ea| {
                b.1.with_inner(|eb| {
                    let astate = state_ordering(ea.state(cur_ts));
                    let bstate = state_ordering(eb.state(cur_ts));
                    // first kick dead nodes, then unreliable nodes
                    if astate < bstate {
                        return core::cmp::Ordering::Less;
                    }
                    if astate > bstate {
                        return core::cmp::Ordering::Greater;
                    }
                    // then kick by time added, most recent nodes are kicked first
                    let ata = ea.peer_stats().time_added;
                    let bta = eb.peer_stats().time_added;
                    bta.cmp(&ata)
                })
            })
        });

        // Reset the newest entry; it is recomputed as we sweep the sorted list
        self.newest_entry = None;
        for entry in sorted_entries {
            // If we're not evicting more entries, exit, noting this may be the newest entry
            if extra_entries == 0 {
                // The first 'live' entry we find is our newest entry
                if self.newest_entry.is_none() {
                    self.newest_entry = Some(entry.0);
                }
                break;
            }
            extra_entries -= 1;

            // if this entry has references we can't drop it yet
            if entry.1.ref_count.load(Ordering::Acquire) > 0 {
                // The first 'live' entry we find is our newest entry
                if self.newest_entry.is_none() {
                    self.newest_entry = Some(entry.0);
                }
                continue;
            }

            // if it has no references, let's evict it
            dead_node_ids.insert(entry.0);
        }

        // Now purge the dead node ids
        for id in &dead_node_ids {
            // Remove the entry
            self.remove_entry(id);
        }

        if !dead_node_ids.is_empty() {
            Some(dead_node_ids)
        } else {
            None
        }
    }
}