commit 7e0d7dad06
parent cd0cd78e30
Author: John Smith
Date:   2022-07-10 17:36:50 -04:00

108 changed files with 1378 additions and 1535 deletions

View File

@@ -69,12 +69,12 @@ pub struct RoutingTableHealth {
 struct RoutingTableUnlockedInner {
     // Background processes
-    rolling_transfers_task: TickTask,
-    bootstrap_task: TickTask,
-    peer_minimum_refresh_task: TickTask,
-    ping_validator_task: TickTask,
+    rolling_transfers_task: TickTask<EyreReport>,
+    bootstrap_task: TickTask<EyreReport>,
+    peer_minimum_refresh_task: TickTask<EyreReport>,
+    ping_validator_task: TickTask<EyreReport>,
     node_info_update_single_future: MustJoinSingleFuture<()>,
-    kick_buckets_task: TickTask,
+    kick_buckets_task: TickTask<EyreReport>,
 }
 #[derive(Clone)]
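
Note on the TickTask change above: the task handles are now generic over the error type their routines return, so building them as TickTask<EyreReport> forces every routine wired in below to return EyreResult<()>. A minimal sketch of the idea only, assuming EyreReport aliases eyre::Report; TickTaskSketch and PinBoxFuture are illustrative names, not the repository's actual TickTask API:

// Illustrative sketch -- not the real TickTask from this repository.
use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;

type PinBoxFuture<T> = Pin<Box<dyn Future<Output = T> + Send + 'static>>;

// A tick task generic over the error type E produced by its routine.
pub struct TickTaskSketch<E: Send + 'static> {
    routine: Arc<dyn Fn(u64, u64) -> PinBoxFuture<Result<(), E>> + Send + Sync>,
}

impl<E: Send + 'static> TickTaskSketch<E> {
    // Run the stored routine once with the last/current timestamps;
    // a failure comes back as E (here, an eyre report).
    pub async fn tick(&self, last_ts: u64, cur_ts: u64) -> Result<(), E> {
        (self.routine)(last_ts, cur_ts).await
    }
}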
@@ -283,7 +283,7 @@ impl RoutingTable {
         domain: RoutingDomain,
         dial_info: DialInfo,
         class: DialInfoClass,
-    ) -> Result<(), String> {
+    ) -> EyreResult<()> {
         log_rtab!(debug
             "Registering dial_info with:\n domain: {:?}\n dial_info: {:?}\n class: {:?}",
             domain, dial_info, class
@@ -298,15 +298,13 @@ impl RoutingTable {
             && matches!(domain, RoutingDomain::PublicInternet)
             && dial_info.is_local()
         {
-            return Err("shouldn't be registering local addresses as public".to_owned())
-                .map_err(logthru_rtab!(error));
+            bail!("shouldn't be registering local addresses as public");
         }
         if !dial_info.is_valid() {
-            return Err(format!(
+            bail!(
                 "shouldn't be registering invalid addresses: {:?}",
                 dial_info
-            ))
-            .map_err(logthru_rtab!(error));
+            );
         }
         let mut inner = self.inner.write();
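
Note on the bail! conversions above: eyre's bail! macro builds a Report and returns Err(...) in one step, replacing the return Err(String) / map_err chains. A standalone sketch of the pattern, assuming EyreResult is an alias for eyre::Result; check_dial_info and its parameters are hypothetical stand-ins for the validation above:

use eyre::{bail, Result as EyreResult};

// Hypothetical stand-in for the dial-info validation in register_dial_info.
fn check_dial_info(dial_info: &str, is_local: bool, is_valid: bool) -> EyreResult<()> {
    if is_local {
        // bail! constructs an eyre::Report and returns Err immediately.
        bail!("shouldn't be registering local addresses as public");
    }
    if !is_valid {
        // bail! also accepts format arguments, like format!.
        bail!("shouldn't be registering invalid addresses: {:?}", dial_info);
    }
    Ok(())
}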
@@ -379,7 +377,7 @@ impl RoutingTable {
         }
     }
-    pub async fn init(&self) -> Result<(), String> {
+    pub async fn init(&self) -> EyreResult<()> {
         let mut inner = self.inner.write();
         // Size the buckets (one per bit)
         inner.buckets.reserve(DHT_KEY_LENGTH * 8);
@@ -578,13 +576,13 @@ impl RoutingTable {
     // Create a node reference, possibly creating a bucket entry
     // the 'update_func' closure is called on the node, and, if created,
     // in a locked fashion as to ensure the bucket entry state is always valid
-    pub fn create_node_ref<F>(&self, node_id: DHTKey, update_func: F) -> Result<NodeRef, String>
+    pub fn create_node_ref<F>(&self, node_id: DHTKey, update_func: F) -> EyreResult<NodeRef>
     where
         F: FnOnce(&mut BucketEntryInner),
     {
         // Ensure someone isn't trying register this node itself
         if node_id == self.node_id() {
-            return Err("can't register own node".to_owned()).map_err(logthru_rtab!(error));
+            bail!("can't register own node");
         }
         // Lock this entire operation
@@ -647,14 +645,14 @@ impl RoutingTable {
         &self,
         node_id: DHTKey,
         signed_node_info: SignedNodeInfo,
-    ) -> Result<NodeRef, String> {
+    ) -> EyreResult<NodeRef> {
         // validate signed node info is not something malicious
         if node_id == self.node_id() {
-            return Err("can't register own node id in routing table".to_owned());
+            bail!("can't register own node id in routing table");
         }
         if let Some(rpi) = &signed_node_info.node_info.relay_peer_info {
             if rpi.node_id.key == node_id {
-                return Err("node can not be its own relay".to_owned());
+                bail!("node can not be its own relay");
             }
         }
@@ -672,7 +670,7 @@ impl RoutingTable {
         node_id: DHTKey,
         descriptor: ConnectionDescriptor,
         timestamp: u64,
-    ) -> Result<NodeRef, String> {
+    ) -> EyreResult<NodeRef> {
         let nr = self.create_node_ref(node_id, |e| {
             // set the most recent node address for connection finding and udp replies
             e.set_last_connection(descriptor, timestamp);
@@ -681,53 +679,9 @@ impl RoutingTable {
         Ok(nr)
     }
-    // fn operate_on_bucket_entry_inner_locked<T, F>(
-    //     inner: &RoutingTableInner,
-    //     node_id: DHTKey,
-    //     f: F,
-    // ) -> T
-    // where
-    //     F: FnOnce(&BucketEntryInner) -> T,
-    // {
-    //     let idx = Self::find_bucket_index(&*inner, node_id);
-    //     let bucket = &inner.buckets[idx];
-    //     let entry = bucket.entry(&node_id).unwrap();
-    //     entry.with(f)
-    // }
-    // fn operate_on_bucket_entry_inner_locked_mut<T, F>(
-    //     inner: &RoutingTableInner,
-    //     node_id: DHTKey,
-    //     f: F,
-    // ) -> T
-    // where
-    //     F: FnOnce(&mut BucketEntryInner) -> T,
-    // {
-    //     let idx = Self::find_bucket_index(&*inner, node_id);
-    //     let bucket = &inner.buckets[idx];
-    //     let entry = bucket.entry(&node_id).unwrap();
-    //     entry.with_mut(f)
-    // }
-    // fn operate_on_bucket_entry<T, F>(&self, node_id: DHTKey, f: F) -> T
-    // where
-    //     F: FnOnce(&BucketEntryInner) -> T,
-    // {
-    //     let inner = self.inner.read();
-    //     Self::operate_on_bucket_entry_inner_locked(&mut *inner, node_id, f)
-    // }
-    // fn operate_on_bucket_entry_mut<T, F>(&self, node_id: DHTKey, f: F) -> T
-    // where
-    //     F: FnOnce(&mut BucketEntryInner) -> T,
-    // {
-    //     let inner = self.inner.read();
-    //     Self::operate_on_bucket_entry_inner_locked_mut(&*inner, node_id, f)
-    // }
     // Ticks about once per second
     // to run tick tasks which may run at slower tick rates as configured
-    pub async fn tick(&self) -> Result<(), String> {
+    pub async fn tick(&self) -> EyreResult<()> {
         // Do rolling transfers every ROLLING_TRANSFERS_INTERVAL_SECS secs
         self.unlocked_inner.rolling_transfers_task.tick().await?;
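
Note on tick(): because every routine now yields EyreResult<()>, the ? operator hands callers a structured eyre::Report rather than a flattened String, and context can be layered on without losing the original cause. A standalone sketch; all names below are hypothetical:

use eyre::{eyre, Result as EyreResult, WrapErr};

// Hypothetical inner operation that fails with an eyre::Report.
fn lookup_relay(node: &str) -> EyreResult<u64> {
    Err(eyre!("no route to node {}", node))
}

fn tick_once(node: &str) -> EyreResult<u64> {
    // wrap_err layers context on top of the report; the original
    // message is preserved as the underlying cause.
    lookup_relay(node).wrap_err("routing table tick failed")
}

fn main() {
    if let Err(report) = tick_once("peer-1234") {
        // The Debug rendering shows the context first, then the causes.
        eprintln!("{:?}", report);
    }
}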

View File

@@ -13,7 +13,7 @@ impl RoutingTable {
         _stop_token: StopToken,
         last_ts: u64,
         cur_ts: u64,
-    ) -> Result<(), String> {
+    ) -> EyreResult<()> {
         // log_rtab!("--- rolling_transfers task");
         let mut inner = self.inner.write();
         let inner = &mut *inner;
@@ -37,7 +37,7 @@ impl RoutingTable {
     pub(super) async fn resolve_bootstrap(
         &self,
         bootstrap: Vec<String>,
-    ) -> Result<BootstrapRecordMap, String> {
+    ) -> EyreResult<BootstrapRecordMap> {
         // Resolve from bootstrap root to bootstrap hostnames
         let mut bsnames = Vec::<String>::new();
         for bh in bootstrap {
@@ -202,7 +202,7 @@ impl RoutingTable {
         self,
         stop_token: StopToken,
         bootstrap_dialinfos: Vec<DialInfo>,
-    ) -> Result<(), String> {
+    ) -> EyreResult<()> {
         let network_manager = self.network_manager();
         let mut unord = FuturesUnordered::new();
@@ -219,9 +219,7 @@ impl RoutingTable {
         for pi in peer_info {
             let k = pi.node_id.key;
             // Register the node
-            let nr = self
-                .register_node_with_signed_node_info(k, pi.signed_node_info)
-                .map_err(logthru_rtab!(error "Couldn't add bootstrap node: {}", k))?;
+            let nr = self.register_node_with_signed_node_info(k, pi.signed_node_info)?;
             // Add this our futures to process in parallel
             unord.push(
@@ -238,7 +236,7 @@ impl RoutingTable {
     }
     #[instrument(level = "trace", skip(self), err)]
-    pub(super) async fn bootstrap_task_routine(self, stop_token: StopToken) -> Result<(), String> {
+    pub(super) async fn bootstrap_task_routine(self, stop_token: StopToken) -> EyreResult<()> {
         let (bootstrap, bootstrap_nodes) = {
             let c = self.config.get();
             (
@@ -272,11 +270,7 @@ impl RoutingTable {
         let mut bootstrap_node_dial_infos = Vec::new();
         for b in bootstrap_nodes {
             let ndis = NodeDialInfo::from_str(b.as_str())
-                .map_err(map_to_string)
-                .map_err(logthru_rtab!(
-                    "Invalid node dial info in bootstrap entry: {}",
-                    b
-                ))?;
+                .wrap_err("Invalid node dial info in bootstrap entry")?;
             bootstrap_node_dial_infos.push(ndis);
         }
         for ndi in bootstrap_node_dial_infos {
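
Note on the wrap_err conversion above: a single wrap_err call replaces the map_err(map_to_string) plus logthru_rtab! chain while keeping the parser's own error as the source. A standalone sketch using a std parser in place of NodeDialInfo::from_str; parse_bootstrap_entry is a hypothetical helper, and any error type implementing std::error::Error can be wrapped the same way:

use std::net::SocketAddr;
use std::str::FromStr;

use eyre::{Result as EyreResult, WrapErr};

// Hypothetical stand-in for parsing one bootstrap entry.
fn parse_bootstrap_entry(b: &str) -> EyreResult<SocketAddr> {
    SocketAddr::from_str(b)
        // Keeps the AddrParseError as the source and adds readable context.
        .wrap_err("Invalid node dial info in bootstrap entry")
}

fn main() -> EyreResult<()> {
    let addr = parse_bootstrap_entry("127.0.0.1:5150")?;
    println!("parsed bootstrap entry: {}", addr);
    Ok(())
}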
@@ -311,19 +305,17 @@ impl RoutingTable {
log_rtab!("--- bootstrapping {} with {:?}", k.encode(), &v);
// Make invalid signed node info (no signature)
let nr = self
.register_node_with_signed_node_info(
k,
SignedNodeInfo::with_no_signature(NodeInfo {
network_class: NetworkClass::InboundCapable, // Bootstraps are always inbound capable
outbound_protocols: ProtocolSet::empty(), // Bootstraps do not participate in relaying and will not make outbound requests
min_version: v.min_version, // Minimum protocol version specified in txt record
max_version: v.max_version, // Maximum protocol version specified in txt record
dial_info_detail_list: v.dial_info_details, // Dial info is as specified in the bootstrap list
relay_peer_info: None, // Bootstraps never require a relay themselves
}),
)
.map_err(logthru_rtab!(error "Couldn't add bootstrap node: {}", k))?;
let nr = self.register_node_with_signed_node_info(
k,
SignedNodeInfo::with_no_signature(NodeInfo {
network_class: NetworkClass::InboundCapable, // Bootstraps are always inbound capable
outbound_protocols: ProtocolSet::empty(), // Bootstraps do not participate in relaying and will not make outbound requests
min_version: v.min_version, // Minimum protocol version specified in txt record
max_version: v.max_version, // Maximum protocol version specified in txt record
dial_info_detail_list: v.dial_info_details, // Dial info is as specified in the bootstrap list
relay_peer_info: None, // Bootstraps never require a relay themselves
}),
)?;
// Add this our futures to process in parallel
let this = self.clone();
@@ -359,7 +351,7 @@ impl RoutingTable {
         stop_token: StopToken,
         _last_ts: u64,
         cur_ts: u64,
-    ) -> Result<(), String> {
+    ) -> EyreResult<()> {
         let rpc = self.rpc_processor();
         let netman = self.network_manager();
         let relay_node_id = netman.relay_node().map(|nr| nr.node_id());
@@ -389,7 +381,7 @@ impl RoutingTable {
     pub(super) async fn peer_minimum_refresh_task_routine(
         self,
         stop_token: StopToken,
-    ) -> Result<(), String> {
+    ) -> EyreResult<()> {
         // get list of all peers we know about, even the unreliable ones, and ask them to find nodes close to our node too
         let noderefs = {
             let inner = self.inner.read();
@@ -421,7 +413,7 @@ impl RoutingTable {
         _stop_token: StopToken,
         _last_ts: u64,
         cur_ts: u64,
-    ) -> Result<(), String> {
+    ) -> EyreResult<()> {
         let mut inner = self.inner.write();
         let kick_queue: Vec<usize> = inner.kick_queue.iter().map(|v| *v).collect();
         inner.kick_queue.clear();
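
Note on the #[instrument(..., err)] attributes kept on these routines: with the new return type, tracing records an eyre::Report instead of a String when a routine resolves to Err. A hedged sketch of the combination; example_task_routine is hypothetical and assumes the tracing crate's attribute macros are available:

use eyre::{bail, Result as EyreResult};
use tracing::instrument;

// Hypothetical routine in the shape of bootstrap_task_routine: `err`
// makes tracing emit an error event carrying the returned report.
#[instrument(level = "trace", skip(bootstrap), err)]
async fn example_task_routine(bootstrap: Vec<String>) -> EyreResult<()> {
    if bootstrap.is_empty() {
        bail!("no bootstrap entries configured");
    }
    Ok(())
}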