clean up ui

pause routing table tasks when making routing domain changes
Christien Rioux
2023-09-06 13:20:36 -04:00
parent 28ed99d2af
commit bfc42cdd8e
9 changed files with 122 additions and 72 deletions
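Two mechanisms land together here: RoutingTable::tick() gains an early-out while a new tick_paused flag is set, and pause_tasks() flips that flag, cancelling in-flight tasks on the pause edge. Together they let a caller quiesce the routing table before changing a routing domain. A minimal sketch of the intended call pattern, assuming a RoutingTable handle; the apply_network_change wrapper and its body are illustrative, and only pause_tasks() itself comes from this commit:

async fn apply_network_change(routing_table: &RoutingTable) {
    // Stop ticking and cancel in-flight tasks so nothing races the edit
    routing_table.pause_tasks(true).await;

    // ... mutate routing domain state here (dial info, relay node, etc.) ...

    // Clear the flag; the next tick() resumes normal task scheduling
    routing_table.pause_tasks(false).await;
}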


@@ -125,6 +125,11 @@ impl RoutingTable {
     /// Ticks about once per second
     /// to run tick tasks which may run at slower tick rates as configured
     pub async fn tick(&self) -> EyreResult<()> {
+        // Don't tick if paused
+        if self.inner.read().tick_paused {
+            return Ok(());
+        }
+
         // Do rolling transfers every ROLLING_TRANSFERS_INTERVAL_SECS secs
         self.unlocked_inner.rolling_transfers_task.tick().await?;
@@ -168,13 +173,33 @@ impl RoutingTable {
         self.unlocked_inner.relay_management_task.tick().await?;
 
-        // Run the private route management task
-        self.unlocked_inner
-            .private_route_management_task
-            .tick()
-            .await?;
+        // If we don't know our network class then don't do this yet
+        if self.has_valid_network_class(RoutingDomain::PublicInternet) {
+            self.unlocked_inner
+                .private_route_management_task
+                .tick()
+                .await?;
+        }
 
         Ok(())
     }
 
+    pub(crate) async fn pause_tasks(&self, paused: bool) {
+        let cancel = {
+            let mut inner = self.inner.write();
+            if !inner.tick_paused && paused {
+                inner.tick_paused = true;
+                true
+            } else if inner.tick_paused && !paused {
+                inner.tick_paused = false;
+                false
+            } else {
+                false
+            }
+        };
+        if cancel {
+            self.cancel_tasks().await;
+        }
+    }
+
     pub(crate) async fn cancel_tasks(&self) {
         // Cancel all tasks being ticked
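Worth noting in pause_tasks() above: cancel_tasks() fires only on the unpaused-to-paused edge, so repeated pause calls are idempotent, and unpausing just clears the flag so the next tick() proceeds. The same transition logic in isolation, with illustrative names that are not part of this codebase:

// Standalone model of the edge-triggered pause logic.
struct TickGate {
    paused: bool,
}

impl TickGate {
    /// Returns true only on the not-paused -> paused transition,
    /// which is exactly when in-flight tasks must be cancelled.
    fn set_paused(&mut self, paused: bool) -> bool {
        let cancel = !self.paused && paused;
        self.paused = paused;
        cancel
    }
}

fn main() {
    let mut gate = TickGate { paused: false };
    assert!(gate.set_paused(true)); // first pause: cancel tasks
    assert!(!gate.set_paused(true)); // repeated pause: no-op
    assert!(!gate.set_paused(false)); // unpausing never cancels
}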


@@ -12,7 +12,7 @@ impl RoutingTable {
     // Ping each node in the routing table if they need to be pinged
     // to determine their reliability
     #[instrument(level = "trace", skip(self), err)]
-    fn relay_keepalive_public_internet(
+    async fn relay_keepalive_public_internet(
         &self,
         cur_ts: Timestamp,
         relay_nr: NodeRef,
@@ -41,7 +41,8 @@ impl RoutingTable {
         // Say we're doing this keepalive now
         self.edit_routing_domain(RoutingDomain::PublicInternet)
             .set_relay_node_keepalive(Some(cur_ts))
-            .commit();
+            .commit(false)
+            .await;
 
         // We need to keep-alive at one connection per ordering for relays
         // but also one per NAT mapping that we need to keep open for our inbound dial info
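Every editor call site in this commit now ends in .commit(false).await instead of .commit(): commit is async and takes a boolean. Read together with the commit message, the flag plausibly selects whether routing table tasks are paused while the edit is applied, with false meaning these lightweight keepalive updates skip the pause. That is an inference; the diff does not show the editor itself. An assumed shape, for orientation only:

// Assumed shape of the updated editor API, inferred from the call sites in
// this diff. The parameter name and the body are guesses, not the real code.
impl RoutingDomainEditor {
    pub async fn commit(self, pause_tasks: bool) {
        if pause_tasks {
            // Quiesce ticking so the domain change cannot race a task
            self.routing_table.pause_tasks(true).await;
        }
        // ... apply the accumulated edits to the routing domain ...
        if pause_tasks {
            self.routing_table.pause_tasks(false).await;
        }
    }
}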
@@ -119,7 +120,7 @@ impl RoutingTable {
     // Ping each node in the routing table if they need to be pinged
     // to determine their reliability
     #[instrument(level = "trace", skip(self), err)]
-    fn ping_validator_public_internet(
+    async fn ping_validator_public_internet(
         &self,
         cur_ts: Timestamp,
         unord: &mut FuturesUnordered<
@@ -136,7 +137,8 @@ impl RoutingTable {
 
         // If this is our relay, let's check for NAT keepalives
         if let Some(relay_nr) = opt_relay_nr {
-            self.relay_keepalive_public_internet(cur_ts, relay_nr, unord)?;
+            self.relay_keepalive_public_internet(cur_ts, relay_nr, unord)
+                .await?;
         }
 
         // Just do a single ping with the best protocol for all the other nodes to check for liveness
@@ -156,7 +158,7 @@ impl RoutingTable {
     // Ping each node in the LocalNetwork routing domain if they
     // need to be pinged to determine their reliability
     #[instrument(level = "trace", skip(self), err)]
-    fn ping_validator_local_network(
+    async fn ping_validator_local_network(
         &self,
         cur_ts: Timestamp,
         unord: &mut FuturesUnordered<
@@ -195,10 +197,12 @@ impl RoutingTable {
         let mut unord = FuturesUnordered::new();
 
         // PublicInternet
-        self.ping_validator_public_internet(cur_ts, &mut unord)?;
+        self.ping_validator_public_internet(cur_ts, &mut unord)
+            .await?;
 
         // LocalNetwork
-        self.ping_validator_local_network(cur_ts, &mut unord)?;
+        self.ping_validator_local_network(cur_ts, &mut unord)
+            .await?;
 
         // Wait for ping futures to complete in parallel
         while let Ok(Some(_)) = unord.next().timeout_at(stop_token.clone()).await {}
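The validator functions turn async so they can await the editor commit above, but the fan-out model is unchanged: each one pushes its per-node ping futures into a caller-owned FuturesUnordered, and the caller drains completions in parallel (racing a stop token in the real code). The collect-then-drain pattern in a self-contained sketch with generic names:

use futures::{stream::FuturesUnordered, StreamExt};

// Collect-then-drain: producers push futures, one loop consumes them in
// completion order. `ping` is a stand-in for the real per-node ping RPC.
async fn drain_pings(node_ids: Vec<u32>) {
    let mut unord = FuturesUnordered::new();
    for id in node_ids {
        unord.push(async move { ping(id).await });
    }
    // Completion order follows ping latency, not insertion order
    while let Some(_result) = unord.next().await {}
}

async fn ping(_id: u32) { /* stand-in */ }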


@@ -169,11 +169,6 @@ impl RoutingTable {
         _last_ts: Timestamp,
         cur_ts: Timestamp,
     ) -> EyreResult<()> {
-        // If we don't know our network class then don't do this yet
-        if !self.has_valid_network_class(RoutingDomain::PublicInternet) {
-            return Ok(());
-        }
-
         // Test locally allocated routes first
         // This may remove dead routes
         let routes_needing_testing = self.get_allocated_routes_to_test(cur_ts);
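This deletion is the other half of the guard added to tick() in the first file: the network-class check moves from the task body to the call site, so the private route management task is never scheduled while the class is unknown, rather than waking up just to return early. In miniature, with hypothetical names:

// Guard at the caller: the task future is never created when the
// precondition fails. Hypothetical, simplified shapes.
async fn tick(network_class_known: bool) {
    if network_class_known {
        private_route_management().await;
    }
}

async fn private_route_management() {
    // no longer needs its own early-return preamble
}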


@@ -23,13 +23,13 @@ impl RoutingTable {
             let state = relay_node.state(cur_ts);
             // Relay node is dead or no longer needed
             if matches!(state, BucketEntryState::Dead) {
-                info!("Relay node died, dropping relay {}", relay_node);
+                debug!("Relay node died, dropping relay {}", relay_node);
                 editor.clear_relay_node();
                 false
             }
             // Relay node no longer can relay
             else if relay_node.operate(|_rti, e| !relay_node_filter(e)) {
-                info!(
+                debug!(
                     "Relay node can no longer relay, dropping relay {}",
                     relay_node
                 );
@@ -38,7 +38,7 @@ impl RoutingTable {
             }
             // Relay node is no longer required
             else if !own_node_info.requires_relay() {
-                info!(
+                debug!(
                     "Relay node no longer required, dropping relay {}",
                     relay_node
                 );
@@ -47,7 +47,7 @@ impl RoutingTable {
             }
             // Should not have relay for invalid network class
             else if !self.has_valid_network_class(RoutingDomain::PublicInternet) {
-                info!(
+                debug!(
                     "Invalid network class does not get a relay, dropping relay {}",
                     relay_node
                 );
@@ -75,7 +75,7 @@ impl RoutingTable {
                         false,
                     ) {
                         Ok(nr) => {
-                            info!("Outbound relay node selected: {}", nr);
+                            debug!("Outbound relay node selected: {}", nr);
                             editor.set_relay_node(nr);
                             got_outbound_relay = true;
                         }
@@ -84,20 +84,20 @@ impl RoutingTable {
                         }
                     }
                 } else {
-                    info!("Outbound relay desired but not available");
+                    debug!("Outbound relay desired but not available");
                 }
             }
             if !got_outbound_relay {
                 // Find a node in our routing table that is an acceptable inbound relay
                 if let Some(nr) = self.find_inbound_relay(RoutingDomain::PublicInternet, cur_ts) {
-                    info!("Inbound relay node selected: {}", nr);
+                    debug!("Inbound relay node selected: {}", nr);
                     editor.set_relay_node(nr);
                 }
             }
         }
 
         // Commit the changes
-        editor.commit();
+        editor.commit(false).await;
 
         Ok(())
     }
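The info! to debug! demotions in this file are the "clean up ui" half of the commit: relay selection and churn are routine events, so they drop below the default console log level. Generically, with the tracing crate's macros (veilid's own logging facade may differ, so treat this as an illustration):

use tracing::{debug, info};

fn report_relay_churn() {
    // Still visible under a default INFO-level filter
    info!("attachment state changed");
    // Now hidden unless the filter is lowered to DEBUG or TRACE
    debug!("Relay node died, dropping relay");
}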