From 27f7f49d4f7c166e46eb2f09d109f3437e8cc05a Mon Sep 17 00:00:00 2001
From: John Smith
Date: Mon, 21 Nov 2022 22:50:42 -0500
Subject: [PATCH] checkpoint

---
 doc/config/sample.config                      |  2 +-
 doc/config/veilid-server-config.md            |  2 +-
 veilid-core/src/routing_table/bucket_entry.rs |  5 ++-
 .../src/routing_table/route_spec_store.rs     | 13 +++----
 .../routing_table/routing_domain_editor.rs    | 11 ++++++
 veilid-core/src/rpc_processor/mod.rs          | 36 +++++++++++++++++--
 .../src/tests/common/test_veilid_config.rs    |  4 +--
 veilid-core/src/xx/tick_task.rs               |  3 +-
 veilid-flutter/example/lib/config.dart        |  2 +-
 veilid-server/src/settings.rs                 |  4 +--
 veilid-wasm/tests/web.rs                      |  2 +-
 11 files changed, 64 insertions(+), 20 deletions(-)

diff --git a/doc/config/sample.config b/doc/config/sample.config
index 88665c76..c7e65f0f 100644
--- a/doc/config/sample.config
+++ b/doc/config/sample.config
@@ -64,7 +64,7 @@ core:
         max_timestamp_ahead_ms: 10000
         timeout_ms: 10000
         max_route_hop_count: 4
-        default_route_hop_count: 2
+        default_route_hop_count: 1
       dht:
         resolve_node_timeout:
diff --git a/doc/config/veilid-server-config.md b/doc/config/veilid-server-config.md
index b7968693..83203e3b 100644
--- a/doc/config/veilid-server-config.md
+++ b/doc/config/veilid-server-config.md
@@ -229,7 +229,7 @@ rpc:
   max_timestamp_ahead_ms: 10000
   timeout_ms: 10000
   max_route_hop_count: 4
-  default_route_hop_count: 2
+  default_route_hop_count: 1
 ```

 #### core:network:dht
diff --git a/veilid-core/src/routing_table/bucket_entry.rs b/veilid-core/src/routing_table/bucket_entry.rs
index 2ee36d1c..c4cf88c6 100644
--- a/veilid-core/src/routing_table/bucket_entry.rs
+++ b/veilid-core/src/routing_table/bucket_entry.rs
@@ -85,8 +85,11 @@ pub struct BucketEntryInner {
     /// The minimum and maximum range of cryptography versions supported by the node,
     /// inclusive of the requirements of any relay the node may be using
     min_max_version: Option,
-    /// Whether or not we have updated this peer with our node info since our network
+    /// If this node has updated its SignedNodeInfo since our network
     /// and dial info has last changed, for example when our IP address changes
+    /// Used to determine if we should make this entry 'live' again when we receive a SignedNodeInfo update
+    /// that has the same timestamp, because if we change our own IP address or network class, nodes that
+    /// were unreachable may now be reachable with the same SignedNodeInfo/DialInfo
     updated_since_last_network_change: bool,
     /// The last connection descriptors used to contact this node, per protocol type
     #[with(Skip)]
diff --git a/veilid-core/src/routing_table/route_spec_store.rs b/veilid-core/src/routing_table/route_spec_store.rs
index bf0fc2db..9bb8d50d 100644
--- a/veilid-core/src/routing_table/route_spec_store.rs
+++ b/veilid-core/src/routing_table/route_spec_store.rs
@@ -1268,35 +1268,32 @@ impl RouteSpecStore {
             .unwrap_or_default()
     }

-    /// Mark a remote private route as having seen our node info {
-    pub fn mark_remote_private_route_seen_our_node_info(&self, key: &DHTKey) {
+    /// Mark a remote private route as having seen our node info
+    pub fn mark_remote_private_route_seen_our_node_info(&self, key: &DHTKey, cur_ts: u64) {
         let inner = &mut *self.inner.lock();
-        let cur_ts = intf::get_timestamp();
         Self::with_create_remote_private_route(inner, cur_ts, key, |rpr| {
             rpr.seen_our_node_info = true;
         })
     }

     /// Mark a remote private route as having replied to a question {
-    pub fn mark_remote_private_route_replied(&self, key: &DHTKey) {
+    pub fn mark_remote_private_route_replied(&self, key: &DHTKey, cur_ts: u64) {
         let inner = &mut *self.inner.lock();
-        let cur_ts = intf::get_timestamp();
         Self::with_create_remote_private_route(inner, cur_ts, key, |rpr| {
             rpr.last_replied_ts = Some(cur_ts);
         })
     }

     /// Mark a remote private route as having beed used {
-    pub fn mark_remote_private_route_used(&self, key: &DHTKey) {
+    pub fn mark_remote_private_route_used(&self, key: &DHTKey, cur_ts: u64) {
         let inner = &mut *self.inner.lock();
-        let cur_ts = intf::get_timestamp();
         Self::with_create_remote_private_route(inner, cur_ts, key, |rpr| {
             rpr.last_used_ts = Some(cur_ts);
         })
     }

     /// Clear caches when local our local node info changes
-    pub fn local_node_info_changed(&self) {
+    pub fn reset(&self) {
         let inner = &mut *self.inner.lock();

         // Clean up local allocated routes
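Note on the route_spec_store.rs hunk above (commentary, not part of the applied diff): the mark_remote_private_route_* methods now take cur_ts from the caller instead of sampling intf::get_timestamp() internally, and local_node_info_changed() becomes the more general reset(). Passing the timestamp in lets related bookkeeping calls share one clock sample and keeps the behavior deterministic under test. A self-contained toy of that pattern; RemoteRouteStats and its methods are illustrative stand-ins, not veilid-core types:

    // Toy stand-in for the bookkeeping updated by the mark_* methods; it only
    // demonstrates the caller-supplies-the-timestamp pattern.
    #[derive(Default, Debug)]
    struct RemoteRouteStats {
        seen_our_node_info: bool,
        last_replied_ts: Option<u64>,
        last_used_ts: Option<u64>,
    }

    impl RemoteRouteStats {
        fn mark_seen_our_node_info(&mut self, _cur_ts: u64) {
            self.seen_our_node_info = true;
        }
        fn mark_replied(&mut self, cur_ts: u64) {
            self.last_replied_ts = Some(cur_ts);
        }
        fn mark_used(&mut self, cur_ts: u64) {
            self.last_used_ts = Some(cur_ts);
        }
    }

    fn main() {
        let mut stats = RemoteRouteStats::default();
        // One clock sample shared by related updates keeps them consistent,
        // and a fixed value makes the result easy to assert on in tests.
        let cur_ts: u64 = 1_669_086_642_000_000;
        stats.mark_used(cur_ts);
        stats.mark_replied(cur_ts);
        stats.mark_seen_our_node_info(cur_ts);
        assert_eq!(stats.last_used_ts, stats.last_replied_ts);
        println!("{:?}", stats);
    }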
diff --git a/veilid-core/src/routing_table/routing_domain_editor.rs b/veilid-core/src/routing_table/routing_domain_editor.rs
index b9150196..08aba320 100644
--- a/veilid-core/src/routing_table/routing_domain_editor.rs
+++ b/veilid-core/src/routing_table/routing_domain_editor.rs
@@ -194,14 +194,25 @@ impl RoutingDomainEditor {
                     }
                 }
                 if changed {
+                    // Clear our 'peer info' cache; the peer info for this routing domain will be regenerated the next time it is asked for
                     detail.common_mut().clear_cache()
                 }
             });
             if changed {
+                // Mark that nothing in the routing table has seen our new node info
                 inner.reset_all_seen_our_node_info(self.routing_domain);
+                // inner.reset_all_updated_since_last_network_change();
             }
         }
+        // Clear the routespecstore cache if our PublicInternet dial info has changed
+        if changed {
+            if self.routing_domain == RoutingDomain::PublicInternet {
+                let rss = self.routing_table.route_spec_store();
+                rss.reset();
+            }
+        }
+
         // Send our updated node info to all the nodes in the routing table
         if changed && self.send_node_info_updates {
             let network_manager = self.routing_table.unlocked_inner.network_manager.clone();
             network_manager
diff --git a/veilid-core/src/rpc_processor/mod.rs b/veilid-core/src/rpc_processor/mod.rs
index 9119719e..c661dec7 100644
--- a/veilid-core/src/rpc_processor/mod.rs
+++ b/veilid-core/src/rpc_processor/mod.rs
@@ -162,6 +162,7 @@ where

 #[derive(Debug)]
 struct WaitableReply {
+    dest: Destination,
     handle: OperationWaitHandle,
     timeout: u64,
     node_ref: NodeRef,
@@ -441,7 +442,27 @@ impl RPCProcessor {
                     waitable_reply.send_ts,
                     recv_ts,
                     rpcreader.header.body_len,
-                )
+                );
+                // Process private route replies
+                if let Destination::PrivateRoute {
+                    private_route,
+                    safety_selection,
+                } = &waitable_reply.dest
+                {
+                    let rss = self.routing_table.route_spec_store();
+
+                    // If we received a reply from a private route, mark it as such
+                    rss.mark_remote_private_route_replied(&private_route.public_key, recv_ts);
+
+                    // If we sent to a private route without a safety route,
+                    // we need to mark our own node info as having been seen so we can optimize sending it
+                    if let SafetySelection::Unsafe(_) = safety_selection {
+                        rss.mark_remote_private_route_seen_our_node_info(
+                            &private_route.public_key,
+                            recv_ts,
+                        );
+                    }
+                }
             }
         };

@@ -702,7 +723,7 @@ impl RPCProcessor {
             node_id,
             node_ref,
             hop_count,
-        } = network_result_try!(self.render_operation(dest, &operation)?);
+        } = network_result_try!(self.render_operation(dest.clone(), &operation)?);

         // Calculate answer timeout
         // Timeout is number of hops times the timeout per hop
@@ -733,8 +754,19 @@ impl RPCProcessor {
         // Successfully sent
         node_ref.stats_question_sent(send_ts, bytes, true);

+        // Private route stats
+        if let Destination::PrivateRoute {
+            private_route,
+            safety_selection: _,
+        } = &dest
+        {
+            let rss = self.routing_table.route_spec_store();
+            rss.mark_remote_private_route_used(&private_route.public_key, intf::get_timestamp());
+        }
+
         // Pass back waitable reply completion
         Ok(NetworkResult::value(WaitableReply {
+            dest,
             handle,
             timeout,
             node_ref,
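Note on the rpc_processor/mod.rs hunks above (commentary, not part of the applied diff): the request's Destination is now threaded through the pending-reply state. question() passes dest.clone() to render_operation() and stores the original in WaitableReply, so the completion path can tell whether a private route was involved and update its statistics. A compilable toy of that shape; Destination, WaitableReply, and on_reply here are simplified stand-ins, not the real rpc_processor types:

    // Keep the destination alongside the pending reply so the completion path
    // knows whether a private route was involved.
    #[derive(Clone)]
    enum Destination {
        Direct { node: String },
        PrivateRoute { public_key: String },
    }

    struct WaitableReply {
        dest: Destination,
        send_ts: u64,
    }

    fn on_reply(reply: &WaitableReply, recv_ts: u64) {
        let elapsed_us = recv_ts.saturating_sub(reply.send_ts);
        match &reply.dest {
            // Only private-route destinations get the extra bookkeeping.
            Destination::PrivateRoute { public_key } => {
                println!("private route {} replied after {} us", public_key, elapsed_us);
            }
            Destination::Direct { node } => {
                println!("node {} replied after {} us", node, elapsed_us);
            }
        }
    }

    fn main() {
        for dest in [
            Destination::Direct { node: "node-a".into() },
            Destination::PrivateRoute { public_key: "route-key".into() },
        ] {
            // The destination is still needed to render and send the question,
            // so it is cloned into the waitable reply rather than moved.
            let waitable = WaitableReply { dest: dest.clone(), send_ts: 1_000 };
            on_reply(&waitable, 1_750);
        }
    }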
diff --git a/veilid-core/src/tests/common/test_veilid_config.rs b/veilid-core/src/tests/common/test_veilid_config.rs
index 4e56d4e7..e73e79a1 100644
--- a/veilid-core/src/tests/common/test_veilid_config.rs
+++ b/veilid-core/src/tests/common/test_veilid_config.rs
@@ -207,7 +207,7 @@ fn config_callback(key: String) -> ConfigCallbackReturn {
         "network.rpc.max_timestamp_ahead_ms" => Ok(Box::new(Some(10_000u32))),
         "network.rpc.timeout_ms" => Ok(Box::new(10_000u32)),
         "network.rpc.max_route_hop_count" => Ok(Box::new(4u8)),
-        "network.rpc.default_route_hop_count" => Ok(Box::new(2u8)),
+        "network.rpc.default_route_hop_count" => Ok(Box::new(1u8)),
         "network.dht.resolve_node_timeout_ms" => Ok(Box::new(Option::<u32>::None)),
         "network.dht.resolve_node_count" => Ok(Box::new(20u32)),
         "network.dht.resolve_node_fanout" => Ok(Box::new(3u32)),
@@ -325,7 +325,7 @@ pub async fn test_config() {
     assert_eq!(inner.network.rpc.queue_size, 1024u32);
     assert_eq!(inner.network.rpc.timeout_ms, 10_000u32);
     assert_eq!(inner.network.rpc.max_route_hop_count, 4u8);
-    assert_eq!(inner.network.rpc.default_route_hop_count, 2u8);
+    assert_eq!(inner.network.rpc.default_route_hop_count, 1u8);
     assert_eq!(inner.network.routing_table.limit_over_attached, 64u32);
     assert_eq!(inner.network.routing_table.limit_fully_attached, 32u32);
     assert_eq!(inner.network.routing_table.limit_attached_strong, 16u32);
diff --git a/veilid-core/src/xx/tick_task.rs b/veilid-core/src/xx/tick_task.rs
index 56ea442e..2d5d1e70 100644
--- a/veilid-core/src/xx/tick_task.rs
+++ b/veilid-core/src/xx/tick_task.rs
@@ -83,7 +83,8 @@ impl TickTask {
         let now = intf::get_timestamp();
         let last_timestamp_us = self.last_timestamp_us.load(Ordering::Acquire);

-        if last_timestamp_us != 0u64 && (now - last_timestamp_us) < self.tick_period_us {
+        if last_timestamp_us != 0u64 && now.saturating_sub(last_timestamp_us) < self.tick_period_us
+        {
             // It's not time yet
             return Ok(());
         }
diff --git a/veilid-flutter/example/lib/config.dart b/veilid-flutter/example/lib/config.dart
index 9eb209c3..aaeea802 100644
--- a/veilid-flutter/example/lib/config.dart
+++ b/veilid-flutter/example/lib/config.dart
@@ -66,7 +66,7 @@ Future getDefaultVeilidConfig() async {
        maxTimestampAheadMs: 10000,
        timeoutMs: 10000,
        maxRouteHopCount: 4,
-       defaultRouteHopCount: 2,
+       defaultRouteHopCount: 1,
      ),
      dht: VeilidConfigDHT(
        resolveNodeTimeoutMs: null,
diff --git a/veilid-server/src/settings.rs b/veilid-server/src/settings.rs
index 4c6b4135..203076cd 100644
--- a/veilid-server/src/settings.rs
+++ b/veilid-server/src/settings.rs
@@ -84,7 +84,7 @@ core:
         max_timestamp_ahead_ms: 10000
         timeout_ms: 10000
         max_route_hop_count: 4
-        default_route_hop_count: 2
+        default_route_hop_count: 1
     dht:
         resolve_node_timeout:
         resolve_node_count: 20
@@ -1510,7 +1510,7 @@ mod tests {
         assert_eq!(s.core.network.rpc.max_timestamp_ahead_ms, Some(10_000u32));
         assert_eq!(s.core.network.rpc.timeout_ms, 10_000u32);
         assert_eq!(s.core.network.rpc.max_route_hop_count, 4);
-        assert_eq!(s.core.network.rpc.default_route_hop_count, 2);
+        assert_eq!(s.core.network.rpc.default_route_hop_count, 1);

         // assert_eq!(s.core.network.dht.resolve_node_timeout_ms, None);
         assert_eq!(s.core.network.dht.resolve_node_count, 20u32);
diff --git a/veilid-wasm/tests/web.rs b/veilid-wasm/tests/web.rs
index 230f0609..8856b033 100644
--- a/veilid-wasm/tests/web.rs
+++ b/veilid-wasm/tests/web.rs
@@ -46,7 +46,7 @@ fn init_callbacks() {
         case "network.rpc.max_timestamp_ahead": return 10000000;
         case "network.rpc.timeout": return 10000000;
         case "network.rpc.max_route_hop_count": return 4;
-        case "network.rpc.default_route_hop_count": return 2;
+        case "network.rpc.default_route_hop_count": return 1;
         case "network.dht.resolve_node_timeout": return null;
        case "network.dht.resolve_node_count": return 20;
        case "network.dht.resolve_node_fanout": return 3;
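Note on the tick_task.rs hunk (commentary, not part of the applied diff): replacing (now - last_timestamp_us) with now.saturating_sub(last_timestamp_us) matters when the timestamp source ever steps backwards. The plain subtraction would underflow, panicking in debug builds and wrapping to an enormous elapsed value in release, whereas saturating_sub clamps the elapsed time to zero so the tick is simply treated as not due yet. A standalone sketch of the patched condition; should_skip_tick is a hypothetical helper, not a function in veilid-core:

    // Hypothetical helper mirroring the patched check in TickTask::tick().
    fn should_skip_tick(now: u64, last_timestamp_us: u64, tick_period_us: u64) -> bool {
        // Clamp to zero instead of underflowing when the clock has gone backwards.
        last_timestamp_us != 0u64 && now.saturating_sub(last_timestamp_us) < tick_period_us
    }

    fn main() {
        let tick_period_us = 1_000_000u64; // 1 second
        // Normal case: not enough time has passed yet, so skip.
        assert!(should_skip_tick(1_500_000, 1_000_000, tick_period_us));
        // Clock stepped backwards: elapsed saturates to 0, so we still just skip.
        assert!(should_skip_tick(900_000, 1_000_000, tick_period_us));
        // First run (last timestamp is 0) never skips.
        assert!(!should_skip_tick(900_000, 0, tick_period_us));
        println!("saturating_sub keeps the elapsed-time check panic-free");
    }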