// veilid/veilid-core/src/network_manager.rs

use crate::*;
use connection_manager::*;
use dht::*;
use hashlink::LruCache;
use intf::*;
use receipt_manager::*;
use routing_table::*;
use rpc_processor::RPCProcessor;
use xx::*;
////////////////////////////////////////////////////////////////////////////////////////
pub const RELAY_MANAGEMENT_INTERVAL_SECS: u32 = 1;
pub const MAX_MESSAGE_SIZE: usize = MAX_ENVELOPE_SIZE;
pub const IPADDR_TABLE_SIZE: usize = 1024;
pub const IPADDR_MAX_INACTIVE_DURATION_US: u64 = 300_000_000u64; // 5 minutes
#[derive(Copy, Clone, Debug, Default)]
pub struct ProtocolConfig {
    pub outbound: ProtocolSet,
    pub inbound: ProtocolSet,
}
// Things we get when we start up and go away when we shut down
// Routing table is not in here because we want it to survive a network shutdown/startup restart
#[derive(Clone)]
struct NetworkComponents {
net: Network,
connection_manager: ConnectionManager,
    rpc_processor: RPCProcessor,
    receipt_manager: ReceiptManager,
}
// Statistics per address
#[derive(Clone, Default)]
pub struct PerAddressStats {
last_seen_ts: u64,
transfer_stats_accounting: TransferStatsAccounting,
transfer_stats: TransferStatsDownUp,
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct PerAddressStatsKey(IpAddr);
impl Default for PerAddressStatsKey {
fn default() -> Self {
Self(IpAddr::V4(Ipv4Addr::UNSPECIFIED))
}
}
// Statistics about the low-level network
#[derive(Clone)]
pub struct NetworkManagerStats {
self_stats: PerAddressStats,
    per_address_stats: LruCache<PerAddressStatsKey, PerAddressStats>,
}
impl Default for NetworkManagerStats {
fn default() -> Self {
Self {
self_stats: PerAddressStats::default(),
per_address_stats: LruCache::new(IPADDR_TABLE_SIZE),
}
}
}
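
// A minimal sketch of the LRU behavior the stats table above relies on: once
// IPADDR_TABLE_SIZE entries exist, inserting a new address evicts the
// least-recently-used one, keeping per-address accounting bounded.
// (Standalone example, not part of this module.)
//
//     use hashlink::LruCache;
//
//     let mut cache: LruCache<u32, &str> = LruCache::new(2);
//     cache.insert(1, "a");
//     cache.insert(2, "b");
//     cache.insert(3, "c"); // evicts key 1, the least recently used
//     assert!(cache.get(&1).is_none());
//     assert_eq!(cache.len(), 2);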
struct ClientWhitelistEntry {
last_seen: u64,
}
// Mechanism required to contact another node
enum InboundMethod {
    Direct,          // Contact the node directly
    SignalReverse,   // Request via signal that the node connect back directly
    SignalHolePunch, // Request via signal that the node negotiate a hole punch
    Relay,           // Must use a third-party relay to reach the node
}
// The mutable state of the network manager
struct NetworkManagerInner {
routing_table: Option<RoutingTable>,
components: Option<NetworkComponents>,
network_class: Option<NetworkClass>,
    stats: NetworkManagerStats,
    client_whitelist: LruCache<key::DHTKey, ClientWhitelistEntry>,
    relay_node: Option<NodeRef>,
}
struct NetworkManagerUnlockedInner {
// Background processes
rolling_transfers_task: TickTask,
    relay_management_task: TickTask,
}
#[derive(Clone)]
pub struct NetworkManager {
config: VeilidConfig,
table_store: TableStore,
crypto: Crypto,
inner: Arc<Mutex<NetworkManagerInner>>,
    unlocked_inner: Arc<NetworkManagerUnlockedInner>,
}
impl NetworkManager {
fn new_inner() -> NetworkManagerInner {
NetworkManagerInner {
routing_table: None,
components: None,
network_class: None,
            stats: NetworkManagerStats::default(),
            client_whitelist: LruCache::new_unbounded(),
            relay_node: None,
}
}
fn new_unlocked_inner(_config: VeilidConfig) -> NetworkManagerUnlockedInner {
//let c = config.get();
NetworkManagerUnlockedInner {
rolling_transfers_task: TickTask::new(ROLLING_TRANSFERS_INTERVAL_SECS),
            relay_management_task: TickTask::new(RELAY_MANAGEMENT_INTERVAL_SECS),
        }
    }
    pub fn new(config: VeilidConfig, table_store: TableStore, crypto: Crypto) -> Self {
let this = Self {
config: config.clone(),
table_store,
crypto,
            inner: Arc::new(Mutex::new(Self::new_inner())),
unlocked_inner: Arc::new(Self::new_unlocked_inner(config)),
};
// Set rolling transfers tick task
{
let this2 = this.clone();
this.unlocked_inner
.rolling_transfers_task
.set_routine(move |l, t| {
Box::pin(this2.clone().rolling_transfers_task_routine(l, t))
});
        }
// Set relay management tick task
{
let this2 = this.clone();
this.unlocked_inner
.relay_management_task
.set_routine(move |l, t| {
Box::pin(this2.clone().relay_management_task_routine(l, t))
});
}
        this
}
pub fn config(&self) -> VeilidConfig {
self.config.clone()
}
pub fn table_store(&self) -> TableStore {
self.table_store.clone()
}
pub fn crypto(&self) -> Crypto {
self.crypto.clone()
}
pub fn routing_table(&self) -> RoutingTable {
self.inner.lock().routing_table.as_ref().unwrap().clone()
}
pub fn net(&self) -> Network {
self.inner.lock().components.as_ref().unwrap().net.clone()
}
pub fn rpc_processor(&self) -> RPCProcessor {
self.inner
.lock()
.components
.as_ref()
.unwrap()
.rpc_processor
.clone()
}
pub fn receipt_manager(&self) -> ReceiptManager {
self.inner
.lock()
.components
.as_ref()
.unwrap()
.receipt_manager
.clone()
}
pub fn connection_manager(&self) -> ConnectionManager {
self.inner
.lock()
.components
.as_ref()
.unwrap()
.connection_manager
            .clone()
    }
pub fn relay_node(&self) -> Option<NodeRef> {
self.inner.lock().relay_node.clone()
}
pub async fn init(&self) -> Result<(), String> {
let routing_table = RoutingTable::new(self.clone());
routing_table.init().await?;
self.inner.lock().routing_table = Some(routing_table.clone());
Ok(())
}
pub async fn terminate(&self) {
let routing_table = {
let mut inner = self.inner.lock();
inner.routing_table.take()
};
if let Some(routing_table) = routing_table {
routing_table.terminate().await;
}
}
pub async fn internal_startup(&self) -> Result<(), String> {
trace!("NetworkManager::internal_startup begin");
if self.inner.lock().components.is_some() {
debug!("NetworkManager::internal_startup already started");
return Ok(());
}
// Create network components
let net = Network::new(self.clone());
let connection_manager = ConnectionManager::new(self.clone());
let rpc_processor = RPCProcessor::new(self.clone());
let receipt_manager = ReceiptManager::new(self.clone());
self.inner.lock().components = Some(NetworkComponents {
net: net.clone(),
connection_manager: connection_manager.clone(),
rpc_processor: rpc_processor.clone(),
receipt_manager: receipt_manager.clone(),
});
// Start network components
rpc_processor.startup().await?;
receipt_manager.startup().await?;
net.startup().await?;
connection_manager.startup().await;
trace!("NetworkManager::internal_startup end");
Ok(())
}
pub async fn startup(&self) -> Result<(), String> {
if let Err(e) = self.internal_startup().await {
self.shutdown().await;
return Err(e);
}
Ok(())
}
pub async fn shutdown(&self) {
trace!("NetworkManager::shutdown begin");
// Shutdown network components if they started up
let components = self.inner.lock().components.clone();
if let Some(components) = components {
components.connection_manager.shutdown().await;
components.net.shutdown().await;
components.receipt_manager.shutdown().await;
components.rpc_processor.shutdown().await;
}
// reset the state
let mut inner = self.inner.lock();
inner.components = None;
inner.network_class = None;
trace!("NetworkManager::shutdown end");
}
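
    // Lifecycle sketch: components start in dependency order
    // (rpc_processor -> receipt_manager -> net -> connection_manager)
    // and are shut down in the exact reverse order above, so nothing is
    // torn down while a later-started component may still be using it.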
pub fn update_client_whitelist(&self, client: key::DHTKey) {
let mut inner = self.inner.lock();
match inner.client_whitelist.entry(client) {
hashlink::lru_cache::Entry::Occupied(mut entry) => {
entry.get_mut().last_seen = intf::get_timestamp()
}
hashlink::lru_cache::Entry::Vacant(entry) => {
entry.insert(ClientWhitelistEntry {
last_seen: intf::get_timestamp(),
});
}
}
}
pub fn check_client_whitelist(&self, client: key::DHTKey) -> bool {
let mut inner = self.inner.lock();
match inner.client_whitelist.entry(client) {
hashlink::lru_cache::Entry::Occupied(mut entry) => {
entry.get_mut().last_seen = intf::get_timestamp();
true
}
hashlink::lru_cache::Entry::Vacant(_) => false,
}
}
pub fn purge_client_whitelist(&self) {
let timeout_ms = self.config.get().network.client_whitelist_timeout_ms;
let mut inner = self.inner.lock();
let cutoff_timestamp = intf::get_timestamp() - ((timeout_ms as u64) * 1000u64);
        // Remove clients from the whitelist that haven't been seen since our whitelist timeout
while inner
.client_whitelist
.peek_lru()
.map(|v| v.1.last_seen < cutoff_timestamp)
.unwrap_or_default()
{
inner.client_whitelist.remove_lru();
}
}
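
    // Whitelist lifecycle sketch (hypothetical `nm: NetworkManager` and
    // `client_key`): a client is admitted by update_client_whitelist,
    // refreshed on every successful check_client_whitelist, and evicted by
    // purge_client_whitelist once idle longer than
    // network.client_whitelist_timeout_ms.
    //
    //     nm.update_client_whitelist(client_key);
    //     assert!(nm.check_client_whitelist(client_key)); // refreshes last_seen
    //     nm.purge_client_whitelist(); // drops only entries idle past the timeout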
    pub async fn tick(&self) -> Result<(), String> {
        let (routing_table, net, receipt_manager) = {
let inner = self.inner.lock();
let components = inner.components.as_ref().unwrap();
(
inner.routing_table.as_ref().unwrap().clone(),
components.net.clone(),
components.receipt_manager.clone(),
)
};
// If the network needs to be reset, do it
        // If things can't restart, then we fail out of the attachment manager
if net.needs_restart() {
net.shutdown().await;
net.startup().await?;
}
// Run the routing table tick
routing_table.tick().await?;
// Run the low level network tick
net.tick().await?;
// Run the receipt manager tick
receipt_manager.tick().await?;
// Purge the client whitelist
self.purge_client_whitelist();
Ok(())
}
// Return what network class we are in
    pub fn get_network_class(&self) -> Option<NetworkClass> {
if let Some(components) = &self.inner.lock().components {
components.net.get_network_class()
} else {
None
}
}
// Get our node's capabilities
    pub fn generate_node_status(&self) -> NodeStatus {
        let network_class = self.get_network_class().unwrap_or(NetworkClass::Invalid);
        let will_route = network_class.can_inbound_relay(); // xxx: eventually this may have more criteria added
        let will_tunnel = network_class.can_inbound_relay(); // xxx: we may want to restrict by battery life and network bandwidth at some point
        let will_signal = network_class.can_signal();
        let will_relay = network_class.can_inbound_relay();
        let will_validate_dial_info = network_class.can_validate_dial_info();
NodeStatus {
will_route,
will_tunnel,
will_signal,
will_relay,
will_validate_dial_info,
}
}
    // Return what protocols we have enabled
    pub fn get_protocol_config(&self) -> Option<ProtocolConfig> {
        if let Some(components) = &self.inner.lock().components {
            components.net.get_protocol_config()
        } else {
            None
}
}
// Generates an out-of-band receipt
pub fn generate_receipt<D: AsRef<[u8]>>(
&self,
expiration_us: u64,
expected_returns: u32,
extra_data: D,
callback: impl ReceiptCallback,
) -> Result<Vec<u8>, String> {
let receipt_manager = self.receipt_manager();
let routing_table = self.routing_table();
// Generate receipt and serialized form to return
let nonce = Crypto::get_random_nonce();
let receipt = Receipt::try_new(0, nonce, routing_table.node_id(), extra_data)?;
let out = receipt
.to_signed_data(&routing_table.node_id_secret())
.map_err(|_| "failed to generate signed receipt".to_owned())?;
// Record the receipt for later
let exp_ts = intf::get_timestamp() + expiration_us;
receipt_manager.record_receipt(receipt, exp_ts, expected_returns, callback);
Ok(out)
}
pub fn generate_single_shot_receipt<D: AsRef<[u8]>>(
&self,
expiration_us: u64,
extra_data: D,
) -> Result<(Vec<u8>, EventualValueCloneFuture<ReceiptEvent>), String> {
let receipt_manager = self.receipt_manager();
let routing_table = self.routing_table();
// Generate receipt and serialized form to return
let nonce = Crypto::get_random_nonce();
let receipt = Receipt::try_new(0, nonce, routing_table.node_id(), extra_data)?;
let out = receipt
.to_signed_data(&routing_table.node_id_secret())
.map_err(|_| "failed to generate signed receipt".to_owned())?;
// Record the receipt for later
let exp_ts = intf::get_timestamp() + expiration_us;
        let eventual = SingleShotEventual::new(ReceiptEvent::Cancelled);
let instance = eventual.instance();
receipt_manager.record_single_shot_receipt(receipt, exp_ts, eventual);
Ok((out, instance))
}
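
    // Usage sketch for the single-shot form (values hypothetical; assumes an
    // async context): hand rcpt_data to the remote party out-of-band, then
    // await the eventual to learn the outcome.
    //
    //     let (rcpt_data, rcpt_future) =
    //         nm.generate_single_shot_receipt(ms_to_us(5_000), [])?;
    //     // ... send rcpt_data to the other node ...
    //     match rcpt_future.await {
    //         ReceiptEvent::Returned => { /* receipt came back in time */ }
    //         ReceiptEvent::Expired | ReceiptEvent::Cancelled => { /* give up */ }
    //     }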
// Process a received out-of-band receipt
pub async fn process_receipt<R: AsRef<[u8]>>(&self, receipt_data: R) -> Result<(), String> {
let receipt_manager = self.receipt_manager();
let receipt = Receipt::from_signed_data(receipt_data.as_ref())
.map_err(|_| "failed to parse signed receipt".to_owned())?;
receipt_manager.handle_receipt(receipt).await
}
// Builds an envelope for sending over the network
fn build_envelope<B: AsRef<[u8]>>(
&self,
dest_node_id: key::DHTKey,
version: u8,
body: B,
) -> Result<Vec<u8>, String> {
// DH to get encryption key
let routing_table = self.routing_table();
let node_id = routing_table.node_id();
let node_id_secret = routing_table.node_id_secret();
// Get timestamp, nonce
let ts = intf::get_timestamp();
let nonce = Crypto::get_random_nonce();
// Encode envelope
let envelope = Envelope::new(version, ts, nonce, node_id, dest_node_id);
envelope
.to_encrypted_data(self.crypto.clone(), body.as_ref(), &node_id_secret)
.map_err(|_| "envelope failed to encode".to_owned())
}
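
    // Envelope round trip as used in this file (sketch; identifiers are
    // placeholders): the sender encrypts with its node secret, the receiver
    // validates the header signature and decrypts with its own secret.
    //
    //     let data = nm.build_envelope(dest_node_id, MAX_VERSION, b"body")?;
    //     // ... data travels over the network ...
    //     let envelope = Envelope::from_signed_data(&data)?;
    //     let body = envelope.decrypt_body(nm.crypto(), &data, &node_id_secret)?;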
// Called by the RPC handler when we want to issue an RPC request or response
    // node_ref is the direct destination to which the envelope will be sent
    // If 'node_id' is specified, it can be different from node_ref.node_id(),
    // which will cause the envelope to be relayed
pub async fn send_envelope<B: AsRef<[u8]>>(
&self,
node_ref: NodeRef,
        node_id: Option<DHTKey>,
body: B,
) -> Result<(), String> {
        if let Some(node_id) = node_id {
            log_net!("sending envelope to {:?} via {:?}", node_id, node_ref);
        } else {
            log_net!("sending envelope to {:?}", node_ref);
        }
// Get node's min/max version and see if we can send to it
// and if so, get the max version we can use
let version = if let Some((node_min, node_max)) = node_ref.operate(|e| e.min_max_version())
{
            #[allow(clippy::absurd_extreme_comparisons)]
if node_min > MAX_VERSION || node_max < MIN_VERSION {
return Err(format!(
"can't talk to this node {} because version is unsupported: ({},{})",
node_ref.node_id(),
node_min,
node_max
                ))
                .map_err(logthru_rpc!(warn));
}
cmp::min(node_max, MAX_VERSION)
} else {
MAX_VERSION
};
// Build the envelope to send
        let out = self
            .build_envelope(node_id.unwrap_or_else(|| node_ref.node_id()), version, body)
            .map_err(logthru_rpc!(error))?;
// Send the envelope via whatever means necessary
self.send_data(node_ref, out).await
2021-11-22 16:28:30 +00:00
}
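
    // Version negotiation sketch (numbers hypothetical): a peer advertising
    // (min=0, max=3) against our MIN_VERSION=0/MAX_VERSION=0 overlaps, so we
    // send cmp::min(3, MAX_VERSION) = version 0; a peer advertising
    // (min=4, max=5) has no overlap and is rejected before an envelope is built.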
    // Called by the RPC handler when we want to issue a direct receipt
pub async fn send_direct_receipt<B: AsRef<[u8]>>(
&self,
        dial_info: DialInfo,
rcpt_data: B,
alternate_port: bool,
) -> Result<(), String> {
// Validate receipt before we send it, otherwise this may be arbitrary data!
let _ = Receipt::from_signed_data(rcpt_data.as_ref())
.map_err(|_| "failed to validate direct receipt".to_owned())?;
// Send receipt directly
if alternate_port {
self.net()
.send_data_unbound_to_dial_info(dial_info, rcpt_data.as_ref().to_vec())
.await
} else {
self.net()
.send_data_to_dial_info(dial_info, rcpt_data.as_ref().to_vec())
.await
}
}
    // Figure out how to reach a node
    // Node info here must be the filtered kind, containing only dial info
    // that is reachable by our protocol configuration
    fn get_inbound_method(&self, node_info: &NodeInfo) -> Result<InboundMethod, String> {
// Get our network class
let network_class = self.get_network_class().unwrap_or(NetworkClass::Invalid);
// If we don't have a network class yet (no public dial info or haven't finished detection)
// then we just need to try to send to the best direct dial info because we won't
// know how to use relays effectively yet
if matches!(network_class, NetworkClass::Invalid) {
return Ok(InboundMethod::Direct);
}
// Get the protocol of the best matching direct dial info
let protocol_type = node_info.dial_info_list.first().map(|d| d.protocol_type());
// Can the target node do inbound?
if node_info.network_class.inbound_capable() {
// Do we need to signal before going inbound?
if node_info.network_class.inbound_requires_signal() {
// Can we receive a direct reverse connection?
if network_class.inbound_capable() && !network_class.inbound_requires_signal() {
Ok(InboundMethod::SignalReverse)
}
// Is this a hole-punch capable protocol?
else if protocol_type == Some(ProtocolType::UDP) {
Ok(InboundMethod::SignalHolePunch)
}
// Otherwise we have to relay
else {
Ok(InboundMethod::Relay)
}
}
// Can go direct
else {
Ok(InboundMethod::Direct)
}
        } else {
            // If the other node is not inbound capable at all, it requires a relay
            Ok(InboundMethod::Relay)
}
}
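
    // Decision summary for the above (ours = our network class, theirs = the
    // target's filtered node info):
    //   theirs inbound-capable, no signal required          -> Direct
    //   theirs requires signal, we accept inbound directly  -> SignalReverse
    //   theirs requires signal, best protocol is UDP        -> SignalHolePunch
    //   theirs requires signal, nothing else applies        -> Relay
    //   theirs not inbound-capable at all                   -> Relay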
// Send a reverse connection signal and wait for the return receipt over it
// Then send the data across the new connection
    pub async fn do_reverse_connect(
        &self,
        node_ref: NodeRef,
        best_node_info: &NodeInfo,
        data: Vec<u8>,
    ) -> Result<(), String> {
        // Get relay to signal from
        let relay_nr = if let Some(rpi) = best_node_info.relay_peer_info.clone() {
            // Get the noderef for this inbound relay
            self.routing_table()
                .register_node_with_node_info(rpi.node_id.key, rpi.node_info)?
        } else {
            // If we don't have relay peer info that matches our protocol configuration
            // then we can't send to this node!
            return Err("Can't send to this relay".to_owned());
        };
// Get the receipt timeout
let receipt_time = ms_to_us(self.config.get().network.reverse_connection_receipt_time_ms);
// Build a return receipt for the signal
let (rcpt_data, eventual_value) = self
.generate_single_shot_receipt(receipt_time, [])
.map_err(map_error_string!())?;
        // Issue the signal via the relay
        // (assumed call shape; the exact signal RPC arguments are still being
        // built out in this revision)
        let rpc = self.rpc_processor();
        rpc.rpc_call_signal(relay_nr, rcpt_data)
            .await
            .map_err(map_error_string!())?;
        // Wait for the return receipt
        match eventual_value.await {
            ReceiptEvent::Returned => (),
            ReceiptEvent::Expired => {
                return Err("receipt expired before being returned".to_owned());
            }
            ReceiptEvent::Cancelled => {
                return Err("receipt was cancelled before being returned".to_owned());
            }
        };
        // And now use the connection the target made back to us to send the data
        if let Some(descriptor) = node_ref.last_connection() {
            if self
                .net()
                .send_data_to_existing_connection(descriptor, data)
                .await
                .map_err(logthru_net!())?
                .is_none()
            {
                return Ok(());
            }
        }
        Err("no reverse connection available to send over".to_owned())
}
// Send a hole punch signal and do a negotiating ping and wait for the return receipt
// Then send the data across the new connection
    pub async fn do_hole_punch(&self, best_node_info: &NodeInfo, data: Vec<u8>) -> Result<(), String> {
        if let Some(relay_dial_info) = best_node_info.relay_dial_info_list.first() {
self.net()
.do_hole_punch(relay_dial_info.clone(), data)
.await
.map_err(logthru_net!())
} else {
// If we don't have a relay dial info that matches our protocol configuration
// then we can't send to this node!
Err("Can't send to this node yet".to_owned())
}
}
// Send raw data to a node
//
// We may not have dial info for a node, but have an existing connection for it
// because an inbound connection happened first, and no FindNodeQ has happened to that
// node yet to discover its dial info. The existing connection should be tried first
// in this case.
//
// Sending to a node requires determining a NetworkClass compatible mechanism
//
pub fn send_data(&self, node_ref: NodeRef, data: Vec<u8>) -> SystemPinBoxFuture<Result<(), String>> {
let this = self.clone();
Box::pin(async move {
// First try to send data to the last socket we've seen this peer on
let data = if let Some(descriptor) = node_ref.last_connection() {
match this
.net()
.send_data_to_existing_connection(descriptor, data)
.await
.map_err(logthru_net!())?
{
None => {
return Ok(());
}
Some(d) => d,
}
} else {
data
};
// If we don't have last_connection, try to reach out to the peer via its dial info
            let best_node_info = match node_ref.best_node_info() {
                Some(ni) => ni,
                None => {
                    // If neither this node nor its relay is reachable by any
                    // of our protocols, we have to go through our outbound relay
                    if let Some(relay_node) = this.relay_node() {
                        // We have an outbound relay, let's use it
                        return this.send_data(relay_node, data).await;
                    } else {
                        // We have no way to reach the node nor an outbound relay to use
                        return Err("Can't reach this node".to_owned());
                    }
                }
            };
// If we aren't using an outbound relay to reach this node, what inbound method do we use?
match this.get_inbound_method(&best_node_info)? {
InboundMethod::Direct => {
if let Some(dial_info) = best_node_info.dial_info_list.first() {
this.net()
.send_data_to_dial_info(dial_info.clone(), data)
.await
.map_err(logthru_net!())
} else {
// If we don't have a direct dial info that matches our protocol configuration
// then we can't send to this node!
Err("Can't send to this node yet".to_owned())
}
}
                InboundMethod::SignalReverse => {
                    this.do_reverse_connect(node_ref.clone(), &best_node_info, data)
                        .await
                }
                InboundMethod::SignalHolePunch => this.do_hole_punch(&best_node_info, data).await,
InboundMethod::Relay => {
if let Some(rpi) = best_node_info.relay_peer_info {
                        // Get the noderef for this inbound relay
                        let inbound_relay_noderef = this
                            .routing_table()
                            .register_node_with_node_info(rpi.node_id.key, rpi.node_info)?;
// Send to the inbound relay
this.send_data(inbound_relay_noderef, data).await
} else {
// If we don't have a relay dial info that matches our protocol configuration
// then we can't send to this node!
Err("Can't send to this relay".to_owned())
}
}
}
})
}
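
    // Call-path sketch: send_envelope builds the encrypted payload and hands
    // it to send_data, which prefers an existing connection, then the method
    // chosen by get_inbound_method, normally recursing at most one level
    // through an inbound or outbound relay NodeRef.
    //
    //     let out = nm.build_envelope(node_ref.node_id(), version, body)?;
    //     nm.send_data(node_ref, out).await?;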
// Called when a packet potentially containing an RPC envelope is received by a low-level
// network protocol handler. Processes the envelope, authenticates and decrypts the RPC message
// and passes it to the RPC handler
pub async fn on_recv_envelope(
&self,
data: &[u8],
        descriptor: ConnectionDescriptor,
    ) -> Result<bool, String> {
log_net!(
"envelope of {} bytes received from {:?}",
data.len(),
descriptor
);
        // Network accounting
        self.stats_packet_rcvd(descriptor.remote.to_socket_addr().ip(), data.len() as u64);
        // Is this an out-of-band receipt instead of an envelope?
        if data.len() >= 4 && data[0..4] == *RECEIPT_MAGIC {
            self.process_receipt(data).await?;
return Ok(true);
}
// Decode envelope header (may fail signature validation)
        let envelope =
            Envelope::from_signed_data(data).map_err(|_| "envelope failed to decode".to_owned())?;
// Get routing table and rpc processor
let (routing_table, rpc) = {
let inner = self.inner.lock();
(
inner.routing_table.as_ref().unwrap().clone(),
inner.components.as_ref().unwrap().rpc_processor.clone(),
)
};
// Get timestamp range
let (tsbehind, tsahead) = {
let c = self.config.get();
(
                c.network.rpc.max_timestamp_behind_ms.map(ms_to_us),
                c.network.rpc.max_timestamp_ahead_ms.map(ms_to_us),
)
};
// Validate timestamp isn't too old
let ts = intf::get_timestamp();
let ets = envelope.get_timestamp();
if let Some(tsbehind) = tsbehind {
if tsbehind > 0 && (ts > ets && ts - ets > tsbehind) {
                return Err(format!(
                    "envelope time was too far in the past: {}ms",
                    timestamp_to_secs(ts - ets) * 1000f64
                ));
}
}
if let Some(tsahead) = tsahead {
if tsahead > 0 && (ts < ets && ets - ts > tsahead) {
                return Err(format!(
                    "envelope time was too far in the future: {}ms",
                    timestamp_to_secs(ets - ts) * 1000f64
                ));
}
}
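
        // Window check in isolation (all values in microseconds; numbers
        // hypothetical): a packet 7s behind a 5s limit is rejected.
        //
        //     let (ts, ets, tsbehind) = (10_000_000u64, 3_000_000u64, 5_000_000u64);
        //     assert!(ts > ets && ts - ets > tsbehind); // too far in the past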
// Peek at header and see if we need to relay this
// If the recipient id is not our node id, then it needs relaying
let sender_id = envelope.get_sender_id();
let recipient_id = envelope.get_recipient_id();
if recipient_id != routing_table.node_id() {
// See if the source node is allowed to resolve nodes
// This is a costly operation, so only outbound-relay permitted
// nodes are allowed to do this, for example PWA users
let relay_nr = if self.check_client_whitelist(sender_id) {
// Full relay allowed, do a full resolve_node
rpc.resolve_node(recipient_id).await.map_err(|e| {
format!(
"failed to resolve recipient node for relay, dropping outbound relayed packet...: {:?}",
e
)
}).map_err(logthru_net!())?
} else {
// If this is not a node in the client whitelist, only allow inbound relay
// which only performs a lightweight lookup before passing the packet back out
// See if we have the node in our routing table
// We should, because relays are chosen by nodes that have established connectivity and
            // should be mutually in each other's routing tables. The node needing the relay will be
// pinging this node regularly to keep itself in the routing table
            routing_table.lookup_node_ref(recipient_id).ok_or_else(|| {
                format!(
                    "Inbound relay asked for recipient not in routing table: {}",
                    recipient_id
                )
            })?
};
// Relay the packet to the desired destination
self.send_data(relay_nr, data.to_vec())
.await
.map_err(|e| format!("failed to forward envelope: {}", e))?;
// Inform caller that we dealt with the envelope, but did not process it locally
return Ok(false);
}
// DH to get decryption key (cached)
let node_id_secret = routing_table.node_id_secret();
// Decrypt the envelope body
// xxx: punish nodes that send messages that fail to decrypt eventually
let body = envelope
.decrypt_body(self.crypto(), data, &node_id_secret)
.map_err(|_| "failed to decrypt envelope body".to_owned())?;
        // Cache the envelope information in the routing table
        let source_noderef = routing_table
            .register_node_with_existing_connection(envelope.get_sender_id(), descriptor, ts)
            .map_err(|e| format!("node id registration failed: {}", e))?;
        source_noderef.operate(|e| e.set_min_max_version(envelope.get_min_max_version()));
// xxx: deal with spoofing and flooding here?
// Pass message to RPC system
rpc.enqueue_message(envelope, body, source_noderef)
            .map_err(|e| format!("enqueuing rpc message failed: {}", e))?;
// Inform caller that we dealt with the envelope locally
Ok(true)
}
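
    // Receive-path summary: a packet is either an out-of-band receipt
    // (RECEIPT_MAGIC prefix), an envelope addressed to us (decrypted and
    // enqueued to the RPC system), or an envelope for another node (relayed
    // via send_data). The returned bool means "processed locally": true for
    // receipts and local envelopes, false for relayed ones.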
    // Keep relays assigned and accessible
    async fn relay_management_task_routine(self, _last_ts: u64, cur_ts: u64) -> Result<(), String> {
log_net!("--- network manager relay_management task");
// Get our node's current network class and do the right thing
let network_class = self.get_network_class();
// Do we know our network class yet?
if let Some(network_class) = network_class {
let routing_table = self.routing_table();
// If we already have a relay, see if it is dead, or if we don't need it any more
{
let mut inner = self.inner.lock();
if let Some(relay_node) = inner.relay_node.clone() {
let state = relay_node.operate(|e| e.state(cur_ts));
if matches!(state, BucketEntryState::Dead) || !network_class.needs_relay() {
// Relay node is dead or no longer needed
inner.relay_node = None;
}
}
}
// Do we need an outbound relay?
if network_class.outbound_wants_relay() {
// The outbound relay is the host of the PWA
if let Some(outbound_relay_peerinfo) = intf::get_outbound_relay_peer().await {
let mut inner = self.inner.lock();
// Register new outbound relay
                    let nr = routing_table.register_node_with_node_info(
                        outbound_relay_peerinfo.node_id.key,
                        outbound_relay_peerinfo.node_info,
                    )?;
inner.relay_node = Some(nr);
}
} else if network_class.needs_relay() {
// Find a node in our routing table that is an acceptable inbound relay
if let Some(nr) = routing_table.find_inbound_relay(cur_ts) {
let mut inner = self.inner.lock();
inner.relay_node = Some(nr);
}
}
} else {
// If we don't know our network class, we do nothing here and wait until we do
}
Ok(())
}
    // Compute transfer statistics for the low-level network
async fn rolling_transfers_task_routine(self, last_ts: u64, cur_ts: u64) -> Result<(), String> {
log_net!("--- network manager rolling_transfers task");
let inner = &mut *self.inner.lock();
// Roll the low level network transfer stats for our address
inner
.stats
.self_stats
.transfer_stats_accounting
.roll_transfers(last_ts, cur_ts, &mut inner.stats.self_stats.transfer_stats);
// Roll all per-address transfers
        let mut dead_addrs: HashSet<PerAddressStatsKey> = HashSet::new();
for (addr, stats) in &mut inner.stats.per_address_stats {
stats.transfer_stats_accounting.roll_transfers(
last_ts,
cur_ts,
&mut stats.transfer_stats,
);
            // While we're here, let's see if this address has timed out
if cur_ts - stats.last_seen_ts >= IPADDR_MAX_INACTIVE_DURATION_US {
// it's dead, put it in the dead list
dead_addrs.insert(*addr);
}
}
// Remove the dead addresses from our tables
for da in &dead_addrs {
inner.stats.per_address_stats.remove(da);
}
Ok(())
}
    // Callbacks from the low-level network for statistics gathering
    pub fn stats_packet_sent(&self, addr: IpAddr, bytes: u64) {
let inner = &mut *self.inner.lock();
inner
.stats
.self_stats
.transfer_stats_accounting
.add_up(bytes);
inner
.stats
.per_address_stats
            .entry(PerAddressStatsKey(addr))
            .or_insert(PerAddressStats::default())
.transfer_stats_accounting
.add_up(bytes);
}
    pub fn stats_packet_rcvd(&self, addr: IpAddr, bytes: u64) {
let inner = &mut *self.inner.lock();
inner
.stats
.self_stats
.transfer_stats_accounting
.add_down(bytes);
inner
.stats
.per_address_stats
            .entry(PerAddressStatsKey(addr))
            .or_insert(PerAddressStats::default())
.transfer_stats_accounting
.add_down(bytes);
}
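
    // Accounting sketch: each packet is counted twice, once in self_stats for
    // the whole node and once in the per-address LRU entry keyed by IpAddr,
    // so rolling_transfers_task_routine can roll both views and expire idle
    // addresses.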
}