commit 0b059e0ef9
parent 847623f2b4

checkpoint
@@ -521,9 +521,10 @@ struct Answer @0xacacb8b6988c1058 {
struct Operation @0xbf2811c435403c3b {
opId @0 :UInt64; # Random RPC ID. Must be random to foil reply forgery attacks.
senderNodeInfo @1 :SignedNodeInfo; # (optional) SignedNodeInfo for the sender to be cached by the receiver.
targetNodeInfoTs @2 :UInt64; # Timestamp the sender believes the target's node info to be at or zero if not sent
kind :union {
question @2 :Question;
statement @3 :Statement;
answer @4 :Answer;
question @3 :Question;
statement @4 :Statement;
answer @5 :Answer;
}
}
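
The new targetNodeInfoTs @2 field takes ordinal 2, and since Cap'n Proto numbers union members in the same ordinal space as the rest of the containing struct, the union members shift from @2/@3/@4 to @3/@4/@5. On the Rust side this header decodes into the RPCOperation struct that appears later in this commit; the following is a minimal, self-contained sketch of that shape only (SignedNodeInfo and the kind payloads here are stand-ins, not the real types):

#[allow(dead_code)]
struct SignedNodeInfo; // stand-in for the real signed node info type

#[allow(dead_code)]
enum RPCOperationKind {
    Question,
    Statement,
    Answer,
}

#[allow(dead_code)]
struct RPCOperation {
    op_id: u64,                               // opId @0: random, foils reply forgery
    sender_node_info: Option<SignedNodeInfo>, // senderNodeInfo @1: optional
    target_node_info_ts: u64,                 // targetNodeInfoTs @2: zero if not sent
    kind: RPCOperationKind,                   // kind union: members now @3/@4/@5
}

fn main() {
    let op = RPCOperation {
        op_id: 0x1234_5678_9abc_def0,
        sender_node_info: None,
        target_node_info_ts: 0, // zero means "not sent"
        kind: RPCOperationKind::Question,
    };
    assert_eq!(op.target_node_info_ts, 0);
}
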
@@ -448,9 +448,17 @@ impl NetworkManager {

/// Get our node's capabilities in the PublicInternet routing domain
fn generate_public_internet_node_status(&self) -> PublicInternetNodeStatus {
let own_peer_info = self
let Some(own_peer_info) = self
.routing_table()
.get_own_peer_info(RoutingDomain::PublicInternet);
.get_own_peer_info(RoutingDomain::PublicInternet) else {
return PublicInternetNodeStatus {
will_route: false,
will_tunnel: false,
will_signal: false,
will_relay: false,
will_validate_dial_info: false,
};
};
let own_node_info = own_peer_info.signed_node_info.node_info();

let will_route = own_node_info.can_inbound_relay(); // xxx: eventually this may have more criteria added

@@ -469,9 +477,14 @@ impl NetworkManager {
}
/// Get our node's capabilities in the LocalNetwork routing domain
fn generate_local_network_node_status(&self) -> LocalNetworkNodeStatus {
let own_peer_info = self
let Some(own_peer_info) = self
.routing_table()
.get_own_peer_info(RoutingDomain::LocalNetwork);
.get_own_peer_info(RoutingDomain::LocalNetwork) else {
return LocalNetworkNodeStatus {
will_relay: false,
will_validate_dial_info: false,
};
};

let own_node_info = own_peer_info.signed_node_info.node_info();
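
Both status generators now cope with get_own_peer_info() returning Option<PeerInfo>: if our own node info is not yet valid they return an all-false status instead of assuming the peer info exists. A minimal standalone sketch of that let ... else fallback pattern, with simplified stand-in types rather than the real ones:

#[derive(Debug, Default)]
struct PublicInternetNodeStatus {
    will_route: bool,
    will_tunnel: bool,
    will_signal: bool,
    will_relay: bool,
    will_validate_dial_info: bool,
}

// Stand-in for routing_table().get_own_peer_info(): None until our node info is valid.
fn get_own_peer_info() -> Option<&'static str> {
    None
}

fn generate_public_internet_node_status() -> PublicInternetNodeStatus {
    // If our own peer info is not valid yet, report no capabilities at all.
    let Some(_own_peer_info) = get_own_peer_info() else {
        return PublicInternetNodeStatus::default();
    };
    // ...otherwise the capabilities would be derived from our own node info here.
    PublicInternetNodeStatus {
        will_route: true,
        ..Default::default()
    }
}

fn main() {
    println!("{:?}", generate_public_internet_node_status());
}
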
@@ -833,10 +846,17 @@ impl NetworkManager {
);
let (receipt, eventual_value) = self.generate_single_shot_receipt(receipt_timeout, [])?;

// Get target routing domain
let Some(routing_domain) = target_nr.best_routing_domain() else {
return Ok(NetworkResult::no_connection_other("No routing domain for target"));
};

// Get our peer info
let peer_info = self
let Some(peer_info) = self
.routing_table()
.get_own_peer_info(RoutingDomain::PublicInternet);
.get_own_peer_info(routing_domain) else {
return Ok(NetworkResult::no_connection_other("Own peer info not available"));
};

// Issue the signal
let rpc = self.rpc_processor();

@@ -900,17 +920,11 @@ impl NetworkManager {
data: Vec<u8>,
) -> EyreResult<NetworkResult<ConnectionDescriptor>> {
// Ensure we are filtered down to UDP (the only hole punch protocol supported today)
// and only in the PublicInternet routing domain
assert!(target_nr
.filter_ref()
.map(|nrf| nrf.dial_info_filter.protocol_type_set
== ProtocolTypeSet::only(ProtocolType::UDP))
.unwrap_or_default());
assert!(target_nr
.filter_ref()
.map(|nrf| nrf.routing_domain_set
== RoutingDomainSet::only(RoutingDomain::PublicInternet))
.unwrap_or_default());

// Build a return receipt for the signal
let receipt_timeout = ms_to_us(

@@ -921,10 +935,18 @@ impl NetworkManager {
.hole_punch_receipt_time_ms,
);
let (receipt, eventual_value) = self.generate_single_shot_receipt(receipt_timeout, [])?;

// Get target routing domain
let Some(routing_domain) = target_nr.best_routing_domain() else {
return Ok(NetworkResult::no_connection_other("No routing domain for target"));
};

// Get our peer info
let peer_info = self
let Some(peer_info) = self
.routing_table()
.get_own_peer_info(RoutingDomain::PublicInternet);
.get_own_peer_info(routing_domain) else {
return Ok(NetworkResult::no_connection_other("Own peer info not available"));
};

// Get the udp direct dialinfo for the hole punch
let hole_punch_did = target_nr

@@ -1016,7 +1038,8 @@ impl NetworkManager {
};

// Node A is our own node
let peer_a = routing_table.get_own_peer_info(routing_domain);
// Use whatever node info we've calculated so far
let peer_a = routing_table.get_best_effort_own_peer_info(routing_domain);

// Node B is the target node
let peer_b = match target_node_ref.make_peer_info(routing_domain) {

@@ -710,8 +710,8 @@ impl Network {
}

ProtocolConfig {
inbound,
outbound,
inbound,
family_global,
family_local,
}

@@ -758,13 +758,13 @@ impl Network {
// if we have static public dialinfo, upgrade our network class

editor_public_internet.setup_network(
protocol_config.inbound,
protocol_config.outbound,
protocol_config.inbound,
protocol_config.family_global,
);
editor_local_network.setup_network(
protocol_config.inbound,
protocol_config.outbound,
protocol_config.inbound,
protocol_config.family_local,
);
let detect_address_changes = {

@@ -252,7 +252,7 @@ impl Network {

pub async fn startup(&self) -> EyreResult<()> {
// get protocol config
self.inner.lock().protocol_config = {
let protocol_config = {
let c = self.config.get();
let inbound = ProtocolTypeSet::new();
let mut outbound = ProtocolTypeSet::new();

@@ -269,12 +269,30 @@ impl Network {
let family_local = AddressTypeSet::all();

ProtocolConfig {
inbound,
outbound,
inbound,
family_global,
family_local,
}
};
self.inner.lock().protocol_config = protocol_config;

// Start editing routing table
let mut editor_public_internet = self
.unlocked_inner
.routing_table
.edit_routing_domain(RoutingDomain::PublicInternet);

// set up the routing table's network config
// if we have static public dialinfo, upgrade our network class
editor_public_internet.setup_network(
protocol_config.outbound,
protocol_config.inbound,
protocol_config.family_global,
);

// commit routing table edits
editor_public_internet.commit().await;

self.inner.lock().network_started = true;
Ok(())
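
The startup hunk above now binds the freshly built ProtocolConfig to a local first, stores it into the locked inner state, and then reuses the local copy to configure the PublicInternet routing domain without taking the lock again; the setup_network calls also pass outbound before inbound, matching the argument swap in the other hunks. A rough standalone sketch of that shape, with simplified stand-in types:

use std::sync::Mutex;

// Simplified stand-ins for the real ProtocolConfig and Network inner state.
#[derive(Clone, Default, Debug)]
struct ProtocolConfig {
    outbound: u8,
    inbound: u8,
    family_global: u8,
}

#[derive(Default)]
struct Inner {
    protocol_config: ProtocolConfig,
    network_started: bool,
}

fn main() {
    let inner = Mutex::new(Inner::default());

    // Build the config once...
    let protocol_config = ProtocolConfig {
        outbound: 1,
        inbound: 2,
        family_global: 3,
    };

    // ...store it under the lock for later readers...
    inner.lock().unwrap().protocol_config = protocol_config.clone();

    // ...and reuse the local copy (no second lock needed) to set up the routing
    // domain, passing outbound before inbound as in the setup_network call above.
    println!(
        "setup_network({}, {}, {})",
        protocol_config.outbound, protocol_config.inbound, protocol_config.family_global
    );

    // Finally mark the network as started, as the hunk above does.
    let mut guard = inner.lock().unwrap();
    guard.network_started = true;
    println!("{:?} started={}", guard.protocol_config, guard.network_started);
}
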
@@ -304,11 +322,6 @@ impl Network {
editor.clear_dial_info_details();
editor.commit().await;

let mut editor = routing_table.edit_routing_domain(RoutingDomain::LocalNetwork);
editor.disable_node_info_updates();
editor.clear_dial_info_details();
editor.commit().await;

// Cancels all async background tasks by dropping join handles
*self.inner.lock() = Self::new_inner();

@@ -50,8 +50,8 @@ pub struct LastConnectionKey(ProtocolType, AddressType);
pub struct BucketEntryPublicInternet {
/// The PublicInternet node info
signed_node_info: Option<Box<SignedNodeInfo>>,
/// If this node has seen our publicinternet node info
seen_our_node_info: bool,
/// The last node info timestamp of ours that this entry has seen
last_seen_our_node_info_ts: u64,
/// Last known node status
node_status: Option<PublicInternetNodeStatus>,
}

@@ -62,8 +62,8 @@ pub struct BucketEntryPublicInternet {
pub struct BucketEntryLocalNetwork {
/// The LocalNetwork node info
signed_node_info: Option<Box<SignedNodeInfo>>,
/// If this node has seen our localnetwork node info
seen_our_node_info: bool,
/// The last node info timestamp of ours that this entry has seen
last_seen_our_node_info_ts: u64,
/// Last known node status
node_status: Option<LocalNetworkNodeStatus>,
}

@@ -427,21 +427,29 @@ impl BucketEntryInner {
}
}

pub fn set_seen_our_node_info(&mut self, routing_domain: RoutingDomain, seen: bool) {
pub fn set_our_node_info_ts(&mut self, routing_domain: RoutingDomain, seen_ts: u64) {
match routing_domain {
RoutingDomain::LocalNetwork => {
self.local_network.seen_our_node_info = seen;
self.local_network.last_seen_our_node_info_ts = seen_ts;
}
RoutingDomain::PublicInternet => {
self.public_internet.seen_our_node_info = seen;
self.public_internet.last_seen_our_node_info_ts = seen_ts;
}
}
}

pub fn has_seen_our_node_info(&self, routing_domain: RoutingDomain) -> bool {
pub fn has_seen_our_node_info_ts(
&self,
routing_domain: RoutingDomain,
our_node_info_ts: u64,
) -> bool {
match routing_domain {
RoutingDomain::LocalNetwork => self.local_network.seen_our_node_info,
RoutingDomain::PublicInternet => self.public_internet.seen_our_node_info,
RoutingDomain::LocalNetwork => {
our_node_info_ts == self.local_network.last_seen_our_node_info_ts
}
RoutingDomain::PublicInternet => {
our_node_info_ts == self.public_internet.last_seen_our_node_info_ts
}
}
}
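
Throughout this commit the per-entry boolean "has this peer seen our node info" becomes a timestamp: an entry only counts as up to date if the timestamp it last saw equals the timestamp of the node info we currently publish. A minimal sketch of that comparison, using a stand-in struct rather than the real BucketEntryInner:

// Sketch of the timestamp check that replaces the old boolean flag.
#[derive(Default)]
struct Entry {
    last_seen_our_node_info_ts: u64, // 0 = has never seen any of our node info
}

impl Entry {
    fn set_our_node_info_ts(&mut self, seen_ts: u64) {
        self.last_seen_our_node_info_ts = seen_ts;
    }
    /// True only if the peer has seen exactly the node info we currently publish.
    fn has_seen_our_node_info_ts(&self, our_node_info_ts: u64) -> bool {
        our_node_info_ts == self.last_seen_our_node_info_ts
    }
}

fn main() {
    let mut entry = Entry::default();
    assert!(!entry.has_seen_our_node_info_ts(1_000));

    entry.set_our_node_info_ts(1_000);
    assert!(entry.has_seen_our_node_info_ts(1_000));

    // As soon as our node info changes (new timestamp), the peer is stale again
    // and will be sent the updated signed node info on the next RPC.
    assert!(!entry.has_seen_our_node_info_ts(2_000));
    println!("ok");
}
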
@@ -680,12 +688,12 @@ impl BucketEntry {
updated_since_last_network_change: false,
last_connections: BTreeMap::new(),
local_network: BucketEntryLocalNetwork {
seen_our_node_info: false,
last_seen_our_node_info_ts: 0,
signed_node_info: None,
node_status: None,
},
public_internet: BucketEntryPublicInternet {
seen_our_node_info: false,
last_seen_our_node_info_ts: 0,
signed_node_info: None,
node_status: None,
},

@@ -374,15 +374,30 @@ impl RoutingTable {
}

/// Return a copy of our node's peerinfo
pub fn get_own_peer_info(&self, routing_domain: RoutingDomain) -> PeerInfo {
pub fn get_own_peer_info(&self, routing_domain: RoutingDomain) -> Option<PeerInfo> {
self.inner.read().get_own_peer_info(routing_domain)
}

/// Return the best effort copy of our node's peerinfo
/// This may be invalid and should not be passed to other nodes,
/// but may be used for contact method calculation
pub fn get_best_effort_own_peer_info(&self, routing_domain: RoutingDomain) -> PeerInfo {
self.inner
.read()
.get_best_effort_own_peer_info(routing_domain)
}

/// If we have a valid network class in this routing domain, then our 'NodeInfo' is valid
/// If this is true, we can get our final peer info, otherwise we only have a 'best effort' peer info
pub fn has_valid_own_node_info(&self, routing_domain: RoutingDomain) -> bool {
self.inner.read().has_valid_own_node_info(routing_domain)
}

/// Return our current node info timestamp
pub fn get_own_node_info_ts(&self, routing_domain: RoutingDomain) -> Option<u64> {
self.inner.read().get_own_node_info_ts(routing_domain)
}

/// Return the domain's currently registered network class
pub fn get_network_class(&self, routing_domain: RoutingDomain) -> Option<NetworkClass> {
self.inner.read().get_network_class(routing_domain)
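
The RoutingTable API above now distinguishes a validated peer info (get_own_peer_info, None until the network class is known, and the only one safe to hand to other nodes) from a best-effort one (get_best_effort_own_peer_info, always available but only for local contact-method calculation). A small sketch of that contrast, with stand-in types:

#[derive(Clone, Debug)]
struct PeerInfo {
    valid: bool,
}

struct RoutingTable {
    current: PeerInfo,
}

impl RoutingTable {
    // None until our own node info is valid; safe to share with other nodes.
    fn get_own_peer_info(&self) -> Option<PeerInfo> {
        if self.current.valid {
            Some(self.current.clone())
        } else {
            None
        }
    }
    // Always returns something, but it may be invalid; never send this to other nodes.
    fn get_best_effort_own_peer_info(&self) -> PeerInfo {
        self.current.clone()
    }
}

fn main() {
    let rt = RoutingTable {
        current: PeerInfo { valid: false },
    };
    assert!(rt.get_own_peer_info().is_none());
    println!("best effort: {:?}", rt.get_best_effort_own_peer_info());
}
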
@@ -143,11 +143,22 @@ pub trait NodeRefBase: Sized {
.unwrap_or(false)
})
}
fn has_seen_our_node_info(&self, routing_domain: RoutingDomain) -> bool {
self.operate(|_rti, e| e.has_seen_our_node_info(routing_domain))
fn node_info_ts(&self, routing_domain: RoutingDomain) -> u64 {
self.operate(|_rti, e| {
e.signed_node_info(routing_domain)
.map(|sni| sni.timestamp())
.unwrap_or(0u64)
})
}
fn set_seen_our_node_info(&self, routing_domain: RoutingDomain) {
self.operate_mut(|_rti, e| e.set_seen_our_node_info(routing_domain, true));
fn has_seen_our_node_info_ts(
&self,
routing_domain: RoutingDomain,
our_node_info_ts: u64,
) -> bool {
self.operate(|_rti, e| e.has_seen_our_node_info_ts(routing_domain, our_node_info_ts))
}
fn set_our_node_info_ts(&self, routing_domain: RoutingDomain, seen_ts: u64) {
self.operate_mut(|_rti, e| e.set_our_node_info_ts(routing_domain, seen_ts));
}
fn network_class(&self, routing_domain: RoutingDomain) -> Option<NetworkClass> {
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.network_class))

@@ -204,7 +204,7 @@ pub struct RemotePrivateRouteInfo {
// The private route itself
private_route: Option<PrivateRoute>,
/// Did this remote private route see our node info due to no safety route in use
seen_our_node_info: bool,
last_seen_our_node_info_ts: u64,
/// Last time this remote private route was requested for any reason (cache expiration)
last_touched_ts: u64,
/// Stats

@@ -618,6 +618,10 @@ impl RouteSpecStore {
bail!("Not allocating route longer than max route hop count");
}

let Some(our_peer_info) = rti.get_own_peer_info(RoutingDomain::PublicInternet) else {
bail!("Can't allocate route until we have our own peer info");
};

// Get relay node id if we have one
let opt_relay_id = rti
.relay_node(RoutingDomain::PublicInternet)

@@ -764,7 +768,6 @@ impl RouteSpecStore {

// Ensure this route is viable by checking that each node can contact the next one
if directions.contains(Direction::Outbound) {
let our_peer_info = rti.get_own_peer_info(RoutingDomain::PublicInternet);
let mut previous_node = &our_peer_info;
let mut reachable = true;
for n in permutation {

@@ -787,7 +790,6 @@ impl RouteSpecStore {
}
}
if directions.contains(Direction::Inbound) {
let our_peer_info = rti.get_own_peer_info(RoutingDomain::PublicInternet);
let mut next_node = &our_peer_info;
let mut reachable = true;
for n in permutation.iter().rev() {

@@ -1452,9 +1454,15 @@ impl RouteSpecStore {
// Make innermost route hop to our own node
let mut route_hop = RouteHop {
node: if optimized {
if !rti.has_valid_own_node_info(RoutingDomain::PublicInternet) {
bail!("can't make private routes until our node info is valid");
}
RouteNode::NodeId(NodeId::new(routing_table.node_id()))
} else {
RouteNode::PeerInfo(rti.get_own_peer_info(RoutingDomain::PublicInternet))
let Some(pi) = rti.get_own_peer_info(RoutingDomain::PublicInternet) else {
bail!("can't make private routes until our node info is valid");
};
RouteNode::PeerInfo(pi)
},
next_hop: None,
};

@@ -1591,7 +1599,7 @@ impl RouteSpecStore {
.and_modify(|rpr| {
if cur_ts - rpr.last_touched_ts >= REMOTE_PRIVATE_ROUTE_CACHE_EXPIRY {
// Start fresh if this had expired
rpr.seen_our_node_info = false;
rpr.last_seen_our_node_info_ts = 0;
rpr.last_touched_ts = cur_ts;
rpr.stats = RouteStats::new(cur_ts);
} else {

@@ -1602,7 +1610,7 @@ impl RouteSpecStore {
.or_insert_with(|| RemotePrivateRouteInfo {
// New remote private route cache entry
private_route: Some(private_route),
seen_our_node_info: false,
last_seen_our_node_info_ts: 0,
last_touched_ts: cur_ts,
stats: RouteStats::new(cur_ts),
});

@@ -1665,22 +1673,52 @@ impl RouteSpecStore {
}
}

/// Check to see if this remote (not ours) private route has seen our node info yet
/// This returns true if we have sent non-safety-route node info to the
/// private route and gotten a response before
/// Check to see if this remote (not ours) private route has seen our current node info yet
/// This happens when you communicate with a private route without a safety route
pub fn has_remote_private_route_seen_our_node_info(&self, key: &DHTKey) -> bool {
let our_node_info_ts = {
let rti = &*self.unlocked_inner.routing_table.inner.read();
let Some(ts) = rti.get_own_node_info_ts(RoutingDomain::PublicInternet) else {
return false;
};
ts
};

let opt_rpr_node_info_ts = {
let inner = &mut *self.inner.lock();
let cur_ts = get_timestamp();
Self::with_peek_remote_private_route(inner, cur_ts, key, |rpr| rpr.seen_our_node_info)
.unwrap_or_default()
Self::with_peek_remote_private_route(inner, cur_ts, key, |rpr| {
rpr.last_seen_our_node_info_ts
})
};

let Some(rpr_node_info_ts) = opt_rpr_node_info_ts else {
return false;
};

our_node_info_ts == rpr_node_info_ts
}

/// Mark a remote private route as having seen our node info
/// Mark a remote private route as having seen our current node info
/// PRIVACY:
/// We do not accept node info timestamps from remote private routes because this would
/// enable a deanonymization attack, whereby a node could be 'pinged' with a doctored node_info with a
/// special 'timestamp', which then may be sent back over a private route, identifying that it
/// was that node that had the private route.
pub fn mark_remote_private_route_seen_our_node_info(
&self,
key: &DHTKey,
cur_ts: u64,
) -> EyreResult<()> {
let our_node_info_ts = {
let rti = &*self.unlocked_inner.routing_table.inner.read();
let Some(ts) = rti.get_own_node_info_ts(RoutingDomain::PublicInternet) else {
// Node info is invalid, skipping this
return Ok(());
};
ts
};

let inner = &mut *self.inner.lock();
// Check for local route. If this is not a remote private route
// then we just skip the recording. We may be running a test and using
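
Remote private routes get the same timestamp scheme, but under the privacy constraint spelled out in the PRIVACY note above: only our own locally read node info timestamp is ever recorded, never one supplied by the remote side. A minimal sketch of both operations, with stand-in types and a stand-in for get_own_node_info_ts():

// Only our *own* current node info timestamp is ever stored; a timestamp taken
// from the remote side could be doctored to deanonymize the route owner.
struct RemotePrivateRouteInfo {
    last_seen_our_node_info_ts: u64,
}

// Stand-in for get_own_node_info_ts(RoutingDomain::PublicInternet).
fn our_current_node_info_ts() -> Option<u64> {
    Some(42)
}

fn mark_seen_our_node_info(rpr: &mut RemotePrivateRouteInfo) {
    // If our own node info is not valid yet there is nothing to record.
    if let Some(ts) = our_current_node_info_ts() {
        rpr.last_seen_our_node_info_ts = ts;
    }
}

fn has_seen_our_current_node_info(rpr: &RemotePrivateRouteInfo) -> bool {
    match our_current_node_info_ts() {
        Some(ts) => ts == rpr.last_seen_our_node_info_ts,
        None => false,
    }
}

fn main() {
    let mut rpr = RemotePrivateRouteInfo {
        last_seen_our_node_info_ts: 0,
    };
    assert!(!has_seen_our_current_node_info(&rpr));
    mark_seen_our_node_info(&mut rpr);
    assert!(has_seen_our_current_node_info(&rpr));
    println!("ok");
}
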
@@ -1689,7 +1727,7 @@ impl RouteSpecStore {
return Ok(());
}
if Self::with_get_remote_private_route(inner, cur_ts, key, |rpr| {
rpr.seen_our_node_info = true;
rpr.last_seen_our_node_info_ts = our_node_info_ts;
})
.is_none()
{

@@ -1734,8 +1772,6 @@ impl RouteSpecStore {

// Reset private route cache
for (_k, v) in &mut inner.cache.remote_private_route_cache {
// Our node info has changed
v.seen_our_node_info = false;
// Restart stats for routes so we test the route again
v.stats.reset();
}

@@ -199,9 +199,7 @@ impl RoutingDomainEditor {
}
});
if changed {
// Mark that nothing in the routing table has seen our new node info
inner.reset_all_seen_our_node_info(self.routing_domain);
//
// Allow signed node info updates at same timestamp from dead nodes if our network has changed
inner.reset_all_updated_since_last_network_change();
}
}

@@ -226,16 +226,6 @@ impl RoutingTableInner {
})
}

pub fn reset_all_seen_our_node_info(&mut self, routing_domain: RoutingDomain) {
let cur_ts = get_timestamp();
self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, _, v| {
v.with_mut(rti, |_rti, e| {
e.set_seen_our_node_info(routing_domain, false);
});
Option::<()>::None
});
}

pub fn reset_all_updated_since_last_network_change(&mut self) {
let cur_ts = get_timestamp();
self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, _, v| {

@@ -246,16 +236,43 @@ impl RoutingTableInner {
});
}

/// Return if our node info is valid yet, which is only true if we have a valid network class
pub fn has_valid_own_node_info(&self, routing_domain: RoutingDomain) -> bool {
self.with_routing_domain(routing_domain, |rdd| rdd.common().has_valid_own_node_info())
}

/// Return a copy of our node's peerinfo
pub fn get_own_peer_info(&self, routing_domain: RoutingDomain) -> PeerInfo {
pub fn get_own_peer_info(&self, routing_domain: RoutingDomain) -> Option<PeerInfo> {
self.with_routing_domain(routing_domain, |rdd| {
if !rdd.common().has_valid_own_node_info() {
None
} else {
Some(rdd.common().with_peer_info(self, |pi| pi.clone()))
}
})
}

/// Return the best effort copy of our node's peerinfo
/// This may be invalid and should not be passed to other nodes,
/// but may be used for contact method calculation
pub fn get_best_effort_own_peer_info(&self, routing_domain: RoutingDomain) -> PeerInfo {
self.with_routing_domain(routing_domain, |rdd| {
rdd.common().with_peer_info(self, |pi| pi.clone())
})
}

/// Return our currently registered network class
pub fn has_valid_own_node_info(&self, routing_domain: RoutingDomain) -> bool {
self.with_routing_domain(routing_domain, |rdd| rdd.common().has_valid_own_node_info())
/// Return our current node info timestamp
pub fn get_own_node_info_ts(&self, routing_domain: RoutingDomain) -> Option<u64> {
self.with_routing_domain(routing_domain, |rdd| {
if !rdd.common().has_valid_own_node_info() {
None
} else {
Some(
rdd.common()
.with_peer_info(self, |pi| pi.signed_node_info.timestamp()),
)
}
})
}

/// Return the domain's currently registered network class

@@ -334,7 +351,6 @@ impl RoutingTableInner {
self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, _, e| {
e.with_mut(rti, |_rti, e| {
e.clear_signed_node_info(RoutingDomain::LocalNetwork);
e.set_seen_our_node_info(RoutingDomain::LocalNetwork, false);
e.set_updated_since_last_network_change(false);
});
Option::<()>::None

@@ -504,6 +520,7 @@ impl RoutingTableInner {
let opt_relay_id = self.with_routing_domain(routing_domain, |rd| {
rd.common().relay_node().map(|rn| rn.node_id())
});
let own_node_info_ts = self.get_own_node_info_ts(routing_domain);

// Collect all entries that are 'needs_ping' and have some node info making them reachable somehow
let mut node_refs = Vec::<NodeRef>::with_capacity(self.bucket_entry_count);

@@ -10,15 +10,15 @@ impl RoutingTable {
cur_ts: u64,
) -> EyreResult<()> {
// Get our node's current node info and network class and do the right thing
let own_peer_info = self.get_own_peer_info(RoutingDomain::PublicInternet);
let Some(own_peer_info) = self.get_own_peer_info(RoutingDomain::PublicInternet) else {
return Ok(());
};
let own_node_info = own_peer_info.signed_node_info.node_info();
let network_class = self.get_network_class(RoutingDomain::PublicInternet);
let network_class = own_node_info.network_class;

// Get routing domain editor
let mut editor = self.edit_routing_domain(RoutingDomain::PublicInternet);

// Do we know our network class yet?
if let Some(network_class) = network_class {
// If we already have a relay, see if it is dead, or if we don't need it any more
let has_relay = {
if let Some(relay_node) = self.relay_node(RoutingDomain::PublicInternet) {

@@ -65,14 +65,12 @@ impl RoutingTable {
}
if !got_outbound_relay {
// Find a node in our routing table that is an acceptable inbound relay
if let Some(nr) = self.find_inbound_relay(RoutingDomain::PublicInternet, cur_ts)
{
if let Some(nr) = self.find_inbound_relay(RoutingDomain::PublicInternet, cur_ts) {
info!("Inbound relay node selected: {}", nr);
editor.set_relay_node(nr);
}
}
}
}

// Commit the changes
editor.commit().await;

@@ -58,24 +58,30 @@ impl RPCOperationKind {
pub struct RPCOperation {
op_id: u64,
sender_node_info: Option<SignedNodeInfo>,
target_node_info_ts: u64,
kind: RPCOperationKind,
}

impl RPCOperation {
pub fn new_question(question: RPCQuestion, sender_node_info: Option<SignedNodeInfo>) -> Self {
pub fn new_question(
question: RPCQuestion,
sender_signed_node_info: SenderSignedNodeInfo,
) -> Self {
Self {
op_id: get_random_u64(),
sender_node_info,
sender_node_info: sender_signed_node_info.signed_node_info,
target_node_info_ts: sender_signed_node_info.target_node_info_ts,
kind: RPCOperationKind::Question(question),
}
}
pub fn new_statement(
statement: RPCStatement,
sender_node_info: Option<SignedNodeInfo>,
sender_signed_node_info: SenderSignedNodeInfo,
) -> Self {
Self {
op_id: get_random_u64(),
sender_node_info,
sender_node_info: sender_signed_node_info.signed_node_info,
target_node_info_ts: sender_signed_node_info.target_node_info_ts,
kind: RPCOperationKind::Statement(statement),
}
}

@@ -83,11 +89,12 @@ impl RPCOperation {
pub fn new_answer(
request: &RPCOperation,
answer: RPCAnswer,
sender_node_info: Option<SignedNodeInfo>,
sender_signed_node_info: SenderSignedNodeInfo,
) -> Self {
Self {
op_id: request.op_id,
sender_node_info,
sender_node_info: sender_signed_node_info.signed_node_info,
target_node_info_ts: sender_signed_node_info.target_node_info_ts,
kind: RPCOperationKind::Answer(answer),
}
}

@@ -99,6 +106,9 @@ impl RPCOperation {
pub fn sender_node_info(&self) -> Option<&SignedNodeInfo> {
self.sender_node_info.as_ref()
}
pub fn target_node_info_ts(&self) -> u64 {
self.target_node_info_ts
}

pub fn kind(&self) -> &RPCOperationKind {
&self.kind

@@ -128,12 +138,15 @@ impl RPCOperation {
None
};

let target_node_info_ts = operation_reader.get_target_node_info_ts();

let kind_reader = operation_reader.get_kind();
let kind = RPCOperationKind::decode(&kind_reader, opt_sender_node_id)?;

Ok(RPCOperation {
op_id,
sender_node_info,
target_node_info_ts,
kind,
})
}

@@ -144,6 +157,7 @@ impl RPCOperation {
let mut si_builder = builder.reborrow().init_sender_node_info();
encode_signed_node_info(&sender_info, &mut si_builder)?;
}
builder.set_target_node_info_ts(self.target_node_info_ts);
let mut k_builder = builder.reborrow().init_kind();
self.kind.encode(&mut k_builder)?;
Ok(())

@@ -217,10 +217,19 @@ impl RPCProcessor {
let route_node = match rss
.has_remote_private_route_seen_our_node_info(&private_route.public_key)
{
true => RouteNode::NodeId(NodeId::new(routing_table.node_id())),
false => RouteNode::PeerInfo(
routing_table.get_own_peer_info(RoutingDomain::PublicInternet),
),
true => {
if !routing_table.has_valid_own_node_info(RoutingDomain::PublicInternet) {
return Ok(NetworkResult::no_connection_other("Own node info must be valid to use private route"));
}
RouteNode::NodeId(NodeId::new(routing_table.node_id()))
}
false => {
let Some(own_peer_info) =
routing_table.get_own_peer_info(RoutingDomain::PublicInternet) else {
return Ok(NetworkResult::no_connection_other("Own peer info must be valid to use private route"));
};
RouteNode::PeerInfo(own_peer_info)
},
};

Ok(NetworkResult::value(RespondTo::PrivateRoute(

@@ -189,14 +189,45 @@ impl<T> Answer<T> {
}
}

/// An operation that has been fully prepared for envelope r
struct RenderedOperation {
message: Vec<u8>, // The rendered operation bytes
node_id: DHTKey, // Destination node id we're sending to
node_ref: NodeRef, // Node to send envelope to (may not be destination node id in case of relay)
hop_count: usize, // Total safety + private route hop count + 1 hop for the initial send
safety_route: Option<DHTKey>, // The safety route used to send the message
remote_private_route: Option<DHTKey>, // The private route used to send the message
reply_private_route: Option<DHTKey>, // The private route requested to receive the reply
/// The rendered operation bytes
message: Vec<u8>,
/// Destination node id we're sending to
node_id: DHTKey,
/// Node to send envelope to (may not be destination node id in case of relay)
node_ref: NodeRef,
/// Total safety + private route hop count + 1 hop for the initial send
hop_count: usize,
/// The safety route used to send the message
safety_route: Option<DHTKey>,
/// The private route used to send the message
remote_private_route: Option<DHTKey>,
/// The private route requested to receive the reply
reply_private_route: Option<DHTKey>,
}

/// Node information exchanged during every RPC message
#[derive(Default, Debug, Clone)]
struct SenderSignedNodeInfo {
/// The current signed node info of the sender if required
signed_node_info: Option<SignedNodeInfo>,
/// The last timestamp of the target's node info to assist remote node with sending its latest node info
target_node_info_ts: u64,
}
impl SenderSignedNodeInfo {
pub fn new_no_sni(target_node_info_ts: u64) -> Self {
Self {
signed_node_info: None,
target_node_info_ts,
}
}
pub fn new(sender_signed_node_info: SignedNodeInfo, target_node_info_ts: u64) -> Self {
Self {
signed_node_info: Some(sender_signed_node_info),
target_node_info_ts,
}
}
}

#[derive(Copy, Clone, Debug)]
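
SenderSignedNodeInfo is what every outgoing RPC now carries in place of a bare Option<SignedNodeInfo>: our signed node info (only when the target still needs it) plus the last timestamp we hold for the target's node info. A rough sketch of how the two constructors are intended to be chosen, with a stand-in SignedNodeInfo and a hypothetical target_has_seen_our_ts flag standing in for has_seen_our_node_info_ts():

#[derive(Debug, Clone)]
struct SignedNodeInfo {
    timestamp: u64,
}

#[derive(Debug, Clone, Default)]
struct SenderSignedNodeInfo {
    signed_node_info: Option<SignedNodeInfo>,
    target_node_info_ts: u64,
}

impl SenderSignedNodeInfo {
    fn new_no_sni(target_node_info_ts: u64) -> Self {
        Self {
            signed_node_info: None,
            target_node_info_ts,
        }
    }
    fn new(sender_signed_node_info: SignedNodeInfo, target_node_info_ts: u64) -> Self {
        Self {
            signed_node_info: Some(sender_signed_node_info),
            target_node_info_ts,
        }
    }
}

fn build_ssni(
    our_sni: SignedNodeInfo,
    target_node_info_ts: u64,
    target_has_seen_our_ts: bool,
) -> SenderSignedNodeInfo {
    if target_has_seen_our_ts {
        // Target already holds our current node info: only tell it which version
        // of *its* node info we hold, so it can decide whether to refresh us.
        SenderSignedNodeInfo::new_no_sni(target_node_info_ts)
    } else {
        // Target is out of date: attach our signed node info as well.
        SenderSignedNodeInfo::new(our_sni, target_node_info_ts)
    }
}

fn main() {
    let our_sni = SignedNodeInfo { timestamp: 1_000 };
    println!("{:?}", build_ssni(our_sni.clone(), 500, false));
    println!("{:?}", build_ssni(our_sni, 500, true));
}
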
@@ -474,11 +505,10 @@ impl RPCProcessor {
)
}
};

out
}

// Wrap an operation with a private route inside a safety route
/// Wrap an operation with a private route inside a safety route
fn wrap_with_route(
&self,
safety_selection: SafetySelection,

@@ -528,9 +558,11 @@ impl RPCProcessor {
safety_route: compiled_route.safety_route,
operation,
};
let ssni_route =
self.get_sender_signed_node_info(&Destination::direct(compiled_route.first_hop))?;
let operation = RPCOperation::new_statement(
RPCStatement::new(RPCStatementDetail::Route(route_operation)),
None,
ssni_route,
);

// Convert message to bytes and return it

@@ -680,64 +712,75 @@ impl RPCProcessor {
Ok(out)
}

// Get signed node info to package with RPC messages to improve
// routing table caching when it is okay to do so
// This is only done in the PublicInternet routing domain because
// as far as we can tell this is the only domain that will really benefit
fn get_sender_signed_node_info(&self, dest: &Destination) -> Option<SignedNodeInfo> {
/// Get signed node info to package with RPC messages to improve
/// routing table caching when it is okay to do so
#[instrument(skip(self), ret, err)]
fn get_sender_signed_node_info(
&self,
dest: &Destination,
) -> Result<SenderSignedNodeInfo, RPCError> {
// Don't do this if the sender is to remain private
// Otherwise we would be attaching the original sender's identity to the final destination,
// thus defeating the purpose of the safety route entirely :P
match dest.get_safety_selection() {
SafetySelection::Unsafe(_) => {}
SafetySelection::Safe(_) => {
return None;
return Ok(SenderSignedNodeInfo::default());
}
}
// Don't do this if our own signed node info isn't valid yet
let routing_table = self.routing_table();
if !routing_table.has_valid_own_node_info(RoutingDomain::PublicInternet) {
return None;
}

match dest {
// Get the target we're sending to
let routing_table = self.routing_table();
let target = match dest {
Destination::Direct {
target,
safety_selection: _,
} => {
// If the target has seen our node info already don't do this
if target.has_seen_our_node_info(RoutingDomain::PublicInternet) {
return None;
}
Some(
routing_table
.get_own_peer_info(RoutingDomain::PublicInternet)
.signed_node_info,
)
}
} => target.clone(),
Destination::Relay {
relay: _,
target,
safety_selection: _,
} => {
if let Some(target) = routing_table.lookup_node_ref(*target) {
if target.has_seen_our_node_info(RoutingDomain::PublicInternet) {
return None;
}
Some(
routing_table
.get_own_peer_info(RoutingDomain::PublicInternet)
.signed_node_info,
)
target
} else {
None
// Target was not in our routing table
return Ok(SenderSignedNodeInfo::default());
}
}
Destination::PrivateRoute {
private_route: _,
safety_selection: _,
} => None,
} => {
return Ok(SenderSignedNodeInfo::default());
}
};

let Some(routing_domain) = target.best_routing_domain() else {
// No routing domain for target?
return Err(RPCError::internal(format!("No routing domain for target: {}", target)));
};

// Get the target's node info timestamp
let target_node_info_ts = target.node_info_ts(routing_domain);

// Don't return our node info if it's not valid yet
let Some(own_peer_info) = routing_table.get_own_peer_info(routing_domain) else {
return Ok(SenderSignedNodeInfo::new_no_sni(target_node_info_ts));
};

// Get our node info timestamp
let our_node_info_ts = own_peer_info.signed_node_info.timestamp();

// If the target has seen our node info already don't send it again
if target.has_seen_our_node_info_ts(routing_domain, our_node_info_ts) {
return Ok(SenderSignedNodeInfo::new_no_sni(target_node_info_ts));
}

Ok(SenderSignedNodeInfo::new(
own_peer_info.signed_node_info,
target_node_info_ts,
))
}

/// Record failure to send to node or route

@@ -981,11 +1024,11 @@ impl RPCProcessor {
dest: Destination,
question: RPCQuestion,
) -> Result<NetworkResult<WaitableReply>, RPCError> {
// Get sender info if we should send that
let opt_sender_info = self.get_sender_signed_node_info(&dest);
// Get sender signed node info if we should send that
let ssni = self.get_sender_signed_node_info(&dest)?;

// Wrap question in operation
let operation = RPCOperation::new_question(question, opt_sender_info);
let operation = RPCOperation::new_question(question, ssni);
let op_id = operation.op_id();

// Log rpc send

@@ -1056,11 +1099,11 @@ impl RPCProcessor {
dest: Destination,
statement: RPCStatement,
) -> Result<NetworkResult<()>, RPCError> {
// Get sender info if we should send that
let opt_sender_info = self.get_sender_signed_node_info(&dest);
// Get sender signed node info if we should send that
let ssni = self.get_sender_signed_node_info(&dest)?;

// Wrap statement in operation
let operation = RPCOperation::new_statement(statement, opt_sender_info);
let operation = RPCOperation::new_statement(statement, ssni);

// Log rpc send
trace!(target: "rpc_message", dir = "send", kind = "statement", op_id = operation.op_id(), desc = operation.kind().desc(), ?dest);

@@ -1117,11 +1160,11 @@ impl RPCProcessor {
// Extract destination from respond_to
let dest = network_result_try!(self.get_respond_to_destination(&request));

// Get sender info if we should send that
let opt_sender_info = self.get_sender_signed_node_info(&dest);
// Get sender signed node info if we should send that
let ssni = self.get_sender_signed_node_info(&dest)?;

// Wrap answer in operation
let operation = RPCOperation::new_answer(&request.operation, answer, opt_sender_info);
let operation = RPCOperation::new_answer(&request.operation, answer, ssni);

// Log rpc send
trace!(target: "rpc_message", dir = "send", kind = "answer", op_id = operation.op_id(), desc = operation.kind().desc(), ?dest);

@@ -1213,10 +1256,10 @@ impl RPCProcessor {
opt_sender_nr = self.routing_table().lookup_node_ref(sender_node_id)
}

// Mark this sender as having seen our node info over this routing domain
// because it managed to reach us over that routing domain
// Update the 'seen our node info' timestamp to determine if this node needs a
// 'node info update' ping
if let Some(sender_nr) = &opt_sender_nr {
sender_nr.set_seen_our_node_info(routing_domain);
sender_nr.set_our_node_info_ts(routing_domain, operation.target_node_info_ts());
}

// Make the RPC message

@@ -92,9 +92,8 @@ impl RPCProcessor {

// add node information for the requesting node to our routing table
let routing_table = self.routing_table();
let has_valid_own_node_info =
routing_table.has_valid_own_node_info(RoutingDomain::PublicInternet);
let own_peer_info = routing_table.get_own_peer_info(RoutingDomain::PublicInternet);
let has_valid_own_node_info = own_peer_info.is_some();

// find N nodes closest to the target node in our routing table

@@ -116,7 +115,7 @@ impl RPCProcessor {
|rti, k, v| {
rti.transform_to_peer_info(
RoutingDomain::PublicInternet,
own_peer_info.clone(),
own_peer_info.as_ref().unwrap().clone(),
k,
v,
)

@@ -30,11 +30,6 @@ extern crate wee_alloc;
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;

static SETUP_ONCE: Once = Once::new();
pub fn setup() -> () {
SETUP_ONCE.call_once(|| {});
}

// API Singleton
lazy_static! {
static ref VEILID_API: SendWrapper<RefCell<Option<veilid_core::VeilidAPI>>> =

@@ -138,8 +133,10 @@ pub fn initialize_veilid_wasm() {
console_error_panic_hook::set_once();
}

static SETUP_ONCE: Once = Once::new();
#[wasm_bindgen()]
pub fn initialize_veilid_core(platform_config: String) {
SETUP_ONCE.call_once(|| {
let platform_config: VeilidWASMConfig = veilid_core::deserialize_json(&platform_config)
.expect("failed to deserialize platform config json");

@@ -150,8 +147,10 @@ pub fn initialize_veilid_core(platform_config: String) {

// Performance logger
if platform_config.logging.performance.enabled {
let filter =
veilid_core::VeilidLayerFilter::new(platform_config.logging.performance.level, None);
let filter = veilid_core::VeilidLayerFilter::new(
platform_config.logging.performance.level,
None,
);
let layer = WASMLayer::new(
WASMLayerConfigBuilder::new()
.set_report_logs_in_timings(platform_config.logging.performance.logs_in_timings)

@@ -169,7 +168,8 @@ pub fn initialize_veilid_core(platform_config: String) {

// API logger
if platform_config.logging.api.enabled {
let filter = veilid_core::VeilidLayerFilter::new(platform_config.logging.api.level, None);
let filter =
veilid_core::VeilidLayerFilter::new(platform_config.logging.api.level, None);
let layer = veilid_core::ApiTracingLayer::get().with_filter(filter.clone());
filters.insert("api", filter);
layers.push(layer.boxed());

@@ -180,6 +180,7 @@ pub fn initialize_veilid_core(platform_config: String) {
.try_init()
.map_err(|e| format!("failed to initialize logging: {}", e))
.expect("failed to initalize WASM platform");
});
}

#[wasm_bindgen()]