more setvalue fixes and concurrency cleanup
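Replaces the routing table's tick_paused flag with a tagged async critical section: pause_tasks() now waits on and returns a guard for the new LOCK_TAG_TICK tag, RoutingDomainEditor holds that guard while it applies changes instead of pairing pause(true)/pause(false) calls, and tick() simply skips a pass when it cannot take the tag. On the DHT side, the SetValue handler now reads set_value_count (not set_value_fanout) from config when deciding whether to store a value locally, dest is cloned so the GetValue/SetValue answer logging can still reference it, and that verbose answer logging is gated behind the debug-dht feature.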
@@ -54,6 +54,9 @@ const ROUTING_TABLE: &str = "routing_table";
 const SERIALIZED_BUCKET_MAP: &[u8] = b"serialized_bucket_map";
 const CACHE_VALIDITY_KEY: &[u8] = b"cache_validity_key";
 
+// Critical sections
+const LOCK_TAG_TICK: &str = "TICK";
+
 pub type LowLevelProtocolPorts = BTreeSet<(LowLevelProtocolType, AddressType, u16)>;
 pub type ProtocolToPortMapping = BTreeMap<(ProtocolType, AddressType), (LowLevelProtocolType, u16)>;
 #[derive(Clone, Debug)]
@@ -129,9 +129,11 @@ impl RoutingDomainEditor {
         }
 
         // Briefly pause routing table ticker while changes are made
-        if pause_tasks {
-            self.routing_table.pause_tasks(true).await;
-        }
+        let _tick_guard = if pause_tasks {
+            Some(self.routing_table.pause_tasks().await)
+        } else {
+            None
+        };
 
         // Apply changes
         let mut changed = false;
@@ -262,8 +264,5 @@ impl RoutingDomainEditor {
                 rss.reset();
             }
         }
-
-        // Unpause routing table ticker
-        self.routing_table.pause_tasks(false).await;
     }
 }
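(With the guard returned by pause_tasks() above, the explicit unpause at the end of the editor's commit path is no longer needed: dropping _tick_guard releases the TICK critical section and the ticker resumes on its own.)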
@@ -2,6 +2,7 @@ use super::*;
 use weak_table::PtrWeakHashSet;
 
 const RECENT_PEERS_TABLE_SIZE: usize = 64;
 
+pub type EntryCounts = BTreeMap<(RoutingDomain, CryptoKind), usize>;
 //////////////////////////////////////////////////////////////////////////
 
@@ -34,8 +35,9 @@ pub struct RoutingTableInner {
     pub(super) recent_peers: LruCache<TypedKey, RecentPeersEntry>,
     /// Storage for private/safety RouteSpecs
     pub(super) route_spec_store: Option<RouteSpecStore>,
-    /// Tick paused or not
-    pub(super) tick_paused: bool,
+    /// Async tagged critical sections table
+    /// Tag: "tick" -> in ticker
+    pub(super) critical_sections: AsyncTagLockTable<&'static str>,
 }
 
 impl RoutingTableInner {
@@ -52,7 +54,7 @@ impl RoutingTableInner {
             self_transfer_stats: TransferStatsDownUp::default(),
             recent_peers: LruCache::new(RECENT_PEERS_TABLE_SIZE),
             route_spec_store: None,
-            tick_paused: false,
+            critical_sections: AsyncTagLockTable::new(),
         }
     }
 
@@ -126,9 +126,13 @@ impl RoutingTable {
     /// to run tick tasks which may run at slower tick rates as configured
     pub async fn tick(&self) -> EyreResult<()> {
         // Don't tick if paused
-        if self.inner.read().tick_paused {
+        let opt_tick_guard = {
+            let inner = self.inner.read();
+            inner.critical_sections.try_lock_tag(LOCK_TAG_TICK)
+        };
+        let Some(_tick_guard) = opt_tick_guard else {
             return Ok(());
-        }
+        };
 
         // Do rolling transfers every ROLLING_TRANSFERS_INTERVAL_SECS secs
         self.unlocked_inner.rolling_transfers_task.tick().await?;
@@ -183,22 +187,9 @@ impl RoutingTable {
 
         Ok(())
     }
-    pub(crate) async fn pause_tasks(&self, paused: bool) {
-        let cancel = {
-            let mut inner = self.inner.write();
-            if !inner.tick_paused && paused {
-                inner.tick_paused = true;
-                true
-            } else if inner.tick_paused && !paused {
-                inner.tick_paused = false;
-                false
-            } else {
-                false
-            }
-        };
-        if cancel {
-            self.cancel_tasks().await;
-        }
+    pub(crate) async fn pause_tasks(&self) -> AsyncTagLockGuard<&'static str> {
+        let critical_sections = self.inner.read().critical_sections.clone();
+        critical_sections.lock_tag(LOCK_TAG_TICK).await
     }
 
     pub(crate) async fn cancel_tasks(&self) {
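For context, the two hunks above are the core of the concurrency cleanup: instead of flipping a tick_paused boolean, pause_tasks() now waits for and holds the TICK tag of an async tag-lock table, and tick() skips a pass whenever it cannot take that tag. The following is a minimal sketch of that pattern, not veilid-core's AsyncTagLockTable; it assumes the tokio crate (with macros and runtime features enabled), the names are illustrative, and idle tags are never cleaned up.

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

use tokio::sync::{Mutex as AsyncMutex, OwnedMutexGuard};

// One async mutex per tag; the std Mutex only guards the map itself.
#[derive(Clone, Default)]
struct TagLockTable {
    tags: Arc<Mutex<HashMap<&'static str, Arc<AsyncMutex<()>>>>>,
}

impl TagLockTable {
    fn entry(&self, tag: &'static str) -> Arc<AsyncMutex<()>> {
        self.tags.lock().unwrap().entry(tag).or_default().clone()
    }

    // Wait for the tagged critical section and hold it until the guard drops,
    // like pause_tasks() holding LOCK_TAG_TICK while an editor applies changes.
    async fn lock_tag(&self, tag: &'static str) -> OwnedMutexGuard<()> {
        self.entry(tag).lock_owned().await
    }

    // Return None if the tagged critical section is already held,
    // like tick() returning early while the ticker is paused.
    fn try_lock_tag(&self, tag: &'static str) -> Option<OwnedMutexGuard<()>> {
        self.entry(tag).try_lock_owned().ok()
    }
}

#[tokio::main]
async fn main() {
    let table = TagLockTable::default();
    let guard = table.lock_tag("TICK").await;       // "pause"
    assert!(table.try_lock_tag("TICK").is_none());  // a tick would be skipped
    drop(guard);                                    // "unpause" happens on drop
    assert!(table.try_lock_tag("TICK").is_some());  // ticking resumes
}

Compared with the removed boolean, the guard cannot be forgotten in an early-return path, and concurrent pausers simply queue on the same tag.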
@@ -78,7 +78,7 @@ impl RPCProcessor {
         log_rpc!(debug "{}", debug_string);
 
         let waitable_reply = network_result_try!(
-            self.question(dest, question, Some(question_context))
+            self.question(dest.clone(), question, Some(question_context))
                 .await?
         );
 
@@ -99,29 +99,35 @@ impl RPCProcessor {
         };
 
         let (value, peers, descriptor) = get_value_a.destructure();
-
-        let debug_string_value = value.as_ref().map(|v| {
-            format!(" len={} seq={} writer={}",
-                v.value_data().data().len(),
-                v.value_data().seq(),
-                v.value_data().writer(),
-            )
-        }).unwrap_or_default();
-
-        let debug_string_answer = format!(
-            "OUT <== GetValueA({} #{}{}{} peers={})",
-            key,
-            subkey,
-            debug_string_value,
-            if descriptor.is_some() {
-                " +desc"
-            } else {
-                ""
-            },
-            peers.len(),
-        );
-
-        log_rpc!(debug "{}", debug_string_answer);
+        #[cfg(feature="debug-dht")]
+        {
+            let debug_string_value = value.as_ref().map(|v| {
+                format!(" len={} seq={} writer={}",
+                    v.value_data().data().len(),
+                    v.value_data().seq(),
+                    v.value_data().writer(),
+                )
+            }).unwrap_or_default();
+
+            let debug_string_answer = format!(
+                "OUT <== GetValueA({} #{}{}{} peers={}) <= {}",
+                key,
+                subkey,
+                debug_string_value,
+                if descriptor.is_some() {
+                    " +desc"
+                } else {
+                    ""
+                },
+                peers.len(),
+                dest
+            );
+
+            log_rpc!(debug "{}", debug_string_answer);
+
+            let peer_ids:Vec<String> = peers.iter().filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string())).collect();
+            log_rpc!(debug "Peers: {:#?}", peers);
+        }
 
         // Validate peers returned are, in fact, closer to the key than the node we sent this to
        let valid = match RoutingTable::verify_peers_closer(vcrypto, target_node_id, key, &peers) {
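Two small things to note in the hunk above: dest is now passed to self.question() as dest.clone() because the answer-side logging formats the original dest afterwards, and that logging only exists in builds made with the debug-dht cargo feature. A minimal sketch of such a feature gate, assuming a debug-dht feature declared as debug-dht = [] under [features] in Cargo.toml (the feature name comes from the diff; everything else is illustrative):

fn main() {
    // Compiled only when building with: cargo build --features debug-dht
    #[cfg(feature = "debug-dht")]
    {
        println!("verbose DHT answer logging goes here");
    }
    // Without the feature, the block (and its formatting cost) disappears entirely.
    println!("normal path runs either way");
}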
@@ -203,19 +209,22 @@ impl RPCProcessor {
         let routing_table = self.routing_table();
         let closer_to_key_peers = network_result_try!(routing_table.find_preferred_peers_closer_to_key(key, vec![CAP_DHT]));
 
-        let debug_string = format!(
-            "IN <=== GetValueQ({} #{}{}) <== {}",
-            key,
-            subkey,
-            if want_descriptor {
-                " +wantdesc"
-            } else {
-                ""
-            },
-            msg.header.direct_sender_node_id()
-        );
-
-        log_rpc!(debug "{}", debug_string);
+        #[cfg(feature="debug-dht")]
+        {
+            let debug_string = format!(
+                "IN <=== GetValueQ({} #{}{}) <== {}",
+                key,
+                subkey,
+                if want_descriptor {
+                    " +wantdesc"
+                } else {
+                    ""
+                },
+                msg.header.direct_sender_node_id()
+            );
+
+            log_rpc!(debug "{}", debug_string);
+        }
 
         // See if we have this record ourselves
         let storage_manager = self.storage_manager();
@@ -92,7 +92,7 @@ impl RPCProcessor {
         log_rpc!(debug "{}", debug_string);
 
         let waitable_reply = network_result_try!(
-            self.question(dest, question, Some(question_context))
+            self.question(dest.clone(), question, Some(question_context))
                 .await?
        );
 
@@ -114,28 +114,35 @@ impl RPCProcessor {
         };
 
         let (set, value, peers) = set_value_a.destructure();
 
-        let debug_string_value = value.as_ref().map(|v| {
-            format!(" len={} writer={}",
-                v.value_data().data().len(),
-                v.value_data().writer(),
-            )
-        }).unwrap_or_default();
-
-        let debug_string_answer = format!(
-            "OUT <== SetValueA({} #{}{}{} peers={})",
-            key,
-            subkey,
-            if set {
-                " +set"
-            } else {
-                ""
-            },
-            debug_string_value,
-            peers.len(),
-        );
-
-        log_rpc!(debug "{}", debug_string_answer);
+        #[cfg(feature="debug-dht")]
+        {
+            let debug_string_value = value.as_ref().map(|v| {
+                format!(" len={} writer={}",
+                    v.value_data().data().len(),
+                    v.value_data().writer(),
+                )
+            }).unwrap_or_default();
+
+            let debug_string_answer = format!(
+                "OUT <== SetValueA({} #{}{}{} peers={}) <= {}",
+                key,
+                subkey,
+                if set {
+                    " +set"
+                } else {
+                    ""
+                },
+                debug_string_value,
+                peers.len(),
+                dest,
+            );
+
+            log_rpc!(debug "{}", debug_string_answer);
+
+            let peer_ids:Vec<String> = peers.iter().filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string())).collect();
+            log_rpc!(debug "Peers: {:#?}", peer_ids);
+        }
 
         // Validate peers returned are, in fact, closer to the key than the node we sent this to
         let valid = match RoutingTable::verify_peers_closer(vcrypto, target_node_id, key, &peers) {
@@ -235,7 +242,7 @@ impl RPCProcessor {
         // If there are less than 'set_value_count' peers that are closer, then store here too
         let set_value_count = {
             let c = self.config.get();
-            c.network.dht.set_value_fanout as usize
+            c.network.dht.set_value_count as usize
         };
         let (set, new_value) = if closer_to_key_peers.len() >= set_value_count {
             // Not close enough
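The last hunk corrects which config knob drives the local-store decision: as the comment already says, the value is stored on this node only when fewer than set_value_count closer peers are known (set_value_fanout, by its name, belongs to the outbound fan-out, not this check). A trivial restatement of the corrected check, with names taken from the diff:

// Store locally only if fewer than set_value_count closer peers are known.
fn should_store_here(closer_to_key_peers: usize, set_value_count: usize) -> bool {
    closer_to_key_peers < set_value_count
}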