more clippy

parent e4ee093951
commit c7d4462e0e
@@ -177,10 +177,10 @@ impl PrivateRoute {
             None => PrivateRouteHops::Empty,
         };
 
-        return Some(first_hop_node);
+        Some(first_hop_node)
     }
-            PrivateRouteHops::Data(_) => return None,
-            PrivateRouteHops::Empty => return None,
+            PrivateRouteHops::Data(_) => None,
+            PrivateRouteHops::Empty => None,
         }
     }
 
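The hunk above drops trailing `return` statements, the pattern clippy's `needless_return` lint flags: the final expression of a block is already its value. A minimal standalone sketch of the same pattern (illustrative names, not Veilid code):

// clippy::needless_return: `return expr;` as the final statement is redundant;
// the expression alone is the block's value. Early exits still use `return`.
fn first_even(xs: &[i32]) -> Option<i32> {
    for x in xs {
        if x % 2 == 0 {
            return Some(*x); // early exit: `return` is appropriate here
        }
    }
    None // final expression: no `return`, no semicolon
}

fn main() {
    assert_eq!(first_even(&[1, 3, 4]), Some(4));
    assert_eq!(first_even(&[1, 3]), None);
}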
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -278,7 +278,7 @@ fn first_filtered_dial_info_detail_between_nodes(
     sequencing: Sequencing,
     dif_sort: Option<Arc<DialInfoDetailSort>>
 ) -> Option<DialInfoDetail> {
-    let dial_info_filter = dial_info_filter.clone().filtered(
+    let dial_info_filter = (*dial_info_filter).filtered(
         &DialInfoFilter::all()
             .with_address_type_set(from_node.address_types())
             .with_protocol_type_set(from_node.outbound_protocols()),
@@ -416,7 +416,6 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
 
         // Does node B have a direct udp dialinfo node A can reach?
         let udp_dial_info_filter = dial_info_filter
-            .clone()
             .filtered(&DialInfoFilter::all().with_protocol_type(ProtocolType::UDP));
         if let Some(target_udp_did) = first_filtered_dial_info_detail_between_nodes(
             node_a,
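Both hunks above remove `.clone()` calls on `dial_info_filter`; the `(*dial_info_filter)` rewrite suggests the filter type is `Copy`, which is the shape clippy reports as `clone_on_copy` (or `redundant_clone` when the clone is merely consumed immediately). A hedged sketch with an illustrative `Filter` type, assuming a `Copy` filter:

#[derive(Clone, Copy, PartialEq, Debug)]
struct Filter(u8);

impl Filter {
    fn narrowed(self, mask: u8) -> Filter {
        Filter(self.0 & mask)
    }
}

// clippy::clone_on_copy: for a `Copy` type, `.clone()` is just an implicit
// copy; dereferencing states the intent directly.
fn apply(filter: &Filter) -> Filter {
    // Before: filter.clone().narrowed(0x0F)
    (*filter).narrowed(0x0F)
}

fn main() {
    assert_eq!(apply(&Filter(0xAB)), Filter(0x0B));
}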
@@ -471,7 +470,7 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
         // Can we reach the inbound relay?
         if first_filtered_dial_info_detail_between_nodes(
             node_a,
-            &node_b_relay,
+            node_b_relay,
             &dial_info_filter,
             sequencing,
             dif_sort.clone()
@@ -543,7 +543,7 @@ impl RoutingTableInner {
         // Collect all entries that are 'needs_ping' and have some node info making them reachable somehow
         let mut node_refs = Vec::<NodeRef>::with_capacity(self.bucket_entry_count());
         self.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, entry| {
-            if entry.with_inner(|e| {
+            let entry_needs_ping = |e: &BucketEntryInner| {
                 // If this entry isn't in the routing domain we are checking, don't include it
                 if !e.exists_in_routing_domain(rti, routing_domain) {
                     return false;
@@ -566,7 +566,9 @@ impl RoutingTableInner {
                 }
 
                 false
-            }) {
+            };
+
+            if entry.with_inner(entry_needs_ping) {
                 node_refs.push(NodeRef::new(
                     outer_self.clone(),
                     entry,
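The two hunks above pull the multi-line closure out of `if entry.with_inner(|e| { ... }) {` and bind it to `entry_needs_ping` first; that is the usual fix for clippy's `blocks_in_if_conditions` lint (the exact lint name is an inference here). A small sketch of the same restructuring:

fn main() {
    let values = [1, 5, 8, 13];

    // Naming the predicate keeps the `if` condition to a single call instead
    // of embedding a multi-line closure literal inside it.
    let is_odd_and_positive = |v: &i32| {
        if *v <= 0 {
            return false;
        }
        *v % 2 == 1
    };

    if values.iter().any(is_odd_and_positive) {
        println!("at least one positive odd value");
    }
}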
@@ -982,7 +984,7 @@ impl RoutingTableInner {
         match entry {
             None => has_valid_own_node_info,
             Some(entry) => entry.with_inner(|e| {
-                e.signed_node_info(routing_domain.into())
+                e.signed_node_info(routing_domain)
                     .map(|sni| sni.has_any_signature())
                     .unwrap_or(false)
             }),
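The `.into()` dropped here looks like clippy's `useless_conversion` lint: converting a value into the type it already has is a no-op (that reading is an inference from the one-token change). Sketch:

#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq)]
enum Domain {
    PublicInternet,
    LocalNetwork,
}

fn describe(domain: Domain) -> &'static str {
    match domain {
        Domain::PublicInternet => "public",
        Domain::LocalNetwork => "local",
    }
}

fn main() {
    let d = Domain::PublicInternet;
    // clippy::useless_conversion: `describe(d.into())` would convert `Domain`
    // into `Domain` via the blanket `impl Into`, which is a no-op; pass `d`.
    assert_eq!(describe(d), "public");
}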
@@ -1079,11 +1081,7 @@ impl RoutingTableInner {
         move |_rti: &RoutingTableInner, v: Option<Arc<BucketEntry>>| {
             if let Some(entry) = &v {
                 // always filter out dead nodes
-                if entry.with_inner(|e| e.state(cur_ts) == BucketEntryState::Dead) {
-                    false
-                } else {
-                    true
-                }
+                !entry.with_inner(|e| e.state(cur_ts) == BucketEntryState::Dead)
             } else {
                 // always filter out self peer, as it is irrelevant to the 'fastest nodes' search
                 false
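The hunk above collapses `if cond { false } else { true }` into `!cond`, which is what clippy's `needless_bool` lint suggests. Minimal sketch:

// clippy::needless_bool: an if/else whose arms are just `true`/`false`
// is the (possibly negated) condition itself.
fn is_alive(state: &str) -> bool {
    // Before: if state == "dead" { false } else { true }
    state != "dead"
}

fn main() {
    assert!(is_alive("reliable"));
    assert!(!is_alive("dead"));
}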
@@ -1099,7 +1097,7 @@ impl RoutingTableInner {
             // same nodes are always the same
             if let Some(a_entry) = a_entry {
                 if let Some(b_entry) = b_entry {
-                    if Arc::ptr_eq(&a_entry, &b_entry) {
+                    if Arc::ptr_eq(a_entry, b_entry) {
                         return core::cmp::Ordering::Equal;
                     }
                 }
@@ -1150,9 +1148,7 @@ impl RoutingTableInner {
             })
         };
 
-        let out =
-            self.find_peers_with_sort_and_filter(node_count, cur_ts, filters, sort, transform);
-        out
+        self.find_peers_with_sort_and_filter(node_count, cur_ts, filters, sort, transform)
     }
 
     pub fn find_preferred_closest_nodes<T, O>(
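The second hunk above removes a `let out = ...; out` pair, the pattern clippy's `let_and_return` lint flags. Sketch:

// clippy::let_and_return: binding a value only to return it on the next line
// adds nothing; return the expression directly.
fn shortest_len(words: &[&str]) -> Option<usize> {
    // Before:
    //   let out = words.iter().map(|w| w.len()).min();
    //   out
    words.iter().map(|w| w.len()).min()
}

fn main() {
    assert_eq!(shortest_len(&["routing", "table", "rpc"]), Some(3));
    assert_eq!(shortest_len(&[]), None);
}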
@@ -1193,7 +1189,7 @@ impl RoutingTableInner {
             // same nodes are always the same
             if let Some(a_entry) = a_entry {
                 if let Some(b_entry) = b_entry {
-                    if Arc::ptr_eq(&a_entry, &b_entry) {
+                    if Arc::ptr_eq(a_entry, b_entry) {
                         return core::cmp::Ordering::Equal;
                     }
                 }
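In both `Arc::ptr_eq` hunks the bindings are already references to the `Arc`s, so the extra `&` is what clippy's `needless_borrow` lint reports. A sketch of the same situation (types here are illustrative):

use std::sync::Arc;

// clippy::needless_borrow: `a` and `b` are already `&Arc<_>` after the
// `if let`, so `Arc::ptr_eq(&a, &b)` just adds a reference that the compiler
// immediately auto-dereferences away.
fn same_entry(a: &Option<Arc<String>>, b: &Option<Arc<String>>) -> bool {
    if let (Some(a), Some(b)) = (a, b) {
        // Before: Arc::ptr_eq(&a, &b)
        Arc::ptr_eq(a, b)
    } else {
        false
    }
}

fn main() {
    let entry = Arc::new("entry".to_string());
    assert!(same_entry(&Some(entry.clone()), &Some(entry.clone())));
    assert!(!same_entry(&Some(entry), &Some(Arc::new("entry".to_string()))));
}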
@@ -30,8 +30,8 @@ pub async fn test_routingtable_buckets_round_trip() {
 
     for crypto in routing_table_keys {
         // The same keys are present in the original and copy RoutingTables.
-        let original_buckets = original_inner.buckets.get(&crypto).unwrap();
-        let copy_buckets = copy_inner.buckets.get(&crypto).unwrap();
+        let original_buckets = original_inner.buckets.get(crypto).unwrap();
+        let copy_buckets = copy_inner.buckets.get(crypto).unwrap();
 
         // Recurse into RoutingTable.inner.buckets
         for (left_buckets, right_buckets) in original_buckets.iter().zip(copy_buckets.iter()) {
@@ -16,7 +16,7 @@ pub fn encode_node_info(
         .reborrow()
         .init_envelope_support(node_info.envelope_support().len() as u32);
     if let Some(s) = es_builder.as_slice() {
-        s.clone_from_slice(&node_info.envelope_support());
+        s.clone_from_slice(node_info.envelope_support());
     }
 
     let mut cs_builder = builder
@@ -100,7 +100,7 @@ pub fn decode_node_info(reader: &veilid_capnp::node_info::Reader) -> Result<Node
     if envelope_support.len() > MAX_ENVELOPE_VERSIONS {
         return Err(RPCError::protocol("too many envelope versions"));
     }
-    if envelope_support.len() == 0 {
+    if envelope_support.is_empty() {
         return Err(RPCError::protocol("no envelope versions"));
     }
 
@@ -129,7 +129,7 @@ pub fn decode_node_info(reader: &veilid_capnp::node_info::Reader) -> Result<Node
     if crypto_support.len() > MAX_CRYPTO_KINDS {
         return Err(RPCError::protocol("too many crypto kinds"));
     }
-    if crypto_support.len() == 0 {
+    if crypto_support.is_empty() {
         return Err(RPCError::protocol("no crypto kinds"));
     }
 
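The two decode_node_info hunks above swap `len() == 0` for `is_empty()`, per clippy's `len_zero` lint. Sketch:

// clippy::len_zero: `x.len() == 0` reads better as `x.is_empty()`, and for
// some collections it is also cheaper.
fn validate(versions: &[u8]) -> Result<(), String> {
    if versions.len() > 4 {
        return Err("too many versions".to_string());
    }
    if versions.is_empty() {
        // Before: versions.len() == 0
        return Err("no versions".to_string());
    }
    Ok(())
}

fn main() {
    assert!(validate(&[]).is_err());
    assert!(validate(&[1, 2]).is_ok());
    assert!(validate(&[1, 2, 3, 4, 5]).is_err());
}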
@@ -31,22 +31,22 @@ impl RPCAnswer {
 
 #[derive(Debug, Clone)]
 pub enum RPCAnswerDetail {
-    StatusA(RPCOperationStatusA),
-    FindNodeA(RPCOperationFindNodeA),
-    AppCallA(RPCOperationAppCallA),
-    GetValueA(RPCOperationGetValueA),
-    SetValueA(RPCOperationSetValueA),
-    WatchValueA(RPCOperationWatchValueA),
+    StatusA(Box<RPCOperationStatusA>),
+    FindNodeA(Box<RPCOperationFindNodeA>),
+    AppCallA(Box<RPCOperationAppCallA>),
+    GetValueA(Box<RPCOperationGetValueA>),
+    SetValueA(Box<RPCOperationSetValueA>),
+    WatchValueA(Box<RPCOperationWatchValueA>),
     #[cfg(feature = "unstable-blockstore")]
-    SupplyBlockA(RPCOperationSupplyBlockA),
+    SupplyBlockA(Box<RPCOperationSupplyBlockA>),
     #[cfg(feature = "unstable-blockstore")]
-    FindBlockA(RPCOperationFindBlockA),
+    FindBlockA(Box<RPCOperationFindBlockA>),
     #[cfg(feature = "unstable-tunnels")]
-    StartTunnelA(RPCOperationStartTunnelA),
+    StartTunnelA(Box<RPCOperationStartTunnelA>),
     #[cfg(feature = "unstable-tunnels")]
-    CompleteTunnelA(RPCOperationCompleteTunnelA),
+    CompleteTunnelA(Box<RPCOperationCompleteTunnelA>),
     #[cfg(feature = "unstable-tunnels")]
-    CancelTunnelA(RPCOperationCancelTunnelA),
+    CancelTunnelA(Box<RPCOperationCancelTunnelA>),
 }
 
 impl RPCAnswerDetail {
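Boxing every payload of `RPCAnswerDetail` (and of the other RPC enums further below) is the standard response to clippy's `large_enum_variant` lint: an enum is as large as its largest variant, so one bulky operation struct inflates every value of the type. A self-contained sketch of the size effect (sizes are indicative, not exact):

use std::mem::size_of;

#[allow(dead_code)]
enum Unboxed {
    Small(u8),
    Big([u8; 1024]),
}

#[allow(dead_code)]
enum Boxed {
    Small(u8),
    // Boxing moves the large payload to the heap; the variant stores a pointer.
    Big(Box<[u8; 1024]>),
}

fn main() {
    println!("unboxed: {} bytes", size_of::<Unboxed>()); // roughly 1025+
    println!("boxed:   {} bytes", size_of::<Boxed>());   // roughly pointer-sized
}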
@@ -98,62 +98,62 @@ impl RPCAnswerDetail {
             veilid_capnp::answer::detail::StatusA(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationStatusA::decode(&op_reader)?;
-                RPCAnswerDetail::StatusA(out)
+                RPCAnswerDetail::StatusA(Box::new(out))
             }
             veilid_capnp::answer::detail::FindNodeA(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationFindNodeA::decode(&op_reader)?;
-                RPCAnswerDetail::FindNodeA(out)
+                RPCAnswerDetail::FindNodeA(Box::new(out))
             }
             veilid_capnp::answer::detail::AppCallA(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationAppCallA::decode(&op_reader)?;
-                RPCAnswerDetail::AppCallA(out)
+                RPCAnswerDetail::AppCallA(Box::new(out))
             }
             veilid_capnp::answer::detail::GetValueA(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationGetValueA::decode(&op_reader)?;
-                RPCAnswerDetail::GetValueA(out)
+                RPCAnswerDetail::GetValueA(Box::new(out))
             }
             veilid_capnp::answer::detail::SetValueA(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationSetValueA::decode(&op_reader)?;
-                RPCAnswerDetail::SetValueA(out)
+                RPCAnswerDetail::SetValueA(Box::new(out))
             }
             veilid_capnp::answer::detail::WatchValueA(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationWatchValueA::decode(&op_reader)?;
-                RPCAnswerDetail::WatchValueA(out)
+                RPCAnswerDetail::WatchValueA(Box::new(out))
             }
             #[cfg(feature = "unstable-blockstore")]
             veilid_capnp::answer::detail::SupplyBlockA(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationSupplyBlockA::decode(&op_reader)?;
-                RPCAnswerDetail::SupplyBlockA(out)
+                RPCAnswerDetail::SupplyBlockA(Box::new(out))
             }
             #[cfg(feature = "unstable-blockstore")]
             veilid_capnp::answer::detail::FindBlockA(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationFindBlockA::decode(&op_reader)?;
-                RPCAnswerDetail::FindBlockA(out)
+                RPCAnswerDetail::FindBlockA(Box::new(out))
             }
             #[cfg(feature = "unstable-tunnels")]
             veilid_capnp::answer::detail::StartTunnelA(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationStartTunnelA::decode(&op_reader)?;
-                RPCAnswerDetail::StartTunnelA(out)
+                RPCAnswerDetail::StartTunnelA(Box::new(out))
             }
             #[cfg(feature = "unstable-tunnels")]
             veilid_capnp::answer::detail::CompleteTunnelA(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationCompleteTunnelA::decode(&op_reader)?;
-                RPCAnswerDetail::CompleteTunnelA(out)
+                RPCAnswerDetail::CompleteTunnelA(Box::new(out))
             }
             #[cfg(feature = "unstable-tunnels")]
             veilid_capnp::answer::detail::CancelTunnelA(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationCancelTunnelA::decode(&op_reader)?;
-                RPCAnswerDetail::CancelTunnelA(out)
+                RPCAnswerDetail::CancelTunnelA(Box::new(out))
             }
         };
         Ok(out)
@@ -2,9 +2,9 @@ use super::*;
 
 #[derive(Debug, Clone)]
 pub enum RPCOperationKind {
-    Question(RPCQuestion),
-    Statement(RPCStatement),
-    Answer(RPCAnswer),
+    Question(Box<RPCQuestion>),
+    Statement(Box<RPCStatement>),
+    Answer(Box<RPCAnswer>),
 }
 
 impl RPCOperationKind {
@@ -30,17 +30,17 @@ impl RPCOperationKind {
             veilid_capnp::operation::kind::Which::Question(r) => {
                 let q_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCQuestion::decode(&q_reader)?;
-                RPCOperationKind::Question(out)
+                RPCOperationKind::Question(Box::new(out))
             }
             veilid_capnp::operation::kind::Which::Statement(r) => {
                 let q_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCStatement::decode(&q_reader)?;
-                RPCOperationKind::Statement(out)
+                RPCOperationKind::Statement(Box::new(out))
             }
             veilid_capnp::operation::kind::Which::Answer(r) => {
                 let q_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCAnswer::decode(&q_reader)?;
-                RPCOperationKind::Answer(out)
+                RPCOperationKind::Answer(Box::new(out))
             }
         };
 
@@ -73,7 +73,7 @@ impl RPCOperation {
             op_id: OperationId::new(get_random_u64()),
             opt_sender_peer_info: sender_peer_info.opt_sender_peer_info,
             target_node_info_ts: sender_peer_info.target_node_info_ts,
-            kind: RPCOperationKind::Question(question),
+            kind: RPCOperationKind::Question(Box::new(question)),
         }
     }
     pub fn new_statement(statement: RPCStatement, sender_peer_info: SenderPeerInfo) -> Self {
@@ -81,7 +81,7 @@ impl RPCOperation {
             op_id: OperationId::new(get_random_u64()),
             opt_sender_peer_info: sender_peer_info.opt_sender_peer_info,
             target_node_info_ts: sender_peer_info.target_node_info_ts,
-            kind: RPCOperationKind::Statement(statement),
+            kind: RPCOperationKind::Statement(Box::new(statement)),
         }
     }
 
@@ -94,7 +94,7 @@ impl RPCOperation {
             op_id: request.op_id,
             opt_sender_peer_info: sender_peer_info.opt_sender_peer_info,
             target_node_info_ts: sender_peer_info.target_node_info_ts,
-            kind: RPCOperationKind::Answer(answer),
+            kind: RPCOperationKind::Answer(Box::new(answer)),
         }
     }
 
@@ -163,7 +163,7 @@ impl RPCOperation {
         builder.set_op_id(self.op_id.as_u64());
         if let Some(sender_peer_info) = &self.opt_sender_peer_info {
             let mut pi_builder = builder.reborrow().init_sender_peer_info();
-            encode_peer_info(&sender_peer_info, &mut pi_builder)?;
+            encode_peer_info(sender_peer_info, &mut pi_builder)?;
         }
         builder.set_target_node_info_ts(self.target_node_info_ts.as_u64());
         let mut k_builder = builder.reborrow().init_kind();
@@ -122,7 +122,7 @@ impl RPCOperationSetValueA {
         value: Option<SignedValueData>,
         peers: Vec<PeerInfo>,
     ) -> Result<Self, RPCError> {
-        if peers.len() as usize > MAX_SET_VALUE_A_PEERS_LEN {
+        if peers.len() > MAX_SET_VALUE_A_PEERS_LEN {
             return Err(RPCError::protocol(
                 "encoded SetValueA peers length too long",
             ));
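Dropping `as usize` here (and in the WatchValueQ hunks below) matches clippy's `unnecessary_cast` lint: `Vec::len()` and slice `len()` already return `usize`. Sketch:

const MAX_PEERS: usize = 20;

// clippy::unnecessary_cast: `len()` is already `usize`, so `as usize` is a no-op cast.
fn check_peers(peers: &[u32]) -> Result<(), String> {
    // Before: peers.len() as usize > MAX_PEERS
    if peers.len() > MAX_PEERS {
        return Err("too many peers".to_string());
    }
    Ok(())
}

fn main() {
    assert!(check_peers(&[1, 2, 3]).is_ok());
    assert!(check_peers(&[0; 21]).is_err());
}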
@@ -36,7 +36,7 @@ impl RPCOperationStatusQ {
     ) -> Result<(), RPCError> {
         if let Some(ns) = &self.node_status {
             let mut ns_builder = builder.reborrow().init_node_status();
-            encode_node_status(&ns, &mut ns_builder)?;
+            encode_node_status(ns, &mut ns_builder)?;
         }
         Ok(())
     }
@@ -98,11 +98,11 @@ impl RPCOperationStatusA {
     ) -> Result<(), RPCError> {
         if let Some(ns) = &self.node_status {
             let mut ns_builder = builder.reborrow().init_node_status();
-            encode_node_status(&ns, &mut ns_builder)?;
+            encode_node_status(ns, &mut ns_builder)?;
         }
         if let Some(si) = &self.sender_info {
             let mut si_builder = builder.reborrow().init_sender_info();
-            encode_sender_info(&si, &mut si_builder)?;
+            encode_sender_info(si, &mut si_builder)?;
         }
         Ok(())
     }
@@ -22,7 +22,7 @@ impl RPCOperationWatchValueQ {
         watcher: PublicKey,
         signature: Signature,
     ) -> Result<Self, RPCError> {
-        if subkeys.len() as usize > MAX_WATCH_VALUE_Q_SUBKEYS_LEN {
+        if subkeys.len() > MAX_WATCH_VALUE_Q_SUBKEYS_LEN {
             return Err(RPCError::protocol("WatchValueQ subkeys length too long"));
         }
         Ok(Self {
@@ -38,7 +38,7 @@ impl RPCOperationWatchValueQ {
     // signature covers: key, subkeys, expiration, count, using watcher key
     fn make_signature_data(&self) -> Vec<u8> {
         let mut sig_data =
-            Vec::with_capacity(PUBLIC_KEY_LENGTH + 4 + (self.subkeys.len() as usize * 8) + 8 + 4);
+            Vec::with_capacity(PUBLIC_KEY_LENGTH + 4 + (self.subkeys.len() * 8) + 8 + 4);
         sig_data.extend_from_slice(&self.key.kind.0);
         sig_data.extend_from_slice(&self.key.value.bytes);
         for sk in self.subkeys.ranges() {
@@ -43,22 +43,22 @@ impl RPCQuestion {
 
 #[derive(Debug, Clone)]
 pub enum RPCQuestionDetail {
-    StatusQ(RPCOperationStatusQ),
-    FindNodeQ(RPCOperationFindNodeQ),
-    AppCallQ(RPCOperationAppCallQ),
-    GetValueQ(RPCOperationGetValueQ),
-    SetValueQ(RPCOperationSetValueQ),
-    WatchValueQ(RPCOperationWatchValueQ),
+    StatusQ(Box<RPCOperationStatusQ>),
+    FindNodeQ(Box<RPCOperationFindNodeQ>),
+    AppCallQ(Box<RPCOperationAppCallQ>),
+    GetValueQ(Box<RPCOperationGetValueQ>),
+    SetValueQ(Box<RPCOperationSetValueQ>),
+    WatchValueQ(Box<RPCOperationWatchValueQ>),
     #[cfg(feature = "unstable-blockstore")]
-    SupplyBlockQ(RPCOperationSupplyBlockQ),
+    SupplyBlockQ(Box<RPCOperationSupplyBlockQ>),
     #[cfg(feature = "unstable-blockstore")]
-    FindBlockQ(RPCOperationFindBlockQ),
+    FindBlockQ(Box<RPCOperationFindBlockQ>),
     #[cfg(feature = "unstable-tunnels")]
-    StartTunnelQ(RPCOperationStartTunnelQ),
+    StartTunnelQ(Box<RPCOperationStartTunnelQ>),
     #[cfg(feature = "unstable-tunnels")]
-    CompleteTunnelQ(RPCOperationCompleteTunnelQ),
+    CompleteTunnelQ(Box<RPCOperationCompleteTunnelQ>),
     #[cfg(feature = "unstable-tunnels")]
-    CancelTunnelQ(RPCOperationCancelTunnelQ),
+    CancelTunnelQ(Box<RPCOperationCancelTunnelQ>),
 }
 
 impl RPCQuestionDetail {
@@ -111,62 +111,62 @@ impl RPCQuestionDetail {
             veilid_capnp::question::detail::StatusQ(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationStatusQ::decode(&op_reader)?;
-                RPCQuestionDetail::StatusQ(out)
+                RPCQuestionDetail::StatusQ(Box::new(out))
             }
             veilid_capnp::question::detail::FindNodeQ(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationFindNodeQ::decode(&op_reader)?;
-                RPCQuestionDetail::FindNodeQ(out)
+                RPCQuestionDetail::FindNodeQ(Box::new(out))
             }
             veilid_capnp::question::detail::Which::AppCallQ(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationAppCallQ::decode(&op_reader)?;
-                RPCQuestionDetail::AppCallQ(out)
+                RPCQuestionDetail::AppCallQ(Box::new(out))
             }
             veilid_capnp::question::detail::GetValueQ(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationGetValueQ::decode(&op_reader)?;
-                RPCQuestionDetail::GetValueQ(out)
+                RPCQuestionDetail::GetValueQ(Box::new(out))
            }
             veilid_capnp::question::detail::SetValueQ(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationSetValueQ::decode(&op_reader)?;
-                RPCQuestionDetail::SetValueQ(out)
+                RPCQuestionDetail::SetValueQ(Box::new(out))
             }
             veilid_capnp::question::detail::WatchValueQ(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationWatchValueQ::decode(&op_reader)?;
-                RPCQuestionDetail::WatchValueQ(out)
+                RPCQuestionDetail::WatchValueQ(Box::new(out))
             }
             #[cfg(feature = "unstable-blockstore")]
             veilid_capnp::question::detail::SupplyBlockQ(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationSupplyBlockQ::decode(&op_reader)?;
-                RPCQuestionDetail::SupplyBlockQ(out)
+                RPCQuestionDetail::SupplyBlockQ(Box::new(out))
             }
             #[cfg(feature = "unstable-blockstore")]
             veilid_capnp::question::detail::FindBlockQ(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationFindBlockQ::decode(&op_reader)?;
-                RPCQuestionDetail::FindBlockQ(out)
+                RPCQuestionDetail::FindBlockQ(Box::new(out))
             }
             #[cfg(feature = "unstable-tunnels")]
             veilid_capnp::question::detail::StartTunnelQ(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationStartTunnelQ::decode(&op_reader)?;
-                RPCQuestionDetail::StartTunnelQ(out)
+                RPCQuestionDetail::StartTunnelQ(Box::new(out))
             }
             #[cfg(feature = "unstable-tunnels")]
             veilid_capnp::question::detail::CompleteTunnelQ(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationCompleteTunnelQ::decode(&op_reader)?;
-                RPCQuestionDetail::CompleteTunnelQ(out)
+                RPCQuestionDetail::CompleteTunnelQ(Box::new(out))
             }
             #[cfg(feature = "unstable-tunnels")]
             veilid_capnp::question::detail::CancelTunnelQ(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationCancelTunnelQ::decode(&op_reader)?;
-                RPCQuestionDetail::CancelTunnelQ(out)
+                RPCQuestionDetail::CancelTunnelQ(Box::new(out))
             }
         };
         Ok(out)
@@ -34,12 +34,12 @@ impl RPCStatement {
 
 #[derive(Debug, Clone)]
 pub enum RPCStatementDetail {
-    ValidateDialInfo(RPCOperationValidateDialInfo),
-    Route(RPCOperationRoute),
-    ValueChanged(RPCOperationValueChanged),
-    Signal(RPCOperationSignal),
-    ReturnReceipt(RPCOperationReturnReceipt),
-    AppMessage(RPCOperationAppMessage),
+    ValidateDialInfo(Box<RPCOperationValidateDialInfo>),
+    Route(Box<RPCOperationRoute>),
+    ValueChanged(Box<RPCOperationValueChanged>),
+    Signal(Box<RPCOperationSignal>),
+    ReturnReceipt(Box<RPCOperationReturnReceipt>),
+    AppMessage(Box<RPCOperationAppMessage>),
 }
 
 impl RPCStatementDetail {
@@ -71,32 +71,32 @@ impl RPCStatementDetail {
             veilid_capnp::statement::detail::ValidateDialInfo(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationValidateDialInfo::decode(&op_reader)?;
-                RPCStatementDetail::ValidateDialInfo(out)
+                RPCStatementDetail::ValidateDialInfo(Box::new(out))
             }
             veilid_capnp::statement::detail::Route(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationRoute::decode(&op_reader)?;
-                RPCStatementDetail::Route(out)
+                RPCStatementDetail::Route(Box::new(out))
             }
             veilid_capnp::statement::detail::ValueChanged(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationValueChanged::decode(&op_reader)?;
-                RPCStatementDetail::ValueChanged(out)
+                RPCStatementDetail::ValueChanged(Box::new(out))
             }
             veilid_capnp::statement::detail::Signal(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationSignal::decode(&op_reader)?;
-                RPCStatementDetail::Signal(out)
+                RPCStatementDetail::Signal(Box::new(out))
             }
             veilid_capnp::statement::detail::ReturnReceipt(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationReturnReceipt::decode(&op_reader)?;
-                RPCStatementDetail::ReturnReceipt(out)
+                RPCStatementDetail::ReturnReceipt(Box::new(out))
             }
             veilid_capnp::statement::detail::AppMessage(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
                 let out = RPCOperationAppMessage::decode(&op_reader)?;
-                RPCStatementDetail::AppMessage(out)
+                RPCStatementDetail::AppMessage(Box::new(out))
             }
         };
         Ok(out)
@@ -41,7 +41,7 @@ pub fn decode_peer_info(reader: &veilid_capnp::peer_info::Reader) -> Result<Peer
         node_ids.add(decode_typed_key(&nid_reader)?);
     }
     let signed_node_info = decode_signed_node_info(&sni_reader)?;
-    if node_ids.len() == 0 {
+    if node_ids.is_empty() {
         return Err(RPCError::protocol("no verified node ids"));
     }
     Ok(PeerInfo::new(node_ids, signed_node_info))
@@ -53,11 +53,11 @@ pub fn encode_route_hop(
     match &route_hop.node {
         RouteNode::NodeId(ni) => {
             let mut ni_builder = node_builder.init_node_id();
-            encode_key256(&ni, &mut ni_builder);
+            encode_key256(ni, &mut ni_builder);
         }
         RouteNode::PeerInfo(pi) => {
             let mut pi_builder = node_builder.init_peer_info();
-            encode_peer_info(&pi, &mut pi_builder)?;
+            encode_peer_info(pi, &mut pi_builder)?;
         }
     }
     if let Some(rhd) = &route_hop.next_hop {
@@ -361,9 +361,9 @@ impl RPCProcessor {
                 if let Some(sender_noderef) = res {
                     NetworkResult::value(Destination::relay(peer_noderef, sender_noderef))
                 } else {
-                    return NetworkResult::invalid_message(
+                    NetworkResult::invalid_message(
                         "not responding to sender that has no node info",
-                    );
+                    )
                 }
             }
         }
@@ -371,9 +371,9 @@ impl RPCProcessor {
         match &request.header.detail {
             RPCMessageHeaderDetail::Direct(_) => {
                 // If this was sent directly, we should only ever respond directly
-                return NetworkResult::invalid_message(
+                NetworkResult::invalid_message(
                     "not responding to private route from direct question",
-                );
+                )
             }
             RPCMessageHeaderDetail::SafetyRouted(detail) => {
                 // If this was sent via a safety route, but not received over our private route, don't respond with a safety route,
@@ -387,7 +387,7 @@ impl RPCProcessor {
                 // If this was received over our private route, it's okay to respond to a private route via our safety route
                 NetworkResult::value(Destination::private_route(
                     pr.clone(),
-                    SafetySelection::Safe(detail.safety_spec.clone()),
+                    SafetySelection::Safe(detail.safety_spec),
                 ))
             }
         }
@@ -60,6 +60,7 @@ where
     C: Fn(NodeRef) -> F,
     D: Fn(&[NodeRef]) -> Option<R>,
 {
+    #[allow(clippy::too_many_arguments)]
     pub fn new(
         routing_table: RoutingTable,
         node_id: TypedKey,
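The added `#[allow(clippy::too_many_arguments)]` attributes (here and on `record_answer_received` below) acknowledge clippy's `too_many_arguments` lint, which fires above seven parameters by default, where regrouping the parameters into a struct was not worth the churn. Sketch:

// clippy::too_many_arguments fires on functions with more than seven
// parameters. When collapsing them into a struct isn't worth it, the lint
// can be acknowledged at the definition site.
#[allow(clippy::too_many_arguments)]
fn configure(a: u8, b: u8, c: u8, d: u8, e: u8, f: u8, g: u8, h: u8) -> u32 {
    [a, b, c, d, e, f, g, h].iter().map(|v| *v as u32).sum()
}

fn main() {
    assert_eq!(configure(1, 1, 1, 1, 1, 1, 1, 1), 8);
}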
@@ -103,7 +104,7 @@ where
     fn add_to_fanout_queue(self: Arc<Self>, new_nodes: &[NodeRef]) {
         let ctx = &mut *self.context.lock();
         let this = self.clone();
-        ctx.fanout_queue.add(&new_nodes, |current_nodes| {
+        ctx.fanout_queue.add(new_nodes, |current_nodes| {
             let mut current_nodes_vec = this
                 .routing_table
                 .sort_and_clean_closest_noderefs(this.node_id, current_nodes);
@@ -180,8 +181,10 @@ where
         let entry = opt_entry.unwrap();
 
         // Filter entries
         entry.with(rti, |_rti, e| {
-            let Some(signed_node_info) = e.signed_node_info(RoutingDomain::PublicInternet) else {
+            let Some(signed_node_info) =
+                e.signed_node_info(RoutingDomain::PublicInternet)
+            else {
                 return false;
             };
             // Ensure only things that are valid/signed in the PublicInternet domain are returned
@@ -447,7 +447,7 @@ impl RPCProcessor {
         capabilities: &[Capability],
     ) -> bool {
         let routing_table = self.routing_table();
-        routing_table.signed_node_info_is_valid_in_routing_domain(routing_domain, &signed_node_info)
+        routing_table.signed_node_info_is_valid_in_routing_domain(routing_domain, signed_node_info)
             && signed_node_info.node_info().has_capabilities(capabilities)
     }
 
@@ -684,7 +684,7 @@ impl RPCProcessor {
         let ssni_route =
             self.get_sender_peer_info(&Destination::direct(compiled_route.first_hop.clone()));
         let operation = RPCOperation::new_statement(
-            RPCStatement::new(RPCStatementDetail::Route(route_operation)),
+            RPCStatement::new(RPCStatementDetail::Route(Box::new(route_operation))),
             ssni_route,
         );
 
@@ -1021,6 +1021,7 @@ impl RPCProcessor {
     }
 
     /// Record answer received from node or route
+    #[allow(clippy::too_many_arguments)]
     fn record_answer_received(
         &self,
         send_ts: Timestamp,
@@ -1079,7 +1080,7 @@ impl RPCProcessor {
 
         // If we sent to a private route without a safety route
        // We need to mark our own node info as having been seen so we can optimize sending it
-        if let Err(e) = rss.mark_remote_private_route_seen_our_node_info(&rpr_pubkey, recv_ts) {
+        if let Err(e) = rss.mark_remote_private_route_seen_our_node_info(rpr_pubkey, recv_ts) {
             log_rpc!(error "private route missing: {}", e);
         }
 
@@ -1116,7 +1117,6 @@ impl RPCProcessor {
             RPCMessageHeaderDetail::Direct(_) => {
                 if let Some(sender_nr) = msg.opt_sender_nr.clone() {
                     sender_nr.stats_question_rcvd(recv_ts, bytes);
-                    return;
                 }
             }
             // Process messages that arrived with no private route (private route stub)
@@ -17,7 +17,7 @@ impl RPCProcessor {
         let app_call_q = RPCOperationAppCallQ::new(message)?;
         let question = RPCQuestion::new(
             network_result_try!(self.get_destination_respond_to(&dest)?),
-            RPCQuestionDetail::AppCallQ(app_call_q),
+            RPCQuestionDetail::AppCallQ(Box::new(app_call_q)),
         );
 
         // Send the app call question
@@ -117,8 +117,11 @@ impl RPCProcessor {
         let app_call_a = RPCOperationAppCallA::new(message_a)?;
 
         // Send status answer
-        self.answer(msg, RPCAnswer::new(RPCAnswerDetail::AppCallA(app_call_a)))
-            .await
+        self.answer(
+            msg,
+            RPCAnswer::new(RPCAnswerDetail::AppCallA(Box::new(app_call_a))),
+        )
+        .await
     }
 
     /// Exposed to API for apps to return app call answers
@@ -13,7 +13,7 @@ impl RPCProcessor {
         message: Vec<u8>,
     ) -> Result<NetworkResult<()>, RPCError> {
         let app_message = RPCOperationAppMessage::new(message)?;
-        let statement = RPCStatement::new(RPCStatementDetail::AppMessage(app_message));
+        let statement = RPCStatement::new(RPCStatementDetail::AppMessage(Box::new(app_message)));
 
         // Send the app message request
         self.statement(dest, statement).await
@@ -38,7 +38,7 @@ impl RPCError {
         move |x| Self::Internal(format!("{}: {}", message.to_string(), x.to_string()))
     }
     pub fn else_internal<M: ToString>(message: M) -> impl FnOnce() -> Self {
-        move || Self::Internal(format!("{}", message.to_string()))
+        move || Self::Internal(message.to_string())
     }
     pub fn network<X: ToString>(x: X) -> Self {
         Self::Network(x.to_string())
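Replacing `format!("{}", message.to_string())` with `message.to_string()` is what clippy's `useless_format` lint asks for: `format!` with a lone `{}` and a single argument is just `to_string()`. Sketch:

// clippy::useless_format: `format!("{}", x)` is equivalent to `x.to_string()`.
fn internal_error<M: ToString>(message: M) -> String {
    // Before: format!("{}", message.to_string())
    message.to_string()
}

fn main() {
    assert_eq!(internal_error("private route missing"), "private route missing");
}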
@@ -30,8 +30,9 @@ impl RPCProcessor {
             ));
         }
 
-        let find_node_q_detail =
-            RPCQuestionDetail::FindNodeQ(RPCOperationFindNodeQ::new(node_id, capabilities.clone()));
+        let find_node_q_detail = RPCQuestionDetail::FindNodeQ(Box::new(
+            RPCOperationFindNodeQ::new(node_id, capabilities.clone()),
+        ));
         let find_node_q = RPCQuestion::new(
             network_result_try!(self.get_destination_respond_to(&dest)?),
             find_node_q_detail,
@@ -111,7 +112,10 @@ impl RPCProcessor {
         let find_node_a = RPCOperationFindNodeA::new(closest_nodes)?;
 
         // Send FindNode answer
-        self.answer(msg, RPCAnswer::new(RPCAnswerDetail::FindNodeA(find_node_a)))
-            .await
+        self.answer(
+            msg,
+            RPCAnswer::new(RPCAnswerDetail::FindNodeA(Box::new(find_node_a))),
+        )
+        .await
     }
 }
@@ -65,7 +65,7 @@ impl RPCProcessor {
         let get_value_q = RPCOperationGetValueQ::new(key, subkey, last_descriptor.is_none());
         let question = RPCQuestion::new(
             network_result_try!(self.get_destination_respond_to(&dest)?),
-            RPCQuestionDetail::GetValueQ(get_value_q),
+            RPCQuestionDetail::GetValueQ(Box::new(get_value_q)),
         );
 
         let question_context = QuestionContext::GetValue(ValidateGetValueContext {
@@ -268,7 +268,7 @@ impl RPCProcessor {
         )?;
 
         // Send GetValue answer
-        self.answer(msg, RPCAnswer::new(RPCAnswerDetail::GetValueA(get_value_a)))
+        self.answer(msg, RPCAnswer::new(RPCAnswerDetail::GetValueA(Box::new(get_value_a))))
             .await
     }
 }
@@ -15,7 +15,8 @@ impl RPCProcessor {
         let receipt = receipt.as_ref().to_vec();
 
         let return_receipt = RPCOperationReturnReceipt::new(receipt)?;
-        let statement = RPCStatement::new(RPCStatementDetail::ReturnReceipt(return_receipt));
+        let statement =
+            RPCStatement::new(RPCStatementDetail::ReturnReceipt(Box::new(return_receipt)));
 
         // Send the return_receipt request
         network_result_try!(self.statement(dest, statement).await?);
@@ -1,7 +1,10 @@
 use super::*;
 
 impl RPCProcessor {
-    #[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip_all, err))]
+    #[cfg_attr(
+        feature = "verbose-tracing",
+        instrument(level = "trace", skip_all, err)
+    )]
     async fn process_route_safety_route_hop(
         &self,
         routed_operation: RoutedOperation,
@@ -26,7 +29,10 @@ impl RPCProcessor {
         }
 
         // Get next hop node ref
-        let Some(mut next_hop_nr) = route_hop.node.node_ref(self.routing_table.clone(), safety_route.public_key.kind) else {
+        let Some(mut next_hop_nr) = route_hop
+            .node
+            .node_ref(self.routing_table.clone(), safety_route.public_key.kind)
+        else {
             return Err(RPCError::network(format!(
                 "could not get route node hop ref: {}",
                 route_hop.node.describe(safety_route.public_key.kind)
@@ -45,14 +51,18 @@ impl RPCProcessor {
             },
             routed_operation,
         );
-        let next_hop_route_stmt = RPCStatement::new(RPCStatementDetail::Route(next_hop_route));
+        let next_hop_route_stmt =
+            RPCStatement::new(RPCStatementDetail::Route(Box::new(next_hop_route)));
 
         // Send the next route statement
         self.statement(Destination::direct(next_hop_nr), next_hop_route_stmt)
             .await
     }
 
-    #[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip_all, err))]
+    #[cfg_attr(
+        feature = "verbose-tracing",
+        instrument(level = "trace", skip_all, err)
+    )]
     async fn process_route_private_route_hop(
         &self,
         routed_operation: RoutedOperation,
@@ -68,7 +78,9 @@ impl RPCProcessor {
         }
 
         // Get next hop node ref
-        let Some(mut next_hop_nr) = next_route_node.node_ref(self.routing_table.clone(), safety_route_public_key.kind) else {
+        let Some(mut next_hop_nr) =
+            next_route_node.node_ref(self.routing_table.clone(), safety_route_public_key.kind)
+        else {
             return Err(RPCError::network(format!(
                 "could not get route node hop ref: {}",
                 next_route_node.describe(safety_route_public_key.kind)
@@ -87,7 +99,8 @@ impl RPCProcessor {
             },
             routed_operation,
         );
-        let next_hop_route_stmt = RPCStatement::new(RPCStatementDetail::Route(next_hop_route));
+        let next_hop_route_stmt =
+            RPCStatement::new(RPCStatementDetail::Route(Box::new(next_hop_route)));
 
         // Send the next route statement
         self.statement(Destination::direct(next_hop_nr), next_hop_route_stmt)
@@ -99,7 +112,10 @@ impl RPCProcessor {
     /// Note: it is important that we never respond with a safety route to questions that come
     /// in without a private route. Giving away a safety route when the node id is known is
     /// a privacy violation!
-    #[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip_all, err))]
+    #[cfg_attr(
+        feature = "verbose-tracing",
+        instrument(level = "trace", skip_all, err)
+    )]
     fn process_safety_routed_operation(
         &self,
         detail: RPCMessageHeaderDetailDirect,
@@ -111,7 +127,9 @@ impl RPCProcessor {
         // xxx: punish nodes that send messages that fail to decrypt eventually? How to do this for safety routes?
         let node_id_secret = self.routing_table.node_id_secret_key(remote_sr_pubkey.kind);
         let Ok(dh_secret) = vcrypto.cached_dh(&remote_sr_pubkey.value, &node_id_secret) else {
-            return Ok(NetworkResult::invalid_message("dh failed for remote safety route for safety routed operation"));
+            return Ok(NetworkResult::invalid_message(
+                "dh failed for remote safety route for safety routed operation",
+            ));
         };
         let body = match vcrypto.decrypt_aead(
             routed_operation.data(),
@@ -141,7 +159,10 @@ impl RPCProcessor {
     }
 
     /// Process a routed operation that came in over both a safety route and a private route
-    #[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip_all, err))]
+    #[cfg_attr(
+        feature = "verbose-tracing",
+        instrument(level = "trace", skip_all, err)
+    )]
     fn process_private_routed_operation(
         &self,
         detail: RPCMessageHeaderDetailDirect,
@ -152,49 +173,54 @@ impl RPCProcessor {
|
|||||||
) -> Result<NetworkResult<()>, RPCError> {
|
) -> Result<NetworkResult<()>, RPCError> {
|
||||||
// Get sender id of the peer with the crypto kind of the route
|
// Get sender id of the peer with the crypto kind of the route
|
||||||
let Some(sender_id) = detail.peer_noderef.node_ids().get(pr_pubkey.kind) else {
|
let Some(sender_id) = detail.peer_noderef.node_ids().get(pr_pubkey.kind) else {
|
||||||
return Ok(NetworkResult::invalid_message("route node doesnt have a required crypto kind for routed operation"));
|
return Ok(NetworkResult::invalid_message(
|
||||||
|
"route node doesnt have a required crypto kind for routed operation",
|
||||||
|
));
|
||||||
};
|
};
|
||||||
|
|
||||||
// Look up the private route and ensure it's one in our spec store
|
// Look up the private route and ensure it's one in our spec store
|
||||||
// Ensure the route is validated, and construct a return safetyspec that matches the inbound preferences
|
// Ensure the route is validated, and construct a return safetyspec that matches the inbound preferences
|
||||||
let rss = self.routing_table.route_spec_store();
|
let rss = self.routing_table.route_spec_store();
|
||||||
let preferred_route = rss.get_route_id_for_key(&pr_pubkey.value);
|
let preferred_route = rss.get_route_id_for_key(&pr_pubkey.value);
|
||||||
let Some((secret_key, safety_spec)) = rss
|
let Some((secret_key, safety_spec)) = rss.with_signature_validated_route(
|
||||||
.with_signature_validated_route(
|
&pr_pubkey,
|
||||||
&pr_pubkey,
|
routed_operation.signatures(),
|
||||||
routed_operation.signatures(),
|
routed_operation.data(),
|
||||||
routed_operation.data(),
|
sender_id.value,
|
||||||
sender_id.value,
|
|rssd, rsd| {
|
||||||
|rssd, rsd| {
|
(
|
||||||
(
|
rsd.secret_key,
|
||||||
rsd.secret_key,
|
SafetySpec {
|
||||||
SafetySpec {
|
preferred_route,
|
||||||
preferred_route,
|
hop_count: rssd.hop_count(),
|
||||||
hop_count: rssd.hop_count(),
|
stability: rssd.get_stability(),
|
||||||
stability: rssd.get_stability(),
|
sequencing: routed_operation.sequencing(),
|
||||||
sequencing: routed_operation.sequencing(),
|
},
|
||||||
},
|
)
|
||||||
)
|
},
|
||||||
}
|
) else {
|
||||||
)
|
return Ok(NetworkResult::invalid_message(
|
||||||
else {
|
"signatures did not validate for private route",
|
||||||
return Ok(NetworkResult::invalid_message("signatures did not validate for private route"));
|
));
|
||||||
};
|
};
|
||||||
|
|
||||||
// Now that things are valid, decrypt the routed operation with DEC(nonce, DH(the SR's public key, the PR's (or node's) secret)
|
// Now that things are valid, decrypt the routed operation with DEC(nonce, DH(the SR's public key, the PR's (or node's) secret)
|
||||||
// xxx: punish nodes that send messages that fail to decrypt eventually. How to do this for private routes?
|
// xxx: punish nodes that send messages that fail to decrypt eventually. How to do this for private routes?
|
||||||
let Ok(dh_secret) = vcrypto.cached_dh(&remote_sr_pubkey.value, &secret_key) else {
|
let Ok(dh_secret) = vcrypto.cached_dh(&remote_sr_pubkey.value, &secret_key) else {
|
||||||
return Ok(NetworkResult::invalid_message("dh failed for remote safety route for private routed operation"));
|
return Ok(NetworkResult::invalid_message(
|
||||||
|
"dh failed for remote safety route for private routed operation",
|
||||||
|
));
|
||||||
|
};
|
||||||
|
let Ok(body) = vcrypto.decrypt_aead(
|
||||||
|
routed_operation.data(),
|
||||||
|
routed_operation.nonce(),
|
||||||
|
&dh_secret,
|
||||||
|
None,
|
||||||
|
) else {
|
||||||
|
return Ok(NetworkResult::invalid_message(
|
||||||
|
"decryption of routed operation failed",
|
||||||
|
));
|
||||||
};
|
};
|
||||||
let Ok(body) = vcrypto
|
|
||||||
.decrypt_aead(
|
|
||||||
routed_operation.data(),
|
|
||||||
routed_operation.nonce(),
|
|
||||||
&dh_secret,
|
|
||||||
None,
|
|
||||||
) else {
|
|
||||||
return Ok(NetworkResult::invalid_message("decryption of routed operation failed"));
|
|
||||||
};
|
|
||||||
|
|
||||||
// Pass message to RPC system
|
// Pass message to RPC system
|
||||||
self.enqueue_private_routed_message(
|
self.enqueue_private_routed_message(
|
||||||
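A note on the `let … else` handlers reformatted above: the `else` arm of a let-else must diverge (return, continue, panic), which is what keeps the happy path unnested in these RPC handlers. A minimal standalone sketch; the `sender_id` function and the `(kind, id)` pairs are invented for illustration and are not from the Veilid codebase:

```rust
// A minimal let-else sketch: the divergent else arm lets the happy path
// continue without an extra level of nesting.
fn sender_id(ids: &[(&str, u64)], kind: &str) -> Result<u64, String> {
    let Some((_, id)) = ids.iter().find(|(k, _)| *k == kind) else {
        return Err(format!("no id with crypto kind {}", kind));
    };
    Ok(*id)
}

fn main() {
    let ids = [("VLD0", 42u64)];
    println!("{:?}", sender_id(&ids, "VLD0")); // Ok(42)
    println!("{:?}", sender_id(&ids, "NONE")); // Err(...)
}
```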
@ -209,7 +235,10 @@ impl RPCProcessor {
          Ok(NetworkResult::value(()))
      }

-     #[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip_all, err))]
+     #[cfg_attr(
+         feature = "verbose-tracing",
+         instrument(level = "trace", skip_all, err)
+     )]
      fn process_routed_operation(
          &self,
          detail: RPCMessageHeaderDetailDirect,
@ -239,7 +268,10 @@ impl RPCProcessor {
              )
          }
      }
-     #[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip_all, err))]
+     #[cfg_attr(
+         feature = "verbose-tracing",
+         instrument(level = "trace", skip_all, err)
+     )]
      pub(crate) async fn process_private_route_first_hop(
          &self,
          mut routed_operation: RoutedOperation,
@ -247,14 +279,18 @@ impl RPCProcessor {
          mut private_route: PrivateRoute,
      ) -> Result<NetworkResult<()>, RPCError> {
          let Some(pr_first_hop) = private_route.pop_first_hop() else {
-             return Ok(NetworkResult::invalid_message("switching from safety route to private route requires first hop"));
+             return Ok(NetworkResult::invalid_message(
+                 "switching from safety route to private route requires first hop",
+             ));
          };

          // Check for loopback test where private route is the same as safety route
          if sr_pubkey == private_route.public_key {
              // If so, we're going to turn this thing right around without transiting the network
              let PrivateRouteHops::Data(route_hop_data) = private_route.hops else {
-                 return Ok(NetworkResult::invalid_message("Loopback test requires hops"));
+                 return Ok(NetworkResult::invalid_message(
+                     "Loopback test requires hops",
+                 ));
              };

              // Decrypt route hop data
@ -282,7 +318,7 @@ impl RPCProcessor {
                  hop_count: private_route.hop_count - 1,
                  hops: route_hop
                      .next_hop
-                     .map(|rhd| PrivateRouteHops::Data(rhd))
+                     .map(PrivateRouteHops::Data)
                      .unwrap_or(PrivateRouteHops::Empty),
              },
          )
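The `.map(|rhd| PrivateRouteHops::Data(rhd))` → `.map(PrivateRouteHops::Data)` change above is the `clippy::redundant_closure` fix: a tuple-variant constructor is itself a function, so the closure that only forwards its argument can be dropped. A minimal sketch with a stand-in `Hops` enum (illustrative, not the real type):

```rust
#[derive(Debug)]
enum Hops {
    Data(Vec<u8>),
    Empty,
}

fn main() {
    let next_hop: Option<Vec<u8>> = Some(vec![1, 2, 3]);

    // Before: a closure that only forwards its argument to the constructor
    // (flagged by clippy::redundant_closure).
    let a = next_hop.clone().map(|d| Hops::Data(d)).unwrap_or(Hops::Empty);

    // After: pass the variant constructor directly.
    let b = next_hop.map(Hops::Data).unwrap_or(Hops::Empty);

    println!("{:?} {:?}", a, b);
}
```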
@ -342,9 +378,11 @@ impl RPCProcessor {
                  .map_err(RPCError::protocol)?;
              decode_route_hop(&rh_reader)?
          };

          // Validate the RouteHop
-         route_hop.validate(self.crypto.clone()).map_err(RPCError::protocol)?;
+         route_hop
+             .validate(self.crypto.clone())
+             .map_err(RPCError::protocol)?;

          // Sign the operation if this is not our last hop
          // as the last hop is already signed by the envelope
@ -360,7 +398,10 @@ impl RPCProcessor {
          Ok(NetworkResult::value(route_hop))
      }

-     #[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip(self), ret, err))]
+     #[cfg_attr(
+         feature = "verbose-tracing",
+         instrument(level = "trace", skip(self), ret, err)
+     )]
      pub(crate) async fn process_route(
          &self,
          msg: RPCMessage,
@ -374,16 +415,10 @@ impl RPCProcessor {
          }

          let opi = routing_table.get_own_peer_info(msg.header.routing_domain());
-         if !opi
-             .signed_node_info()
-             .node_info()
-             .has_capability(CAP_ROUTE)
-         {
-             return Ok(NetworkResult::service_unavailable(
-                 "route is not available",
-             ));
+         if !opi.signed_node_info().node_info().has_capability(CAP_ROUTE) {
+             return Ok(NetworkResult::service_unavailable("route is not available"));
          }

          // Get header detail, must be direct and not inside a route itself
          let detail = match msg.header.detail {
              RPCMessageHeaderDetail::Direct(detail) => detail,
@ -395,7 +430,7 @@ impl RPCProcessor {
          };

          // Get the statement
-         let (_,_,_,kind) = msg.operation.destructure();
+         let (_, _, _, kind) = msg.operation.destructure();
          let route = match kind {
              RPCOperationKind::Statement(s) => match s.destructure() {
                  RPCStatementDetail::Route(s) => s,
@ -419,19 +454,22 @@ impl RPCProcessor {
              SafetyRouteHops::Data(ref route_hop_data) => {
                  // Decrypt the blob with DEC(nonce, DH(the SR's public key, this hop's secret)
                  let node_id_secret = self.routing_table.node_id_secret_key(crypto_kind);
-                 let Ok(dh_secret) = vcrypto
-                     .cached_dh(&safety_route.public_key.value, &node_id_secret) else {
-                     return Ok(NetworkResult::invalid_message("dh failed for safety route hop"));
+                 let Ok(dh_secret) =
+                     vcrypto.cached_dh(&safety_route.public_key.value, &node_id_secret)
+                 else {
+                     return Ok(NetworkResult::invalid_message(
+                         "dh failed for safety route hop",
+                     ));
                  };
-                 let Ok(mut dec_blob_data) = vcrypto
-                     .decrypt_aead(
-                         &route_hop_data.blob,
-                         &route_hop_data.nonce,
-                         &dh_secret,
-                         None,
-                     )
-                 else {
-                     return Ok(NetworkResult::invalid_message("failed to decrypt route hop data for safety route hop"));
+                 let Ok(mut dec_blob_data) = vcrypto.decrypt_aead(
+                     &route_hop_data.blob,
+                     &route_hop_data.nonce,
+                     &dh_secret,
+                     None,
+                 ) else {
+                     return Ok(NetworkResult::invalid_message(
+                         "failed to decrypt route hop data for safety route hop",
+                     ));
                  };

                  // See if this is last hop in safety route, if so, we're decoding a PrivateRoute not a RouteHop
@ -440,26 +478,35 @@ impl RPCProcessor {
                  };

                  let Ok(dec_blob_reader) = RPCMessageData::new(dec_blob_data).get_reader() else {
-                     return Ok(NetworkResult::invalid_message("Failed to decode RPCMessageData from blob"));
+                     return Ok(NetworkResult::invalid_message(
+                         "Failed to decode RPCMessageData from blob",
+                     ));
                  };

                  // Decode the blob appropriately
                  if dec_blob_tag == 1 {
                      // PrivateRoute
                      let private_route = {
-                         let Ok(pr_reader) = dec_blob_reader
-                             .get_root::<veilid_capnp::private_route::Reader>() else {
-                             return Ok(NetworkResult::invalid_message("failed to get private route reader for blob"));
+                         let Ok(pr_reader) =
+                             dec_blob_reader.get_root::<veilid_capnp::private_route::Reader>()
+                         else {
+                             return Ok(NetworkResult::invalid_message(
+                                 "failed to get private route reader for blob",
+                             ));
                          };
                          let Ok(private_route) = decode_private_route(&pr_reader) else {
-                             return Ok(NetworkResult::invalid_message("failed to decode private route"));
+                             return Ok(NetworkResult::invalid_message(
+                                 "failed to decode private route",
+                             ));
                          };
                          private_route
                      };

                      // Validate the private route
-                     if let Err(_) = private_route.validate(self.crypto.clone()) {
-                         return Ok(NetworkResult::invalid_message("failed to validate private route"));
+                     if private_route.validate(self.crypto.clone()).is_err() {
+                         return Ok(NetworkResult::invalid_message(
+                             "failed to validate private route",
+                         ));
                      }

                      // Switching from full safety route to private route first hop
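The `if let Err(_) = expr { … }` → `if expr.is_err() { … }` rewrite above is `clippy::redundant_pattern_matching`: the pattern binds nothing, so the query method states the intent directly. A small self-contained sketch; the `validate` function here is hypothetical:

```rust
fn validate(ok: bool) -> Result<(), String> {
    if ok {
        Ok(())
    } else {
        Err("signature mismatch".to_string())
    }
}

fn main() {
    // Before: a pattern match that ignores the bound error value.
    if let Err(_) = validate(false) {
        println!("failed to validate (old style)");
    }

    // After: ask the Result directly, as clippy suggests.
    if validate(false).is_err() {
        println!("failed to validate (new style)");
    }
}
```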
@ -474,19 +521,26 @@ impl RPCProcessor {
                  } else if dec_blob_tag == 0 {
                      // RouteHop
                      let route_hop = {
-                         let Ok(rh_reader) = dec_blob_reader
-                             .get_root::<veilid_capnp::route_hop::Reader>() else {
-                             return Ok(NetworkResult::invalid_message("failed to get route hop reader for blob"));
+                         let Ok(rh_reader) =
+                             dec_blob_reader.get_root::<veilid_capnp::route_hop::Reader>()
+                         else {
+                             return Ok(NetworkResult::invalid_message(
+                                 "failed to get route hop reader for blob",
+                             ));
                          };
                          let Ok(route_hop) = decode_route_hop(&rh_reader) else {
-                             return Ok(NetworkResult::invalid_message("failed to decode route hop"));
+                             return Ok(NetworkResult::invalid_message(
+                                 "failed to decode route hop",
+                             ));
                          };
                          route_hop
                      };

                      // Validate the route hop
-                     if let Err(_) = route_hop.validate(self.crypto.clone()) {
-                         return Ok(NetworkResult::invalid_message("failed to validate route hop"));
+                     if route_hop.validate(self.crypto.clone()).is_err() {
+                         return Ok(NetworkResult::invalid_message(
+                             "failed to validate route hop",
+                         ));
                      }

                      // Continue the full safety route with another hop
@ -543,7 +597,7 @@ impl RPCProcessor {
                  hop_count: private_route.hop_count - 1,
                  hops: route_hop
                      .next_hop
-                     .map(|rhd| PrivateRouteHops::Data(rhd))
+                     .map(PrivateRouteHops::Data)
                      .unwrap_or(PrivateRouteHops::Empty),
              },
          )
@ -80,7 +80,7 @@ impl RPCProcessor {
          );
          let question = RPCQuestion::new(
              network_result_try!(self.get_destination_respond_to(&dest)?),
-             RPCQuestionDetail::SetValueQ(set_value_q),
+             RPCQuestionDetail::SetValueQ(Box::new(set_value_q)),
          );
          let question_context = QuestionContext::SetValue(ValidateSetValueContext {
              descriptor,
@ -292,7 +292,7 @@ impl RPCProcessor {
          let set_value_a = RPCOperationSetValueA::new(set, new_value, closer_to_key_peers)?;

          // Send SetValue answer
-         self.answer(msg, RPCAnswer::new(RPCAnswerDetail::SetValueA(set_value_a)))
+         self.answer(msg, RPCAnswer::new(RPCAnswerDetail::SetValueA(Box::new(set_value_a))))
              .await
      }
  }
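Wrapping `set_value_q` and `set_value_a` in `Box::new` here (and the Signal/Status/ValidateDialInfo details in the hunks that follow) is the usual response to `clippy::large_enum_variant`: when one variant is much larger than the others, every value of the enum pays for the biggest one, and boxing the payload keeps the enum itself small. The exact lint that fired is not visible in this diff, so treat that as the likely motivation rather than a certainty. A sketch with exaggerated, made-up payload types:

```rust
// Hypothetical stand-ins for the RPC detail payloads; the sizes are
// exaggerated to make the effect of boxing visible.
struct SetValueQ {
    _payload: [u8; 1024],
}
struct StatusQ;

enum QuestionDetail {
    // Boxing the large payload keeps this variant pointer-sized instead of 1 KiB.
    SetValueQ(Box<SetValueQ>),
    StatusQ(StatusQ),
}

fn main() {
    let q = QuestionDetail::SetValueQ(Box::new(SetValueQ { _payload: [0; 1024] }));
    let _s = QuestionDetail::StatusQ(StatusQ);

    println!("enum size: {} bytes", std::mem::size_of::<QuestionDetail>());
    if let QuestionDetail::SetValueQ(b) = q {
        println!("boxed payload size: {} bytes", std::mem::size_of_val(&*b));
    }
}
```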
@ -26,7 +26,7 @@ impl RPCProcessor {
          }

          let signal = RPCOperationSignal::new(signal_info);
-         let statement = RPCStatement::new(RPCStatementDetail::Signal(signal));
+         let statement = RPCStatement::new(RPCStatementDetail::Signal(Box::new(signal)));

          // Send the signal request
          self.statement(dest, statement).await
@ -101,7 +101,7 @@ impl RPCProcessor {
          let status_q = RPCOperationStatusQ::new(node_status);
          let question = RPCQuestion::new(
              network_result_try!(self.get_destination_respond_to(&dest)?),
-             RPCQuestionDetail::StatusQ(status_q),
+             RPCQuestionDetail::StatusQ(Box::new(status_q)),
          );

          let debug_string = format!("Status => {}", dest);
@ -249,7 +249,10 @@ impl RPCProcessor {
          let status_a = RPCOperationStatusA::new(node_status, sender_info);

          // Send status answer
-         self.answer(msg, RPCAnswer::new(RPCAnswerDetail::StatusA(status_a)))
-             .await
+         self.answer(
+             msg,
+             RPCAnswer::new(RPCAnswerDetail::StatusA(Box::new(status_a))),
+         )
+         .await
      }
  }
@ -21,7 +21,9 @@ impl RPCProcessor {
              .map_err(RPCError::internal)?;

          let validate_dial_info = RPCOperationValidateDialInfo::new(dial_info, receipt, redirect)?;
-         let statement = RPCStatement::new(RPCStatementDetail::ValidateDialInfo(validate_dial_info));
+         let statement = RPCStatement::new(RPCStatementDetail::ValidateDialInfo(Box::new(
+             validate_dial_info,
+         )));

          // Send the validate_dial_info request
          // This can only be sent directly, as relays can not validate dial info
@ -153,8 +155,9 @@ impl RPCProcessor {
          // Make a copy of the request, without the redirect flag
          let validate_dial_info =
              RPCOperationValidateDialInfo::new(dial_info.clone(), receipt.clone(), false)?;
-         let statement =
-             RPCStatement::new(RPCStatementDetail::ValidateDialInfo(validate_dial_info));
+         let statement = RPCStatement::new(RPCStatementDetail::ValidateDialInfo(Box::new(
+             validate_dial_info,
+         )));

          // Send the validate_dial_info request
          // This can only be sent directly, as relays can not validate dial info
@ -23,7 +23,7 @@ impl StorageManager {
          let reclaimed = local_record_store
              .reclaim_space(reclaim.unwrap_or(usize::MAX))
              .await;
-         return format!("Local records purged: reclaimed {} bytes", reclaimed);
+         format!("Local records purged: reclaimed {} bytes", reclaimed)
      }
      pub(crate) async fn purge_remote_records(&self, reclaim: Option<usize>) -> String {
          let mut inner = self.inner.lock().await;
@ -33,7 +33,7 @@ impl StorageManager {
          let reclaimed = remote_record_store
              .reclaim_space(reclaim.unwrap_or(usize::MAX))
              .await;
-         return format!("Remote records purged: reclaimed {} bytes", reclaimed);
+         format!("Remote records purged: reclaimed {} bytes", reclaimed)
      }
      pub(crate) async fn debug_local_record_subkey_info(
          &self,
@ -103,10 +103,10 @@ impl<T: PrimInt + Unsigned + fmt::Display + fmt::Debug> LimitedSize<T> {
          if let Some(uv) = self.uncommitted_value.take() {
              log_stor!(debug "Rollback ({}): {} (drop {})", self.description, self.value, uv);
          }
-         return self.value;
+         self.value
      }

      pub fn get(&self) -> T {
-         return self.value;
+         self.value
      }
  }
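The `return self.value;` → `self.value` edits above are `clippy::needless_return`: the last expression of a block is already its value, so the explicit `return` only adds noise. A minimal illustration with an invented `Counter` type:

```rust
struct Counter {
    value: u32,
}

impl Counter {
    // Before (flagged by clippy::needless_return):
    // fn get(&self) -> u32 { return self.value; }

    // After: the trailing expression is the return value.
    fn get(&self) -> u32 {
        self.value
    }
}

fn main() {
    let c = Counter { value: 7 };
    println!("{}", c.get());
}
```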
@ -133,9 +133,7 @@ impl StorageManager {
      }

      fn online_writes_ready_inner(inner: &StorageManagerInner) -> Option<RPCProcessor> {
-         if let Some(rpc_processor) = {
-             inner.rpc_processor.clone()
-         } {
+         if let Some(rpc_processor) = { inner.rpc_processor.clone() } {
              if let Some(network_class) = rpc_processor
                  .routing_table()
                  .get_network_class(RoutingDomain::PublicInternet)
@ -158,12 +156,12 @@ impl StorageManager {

      async fn online_writes_ready(&self) -> EyreResult<Option<RPCProcessor>> {
          let inner = self.lock().await?;
-         return Ok(Self::online_writes_ready_inner(&*inner));
+         Ok(Self::online_writes_ready_inner(&inner))
      }

      async fn has_offline_subkey_writes(&self) -> EyreResult<bool> {
          let inner = self.lock().await?;
-         Ok(inner.offline_subkey_writes.len() != 0)
+         Ok(!inner.offline_subkey_writes.is_empty())
      }

      /// Create a local record from scratch with a new owner key, open it, and return the opened descriptor
|
|||||||
|
|
||||||
// Make new subkey data
|
// Make new subkey data
|
||||||
let value_data = if let Some(last_signed_value_data) = last_subkey_result.value {
|
let value_data = if let Some(last_signed_value_data) = last_subkey_result.value {
|
||||||
if last_signed_value_data.value_data().data() == &data
|
if last_signed_value_data.value_data().data() == data
|
||||||
&& last_signed_value_data.value_data().writer() == &writer.key
|
&& last_signed_value_data.value_data().writer() == &writer.key
|
||||||
{
|
{
|
||||||
// Data and writer is the same, nothing is changing,
|
// Data and writer is the same, nothing is changing,
|
||||||
@ -433,13 +431,17 @@ impl StorageManager {
|
|||||||
|
|
||||||
log_stor!(debug "Writing subkey offline: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() );
|
log_stor!(debug "Writing subkey offline: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() );
|
||||||
// Add to offline writes to flush
|
// Add to offline writes to flush
|
||||||
inner.offline_subkey_writes.entry(key)
|
inner
|
||||||
.and_modify(|x| { x.subkeys.insert(subkey); } )
|
.offline_subkey_writes
|
||||||
.or_insert(OfflineSubkeyWrite{
|
.entry(key)
|
||||||
safety_selection,
|
.and_modify(|x| {
|
||||||
subkeys: ValueSubkeyRangeSet::single(subkey)
|
x.subkeys.insert(subkey);
|
||||||
|
})
|
||||||
|
.or_insert(OfflineSubkeyWrite {
|
||||||
|
safety_selection,
|
||||||
|
subkeys: ValueSubkeyRangeSet::single(subkey),
|
||||||
});
|
});
|
||||||
return Ok(None)
|
return Ok(None);
|
||||||
};
|
};
|
||||||
|
|
||||||
// Drop the lock for network access
|
// Drop the lock for network access
|
||||||
|
@ -65,7 +65,7 @@ where
|
|||||||
D: fmt::Debug + Clone + Serialize + for<'d> Deserialize<'d>,
|
D: fmt::Debug + Clone + Serialize + for<'d> Deserialize<'d>,
|
||||||
{
|
{
|
||||||
pub fn new(table_store: TableStore, name: &str, limits: RecordStoreLimits) -> Self {
|
pub fn new(table_store: TableStore, name: &str, limits: RecordStoreLimits) -> Self {
|
||||||
let subkey_cache_size = limits.subkey_cache_size as usize;
|
let subkey_cache_size = limits.subkey_cache_size;
|
||||||
let limit_subkey_cache_total_size = limits
|
let limit_subkey_cache_total_size = limits
|
||||||
.max_subkey_cache_memory_mb
|
.max_subkey_cache_memory_mb
|
||||||
.map(|mb| mb * 1_048_576usize);
|
.map(|mb| mb * 1_048_576usize);
|
||||||
@ -104,7 +104,7 @@ where
|
|||||||
.await?;
|
.await?;
|
||||||
let subkey_table = self
|
let subkey_table = self
|
||||||
.table_store
|
.table_store
|
||||||
.open(&&format!("{}_subkeys", self.name), 1)
|
.open(&format!("{}_subkeys", self.name), 1)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
// Pull record index from table into a vector to ensure we sort them
|
// Pull record index from table into a vector to ensure we sort them
|
||||||
@ -126,7 +126,7 @@ where
|
|||||||
self.total_storage_space
|
self.total_storage_space
|
||||||
.add((mem::size_of::<RecordTableKey>() + ri.1.total_size()) as u64)
|
.add((mem::size_of::<RecordTableKey>() + ri.1.total_size()) as u64)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
if let Err(_) = self.total_storage_space.commit() {
|
if self.total_storage_space.commit().is_err() {
|
||||||
// Revert the total storage space because the commit failed
|
// Revert the total storage space because the commit failed
|
||||||
self.total_storage_space.rollback();
|
self.total_storage_space.rollback();
|
||||||
|
|
||||||
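The `.open(&&format!(…))` → `.open(&format!(…))` change above is `clippy::needless_borrow`: a single `&` on a `String` already coerces to `&str`, so the second borrow only produces a `&&String` that is immediately dereferenced again. A sketch with a hypothetical `open_table` helper:

```rust
fn open_table(name: &str) {
    println!("opening table '{}'", name);
}

fn main() {
    let base = "record_store";

    // Before (clippy::needless_borrow): `&&format!(...)` builds a &&String
    // that the compiler has to peel back down to &str.
    // open_table(&&format!("{}_subkeys", base));

    // After: one borrow of the temporary String coerces to &str.
    open_table(&format!("{}_subkeys", base));
}
```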
@ -449,11 +449,15 @@ where
      ) -> VeilidAPIResult<Option<SubkeyResult>> {
          // Get record from index
          let Some((subkey_count, has_subkey, opt_descriptor)) = self.with_record(key, |record| {
-             (record.subkey_count(), record.stored_subkeys().contains(subkey), if want_descriptor {
-                 Some(record.descriptor().clone())
-             } else {
-                 None
-             })
+             (
+                 record.subkey_count(),
+                 record.stored_subkeys().contains(subkey),
+                 if want_descriptor {
+                     Some(record.descriptor().clone())
+                 } else {
+                     None
+                 },
+             )
          }) else {
              // Record not available
              return Ok(None);
@ -492,19 +496,20 @@ where
          let Some(record_data) = subkey_table
              .load_json::<RecordData>(0, &stk.bytes())
              .await
-             .map_err(VeilidAPIError::internal)? else {
-             apibail_internal!("failed to get subkey that was stored");
-         };
+             .map_err(VeilidAPIError::internal)?
+         else {
+             apibail_internal!("failed to get subkey that was stored");
+         };

          let out = record_data.signed_value_data().clone();

          // Add to cache, do nothing with lru out
          self.add_to_subkey_cache(stk, record_data);

-         return Ok(Some(SubkeyResult {
+         Ok(Some(SubkeyResult {
              value: Some(out),
              descriptor: opt_descriptor,
-         }));
+         }))
      }

      pub(crate) async fn peek_subkey(
@ -515,11 +520,15 @@ where
      ) -> VeilidAPIResult<Option<SubkeyResult>> {
          // record from index
          let Some((subkey_count, has_subkey, opt_descriptor)) = self.peek_record(key, |record| {
-             (record.subkey_count(), record.stored_subkeys().contains(subkey), if want_descriptor {
-                 Some(record.descriptor().clone())
-             } else {
-                 None
-             })
+             (
+                 record.subkey_count(),
+                 record.stored_subkeys().contains(subkey),
+                 if want_descriptor {
+                     Some(record.descriptor().clone())
+                 } else {
+                     None
+                 },
+             )
          }) else {
              // Record not available
              return Ok(None);
@ -558,16 +567,17 @@ where
          let Some(record_data) = subkey_table
              .load_json::<RecordData>(0, &stk.bytes())
              .await
-             .map_err(VeilidAPIError::internal)? else {
-             apibail_internal!("failed to peek subkey that was stored");
-         };
+             .map_err(VeilidAPIError::internal)?
+         else {
+             apibail_internal!("failed to peek subkey that was stored");
+         };

          let out = record_data.signed_value_data().clone();

-         return Ok(Some(SubkeyResult {
+         Ok(Some(SubkeyResult {
              value: Some(out),
              descriptor: opt_descriptor,
-         }));
+         }))
      }

      pub async fn set_subkey(
@ -692,7 +702,7 @@ where
          for (rik, rec) in &self.record_index {
              out += &format!(
                  " {} age={} len={} subkeys={}\n",
-                 rik.key.to_string(),
+                 rik.key,
                  debug_duration(get_timestamp() - rec.last_touched().as_u64()),
                  rec.record_data_size(),
                  rec.stored_subkeys(),
@ -706,11 +716,11 @@ where
          out += &format!("Total Storage Space: {}\n", self.total_storage_space.get());
          out += &format!("Dead Records: {}\n", self.dead_records.len());
          for dr in &self.dead_records {
-             out += &format!(" {}\n", dr.key.key.to_string());
+             out += &format!(" {}\n", dr.key.key);
          }
          out += &format!("Changed Records: {}\n", self.changed_records.len());
          for cr in &self.changed_records {
-             out += &format!(" {}\n", cr.key.to_string());
+             out += &format!(" {}\n", cr.key);
          }

          out
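Dropping `.to_string()` inside `format!` arguments, as in the debug output above, is `clippy::to_string_in_format_args`: `{}` already formats through `Display`, so the intermediate `String` is wasted work. A tiny illustration:

```rust
fn main() {
    let key = 42u64;
    let mut out = String::new();

    // Before (clippy::to_string_in_format_args): allocates a String only to
    // feed it back into the formatter.
    // out += &format!(" {}\n", key.to_string());

    // After: pass the value directly; Display is used either way.
    out += &format!(" {}\n", key);
    print!("{}", out);
}
```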
@ -37,9 +37,7 @@ fn local_limits_from_config(config: VeilidConfig) -> RecordStoreLimits {
          max_subkey_size: MAX_SUBKEY_SIZE,
          max_record_total_size: MAX_RECORD_DATA_SIZE,
          max_records: None,
-         max_subkey_cache_memory_mb: Some(
-             c.network.dht.local_max_subkey_cache_memory_mb as usize,
-         ),
+         max_subkey_cache_memory_mb: Some(c.network.dht.local_max_subkey_cache_memory_mb as usize),
          max_storage_space_mb: None,
      }
  }
@ -51,9 +49,7 @@ fn remote_limits_from_config(config: VeilidConfig) -> RecordStoreLimits {
          max_subkey_size: MAX_SUBKEY_SIZE,
          max_record_total_size: MAX_RECORD_DATA_SIZE,
          max_records: Some(c.network.dht.remote_max_records as usize),
-         max_subkey_cache_memory_mb: Some(
-             c.network.dht.remote_max_subkey_cache_memory_mb as usize,
-         ),
+         max_subkey_cache_memory_mb: Some(c.network.dht.remote_max_subkey_cache_memory_mb as usize),
          max_storage_space_mb: Some(c.network.dht.remote_max_storage_space_mb as usize),
      }
  }
@ -74,8 +70,8 @@ impl StorageManagerInner {
      }

      pub async fn init(&mut self, outer_self: StorageManager) -> EyreResult<()> {
-         let metadata_db = self.unlocked_inner
+         let metadata_db = self
+             .unlocked_inner
              .table_store
              .open(STORAGE_MANAGER_METADATA, 1)
              .await?;
@ -120,7 +116,6 @@ impl StorageManagerInner {
      }

      pub async fn terminate(&mut self) {
-
          // Stop ticker
          let tick_future = self.tick_future.take();
          if let Some(f) = tick_future {
@ -130,19 +125,19 @@ impl StorageManagerInner {
          // Final flush on record stores
          if let Some(mut local_record_store) = self.local_record_store.take() {
              if let Err(e) = local_record_store.tick().await {
                  log_stor!(error "termination local record store tick failed: {}", e);
              }
          }
          if let Some(mut remote_record_store) = self.remote_record_store.take() {
              if let Err(e) = remote_record_store.tick().await {
                  log_stor!(error "termination remote record store tick failed: {}", e);
              }
          }

          // Save metadata
          if self.metadata_db.is_some() {
              if let Err(e) = self.save_metadata().await {
                  log_stor!(error "termination metadata save failed: {}", e);
              }
              self.metadata_db = None;
          }
@ -152,7 +147,7 @@ impl StorageManagerInner {
          self.initialized = false;
      }

-     async fn save_metadata(&mut self) -> EyreResult<()>{
+     async fn save_metadata(&mut self) -> EyreResult<()> {
          if let Some(metadata_db) = &self.metadata_db {
              let tx = metadata_db.transact();
              tx.store_json(0, OFFLINE_SUBKEY_WRITES, &self.offline_subkey_writes)?;
@ -163,7 +158,8 @@ impl StorageManagerInner {

      async fn load_metadata(&mut self) -> EyreResult<()> {
          if let Some(metadata_db) = &self.metadata_db {
-             self.offline_subkey_writes = match metadata_db.load_json(0, OFFLINE_SUBKEY_WRITES).await {
+             self.offline_subkey_writes = match metadata_db.load_json(0, OFFLINE_SUBKEY_WRITES).await
+             {
                  Ok(v) => v.unwrap_or_default(),
                  Err(_) => {
                      if let Err(e) = metadata_db.delete(0, OFFLINE_SUBKEY_WRITES).await {
@ -218,13 +214,16 @@ impl StorageManagerInner {
          Ok((dht_key, owner))
      }

-     async fn move_remote_record_to_local(&mut self, key: TypedKey, safety_selection: SafetySelection) -> VeilidAPIResult<Option<(PublicKey, DHTSchema)>>
-     {
+     async fn move_remote_record_to_local(
+         &mut self,
+         key: TypedKey,
+         safety_selection: SafetySelection,
+     ) -> VeilidAPIResult<Option<(PublicKey, DHTSchema)>> {
          // Get local record store
          let Some(local_record_store) = self.local_record_store.as_mut() else {
              apibail_not_initialized!();
          };

          // Get remote record store
          let Some(remote_record_store) = self.remote_record_store.as_mut() else {
              apibail_not_initialized!();
@ -241,31 +240,36 @@ impl StorageManagerInner {

          // Make local record
          let cur_ts = get_aligned_timestamp();
-         let local_record = Record::new(cur_ts, remote_record.descriptor().clone(), LocalRecordDetail {
-             safety_selection
-         })?;
+         let local_record = Record::new(
+             cur_ts,
+             remote_record.descriptor().clone(),
+             LocalRecordDetail { safety_selection },
+         )?;
          local_record_store.new_record(key, local_record).await?;

          // Move copy subkey data from remote to local store
          for subkey in remote_record.stored_subkeys().iter() {
-             let Some(subkey_result) = remote_record_store.get_subkey(key, subkey, false).await? else {
+             let Some(subkey_result) = remote_record_store.get_subkey(key, subkey, false).await?
+             else {
                  // Subkey was missing
-                 warn!("Subkey was missing: {} #{}",key, subkey);
+                 warn!("Subkey was missing: {} #{}", key, subkey);
                  continue;
              };
              let Some(subkey_data) = subkey_result.value else {
                  // Subkey was missing
-                 warn!("Subkey data was missing: {} #{}",key, subkey);
+                 warn!("Subkey data was missing: {} #{}", key, subkey);
                  continue;
              };
-             local_record_store.set_subkey(key, subkey, subkey_data).await?;
+             local_record_store
+                 .set_subkey(key, subkey, subkey_data)
+                 .await?;
          }

          // Delete remote record from store
          remote_record_store.delete_record(key).await?;

          // Return record information as transferred to local record
-         Ok(Some((remote_record.owner().clone(), remote_record.schema())))
+         Ok(Some((*remote_record.owner(), remote_record.schema())))
      }

      pub async fn open_existing_record(
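The `remote_record.owner().clone()` → `*remote_record.owner()` change above is `clippy::clone_on_copy`: when the pointee is `Copy` (the key types here are small fixed-size values), a plain dereference is the idiomatic way to copy it. A sketch with a stand-in `PublicKey` and `Record`:

```rust
// A small Copy type standing in for the real PublicKey.
#[derive(Clone, Copy, Debug)]
struct PublicKey([u8; 4]);

struct Record {
    owner: PublicKey,
}

impl Record {
    fn owner(&self) -> &PublicKey {
        &self.owner
    }
}

fn main() {
    let rec = Record { owner: PublicKey([1, 2, 3, 4]) };

    // Before (clippy::clone_on_copy): cloning a Copy value through a reference.
    // let owner = rec.owner().clone();

    // After: dereference; for Copy types this is a plain bitwise copy.
    let owner = *rec.owner();
    println!("{:?}", owner);
}
```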
@ -292,14 +296,17 @@ impl StorageManagerInner {
              r.detail_mut().safety_selection = safety_selection;

              // Return record details
-             (r.owner().clone(), r.schema())
+             (*r.owner(), r.schema())
          };
-         let (owner, schema) = match local_record_store.with_record_mut(key, cb){
+         let (owner, schema) = match local_record_store.with_record_mut(key, cb) {
              Some(v) => v,
              None => {
                  // If we don't have a local record yet, check to see if we have a remote record
                  // if so, migrate it to a local record
-                 let Some(v) = self.move_remote_record_to_local(key, safety_selection).await? else {
+                 let Some(v) = self
+                     .move_remote_record_to_local(key, safety_selection)
+                     .await?
+                 else {
                      // No remote record either
                      return Ok(None);
                  };
@ -348,7 +355,7 @@ impl StorageManagerInner {
              apibail_generic!("no descriptor");
          };
          // Get owner
-         let owner = signed_value_descriptor.owner().clone();
+         let owner = *signed_value_descriptor.owner();

          // If the writer we chose is also the owner, we have the owner secret
          // Otherwise this is just another subkey writer
@ -410,7 +417,10 @@ impl StorageManagerInner {
          let Some(local_record_store) = self.local_record_store.as_mut() else {
              apibail_not_initialized!();
          };
-         if let Some(subkey_result) = local_record_store.get_subkey(key, subkey, want_descriptor).await? {
+         if let Some(subkey_result) = local_record_store
+             .get_subkey(key, subkey, want_descriptor)
+             .await?
+         {
              return Ok(subkey_result);
          }

@ -428,7 +438,7 @@ impl StorageManagerInner {
      ) -> VeilidAPIResult<()> {
          // See if it's in the local record store
          let Some(local_record_store) = self.local_record_store.as_mut() else {
              apibail_not_initialized!();
          };

          // Write subkey to local store
@ -449,7 +459,10 @@ impl StorageManagerInner {
          let Some(remote_record_store) = self.remote_record_store.as_mut() else {
              apibail_not_initialized!();
          };
-         if let Some(subkey_result) = remote_record_store.get_subkey(key, subkey, want_descriptor).await? {
+         if let Some(subkey_result) = remote_record_store
+             .get_subkey(key, subkey, want_descriptor)
+             .await?
+         {
              return Ok(subkey_result);
          }

@ -472,12 +485,15 @@ impl StorageManagerInner {
          };

          // See if we have a remote record already or not
-         if remote_record_store.with_record(key, |_|{}).is_none() {
+         if remote_record_store.with_record(key, |_| {}).is_none() {
              // record didn't exist, make it
              let cur_ts = get_aligned_timestamp();
-             let remote_record_detail = RemoteRecordDetail { };
-             let record =
-                 Record::<RemoteRecordDetail>::new(cur_ts, signed_value_descriptor, remote_record_detail)?;
+             let remote_record_detail = RemoteRecordDetail {};
+             let record = Record::<RemoteRecordDetail>::new(
+                 cur_ts,
+                 signed_value_descriptor,
+                 remote_record_detail,
+             )?;
              remote_record_store.new_record(key, record).await?
          };

@ -24,7 +24,7 @@ impl SignedValueData {
      ) -> VeilidAPIResult<()> {
          let node_info_bytes = Self::make_signature_bytes(&self.value_data, owner, subkey)?;
          // validate signature
-         vcrypto.verify(&self.value_data.writer(), &node_info_bytes, &self.signature)
+         vcrypto.verify(self.value_data.writer(), &node_info_bytes, &self.signature)
      }

      pub fn make_signature(
@ -37,7 +37,7 @@ impl SignedValueData {
          let node_info_bytes = Self::make_signature_bytes(&value_data, owner, subkey)?;

          // create signature
-         let signature = vcrypto.sign(&value_data.writer(), &writer_secret, &node_info_bytes)?;
+         let signature = vcrypto.sign(value_data.writer(), &writer_secret, &node_info_bytes)?;
          Ok(Self {
              value_data,
              signature,
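`vcrypto.verify(&self.value_data.writer(), …)` → `vcrypto.verify(self.value_data.writer(), …)` above is another `clippy::needless_borrow`: `writer()` already hands back a reference, so the extra `&` only builds a reference to a reference. A sketch with invented `ValueData`/`verify` stand-ins:

```rust
struct ValueData {
    writer: String,
}

impl ValueData {
    fn writer(&self) -> &str {
        &self.writer
    }
}

fn verify(writer: &str) {
    println!("verifying signature by {}", writer);
}

fn main() {
    let vd = ValueData { writer: "alice".to_string() };

    // Before (clippy::needless_borrow): `&vd.writer()` is a &&str.
    // verify(&vd.writer());

    // After: pass the returned reference straight through.
    verify(vd.writer());
}
```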
@ -18,7 +18,7 @@ async fn shutdown(api: VeilidAPI) {
  pub async fn test_delete_open_delete(ts: TableStore) {
      trace!("test_delete_open_delete");

-     let _ = ts.delete("test");
+     let _ = ts.delete("test").await;
      let db = ts.open("test", 3).await.expect("should have opened");
      assert!(
          ts.delete("test").await.is_err(),
@ -50,7 +50,7 @@ pub async fn test_delete_open_delete(ts: TableStore) {
  pub async fn test_store_delete_load(ts: TableStore) {
      trace!("test_store_delete_load");

-     let _ = ts.delete("test");
+     ts.delete("test").await;
      let db = ts.open("test", 3).await.expect("should have opened");
      assert!(
          ts.delete("test").await.is_err(),
@ -135,7 +135,7 @@ pub async fn test_store_delete_load(ts: TableStore) {
  pub async fn test_transaction(ts: TableStore) {
      trace!("test_transaction");

-     let _ = ts.delete("test");
+     let _ = ts.delete("test").await;
      let db = ts.open("test", 3).await.expect("should have opened");
      assert!(
          ts.delete("test").await.is_err(),
@ -165,7 +165,7 @@ pub async fn test_transaction(ts: TableStore) {
  pub async fn test_json(vcrypto: CryptoSystemVersion, ts: TableStore) {
      trace!("test_json");

-     let _ = ts.delete("test");
+     let _ = ts.delete("test").await;
      let db = ts.open("test", 3).await.expect("should have opened");
      let keypair = vcrypto.generate_keypair();

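The `.await` added to `ts.delete("test")` in these tests matters because Rust futures are lazy: `let _ = ts.delete("test");` only built the delete future and then dropped it, so the table was never actually deleted before the test continued. A self-contained sketch; the `delete` function and the tiny executor are illustrative only, since the real tests run under an existing async runtime:

```rust
use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll, Wake, Waker};

async fn delete(name: &str) {
    println!("deleting '{}'", name);
}

// A tiny single-future executor, just enough for this example.
fn block_on<F: Future>(mut fut: F) -> F::Output {
    struct NoopWaker;
    impl Wake for NoopWaker {
        fn wake(self: Arc<Self>) {}
    }
    let waker = Waker::from(Arc::new(NoopWaker));
    let mut cx = Context::from_waker(&waker);
    // The future never moves after this point, so pinning it on the stack is fine.
    let mut fut = unsafe { Pin::new_unchecked(&mut fut) };
    loop {
        if let Poll::Ready(out) = fut.as_mut().poll(&mut cx) {
            return out;
        }
    }
}

fn main() {
    // Futures are lazy: this builds the future and drops it, printing nothing,
    // which is what the old `let _ = ts.delete("test");` silently did.
    let _ = delete("test");

    // The fixed code awaits the call; here a minimal executor drives it.
    block_on(delete("test"));
}
```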
@ -229,10 +229,10 @@ pub async fn test_protect_unprotect(vcrypto: CryptoSystemVersion, ts: TableStore
      for password in passwords {
          let dek_bytes = ts
              .maybe_protect_device_encryption_key(dek, password)
-             .expect(&format!("protect: dek: '{}' pw: '{}'", dek, password));
+             .unwrap_or_else(|_| panic!("protect: dek: '{}' pw: '{}'", dek, password));
          let unprotected = ts
              .maybe_unprotect_device_encryption_key(&dek_bytes, password)
-             .expect(&format!("unprotect: dek: '{}' pw: '{}'", dek, password));
+             .unwrap_or_else(|_| panic!("unprotect: dek: '{}' pw: '{}'", dek, password));
          assert_eq!(unprotected, dek);
          let invalid_password = format!("{}x", password);
          let _ = ts
@ -241,7 +241,7 @@ pub async fn test_protect_unprotect(vcrypto: CryptoSystemVersion, ts: TableStore
              "invalid_password: dek: '{}' pw: '{}'",
              dek, &invalid_password
          ));
-         if password != "" {
+         if !password.is_empty() {
              let _ = ts
                  .maybe_unprotect_device_encryption_key(&dek_bytes, "")
                  .expect_err(&format!("empty_password: dek: '{}' pw: ''", dek));
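Two lints in the last hunk: `clippy::expect_fun_call` prefers `unwrap_or_else(|_| panic!(…))`, so the panic message is only formatted when the `Result` is actually an `Err`, and `clippy::comparison_to_empty` prefers `!password.is_empty()` over `password != ""`. A sketch with a hypothetical `unprotect` helper:

```rust
fn unprotect(dek: u32, password: &str) -> Result<u32, String> {
    if password.is_empty() {
        Err("empty password".to_string())
    } else {
        Ok(dek)
    }
}

fn main() {
    let dek = 7;
    let password = "hunter2";

    // Before (clippy::expect_fun_call): format! builds the message even on success.
    // let v = unprotect(dek, password).expect(&format!("unprotect: dek: '{}'", dek));

    // After: the closure only runs, and only formats, on the error path.
    let v = unprotect(dek, password)
        .unwrap_or_else(|_| panic!("unprotect: dek: '{}' pw: '{}'", dek, password));
    assert_eq!(v, dek);

    // clippy::comparison_to_empty: prefer is_empty() over comparing with "".
    if !password.is_empty() {
        println!("non-empty password accepted: {}", v);
    }
}
```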