checkpoint

parent a54da97393
commit e672ae0319

Cargo.lock (generated): 94 lines changed
@ -672,6 +672,27 @@ version = "1.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c"
|
||||
|
||||
[[package]]
|
||||
name = "bytecheck"
|
||||
version = "0.6.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d11cac2c12b5adc6570dad2ee1b87eff4955dac476fe12d81e5fdd352e52406f"
|
||||
dependencies = [
|
||||
"bytecheck_derive",
|
||||
"ptr_meta",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bytecheck_derive"
|
||||
version = "0.6.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "byteorder"
|
||||
version = "1.4.3"
|
||||
@ -3871,6 +3892,26 @@ version = "2.28.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94"
|
||||
|
||||
[[package]]
|
||||
name = "ptr_meta"
|
||||
version = "0.1.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1"
|
||||
dependencies = [
|
||||
"ptr_meta_derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ptr_meta_derive"
|
||||
version = "0.1.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "quick-error"
|
||||
version = "1.2.3"
|
||||
@ -4051,6 +4092,15 @@ dependencies = [
|
||||
"winapi 0.3.9",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rend"
|
||||
version = "0.3.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "79af64b4b6362ffba04eef3a4e10829718a4896dac19daa741851c86781edf95"
|
||||
dependencies = [
|
||||
"bytecheck",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "resolv-conf"
|
||||
version = "0.7.0"
|
||||
@ -4076,6 +4126,31 @@ dependencies = [
|
||||
"winapi 0.3.9",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rkyv"
|
||||
version = "0.7.39"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cec2b3485b07d96ddfd3134767b8a447b45ea4eb91448d0a35180ec0ffd5ed15"
|
||||
dependencies = [
|
||||
"bytecheck",
|
||||
"hashbrown",
|
||||
"ptr_meta",
|
||||
"rend",
|
||||
"rkyv_derive",
|
||||
"seahash",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rkyv_derive"
|
||||
version = "0.7.39"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6eaedadc88b53e36dd32d940ed21ae4d850d5916f2581526921f553a72ac34c4"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rlp"
|
||||
version = "0.5.2"
|
||||
@ -4274,6 +4349,12 @@ dependencies = [
|
||||
"untrusted",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "seahash"
|
||||
version = "4.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b"
|
||||
|
||||
[[package]]
|
||||
name = "secrecy"
|
||||
version = "0.7.0"
|
||||
@ -4371,6 +4452,15 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_bytes"
|
||||
version = "0.11.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cfc50e8183eeeb6178dcb167ae34a8051d63535023ae38b5d8d12beae193d37b"
|
||||
dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_cbor"
|
||||
version = "0.11.2"
|
||||
@ -5477,6 +5567,7 @@ dependencies = [
|
||||
"backtrace",
|
||||
"blake3",
|
||||
"bugsalot",
|
||||
"bytecheck",
|
||||
"capnp",
|
||||
"capnpc",
|
||||
"cfg-if 1.0.0",
|
||||
@ -5520,6 +5611,7 @@ dependencies = [
|
||||
"owo-colors",
|
||||
"parking_lot 0.12.1",
|
||||
"rand 0.7.3",
|
||||
"rkyv",
|
||||
"rtnetlink",
|
||||
"rusqlite",
|
||||
"rust-fsm",
|
||||
@ -5529,7 +5621,7 @@ dependencies = [
|
||||
"send_wrapper 0.6.0",
|
||||
"serde",
|
||||
"serde-big-array",
|
||||
"serde_cbor",
|
||||
"serde_bytes",
|
||||
"serde_json",
|
||||
"serial_test",
|
||||
"simplelog 0.12.0",
|
||||
|
@@ -59,6 +59,9 @@ rtnetlink = { version = "^0", default-features = false, optional = true }
async-std-resolver = { version = "^0", optional = true }
trust-dns-resolver = { version = "^0", optional = true }
keyvaluedb = { path = "../external/keyvaluedb/keyvaluedb" }
serde_bytes = { version = "^0" }
rkyv = { version = "^0", default_features = false, features = ["std", "alloc", "strict", "size_64", "archive_le", "validation"] }
bytecheck = "^0"

# Dependencies for native builds only
# Linux, Windows, Mac, iOS, Android
@@ -82,7 +85,6 @@ futures-util = { version = "^0", default-features = false, features = ["async-await"] }
keyvaluedb-sqlite = { path = "../external/keyvaluedb/keyvaluedb-sqlite" }
data-encoding = { version = "^2" }
serde = { version = "^1", features = ["derive" ] }
serde_cbor = { version = "^0" }
serde_json = { version = "^1" }
socket2 = "^0"
bugsalot = "^0"
@@ -99,7 +101,6 @@ no-std-net = { path = "../external/no-std-net", features = ["serde"] }
keyvaluedb-web = { path = "../external/keyvaluedb/keyvaluedb-web" }
data-encoding = { version = "^2", default_features = false, features = ["alloc"] }
serde = { version = "^1", default-features = false, features = ["derive", "alloc"] }
serde_cbor = { version = "^0", default-features = false, features = ["alloc"] }
serde_json = { version = "^1", default-features = false, features = ["alloc"] }
getrandom = { version = "^0", features = ["js"] }
ws_stream_wasm = "^0"
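Note: later hunks in this diff call to_rkyv() and from_rkyv() helpers that are not shown in the changeset. As a rough sketch only (the names, the 1024-byte scratch size, and the eyre-based error handling are assumptions inferred from the call sites, not the actual veilid-core implementation), such helpers could look like this, relying on the rkyv "validation" feature enabled above:

    // Hypothetical helpers matching how the rest of this diff uses them.
    pub fn to_rkyv<T>(value: &T) -> EyreResult<Vec<u8>>
    where
        T: rkyv::Serialize<rkyv::ser::serializers::AllocSerializer<1024>>,
    {
        let bytes = rkyv::to_bytes::<T, 1024>(value)
            .map_err(|e| eyre!("failed to freeze object: {}", e))?;
        Ok(bytes.to_vec())
    }

    pub fn from_rkyv<T>(bytes: Vec<u8>) -> EyreResult<T>
    where
        T: rkyv::Archive,
        <T as rkyv::Archive>::Archived:
            for<'t> bytecheck::CheckBytes<rkyv::validation::validators::DefaultValidator<'t>>
                + rkyv::Deserialize<T, rkyv::de::deserializers::SharedDeserializeMap>,
    {
        // from_bytes validates the archived bytes with bytecheck before deserializing.
        rkyv::from_bytes::<T>(&bytes).map_err(|e| eyre!("failed to unfreeze object: {}", e))
    }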
@@ -173,7 +173,7 @@ struct ValueKey {
# }

struct ValueData {
    data @0 :Data;                   # value or subvalue contents in CBOR format
    data @0 :Data;                   # value or subvalue contents
    seq @1 :ValueSeqNum;             # sequence number of value
}

@@ -181,9 +181,10 @@ struct ValueData {
##############################

enum NetworkClass {
    inboundCapable @0;               # I = Inbound capable without relay, may require signal
    outboundOnly @1;                 # O = Outbound only, inbound relay required except with reverse connect signal
    webApp @2;                       # W = PWA, outbound relay is required in most cases
    invalid @0;                      # X = Invalid network class, network is not yet set up
    inboundCapable @1;               # I = Inbound capable without relay, may require signal
    outboundOnly @2;                 # O = Outbound only, inbound relay required except with reverse connect signal
    webApp @3;                       # W = PWA, outbound relay is required in most cases
}

enum DialInfoClass {
@@ -232,6 +233,10 @@ struct AddressTypeSet {
    ipv6 @1 :Bool;
}

struct SenderInfo {
    socketAddress @0 :SocketAddress; # socket address that for the sending peer
}

struct NodeInfo {
    networkClass @0 :NetworkClass;               # network class of this node
    outboundProtocols @1 :ProtocolTypeSet;       # protocols that can go outbound
@@ -239,17 +244,27 @@ struct NodeInfo {
    minVersion @3 :UInt8;                        # minimum protocol version for rpc
    maxVersion @4 :UInt8;                        # maximum protocol version for rpc
    dialInfoDetailList @5 :List(DialInfoDetail); # inbound dial info details for this node
    relayPeerInfo @6 :PeerInfo;                  # (optional) relay peer info for this node
}

struct SignedDirectNodeInfo {
    nodeInfo @0 :NodeInfo;           # node info
    timestamp @1 :UInt64;            # when signed node info was generated
    signature @2 :Signature;         # signature
}

struct SignedRelayedNodeInfo {
    nodeInfo @0 :NodeInfo;               # node info
    relayId @1 :NodeID;                  # node id for relay
    relayInfo @2 :SignedDirectNodeInfo;  # signed node info for relay
    timestamp @3 :UInt64;                # when signed node info was generated
    signature @4 :Signature;             # signature
}

struct SignedNodeInfo {
    nodeInfo @0 :NodeInfo;           # node info
    signature @1 :Signature;         # signature
    timestamp @2 :UInt64;            # when signed node info was generated
}

struct SenderInfo {
    socketAddress @0 :SocketAddress; # socket address that for the sending peer
    union {
        direct @0 :SignedDirectNodeInfo;    # node info for nodes reachable without a relay
        relayed @1 :SignedRelayedNodeInfo;  # node info for nodes requiring a relay
    }
}

struct PeerInfo {
@@ -326,7 +341,7 @@ struct OperationGetValueA {

struct OperationSetValueQ {
    key @0 :ValueKey;                # key for value to update
    value @1 :ValueData;             # value or subvalue contents in CBOR format (older or equal seq number gets dropped)
    value @1 :ValueData;             # value or subvalue contents (older or equal seq number gets dropped)
}

struct OperationSetValueA {
@@ -347,7 +362,7 @@ struct OperationWatchValueA {

struct OperationValueChanged {
    key @0 :ValueKey;                # key for value that changed
    value @1 :ValueData;             # value or subvalue contents in CBOR format with sequence number
    value @1 :ValueData;             # value or subvalue contents with sequence number
}

struct OperationSupplyBlockQ {
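For orientation, the new direct/relayed split shows up on the Rust side of this diff as match arms on SignedNodeInfo::Direct / SignedNodeInfo::Relayed plus node_info(), timestamp() and relay_info() accessors. The enum itself is not part of this changeset, so the following is only a sketch of the shape those call sites imply (field names taken from the coder functions later in the diff):

    // Sketch only; the real definitions live in veilid-core's routing table types.
    pub enum SignedNodeInfo {
        Direct(SignedDirectNodeInfo),
        Relayed(SignedRelayedNodeInfo),
    }

    impl SignedNodeInfo {
        pub fn node_info(&self) -> &NodeInfo {
            match self {
                SignedNodeInfo::Direct(d) => &d.node_info,
                SignedNodeInfo::Relayed(r) => &r.node_info,
            }
        }
        pub fn timestamp(&self) -> u64 {
            match self {
                SignedNodeInfo::Direct(d) => d.timestamp,
                SignedNodeInfo::Relayed(r) => r.timestamp,
            }
        }
        // Only the relayed form carries the relay's own signed node info.
        pub fn relay_info(&self) -> Option<&NodeInfo> {
            match self {
                SignedNodeInfo::Direct(_) => None,
                SignedNodeInfo::Relayed(r) => Some(&r.relay_info.node_info),
            }
        }
    }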
@@ -9,7 +9,7 @@ use core::fmt;
use serde::*;

state_machine! {
    derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)
    derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,)
    pub Attachment(Detached)
//---
    Detached(AttachRequested) => Attaching [StartAttachment],
@@ -12,7 +12,6 @@ use digest::generic_array::typenum::U64;
use digest::{Digest, Output};
use ed25519_dalek::{Keypair, PublicKey, Signature};
use generic_array::GenericArray;
use serde::{Deserialize, Serialize};

//////////////////////////////////////////////////////////////////////

@@ -39,13 +38,14 @@ pub const DHT_SIGNATURE_LENGTH_ENCODED: usize = 86;

macro_rules! byte_array_type {
    ($name:ident, $size:expr) => {
        #[derive(Clone, Copy)]
        #[derive(Clone, Copy, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
        #[archive_attr(repr(C), derive(CheckBytes))]
        pub struct $name {
            pub bytes: [u8; $size],
            pub valid: bool,
        }

        impl Serialize for $name {
        impl serde::Serialize for $name {
            fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
            where
                S: serde::Serializer,
@@ -56,16 +56,16 @@ macro_rules! byte_array_type {
                } else {
                    s = "".to_owned();
                }
                s.serialize(serializer)
                serde::Serialize::serialize(&s, serializer)
            }
        }

        impl<'de> Deserialize<'de> for $name {
        impl<'de> serde::Deserialize<'de> for $name {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
            where
                D: serde::Deserializer<'de>,
            {
                let s = String::deserialize(deserializer)?;
                let s = <String as serde::Deserialize>::deserialize(deserializer)?;
                if s == "" {
                    return Ok($name::default());
                }
@ -2,7 +2,6 @@ use crate::xx::*;
|
||||
use crate::*;
|
||||
use data_encoding::BASE64URL_NOPAD;
|
||||
use keyring_manager::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::path::Path;
|
||||
|
||||
pub struct ProtectedStoreInner {
|
||||
@ -144,18 +143,31 @@ impl ProtectedStore {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self, value))]
|
||||
pub async fn save_user_secret_cbor<T>(&self, key: &str, value: &T) -> EyreResult<bool>
|
||||
pub async fn save_user_secret_rkyv<T>(&self, key: &str, value: &T) -> EyreResult<bool>
|
||||
where
|
||||
T: Serialize,
|
||||
T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
|
||||
{
|
||||
let v = serde_cbor::to_vec(value).wrap_err("couldn't store as CBOR")?;
|
||||
let v = to_rkyv(value)?;
|
||||
self.save_user_secret(&key, &v).await
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self, value))]
|
||||
pub async fn save_user_secret_json<T>(&self, key: &str, value: &T) -> EyreResult<bool>
|
||||
where
|
||||
T: serde::Serialize,
|
||||
{
|
||||
let v = serde_json::to_vec(value)?;
|
||||
self.save_user_secret(&key, &v).await
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
pub async fn load_user_secret_cbor<T>(&self, key: &str) -> EyreResult<Option<T>>
|
||||
pub async fn load_user_secret_rkyv<T>(&self, key: &str) -> EyreResult<Option<T>>
|
||||
where
|
||||
T: for<'de> Deserialize<'de>,
|
||||
T: RkyvArchive,
|
||||
<T as RkyvArchive>::Archived:
|
||||
for<'t> bytecheck::CheckBytes<rkyv::validation::validators::DefaultValidator<'t>>,
|
||||
<T as RkyvArchive>::Archived:
|
||||
rkyv::Deserialize<T, rkyv::de::deserializers::SharedDeserializeMap>,
|
||||
{
|
||||
let out = self.load_user_secret(key).await?;
|
||||
let b = match out {
|
||||
@ -165,7 +177,24 @@ impl ProtectedStore {
|
||||
}
|
||||
};
|
||||
|
||||
let obj = serde_cbor::from_slice::<T>(&b).wrap_err("failed to deserialize")?;
|
||||
let obj = from_rkyv(b)?;
|
||||
Ok(Some(obj))
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
pub async fn load_user_secret_json<T>(&self, key: &str) -> EyreResult<Option<T>>
|
||||
where
|
||||
T: for<'de> serde::de::Deserialize<'de>,
|
||||
{
|
||||
let out = self.load_user_secret(key).await?;
|
||||
let b = match out {
|
||||
Some(v) => v,
|
||||
None => {
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
|
||||
let obj = serde_json::from_slice(&b)?;
|
||||
Ok(Some(obj))
|
||||
}
|
||||
|
||||
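A short usage sketch of the new rkyv-based secret storage above. The MyKeyPair type and the enclosing async function are hypothetical; the derives mirror the bounds required by save_user_secret_rkyv/load_user_secret_rkyv:

    // Hypothetical example type satisfying the RkyvArchive/CheckBytes bounds above.
    #[derive(Debug, PartialEq, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
    #[archive_attr(repr(C), derive(CheckBytes))]
    struct MyKeyPair {
        public: [u8; 32],
        secret: [u8; 32],
    }

    async fn example(ps: &ProtectedStore) -> EyreResult<()> {
        let kp = MyKeyPair { public: [1u8; 32], secret: [2u8; 32] };
        // Freeze with rkyv and store under a key in the platform keyring.
        let _existed = ps.save_user_secret_rkyv("my_keypair", &kp).await?;
        // Validate and thaw; None means nothing was stored under this key.
        let loaded: Option<MyKeyPair> = ps.load_user_secret_rkyv("my_keypair").await?;
        assert_eq!(loaded, Some(kp));
        Ok(())
    }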
|
@ -85,12 +85,25 @@ impl TableDB {
|
||||
db.write(dbt).wrap_err("failed to store key")
|
||||
}
|
||||
|
||||
/// Store a key in CBOR format with a value in a column in the TableDB. Performs a single transaction immediately.
|
||||
pub fn store_cbor<T>(&self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
|
||||
/// Store a key in rkyv format with a value in a column in the TableDB. Performs a single transaction immediately.
|
||||
pub fn store_rkyv<T>(&self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
|
||||
where
|
||||
T: Serialize,
|
||||
T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
|
||||
{
|
||||
let v = serde_cbor::to_vec(value).wrap_err("couldn't store as CBOR")?;
|
||||
let v = to_rkyv(value)?;
|
||||
|
||||
let db = &self.inner.lock().database;
|
||||
let mut dbt = db.transaction();
|
||||
dbt.put(col, key, v.as_slice());
|
||||
db.write(dbt).wrap_err("failed to store key")
|
||||
}
|
||||
|
||||
/// Store a key in json format with a value in a column in the TableDB. Performs a single transaction immediately.
|
||||
pub fn store_json<T>(&self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
|
||||
where
|
||||
T: serde::Serialize,
|
||||
{
|
||||
let v = serde_json::to_vec(value)?;
|
||||
|
||||
let db = &self.inner.lock().database;
|
||||
let mut dbt = db.transaction();
|
||||
@ -104,10 +117,14 @@ impl TableDB {
|
||||
db.get(col, key).wrap_err("failed to get key")
|
||||
}
|
||||
|
||||
/// Read a key from a column in the TableDB immediately, in CBOR format.
|
||||
pub fn load_cbor<T>(&self, col: u32, key: &[u8]) -> EyreResult<Option<T>>
|
||||
/// Read an rkyv key from a column in the TableDB immediately
|
||||
pub fn load_rkyv<T>(&self, col: u32, key: &[u8]) -> EyreResult<Option<T>>
|
||||
where
|
||||
T: for<'de> Deserialize<'de>,
|
||||
T: RkyvArchive,
|
||||
<T as RkyvArchive>::Archived:
|
||||
for<'t> bytecheck::CheckBytes<rkyv::validation::validators::DefaultValidator<'t>>,
|
||||
<T as RkyvArchive>::Archived:
|
||||
rkyv::Deserialize<T, rkyv::de::deserializers::SharedDeserializeMap>,
|
||||
{
|
||||
let db = &self.inner.lock().database;
|
||||
let out = db.get(col, key).wrap_err("failed to get key")?;
|
||||
@ -117,7 +134,24 @@ impl TableDB {
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
let obj = serde_cbor::from_slice::<T>(&b).wrap_err("failed to deserialize")?;
|
||||
let obj = from_rkyv(b)?;
|
||||
Ok(Some(obj))
|
||||
}
|
||||
|
||||
/// Read an serde-json key from a column in the TableDB immediately
|
||||
pub fn load_json<T>(&self, col: u32, key: &[u8]) -> EyreResult<Option<T>>
|
||||
where
|
||||
T: for<'de> serde::Deserialize<'de>,
|
||||
{
|
||||
let db = &self.inner.lock().database;
|
||||
let out = db.get(col, key).wrap_err("failed to get key")?;
|
||||
let b = match out {
|
||||
Some(v) => v,
|
||||
None => {
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
let obj = serde_json::from_slice(&b)?;
|
||||
Ok(Some(obj))
|
||||
}
|
||||
|
||||
@ -176,12 +210,22 @@ impl<'a> TableDBTransaction<'a> {
|
||||
self.dbt.as_mut().unwrap().put(col, key, value);
|
||||
}
|
||||
|
||||
/// Store a key in CBOR format with a value in a column in the TableDB
|
||||
pub fn store_cbor<T>(&mut self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
|
||||
/// Store a key in rkyv format with a value in a column in the TableDB
|
||||
pub fn store_rkyv<T>(&mut self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
|
||||
where
|
||||
T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
|
||||
{
|
||||
let v = to_rkyv(value)?;
|
||||
self.dbt.as_mut().unwrap().put(col, key, v.as_slice());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Store a key in rkyv format with a value in a column in the TableDB
|
||||
pub fn store_json<T>(&mut self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
|
||||
where
|
||||
T: Serialize,
|
||||
{
|
||||
let v = serde_cbor::to_vec(value).wrap_err("couldn't store as CBOR")?;
|
||||
let v = serde_json::to_vec(value)?;
|
||||
self.dbt.as_mut().unwrap().put(col, key, v.as_slice());
|
||||
Ok(())
|
||||
}
|
||||
|
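As a usage sketch for the TableDB methods above (the table name, column number, and key are made up for illustration; to_rkyv/from_rkyv do the freezing and validated thawing under the hood):

    // Hypothetical round trip through a TableDB column using the new helpers.
    async fn example(table_store: &TableStore) -> EyreResult<()> {
        let db = table_store.open("example_table", 1).await?;

        let value: u32 = 42;
        // Single-transaction store in rkyv format.
        db.store_rkyv(0, b"answer", &value)?;
        // Validated load; Ok(None) means the key is absent.
        let loaded: Option<u32> = db.load_rkyv(0, b"answer")?;
        assert_eq!(loaded, Some(42));

        // The json variants behave the same way but use serde_json bytes.
        db.store_json(0, b"answer_json", &value)?;
        let _loaded_json: Option<u32> = db.load_json(0, b"answer_json")?;
        Ok(())
    }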
@ -136,18 +136,22 @@ impl ProtectedStore {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self, value))]
|
||||
pub async fn save_user_secret_cbor<T>(&self, key: &str, value: &T) -> EyreResult<bool>
|
||||
pub async fn save_user_secret_frozen<T>(&self, key: &str, value: &T) -> EyreResult<bool>
|
||||
where
|
||||
T: Serialize,
|
||||
T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
|
||||
{
|
||||
let v = serde_cbor::to_vec(value).wrap_err("couldn't store as CBOR")?;
|
||||
let v = to_frozen(value)?;
|
||||
self.save_user_secret(&key, &v).await
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
pub async fn load_user_secret_cbor<T>(&self, key: &str) -> EyreResult<Option<T>>
|
||||
pub async fn load_user_secret_frozen<T>(&self, key: &str) -> EyreResult<Option<T>>
|
||||
where
|
||||
T: for<'de> Deserialize<'de>,
|
||||
T: RkyvArchive,
|
||||
<T as RkyvArchive>::Archived:
|
||||
for<'t> bytecheck::CheckBytes<rkyv::validation::validators::DefaultValidator<'t>>,
|
||||
<T as RkyvArchive>::Archived:
|
||||
rkyv::Deserialize<T, rkyv::de::deserializers::SharedDeserializeMap>,
|
||||
{
|
||||
let out = self.load_user_secret(key).await?;
|
||||
let b = match out {
|
||||
@ -157,7 +161,7 @@ impl ProtectedStore {
|
||||
}
|
||||
};
|
||||
|
||||
let obj = serde_cbor::from_slice::<T>(&b).wrap_err("failed to deserialize")?;
|
||||
let obj = from_frozen(&b)?;
|
||||
Ok(Some(obj))
|
||||
}
|
||||
|
||||
|
@ -295,7 +295,7 @@ impl NetworkManager {
|
||||
if let Some(nr) = routing_table.register_node_with_signed_node_info(
|
||||
RoutingDomain::PublicInternet,
|
||||
k,
|
||||
SignedNodeInfo::with_no_signature(NodeInfo {
|
||||
SignedDirectNodeInfo::with_no_signature(NodeInfo {
|
||||
network_class: NetworkClass::InboundCapable, // Bootstraps are always inbound capable
|
||||
outbound_protocols: ProtocolTypeSet::only(ProtocolType::UDP), // Bootstraps do not participate in relaying and will not make outbound requests, but will have UDP enabled
|
||||
address_types: AddressTypeSet::all(), // Bootstraps are always IPV4 and IPV6 capable
|
||||
|
@ -8,7 +8,19 @@ pub struct Bucket {
|
||||
}
|
||||
pub(super) type EntriesIter<'a> = alloc::collections::btree_map::Iter<'a, DHTKey, Arc<BucketEntry>>;
|
||||
|
||||
type BucketData = (Vec<(DHTKey, Vec<u8>)>, Option<DHTKey>);
|
||||
#[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
|
||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||
struct BucketEntryData {
|
||||
key: DHTKey,
|
||||
value: Vec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
|
||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||
struct BucketData {
|
||||
entries: Vec<BucketEntryData>,
|
||||
newest_entry: Option<DHTKey>,
|
||||
}
|
||||
|
||||
fn state_ordering(state: BucketEntryState) -> usize {
|
||||
match state {
|
||||
@ -27,29 +39,33 @@ impl Bucket {
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn load_bucket(&mut self, data: &[u8]) -> EyreResult<()> {
|
||||
let bucket_data: BucketData =
|
||||
serde_cbor::from_slice::<BucketData>(data).wrap_err("failed to deserialize bucket")?;
|
||||
pub(super) fn load_bucket(&mut self, data: Vec<u8>) -> EyreResult<()> {
|
||||
let bucket_data: BucketData = from_rkyv(data)?;
|
||||
|
||||
for (k, d) in bucket_data.0 {
|
||||
let entryinner = serde_cbor::from_slice::<BucketEntryInner>(&d)
|
||||
.wrap_err("failed to deserialize bucket entry")?;
|
||||
for e in bucket_data.entries {
|
||||
let entryinner = from_rkyv(e.value).wrap_err("failed to deserialize bucket entry")?;
|
||||
self.entries
|
||||
.insert(k, Arc::new(BucketEntry::new_with_inner(entryinner)));
|
||||
.insert(e.key, Arc::new(BucketEntry::new_with_inner(entryinner)));
|
||||
}
|
||||
|
||||
self.newest_entry = bucket_data.1;
|
||||
self.newest_entry = bucket_data.newest_entry;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
pub(super) fn save_bucket(&self) -> EyreResult<Vec<u8>> {
|
||||
let mut entry_vec = Vec::new();
|
||||
let mut entries = Vec::new();
|
||||
for (k, v) in &self.entries {
|
||||
let entry_bytes = v.with_mut_inner(|e| serde_cbor::to_vec(e))?;
|
||||
entry_vec.push((*k, entry_bytes));
|
||||
let entry_bytes = v.with_inner(|e| to_rkyv(e))?;
|
||||
entries.push(BucketEntryData {
|
||||
key: *k,
|
||||
value: entry_bytes,
|
||||
});
|
||||
}
|
||||
let bucket_data: BucketData = (entry_vec, self.newest_entry.clone());
|
||||
let out = serde_cbor::to_vec(&bucket_data)?;
|
||||
let bucket_data = BucketData {
|
||||
entries,
|
||||
newest_entry: self.newest_entry.clone(),
|
||||
};
|
||||
let out = to_rkyv(&bucket_data)?;
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
|
@ -64,22 +64,44 @@ pub struct BucketEntryLocalNetwork {
|
||||
node_status: Option<LocalNetworkNodeStatus>,
|
||||
}
|
||||
|
||||
/// A range of cryptography versions supported by this entry
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct VersionRange {
|
||||
/// The minimum cryptography version supported by this entry
|
||||
min: u8,
|
||||
/// The maximum cryptography version supported by this entry
|
||||
max: u8,
|
||||
}
|
||||
|
||||
/// The data associated with each bucket entry
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct BucketEntryInner {
|
||||
min_max_version: Option<(u8, u8)>,
|
||||
/// The minimum and maximum range of cryptography versions supported by the node,
|
||||
/// inclusive of the requirements of any relay the node may be using
|
||||
min_max_version: Option<VersionRange>,
|
||||
/// Whether or not we have updated this peer with our node info since our network
|
||||
/// and dial info has last changed, for example when our IP address changes
|
||||
updated_since_last_network_change: bool,
|
||||
/// The last connection descriptors used to contact this node, per protocol type
|
||||
#[serde(skip)]
|
||||
last_connections: BTreeMap<LastConnectionKey, (ConnectionDescriptor, u64)>,
|
||||
/// The node info for this entry on the publicinternet routing domain
|
||||
public_internet: BucketEntryPublicInternet,
|
||||
/// The node info for this entry on the localnetwork routing domain
|
||||
local_network: BucketEntryLocalNetwork,
|
||||
/// Statistics gathered for the peer
|
||||
peer_stats: PeerStats,
|
||||
/// The accounting for the latency statistics
|
||||
#[serde(skip)]
|
||||
latency_stats_accounting: LatencyStatsAccounting,
|
||||
/// The accounting for the transfer statistics
|
||||
#[serde(skip)]
|
||||
transfer_stats_accounting: TransferStatsAccounting,
|
||||
/// Tracking identifier for NodeRef debugging
|
||||
#[cfg(feature = "tracking")]
|
||||
#[serde(skip)]
|
||||
next_track_id: usize,
|
||||
/// Backtraces for NodeRef debugging
|
||||
#[cfg(feature = "tracking")]
|
||||
#[serde(skip)]
|
||||
node_ref_tracks: HashMap<usize, backtrace::Backtrace>,
|
||||
@ -190,12 +212,12 @@ impl BucketEntryInner {
|
||||
// Always allow overwriting invalid/unsigned node
|
||||
if current_sni.has_valid_signature() {
|
||||
// If the timestamp hasn't changed or is less, ignore this update
|
||||
if signed_node_info.timestamp <= current_sni.timestamp {
|
||||
if signed_node_info.timestamp() <= current_sni.timestamp() {
|
||||
// If we received a node update with the same timestamp
|
||||
// we can make this node live again, but only if our network has recently changed
|
||||
// which may make nodes that were unreachable now reachable with the same dialinfo
|
||||
if !self.updated_since_last_network_change
|
||||
&& signed_node_info.timestamp == current_sni.timestamp
|
||||
&& signed_node_info.timestamp() == current_sni.timestamp()
|
||||
{
|
||||
// No need to update the signednodeinfo though since the timestamp is the same
|
||||
// Touch the node and let it try to live again
|
||||
@ -207,11 +229,22 @@ impl BucketEntryInner {
|
||||
}
|
||||
}
|
||||
|
||||
// Update the protocol min/max version we have
|
||||
self.min_max_version = Some((
|
||||
signed_node_info.node_info.min_version,
|
||||
signed_node_info.node_info.max_version,
|
||||
));
|
||||
// Update the protocol min/max version we have to use, to include relay requirements if needed
|
||||
let mut version_range = VersionRange {
|
||||
min: signed_node_info.node_info().min_version,
|
||||
max: signed_node_info.node_info().max_version,
|
||||
};
|
||||
if let Some(relay_info) = signed_node_info.relay_info() {
|
||||
version_range.min.max_assign(relay_info.min_version);
|
||||
version_range.max.min_assign(relay_info.max_version);
|
||||
}
|
||||
if version_range.min <= version_range.max {
|
||||
// Can be reached with at least one crypto version
|
||||
self.min_max_version = Some(version_range);
|
||||
} else {
|
||||
// No valid crypto version in range
|
||||
self.min_max_version = None;
|
||||
}
|
||||
|
||||
// Update the signed node info
|
||||
*opt_current_sni = Some(Box::new(signed_node_info));
|
||||
@ -238,7 +271,7 @@ impl BucketEntryInner {
|
||||
RoutingDomain::LocalNetwork => &self.local_network.signed_node_info,
|
||||
RoutingDomain::PublicInternet => &self.public_internet.signed_node_info,
|
||||
};
|
||||
opt_current_sni.as_ref().map(|s| &s.node_info)
|
||||
opt_current_sni.as_ref().map(|s| s.node_info())
|
||||
}
|
||||
|
||||
pub fn signed_node_info(&self, routing_domain: RoutingDomain) -> Option<&SignedNodeInfo> {
|
||||
@ -338,11 +371,11 @@ impl BucketEntryInner {
|
||||
out
|
||||
}
|
||||
|
||||
pub fn set_min_max_version(&mut self, min_max_version: (u8, u8)) {
|
||||
pub fn set_min_max_version(&mut self, min_max_version: VersionRange) {
|
||||
self.min_max_version = Some(min_max_version);
|
||||
}
|
||||
|
||||
pub fn min_max_version(&self) -> Option<(u8, u8)> {
|
||||
pub fn min_max_version(&self) -> Option<VersionRange> {
|
||||
self.min_max_version
|
||||
}
|
||||
|
||||
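The relay handling above effectively intersects the node's supported crypto version range with its relay's: the usable minimum is the larger of the two minimums, the usable maximum is the smaller of the two maximums, and an empty intersection clears min_max_version. A small illustration of the same arithmetic (max_assign/min_assign are assumed here to be equivalent to these std operations):

    fn effective_version_range() {
        // Node supports versions 0..=3, its relay supports 2..=5:
        // the intersection 2..=3 is what ends up in min_max_version.
        let (node_min, node_max) = (0u8, 3u8);
        let (relay_min, relay_max) = (2u8, 5u8);
        let min = core::cmp::max(node_min, relay_min); // 2
        let max = core::cmp::min(node_max, relay_max); // 3
        assert!(min <= max, "an empty intersection would clear min_max_version");
    }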
|
@ -163,7 +163,7 @@ impl RoutingTable {
|
||||
// Load bucket entries from table db if possible
|
||||
debug!("loading routing table entries");
|
||||
if let Err(e) = self.load_buckets().await {
|
||||
log_rtab!(warn "Error loading buckets from storage: {}. Resetting.", e);
|
||||
log_rtab!(debug "Error loading buckets from storage: {:#?}. Resetting.", e);
|
||||
let mut inner = self.inner.write();
|
||||
inner.init_buckets(self.clone());
|
||||
}
|
||||
@ -173,7 +173,7 @@ impl RoutingTable {
|
||||
let route_spec_store = match RouteSpecStore::load(self.clone()).await {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
log_rtab!(warn "Error loading route spec store: {}. Resetting.", e);
|
||||
log_rtab!(debug "Error loading route spec store: {:#?}. Resetting.", e);
|
||||
RouteSpecStore::new(self.clone())
|
||||
}
|
||||
};
|
||||
@ -239,7 +239,7 @@ impl RoutingTable {
|
||||
let tdb = table_store.open("routing_table", 1).await?;
|
||||
let bucket_count = bucketvec.len();
|
||||
let mut dbx = tdb.transact();
|
||||
if let Err(e) = dbx.store_cbor(0, b"bucket_count", &bucket_count) {
|
||||
if let Err(e) = dbx.store_frozen(0, b"bucket_count", &bucket_count) {
|
||||
dbx.rollback();
|
||||
return Err(e);
|
||||
}
|
||||
@ -253,14 +253,13 @@ impl RoutingTable {
|
||||
|
||||
async fn load_buckets(&self) -> EyreResult<()> {
|
||||
// Deserialize all entries
|
||||
let inner = &mut *self.inner.write();
|
||||
|
||||
let tstore = self.network_manager().table_store();
|
||||
let tdb = tstore.open("routing_table", 1).await?;
|
||||
let Some(bucket_count): Option<usize> = tdb.load_cbor(0, b"bucket_count")? else {
|
||||
let Some(bucket_count): Option<usize> = tdb.load_rkyv(0, b"bucket_count")? else {
|
||||
log_rtab!(debug "no bucket count in saved routing table");
|
||||
return Ok(());
|
||||
};
|
||||
let inner = &mut *self.inner.write();
|
||||
if bucket_count != inner.buckets.len() {
|
||||
// Must have the same number of buckets
|
||||
warn!("bucket count is different, not loading routing table");
|
||||
@ -275,8 +274,8 @@ impl RoutingTable {
|
||||
};
|
||||
bucketdata_vec.push(bucketdata);
|
||||
}
|
||||
for n in 0..bucket_count {
|
||||
inner.buckets[n].load_bucket(&bucketdata_vec[n])?;
|
||||
for (n, bucketdata) in bucketdata_vec.into_iter().enumerate() {
|
||||
inner.buckets[n].load_bucket(bucketdata)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@ -383,7 +382,7 @@ impl RoutingTable {
|
||||
}
|
||||
|
||||
/// Return a copy of our node's signednodeinfo
|
||||
pub fn get_own_signed_node_info(&self, routing_domain: RoutingDomain) -> SignedNodeInfo {
|
||||
pub fn get_own_signed_node_info(&self, routing_domain: RoutingDomain) -> SignedDirectNodeInfo {
|
||||
self.inner.read().get_own_signed_node_info(routing_domain)
|
||||
}
|
||||
|
||||
@ -526,7 +525,7 @@ impl RoutingTable {
|
||||
&self,
|
||||
routing_domain: RoutingDomain,
|
||||
node_id: DHTKey,
|
||||
signed_node_info: SignedNodeInfo,
|
||||
signed_node_info: SignedDirectNodeInfo,
|
||||
allow_invalid: bool,
|
||||
) -> Option<NodeRef> {
|
||||
self.inner.write().register_node_with_signed_node_info(
|
||||
|
@ -385,9 +385,12 @@ impl NodeRef {
|
||||
out
|
||||
}
|
||||
|
||||
pub fn locked<'a>(&self, rti: &'a mut RoutingTableInner) -> NodeRefLocked<'a> {
|
||||
pub fn locked<'a>(&self, rti: &'a RoutingTableInner) -> NodeRefLocked<'a> {
|
||||
NodeRefLocked::new(rti, self.clone())
|
||||
}
|
||||
pub fn locked_mut<'a>(&self, rti: &'a mut RoutingTableInner) -> NodeRefLockedMut<'a> {
|
||||
NodeRefLockedMut::new(rti, self.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl NodeRefBase for NodeRef {
|
||||
@ -480,12 +483,12 @@ impl Drop for NodeRef {
|
||||
/// already locked a RoutingTableInner
|
||||
/// Keeps entry in the routing table until all references are gone
|
||||
pub struct NodeRefLocked<'a> {
|
||||
inner: Mutex<&'a mut RoutingTableInner>,
|
||||
inner: Mutex<&'a RoutingTableInner>,
|
||||
nr: NodeRef,
|
||||
}
|
||||
|
||||
impl<'a> NodeRefLocked<'a> {
|
||||
pub fn new(inner: &'a mut RoutingTableInner, nr: NodeRef) -> Self {
|
||||
pub fn new(inner: &'a RoutingTableInner, nr: NodeRef) -> Self {
|
||||
Self {
|
||||
inner: Mutex::new(inner),
|
||||
nr,
|
||||
@ -510,6 +513,65 @@ impl<'a> NodeRefBase for NodeRefLocked<'a> {
|
||||
self.nr.common.entry.with(inner, f)
|
||||
}
|
||||
|
||||
fn operate_mut<T, F>(&self, _f: F) -> T
|
||||
where
|
||||
F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> T,
|
||||
{
|
||||
panic!("need to locked_mut() for this operation")
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> fmt::Display for NodeRefLocked<'a> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.nr)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> fmt::Debug for NodeRefLocked<'a> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("NodeRefLocked")
|
||||
.field("nr", &self.nr)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/// Mutable locked reference to a routing table entry
|
||||
/// For internal use inside the RoutingTable module where you have
|
||||
/// already locked a RoutingTableInner
|
||||
/// Keeps entry in the routing table until all references are gone
|
||||
pub struct NodeRefLockedMut<'a> {
|
||||
inner: Mutex<&'a mut RoutingTableInner>,
|
||||
nr: NodeRef,
|
||||
}
|
||||
|
||||
impl<'a> NodeRefLockedMut<'a> {
|
||||
pub fn new(inner: &'a mut RoutingTableInner, nr: NodeRef) -> Self {
|
||||
Self {
|
||||
inner: Mutex::new(inner),
|
||||
nr,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> NodeRefBase for NodeRefLockedMut<'a> {
|
||||
fn common(&self) -> &NodeRefBaseCommon {
|
||||
&self.nr.common
|
||||
}
|
||||
|
||||
fn common_mut(&mut self) -> &mut NodeRefBaseCommon {
|
||||
&mut self.nr.common
|
||||
}
|
||||
|
||||
fn operate<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&RoutingTableInner, &BucketEntryInner) -> T,
|
||||
{
|
||||
let inner = &*self.inner.lock();
|
||||
self.nr.common.entry.with(inner, f)
|
||||
}
|
||||
|
||||
fn operate_mut<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> T,
|
||||
@ -519,15 +581,15 @@ impl<'a> NodeRefBase for NodeRefLocked<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> fmt::Display for NodeRefLocked<'a> {
|
||||
impl<'a> fmt::Display for NodeRefLockedMut<'a> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.nr)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> fmt::Debug for NodeRefLocked<'a> {
|
||||
impl<'a> fmt::Debug for NodeRefLockedMut<'a> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("NodeRefLocked")
|
||||
f.debug_struct("NodeRefLockedMut")
|
||||
.field("nr", &self.nr)
|
||||
.finish()
|
||||
}
|
||||
|
@ -221,11 +221,11 @@ impl RouteSpecStore {
|
||||
)
|
||||
};
|
||||
|
||||
// Get cbor blob from table store
|
||||
// Get frozen blob from table store
|
||||
let table_store = routing_table.network_manager().table_store();
|
||||
let rsstdb = table_store.open("RouteSpecStore", 1).await?;
|
||||
let mut content: RouteSpecStoreContent =
|
||||
rsstdb.load_cbor(0, b"content")?.unwrap_or_default();
|
||||
rsstdb.load_json(0, b"content")?.unwrap_or_default();
|
||||
|
||||
// Look up all route hop noderefs since we can't serialize those
|
||||
let mut dead_keys = Vec::new();
|
||||
@ -246,7 +246,7 @@ impl RouteSpecStore {
|
||||
// Load secrets from pstore
|
||||
let pstore = routing_table.network_manager().protected_store();
|
||||
let out: Vec<(DHTKey, DHTKeySecret)> = pstore
|
||||
.load_user_secret_cbor("RouteSpecStore")
|
||||
.load_user_secret_rkyv("RouteSpecStore")
|
||||
.await?
|
||||
.unwrap_or_default();
|
||||
|
||||
@ -289,14 +289,14 @@ impl RouteSpecStore {
|
||||
inner.content.clone()
|
||||
};
|
||||
|
||||
// Save all the fields we care about to the cbor blob in table storage
|
||||
// Save all the fields we care about to the frozen blob in table storage
|
||||
let table_store = self
|
||||
.unlocked_inner
|
||||
.routing_table
|
||||
.network_manager()
|
||||
.table_store();
|
||||
let rsstdb = table_store.open("RouteSpecStore", 1).await?;
|
||||
rsstdb.store_cbor(0, b"content", &content)?;
|
||||
rsstdb.store_json(0, b"content", &content)?;
|
||||
|
||||
// // Keep secrets in protected store as well
|
||||
let pstore = self
|
||||
@ -310,7 +310,9 @@ impl RouteSpecStore {
|
||||
out.push((*k, v.secret_key));
|
||||
}
|
||||
|
||||
let _ = pstore.save_user_secret_cbor("RouteSpecStore", &out).await?; // ignore if this previously existed or not
|
||||
let _ = pstore
|
||||
.save_user_secret_frozen("RouteSpecStore", &out)
|
||||
.await?; // ignore if this previously existed or not
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
@ -101,6 +101,45 @@ impl RoutingDomainDetailCommon {
|
||||
self.network_class.unwrap_or(NetworkClass::Invalid) != NetworkClass::Invalid
|
||||
}
|
||||
|
||||
fn make_peer_info(&self, rti: &RoutingTableInner) -> PeerInfo {
|
||||
let node_info = NodeInfo {
|
||||
network_class: self.network_class.unwrap_or(NetworkClass::Invalid),
|
||||
outbound_protocols: self.outbound_protocols,
|
||||
address_types: self.address_types,
|
||||
min_version: MIN_CRYPTO_VERSION,
|
||||
max_version: MAX_CRYPTO_VERSION,
|
||||
dial_info_detail_list: self.dial_info_details.clone(),
|
||||
};
|
||||
|
||||
let relay_peer_info = self
|
||||
.relay_node
|
||||
.as_ref()
|
||||
.and_then(|rn| rn.locked(rti).make_peer_info(self.routing_domain));
|
||||
|
||||
let signed_node_info = match relay_peer_info {
|
||||
Some(relay_pi) => SignedNodeInfo::Relayed(
|
||||
SignedRelayedNodeInfo::with_secret(
|
||||
NodeId::new(rti.unlocked_inner.node_id),
|
||||
node_info,
|
||||
relay_pi.node_id,
|
||||
relay_pi.signed_node_info,
|
||||
&rti.unlocked_inner.node_id_secret,
|
||||
)
|
||||
.unwrap(),
|
||||
),
|
||||
None => SignedNodeInfo::Direct(
|
||||
SignedDirectNodeInfo::with_secret(
|
||||
NodeId::new(rti.unlocked_inner.node_id),
|
||||
node_info,
|
||||
&rti.unlocked_inner.node_id_secret,
|
||||
)
|
||||
.unwrap(),
|
||||
),
|
||||
};
|
||||
|
||||
PeerInfo::new(NodeId::new(rti.unlocked_inner.node_id), signed_node_info)
|
||||
}
|
||||
|
||||
pub fn with_peer_info<F, R>(&self, rti: &RoutingTableInner, f: F) -> R
|
||||
where
|
||||
F: FnOnce(&PeerInfo) -> R,
|
||||
@ -110,7 +149,7 @@ impl RoutingDomainDetailCommon {
|
||||
// Regenerate peer info
|
||||
let pi = PeerInfo::new(
|
||||
NodeId::new(rti.unlocked_inner.node_id),
|
||||
SignedNodeInfo::with_secret(
|
||||
SignedDirectNodeInfo::with_secret(
|
||||
NodeInfo {
|
||||
network_class: self.network_class.unwrap_or(NetworkClass::Invalid),
|
||||
outbound_protocols: self.outbound_protocols,
|
||||
@ -118,10 +157,11 @@ impl RoutingDomainDetailCommon {
|
||||
min_version: MIN_CRYPTO_VERSION,
|
||||
max_version: MAX_CRYPTO_VERSION,
|
||||
dial_info_detail_list: self.dial_info_details.clone(),
|
||||
relay_peer_info: self
|
||||
.relay_node
|
||||
.as_ref()
|
||||
.and_then(|rn| rn.make_peer_info(self.routing_domain).map(Box::new)),
|
||||
relay_peer_info: self.relay_node.as_ref().and_then(|rn| {
|
||||
rn.locked(rti)
|
||||
.make_peer_info(self.routing_domain)
|
||||
.map(Box::new)
|
||||
}),
|
||||
},
|
||||
NodeId::new(rti.unlocked_inner.node_id),
|
||||
&rti.unlocked_inner.node_id_secret,
|
||||
|
@ -253,7 +253,7 @@ impl RoutingTableInner {
|
||||
}
|
||||
|
||||
/// Return a copy of our node's signednodeinfo
|
||||
pub fn get_own_signed_node_info(&self, routing_domain: RoutingDomain) -> SignedNodeInfo {
|
||||
pub fn get_own_signed_node_info(&self, routing_domain: RoutingDomain) -> SignedDirectNodeInfo {
|
||||
self.with_routing_domain(routing_domain, |rdd| {
|
||||
rdd.common()
|
||||
.with_peer_info(self, |pi| pi.signed_node_info.clone())
|
||||
@ -662,7 +662,7 @@ impl RoutingTableInner {
|
||||
outer_self: RoutingTable,
|
||||
routing_domain: RoutingDomain,
|
||||
node_id: DHTKey,
|
||||
signed_node_info: SignedNodeInfo,
|
||||
signed_node_info: SignedDirectNodeInfo,
|
||||
allow_invalid: bool,
|
||||
) -> Option<NodeRef> {
|
||||
// validate signed node info is not something malicious
|
||||
@ -717,7 +717,8 @@ impl RoutingTableInner {
|
||||
});
|
||||
if let Some(nr) = &out {
|
||||
// set the most recent node address for connection finding and udp replies
|
||||
nr.locked(self).set_last_connection(descriptor, timestamp);
|
||||
nr.locked_mut(self)
|
||||
.set_last_connection(descriptor, timestamp);
|
||||
}
|
||||
out
|
||||
}
|
||||
|
@ -17,7 +17,9 @@ mod public_key;
|
||||
mod sender_info;
|
||||
mod signal_info;
|
||||
mod signature;
|
||||
mod signed_direct_node_info;
|
||||
mod signed_node_info;
|
||||
mod signed_relayed_node_info;
|
||||
mod socket_address;
|
||||
mod tunnel;
|
||||
mod value_data;
|
||||
@ -42,7 +44,9 @@ pub use public_key::*;
|
||||
pub use sender_info::*;
|
||||
pub use signal_info::*;
|
||||
pub use signature::*;
|
||||
pub use signed_direct_node_info::*;
|
||||
pub use signed_node_info::*;
|
||||
pub use signed_relayed_node_info::*;
|
||||
pub use socket_address::*;
|
||||
pub use tunnel::*;
|
||||
pub use value_data::*;
|
||||
|
@ -5,7 +5,7 @@ pub fn encode_network_class(network_class: NetworkClass) -> veilid_capnp::Networ
|
||||
NetworkClass::InboundCapable => veilid_capnp::NetworkClass::InboundCapable,
|
||||
NetworkClass::OutboundOnly => veilid_capnp::NetworkClass::OutboundOnly,
|
||||
NetworkClass::WebApp => veilid_capnp::NetworkClass::WebApp,
|
||||
NetworkClass::Invalid => panic!("invalid network class should not be encoded"),
|
||||
NetworkClass::Invalid => veilid_capnp::NetworkClass::Invalid,
|
||||
}
|
||||
}
|
||||
|
||||
@ -14,5 +14,6 @@ pub fn decode_network_class(network_class: veilid_capnp::NetworkClass) -> Networ
|
||||
veilid_capnp::NetworkClass::InboundCapable => NetworkClass::InboundCapable,
|
||||
veilid_capnp::NetworkClass::OutboundOnly => NetworkClass::OutboundOnly,
|
||||
veilid_capnp::NetworkClass::WebApp => NetworkClass::WebApp,
|
||||
veilid_capnp::NetworkClass::Invalid => NetworkClass::Invalid,
|
||||
}
|
||||
}
|
||||
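Since NetworkClass::Invalid is now representable on the wire instead of hitting the panic! arm, a round trip through these coders covers every variant. A minimal test sketch, assuming NetworkClass implements PartialEq and Debug:

    #[test]
    fn network_class_roundtrip() {
        for nc in [
            NetworkClass::InboundCapable,
            NetworkClass::OutboundOnly,
            NetworkClass::WebApp,
            NetworkClass::Invalid, // previously panicked in encode_network_class
        ] {
            assert_eq!(decode_network_class(encode_network_class(nc)), nc);
        }
    }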
|
@ -31,18 +31,10 @@ pub fn encode_node_info(
|
||||
encode_dial_info_detail(&node_info.dial_info_detail_list[idx], &mut did_builder)?;
|
||||
}
|
||||
|
||||
if let Some(rpi) = &node_info.relay_peer_info {
|
||||
let mut rpi_builder = builder.reborrow().init_relay_peer_info();
|
||||
encode_peer_info(rpi, &mut rpi_builder)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn decode_node_info(
|
||||
reader: &veilid_capnp::node_info::Reader,
|
||||
allow_relay_peer_info: bool,
|
||||
) -> Result<NodeInfo, RPCError> {
|
||||
pub fn decode_node_info(reader: &veilid_capnp::node_info::Reader) -> Result<NodeInfo, RPCError> {
|
||||
let network_class = decode_network_class(
|
||||
reader
|
||||
.reborrow()
|
||||
@ -81,22 +73,6 @@ pub fn decode_node_info(
|
||||
dial_info_detail_list.push(decode_dial_info_detail(&did)?)
|
||||
}
|
||||
|
||||
let relay_peer_info = if allow_relay_peer_info {
|
||||
if reader.has_relay_peer_info() {
|
||||
Some(Box::new(decode_peer_info(
|
||||
&reader
|
||||
.reborrow()
|
||||
.get_relay_peer_info()
|
||||
.map_err(RPCError::protocol)?,
|
||||
false,
|
||||
)?))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(NodeInfo {
|
||||
network_class,
|
||||
outbound_protocols,
|
||||
@ -104,6 +80,5 @@ pub fn decode_node_info(
|
||||
min_version,
|
||||
max_version,
|
||||
dial_info_detail_list,
|
||||
relay_peer_info,
|
||||
})
|
||||
}
|
||||
|
@ -120,7 +120,7 @@ impl RPCOperation {
|
||||
let sni_reader = operation_reader
|
||||
.get_sender_node_info()
|
||||
.map_err(RPCError::protocol)?;
|
||||
let sni = decode_signed_node_info(&sni_reader, sender_node_id, true)?;
|
||||
let sni = decode_signed_node_info(&sni_reader, sender_node_id)?;
|
||||
Some(sni)
|
||||
} else {
|
||||
None
|
||||
|
@ -47,7 +47,7 @@ impl RPCOperationFindBlockA {
|
||||
.map_err(RPCError::map_internal("too many suppliers"))?,
|
||||
);
|
||||
for s in suppliers_reader.iter() {
|
||||
let peer_info = decode_peer_info(&s, true)?;
|
||||
let peer_info = decode_peer_info(&s)?;
|
||||
suppliers.push(peer_info);
|
||||
}
|
||||
|
||||
@ -59,7 +59,7 @@ impl RPCOperationFindBlockA {
|
||||
.map_err(RPCError::map_internal("too many peers"))?,
|
||||
);
|
||||
for p in peers_reader.iter() {
|
||||
let peer_info = decode_peer_info(&p, true)?;
|
||||
let peer_info = decode_peer_info(&p)?;
|
||||
peers.push(peer_info);
|
||||
}
|
||||
|
||||
|
@ -41,7 +41,7 @@ impl RPCOperationFindNodeA {
|
||||
.map_err(RPCError::map_internal("too many peers"))?,
|
||||
);
|
||||
for p in peers_reader.iter() {
|
||||
let peer_info = decode_peer_info(&p, true)?;
|
||||
let peer_info = decode_peer_info(&p)?;
|
||||
peers.push(peer_info);
|
||||
}
|
||||
|
||||
|
@ -48,7 +48,7 @@ impl RPCOperationGetValueA {
|
||||
.map_err(RPCError::map_internal("too many peers"))?,
|
||||
);
|
||||
for p in peers_reader.iter() {
|
||||
let peer_info = decode_peer_info(&p, true)?;
|
||||
let peer_info = decode_peer_info(&p)?;
|
||||
peers.push(peer_info);
|
||||
}
|
||||
|
||||
|
@ -18,7 +18,7 @@ impl RPCOperationNodeInfoUpdate {
|
||||
}
|
||||
let sender_node_id = opt_sender_node_id.unwrap();
|
||||
let sni_reader = reader.get_signed_node_info().map_err(RPCError::protocol)?;
|
||||
let signed_node_info = decode_signed_node_info(&sni_reader, sender_node_id, true)?;
|
||||
let signed_node_info = decode_signed_node_info(&sni_reader, sender_node_id)?;
|
||||
|
||||
Ok(RPCOperationNodeInfoUpdate { signed_node_info })
|
||||
}
|
||||
|
@ -53,7 +53,7 @@ impl RPCOperationSetValueA {
|
||||
.map_err(RPCError::map_internal("too many peers"))?,
|
||||
);
|
||||
for p in peers_reader.iter() {
|
||||
let peer_info = decode_peer_info(&p, true)?;
|
||||
let peer_info = decode_peer_info(&p)?;
|
||||
peers.push(peer_info);
|
||||
}
|
||||
|
||||
|
@ -49,7 +49,7 @@ impl RPCOperationSupplyBlockA {
|
||||
.map_err(RPCError::map_internal("too many peers"))?,
|
||||
);
|
||||
for p in peers_reader.iter() {
|
||||
let peer_info = decode_peer_info(&p, true)?;
|
||||
let peer_info = decode_peer_info(&p)?;
|
||||
peers.push(peer_info);
|
||||
}
|
||||
|
||||
|
@ -43,7 +43,7 @@ impl RPCOperationWatchValueA {
|
||||
.map_err(RPCError::map_internal("too many peers"))?,
|
||||
);
|
||||
for p in peers_reader.iter() {
|
||||
let peer_info = decode_peer_info(&p, true)?;
|
||||
let peer_info = decode_peer_info(&p)?;
|
||||
peers.push(peer_info);
|
||||
}
|
||||
|
||||
|
@ -14,10 +14,7 @@ pub fn encode_peer_info(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn decode_peer_info(
|
||||
reader: &veilid_capnp::peer_info::Reader,
|
||||
allow_relay_peer_info: bool,
|
||||
) -> Result<PeerInfo, RPCError> {
|
||||
pub fn decode_peer_info(reader: &veilid_capnp::peer_info::Reader) -> Result<PeerInfo, RPCError> {
|
||||
let nid_reader = reader
|
||||
.reborrow()
|
||||
.get_node_id()
|
||||
@ -27,8 +24,7 @@ pub fn decode_peer_info(
|
||||
.get_signed_node_info()
|
||||
.map_err(RPCError::protocol)?;
|
||||
let node_id = NodeId::new(decode_public_key(&nid_reader));
|
||||
let signed_node_info =
|
||||
decode_signed_node_info(&sni_reader, &node_id.key, allow_relay_peer_info)?;
|
||||
let signed_node_info = decode_signed_node_info(&sni_reader, &node_id.key)?;
|
||||
|
||||
Ok(PeerInfo {
|
||||
node_id,
|
||||
|
@ -77,7 +77,7 @@ pub fn decode_route_hop(reader: &veilid_capnp::route_hop::Reader) -> Result<Rout
|
||||
veilid_capnp::route_hop::node::Which::PeerInfo(pi) => {
|
||||
let pi_reader = pi.map_err(RPCError::protocol)?;
|
||||
RouteNode::PeerInfo(
|
||||
decode_peer_info(&pi_reader, true)
|
||||
decode_peer_info(&pi_reader)
|
||||
.map_err(RPCError::map_protocol("invalid peer info in route hop"))?,
|
||||
)
|
||||
}
|
||||
|
@ -53,7 +53,7 @@ pub fn decode_signal_info(
|
||||
let pi_reader = r.get_peer_info().map_err(RPCError::map_protocol(
|
||||
"invalid peer info in hole punch signal info",
|
||||
))?;
|
||||
let peer_info = decode_peer_info(&pi_reader, true)?;
|
||||
let peer_info = decode_peer_info(&pi_reader)?;
|
||||
|
||||
SignalInfo::HolePunch { receipt, peer_info }
|
||||
}
|
||||
@ -69,7 +69,7 @@ pub fn decode_signal_info(
|
||||
let pi_reader = r.get_peer_info().map_err(RPCError::map_protocol(
|
||||
"invalid peer info in reverse connect signal info",
|
||||
))?;
|
||||
let peer_info = decode_peer_info(&pi_reader, true)?;
|
||||
let peer_info = decode_peer_info(&pi_reader)?;
|
||||
|
||||
SignalInfo::ReverseConnect { receipt, peer_info }
|
||||
}
|
||||
|
@ -0,0 +1,43 @@
|
||||
use crate::*;
|
||||
use rpc_processor::*;
|
||||
|
||||
pub fn encode_signed_direct_node_info(
|
||||
signed_direct_node_info: &SignedDirectNodeInfo,
|
||||
builder: &mut veilid_capnp::signed_direct_node_info::Builder,
|
||||
) -> Result<(), RPCError> {
|
||||
//
|
||||
let mut ni_builder = builder.reborrow().init_node_info();
|
||||
encode_node_info(&signed_direct_node_info.node_info, &mut ni_builder)?;
|
||||
|
||||
builder
|
||||
.reborrow()
|
||||
.set_timestamp(signed_direct_node_info.timestamp);
|
||||
|
||||
let mut sig_builder = builder.reborrow().init_signature();
|
||||
encode_signature(&signed_direct_node_info.signature, &mut sig_builder);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn decode_signed_direct_node_info(
|
||||
reader: &veilid_capnp::signed_direct_node_info::Reader,
|
||||
node_id: &DHTKey,
|
||||
) -> Result<SignedDirectNodeInfo, RPCError> {
|
||||
let ni_reader = reader
|
||||
.reborrow()
|
||||
.get_node_info()
|
||||
.map_err(RPCError::protocol)?;
|
||||
let node_info = decode_node_info(&ni_reader)?;
|
||||
|
||||
let sig_reader = reader
|
||||
.reborrow()
|
||||
.get_signature()
|
||||
.map_err(RPCError::protocol)?;
|
||||
|
||||
let timestamp = reader.reborrow().get_timestamp();
|
||||
|
||||
let signature = decode_signature(&sig_reader);
|
||||
|
||||
SignedDirectNodeInfo::new(NodeId::new(*node_id), node_info, timestamp, signature)
|
||||
.map_err(RPCError::protocol)
|
||||
}
|
@ -5,14 +5,16 @@ pub fn encode_signed_node_info(
|
||||
signed_node_info: &SignedNodeInfo,
|
||||
builder: &mut veilid_capnp::signed_node_info::Builder,
|
||||
) -> Result<(), RPCError> {
|
||||
//
|
||||
let mut ni_builder = builder.reborrow().init_node_info();
|
||||
encode_node_info(&signed_node_info.node_info, &mut ni_builder)?;
|
||||
|
||||
let mut sig_builder = builder.reborrow().init_signature();
|
||||
encode_signature(&signed_node_info.signature, &mut sig_builder);
|
||||
|
||||
builder.reborrow().set_timestamp(signed_node_info.timestamp);
|
||||
match signed_node_info {
|
||||
SignedNodeInfo::Direct(d) => {
|
||||
let mut d_builder = builder.reborrow().init_direct();
|
||||
encode_signed_direct_node_info(d, &mut d_builder)?;
|
||||
}
|
||||
SignedNodeInfo::Relayed(r) => {
|
||||
let mut r_builder = builder.reborrow().init_relayed();
|
||||
encode_signed_relayed_node_info(r, &mut r_builder)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@ -20,22 +22,20 @@ pub fn encode_signed_node_info(
|
||||
pub fn decode_signed_node_info(
|
||||
reader: &veilid_capnp::signed_node_info::Reader,
|
||||
node_id: &DHTKey,
|
||||
allow_relay_peer_info: bool,
|
||||
) -> Result<SignedNodeInfo, RPCError> {
|
||||
let ni_reader = reader
|
||||
.reborrow()
|
||||
.get_node_info()
|
||||
.map_err(RPCError::protocol)?;
|
||||
let node_info = decode_node_info(&ni_reader, allow_relay_peer_info)?;
|
||||
|
||||
let sig_reader = reader
|
||||
.reborrow()
|
||||
.get_signature()
|
||||
.map_err(RPCError::protocol)?;
|
||||
let signature = decode_signature(&sig_reader);
|
||||
|
||||
let timestamp = reader.reborrow().get_timestamp();
|
||||
|
||||
SignedNodeInfo::new(node_info, NodeId::new(*node_id), signature, timestamp)
|
||||
.map_err(RPCError::protocol)
|
||||
match reader
|
||||
.which()
|
||||
.map_err(RPCError::map_internal("invalid signal operation"))?
|
||||
{
|
||||
veilid_capnp::signed_node_info::Which::Direct(d) => {
|
||||
let d_reader = d.map_err(RPCError::protocol)?;
|
||||
let sdni = decode_signed_direct_node_info(&d_reader, node_id)?;
|
||||
Ok(SignedNodeInfo::Direct(sdni))
|
||||
}
|
||||
veilid_capnp::signed_node_info::Which::Relayed(r) => {
|
||||
let r_reader = r.map_err(RPCError::protocol)?;
|
||||
let srni = decode_signed_relayed_node_info(&r_reader, node_id)?;
|
||||
Ok(SignedNodeInfo::Relayed(srni))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -0,0 +1,67 @@
|
||||
use crate::*;
|
||||
use rpc_processor::*;
|
||||
|
||||
pub fn encode_signed_relayed_node_info(
|
||||
signed_relayed_node_info: &SignedRelayedNodeInfo,
|
||||
builder: &mut veilid_capnp::signed_relayed_node_info::Builder,
|
||||
) -> Result<(), RPCError> {
|
||||
//
|
||||
let mut ni_builder = builder.reborrow().init_node_info();
|
||||
encode_node_info(&signed_relayed_node_info.node_info, &mut ni_builder)?;
|
||||
|
||||
let mut rid_builder = builder.reborrow().init_relay_id();
|
||||
encode_public_key(&signed_relayed_node_info.relay_id.key, &mut rid_builder)?;
|
||||
|
||||
let mut ri_builder = builder.reborrow().init_relay_info();
|
||||
encode_signed_direct_node_info(&signed_relayed_node_info.relay_info, &mut ri_builder)?;
|
||||
|
||||
builder
|
||||
.reborrow()
|
||||
.set_timestamp(signed_relayed_node_info.timestamp);
|
||||
|
||||
let mut sig_builder = builder.reborrow().init_signature();
|
||||
encode_signature(&signed_relayed_node_info.signature, &mut sig_builder);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn decode_signed_relayed_node_info(
|
||||
reader: &veilid_capnp::signed_relayed_node_info::Reader,
|
||||
node_id: &DHTKey,
|
||||
) -> Result<SignedRelayedNodeInfo, RPCError> {
|
||||
let ni_reader = reader
|
||||
.reborrow()
|
||||
.get_node_info()
|
||||
.map_err(RPCError::protocol)?;
|
||||
let node_info = decode_node_info(&ni_reader)?;
|
||||
|
||||
let rid_reader = reader
|
||||
.reborrow()
|
||||
.get_relay_id()
|
||||
.map_err(RPCError::protocol)?;
|
||||
let relay_id = decode_public_key(&rid_reader);
|
||||
|
||||
let ri_reader = reader
|
||||
.reborrow()
|
||||
.get_relay_info()
|
||||
.map_err(RPCError::protocol)?;
|
||||
let relay_info = decode_signed_direct_node_info(&ri_reader, &relay_id)?;
|
||||
|
||||
let sig_reader = reader
|
||||
.reborrow()
|
||||
.get_signature()
|
||||
.map_err(RPCError::protocol)?;
|
||||
let timestamp = reader.reborrow().get_timestamp();
|
||||
|
||||
let signature = decode_signature(&sig_reader);
|
||||
|
||||
SignedRelayedNodeInfo::new(
|
||||
NodeId::new(*node_id),
|
||||
node_info,
|
||||
NodeId::new(relay_id),
|
||||
relay_info,
|
||||
timestamp,
|
||||
signature,
|
||||
)
|
||||
.map_err(RPCError::protocol)
|
||||
}
@ -619,7 +619,7 @@ impl RPCProcessor {
    // routing table caching when it is okay to do so
    // This is only done in the PublicInternet routing domain because
    // as far as we can tell this is the only domain that will really benefit
-    fn get_sender_signed_node_info(&self, dest: &Destination) -> Option<SignedNodeInfo> {
+    fn get_sender_signed_node_info(&self, dest: &Destination) -> Option<SignedDirectNodeInfo> {
        // Don't do this if the sender is to remain private
        // Otherwise we would be attaching the original sender's identity to the final destination,
        // thus defeating the purpose of the safety route entirely :P
@ -682,7 +682,7 @@ impl RPCProcessor {
        let op_id = operation.op_id();

        // Log rpc send
-        debug!(target: "rpc_message", dir = "send", kind = "question", op_id, desc = operation.kind().desc(), ?dest);
+        trace!(target: "rpc_message", dir = "send", kind = "question", op_id, desc = operation.kind().desc(), ?dest);

        // Produce rendered operation
        let RenderedOperation {
@ -745,7 +745,7 @@ impl RPCProcessor {
        let operation = RPCOperation::new_statement(statement, opt_sender_info);

        // Log rpc send
-        debug!(target: "rpc_message", dir = "send", kind = "statement", op_id = operation.op_id(), desc = operation.kind().desc(), ?dest);
+        trace!(target: "rpc_message", dir = "send", kind = "statement", op_id = operation.op_id(), desc = operation.kind().desc(), ?dest);

        // Produce rendered operation
        let RenderedOperation {
@ -865,7 +865,7 @@ impl RPCProcessor {
        let operation = RPCOperation::new_answer(&request.operation, answer, opt_sender_info);

        // Log rpc send
-        debug!(target: "rpc_message", dir = "send", kind = "answer", op_id = operation.op_id(), desc = operation.kind().desc(), ?dest);
+        trace!(target: "rpc_message", dir = "send", kind = "answer", op_id = operation.op_id(), desc = operation.kind().desc(), ?dest);

        // Produce rendered operation
        let RenderedOperation {
@ -997,7 +997,7 @@ impl RPCProcessor {
        };

        // Log rpc receive
-        debug!(target: "rpc_message", dir = "recv", kind, op_id = msg.operation.op_id(), desc = msg.operation.kind().desc(), header = ?msg.header);
+        trace!(target: "rpc_message", dir = "recv", kind, op_id = msg.operation.op_id(), desc = msg.operation.kind().desc(), header = ?msg.header);

        // Process specific message kind
        match msg.operation.kind() {
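// --- Not part of this commit: surfacing the demoted rpc_message logs. ---
// The events above now emit at trace level. A host application that sets up
// logging with tracing_subscriber (an assumption; only the tracing macros are
// visible here) could opt back into them per target like this:
fn init_logging_with_rpc_messages() {
    use tracing_subscriber::{fmt, EnvFilter};
    // Keep everything else at info, but show rpc_message events at trace.
    let filter = EnvFilter::new("info,rpc_message=trace");
    fmt().with_env_filter(filter).init();
}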
@ -122,21 +122,21 @@ pub async fn test_store_delete_load(ts: TableStore) {
    assert_eq!(db.load(2, b"baz").unwrap(), Some(b"QWERTY".to_vec()));
}

-pub async fn test_cbor(ts: TableStore) {
-    trace!("test_cbor");
+pub async fn test_frozen(ts: TableStore) {
+    trace!("test_frozen");

    let _ = ts.delete("test");
    let db = ts.open("test", 3).await.expect("should have opened");
    let (dht_key, _) = generate_secret();

-    assert!(db.store_cbor(0, b"asdf", &dht_key).is_ok());
+    assert!(db.store_rkyv(0, b"asdf", &dht_key).is_ok());

-    assert_eq!(db.load_cbor::<DHTKey>(0, b"qwer").unwrap(), None);
+    assert_eq!(db.load_rkyv::<DHTKey>(0, b"qwer").unwrap(), None);

-    let d = match db.load_cbor::<DHTKey>(0, b"asdf") {
+    let d = match db.load_rkyv::<DHTKey>(0, b"asdf") {
        Ok(x) => x,
        Err(e) => {
-            panic!("couldn't decode cbor: {}", e);
+            panic!("couldn't decode: {}", e);
        }
    };
    assert_eq!(d, Some(dht_key), "keys should be equal");
@ -147,8 +147,8 @@ pub async fn test_cbor(ts: TableStore) {
    );

    assert!(
-        db.load_cbor::<DHTKey>(1, b"foo").is_err(),
-        "should fail to load cbor"
+        db.load_rkyv::<DHTKey>(1, b"foo").is_err(),
+        "should fail to unfreeze"
    );
}

@ -157,7 +157,7 @@ pub async fn test_all() {
    let ts = api.table_store().unwrap();
    test_delete_open_delete(ts.clone()).await;
    test_store_delete_load(ts.clone()).await;
-    test_cbor(ts.clone()).await;
+    test_frozen(ts.clone()).await;

    let _ = ts.delete("test").await;
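// --- Not part of this commit: the rkyv table-store calls outside a test. ---
// PeerStats is a made-up record; the call shapes (store_rkyv / load_rkyv with
// a column index and key) mirror test_frozen above, and error handling follows
// the test's expect/unwrap style rather than any particular error type.
#[derive(Debug, PartialEq, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
#[archive_attr(derive(CheckBytes))]
struct PeerStats {
    messages_sent: u64,
    messages_received: u64,
}

async fn save_and_restore_stats(ts: TableStore) -> Option<PeerStats> {
    let db = ts.open("peer_stats", 1).await.expect("should have opened");
    let stats = PeerStats {
        messages_sent: 10,
        messages_received: 20,
    };
    assert!(db.store_rkyv(0, b"peer0", &stats).is_ok());
    db.load_rkyv::<PeerStats>(0, b"peer0").unwrap()
}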
@ -43,7 +43,78 @@ pub async fn test_attach_detach() {
    api.shutdown().await;
}

pub async fn test_signed_node_info() {
    info!("--- test_signed_node_info ---");

    let (update_callback, config_callback) = setup_veilid_core();
    let api = api_startup(update_callback, config_callback)
        .await
        .expect("startup failed");

    // Test direct
    let node_info = NodeInfo {
        network_class: NetworkClass::InboundCapable,
        outbound_protocols: ProtocolTypeSet::all(),
        address_types: AddressTypeSet::all(),
        min_version: 0,
        max_version: 0,
        dial_info_detail_list: vec![DialInfoDetail {
            class: DialInfoClass::Mapped,
            dial_info: DialInfo::udp(SocketAddress::default()),
        }],
    };

    let (pkey, skey) = generate_secret();

    let sni =
        SignedDirectNodeInfo::with_secret(NodeId::new(pkey.clone()), node_info.clone(), &skey)
            .unwrap();
    let _ = SignedDirectNodeInfo::new(
        NodeId::new(pkey),
        node_info.clone(),
        sni.timestamp,
        sni.signature,
    )
    .unwrap();

    // Test relayed
    let node_info2 = NodeInfo {
        network_class: NetworkClass::OutboundOnly,
        outbound_protocols: ProtocolTypeSet::all(),
        address_types: AddressTypeSet::all(),
        min_version: 0,
        max_version: 0,
        dial_info_detail_list: vec![DialInfoDetail {
            class: DialInfoClass::Blocked,
            dial_info: DialInfo::udp(SocketAddress::default()),
        }],
    };

    let (pkey2, skey2) = generate_secret();

    let sni2 = SignedRelayedNodeInfo::with_secret(
        NodeId::new(pkey2.clone()),
        node_info2.clone(),
        NodeId::new(pkey.clone()),
        sni.clone(),
        &skey2,
    )
    .unwrap();
    let _ = SignedRelayedNodeInfo::new(
        NodeId::new(pkey2),
        node_info2,
        NodeId::new(pkey),
        sni,
        sni2.timestamp,
        sni2.signature,
    )
    .unwrap();

    api.shutdown().await;
}

pub async fn test_all() {
    test_startup_shutdown().await;
    test_attach_detach().await;
    test_signed_node_info().await;
}
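// --- Not part of this commit: driving the expanded test_all above. ---
// A sketch of a native integration-test entry point, assuming a tokio dev
// runtime is available; the crate's actual test harness is not shown here.
#[tokio::test]
async fn run_veilid_api_tests() {
    test_all().await;
}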
@ -4,7 +4,6 @@
use super::*;
use data_encoding::BASE64URL_NOPAD;
use routing_table::*;
use rpc_processor::*;

#[derive(Default, Debug)]
struct DebugCache {
File diff suppressed because it is too large
@ -1,5 +1,9 @@
use super::*;
pub use bytecheck::CheckBytes;
use core::fmt::Debug;
pub use rkyv::Archive as RkyvArchive;
pub use rkyv::Deserialize as RkyvDeserialize;
pub use rkyv::Serialize as RkyvSerialize;

// XXX: Don't trace these functions as they are used in the transfer of API logs, which will recurse!

@ -128,3 +132,79 @@ pub mod arc_serialize {
        Ok(Arc::new(T::deserialize(d)?))
    }
}
pub fn to_rkyv<T>(v: &T) -> EyreResult<Vec<u8>>
where
    T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
{
    Ok(rkyv::to_bytes::<T, 1024>(v)
        .wrap_err("failed to freeze object")?
        .to_vec())
}

pub fn from_rkyv<T>(v: Vec<u8>) -> EyreResult<T>
where
    T: RkyvArchive,
    <T as RkyvArchive>::Archived:
        for<'t> bytecheck::CheckBytes<rkyv::validation::validators::DefaultValidator<'t>>,
    <T as RkyvArchive>::Archived:
        rkyv::Deserialize<T, rkyv::de::deserializers::SharedDeserializeMap>,
{
    match rkyv::from_bytes::<T>(&v) {
        Ok(v) => Ok(v),
        Err(e) => {
            bail!("failed to deserialize frozen object: {}", e);
        }
    }
}
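// --- Not part of this commit: a round-trip sketch for to_rkyv / from_rkyv. ---
// ExampleRecord is a made-up type; the derives use the aliases re-exported at
// the top of this file, and CheckBytes on the archived form is what satisfies
// from_rkyv's validation bound.
#[derive(Debug, PartialEq, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
#[archive_attr(derive(CheckBytes))]
struct ExampleRecord {
    count: u32,
    name: String,
}

fn example_rkyv_roundtrip() -> EyreResult<()> {
    let original = ExampleRecord {
        count: 7,
        name: "seven".to_owned(),
    };
    let frozen: Vec<u8> = to_rkyv(&original)?; // serialize ("freeze") to bytes
    let thawed: ExampleRecord = from_rkyv(frozen)?; // validate and deserialize
    assert_eq!(original, thawed);
    Ok(())
}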
pub struct RkyvEnumSet;

impl<T> rkyv::with::ArchiveWith<EnumSet<T>> for RkyvEnumSet
where
    T: EnumSetType + EnumSetTypeWithRepr,
    <T as EnumSetTypeWithRepr>::Repr: rkyv::Archive,
{
    type Archived = rkyv::Archived<<T as EnumSetTypeWithRepr>::Repr>;
    type Resolver = rkyv::Resolver<<T as EnumSetTypeWithRepr>::Repr>;

    #[inline]
    unsafe fn resolve_with(
        field: &EnumSet<T>,
        pos: usize,
        resolver: Self::Resolver,
        out: *mut Self::Archived,
    ) {
        let r = field.as_repr();
        r.resolve(pos, resolver, out);
    }
}

impl<T, S> rkyv::with::SerializeWith<EnumSet<T>, S> for RkyvEnumSet
where
    S: rkyv::Fallible + ?Sized,
    T: EnumSetType + EnumSetTypeWithRepr,
    <T as EnumSetTypeWithRepr>::Repr: rkyv::Serialize<S>,
{
    fn serialize_with(field: &EnumSet<T>, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
        let r = field.as_repr();
        r.serialize(serializer)
    }
}

impl<T, D>
    rkyv::with::DeserializeWith<rkyv::Archived<<T as EnumSetTypeWithRepr>::Repr>, EnumSet<T>, D>
    for RkyvEnumSet
where
    D: rkyv::Fallible + ?Sized,
    T: EnumSetType + EnumSetTypeWithRepr,
    <T as EnumSetTypeWithRepr>::Repr: rkyv::Archive,
    rkyv::Archived<<T as EnumSetTypeWithRepr>::Repr>: rkyv::Deserialize<EnumSet<T>, D>,
{
    fn deserialize_with(
        field: &rkyv::Archived<<T as EnumSetTypeWithRepr>::Repr>,
        deserializer: &mut D,
    ) -> Result<EnumSet<T>, D::Error> {
        Ok(field.deserialize(deserializer)?.into())
    }
}
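// --- Not part of this commit: applying RkyvEnumSet to a field. ---
// Capability is a made-up enum; #[enumset(repr = "u8")] gives it the
// EnumSetTypeWithRepr impl the wrapper impls above require, and rkyv's
// #[with(...)] field attribute routes the field through RkyvEnumSet.
#[derive(EnumSetType, Debug)]
#[enumset(repr = "u8")]
enum Capability {
    Route,
    Tunnel,
    Validate,
}

#[derive(RkyvArchive, RkyvSerialize, RkyvDeserialize)]
struct NodeCapabilities {
    #[with(RkyvEnumSet)]
    caps: EnumSet<Capability>,
}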