removing dev branch, many changes

John Smith 2023-05-29 19:24:57 +00:00
parent 1430f3f656
commit 0a890c8707
250 changed files with 18084 additions and 8040 deletions

Cargo.lock (generated, 1843 changed lines)

File diff suppressed because it is too large

View File

@ -1,5 +1,4 @@
[workspace]
members = [
"veilid-tools",
"veilid-core",

View File

@ -23,13 +23,15 @@ logging:
enabled: false
level: 'trace'
grpc_endpoint: 'localhost:4317'
console:
enabled: false
testing:
subnode_index: 0
core:
protected_store:
allow_insecure_fallback: true
always_use_insecure_storage: true
insecure_fallback_directory: '%INSECURE_FALLBACK_DIRECTORY%'
directory: '%DIRECTORY%'
delete: false
table_store:
directory: '%TABLE_STORE_DIRECTORY%'
@ -47,10 +49,10 @@ core:
client_whitelist_timeout_ms: 300000
reverse_connection_receipt_time_ms: 5000
hole_punch_receipt_time_ms: 5000
node_id: ''
node_id_secret: ''
bootstrap: ['bootstrap.dev.veilid.net']
routing_table:
node_id: null
node_id_secret: null
bootstrap: ['bootstrap.dev.veilid.net']
limit_over_attached: 64
limit_fully_attached: 32
limit_attached_strong: 16
@ -61,27 +63,31 @@ core:
queue_size: 1024
max_timestamp_behind_ms: 10000
max_timestamp_ahead_ms: 10000
timeout_ms: 10000
timeout_ms: 5000
max_route_hop_count: 4
default_route_hop_count: 1
dht:
resolve_node_timeout:
resolve_node_count: 20
resolve_node_fanout: 3
max_find_node_count: 20
get_value_timeout:
get_value_count: 20
get_value_fanout: 3
set_value_timeout:
set_value_count: 20
set_value_fanout: 5
resolve_node_timeout_ms: 10000
resolve_node_count: 1
resolve_node_fanout: 4
get_value_timeout_ms: 10000
get_value_count: 3
get_value_fanout: 4
set_value_timeout_ms: 10000
set_value_count: 5
set_value_fanout: 4
min_peer_count: 20
min_peer_refresh_time_ms: 2000
validate_dial_info_receipt_time_ms: 2000
local_subkey_cache_size: 128
local_max_subkey_cache_memory_mb: 256
remote_subkey_cache_size: 1024
remote_max_records: 65536
remote_max_subkey_cache_memory_mb: %REMOTE_MAX_SUBKEY_CACHE_MEMORY_MB%
remote_max_storage_space_mb: 0
upnp: true
detect_address_changes: true
enable_local_peer_scope: false
restricted_nat_retries: 0
tls:
certificate_path: '%CERTIFICATE_PATH%'
@ -124,4 +130,3 @@ core:
listen_address: ':5150'
path: 'ws'
# url: ''

View File

@ -155,7 +155,7 @@ testing:
protected_store:
allow_insecure_fallback: true
always_use_insecure_storage: true
insecure_fallback_directory: '%INSECURE_FALLBACK_DIRECTORY%'
directory: '%DIRECTORY%'
delete: false
```
@ -225,7 +225,7 @@ rpc:
queue_size: 1024
max_timestamp_behind_ms: 10000
max_timestamp_ahead_ms: 10000
timeout_ms: 10000
timeout_ms: 5000
max_route_hop_count: 4
default_route_hop_count: 1
```
@ -234,19 +234,25 @@ rpc:
```yaml
dht:
resolve_node_timeout:
resolve_node_count: 20
resolve_node_fanout: 3
max_find_node_count: 20
get_value_timeout:
get_value_count: 20
get_value_fanout: 3
set_value_timeout:
set_value_count: 20
set_value_fanout: 5
resolve_node_timeout_ms: 10000
resolve_node_count: 1
resolve_node_fanout: 4
get_value_timeout_ms: 10000
get_value_count: 3
get_value_fanout: 4
set_value_timeout_ms: 10000
set_value_count: 5
set_value_fanout: 4
min_peer_count: 20
min_peer_refresh_time_ms: 2000
validate_dial_info_receipt_time_ms: 2000
local_subkey_cache_size: 128
local_max_subkey_cache_memory_mb: 256
remote_subkey_cache_size: 1024
remote_max_records: 65536
remote_max_subkey_cache_memory_mb: %REMOTE_MAX_SUBKEY_CACHE_MEMORY_MB%
remote_max_storage_space_mb: 0
```
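
As a quick check of the renamed keys above, here is a minimal sketch (not the veilid-core config types) that parses the timing-related subset of this dht section with serde + serde_yaml; the struct name and the choice of `u32` fields are assumptions.

```rust
use serde::Deserialize;

// Hypothetical subset of the dht section above; only the renamed *_ms keys
// and their count/fanout companions are modeled here.
#[derive(Debug, Deserialize)]
struct DhtTimingSketch {
    resolve_node_timeout_ms: u32,
    resolve_node_count: u32,
    resolve_node_fanout: u32,
    get_value_timeout_ms: u32,
    get_value_count: u32,
    get_value_fanout: u32,
    set_value_timeout_ms: u32,
    set_value_count: u32,
    set_value_fanout: u32,
}

fn main() -> Result<(), serde_yaml::Error> {
    let yaml = "
resolve_node_timeout_ms: 10000
resolve_node_count: 1
resolve_node_fanout: 4
get_value_timeout_ms: 10000
get_value_count: 3
get_value_fanout: 4
set_value_timeout_ms: 10000
set_value_count: 5
set_value_fanout: 4
";
    let dht: DhtTimingSketch = serde_yaml::from_str(yaml)?;
    println!("{:?}", dht);
    Ok(())
}
```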
#### core:network:tls

external/keyvaluedb (vendored, 2 changed lines)

@ -1 +1 @@
Subproject commit 3408e0b2ae3df0088e0714bc23fb33c82a58e22c
Subproject commit 9bb05a54b4c0278a289841b2bf7c1749aa0fbd5d

View File

@ -14,7 +14,7 @@ logging:
enabled: false
core:
protected_store:
insecure_fallback_directory: '/var/db/veilid-server/protected_store'
directory: '/var/db/veilid-server/protected_store'
table_store:
directory: '/var/db/veilid-server/table_store'
block_store:

View File

@ -57,7 +57,7 @@ do
HOMEBREW_DIR=$(dirname `which brew`)
fi
env -i PATH=/usr/bin:/bin:$HOMEBREW_DIR:$CARGO_DIR HOME="$HOME" USER="$USER" cargo $CARGO_TOOLCHAIN build $EXTRA_CARGO_OPTIONS --target $CARGO_TARGET --manifest-path $CARGO_MANIFEST_PATH
env -i PATH=/usr/bin:/bin:$HOMEBREW_DIR:$CARGO_DIR HOME="$HOME" USER="$USER" IPHONEOS_DEPLOYMENT_TARGET="$IPHONEOS_DEPLOYMENT_TARGET" cargo $CARGO_TOOLCHAIN build $EXTRA_CARGO_OPTIONS --target $CARGO_TARGET --manifest-path $CARGO_MANIFEST_PATH
LIPOS="$LIPOS $TARGET_PATH/$CARGO_TARGET/$BUILD_MODE/lib$PACKAGE_NAME.a"

View File

@ -47,7 +47,7 @@ do
HOMEBREW_DIR=$(dirname `which brew`)
fi
env -i PATH=/usr/bin:/bin:$HOMEBREW_DIR:$CARGO_DIR HOME="$HOME" USER="$USER" cargo $CARGO_TOOLCHAIN build $EXTRA_CARGO_OPTIONS --target $CARGO_TARGET --manifest-path $CARGO_MANIFEST_PATH
env -i PATH=/usr/bin:/bin:$HOMEBREW_DIR:$CARGO_DIR HOME="$HOME" USER="$USER" MACOSX_DEPLOYMENT_TARGET="$MACOSX_DEPLOYMENT_TARGET" cargo $CARGO_TOOLCHAIN build $EXTRA_CARGO_OPTIONS --target $CARGO_TARGET --manifest-path $CARGO_MANIFEST_PATH
LIPOS="$LIPOS $TARGET_PATH/$CARGO_TARGET/$BUILD_MODE/lib$PACKAGE_NAME.dylib"

View File

@ -1,5 +1,6 @@
#!/bin/bash
ID=$(xcrun simctl create test-iphone com.apple.CoreSimulator.SimDeviceType.iPhone-14-Pro com.apple.CoreSimulator.SimRuntime.iOS-16-1 2>/dev/null)
RUNTIME=$(xcrun simctl runtime list -j | jq '.[].runtimeIdentifier' -r | head -1)
ID=$(xcrun simctl create test-iphone com.apple.CoreSimulator.SimDeviceType.iPhone-14-Pro $RUNTIME 2>/dev/null)
xcrun simctl boot $ID
xcrun simctl bootstatus $ID
echo Simulator ID is $ID

View File

@ -122,5 +122,5 @@ if [ "$BREW_USER" == "" ]; then
BREW_USER=`whoami`
fi
fi
sudo -H -u $BREW_USER brew install capnp cmake wabt llvm protobuf openjdk@11
sudo -H -u $BREW_USER brew install capnp cmake wabt llvm protobuf openjdk@11 jq
sudo gem install cocoapods

View File

@ -38,7 +38,7 @@ cfg-if = "^1"
capnp = "^0"
capnp-rpc = "^0"
config = { version = "^0", features = ["yaml"] }
bugsalot = "^0"
bugsalot = { git = "https://github.com/crioux/bugsalot.git" }
flexi_logger = { version = "^0", features = ["use_chrono_for_offset"] }
thiserror = "^1"
crossbeam-channel = "^0"

View File

@ -38,7 +38,7 @@ fn map_to_internal_error<T: ToString>(e: T) -> VeilidAPIError {
fn decode_api_result<T: DeserializeOwned + fmt::Debug>(
reader: &api_result::Reader,
) -> Result<T, VeilidAPIError> {
) -> VeilidAPIResult<T> {
match reader.which().map_err(map_to_internal_error)? {
api_result::Which::Ok(v) => {
let ok_val = v.map_err(map_to_internal_error)?;
@ -92,7 +92,7 @@ impl veilid_client::Server for VeilidClientImpl {
VeilidUpdate::Config(config) => {
self.comproc.update_config(config);
}
VeilidUpdate::Route(route) => {
VeilidUpdate::RouteChange(route) => {
self.comproc.update_route(route);
}
VeilidUpdate::Shutdown => self.comproc.update_shutdown(),
@ -355,7 +355,7 @@ impl ClientApiConnection {
.map_err(map_to_string)?
.get_result()
.map_err(map_to_string)?;
let res: Result<(), VeilidAPIError> = decode_api_result(&reader);
let res: VeilidAPIResult<()> = decode_api_result(&reader);
res.map_err(map_to_string)
}
@ -379,7 +379,7 @@ impl ClientApiConnection {
.map_err(map_to_string)?
.get_result()
.map_err(map_to_string)?;
let res: Result<(), VeilidAPIError> = decode_api_result(&reader);
let res: VeilidAPIResult<()> = decode_api_result(&reader);
res.map_err(map_to_string)
}
@ -422,7 +422,7 @@ impl ClientApiConnection {
.map_err(map_to_string)?
.get_result()
.map_err(map_to_string)?;
let res: Result<String, VeilidAPIError> = decode_api_result(&reader);
let res: VeilidAPIResult<String> = decode_api_result(&reader);
res.map_err(map_to_string)
}
@ -453,7 +453,7 @@ impl ClientApiConnection {
.map_err(map_to_string)?
.get_result()
.map_err(map_to_string)?;
let res: Result<(), VeilidAPIError> = decode_api_result(&reader);
let res: VeilidAPIResult<()> = decode_api_result(&reader);
res.map_err(map_to_string)
}
@ -483,7 +483,7 @@ impl ClientApiConnection {
.map_err(map_to_string)?
.get_result()
.map_err(map_to_string)?;
let res: Result<(), VeilidAPIError> = decode_api_result(&reader);
let res: VeilidAPIResult<()> = decode_api_result(&reader);
res.map_err(map_to_string)
}

View File

@ -406,7 +406,7 @@ reply - reply to an AppCall not handled directly by the server
pub fn update_config(&mut self, config: veilid_core::VeilidStateConfig) {
self.inner_mut().ui.set_config(config.config)
}
pub fn update_route(&mut self, route: veilid_core::VeilidStateRoute) {
pub fn update_route(&mut self, route: veilid_core::VeilidRouteChange) {
let mut out = String::new();
if !route.dead_routes.is_empty() {
out.push_str(&format!("Dead routes: {:?}", route.dead_routes));
@ -445,46 +445,46 @@ reply - reply to an AppCall not handled directly by the server
pub fn update_app_message(&mut self, msg: veilid_core::VeilidAppMessage) {
// check if message body is ascii printable
let mut printable = true;
for c in &msg.message {
for c in msg.message() {
if *c < 32 || *c > 126 {
printable = false;
}
}
let strmsg = if printable {
String::from_utf8_lossy(&msg.message).to_string()
String::from_utf8_lossy(msg.message()).to_string()
} else {
hex::encode(&msg.message)
hex::encode(msg.message())
};
self.inner()
.ui
.add_node_event(format!("AppMessage ({:?}): {}", msg.sender, strmsg));
.add_node_event(format!("AppMessage ({:?}): {}", msg.sender(), strmsg));
}
pub fn update_app_call(&mut self, call: veilid_core::VeilidAppCall) {
// check if message body is ascii printable
let mut printable = true;
for c in &call.message {
for c in call.message() {
if *c < 32 || *c > 126 {
printable = false;
}
}
let strmsg = if printable {
String::from_utf8_lossy(&call.message).to_string()
String::from_utf8_lossy(call.message()).to_string()
} else {
format!("#{}", hex::encode(&call.message))
format!("#{}", hex::encode(call.message()))
};
self.inner().ui.add_node_event(format!(
"AppCall ({:?}) id = {:016x} : {}",
call.sender,
call.id.as_u64(),
call.sender(),
call.id().as_u64(),
strmsg
));
self.inner_mut().last_call_id = Some(call.id);
self.inner_mut().last_call_id = Some(call.id());
}
pub fn update_shutdown(&mut self) {

View File

@ -53,13 +53,9 @@ impl TableViewItem<PeerTableColumn> for PeerTableData {
PeerTableColumn::NodeId => self
.node_ids
.first()
.cloned()
.map(|n| n.to_string())
.unwrap_or_else(|| "???".to_owned()),
PeerTableColumn::Address => format!(
"{:?}:{}",
self.peer_address.protocol_type(),
self.peer_address.to_socket_addr()
),
PeerTableColumn::Address => self.peer_address.clone(),
PeerTableColumn::LatencyAvg => format!(
"{}",
self.peer_stats

View File

@ -17,6 +17,7 @@ enable-crypto-vld0 = []
enable-crypto-none = []
rt-async-std = ["async-std", "async-std-resolver", "async_executors/async_std", "rtnetlink?/smol_socket", "veilid-tools/rt-async-std"]
rt-tokio = ["tokio", "tokio-util", "tokio-stream", "trust-dns-resolver/tokio-runtime", "async_executors/tokio_tp", "async_executors/tokio_io", "async_executors/tokio_timer", "rtnetlink?/tokio_socket", "veilid-tools/rt-tokio"]
rt-wasm-bindgen = ["veilid-tools/rt-wasm-bindgen", "async_executors/bindgen"]
veilid_core_android_tests = ["dep:paranoid-android"]
veilid_core_ios_tests = ["dep:tracing-oslog"]
@ -65,11 +66,11 @@ rtnetlink = { version = "^0", default-features = false, optional = true }
async-std-resolver = { version = "^0", optional = true }
trust-dns-resolver = { version = "^0", optional = true }
keyvaluedb = { path = "../external/keyvaluedb/keyvaluedb" }
#rkyv = { version = "^0", default_features = false, features = ["std", "alloc", "strict", "size_32", "validation"] }
rkyv = { git = "https://github.com/rkyv/rkyv.git", rev = "57e2a8d", default_features = false, features = ["std", "alloc", "strict", "size_32", "validation"] }
bytecheck = "^0"
rkyv = { version = "^0", default_features = false, features = ["std", "alloc", "strict", "size_32", "validation"] }
data-encoding = { version = "^2" }
weak-table = "0.3.2"
range-set-blaze = "0.1.5"
argon2 = "0.5.0"
# Dependencies for native builds only
# Linux, Windows, Mac, iOS, Android
@ -91,8 +92,8 @@ rustls = "^0.19"
rustls-pemfile = "^0.2"
futures-util = { version = "^0", default-features = false, features = ["async-await", "sink", "std", "io"] }
keyvaluedb-sqlite = { path = "../external/keyvaluedb/keyvaluedb-sqlite" }
socket2 = "^0"
bugsalot = "^0"
socket2 = { version = "^0", features = ["all"] }
bugsalot = { git = "https://github.com/crioux/bugsalot.git" }
chrono = "^0"
libc = "^0"
nix = "^0"

View File

@ -27,13 +27,12 @@ struct Nonce24 @0xb6260db25d8d7dfc {
u2 @2 :UInt64;
}
using PublicKey = Key256; # Node id / DHT key / Route id, etc
using PublicKey = Key256; # Node id / Hash / DHT key / Route id, etc
using Nonce = Nonce24; # One-time encryption nonce
using Signature = Signature512; # Signature block
using TunnelID = UInt64; # Id for tunnels
using CryptoKind = UInt32; # FOURCC code for cryptography type
using ValueSeqNum = UInt32; # sequence numbers for values
using ValueSchema = UInt32; # FOURCC code for schema (0 = freeform, SUB0 = subkey control v0)
using Subkey = UInt32; # subkey index for dht
struct TypedKey @0xe2d567a9f1e61b29 {
@ -312,47 +311,66 @@ struct OperationAppMessage @0x9baf542d81b411f5 {
message @0 :Data; # opaque message to application
}
struct SubkeyRange {
struct SubkeyRange @0xf592dac0a4d0171c {
start @0 :Subkey; # the start of a subkey range
end @1 :Subkey; # the end of a subkey range
}
struct ValueData @0xb4b7416f169f2a3d {
struct SignedValueData @0xb4b7416f169f2a3d {
seq @0 :ValueSeqNum; # sequence number of value
schema @1 :ValueSchema; # fourcc code of schema for value
data @2 :Data; # value or subvalue contents
data @1 :Data; # value or subvalue contents
writer @2 :PublicKey; # the public key of the writer
signature @3 :Signature; # signature of data at this subkey, using the writer key (which may be the same as the owner key)
# signature covers:
# * ownerKey
# * subkey
# * sequence number
# * data
# signature does not need to cover schema because schema is validated upon every set
# so the data either fits, or it doesn't.
}
struct SignedValueDescriptor @0xe7911cd3f9e1b0e7 {
owner @0 :PublicKey; # the public key of the owner
schemaData @1 :Data; # the schema data
# Changing this after key creation is not supported as it would change the dht key
signature @2 :Signature; # Schema data is signed by ownerKey and is verified both by set and get operations
}
struct OperationGetValueQ @0xf88a5b6da5eda5d0 {
key @0 :TypedKey; # the location of the value
subkey @1 :Subkey; # the index of the subkey (0 for the default subkey)
key @0 :TypedKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
subkey @1 :Subkey; # the index of the subkey
wantDescriptor @2 :Bool; # whether or not to include the descriptor for the key
}
struct OperationGetValueA @0xd896bb46f2e0249f {
union {
data @0 :ValueData; # the value if successful
peers @1 :List(PeerInfo); # returned 'closer peer' information if not successful
}
value @0 :SignedValueData; # optional: the value if successful, or if unset, no value returned
peers @1 :List(PeerInfo); # returned 'closer peer' information on either success or failure
descriptor @2 :SignedValueDescriptor; # optional: the descriptor if requested if the value is also returned
}
struct OperationSetValueQ @0xbac06191ff8bdbc5 {
key @0 :TypedKey; # the location of the value
subkey @1 :Subkey; # the index of the subkey (0 for the default subkey)
value @2 :ValueData; # value or subvalue contents (older or equal seq number gets dropped)
key @0 :TypedKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
subkey @1 :Subkey; # the index of the subkey
value @2 :SignedValueData; # value or subvalue contents (older or equal seq number gets dropped)
descriptor @3 :SignedValueDescriptor; # optional: the descriptor if needed
}
struct OperationSetValueA @0x9378d0732dc95be2 {
union {
data @0 :ValueData; # the new value if successful, may be a different value than what was set if the seq number was lower or equal
peers @1 :List(PeerInfo); # returned 'closer peer' information if not successful
}
set @0 :Bool; # true if the set was close enough to be set
value @1 :SignedValueData; # optional: the current value at the key if the set seq number was lower or equal to what was there before
peers @2 :List(PeerInfo); # returned 'closer peer' information on either success or failure
}
struct OperationWatchValueQ @0xf9a5a6c547b9b228 {
key @0 :TypedKey; # key for value to watch
subkeys @1 :List(SubkeyRange); # subkey range to watch, if empty, watch everything
subkeys @1 :List(SubkeyRange); # subkey range to watch (up to 512 subranges), if empty, watch everything
expiration @2 :UInt64; # requested timestamp when this watch will expire in usec since epoch (the returned expiration may be less; 0 for max)
count @3 :UInt32; # requested number of changes to watch for (0 = cancel, 1 = single shot, 2+ = counter, UINT32_MAX = continuous)
watcher @4 :PublicKey; # the watcher performing the watch, can be the owner or a schema member
signature @5 :Signature; # signature of the watcher, must be one of the schema members or the key owner. signature covers: key, subkeys, expiration, count
}
struct OperationWatchValueA @0xa726cab7064ba893 {
@ -364,7 +382,7 @@ struct OperationValueChanged @0xd1c59ebdd8cc1bf6 {
key @0 :TypedKey; # key for value that changed
subkeys @1 :List(SubkeyRange); # subkey range that changed (up to 512 ranges at a time)
count @2 :UInt32; # remaining changes left (0 means watch has expired)
value @3 :ValueData; # first value that changed (the rest can be gotten with getvalue)
value @3 :SignedValueData; # first value that changed (the rest can be gotten with getvalue)
}
struct OperationSupplyBlockQ @0xadbf4c542d749971 {
@ -372,11 +390,9 @@ struct OperationSupplyBlockQ @0xadbf4c542d749971 {
}
struct OperationSupplyBlockA @0xf003822e83b5c0d7 {
union {
expiration @0 :UInt64; # when the block supplier entry will need to be refreshed
expiration @0 :UInt64; # when the block supplier entry will need to be refreshed, or 0 if not successful
peers @1 :List(PeerInfo); # returned 'closer peer' information if not successful
}
}
struct OperationFindBlockQ @0xaf4353ff004c7156 {
blockId @0 :TypedKey; # hash of the block to locate
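
To make the SignedValueData signature comment above concrete, a hedged Rust sketch follows. Only the set of covered fields (ownerKey, subkey, sequence number, data) and the use of the writer key come from the schema comment; the byte layout, endianness, and helper names are assumptions, and the veilid-core crypto types are assumed to be in scope.

```rust
// Hypothetical sketch: field order and endianness are assumptions.
fn signed_value_input(owner: &PublicKey, subkey: u32, seq: u32, data: &[u8]) -> Vec<u8> {
    let mut buf = Vec::with_capacity(PUBLIC_KEY_LENGTH + 4 + 4 + data.len());
    buf.extend_from_slice(&owner.bytes);          // ownerKey
    buf.extend_from_slice(&subkey.to_le_bytes()); // subkey index
    buf.extend_from_slice(&seq.to_le_bytes());    // sequence number
    buf.extend_from_slice(data);                  // subkey contents
    buf
}

fn verify_signed_value(
    vcrypto: &CryptoSystemVersion,
    owner: &PublicKey,
    writer: &PublicKey,
    subkey: u32,
    seq: u32,
    data: &[u8],
    signature: &Signature,
) -> VeilidAPIResult<()> {
    // Per the schema comment, the signature is made with the writer keypair,
    // which may be the same as the owner key.
    vcrypto.verify(writer, &signed_value_input(owner, subkey, seq, data), signature)
}
```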

View File

@ -3,7 +3,7 @@ SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
pushd $SCRIPTDIR 2>/dev/null
if [[ "$1" == "wasm" ]]; then
WASM_BINDGEN_TEST_TIMEOUT=120 wasm-pack test --firefox --headless
WASM_BINDGEN_TEST_TIMEOUT=120 wasm-pack test --firefox --headless --features=rt-wasm-bindgen
elif [[ "$1" == "ios" ]]; then
SYMROOT=/tmp/testout
APPNAME=veilidcore-tests

View File

@ -1,7 +1,8 @@
use crate::crypto::Crypto;
use crate::network_manager::*;
use crate::routing_table::*;
use crate::*;
use crypto::Crypto;
use network_manager::*;
use routing_table::*;
use storage_manager::*;
pub struct AttachmentManagerInner {
last_attachment_state: AttachmentState,
@ -26,6 +27,7 @@ pub struct AttachmentManager {
impl AttachmentManager {
fn new_unlocked_inner(
config: VeilidConfig,
storage_manager: StorageManager,
protected_store: ProtectedStore,
table_store: TableStore,
block_store: BlockStore,
@ -35,6 +37,7 @@ impl AttachmentManager {
config: config.clone(),
network_manager: NetworkManager::new(
config,
storage_manager,
protected_store,
table_store,
block_store,
@ -54,6 +57,7 @@ impl AttachmentManager {
}
pub fn new(
config: VeilidConfig,
storage_manager: StorageManager,
protected_store: ProtectedStore,
table_store: TableStore,
block_store: BlockStore,
@ -63,6 +67,7 @@ impl AttachmentManager {
inner: Arc::new(Mutex::new(Self::new_inner())),
unlocked_inner: Arc::new(Self::new_unlocked_inner(
config,
storage_manager,
protected_store,
table_store,
block_store,

View File

@ -1,12 +1,16 @@
use crate::api_tracing_layer::*;
use crate::attachment_manager::*;
use crate::crypto::Crypto;
use crate::storage_manager::*;
use crate::veilid_api::*;
use crate::veilid_config::*;
use crate::*;
pub type UpdateCallback = Arc<dyn Fn(VeilidUpdate) + Send + Sync>;
/// Internal services startup mechanism
/// Ensures that everything is started up and shut down in the right order,
/// and provides an atomic indication of whether the system is properly operational
struct ServicesContext {
pub config: VeilidConfig,
pub update_callback: UpdateCallback,
@ -16,6 +20,7 @@ struct ServicesContext {
pub block_store: Option<BlockStore>,
pub crypto: Option<Crypto>,
pub attachment_manager: Option<AttachmentManager>,
pub storage_manager: Option<StorageManager>,
}
impl ServicesContext {
@ -28,6 +33,7 @@ impl ServicesContext {
block_store: None,
crypto: None,
attachment_manager: None,
storage_manager: None,
}
}
@ -39,6 +45,7 @@ impl ServicesContext {
block_store: BlockStore,
crypto: Crypto,
attachment_manager: AttachmentManager,
storage_manager: StorageManager,
) -> Self {
Self {
config,
@ -48,6 +55,7 @@ impl ServicesContext {
block_store: Some(block_store),
crypto: Some(crypto),
attachment_manager: Some(attachment_manager),
storage_manager: Some(storage_manager),
}
}
@ -62,15 +70,24 @@ impl ServicesContext {
trace!("init protected store");
let protected_store = ProtectedStore::new(self.config.clone());
if let Err(e) = protected_store.init().await {
error!("failed to init protected store: {}", e);
self.shutdown().await;
return Err(e);
}
self.protected_store = Some(protected_store.clone());
// Set up tablestore
// Set up tablestore and crypto system
trace!("create table store and crypto system");
let table_store = TableStore::new(self.config.clone(), protected_store.clone());
let crypto = Crypto::new(self.config.clone(), table_store.clone());
table_store.set_crypto(crypto.clone());
// Initialize table store first, so crypto code can load caches
// Tablestore can use crypto during init, just not any cached operations or things
// that require flushing back to the tablestore
trace!("init table store");
let table_store = TableStore::new(self.config.clone());
if let Err(e) = table_store.init().await {
error!("failed to init table store: {}", e);
self.shutdown().await;
return Err(e);
}
@ -78,12 +95,8 @@ impl ServicesContext {
// Set up crypto
trace!("init crypto");
let crypto = Crypto::new(
self.config.clone(),
table_store.clone(),
protected_store.clone(),
);
if let Err(e) = crypto.init().await {
error!("failed to init crypto: {}", e);
self.shutdown().await;
return Err(e);
}
@ -93,22 +106,41 @@ impl ServicesContext {
trace!("init block store");
let block_store = BlockStore::new(self.config.clone());
if let Err(e) = block_store.init().await {
error!("failed to init block store: {}", e);
self.shutdown().await;
return Err(e);
}
self.block_store = Some(block_store.clone());
// Set up storage manager
trace!("init storage manager");
let storage_manager = StorageManager::new(
self.config.clone(),
self.crypto.clone().unwrap(),
self.protected_store.clone().unwrap(),
self.table_store.clone().unwrap(),
self.block_store.clone().unwrap(),
);
if let Err(e) = storage_manager.init().await {
error!("failed to init storage manager: {}", e);
self.shutdown().await;
return Err(e);
}
self.storage_manager = Some(storage_manager.clone());
// Set up attachment manager
trace!("init attachment manager");
let update_callback = self.update_callback.clone();
let attachment_manager = AttachmentManager::new(
self.config.clone(),
storage_manager,
protected_store,
table_store,
block_store,
crypto,
);
if let Err(e) = attachment_manager.init(update_callback).await {
error!("failed to init attachment manager: {}", e);
self.shutdown().await;
return Err(e);
}
@ -126,6 +158,10 @@ impl ServicesContext {
trace!("terminate attachment manager");
attachment_manager.terminate().await;
}
if let Some(storage_manager) = &mut self.storage_manager {
trace!("terminate storage manager");
storage_manager.terminate().await;
}
if let Some(block_store) = &mut self.block_store {
trace!("terminate block store");
block_store.terminate().await;
@ -159,6 +195,7 @@ pub struct VeilidCoreContext {
pub config: VeilidConfig,
pub update_callback: UpdateCallback,
// Services
pub storage_manager: StorageManager,
pub protected_store: ProtectedStore,
pub table_store: TableStore,
pub block_store: BlockStore,
@ -171,7 +208,7 @@ impl VeilidCoreContext {
async fn new_with_config_callback(
update_callback: UpdateCallback,
config_callback: ConfigCallback,
) -> Result<VeilidCoreContext, VeilidAPIError> {
) -> VeilidAPIResult<VeilidCoreContext> {
// Set up config from callback
trace!("setup config with callback");
let mut config = VeilidConfig::new();
@ -184,7 +221,7 @@ impl VeilidCoreContext {
async fn new_with_config_json(
update_callback: UpdateCallback,
config_json: String,
) -> Result<VeilidCoreContext, VeilidAPIError> {
) -> VeilidAPIResult<VeilidCoreContext> {
// Set up config from callback
trace!("setup config with json");
let mut config = VeilidConfig::new();
@ -196,7 +233,7 @@ impl VeilidCoreContext {
async fn new_common(
update_callback: UpdateCallback,
config: VeilidConfig,
) -> Result<VeilidCoreContext, VeilidAPIError> {
) -> VeilidAPIResult<VeilidCoreContext> {
cfg_if! {
if #[cfg(target_os = "android")] {
if !crate::intf::android::is_android_ready() {
@ -209,8 +246,9 @@ impl VeilidCoreContext {
sc.startup().await.map_err(VeilidAPIError::generic)?;
Ok(VeilidCoreContext {
update_callback: sc.update_callback,
config: sc.config,
update_callback: sc.update_callback,
storage_manager: sc.storage_manager.unwrap(),
protected_store: sc.protected_store.unwrap(),
table_store: sc.table_store.unwrap(),
block_store: sc.block_store.unwrap(),
@ -229,6 +267,7 @@ impl VeilidCoreContext {
self.block_store,
self.crypto,
self.attachment_manager,
self.storage_manager,
);
sc.shutdown().await;
}
@ -244,7 +283,7 @@ lazy_static::lazy_static! {
pub async fn api_startup(
update_callback: UpdateCallback,
config_callback: ConfigCallback,
) -> Result<VeilidAPI, VeilidAPIError> {
) -> VeilidAPIResult<VeilidAPI> {
// See if we have an API started up already
let mut initialized_lock = INITIALIZED.lock().await;
if *initialized_lock {
@ -267,7 +306,7 @@ pub async fn api_startup(
pub async fn api_startup_json(
update_callback: UpdateCallback,
config_json: String,
) -> Result<VeilidAPI, VeilidAPIError> {
) -> VeilidAPIResult<VeilidAPI> {
// See if we have an API started up already
let mut initialized_lock = INITIALIZED.lock().await;
if *initialized_lock {
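
For context, a hedged sketch of calling the startup entry point above with the new `VeilidAPIResult` alias; the callback body and the empty config string are placeholders, not a working configuration.

```rust
use std::sync::Arc;

// Illustrative only; assumes veilid-core's public types are in scope.
async fn start() -> VeilidAPIResult<VeilidAPI> {
    // UpdateCallback = Arc<dyn Fn(VeilidUpdate) + Send + Sync>
    let update_callback: UpdateCallback = Arc::new(|_update: VeilidUpdate| {
        // react to attachment state, app messages, route changes, shutdown, etc.
    });
    let config_json = String::from("{}"); // placeholder; a real config is required
    api_startup_json(update_callback, config_json).await
}
```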

View File

@ -7,22 +7,26 @@ use core::hash::Hash;
use data_encoding::BASE64URL_NOPAD;
use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};
//////////////////////////////////////////////////////////////////////
/// Length of a public key in bytes
/// Length of a crypto key in bytes
#[allow(dead_code)]
pub const PUBLIC_KEY_LENGTH: usize = 32;
/// Length of a public key in bytes after encoding to base64url
pub const CRYPTO_KEY_LENGTH: usize = 32;
/// Length of a crypto key in bytes after encoding to base64url
#[allow(dead_code)]
pub const PUBLIC_KEY_LENGTH_ENCODED: usize = 43;
pub const CRYPTO_KEY_LENGTH_ENCODED: usize = 43;
/// Length of a crypto key in bytes
#[allow(dead_code)]
pub const PUBLIC_KEY_LENGTH: usize = CRYPTO_KEY_LENGTH;
/// Length of a crypto key in bytes after encoding to base64url
#[allow(dead_code)]
pub const PUBLIC_KEY_LENGTH_ENCODED: usize = CRYPTO_KEY_LENGTH_ENCODED;
/// Length of a secret key in bytes
#[allow(dead_code)]
pub const SECRET_KEY_LENGTH: usize = 32;
pub const SECRET_KEY_LENGTH: usize = CRYPTO_KEY_LENGTH;
/// Length of a secret key in bytes after encoding to base64url
#[allow(dead_code)]
pub const SECRET_KEY_LENGTH_ENCODED: usize = 43;
pub const SECRET_KEY_LENGTH_ENCODED: usize = CRYPTO_KEY_LENGTH_ENCODED;
/// Length of a signature in bytes
#[allow(dead_code)]
pub const SIGNATURE_LENGTH: usize = 64;
@ -37,16 +41,22 @@ pub const NONCE_LENGTH: usize = 24;
pub const NONCE_LENGTH_ENCODED: usize = 32;
/// Length of a shared secret in bytes
#[allow(dead_code)]
pub const SHARED_SECRET_LENGTH: usize = 32;
pub const SHARED_SECRET_LENGTH: usize = CRYPTO_KEY_LENGTH;
/// Length of a shared secret in bytes after encoding to base64url
#[allow(dead_code)]
pub const SHARED_SECRET_LENGTH_ENCODED: usize = 43;
pub const SHARED_SECRET_LENGTH_ENCODED: usize = CRYPTO_KEY_LENGTH_ENCODED;
/// Length of a route id in bytes
#[allow(dead_code)]
pub const ROUTE_ID_LENGTH: usize = 32;
pub const ROUTE_ID_LENGTH: usize = CRYPTO_KEY_LENGTH;
/// Length of a route id in bytes after encoding to base64url
#[allow(dead_code)]
pub const ROUTE_ID_LENGTH_ENCODED: usize = 43;
pub const ROUTE_ID_LENGTH_ENCODED: usize = CRYPTO_KEY_LENGTH_ENCODED;
/// Length of a hash digest in bytes
#[allow(dead_code)]
pub const HASH_DIGEST_LENGTH: usize = CRYPTO_KEY_LENGTH;
/// Length of a hash digest in bytes after encoding to base64url
#[allow(dead_code)]
pub const HASH_DIGEST_LENGTH_ENCODED: usize = CRYPTO_KEY_LENGTH_ENCODED;
//////////////////////////////////////////////////////////////////////
@ -56,11 +66,11 @@ where
{
fn encode(&self) -> String;
fn encoded_len() -> usize;
fn try_decode<S: AsRef<str>>(input: S) -> Result<Self, VeilidAPIError> {
fn try_decode<S: AsRef<str>>(input: S) -> VeilidAPIResult<Self> {
let b = input.as_ref().as_bytes();
Self::try_decode_bytes(b)
}
fn try_decode_bytes(b: &[u8]) -> Result<Self, VeilidAPIError>;
fn try_decode_bytes(b: &[u8]) -> VeilidAPIResult<Self>;
}
//////////////////////////////////////////////////////////////////////
@ -120,18 +130,6 @@ macro_rules! byte_array_type {
Self { bytes }
}
pub fn try_from_vec(v: Vec<u8>) -> Result<Self, VeilidAPIError> {
let vl = v.len();
Ok(Self {
bytes: v.try_into().map_err(|_| {
VeilidAPIError::generic(format!(
"Expected a Vec of length {} but it was {}",
$size, vl
))
})?,
})
}
pub fn bit(&self, index: usize) -> bool {
assert!(index < ($size * 8));
let bi = index / 8;
@ -182,7 +180,7 @@ macro_rules! byte_array_type {
fn encoded_len() -> usize {
$encoded_size
}
fn try_decode_bytes(b: &[u8]) -> Result<Self, VeilidAPIError> {
fn try_decode_bytes(b: &[u8]) -> VeilidAPIResult<Self> {
let mut bytes = [0u8; $size];
let res = BASE64URL_NOPAD.decode_len(b.len());
match res {
@ -244,23 +242,47 @@ macro_rules! byte_array_type {
Self::try_decode(value)
}
}
impl TryFrom<&[u8]> for $name {
type Error = VeilidAPIError;
fn try_from(v: &[u8]) -> Result<Self, Self::Error> {
let vl = v.len();
Ok(Self {
bytes: v.try_into().map_err(|_| {
VeilidAPIError::generic(format!(
"Expected a slice of length {} but it was {}",
$size, vl
))
})?,
})
}
}
impl core::ops::Deref for $name {
type Target = [u8; $size];
fn deref(&self) -> &Self::Target {
&self.bytes
}
}
impl core::ops::DerefMut for $name {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.bytes
}
}
};
}
/////////////////////////////////////////
byte_array_type!(PublicKey, PUBLIC_KEY_LENGTH, PUBLIC_KEY_LENGTH_ENCODED);
byte_array_type!(SecretKey, SECRET_KEY_LENGTH, SECRET_KEY_LENGTH_ENCODED);
byte_array_type!(CryptoKey, CRYPTO_KEY_LENGTH, CRYPTO_KEY_LENGTH_ENCODED);
pub type PublicKey = CryptoKey;
pub type SecretKey = CryptoKey;
pub type HashDigest = CryptoKey;
pub type SharedSecret = CryptoKey;
pub type RouteId = CryptoKey;
pub type CryptoKeyDistance = CryptoKey;
byte_array_type!(Signature, SIGNATURE_LENGTH, SIGNATURE_LENGTH_ENCODED);
byte_array_type!(
PublicKeyDistance,
PUBLIC_KEY_LENGTH,
PUBLIC_KEY_LENGTH_ENCODED
);
byte_array_type!(Nonce, NONCE_LENGTH, NONCE_LENGTH_ENCODED);
byte_array_type!(
SharedSecret,
SHARED_SECRET_LENGTH,
SHARED_SECRET_LENGTH_ENCODED
);
byte_array_type!(RouteId, ROUTE_ID_LENGTH, ROUTE_ID_LENGTH_ENCODED);
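
A hedged usage sketch of the collapsed key types above: the formerly distinct key structs become aliases of a single `CryptoKey`, and the macro now derives `TryFrom<&[u8]>` plus `Deref` to the underlying byte array. The function is illustrative and assumes the crate's crypto types are in scope.

```rust
// Illustrative only; assumes `use crate::crypto::*;` inside veilid-core.
fn key_from_slice(raw: &[u8]) -> VeilidAPIResult<PublicKey> {
    let key = PublicKey::try_from(raw)?;         // errors unless raw.len() == 32
    let digest: HashDigest = key;                // PublicKey and HashDigest are both CryptoKey
    assert_eq!(digest.len(), CRYPTO_KEY_LENGTH); // Deref to [u8; CRYPTO_KEY_LENGTH]
    Ok(digest)
}
```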

View File

@ -6,52 +6,36 @@ pub trait CryptoSystem {
fn crypto(&self) -> Crypto;
// Cached Operations
fn cached_dh(
&self,
key: &PublicKey,
secret: &SecretKey,
) -> Result<SharedSecret, VeilidAPIError>;
fn cached_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult<SharedSecret>;
// Generation
fn random_bytes(&self, len: u32) -> Vec<u8>;
fn default_salt_length(&self) -> u32;
fn hash_password(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult<String>;
fn verify_password(&self, password: &[u8], password_hash: &str) -> VeilidAPIResult<bool>;
fn derive_shared_secret(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult<SharedSecret>;
fn random_nonce(&self) -> Nonce;
fn random_shared_secret(&self) -> SharedSecret;
fn compute_dh(
&self,
key: &PublicKey,
secret: &SecretKey,
) -> Result<SharedSecret, VeilidAPIError>;
fn compute_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult<SharedSecret>;
fn generate_keypair(&self) -> KeyPair;
fn generate_hash(&self, data: &[u8]) -> PublicKey;
fn generate_hash_reader(
&self,
reader: &mut dyn std::io::Read,
) -> Result<PublicKey, VeilidAPIError>;
fn generate_hash(&self, data: &[u8]) -> HashDigest;
fn generate_hash_reader(&self, reader: &mut dyn std::io::Read) -> VeilidAPIResult<HashDigest>;
// Validation
fn validate_keypair(&self, dht_key: &PublicKey, dht_key_secret: &SecretKey) -> bool;
fn validate_hash(&self, data: &[u8], dht_key: &PublicKey) -> bool;
fn validate_keypair(&self, key: &PublicKey, secret: &SecretKey) -> bool;
fn validate_hash(&self, data: &[u8], hash: &HashDigest) -> bool;
fn validate_hash_reader(
&self,
reader: &mut dyn std::io::Read,
key: &PublicKey,
) -> Result<bool, VeilidAPIError>;
hash: &HashDigest,
) -> VeilidAPIResult<bool>;
// Distance Metric
fn distance(&self, key1: &PublicKey, key2: &PublicKey) -> PublicKeyDistance;
fn distance(&self, key1: &CryptoKey, key2: &CryptoKey) -> CryptoKeyDistance;
// Authentication
fn sign(
&self,
key: &PublicKey,
secret: &SecretKey,
data: &[u8],
) -> Result<Signature, VeilidAPIError>;
fn verify(
&self,
key: &PublicKey,
data: &[u8],
signature: &Signature,
) -> Result<(), VeilidAPIError>;
fn sign(&self, key: &PublicKey, secret: &SecretKey, data: &[u8]) -> VeilidAPIResult<Signature>;
fn verify(&self, key: &PublicKey, data: &[u8], signature: &Signature) -> VeilidAPIResult<()>;
// AEAD Encrypt/Decrypt
fn aead_overhead(&self) -> usize;
@ -61,53 +45,53 @@ pub trait CryptoSystem {
nonce: &Nonce,
shared_secret: &SharedSecret,
associated_data: Option<&[u8]>,
) -> Result<(), VeilidAPIError>;
) -> VeilidAPIResult<()>;
fn decrypt_aead(
&self,
body: &[u8],
nonce: &Nonce,
shared_secret: &SharedSecret,
associated_data: Option<&[u8]>,
) -> Result<Vec<u8>, VeilidAPIError>;
) -> VeilidAPIResult<Vec<u8>>;
fn encrypt_in_place_aead(
&self,
body: &mut Vec<u8>,
nonce: &Nonce,
shared_secret: &SharedSecret,
associated_data: Option<&[u8]>,
) -> Result<(), VeilidAPIError>;
) -> VeilidAPIResult<()>;
fn encrypt_aead(
&self,
body: &[u8],
nonce: &Nonce,
shared_secret: &SharedSecret,
associated_data: Option<&[u8]>,
) -> Result<Vec<u8>, VeilidAPIError>;
) -> VeilidAPIResult<Vec<u8>>;
// NoAuth Encrypt/Decrypt
fn crypt_in_place_no_auth(
&self,
body: &mut Vec<u8>,
nonce: &Nonce,
body: &mut [u8],
nonce: &[u8; NONCE_LENGTH],
shared_secret: &SharedSecret,
);
fn crypt_b2b_no_auth(
&self,
in_buf: &[u8],
out_buf: &mut [u8],
nonce: &Nonce,
nonce: &[u8; NONCE_LENGTH],
shared_secret: &SharedSecret,
);
fn crypt_no_auth_aligned_8(
&self,
body: &[u8],
nonce: &Nonce,
nonce: &[u8; NONCE_LENGTH],
shared_secret: &SharedSecret,
) -> Vec<u8>;
fn crypt_no_auth_unaligned(
&self,
body: &[u8],
nonce: &Nonce,
nonce: &[u8; NONCE_LENGTH],
shared_secret: &SharedSecret,
) -> Vec<u8>;
}
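
A hedged round-trip sketch of the new password APIs added to the CryptoSystem trait above; `vcrypto` is assumed to be a `CryptoSystemVersion` obtained from `Crypto::get()` (as in the tests later in this commit), and the password bytes are illustrative.

```rust
// Illustrative only; salt length must be within the system's accepted range.
fn password_roundtrip(vcrypto: &CryptoSystemVersion) -> VeilidAPIResult<()> {
    let salt = vcrypto.random_bytes(vcrypto.default_salt_length());
    let phc = vcrypto.hash_password(b"hunter2", &salt)?;         // PHC-style string
    assert!(vcrypto.verify_password(b"hunter2", &phc)?);         // matches
    let _secret: SharedSecret = vcrypto.derive_shared_secret(b"hunter2", &salt)?;
    Ok(())
}
```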

View File

@ -66,7 +66,7 @@ impl Envelope {
}
}
pub fn from_signed_data(crypto: Crypto, data: &[u8]) -> Result<Envelope, VeilidAPIError> {
pub fn from_signed_data(crypto: Crypto, data: &[u8]) -> VeilidAPIResult<Envelope> {
// Ensure we are at least the length of the envelope
// Silent drop here, as we use zero length packets as part of the protocol for hole punching
if data.len() < MIN_ENVELOPE_SIZE {
@ -175,7 +175,7 @@ impl Envelope {
crypto: Crypto,
data: &[u8],
node_id_secret: &SecretKey,
) -> Result<Vec<u8>, VeilidAPIError> {
) -> VeilidAPIResult<Vec<u8>> {
// Get DH secret
let vcrypto = crypto
.get(self.crypto_kind)
@ -183,8 +183,11 @@ impl Envelope {
let dh_secret = vcrypto.cached_dh(&self.sender_id, node_id_secret)?;
// Decrypt message without authentication
let body =
vcrypto.crypt_no_auth_aligned_8(&data[0x6A..data.len() - 64], &self.nonce, &dh_secret);
let body = vcrypto.crypt_no_auth_aligned_8(
&data[0x6A..data.len() - 64],
&self.nonce.bytes,
&dh_secret,
);
Ok(body)
}
@ -194,7 +197,7 @@ impl Envelope {
crypto: Crypto,
body: &[u8],
node_id_secret: &SecretKey,
) -> Result<Vec<u8>, VeilidAPIError> {
) -> VeilidAPIResult<Vec<u8>> {
// Ensure body isn't too long
let envelope_size: usize = body.len() + MIN_ENVELOPE_SIZE;
if envelope_size > MAX_ENVELOPE_SIZE {
@ -227,7 +230,7 @@ impl Envelope {
data[0x4A..0x6A].copy_from_slice(&self.recipient_id.bytes);
// Encrypt and authenticate message
let encrypted_body = vcrypto.crypt_no_auth_unaligned(body, &self.nonce, &dh_secret);
let encrypted_body = vcrypto.crypt_no_auth_unaligned(body, &self.nonce.bytes, &dh_secret);
// Write body
if !encrypted_body.is_empty() {

View File

@ -4,7 +4,6 @@ mod dh_cache;
mod envelope;
mod receipt;
mod types;
mod value;
pub mod crypto_system;
#[cfg(feature = "enable-crypto-none")]
@ -20,7 +19,6 @@ pub use dh_cache::*;
pub use envelope::*;
pub use receipt::*;
pub use types::*;
pub use value::*;
#[cfg(feature = "enable-crypto-none")]
pub use none::*;
@ -84,7 +82,6 @@ struct CryptoInner {
struct CryptoUnlockedInner {
config: VeilidConfig,
table_store: TableStore,
protected_store: ProtectedStore,
}
/// Crypto factory implementation
@ -106,16 +103,11 @@ impl Crypto {
}
}
pub fn new(
config: VeilidConfig,
table_store: TableStore,
protected_store: ProtectedStore,
) -> Self {
pub fn new(config: VeilidConfig, table_store: TableStore) -> Self {
let out = Self {
unlocked_inner: Arc::new(CryptoUnlockedInner {
config,
table_store,
protected_store,
}),
inner: Arc::new(Mutex::new(Self::new_inner())),
};
@ -140,12 +132,11 @@ impl Crypto {
pub async fn init(&self) -> EyreResult<()> {
trace!("Crypto::init");
let table_store = self.unlocked_inner.table_store.clone();
// Init node id from config
if let Err(e) = self
.unlocked_inner
.config
.init_node_ids(self.clone(), self.unlocked_inner.protected_store.clone())
.init_node_ids(self.clone(), table_store.clone())
.await
{
return Err(e).wrap_err("init node id failed");
@ -171,13 +162,16 @@ impl Crypto {
};
// load caches if they are valid for this node id
let mut db = table_store.open("crypto_caches", 1).await?;
let caches_valid = match db.load(0, b"cache_validity_key")? {
let mut db = table_store
.open("crypto_caches", 1)
.await
.wrap_err("failed to open crypto_caches")?;
let caches_valid = match db.load(0, b"cache_validity_key").await? {
Some(v) => v == cache_validity_key,
None => false,
};
if caches_valid {
if let Some(b) = db.load(0, b"dh_cache")? {
if let Some(b) = db.load(0, b"dh_cache").await? {
let mut inner = self.inner.lock();
bytes_to_cache(&b, &mut inner.dh_cache);
}
@ -263,7 +257,7 @@ impl Crypto {
node_ids: &[TypedKey],
data: &[u8],
typed_signatures: &[TypedSignature],
) -> Result<TypedKeySet, VeilidAPIError> {
) -> VeilidAPIResult<TypedKeySet> {
let mut out = TypedKeySet::with_capacity(node_ids.len());
for sig in typed_signatures {
for nid in node_ids {
@ -286,7 +280,7 @@ impl Crypto {
data: &[u8],
typed_key_pairs: &[TypedKeyPair],
transform: F,
) -> Result<Vec<R>, VeilidAPIError>
) -> VeilidAPIResult<Vec<R>>
where
F: Fn(&TypedKeyPair, Signature) -> R,
{
@ -302,7 +296,7 @@ impl Crypto {
/// Generate keypair
/// Does not require startup/init
pub fn generate_keypair(crypto_kind: CryptoKind) -> Result<TypedKeyPair, VeilidAPIError> {
pub fn generate_keypair(crypto_kind: CryptoKind) -> VeilidAPIResult<TypedKeyPair> {
#[cfg(feature = "enable-crypto-vld0")]
if crypto_kind == CRYPTO_KIND_VLD0 {
let kp = vld0_generate_keypair();
@ -323,7 +317,7 @@ impl Crypto {
vcrypto: &T,
key: &PublicKey,
secret: &SecretKey,
) -> Result<SharedSecret, VeilidAPIError> {
) -> VeilidAPIResult<SharedSecret> {
Ok(
match self.inner.lock().dh_cache.entry(
DHCacheKey {

View File

@ -1,7 +1,8 @@
use super::*;
use argon2::password_hash::Salt;
use data_encoding::BASE64URL_NOPAD;
use digest::Digest;
use rand::RngCore;
const AEAD_OVERHEAD: usize = PUBLIC_KEY_LENGTH;
pub const CRYPTO_KIND_NONE: CryptoKind = FourCC([b'N', b'O', b'N', b'E']);
@ -70,16 +71,49 @@ impl CryptoSystem for CryptoSystemNONE {
}
// Cached Operations
fn cached_dh(
&self,
key: &PublicKey,
secret: &SecretKey,
) -> Result<SharedSecret, VeilidAPIError> {
fn cached_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult<SharedSecret> {
self.crypto
.cached_dh_internal::<CryptoSystemNONE>(self, key, secret)
}
// Generation
fn random_bytes(&self, len: u32) -> Vec<u8> {
let mut bytes = unsafe { unaligned_u8_vec_uninit(len as usize) };
random_bytes(bytes.as_mut());
bytes
}
fn default_salt_length(&self) -> u32 {
4
}
fn hash_password(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult<String> {
if salt.len() < Salt::MIN_LENGTH || salt.len() > Salt::MAX_LENGTH {
apibail_generic!("invalid salt length");
}
Ok(format!(
"{}:{}",
BASE64URL_NOPAD.encode(salt),
BASE64URL_NOPAD.encode(password)
))
}
fn verify_password(&self, password: &[u8], password_hash: &str) -> VeilidAPIResult<bool> {
let Some((salt, _)) = password_hash.split_once(":") else {
apibail_generic!("invalid format");
};
let Ok(salt) = BASE64URL_NOPAD.decode(salt.as_bytes()) else {
apibail_generic!("invalid salt");
};
return Ok(&self.hash_password(password, &salt)? == password_hash);
}
fn derive_shared_secret(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult<SharedSecret> {
if salt.len() < Salt::MIN_LENGTH || salt.len() > Salt::MAX_LENGTH {
apibail_generic!("invalid salt length");
}
Ok(SharedSecret::new(
*blake3::hash(self.hash_password(password, salt)?.as_bytes()).as_bytes(),
))
}
fn random_nonce(&self) -> Nonce {
let mut nonce = [0u8; NONCE_LENGTH];
random_bytes(&mut nonce).unwrap();
@ -90,11 +124,7 @@ impl CryptoSystem for CryptoSystemNONE {
random_bytes(&mut s).unwrap();
SharedSecret::new(s)
}
fn compute_dh(
&self,
key: &PublicKey,
secret: &SecretKey,
) -> Result<SharedSecret, VeilidAPIError> {
fn compute_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult<SharedSecret> {
let s = do_xor_32(&key.bytes, &secret.bytes);
Ok(SharedSecret::new(s))
}
@ -104,10 +134,7 @@ impl CryptoSystem for CryptoSystemNONE {
fn generate_hash(&self, data: &[u8]) -> PublicKey {
PublicKey::new(*blake3::hash(data).as_bytes())
}
fn generate_hash_reader(
&self,
reader: &mut dyn std::io::Read,
) -> Result<PublicKey, VeilidAPIError> {
fn generate_hash_reader(&self, reader: &mut dyn std::io::Read) -> VeilidAPIResult<PublicKey> {
let mut hasher = blake3::Hasher::new();
std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?;
Ok(PublicKey::new(*hasher.finalize().as_bytes()))
@ -132,21 +159,21 @@ impl CryptoSystem for CryptoSystemNONE {
&self,
reader: &mut dyn std::io::Read,
dht_key: &PublicKey,
) -> Result<bool, VeilidAPIError> {
) -> VeilidAPIResult<bool> {
let mut hasher = blake3::Hasher::new();
std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?;
let bytes = *hasher.finalize().as_bytes();
Ok(bytes == dht_key.bytes)
}
// Distance Metric
fn distance(&self, key1: &PublicKey, key2: &PublicKey) -> PublicKeyDistance {
fn distance(&self, key1: &PublicKey, key2: &PublicKey) -> CryptoKeyDistance {
let mut bytes = [0u8; PUBLIC_KEY_LENGTH];
for (n, byte) in bytes.iter_mut().enumerate() {
*byte = key1.bytes[n] ^ key2.bytes[n];
}
PublicKeyDistance::new(bytes)
CryptoKeyDistance::new(bytes)
}
// Authentication
@ -155,7 +182,7 @@ impl CryptoSystem for CryptoSystemNONE {
dht_key: &PublicKey,
dht_key_secret: &SecretKey,
data: &[u8],
) -> Result<Signature, VeilidAPIError> {
) -> VeilidAPIResult<Signature> {
if !is_bytes_eq_32(&do_xor_32(&dht_key.bytes, &dht_key_secret.bytes), 0xFFu8) {
return Err(VeilidAPIError::parse_error(
"Keypair is invalid",
@ -178,7 +205,7 @@ impl CryptoSystem for CryptoSystemNONE {
dht_key: &PublicKey,
data: &[u8],
signature: &Signature,
) -> Result<(), VeilidAPIError> {
) -> VeilidAPIResult<()> {
let mut dig = Blake3Digest512::new();
dig.update(data);
let sig = dig.finalize();
@ -215,7 +242,7 @@ impl CryptoSystem for CryptoSystemNONE {
nonce: &Nonce,
shared_secret: &SharedSecret,
_associated_data: Option<&[u8]>,
) -> Result<(), VeilidAPIError> {
) -> VeilidAPIResult<()> {
let mut blob = nonce.bytes.to_vec();
blob.extend_from_slice(&[0u8; 8]);
let blob = do_xor_32(&blob, &shared_secret.bytes);
@ -237,7 +264,7 @@ impl CryptoSystem for CryptoSystemNONE {
nonce: &Nonce,
shared_secret: &SharedSecret,
associated_data: Option<&[u8]>,
) -> Result<Vec<u8>, VeilidAPIError> {
) -> VeilidAPIResult<Vec<u8>> {
let mut out = body.to_vec();
self.decrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data)
.map_err(map_to_string)
@ -251,7 +278,7 @@ impl CryptoSystem for CryptoSystemNONE {
nonce: &Nonce,
shared_secret: &SharedSecret,
_associated_data: Option<&[u8]>,
) -> Result<(), VeilidAPIError> {
) -> VeilidAPIResult<()> {
let mut blob = nonce.bytes.to_vec();
blob.extend_from_slice(&[0u8; 8]);
let blob = do_xor_32(&blob, &shared_secret.bytes);
@ -266,7 +293,7 @@ impl CryptoSystem for CryptoSystemNONE {
nonce: &Nonce,
shared_secret: &SharedSecret,
associated_data: Option<&[u8]>,
) -> Result<Vec<u8>, VeilidAPIError> {
) -> VeilidAPIResult<Vec<u8>> {
let mut out = body.to_vec();
self.encrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data)
.map_err(map_to_string)
@ -275,12 +302,7 @@ impl CryptoSystem for CryptoSystemNONE {
}
// NoAuth Encrypt/Decrypt
fn crypt_in_place_no_auth(
&self,
body: &mut Vec<u8>,
nonce: &Nonce,
shared_secret: &SharedSecret,
) {
fn crypt_in_place_no_auth(&self, body: &mut [u8], nonce: &Nonce, shared_secret: &SharedSecret) {
let mut blob = nonce.bytes.to_vec();
blob.extend_from_slice(&[0u8; 8]);
let blob = do_xor_32(&blob, &shared_secret.bytes);

View File

@ -49,7 +49,7 @@ impl Receipt {
nonce: Nonce,
sender_id: PublicKey,
extra_data: D,
) -> Result<Self, VeilidAPIError> {
) -> VeilidAPIResult<Self> {
assert!(VALID_ENVELOPE_VERSIONS.contains(&version));
assert!(VALID_CRYPTO_KINDS.contains(&crypto_kind));
@ -68,7 +68,7 @@ impl Receipt {
})
}
pub fn from_signed_data(crypto: Crypto, data: &[u8]) -> Result<Receipt, VeilidAPIError> {
pub fn from_signed_data(crypto: Crypto, data: &[u8]) -> VeilidAPIResult<Receipt> {
// Ensure we are at least the length of the envelope
if data.len() < MIN_RECEIPT_SIZE {
apibail_parse_error!("receipt too small", data.len());
@ -153,11 +153,7 @@ impl Receipt {
})
}
pub fn to_signed_data(
&self,
crypto: Crypto,
secret: &SecretKey,
) -> Result<Vec<u8>, VeilidAPIError> {
pub fn to_signed_data(&self, crypto: Crypto, secret: &SecretKey) -> VeilidAPIResult<Vec<u8>> {
// Ensure extra data isn't too long
let receipt_size: usize = self.extra_data.len() + MIN_RECEIPT_SIZE;
if receipt_size > MAX_RECEIPT_SIZE {

View File

@ -162,6 +162,66 @@ pub async fn test_dh(vcrypto: CryptoSystemVersion) {
trace!("cached_dh: {:?}", r5);
}
pub async fn test_generation(vcrypto: CryptoSystemVersion) {
let b1 = vcrypto.random_bytes(32);
let b2 = vcrypto.random_bytes(32);
assert_ne!(b1, b2);
assert_eq!(b1.len(), 32);
assert_eq!(b2.len(), 32);
let b3 = vcrypto.random_bytes(0);
let b4 = vcrypto.random_bytes(0);
assert_eq!(b3, b4);
assert_eq!(b3.len(), 0);
assert_ne!(vcrypto.default_salt_length(), 0);
let pstr1 = vcrypto.hash_password(b"abc123", b"qwerasdf").unwrap();
let pstr2 = vcrypto.hash_password(b"abc123", b"qwerasdf").unwrap();
assert_eq!(pstr1, pstr2);
let pstr3 = vcrypto.hash_password(b"abc123", b"qwerasdg").unwrap();
assert_ne!(pstr1, pstr3);
let pstr4 = vcrypto.hash_password(b"abc124", b"qwerasdf").unwrap();
assert_ne!(pstr1, pstr4);
let pstr5 = vcrypto.hash_password(b"abc124", b"qwerasdg").unwrap();
assert_ne!(pstr3, pstr5);
vcrypto
.hash_password(b"abc123", b"qwe")
.expect_err("should reject short salt");
vcrypto
.hash_password(
b"abc123",
b"qwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerz",
)
.expect_err("should reject long salt");
assert!(vcrypto.verify_password(b"abc123", &pstr1).unwrap());
assert!(vcrypto.verify_password(b"abc123", &pstr2).unwrap());
assert!(vcrypto.verify_password(b"abc123", &pstr3).unwrap());
assert!(!vcrypto.verify_password(b"abc123", &pstr4).unwrap());
assert!(!vcrypto.verify_password(b"abc123", &pstr5).unwrap());
let ss1 = vcrypto.derive_shared_secret(b"abc123", b"qwerasdf");
let ss2 = vcrypto.derive_shared_secret(b"abc123", b"qwerasdf");
assert_eq!(ss1, ss2);
let ss3 = vcrypto.derive_shared_secret(b"abc123", b"qwerasdg");
assert_ne!(ss1, ss3);
let ss4 = vcrypto.derive_shared_secret(b"abc124", b"qwerasdf");
assert_ne!(ss1, ss4);
let ss5 = vcrypto.derive_shared_secret(b"abc124", b"qwerasdg");
assert_ne!(ss3, ss5);
vcrypto
.derive_shared_secret(b"abc123", b"qwe")
.expect_err("should reject short salt");
vcrypto
.derive_shared_secret(
b"abc123",
b"qwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerz",
)
.expect_err("should reject long salt");
}
pub async fn test_all() {
let api = crypto_tests_startup().await;
let crypto = api.crypto().unwrap();
@ -171,7 +231,8 @@ pub async fn test_all() {
let vcrypto = crypto.get(v).unwrap();
test_aead(vcrypto.clone()).await;
test_no_auth(vcrypto.clone()).await;
test_dh(vcrypto).await;
test_dh(vcrypto.clone()).await;
test_generation(vcrypto).await;
}
crypto_tests_shutdown(api.clone()).await;

View File

@ -225,6 +225,38 @@ pub async fn test_encode_decode(vcrypto: CryptoSystemVersion) {
assert!(f2.is_err());
}
pub async fn test_typed_convert(vcrypto: CryptoSystemVersion) {
let tks1 = format!(
"{}:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ",
vcrypto.kind().to_string()
);
let tk1 = TypedKey::from_str(&tks1).expect("failed");
let tks1x = tk1.to_string();
assert_eq!(tks1, tks1x);
let tks2 = format!(
"{}:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzd",
vcrypto.kind().to_string()
);
let _tk2 = TypedKey::from_str(&tks2).expect_err("succeeded when it shouldnt have");
let tks3 = format!("XXXX:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ",);
let tk3 = TypedKey::from_str(&tks3).expect("failed");
let tks3x = tk3.to_string();
assert_eq!(tks3, tks3x);
let tks4 = format!("XXXX:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzd",);
let _tk4 = TypedKey::from_str(&tks4).expect_err("succeeded when it shouldnt have");
let tks5 = format!("XXX:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ",);
let _tk5 = TypedKey::from_str(&tks5).expect_err("succeeded when it shouldnt have");
let tks6 = format!("7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ",);
let tk6 = TypedKey::from_str(&tks6).expect("failed");
let tks6x = tk6.to_string();
assert!(tks6x.ends_with(&tks6));
}
async fn test_hash(vcrypto: CryptoSystemVersion) {
let mut s = BTreeSet::<PublicKey>::new();
@ -333,6 +365,7 @@ pub async fn test_all() {
test_sign_and_verify(vcrypto.clone()).await;
test_key_conversions(vcrypto.clone()).await;
test_encode_decode(vcrypto.clone()).await;
test_typed_convert(vcrypto.clone()).await;
test_hash(vcrypto.clone()).await;
test_operations(vcrypto).await;
}

View File

@ -127,12 +127,17 @@ where
type Err = VeilidAPIError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let b = s.as_bytes();
if b.len() != (5 + K::encoded_len()) || b[4..5] != b":"[..] {
apibail_parse_error!("invalid typed key", s);
}
if b.len() == (5 + K::encoded_len()) && b[4..5] == b":"[..] {
let kind: CryptoKind = b[0..4].try_into().expect("should not fail to convert");
let value = K::try_decode_bytes(&b[5..])?;
Ok(Self { kind, value })
} else if b.len() == K::encoded_len() {
let kind = best_crypto_kind();
let value = K::try_decode_bytes(b)?;
Ok(Self { kind, value })
} else {
apibail_generic!("invalid cryptotyped format");
}
}
}
impl<'de, K> Deserialize<'de> for CryptoTyped<K>
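
An illustrative sketch of the two string forms the FromStr impl above now accepts: "KIND:base64url-key" and a bare base64url key, which falls back to `best_crypto_kind()`. The key string is the one used by test_typed_convert earlier in this commit, and availability of the VLD0 kind is assumed.

```rust
use core::str::FromStr;

fn parse_forms() -> VeilidAPIResult<()> {
    // Fully qualified: 4-character kind, ':', then the encoded key
    let _a = TypedKey::from_str("VLD0:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ")?;
    // Bare key: the kind falls back to best_crypto_kind()
    let _b = TypedKey::from_str("7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ")?;
    Ok(())
}
```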

View File

@ -141,9 +141,9 @@ where
}
false
}
pub fn contains_key(&self, key: &K) -> bool {
pub fn contains_value(&self, value: &K) -> bool {
for tk in &self.items {
if tk.value == *key {
if tk.value == *value {
return true;
}
}
@ -282,6 +282,28 @@ where
tks
}
}
impl<K> From<&[CryptoTyped<K>]> for CryptoTypedSet<K>
where
K: Clone
+ Copy
+ fmt::Debug
+ fmt::Display
+ FromStr
+ PartialEq
+ Eq
+ PartialOrd
+ Ord
+ Hash
+ RkyvArchive
+ Encodable,
<K as RkyvArchive>::Archived: Hash + PartialEq + Eq,
{
fn from(x: &[CryptoTyped<K>]) -> Self {
let mut tks = CryptoTypedSet::<K>::with_capacity(x.len());
tks.add_all(x);
tks
}
}
impl<K> Into<Vec<CryptoTyped<K>>> for CryptoTypedSet<K>
where
K: Clone

View File

@ -39,7 +39,7 @@ impl Encodable for KeyPair {
fn encoded_len() -> usize {
PublicKey::encoded_len() + 1 + SecretKey::encoded_len()
}
fn try_decode_bytes(b: &[u8]) -> Result<Self, VeilidAPIError> {
fn try_decode_bytes(b: &[u8]) -> VeilidAPIResult<Self> {
if b.len() != Self::encoded_len() {
apibail_parse_error!("input has wrong encoded length", format!("len={}", b.len()));
}
@ -56,9 +56,7 @@ impl fmt::Display for KeyPair {
impl fmt::Debug for KeyPair {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, concat!(stringify!($name), "("))?;
write!(f, "{}", self.encode())?;
write!(f, ")")
write!(f, "KeyPair({})", self.encode())
}
}

View File

@ -5,8 +5,6 @@ use core::convert::TryInto;
use core::fmt;
use core::hash::Hash;
use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};
/// Cryptography version fourcc code
pub type CryptoKind = FourCC;
@ -55,5 +53,10 @@ pub type TypedKey = CryptoTyped<PublicKey>;
pub type TypedSecret = CryptoTyped<SecretKey>;
pub type TypedKeyPair = CryptoTyped<KeyPair>;
pub type TypedSignature = CryptoTyped<Signature>;
pub type TypedSharedSecret = CryptoTyped<SharedSecret>;
pub type TypedKeySet = CryptoTypedSet<PublicKey>;
pub type TypedSecretSet = CryptoTypedSet<SecretKey>;
pub type TypedKeyPairSet = CryptoTypedSet<KeyPair>;
pub type TypedSignatureSet = CryptoTypedSet<Signature>;
pub type TypedSharedSecretSet = CryptoTypedSet<SharedSecret>;

View File

@ -1,5 +1,9 @@
use super::*;
use argon2::{
password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, Salt, SaltString},
Argon2,
};
use chacha20::cipher::{KeyIvInit, StreamCipher};
use chacha20::XChaCha20;
use chacha20poly1305 as ch;
@ -13,7 +17,7 @@ use x25519_dalek as xd;
const AEAD_OVERHEAD: usize = 16;
pub const CRYPTO_KIND_VLD0: CryptoKind = FourCC([b'V', b'L', b'D', b'0']);
fn ed25519_to_x25519_pk(key: &ed::PublicKey) -> Result<xd::PublicKey, VeilidAPIError> {
fn ed25519_to_x25519_pk(key: &ed::PublicKey) -> VeilidAPIResult<xd::PublicKey> {
let bytes = key.to_bytes();
let compressed = cd::edwards::CompressedEdwardsY(bytes);
let point = compressed
@ -22,7 +26,7 @@ fn ed25519_to_x25519_pk(key: &ed::PublicKey) -> Result<xd::PublicKey, VeilidAPIE
let mp = point.to_montgomery();
Ok(xd::PublicKey::from(mp.to_bytes()))
}
fn ed25519_to_x25519_sk(key: &ed::SecretKey) -> Result<xd::StaticSecret, VeilidAPIError> {
fn ed25519_to_x25519_sk(key: &ed::SecretKey) -> VeilidAPIResult<xd::StaticSecret> {
let exp = ed::ExpandedSecretKey::from(key);
let bytes: [u8; ed::EXPANDED_SECRET_KEY_LENGTH] = exp.to_bytes();
let lowbytes: [u8; 32] = bytes[0..32].try_into().map_err(VeilidAPIError::internal)?;
@ -61,31 +65,71 @@ impl CryptoSystem for CryptoSystemVLD0 {
}
// Cached Operations
fn cached_dh(
&self,
key: &PublicKey,
secret: &SecretKey,
) -> Result<SharedSecret, VeilidAPIError> {
fn cached_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult<SharedSecret> {
self.crypto
.cached_dh_internal::<CryptoSystemVLD0>(self, key, secret)
}
// Generation
fn random_bytes(&self, len: u32) -> Vec<u8> {
let mut bytes = unsafe { unaligned_u8_vec_uninit(len as usize) };
random_bytes(bytes.as_mut());
bytes
}
fn default_salt_length(&self) -> u32 {
16
}
fn hash_password(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult<String> {
if salt.len() < Salt::MIN_LENGTH || salt.len() > Salt::MAX_LENGTH {
apibail_generic!("invalid salt length");
}
// Hash password to PHC string ($argon2id$v=19$...)
let salt = SaltString::encode_b64(salt).map_err(VeilidAPIError::generic)?;
// Argon2 with default params (Argon2id v19)
let argon2 = Argon2::default();
let password_hash = argon2
.hash_password(password, &salt)
.map_err(VeilidAPIError::generic)?
.to_string();
Ok(password_hash)
}
fn verify_password(&self, password: &[u8], password_hash: &str) -> VeilidAPIResult<bool> {
let parsed_hash = PasswordHash::new(password_hash).map_err(VeilidAPIError::generic)?;
// Argon2 with default params (Argon2id v19)
let argon2 = Argon2::default();
Ok(argon2.verify_password(password, &parsed_hash).is_ok())
}
fn derive_shared_secret(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult<SharedSecret> {
if salt.len() < Salt::MIN_LENGTH || salt.len() > Salt::MAX_LENGTH {
apibail_generic!("invalid salt length");
}
// Argon2 with default params (Argon2id v19)
let argon2 = Argon2::default();
let mut output_key_material = [0u8; SHARED_SECRET_LENGTH];
argon2
.hash_password_into(password, salt, &mut output_key_material)
.map_err(VeilidAPIError::generic)?;
Ok(SharedSecret::new(output_key_material))
}
fn random_nonce(&self) -> Nonce {
let mut nonce = [0u8; NONCE_LENGTH];
random_bytes(&mut nonce).unwrap();
random_bytes(&mut nonce);
Nonce::new(nonce)
}
fn random_shared_secret(&self) -> SharedSecret {
let mut s = [0u8; SHARED_SECRET_LENGTH];
random_bytes(&mut s).unwrap();
random_bytes(&mut s);
SharedSecret::new(s)
}
fn compute_dh(
&self,
key: &PublicKey,
secret: &SecretKey,
) -> Result<SharedSecret, VeilidAPIError> {
fn compute_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult<SharedSecret> {
let pk_ed = ed::PublicKey::from_bytes(&key.bytes).map_err(VeilidAPIError::internal)?;
let pk_xd = ed25519_to_x25519_pk(&pk_ed)?;
let sk_ed = ed::SecretKey::from_bytes(&secret.bytes).map_err(VeilidAPIError::internal)?;
@ -98,10 +142,7 @@ impl CryptoSystem for CryptoSystemVLD0 {
fn generate_hash(&self, data: &[u8]) -> PublicKey {
PublicKey::new(*blake3::hash(data).as_bytes())
}
fn generate_hash_reader(
&self,
reader: &mut dyn std::io::Read,
) -> Result<PublicKey, VeilidAPIError> {
fn generate_hash_reader(&self, reader: &mut dyn std::io::Read) -> VeilidAPIResult<PublicKey> {
let mut hasher = blake3::Hasher::new();
std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?;
Ok(PublicKey::new(*hasher.finalize().as_bytes()))
@ -127,21 +168,21 @@ impl CryptoSystem for CryptoSystemVLD0 {
&self,
reader: &mut dyn std::io::Read,
dht_key: &PublicKey,
) -> Result<bool, VeilidAPIError> {
) -> VeilidAPIResult<bool> {
let mut hasher = blake3::Hasher::new();
std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?;
let bytes = *hasher.finalize().as_bytes();
Ok(bytes == dht_key.bytes)
}
// Distance Metric
fn distance(&self, key1: &PublicKey, key2: &PublicKey) -> PublicKeyDistance {
fn distance(&self, key1: &PublicKey, key2: &PublicKey) -> CryptoKeyDistance {
let mut bytes = [0u8; PUBLIC_KEY_LENGTH];
for (n, byte) in bytes.iter_mut().enumerate() {
*byte = key1.bytes[n] ^ key2.bytes[n];
}
PublicKeyDistance::new(bytes)
CryptoKeyDistance::new(bytes)
}
// Authentication
@ -150,7 +191,7 @@ impl CryptoSystem for CryptoSystemVLD0 {
dht_key: &PublicKey,
dht_key_secret: &SecretKey,
data: &[u8],
) -> Result<Signature, VeilidAPIError> {
) -> VeilidAPIResult<Signature> {
let mut kpb: [u8; SECRET_KEY_LENGTH + PUBLIC_KEY_LENGTH] =
[0u8; SECRET_KEY_LENGTH + PUBLIC_KEY_LENGTH];
@ -177,7 +218,7 @@ impl CryptoSystem for CryptoSystemVLD0 {
dht_key: &PublicKey,
data: &[u8],
signature: &Signature,
) -> Result<(), VeilidAPIError> {
) -> VeilidAPIResult<()> {
let pk = ed::PublicKey::from_bytes(&dht_key.bytes)
.map_err(|e| VeilidAPIError::parse_error("Public key is invalid", e))?;
let sig = ed::Signature::from_bytes(&signature.bytes)
@ -201,7 +242,7 @@ impl CryptoSystem for CryptoSystemVLD0 {
nonce: &Nonce,
shared_secret: &SharedSecret,
associated_data: Option<&[u8]>,
) -> Result<(), VeilidAPIError> {
) -> VeilidAPIResult<()> {
let key = ch::Key::from(shared_secret.bytes);
let xnonce = ch::XNonce::from(nonce.bytes);
let aead = ch::XChaCha20Poly1305::new(&key);
@ -216,7 +257,7 @@ impl CryptoSystem for CryptoSystemVLD0 {
nonce: &Nonce,
shared_secret: &SharedSecret,
associated_data: Option<&[u8]>,
) -> Result<Vec<u8>, VeilidAPIError> {
) -> VeilidAPIResult<Vec<u8>> {
let mut out = body.to_vec();
self.decrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data)
.map_err(map_to_string)
@ -230,7 +271,7 @@ impl CryptoSystem for CryptoSystemVLD0 {
nonce: &Nonce,
shared_secret: &SharedSecret,
associated_data: Option<&[u8]>,
) -> Result<(), VeilidAPIError> {
) -> VeilidAPIResult<()> {
let key = ch::Key::from(shared_secret.bytes);
let xnonce = ch::XNonce::from(nonce.bytes);
let aead = ch::XChaCha20Poly1305::new(&key);
@ -246,7 +287,7 @@ impl CryptoSystem for CryptoSystemVLD0 {
nonce: &Nonce,
shared_secret: &SharedSecret,
associated_data: Option<&[u8]>,
) -> Result<Vec<u8>, VeilidAPIError> {
) -> VeilidAPIResult<Vec<u8>> {
let mut out = body.to_vec();
self.encrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data)
.map_err(map_to_string)
@ -257,11 +298,11 @@ impl CryptoSystem for CryptoSystemVLD0 {
// NoAuth Encrypt/Decrypt
fn crypt_in_place_no_auth(
&self,
body: &mut Vec<u8>,
nonce: &Nonce,
body: &mut [u8],
nonce: &[u8; NONCE_LENGTH],
shared_secret: &SharedSecret,
) {
let mut cipher = XChaCha20::new(&shared_secret.bytes.into(), &nonce.bytes.into());
let mut cipher = XChaCha20::new(&shared_secret.bytes.into(), nonce.into());
cipher.apply_keystream(body);
}
@ -269,17 +310,17 @@ impl CryptoSystem for CryptoSystemVLD0 {
&self,
in_buf: &[u8],
out_buf: &mut [u8],
nonce: &Nonce,
nonce: &[u8; NONCE_LENGTH],
shared_secret: &SharedSecret,
) {
let mut cipher = XChaCha20::new(&shared_secret.bytes.into(), &nonce.bytes.into());
let mut cipher = XChaCha20::new(&shared_secret.bytes.into(), nonce.into());
cipher.apply_keystream_b2b(in_buf, out_buf).unwrap();
}
fn crypt_no_auth_aligned_8(
&self,
in_buf: &[u8],
nonce: &Nonce,
nonce: &[u8; NONCE_LENGTH],
shared_secret: &SharedSecret,
) -> Vec<u8> {
let mut out_buf = unsafe { aligned_8_u8_vec_uninit(in_buf.len()) };
@ -290,7 +331,7 @@ impl CryptoSystem for CryptoSystemVLD0 {
fn crypt_no_auth_unaligned(
&self,
in_buf: &[u8],
nonce: &Nonce,
nonce: &[u8; NONCE_LENGTH],
shared_secret: &SharedSecret,
) -> Vec<u8> {
let mut out_buf = unsafe { unaligned_u8_vec_uninit(in_buf.len()) };
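
The Argon2 additions above give VLD0 a password hash, verify, and key-derivation surface. A minimal sketch of how the three calls fit together, assuming a `CryptoSystemVLD0` handle `vcrypto` and a caller that propagates `VeilidAPIResult`; the helper name below is hypothetical:

```rust
// Sketch only: `example_password_flow` is a hypothetical helper, and it assumes
// the `CryptoSystem` trait is in scope for the method calls below.
fn example_password_flow(vcrypto: &CryptoSystemVLD0) -> VeilidAPIResult<()> {
    // a 16-byte salt satisfies the Salt::MIN_LENGTH..=Salt::MAX_LENGTH check above
    let salt = vcrypto.random_bytes(vcrypto.default_salt_length());

    // hash_password() produces a PHC string ("$argon2id$v=19$...") that
    // verify_password() checks without needing the salt again
    let phc = vcrypto.hash_password(b"my password", &salt)?;
    assert!(vcrypto.verify_password(b"my password", &phc)?);
    assert!(!vcrypto.verify_password(b"wrong password", &phc)?);

    // derive_shared_secret() is deterministic for a given password and salt,
    // yielding key material suitable for symmetric encryption
    let _secret = vcrypto.derive_shared_secret(b"my password", &salt)?;
    Ok(())
}
```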

View File

@ -1,4 +1,4 @@
mod table_db;
use super::*;
#[cfg(target_arch = "wasm32")]
mod wasm;
@ -8,3 +8,5 @@ pub use wasm::*;
mod native;
#[cfg(not(target_arch = "wasm32"))]
pub use native::*;
pub static KNOWN_PROTECTED_STORE_KEYS: [&'static str; 2] = ["device_encryption_key", "_test_key"];

View File

@ -1,13 +1,13 @@
mod block_store;
mod protected_store;
mod system;
mod table_store;
pub use block_store::*;
pub use protected_store::*;
pub use system::*;
pub use table_store::*;
#[cfg(target_os = "android")]
pub mod android;
pub mod network_interfaces;
use super::*;

View File

@ -1,7 +1,6 @@
use crate::*;
use super::*;
use data_encoding::BASE64URL_NOPAD;
use keyring_manager::*;
use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};
use std::path::Path;
pub struct ProtectedStoreInner {
@ -30,18 +29,12 @@ impl ProtectedStore {
#[instrument(level = "trace", skip(self), err)]
pub async fn delete_all(&self) -> EyreResult<()> {
// Delete all known keys
if self.remove_user_secret("node_id").await? {
debug!("deleted protected_store key 'node_id'");
for kpsk in &KNOWN_PROTECTED_STORE_KEYS {
if let Err(e) = self.remove_user_secret(kpsk).await {
error!("failed to delete '{}': {}", kpsk, e);
} else {
debug!("deleted table '{}'", kpsk);
}
if self.remove_user_secret("node_id_secret").await? {
debug!("deleted protected_store key 'node_id_secret'");
}
if self.remove_user_secret("_test_key").await? {
debug!("deleted protected_store key '_test_key'");
}
if self.remove_user_secret("RouteSpecStore").await? {
debug!("deleted protected_store key 'RouteSpecStore'");
}
Ok(())
}
@ -65,9 +58,8 @@ impl ProtectedStore {
|| c.protected_store.allow_insecure_fallback)
&& inner.keyring_manager.is_none()
{
let insecure_fallback_directory =
Path::new(&c.protected_store.insecure_fallback_directory);
let insecure_keyring_file = insecure_fallback_directory.to_owned().join(format!(
let directory = Path::new(&c.protected_store.directory);
let insecure_keyring_file = directory.to_owned().join(format!(
"insecure_keyring{}",
if c.namespace.is_empty() {
"".to_owned()
@ -153,7 +145,7 @@ impl ProtectedStore {
pub async fn save_user_secret_rkyv<K, T>(&self, key: K, value: &T) -> EyreResult<bool>
where
K: AsRef<str> + fmt::Debug,
T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
T: RkyvSerialize<DefaultVeilidRkyvSerializer>,
{
let v = to_rkyv(value)?;
self.save_user_secret(key, &v).await
@ -175,9 +167,8 @@ impl ProtectedStore {
K: AsRef<str> + fmt::Debug,
T: RkyvArchive,
<T as RkyvArchive>::Archived:
for<'t> bytecheck::CheckBytes<rkyv::validation::validators::DefaultValidator<'t>>,
<T as RkyvArchive>::Archived:
RkyvDeserialize<T, rkyv::de::deserializers::SharedDeserializeMap>,
for<'t> CheckBytes<rkyv::validation::validators::DefaultValidator<'t>>,
<T as RkyvArchive>::Archived: RkyvDeserialize<T, VeilidSharedDeserializeMap>,
{
let out = self.load_user_secret(key).await?;
let b = match out {
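
The protected store API in this file reduces to saving, loading, and removing named secrets. A minimal sketch, assuming an initialized `ProtectedStore` handle `ps` and the byte-slice call forms used above; the helper is illustrative:

```rust
// Sketch only: assumes an initialized ProtectedStore `ps` and the byte-slice
// call forms used in this file. "_test_key" is one of KNOWN_PROTECTED_STORE_KEYS,
// so delete_all() would also clean it up.
async fn example_protected_store_roundtrip(ps: &ProtectedStore) -> EyreResult<()> {
    // save_user_secret() returns true when it overwrote an existing value
    let _overwrote = ps.save_user_secret("_test_key", b"hello").await?;

    // load_user_secret() returns None when the key is absent
    if let Some(bytes) = ps.load_user_secret("_test_key").await? {
        assert_eq!(bytes, b"hello".to_vec());
    }

    // remove_user_secret() returns true when something was actually removed
    let _removed = ps.remove_user_secret("_test_key").await?;
    Ok(())
}
```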

View File

@ -2,7 +2,7 @@
use crate::*;
pub async fn get_outbound_relay_peer() -> Option<crate::veilid_api::PeerInfo> {
pub async fn get_outbound_relay_peer() -> Option<crate::routing_table::PeerInfo> {
panic!("Native Veilid should never require an outbound relay");
}

View File

@ -1,147 +0,0 @@
use crate::intf::table_db::TableDBInner;
pub use crate::intf::table_db::{TableDB, TableDBTransaction};
use crate::*;
use keyvaluedb_sqlite::*;
use std::path::PathBuf;
struct TableStoreInner {
opened: BTreeMap<String, Weak<Mutex<TableDBInner>>>,
}
/// Veilid Table Storage
/// Database for storing key value pairs persistently across runs
#[derive(Clone)]
pub struct TableStore {
config: VeilidConfig,
inner: Arc<Mutex<TableStoreInner>>,
}
impl TableStore {
fn new_inner() -> TableStoreInner {
TableStoreInner {
opened: BTreeMap::new(),
}
}
pub(crate) fn new(config: VeilidConfig) -> Self {
Self {
config,
inner: Arc::new(Mutex::new(Self::new_inner())),
}
}
/// Delete all known tables
pub async fn delete_all(&self) {
if let Err(e) = self.delete("crypto_caches").await {
error!("failed to delete 'crypto_caches': {}", e);
}
if let Err(e) = self.delete("RouteSpecStore").await {
error!("failed to delete 'RouteSpecStore': {}", e);
}
if let Err(e) = self.delete("routing_table").await {
error!("failed to delete 'routing_table': {}", e);
}
}
pub(crate) async fn init(&self) -> EyreResult<()> {
Ok(())
}
pub(crate) async fn terminate(&self) {
assert!(
self.inner.lock().opened.is_empty(),
"all open databases should have been closed"
);
}
pub(crate) fn on_table_db_drop(&self, table: String) {
let mut inner = self.inner.lock();
if inner.opened.remove(&table).is_none() {
unreachable!("should have removed an item");
}
}
fn get_dbpath(&self, table: &str) -> EyreResult<PathBuf> {
if !table
.chars()
.all(|c| char::is_alphanumeric(c) || c == '_' || c == '-')
{
bail!("table name '{}' is invalid", table);
}
let c = self.config.get();
let tablestoredir = c.table_store.directory.clone();
std::fs::create_dir_all(&tablestoredir).wrap_err("failed to create tablestore path")?;
let dbpath: PathBuf = [tablestoredir, String::from(table)].iter().collect();
Ok(dbpath)
}
fn get_table_name(&self, table: &str) -> EyreResult<String> {
if !table
.chars()
.all(|c| char::is_alphanumeric(c) || c == '_' || c == '-')
{
bail!("table name '{}' is invalid", table);
}
let c = self.config.get();
let namespace = c.namespace.clone();
Ok(if namespace.is_empty() {
table.to_string()
} else {
format!("_ns_{}_{}", namespace, table)
})
}
/// Get or create a TableDB database table. If the column count is greater than an
/// existing TableDB's column count, the database will be upgraded to add the missing columns
pub async fn open(&self, name: &str, column_count: u32) -> EyreResult<TableDB> {
let table_name = self.get_table_name(name)?;
let mut inner = self.inner.lock();
if let Some(table_db_weak_inner) = inner.opened.get(&table_name) {
match TableDB::try_new_from_weak_inner(table_db_weak_inner.clone()) {
Some(tdb) => {
return Ok(tdb);
}
None => {
inner.opened.remove(&table_name);
}
};
}
let dbpath = self.get_dbpath(&table_name)?;
// Ensure permissions are correct
ensure_file_private_owner(&dbpath)?;
let cfg = DatabaseConfig::with_columns(column_count);
let db = Database::open(&dbpath, cfg).wrap_err("failed to open tabledb")?;
// Ensure permissions are correct
ensure_file_private_owner(&dbpath)?;
trace!(
"opened table store '{}' at path '{:?}' with {} columns",
name,
dbpath,
column_count
);
let table_db = TableDB::new(table_name.clone(), self.clone(), db);
inner.opened.insert(table_name, table_db.weak_inner());
Ok(table_db)
}
/// Delete a TableDB table by name
pub async fn delete(&self, name: &str) -> EyreResult<bool> {
let table_name = self.get_table_name(name)?;
let inner = self.inner.lock();
if inner.opened.contains_key(&table_name) {
bail!("Not deleting table that is still opened");
}
let dbpath = self.get_dbpath(&table_name)?;
let ret = std::fs::remove_file(dbpath).is_ok();
Ok(ret)
}
}
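
Although this file is removed in this commit, it shows the `TableStore` surface: named, namespaced tables opened with a column count. A minimal usage sketch under those assumptions, with an arbitrary table name:

```rust
// Sketch only: assumes an initialized TableStore `ts`; the table name and
// column index are arbitrary examples.
async fn example_table_store(ts: &TableStore) -> EyreResult<()> {
    // open (or create) a table with one column; opening it again while this
    // handle is live returns the same underlying database
    let db = ts.open("example_table", 1).await?;
    db.store(0, b"key", b"value").await?;
    assert_eq!(db.load(0, b"key")?, Some(b"value".to_vec()));

    // a table cannot be deleted while a handle to it is still open
    drop(db);
    ts.delete("example_table").await?;
    Ok(())
}
```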

View File

@ -1,276 +0,0 @@
use crate::*;
use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};
cfg_if! {
if #[cfg(target_arch = "wasm32")] {
use keyvaluedb_web::*;
use keyvaluedb::*;
} else {
use keyvaluedb_sqlite::*;
use keyvaluedb::*;
}
}
pub struct TableDBInner {
table: String,
table_store: TableStore,
database: Database,
}
impl fmt::Debug for TableDBInner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "TableDBInner(table={})", self.table)
}
}
impl Drop for TableDBInner {
fn drop(&mut self) {
self.table_store.on_table_db_drop(self.table.clone());
}
}
#[derive(Debug, Clone)]
pub struct TableDB {
inner: Arc<Mutex<TableDBInner>>,
}
impl TableDB {
pub(super) fn new(table: String, table_store: TableStore, database: Database) -> Self {
Self {
inner: Arc::new(Mutex::new(TableDBInner {
table,
table_store,
database,
})),
}
}
pub(super) fn try_new_from_weak_inner(weak_inner: Weak<Mutex<TableDBInner>>) -> Option<Self> {
weak_inner.upgrade().map(|table_db_inner| Self {
inner: table_db_inner,
})
}
pub(super) fn weak_inner(&self) -> Weak<Mutex<TableDBInner>> {
Arc::downgrade(&self.inner)
}
/// Get the total number of columns in the TableDB
pub fn get_column_count(&self) -> EyreResult<u32> {
let db = &self.inner.lock().database;
db.num_columns().wrap_err("failed to get column count: {}")
}
/// Get the list of keys in a column of the TableDB
pub fn get_keys(&self, col: u32) -> EyreResult<Vec<Box<[u8]>>> {
let db = &self.inner.lock().database;
let mut out: Vec<Box<[u8]>> = Vec::new();
db.iter(col, None, &mut |kv| {
out.push(kv.0.clone().into_boxed_slice());
Ok(true)
})
.wrap_err("failed to get keys for column")?;
Ok(out)
}
/// Start a TableDB write transaction. The transaction object must be committed or rolled back before dropping.
pub fn transact(&self) -> TableDBTransaction {
let dbt = {
let db = &self.inner.lock().database;
db.transaction()
};
TableDBTransaction::new(self.clone(), dbt)
}
/// Store a key with a value in a column in the TableDB. Performs a single transaction immediately.
pub async fn store(&self, col: u32, key: &[u8], value: &[u8]) -> EyreResult<()> {
let db = self.inner.lock().database.clone();
let mut dbt = db.transaction();
dbt.put(col, key, value);
db.write(dbt).await.wrap_err("failed to store key")
}
/// Store a key in rkyv format with a value in a column in the TableDB. Performs a single transaction immediately.
pub async fn store_rkyv<T>(&self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
where
T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
{
let v = to_rkyv(value)?;
let db = self.inner.lock().database.clone();
let mut dbt = db.transaction();
dbt.put(col, key, v.as_slice());
db.write(dbt).await.wrap_err("failed to store key")
}
/// Store a key in json format with a value in a column in the TableDB. Performs a single transaction immediately.
pub async fn store_json<T>(&self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
where
T: serde::Serialize,
{
let v = serde_json::to_vec(value)?;
let db = self.inner.lock().database.clone();
let mut dbt = db.transaction();
dbt.put(col, key, v.as_slice());
db.write(dbt).await.wrap_err("failed to store key")
}
/// Read a key from a column in the TableDB immediately.
pub fn load(&self, col: u32, key: &[u8]) -> EyreResult<Option<Vec<u8>>> {
let db = self.inner.lock().database.clone();
db.get(col, key).wrap_err("failed to get key")
}
/// Read an rkyv key from a column in the TableDB immediately
pub fn load_rkyv<T>(&self, col: u32, key: &[u8]) -> EyreResult<Option<T>>
where
T: RkyvArchive,
<T as RkyvArchive>::Archived:
for<'t> bytecheck::CheckBytes<rkyv::validation::validators::DefaultValidator<'t>>,
<T as RkyvArchive>::Archived:
RkyvDeserialize<T, rkyv::de::deserializers::SharedDeserializeMap>,
{
let db = self.inner.lock().database.clone();
let out = db.get(col, key).wrap_err("failed to get key")?;
let b = match out {
Some(v) => v,
None => {
return Ok(None);
}
};
let obj = from_rkyv(b)?;
Ok(Some(obj))
}
/// Read a serde-json key from a column in the TableDB immediately
pub fn load_json<T>(&self, col: u32, key: &[u8]) -> EyreResult<Option<T>>
where
T: for<'de> serde::Deserialize<'de>,
{
let db = self.inner.lock().database.clone();
let out = db.get(col, key).wrap_err("failed to get key")?;
let b = match out {
Some(v) => v,
None => {
return Ok(None);
}
};
let obj = serde_json::from_slice(&b)?;
Ok(Some(obj))
}
/// Delete a key from a column in the TableDB
pub async fn delete(&self, col: u32, key: &[u8]) -> EyreResult<bool> {
let db = self.inner.lock().database.clone();
let found = db.get(col, key).wrap_err("failed to get key")?;
match found {
None => Ok(false),
Some(_) => {
let mut dbt = db.transaction();
dbt.delete(col, key);
db.write(dbt).await.wrap_err("failed to delete key")?;
Ok(true)
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
struct TableDBTransactionInner {
dbt: Option<DBTransaction>,
}
impl fmt::Debug for TableDBTransactionInner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"TableDBTransactionInner({})",
match &self.dbt {
Some(dbt) => format!("len={}", dbt.ops.len()),
None => "".to_owned(),
}
)
}
}
/// A TableDB transaction
/// Atomically commits a group of writes or deletes to the TableDB
#[derive(Debug, Clone)]
pub struct TableDBTransaction {
db: TableDB,
inner: Arc<Mutex<TableDBTransactionInner>>,
}
impl TableDBTransaction {
fn new(db: TableDB, dbt: DBTransaction) -> Self {
Self {
db,
inner: Arc::new(Mutex::new(TableDBTransactionInner { dbt: Some(dbt) })),
}
}
/// Commit the transaction. Performs all actions atomically.
pub async fn commit(self) -> EyreResult<()> {
let dbt = {
let mut inner = self.inner.lock();
inner
.dbt
.take()
.ok_or_else(|| eyre!("transaction already completed"))?
};
let db = self.db.inner.lock().database.clone();
db.write(dbt)
.await
.wrap_err("commit failed, transaction lost")
}
/// Rollback the transaction. Does nothing to the TableDB.
pub fn rollback(self) {
let mut inner = self.inner.lock();
inner.dbt = None;
}
/// Store a key with a value in a column in the TableDB
pub fn store(&self, col: u32, key: &[u8], value: &[u8]) {
let mut inner = self.inner.lock();
inner.dbt.as_mut().unwrap().put(col, key, value);
}
/// Store a key in rkyv format with a value in a column in the TableDB
pub fn store_rkyv<T>(&self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
where
T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
{
let v = to_rkyv(value)?;
let mut inner = self.inner.lock();
inner.dbt.as_mut().unwrap().put(col, key, v.as_slice());
Ok(())
}
/// Store a key in json format with a value in a column in the TableDB
pub fn store_json<T>(&self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
where
T: serde::Serialize,
{
let v = serde_json::to_vec(value)?;
let mut inner = self.inner.lock();
inner.dbt.as_mut().unwrap().put(col, key, v.as_slice());
Ok(())
}
/// Delete a key from a column in the TableDB
pub fn delete(&self, col: u32, key: &[u8]) {
let mut inner = self.inner.lock();
inner.dbt.as_mut().unwrap().delete(col, key);
}
}
impl Drop for TableDBTransactionInner {
fn drop(&mut self) {
if self.dbt.is_some() {
warn!("Dropped transaction without commit or rollback");
}
}
}
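
A minimal sketch of the transaction flow above, assuming `db` is an open `TableDB`; every transaction must end in `commit()` or `rollback()` before it is dropped. The helper is illustrative:

```rust
// Sketch only: assumes `db` is an open TableDB with at least one column.
async fn example_transaction(db: &TableDB) -> EyreResult<()> {
    let tx = db.transact();
    tx.store(0, b"alpha", b"1");
    tx.store(0, b"beta", b"2");
    tx.delete(0, b"gamma");
    // commit() applies all queued operations atomically; rollback() discards them
    tx.commit().await?;
    Ok(())
}
```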

View File

@ -1,9 +1,9 @@
mod block_store;
mod protected_store;
mod system;
mod table_store;
pub use block_store::*;
pub use protected_store::*;
pub use system::*;
pub use table_store::*;
use super::*;

View File

@ -1,6 +1,9 @@
use crate::*;
use super::*;
use data_encoding::BASE64URL_NOPAD;
use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};
use rkyv::{
bytecheck::CheckBytes, Archive as RkyvArchive, Deserialize as RkyvDeserialize,
Serialize as RkyvSerialize,
};
use web_sys::*;
@ -16,18 +19,12 @@ impl ProtectedStore {
#[instrument(level = "trace", skip(self), err)]
pub async fn delete_all(&self) -> EyreResult<()> {
// Delete all known keys
if self.remove_user_secret("node_id").await? {
debug!("deleted protected_store key 'node_id'");
for kpsk in &KNOWN_PROTECTED_STORE_KEYS {
if let Err(e) = self.remove_user_secret(kpsk).await {
error!("failed to delete '{}': {}", kpsk, e);
} else {
debug!("deleted table '{}'", kpsk);
}
if self.remove_user_secret("node_id_secret").await? {
debug!("deleted protected_store key 'node_id_secret'");
}
if self.remove_user_secret("_test_key").await? {
debug!("deleted protected_store key '_test_key'");
}
if self.remove_user_secret("RouteSpecStore").await? {
debug!("deleted protected_store key 'RouteSpecStore'");
}
Ok(())
}
@ -133,7 +130,7 @@ impl ProtectedStore {
pub async fn save_user_secret_rkyv<K, T>(&self, key: K, value: &T) -> EyreResult<bool>
where
K: AsRef<str> + fmt::Debug,
T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
T: RkyvSerialize<DefaultVeilidRkyvSerializer>,
{
let v = to_rkyv(value)?;
self.save_user_secret(key, &v).await
@ -155,9 +152,8 @@ impl ProtectedStore {
K: AsRef<str> + fmt::Debug,
T: RkyvArchive,
<T as RkyvArchive>::Archived:
for<'t> bytecheck::CheckBytes<rkyv::validation::validators::DefaultValidator<'t>>,
<T as RkyvArchive>::Archived:
RkyvDeserialize<T, rkyv::de::deserializers::SharedDeserializeMap>,
for<'t> CheckBytes<rkyv::validation::validators::DefaultValidator<'t>>,
<T as RkyvArchive>::Archived: RkyvDeserialize<T, VeilidSharedDeserializeMap>,
{
let out = self.load_user_secret(key).await?;
let b = match out {

View File

@ -2,7 +2,7 @@ use crate::*;
//use js_sys::*;
pub async fn get_outbound_relay_peer() -> Option<crate::veilid_api::PeerInfo> {
pub async fn get_outbound_relay_peer() -> Option<crate::routing_table::PeerInfo> {
// unimplemented!
None
}

View File

@ -1,151 +0,0 @@
use crate::intf::table_db::TableDBInner;
pub use crate::intf::table_db::{TableDB, TableDBTransaction};
use crate::*;
use keyvaluedb_web::*;
struct TableStoreInner {
opened: BTreeMap<String, Weak<Mutex<TableDBInner>>>,
}
#[derive(Clone)]
pub struct TableStore {
config: VeilidConfig,
inner: Arc<Mutex<TableStoreInner>>,
async_lock: Arc<AsyncMutex<()>>,
}
impl TableStore {
fn new_inner() -> TableStoreInner {
TableStoreInner {
opened: BTreeMap::new(),
}
}
pub(crate) fn new(config: VeilidConfig) -> Self {
Self {
config,
inner: Arc::new(Mutex::new(Self::new_inner())),
async_lock: Arc::new(AsyncMutex::new(())),
}
}
/// Delete all known tables
pub async fn delete_all(&self) {
if let Err(e) = self.delete("crypto_caches").await {
error!("failed to delete 'crypto_caches': {}", e);
}
if let Err(e) = self.delete("RouteSpecStore").await {
error!("failed to delete 'RouteSpecStore': {}", e);
}
if let Err(e) = self.delete("routing_table").await {
error!("failed to delete 'routing_table': {}", e);
}
}
pub(crate) async fn init(&self) -> EyreResult<()> {
let _async_guard = self.async_lock.lock().await;
Ok(())
}
pub(crate) async fn terminate(&self) {
let _async_guard = self.async_lock.lock().await;
assert!(
self.inner.lock().opened.len() == 0,
"all open databases should have been closed"
);
}
pub(crate) fn on_table_db_drop(&self, table: String) {
let mut inner = self.inner.lock();
match inner.opened.remove(&table) {
Some(_) => (),
None => {
assert!(false, "should have removed an item");
}
}
}
fn get_table_name(&self, table: &str) -> EyreResult<String> {
if !table
.chars()
.all(|c| char::is_alphanumeric(c) || c == '_' || c == '-')
{
bail!("table name '{}' is invalid", table);
}
let c = self.config.get();
let namespace = c.namespace.clone();
Ok(if namespace.len() == 0 {
format!("{}", table)
} else {
format!("_ns_{}_{}", namespace, table)
})
}
/// Get or create a TableDB database table. If the column count is greater than an
/// existing TableDB's column count, the database will be upgraded to add the missing columns
pub async fn open(&self, name: &str, column_count: u32) -> EyreResult<TableDB> {
let _async_guard = self.async_lock.lock().await;
let table_name = self.get_table_name(name)?;
{
let mut inner = self.inner.lock();
if let Some(table_db_weak_inner) = inner.opened.get(&table_name) {
match TableDB::try_new_from_weak_inner(table_db_weak_inner.clone()) {
Some(tdb) => {
return Ok(tdb);
}
None => {
inner.opened.remove(&table_name);
}
};
}
}
let db = Database::open(table_name.clone(), column_count)
.await
.wrap_err("failed to open tabledb")?;
trace!(
"opened table store '{}' with table name '{:?}' with {} columns",
name,
table_name,
column_count
);
let table_db = TableDB::new(table_name.clone(), self.clone(), db);
{
let mut inner = self.inner.lock();
inner.opened.insert(table_name, table_db.weak_inner());
}
Ok(table_db)
}
/// Delete a TableDB table by name
pub async fn delete(&self, name: &str) -> EyreResult<bool> {
let _async_guard = self.async_lock.lock().await;
trace!("TableStore::delete {}", name);
let table_name = self.get_table_name(name)?;
{
let inner = self.inner.lock();
if inner.opened.contains_key(&table_name) {
trace!(
"TableStore::delete {}: Not deleting, still open.",
table_name
);
bail!("Not deleting table that is still opened");
}
}
if is_browser() {
let out = match Database::delete(table_name.clone()).await {
Ok(_) => true,
Err(_) => false,
};
//.map_err(|e| format!("failed to delete tabledb at: {} ({})", table_name, e))?;
trace!("TableStore::deleted {}", table_name);
Ok(out)
} else {
unimplemented!();
}
}
}

View File

@ -28,8 +28,9 @@ mod network_manager;
mod receipt_manager;
mod routing_table;
mod rpc_processor;
mod storage_manager;
mod table_store;
mod veilid_api;
#[macro_use]
mod veilid_config;
mod veilid_layer_filter;
@ -40,6 +41,14 @@ pub use self::veilid_config::*;
pub use self::veilid_layer_filter::*;
pub use veilid_tools as tools;
use enumset::*;
use rkyv::{
bytecheck, bytecheck::CheckBytes, de::deserializers::SharedDeserializeMap, with::Skip,
Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize,
};
type RkyvDefaultValidator<'t> = rkyv::validation::validators::DefaultValidator<'t>;
use serde::*;
pub mod veilid_capnp {
include!(concat!(env!("OUT_DIR"), "/proto/veilid_capnp.rs"));
}

View File

@ -187,7 +187,7 @@ impl ConnectionTable {
pub fn get_last_connection_by_remote(&self, remote: PeerAddress) -> Option<ConnectionHandle> {
let mut inner = self.inner.lock();
let id = inner.ids_by_remote.get(&remote).map(|v| v[(v.len() - 1)])?;
let id = inner.ids_by_remote.get(&remote).map(|v| v[v.len() - 1])?;
let protocol_index = Self::protocol_to_index(remote.protocol_type());
let out = inner.conn_by_id[protocol_index].get(&id).unwrap();
Some(out.get_handle())

View File

@ -11,6 +11,7 @@ mod connection_manager;
mod connection_table;
mod network_connection;
mod tasks;
mod types;
pub mod tests;
@ -18,6 +19,7 @@ pub mod tests;
pub use connection_manager::*;
pub use network_connection::*;
pub use types::*;
////////////////////////////////////////////////////////////////////////////////////////
use connection_handle::*;
@ -31,6 +33,7 @@ use native::*;
use receipt_manager::*;
use routing_table::*;
use rpc_processor::*;
use storage_manager::*;
#[cfg(target_arch = "wasm32")]
use wasm::*;
@ -144,6 +147,7 @@ struct NetworkManagerInner {
struct NetworkManagerUnlockedInner {
// Handles
config: VeilidConfig,
storage_manager: StorageManager,
protected_store: ProtectedStore,
table_store: TableStore,
block_store: BlockStore,
@ -174,6 +178,7 @@ impl NetworkManager {
}
fn new_unlocked_inner(
config: VeilidConfig,
storage_manager: StorageManager,
protected_store: ProtectedStore,
table_store: TableStore,
block_store: BlockStore,
@ -181,6 +186,7 @@ impl NetworkManager {
) -> NetworkManagerUnlockedInner {
NetworkManagerUnlockedInner {
config,
storage_manager,
protected_store,
table_store,
block_store,
@ -195,6 +201,7 @@ impl NetworkManager {
pub fn new(
config: VeilidConfig,
storage_manager: StorageManager,
protected_store: ProtectedStore,
table_store: TableStore,
block_store: BlockStore,
@ -204,6 +211,7 @@ impl NetworkManager {
inner: Arc::new(Mutex::new(Self::new_inner())),
unlocked_inner: Arc::new(Self::new_unlocked_inner(
config,
storage_manager,
protected_store,
table_store,
block_store,
@ -211,7 +219,7 @@ impl NetworkManager {
)),
};
this.start_tasks();
this.setup_tasks();
this
}
@ -224,6 +232,9 @@ impl NetworkManager {
{
f(&*self.unlocked_inner.config.get())
}
pub fn storage_manager(&self) -> StorageManager {
self.unlocked_inner.storage_manager.clone()
}
pub fn protected_store(&self) -> ProtectedStore {
self.unlocked_inner.protected_store.clone()
}
@ -368,7 +379,7 @@ impl NetworkManager {
debug!("starting network manager shutdown");
// Cancel all tasks
self.stop_tasks().await;
self.cancel_tasks().await;
// Shutdown network components if they started up
debug!("shutting down network components");
@ -461,7 +472,7 @@ impl NetworkManager {
will_validate_dial_info: false,
};
};
let own_node_info = own_peer_info.signed_node_info.node_info();
let own_node_info = own_peer_info.signed_node_info().node_info();
let will_route = own_node_info.can_inbound_relay(); // xxx: eventually this may have more criteria added
let will_tunnel = own_node_info.can_inbound_relay(); // xxx: we may want to restrict by battery life and network bandwidth at some point
@ -488,7 +499,7 @@ impl NetworkManager {
};
};
let own_node_info = own_peer_info.signed_node_info.node_info();
let own_node_info = own_peer_info.signed_node_info().node_info();
let will_relay = own_node_info.can_inbound_relay();
let will_validate_dial_info = own_node_info.can_validate_dial_info();
@ -1389,7 +1400,7 @@ impl NetworkManager {
let some_relay_nr = if self.check_client_whitelist(sender_id) {
// Full relay allowed, do a full resolve_node
match rpc.resolve_node(recipient_id.value).await {
match rpc.resolve_node(recipient_id, SafetySelection::Unsafe(Sequencing::default())).await {
Ok(v) => v,
Err(e) => {
log_net!(debug "failed to resolve recipient node for relay, dropping outbound relayed packet: {}" ,e);
@ -1551,8 +1562,8 @@ impl NetworkManager {
if let Some(nr) = routing_table.lookup_node_ref(k) {
let peer_stats = nr.peer_stats();
let peer = PeerTableData {
node_ids: nr.node_ids().iter().map(|x| x.to_string()).collect(),
peer_address: v.last_connection.remote(),
node_ids: nr.node_ids().iter().copied().collect(),
peer_address: v.last_connection.remote().to_string(),
peer_stats,
};
out.push(peer);

View File

@ -645,7 +645,7 @@ impl Network {
log_net!(debug "enable address {:?} as ipv4", addr);
inner.enable_ipv4 = true;
} else if addr.is_ipv6() {
let address = crate::Address::from_ip_addr(addr);
let address = Address::from_ip_addr(addr);
if address.is_global() {
log_net!(debug "enable address {:?} as ipv6 global", address);
inner.enable_ipv6_global = true;

View File

@ -4,7 +4,7 @@ pub mod rolling_transfers;
use super::*;
impl NetworkManager {
pub(crate) fn start_tasks(&self) {
pub(crate) fn setup_tasks(&self) {
// Set rolling transfers tick task
{
let this = self.clone();
@ -67,7 +67,7 @@ impl NetworkManager {
Ok(())
}
pub(crate) async fn stop_tasks(&self) {
pub(crate) async fn cancel_tasks(&self) {
debug!("stopping rolling transfers task");
if let Err(e) = self.unlocked_inner.rolling_transfers_task.stop().await {
warn!("rolling_transfers_task not stopped: {}", e);

View File

@ -1,2 +1,4 @@
pub mod test_connection_table;
pub mod test_signed_node_info;
use super::*;

View File

@ -0,0 +1,145 @@
use super::*;
use crate::tests::common::test_veilid_config::*;
pub async fn test_signed_node_info() {
info!("--- test_signed_node_info ---");
let (update_callback, config_callback) = setup_veilid_core();
let api = api_startup(update_callback, config_callback)
.await
.expect("startup failed");
let crypto = api.crypto().unwrap();
for ck in VALID_CRYPTO_KINDS {
let vcrypto = crypto.get(ck).unwrap();
// Test direct
let node_info = NodeInfo::new(
NetworkClass::InboundCapable,
ProtocolTypeSet::all(),
AddressTypeSet::all(),
VALID_ENVELOPE_VERSIONS.to_vec(),
VALID_CRYPTO_KINDS.to_vec(),
vec![DialInfoDetail {
class: DialInfoClass::Mapped,
dial_info: DialInfo::udp(SocketAddress::default()),
}],
);
// Test correct validation
let keypair = vcrypto.generate_keypair();
let sni = SignedDirectNodeInfo::make_signatures(
crypto.clone(),
vec![TypedKeyPair::new(ck, keypair)],
node_info.clone(),
)
.unwrap();
let tks: TypedKeySet = TypedKey::new(ck, keypair.key).into();
let oldtkslen = tks.len();
let sdni = SignedDirectNodeInfo::new(
node_info.clone(),
sni.timestamp(),
sni.signatures().to_vec(),
);
let tks_validated = sdni.validate(&tks, crypto.clone()).unwrap();
assert_eq!(tks_validated.len(), oldtkslen);
assert_eq!(tks_validated.len(), sni.signatures().len());
// Test incorrect validation
let keypair1 = vcrypto.generate_keypair();
let tks1: TypedKeySet = TypedKey::new(ck, keypair1.key).into();
let sdni = SignedDirectNodeInfo::new(
node_info.clone(),
sni.timestamp(),
sni.signatures().to_vec(),
);
sdni.validate(&tks1, crypto.clone()).unwrap_err();
// Test unsupported cryptosystem validation
let fake_crypto_kind: CryptoKind = FourCC::from([0, 1, 2, 3]);
let mut tksfake: TypedKeySet = TypedKey::new(fake_crypto_kind, PublicKey::default()).into();
let mut sigsfake = sni.signatures().to_vec();
sigsfake.push(TypedSignature::new(fake_crypto_kind, Signature::default()));
tksfake.add(TypedKey::new(ck, keypair.key));
let sdnifake =
SignedDirectNodeInfo::new(node_info.clone(), sni.timestamp(), sigsfake.clone());
let tksfake_validated = sdnifake.validate(&tksfake, crypto.clone()).unwrap();
assert_eq!(tksfake_validated.len(), 1);
assert_eq!(sdnifake.signatures().len(), sigsfake.len());
// Test relayed
let node_info2 = NodeInfo::new(
NetworkClass::OutboundOnly,
ProtocolTypeSet::all(),
AddressTypeSet::all(),
VALID_ENVELOPE_VERSIONS.to_vec(),
VALID_CRYPTO_KINDS.to_vec(),
vec![DialInfoDetail {
class: DialInfoClass::Blocked,
dial_info: DialInfo::udp(SocketAddress::default()),
}],
);
// Test correct validation
let keypair2 = vcrypto.generate_keypair();
let tks2: TypedKeySet = TypedKey::new(ck, keypair2.key).into();
let oldtks2len = tks2.len();
let sni2 = SignedRelayedNodeInfo::make_signatures(
crypto.clone(),
vec![TypedKeyPair::new(ck, keypair2)],
node_info2.clone(),
tks.clone(),
sni.clone(),
)
.unwrap();
let srni = SignedRelayedNodeInfo::new(
node_info2.clone(),
tks.clone(),
sni.clone(),
sni2.timestamp(),
sni2.signatures().to_vec(),
);
let tks2_validated = srni.validate(&tks2, crypto.clone()).unwrap();
assert_eq!(tks2_validated.len(), oldtks2len);
assert_eq!(tks2_validated.len(), sni2.signatures().len());
// Test incorrect validation
let keypair3 = vcrypto.generate_keypair();
let tks3: TypedKeySet = TypedKey::new(ck, keypair3.key).into();
let srni = SignedRelayedNodeInfo::new(
node_info2.clone(),
tks.clone(),
sni.clone(),
sni2.timestamp(),
sni2.signatures().to_vec(),
);
srni.validate(&tks3, crypto.clone()).unwrap_err();
// Test unsupported cryptosystem validation
let fake_crypto_kind: CryptoKind = FourCC::from([0, 1, 2, 3]);
let mut tksfake3: TypedKeySet =
TypedKey::new(fake_crypto_kind, PublicKey::default()).into();
let mut sigsfake3 = sni2.signatures().to_vec();
sigsfake3.push(TypedSignature::new(fake_crypto_kind, Signature::default()));
tksfake3.add(TypedKey::new(ck, keypair2.key));
let srnifake = SignedRelayedNodeInfo::new(
node_info2.clone(),
tks.clone(),
sni.clone(),
sni2.timestamp(),
sigsfake3.clone(),
);
let tksfake3_validated = srnifake.validate(&tksfake3, crypto.clone()).unwrap();
assert_eq!(tksfake3_validated.len(), 1);
assert_eq!(srnifake.signatures().len(), sigsfake3.len());
}
api.shutdown().await;
}
pub async fn test_all() {
test_signed_node_info().await;
}

View File

@ -0,0 +1,130 @@
use super::*;
#[derive(
Copy,
Clone,
Debug,
PartialEq,
PartialOrd,
Ord,
Eq,
Hash,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[archive_attr(repr(u8), derive(CheckBytes))]
pub enum Address {
IPV4(Ipv4Addr),
IPV6(Ipv6Addr),
}
impl Default for Address {
fn default() -> Self {
Address::IPV4(Ipv4Addr::new(0, 0, 0, 0))
}
}
impl Address {
pub fn from_socket_addr(sa: SocketAddr) -> Address {
match sa {
SocketAddr::V4(v4) => Address::IPV4(*v4.ip()),
SocketAddr::V6(v6) => Address::IPV6(*v6.ip()),
}
}
pub fn from_ip_addr(addr: IpAddr) -> Address {
match addr {
IpAddr::V4(v4) => Address::IPV4(v4),
IpAddr::V6(v6) => Address::IPV6(v6),
}
}
pub fn address_type(&self) -> AddressType {
match self {
Address::IPV4(_) => AddressType::IPV4,
Address::IPV6(_) => AddressType::IPV6,
}
}
pub fn address_string(&self) -> String {
match self {
Address::IPV4(v4) => v4.to_string(),
Address::IPV6(v6) => v6.to_string(),
}
}
pub fn address_string_with_port(&self, port: u16) -> String {
match self {
Address::IPV4(v4) => format!("{}:{}", v4, port),
Address::IPV6(v6) => format!("[{}]:{}", v6, port),
}
}
pub fn is_unspecified(&self) -> bool {
match self {
Address::IPV4(v4) => ipv4addr_is_unspecified(v4),
Address::IPV6(v6) => ipv6addr_is_unspecified(v6),
}
}
pub fn is_global(&self) -> bool {
match self {
Address::IPV4(v4) => ipv4addr_is_global(v4) && !ipv4addr_is_multicast(v4),
Address::IPV6(v6) => ipv6addr_is_unicast_global(v6),
}
}
pub fn is_local(&self) -> bool {
match self {
Address::IPV4(v4) => {
ipv4addr_is_private(v4)
|| ipv4addr_is_link_local(v4)
|| ipv4addr_is_ietf_protocol_assignment(v4)
}
Address::IPV6(v6) => {
ipv6addr_is_unicast_site_local(v6)
|| ipv6addr_is_unicast_link_local(v6)
|| ipv6addr_is_unique_local(v6)
}
}
}
pub fn to_ip_addr(&self) -> IpAddr {
match self {
Self::IPV4(a) => IpAddr::V4(*a),
Self::IPV6(a) => IpAddr::V6(*a),
}
}
pub fn to_socket_addr(&self, port: u16) -> SocketAddr {
SocketAddr::new(self.to_ip_addr(), port)
}
pub fn to_canonical(&self) -> Address {
match self {
Address::IPV4(v4) => Address::IPV4(*v4),
Address::IPV6(v6) => match v6.to_ipv4() {
Some(v4) => Address::IPV4(v4),
None => Address::IPV6(*v6),
},
}
}
}
impl fmt::Display for Address {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Address::IPV4(v4) => write!(f, "{}", v4),
Address::IPV6(v6) => write!(f, "{}", v6),
}
}
}
impl FromStr for Address {
type Err = VeilidAPIError;
fn from_str(host: &str) -> VeilidAPIResult<Address> {
if let Ok(addr) = Ipv4Addr::from_str(host) {
Ok(Address::IPV4(addr))
} else if let Ok(addr) = Ipv6Addr::from_str(host) {
Ok(Address::IPV6(addr))
} else {
Err(VeilidAPIError::parse_error(
"Address::from_str failed",
host,
))
}
}
}
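
A minimal sketch of the parsing and formatting helpers above, using arbitrary documentation addresses; the helper function is illustrative:

```rust
// Sketch only: the addresses are arbitrary documentation values.
fn example_address_usage() -> VeilidAPIResult<()> {
    let v4 = Address::from_str("203.0.113.7")?;
    assert_eq!(v4.address_type(), AddressType::IPV4);
    assert_eq!(v4.address_string_with_port(5150), "203.0.113.7:5150");

    let v6 = Address::from_str("2001:db8::1")?;
    assert_eq!(v6.address_type(), AddressType::IPV6);
    assert_eq!(v6.address_string_with_port(5150), "[2001:db8::1]:5150");

    assert!(Address::from_str("not an address").is_err());
    Ok(())
}
```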

View File

@ -0,0 +1,22 @@
use super::*;
#[allow(clippy::derive_hash_xor_eq)]
#[derive(
Debug,
PartialOrd,
Ord,
Hash,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
EnumSetType,
)]
#[enumset(repr = "u8")]
#[archive_attr(repr(u8), derive(CheckBytes))]
pub enum AddressType {
IPV4,
IPV6,
}
pub type AddressTypeSet = EnumSet<AddressType>;

View File

@ -0,0 +1,80 @@
use super::*;
/// Represents the 5-tuple of an established connection
/// Not used to specify connections to create; that is reserved for DialInfo
///
/// ConnectionDescriptors should never be from unspecified local addresses for connection-oriented protocols
/// If the medium does not allow local addresses, None should have been used or 'new_no_local'
/// If we are specifying only a port, the socket's 'local_address()' should have been used, since an
/// established connection is always from a real address to another real address.
#[derive(
Copy,
Clone,
Debug,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct ConnectionDescriptor {
remote: PeerAddress,
local: Option<SocketAddress>,
}
impl ConnectionDescriptor {
pub fn new(remote: PeerAddress, local: SocketAddress) -> Self {
assert!(
!remote.protocol_type().is_connection_oriented() || !local.address().is_unspecified()
);
Self {
remote,
local: Some(local),
}
}
pub fn new_no_local(remote: PeerAddress) -> Self {
Self {
remote,
local: None,
}
}
pub fn remote(&self) -> PeerAddress {
self.remote
}
pub fn remote_address(&self) -> &SocketAddress {
self.remote.socket_address()
}
pub fn local(&self) -> Option<SocketAddress> {
self.local
}
pub fn protocol_type(&self) -> ProtocolType {
self.remote.protocol_type()
}
pub fn address_type(&self) -> AddressType {
self.remote.address_type()
}
pub fn make_dial_info_filter(&self) -> DialInfoFilter {
DialInfoFilter::all()
.with_protocol_type(self.protocol_type())
.with_address_type(self.address_type())
}
}
impl MatchesDialInfoFilter for ConnectionDescriptor {
fn matches_filter(&self, filter: &DialInfoFilter) -> bool {
if !filter.protocol_type_set.contains(self.protocol_type()) {
return false;
}
if !filter.address_type_set.contains(self.address_type()) {
return false;
}
true
}
}
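
A minimal sketch of how a `ConnectionDescriptor` derives a `DialInfoFilter` and matches against it, using arbitrary example addresses; the helper is illustrative:

```rust
// Sketch only: arbitrary example addresses; a real descriptor comes from an
// established network connection.
fn example_descriptor_filter() {
    let remote = PeerAddress::new(
        SocketAddress::from_socket_addr(SocketAddr::from(([203, 0, 113, 7], 5150))),
        ProtocolType::TCP,
    );
    let local = SocketAddress::from_socket_addr(SocketAddr::from(([192, 168, 1, 10], 40000)));
    let descriptor = ConnectionDescriptor::new(remote, local);

    // the derived filter keeps only this connection's protocol and address type
    let filter = descriptor.make_dial_info_filter();
    assert!(descriptor.matches_filter(&filter));

    // a filter restricted to a different protocol no longer matches
    let udp_only = DialInfoFilter::all().with_protocol_type(ProtocolType::UDP);
    assert!(!descriptor.matches_filter(&udp_only));
}
```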

View File

@ -0,0 +1,522 @@
mod tcp;
mod udp;
mod ws;
mod wss;
use super::*;
pub use tcp::*;
pub use udp::*;
pub use ws::*;
pub use wss::*;
// Keep member order appropriate for sorting < preference
// Must match ProtocolType order
#[derive(
Clone,
Debug,
PartialEq,
PartialOrd,
Ord,
Eq,
Hash,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[archive_attr(repr(u8), derive(CheckBytes))]
#[serde(tag = "kind")]
pub enum DialInfo {
UDP(DialInfoUDP),
TCP(DialInfoTCP),
WS(DialInfoWS),
WSS(DialInfoWSS),
}
impl Default for DialInfo {
fn default() -> Self {
DialInfo::UDP(DialInfoUDP::default())
}
}
impl fmt::Display for DialInfo {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self {
DialInfo::UDP(di) => write!(f, "udp|{}", di.socket_address),
DialInfo::TCP(di) => write!(f, "tcp|{}", di.socket_address),
DialInfo::WS(di) => {
let url = format!("ws://{}", di.request);
let split_url = SplitUrl::from_str(&url).unwrap();
match split_url.host {
SplitUrlHost::Hostname(_) => {
write!(f, "ws|{}|{}", di.socket_address.to_ip_addr(), di.request)
}
SplitUrlHost::IpAddr(a) => {
if di.socket_address.to_ip_addr() == a {
write!(f, "ws|{}", di.request)
} else {
panic!("resolved address does not match url: {}", di.request);
}
}
}
}
DialInfo::WSS(di) => {
let url = format!("wss://{}", di.request);
let split_url = SplitUrl::from_str(&url).unwrap();
match split_url.host {
SplitUrlHost::Hostname(_) => {
write!(f, "wss|{}|{}", di.socket_address.to_ip_addr(), di.request)
}
SplitUrlHost::IpAddr(_) => {
panic!(
"secure websockets can not use ip address in request: {}",
di.request
);
}
}
}
}
}
}
impl FromStr for DialInfo {
type Err = VeilidAPIError;
fn from_str(s: &str) -> VeilidAPIResult<DialInfo> {
let (proto, rest) = s.split_once('|').ok_or_else(|| {
VeilidAPIError::parse_error("DialInfo::from_str missing protocol '|' separator", s)
})?;
match proto {
"udp" => {
let socket_address = SocketAddress::from_str(rest)?;
Ok(DialInfo::udp(socket_address))
}
"tcp" => {
let socket_address = SocketAddress::from_str(rest)?;
Ok(DialInfo::tcp(socket_address))
}
"ws" => {
let url = format!("ws://{}", rest);
let split_url = SplitUrl::from_str(&url).map_err(|e| {
VeilidAPIError::parse_error(format!("unable to split WS url: {}", e), &url)
})?;
if split_url.scheme != "ws" || !url.starts_with("ws://") {
apibail_parse_error!("incorrect scheme for WS dialinfo", url);
}
let url_port = split_url.port.unwrap_or(80u16);
match rest.split_once('|') {
Some((sa, rest)) => {
let address = Address::from_str(sa)?;
DialInfo::try_ws(
SocketAddress::new(address, url_port),
format!("ws://{}", rest),
)
}
None => {
let address = Address::from_str(&split_url.host.to_string())?;
DialInfo::try_ws(
SocketAddress::new(address, url_port),
format!("ws://{}", rest),
)
}
}
}
"wss" => {
let url = format!("wss://{}", rest);
let split_url = SplitUrl::from_str(&url).map_err(|e| {
VeilidAPIError::parse_error(format!("unable to split WSS url: {}", e), &url)
})?;
if split_url.scheme != "wss" || !url.starts_with("wss://") {
apibail_parse_error!("incorrect scheme for WSS dialinfo", url);
}
let url_port = split_url.port.unwrap_or(443u16);
let (a, rest) = rest.split_once('|').ok_or_else(|| {
VeilidAPIError::parse_error(
"DialInfo::from_str missing socket address '|' separator",
s,
)
})?;
let address = Address::from_str(a)?;
DialInfo::try_wss(
SocketAddress::new(address, url_port),
format!("wss://{}", rest),
)
}
_ => Err(VeilidAPIError::parse_error(
"DialInfo::from_str has invalid scheme",
s,
)),
}
}
}
impl DialInfo {
pub fn udp_from_socketaddr(socket_addr: SocketAddr) -> Self {
Self::UDP(DialInfoUDP {
socket_address: SocketAddress::from_socket_addr(socket_addr).to_canonical(),
})
}
pub fn tcp_from_socketaddr(socket_addr: SocketAddr) -> Self {
Self::TCP(DialInfoTCP {
socket_address: SocketAddress::from_socket_addr(socket_addr).to_canonical(),
})
}
pub fn udp(socket_address: SocketAddress) -> Self {
Self::UDP(DialInfoUDP {
socket_address: socket_address.to_canonical(),
})
}
pub fn tcp(socket_address: SocketAddress) -> Self {
Self::TCP(DialInfoTCP {
socket_address: socket_address.to_canonical(),
})
}
pub fn try_ws(socket_address: SocketAddress, url: String) -> VeilidAPIResult<Self> {
let split_url = SplitUrl::from_str(&url).map_err(|e| {
VeilidAPIError::parse_error(format!("unable to split WS url: {}", e), &url)
})?;
if split_url.scheme != "ws" || !url.starts_with("ws://") {
apibail_parse_error!("incorrect scheme for WS dialinfo", url);
}
let url_port = split_url.port.unwrap_or(80u16);
if url_port != socket_address.port() {
apibail_parse_error!("socket address port doesn't match url port", url);
}
if let SplitUrlHost::IpAddr(a) = split_url.host {
if socket_address.to_ip_addr() != a {
apibail_parse_error!(
format!("request address does not match socket address: {}", a),
socket_address
);
}
}
Ok(Self::WS(DialInfoWS {
socket_address: socket_address.to_canonical(),
request: url[5..].to_string(),
}))
}
pub fn try_wss(socket_address: SocketAddress, url: String) -> VeilidAPIResult<Self> {
let split_url = SplitUrl::from_str(&url).map_err(|e| {
VeilidAPIError::parse_error(format!("unable to split WSS url: {}", e), &url)
})?;
if split_url.scheme != "wss" || !url.starts_with("wss://") {
apibail_parse_error!("incorrect scheme for WSS dialinfo", url);
}
let url_port = split_url.port.unwrap_or(443u16);
if url_port != socket_address.port() {
apibail_parse_error!("socket address port doesn't match url port", url);
}
if !matches!(split_url.host, SplitUrlHost::Hostname(_)) {
apibail_parse_error!(
"WSS url can not use address format, only hostname format",
url
);
}
Ok(Self::WSS(DialInfoWSS {
socket_address: socket_address.to_canonical(),
request: url[6..].to_string(),
}))
}
pub fn protocol_type(&self) -> ProtocolType {
match self {
Self::UDP(_) => ProtocolType::UDP,
Self::TCP(_) => ProtocolType::TCP,
Self::WS(_) => ProtocolType::WS,
Self::WSS(_) => ProtocolType::WSS,
}
}
pub fn address_type(&self) -> AddressType {
self.socket_address().address_type()
}
pub fn address(&self) -> Address {
match self {
Self::UDP(di) => di.socket_address.address(),
Self::TCP(di) => di.socket_address.address(),
Self::WS(di) => di.socket_address.address(),
Self::WSS(di) => di.socket_address.address(),
}
}
pub fn set_address(&mut self, address: Address) {
match self {
Self::UDP(di) => di.socket_address.set_address(address),
Self::TCP(di) => di.socket_address.set_address(address),
Self::WS(di) => di.socket_address.set_address(address),
Self::WSS(di) => di.socket_address.set_address(address),
}
}
pub fn socket_address(&self) -> SocketAddress {
match self {
Self::UDP(di) => di.socket_address,
Self::TCP(di) => di.socket_address,
Self::WS(di) => di.socket_address,
Self::WSS(di) => di.socket_address,
}
}
pub fn to_ip_addr(&self) -> IpAddr {
match self {
Self::UDP(di) => di.socket_address.to_ip_addr(),
Self::TCP(di) => di.socket_address.to_ip_addr(),
Self::WS(di) => di.socket_address.to_ip_addr(),
Self::WSS(di) => di.socket_address.to_ip_addr(),
}
}
pub fn port(&self) -> u16 {
match self {
Self::UDP(di) => di.socket_address.port(),
Self::TCP(di) => di.socket_address.port(),
Self::WS(di) => di.socket_address.port(),
Self::WSS(di) => di.socket_address.port(),
}
}
pub fn set_port(&mut self, port: u16) {
match self {
Self::UDP(di) => di.socket_address.set_port(port),
Self::TCP(di) => di.socket_address.set_port(port),
Self::WS(di) => di.socket_address.set_port(port),
Self::WSS(di) => di.socket_address.set_port(port),
}
}
pub fn to_socket_addr(&self) -> SocketAddr {
match self {
Self::UDP(di) => di.socket_address.to_socket_addr(),
Self::TCP(di) => di.socket_address.to_socket_addr(),
Self::WS(di) => di.socket_address.to_socket_addr(),
Self::WSS(di) => di.socket_address.to_socket_addr(),
}
}
pub fn to_peer_address(&self) -> PeerAddress {
match self {
Self::UDP(di) => PeerAddress::new(di.socket_address, ProtocolType::UDP),
Self::TCP(di) => PeerAddress::new(di.socket_address, ProtocolType::TCP),
Self::WS(di) => PeerAddress::new(di.socket_address, ProtocolType::WS),
Self::WSS(di) => PeerAddress::new(di.socket_address, ProtocolType::WSS),
}
}
pub fn request(&self) -> Option<String> {
match self {
Self::UDP(_) => None,
Self::TCP(_) => None,
Self::WS(di) => Some(format!("ws://{}", di.request)),
Self::WSS(di) => Some(format!("wss://{}", di.request)),
}
}
pub fn is_valid(&self) -> bool {
let socket_address = self.socket_address();
let address = socket_address.address();
let port = socket_address.port();
(address.is_global() || address.is_local()) && port > 0
}
pub fn make_filter(&self) -> DialInfoFilter {
DialInfoFilter {
protocol_type_set: ProtocolTypeSet::only(self.protocol_type()),
address_type_set: AddressTypeSet::only(self.address_type()),
}
}
pub fn try_vec_from_short<S: AsRef<str>, H: AsRef<str>>(
short: S,
hostname: H,
) -> VeilidAPIResult<Vec<Self>> {
let short = short.as_ref();
let hostname = hostname.as_ref();
if short.len() < 2 {
apibail_parse_error!("invalid short url length", short);
}
let url = match &short[0..1] {
"U" => {
format!("udp://{}:{}", hostname, &short[1..])
}
"T" => {
format!("tcp://{}:{}", hostname, &short[1..])
}
"W" => {
format!("ws://{}:{}", hostname, &short[1..])
}
"S" => {
format!("wss://{}:{}", hostname, &short[1..])
}
_ => {
apibail_parse_error!("invalid short url type", short);
}
};
Self::try_vec_from_url(url)
}
pub fn try_vec_from_url<S: AsRef<str>>(url: S) -> VeilidAPIResult<Vec<Self>> {
let url = url.as_ref();
let split_url = SplitUrl::from_str(url)
.map_err(|e| VeilidAPIError::parse_error(format!("unable to split url: {}", e), url))?;
let port = match split_url.scheme.as_str() {
"udp" | "tcp" => split_url
.port
.ok_or_else(|| VeilidAPIError::parse_error("Missing port in udp url", url))?,
"ws" => split_url.port.unwrap_or(80u16),
"wss" => split_url.port.unwrap_or(443u16),
_ => {
apibail_parse_error!("Invalid dial info url scheme", split_url.scheme);
}
};
let socket_addrs = {
// Resolve if possible; WASM doesn't support resolution and doesn't need it to connect to the dialinfo
// This will not be used on signed dialinfo, only for bootstrapping, so we don't need to worry about
// the '0.0.0.0' address being propagated across the routing table
cfg_if::cfg_if! {
if #[cfg(target_arch = "wasm32")] {
vec![SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0,0,0,0)), port)]
} else {
match split_url.host {
SplitUrlHost::Hostname(_) => split_url
.host_port(port)
.to_socket_addrs()
.map_err(|_| VeilidAPIError::parse_error("couldn't resolve hostname in url", url))?
.collect(),
SplitUrlHost::IpAddr(a) => vec![SocketAddr::new(a, port)],
}
}
}
};
let mut out = Vec::new();
for sa in socket_addrs {
out.push(match split_url.scheme.as_str() {
"udp" => Self::udp_from_socketaddr(sa),
"tcp" => Self::tcp_from_socketaddr(sa),
"ws" => Self::try_ws(
SocketAddress::from_socket_addr(sa).to_canonical(),
url.to_string(),
)?,
"wss" => Self::try_wss(
SocketAddress::from_socket_addr(sa).to_canonical(),
url.to_string(),
)?,
_ => {
unreachable!("Invalid dial info url scheme")
}
});
}
Ok(out)
}
pub async fn to_short(&self) -> (String, String) {
match self {
DialInfo::UDP(di) => (
format!("U{}", di.socket_address.port()),
intf::ptr_lookup(di.socket_address.to_ip_addr())
.await
.unwrap_or_else(|_| di.socket_address.to_string()),
),
DialInfo::TCP(di) => (
format!("T{}", di.socket_address.port()),
intf::ptr_lookup(di.socket_address.to_ip_addr())
.await
.unwrap_or_else(|_| di.socket_address.to_string()),
),
DialInfo::WS(di) => {
let mut split_url = SplitUrl::from_str(&format!("ws://{}", di.request)).unwrap();
if let SplitUrlHost::IpAddr(a) = split_url.host {
if let Ok(host) = intf::ptr_lookup(a).await {
split_url.host = SplitUrlHost::Hostname(host);
}
}
(
format!(
"W{}{}",
split_url.port.unwrap_or(80),
split_url
.path
.map(|p| format!("/{}", p))
.unwrap_or_default()
),
split_url.host.to_string(),
)
}
DialInfo::WSS(di) => {
let mut split_url = SplitUrl::from_str(&format!("wss://{}", di.request)).unwrap();
if let SplitUrlHost::IpAddr(a) = split_url.host {
if let Ok(host) = intf::ptr_lookup(a).await {
split_url.host = SplitUrlHost::Hostname(host);
}
}
(
format!(
"S{}{}",
split_url.port.unwrap_or(443),
split_url
.path
.map(|p| format!("/{}", p))
.unwrap_or_default()
),
split_url.host.to_string(),
)
}
}
}
pub async fn to_url(&self) -> String {
match self {
DialInfo::UDP(di) => intf::ptr_lookup(di.socket_address.to_ip_addr())
.await
.map(|h| format!("udp://{}:{}", h, di.socket_address.port()))
.unwrap_or_else(|_| format!("udp://{}", di.socket_address)),
DialInfo::TCP(di) => intf::ptr_lookup(di.socket_address.to_ip_addr())
.await
.map(|h| format!("tcp://{}:{}", h, di.socket_address.port()))
.unwrap_or_else(|_| format!("tcp://{}", di.socket_address)),
DialInfo::WS(di) => {
let mut split_url = SplitUrl::from_str(&format!("ws://{}", di.request)).unwrap();
if let SplitUrlHost::IpAddr(a) = split_url.host {
if let Ok(host) = intf::ptr_lookup(a).await {
split_url.host = SplitUrlHost::Hostname(host);
}
}
split_url.to_string()
}
DialInfo::WSS(di) => {
let mut split_url = SplitUrl::from_str(&format!("wss://{}", di.request)).unwrap();
if let SplitUrlHost::IpAddr(a) = split_url.host {
if let Ok(host) = intf::ptr_lookup(a).await {
split_url.host = SplitUrlHost::Hostname(host);
}
}
split_url.to_string()
}
}
}
pub fn ordered_sequencing_sort(a: &DialInfo, b: &DialInfo) -> core::cmp::Ordering {
let ca = a.protocol_type().sort_order(Sequencing::EnsureOrdered);
let cb = b.protocol_type().sort_order(Sequencing::EnsureOrdered);
if ca < cb {
return core::cmp::Ordering::Less;
}
if ca > cb {
return core::cmp::Ordering::Greater;
}
match (a, b) {
(DialInfo::UDP(a), DialInfo::UDP(b)) => a.cmp(b),
(DialInfo::TCP(a), DialInfo::TCP(b)) => a.cmp(b),
(DialInfo::WS(a), DialInfo::WS(b)) => a.cmp(b),
(DialInfo::WSS(a), DialInfo::WSS(b)) => a.cmp(b),
_ => unreachable!(),
}
}
}
impl MatchesDialInfoFilter for DialInfo {
fn matches_filter(&self, filter: &DialInfoFilter) -> bool {
if !filter.protocol_type_set.contains(self.protocol_type()) {
return false;
}
if !filter.address_type_set.contains(self.address_type()) {
return false;
}
true
}
}
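
A minimal sketch of the canonical `proto|address` string form and the short bootstrap form defined above, using an IP literal so no DNS lookup is involved; the helper is illustrative:

```rust
// Sketch only: uses an IP literal so that no DNS lookup is performed.
fn example_dial_info_strings() -> VeilidAPIResult<()> {
    // canonical "proto|address" form used by Display and FromStr
    let di = DialInfo::from_str("udp|203.0.113.7:5150")?;
    assert_eq!(di.protocol_type(), ProtocolType::UDP);
    assert_eq!(di.port(), 5150);
    assert_eq!(DialInfo::from_str(&di.to_string())?, di);

    // short bootstrap form: one-letter protocol code plus port, resolved
    // against a separately supplied host
    let v = DialInfo::try_vec_from_short("U5150", "203.0.113.7")?;
    assert_eq!(v.len(), 1);
    assert_eq!(v[0], di);

    assert!(DialInfo::from_str("bogus|1.2.3.4:1").is_err());
    Ok(())
}
```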

View File

@ -0,0 +1,21 @@
use super::*;
#[derive(
Clone,
Default,
Debug,
PartialEq,
PartialOrd,
Ord,
Eq,
Hash,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct DialInfoTCP {
pub socket_address: SocketAddress,
}

View File

@ -0,0 +1,21 @@
use super::*;
#[derive(
Clone,
Default,
Debug,
PartialEq,
PartialOrd,
Ord,
Eq,
Hash,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct DialInfoUDP {
pub socket_address: SocketAddress,
}

View File

@ -0,0 +1,22 @@
use super::*;
#[derive(
Clone,
Default,
Debug,
PartialEq,
PartialOrd,
Ord,
Eq,
Hash,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct DialInfoWS {
pub socket_address: SocketAddress,
pub request: String,
}

View File

@ -0,0 +1,22 @@
use super::*;
#[derive(
Clone,
Default,
Debug,
PartialEq,
PartialOrd,
Ord,
Eq,
Hash,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct DialInfoWSS {
pub socket_address: SocketAddress,
pub request: String,
}

View File

@ -0,0 +1,50 @@
use super::*;
// Keep member order appropriate for sorting < preference
#[derive(
Copy,
Clone,
Debug,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[archive_attr(repr(u8), derive(CheckBytes))]
pub enum DialInfoClass {
Direct = 0, // D = Directly reachable with public IP and no firewall, with statically configured port
Mapped = 1, // M = Directly reachable via portmap behind any NAT or firewall, with dynamically negotiated port
FullConeNAT = 2, // F = Directly reachable device without portmap behind full-cone NAT
Blocked = 3, // B = Inbound blocked at firewall but may hole punch with public address
AddressRestrictedNAT = 4, // A = Device without portmap behind address-only restricted NAT
PortRestrictedNAT = 5, // P = Device without portmap behind address-and-port restricted NAT
}
impl DialInfoClass {
// Is a signal required to do an inbound hole-punch?
pub fn requires_signal(&self) -> bool {
matches!(
self,
Self::Blocked | Self::AddressRestrictedNAT | Self::PortRestrictedNAT
)
}
// Does a relay node need to be allocated for this dial info?
// For full cone NAT, the relay itself may not be used but the keepalive sent to it
// is required to keep the NAT mapping valid in the router state table
pub fn requires_relay(&self) -> bool {
matches!(
self,
Self::FullConeNAT
| Self::Blocked
| Self::AddressRestrictedNAT
| Self::PortRestrictedNAT
)
}
}
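
A minimal sketch of how these two predicates could be consumed, assuming `DialInfoClass` is in scope:

```rust
// Sketch only: assumes DialInfoClass is in scope (e.g. within routing_table::types).
fn plan_connectivity(class: DialInfoClass) {
    if class.requires_relay() {
        // For FullConeNAT the relay path itself may go unused; the keepalive
        // to the relay is what keeps the NAT mapping alive.
        println!("{:?}: keep an inbound relay available", class);
    }
    if class.requires_signal() {
        // A signal (reverse connect or hole punch) must precede inbound connections.
        println!("{:?}: send a signal before connecting inbound", class);
    }
}
```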

View File

@ -0,0 +1,86 @@
use super::*;
#[derive(
Copy,
Clone,
PartialEq,
Eq,
PartialOrd,
Ord,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct DialInfoFilter {
#[with(RkyvEnumSet)]
pub protocol_type_set: ProtocolTypeSet,
#[with(RkyvEnumSet)]
pub address_type_set: AddressTypeSet,
}
impl Default for DialInfoFilter {
fn default() -> Self {
Self {
protocol_type_set: ProtocolTypeSet::all(),
address_type_set: AddressTypeSet::all(),
}
}
}
impl DialInfoFilter {
pub fn all() -> Self {
Self {
protocol_type_set: ProtocolTypeSet::all(),
address_type_set: AddressTypeSet::all(),
}
}
pub fn with_protocol_type(mut self, protocol_type: ProtocolType) -> Self {
self.protocol_type_set = ProtocolTypeSet::only(protocol_type);
self
}
pub fn with_protocol_type_set(mut self, protocol_set: ProtocolTypeSet) -> Self {
self.protocol_type_set = protocol_set;
self
}
pub fn with_address_type(mut self, address_type: AddressType) -> Self {
self.address_type_set = AddressTypeSet::only(address_type);
self
}
pub fn with_address_type_set(mut self, address_set: AddressTypeSet) -> Self {
self.address_type_set = address_set;
self
}
pub fn filtered(mut self, other_dif: &DialInfoFilter) -> Self {
self.protocol_type_set &= other_dif.protocol_type_set;
self.address_type_set &= other_dif.address_type_set;
self
}
pub fn is_dead(&self) -> bool {
self.protocol_type_set.is_empty() || self.address_type_set.is_empty()
}
}
impl fmt::Debug for DialInfoFilter {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
let mut out = String::new();
if self.protocol_type_set != ProtocolTypeSet::all() {
out += &format!("+{:?}", self.protocol_type_set);
} else {
out += "*";
}
if self.address_type_set != AddressTypeSet::all() {
out += &format!("+{:?}", self.address_type_set);
} else {
out += "*";
}
write!(f, "[{}]", out)
}
}
pub trait MatchesDialInfoFilter {
fn matches_filter(&self, filter: &DialInfoFilter) -> bool;
}
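
A minimal sketch of filter composition, assuming the types above are in scope; intersecting disjoint protocol sets produces a dead filter:

```rust
// Sketch: narrow to UDP over IPv4, then intersect with a TCP-only filter.
let udp_v4 = DialInfoFilter::all()
    .with_protocol_type(ProtocolType::UDP)
    .with_address_type(AddressType::IPV4);
assert!(!udp_v4.is_dead());

let tcp_only = DialInfoFilter::all().with_protocol_type(ProtocolType::TCP);
// The protocol sets are disjoint, so the intersection is a dead filter.
assert!(udp_v4.filtered(&tcp_only).is_dead());
```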

View File

@ -0,0 +1,31 @@
use super::*;
// Keep member order appropriate for sorting < preference
// Must match DialInfo order
#[allow(clippy::derive_hash_xor_eq)]
#[derive(
Debug,
PartialOrd,
Ord,
Hash,
EnumSetType,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[enumset(repr = "u8")]
#[archive_attr(repr(u8), derive(CheckBytes))]
pub enum LowLevelProtocolType {
UDP,
TCP,
}
impl LowLevelProtocolType {
pub fn is_connection_oriented(&self) -> bool {
matches!(self, LowLevelProtocolType::TCP)
}
}
// pub type LowLevelProtocolTypeSet = EnumSet<LowLevelProtocolType>;

View File

@ -0,0 +1,27 @@
mod address;
mod address_type;
mod connection_descriptor;
mod dial_info;
mod dial_info_class;
mod dial_info_filter;
mod low_level_protocol_type;
mod network_class;
mod peer_address;
mod protocol_type;
mod signal_info;
mod socket_address;
use super::*;
pub use address::*;
pub use address_type::*;
pub use connection_descriptor::*;
pub use dial_info::*;
pub use dial_info_class::*;
pub use dial_info_filter::*;
pub use low_level_protocol_type::*;
pub use network_class::*;
pub use peer_address::*;
pub use protocol_type::*;
pub use signal_info::*;
pub use socket_address::*;

View File

@ -0,0 +1,37 @@
use super::*;
#[derive(
Copy,
Clone,
Debug,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[archive_attr(repr(u8), derive(CheckBytes))]
pub enum NetworkClass {
InboundCapable = 0, // I = Inbound capable without relay, may require signal
OutboundOnly = 1, // O = Outbound only, inbound relay required except with reverse connect signal
WebApp = 2, // W = PWA, outbound relay is required in most cases
Invalid = 3, // X = Invalid network class, we don't know how to reach this node
}
impl Default for NetworkClass {
fn default() -> Self {
Self::Invalid
}
}
impl NetworkClass {
// Should an outbound relay be kept available?
pub fn outbound_wants_relay(&self) -> bool {
matches!(self, Self::WebApp)
}
}
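
A small sketch of the intended semantics, assuming `NetworkClass` is in scope:

```rust
// Sketch: only WebApp nodes want to keep an outbound relay available,
// and the default class is Invalid (reachability unknown).
assert!(NetworkClass::WebApp.outbound_wants_relay());
assert!(!NetworkClass::InboundCapable.outbound_wants_relay());
assert!(matches!(NetworkClass::default(), NetworkClass::Invalid));
```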

View File

@ -0,0 +1,66 @@
use super::*;
#[derive(
Copy,
Clone,
Debug,
PartialEq,
PartialOrd,
Eq,
Ord,
Hash,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct PeerAddress {
protocol_type: ProtocolType,
#[serde(with = "json_as_string")]
socket_address: SocketAddress,
}
impl PeerAddress {
pub fn new(socket_address: SocketAddress, protocol_type: ProtocolType) -> Self {
Self {
socket_address: socket_address.to_canonical(),
protocol_type,
}
}
pub fn socket_address(&self) -> &SocketAddress {
&self.socket_address
}
pub fn protocol_type(&self) -> ProtocolType {
self.protocol_type
}
pub fn to_socket_addr(&self) -> SocketAddr {
self.socket_address.to_socket_addr()
}
pub fn address_type(&self) -> AddressType {
self.socket_address.address_type()
}
}
impl fmt::Display for PeerAddress {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}:{}", self.protocol_type, self.socket_address)
}
}
impl FromStr for PeerAddress {
type Err = VeilidAPIError;
fn from_str(s: &str) -> VeilidAPIResult<PeerAddress> {
let Some((first, second)) = s.split_once(':') else {
return Err(VeilidAPIError::parse_error("PeerAddress is missing a colon: {}", s));
};
let protocol_type = ProtocolType::from_str(first)?;
let socket_address = SocketAddress::from_str(second)?;
Ok(PeerAddress::new(socket_address, protocol_type))
}
}
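
The `Display`/`FromStr` pair round-trips the `PROTOCOL:ip:port` form; a brief sketch, assuming these types and `FromStr` are in scope:

```rust
// Sketch: round-trip a PeerAddress through its "PROTOCOL:ip:port" string form.
let pa = PeerAddress::from_str("TCP:127.0.0.1:5150").expect("should parse");
assert!(matches!(pa.protocol_type(), ProtocolType::TCP));
assert_eq!(pa.socket_address().port(), 5150);
assert_eq!(pa.to_string(), "TCP:127.0.0.1:5150");
```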

View File

@ -0,0 +1,104 @@
use super::*;
// Keep member order appropriate for sorting < preference
// Must match DialInfo order
#[allow(clippy::derive_hash_xor_eq)]
#[derive(
Debug,
PartialOrd,
Ord,
Hash,
EnumSetType,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[enumset(repr = "u8")]
#[archive_attr(repr(u8), derive(CheckBytes))]
pub enum ProtocolType {
UDP,
TCP,
WS,
WSS,
}
impl ProtocolType {
pub fn is_connection_oriented(&self) -> bool {
matches!(
self,
ProtocolType::TCP | ProtocolType::WS | ProtocolType::WSS
)
}
pub fn low_level_protocol_type(&self) -> LowLevelProtocolType {
match self {
ProtocolType::UDP => LowLevelProtocolType::UDP,
ProtocolType::TCP | ProtocolType::WS | ProtocolType::WSS => LowLevelProtocolType::TCP,
}
}
pub fn sort_order(&self, sequencing: Sequencing) -> usize {
match self {
ProtocolType::UDP => {
if sequencing != Sequencing::NoPreference {
3
} else {
0
}
}
ProtocolType::TCP => {
if sequencing != Sequencing::NoPreference {
0
} else {
1
}
}
ProtocolType::WS => {
if sequencing != Sequencing::NoPreference {
1
} else {
2
}
}
ProtocolType::WSS => {
if sequencing != Sequencing::NoPreference {
2
} else {
3
}
}
}
}
pub fn all_ordered_set() -> ProtocolTypeSet {
ProtocolType::TCP | ProtocolType::WS | ProtocolType::WSS
}
}
impl fmt::Display for ProtocolType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ProtocolType::UDP => write!(f, "UDP"),
ProtocolType::TCP => write!(f, "TCP"),
ProtocolType::WS => write!(f, "WS"),
ProtocolType::WSS => write!(f, "WSS"),
}
}
}
impl FromStr for ProtocolType {
type Err = VeilidAPIError;
fn from_str(s: &str) -> VeilidAPIResult<ProtocolType> {
match s.to_ascii_uppercase().as_str() {
"UDP" => Ok(ProtocolType::UDP),
"TCP" => Ok(ProtocolType::TCP),
"WS" => Ok(ProtocolType::WS),
"WSS" => Ok(ProtocolType::WSS),
_ => Err(VeilidAPIError::parse_error(
"ProtocolType::from_str failed",
s,
)),
}
}
}
pub type ProtocolTypeSet = EnumSet<ProtocolType>;
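
A short sketch of the sequencing-dependent ordering above, assuming `ProtocolType` and `Sequencing` are in scope:

```rust
// Sketch: sequencing preference flips the relative order of UDP and TCP.
assert!(
    ProtocolType::TCP.sort_order(Sequencing::EnsureOrdered)
        < ProtocolType::UDP.sort_order(Sequencing::EnsureOrdered)
);
assert!(
    ProtocolType::UDP.sort_order(Sequencing::NoPreference)
        < ProtocolType::TCP.sort_order(Sequencing::NoPreference)
);
// Only the connection-oriented protocols remain when ordering must be preserved.
assert_eq!(
    ProtocolType::all_ordered_set(),
    ProtocolType::TCP | ProtocolType::WS | ProtocolType::WSS
);
```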

View File

@ -0,0 +1,51 @@
use super::*;
/// Parameter for Signal operation
#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
#[archive_attr(repr(u8), derive(CheckBytes))]
pub enum SignalInfo {
/// UDP Hole Punch Request
HolePunch {
        /// Receipt to be returned after the hole punch
receipt: Vec<u8>,
/// Sender's peer info
peer_info: PeerInfo,
},
/// Reverse Connection Request
ReverseConnect {
/// Receipt to be returned by the reverse connection
receipt: Vec<u8>,
/// Sender's peer info
peer_info: PeerInfo,
},
// XXX: WebRTC
}
impl SignalInfo {
pub fn validate(&self, crypto: Crypto) -> Result<(), RPCError> {
match self {
SignalInfo::HolePunch { receipt, peer_info } => {
if receipt.len() < MIN_RECEIPT_SIZE {
return Err(RPCError::protocol("SignalInfo HolePunch receipt too short"));
}
if receipt.len() > MAX_RECEIPT_SIZE {
return Err(RPCError::protocol("SignalInfo HolePunch receipt too long"));
}
peer_info.validate(crypto).map_err(RPCError::protocol)
}
SignalInfo::ReverseConnect { receipt, peer_info } => {
if receipt.len() < MIN_RECEIPT_SIZE {
return Err(RPCError::protocol(
"SignalInfo ReverseConnect receipt too short",
));
}
if receipt.len() > MAX_RECEIPT_SIZE {
return Err(RPCError::protocol(
"SignalInfo ReverseConnect receipt too long",
));
}
peer_info.validate(crypto).map_err(RPCError::protocol)
}
}
}
}

View File

@ -0,0 +1,77 @@
use super::*;
#[derive(
Copy,
Default,
Clone,
Debug,
PartialEq,
PartialOrd,
Ord,
Eq,
Hash,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct SocketAddress {
address: Address,
port: u16,
}
impl SocketAddress {
pub fn new(address: Address, port: u16) -> Self {
Self { address, port }
}
pub fn from_socket_addr(sa: SocketAddr) -> SocketAddress {
Self {
address: Address::from_socket_addr(sa),
port: sa.port(),
}
}
pub fn address(&self) -> Address {
self.address
}
pub fn set_address(&mut self, address: Address) {
self.address = address;
}
pub fn address_type(&self) -> AddressType {
self.address.address_type()
}
pub fn port(&self) -> u16 {
self.port
}
pub fn set_port(&mut self, port: u16) {
self.port = port
}
pub fn to_canonical(&self) -> SocketAddress {
SocketAddress {
address: self.address.to_canonical(),
port: self.port,
}
}
pub fn to_ip_addr(&self) -> IpAddr {
self.address.to_ip_addr()
}
pub fn to_socket_addr(&self) -> SocketAddr {
self.address.to_socket_addr(self.port)
}
}
impl fmt::Display for SocketAddress {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "{}", self.to_socket_addr())
}
}
impl FromStr for SocketAddress {
type Err = VeilidAPIError;
fn from_str(s: &str) -> VeilidAPIResult<SocketAddress> {
let sa = SocketAddr::from_str(s)
.map_err(|e| VeilidAPIError::parse_error("Failed to parse SocketAddress", e))?;
Ok(SocketAddress::from_socket_addr(sa))
}
}
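
A brief round-trip sketch, assuming `SocketAddress` and `AddressType` are in scope:

```rust
// Sketch: parse, inspect, and print a SocketAddress.
let sa = SocketAddress::from_str("192.168.1.10:5150").expect("should parse");
assert_eq!(sa.port(), 5150);
assert!(matches!(sa.address_type(), AddressType::IPV4));
assert_eq!(sa.to_string(), "192.168.1.10:5150");
```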

View File

@ -1,8 +1,6 @@
use super::*;
use core::sync::atomic::{AtomicU32, Ordering};
use rkyv::{
with::Skip, Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize,
};
/// Reliable pings are done with increased spacing between pings
@ -73,7 +71,9 @@ pub struct BucketEntryLocalNetwork {
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct BucketEntryInner {
/// The node ids matching this bucket entry, with the cryptography versions supported by this node as the 'kind' field
node_ids: TypedKeySet,
validated_node_ids: TypedKeySet,
/// The node ids claimed by the remote node that use cryptography versions we do not support
unsupported_node_ids: TypedKeySet,
/// The set of envelope versions supported by the node inclusive of the requirements of any relay the node may be using
envelope_support: Vec<u8>,
/// If this node has updated its SignedNodeInfo since our network
@ -122,9 +122,11 @@ impl BucketEntryInner {
self.node_ref_tracks.remove(&track_id);
}
/// Get node ids
/// Get all node ids
pub fn node_ids(&self) -> TypedKeySet {
self.node_ids.clone()
let mut node_ids = self.validated_node_ids.clone();
node_ids.add_all(&self.unsupported_node_ids);
node_ids
}
/// Add a node id for a particular crypto kind.
@ -132,33 +134,40 @@ impl BucketEntryInner {
/// Returns Ok(None) if no previous existing node id was associated with that crypto kind
/// Returns Err() if this operation would add more crypto kinds than we support
pub fn add_node_id(&mut self, node_id: TypedKey) -> EyreResult<Option<TypedKey>> {
if let Some(old_node_id) = self.node_ids.get(node_id.kind) {
let total_node_id_count = self.validated_node_ids.len() + self.unsupported_node_ids.len();
let node_ids = if VALID_CRYPTO_KINDS.contains(&node_id.kind) {
&mut self.validated_node_ids
} else {
&mut self.unsupported_node_ids
};
if let Some(old_node_id) = node_ids.get(node_id.kind) {
// If this was already there we do nothing
if old_node_id == node_id {
return Ok(None);
}
// Won't change number of crypto kinds
self.node_ids.add(node_id);
node_ids.add(node_id);
return Ok(Some(old_node_id));
}
// Check to ensure we aren't adding more crypto kinds than we support
if self.node_ids.len() == MAX_CRYPTO_KINDS {
if total_node_id_count == MAX_CRYPTO_KINDS {
bail!("too many crypto kinds for this node");
}
self.node_ids.add(node_id);
node_ids.add(node_id);
Ok(None)
}
pub fn best_node_id(&self) -> TypedKey {
self.node_ids.best().unwrap()
self.validated_node_ids.best().unwrap()
}
/// Get crypto kinds
pub fn crypto_kinds(&self) -> Vec<CryptoKind> {
self.node_ids.kinds()
self.validated_node_ids.kinds()
}
/// Compare sets of crypto kinds
pub fn common_crypto_kinds(&self, other: &[CryptoKind]) -> Vec<CryptoKind> {
common_crypto_kinds(&self.node_ids.kinds(), other)
common_crypto_kinds(&self.validated_node_ids.kinds(), other)
}
@ -270,7 +279,7 @@ impl BucketEntryInner {
}
// Update the envelope version support we have to use
let envelope_support = signed_node_info.node_info().envelope_support.clone();
let envelope_support = signed_node_info.node_info().envelope_support().to_vec();
// Update the signed node info
*opt_current_sni = Some(Box::new(signed_node_info));
@ -333,10 +342,12 @@ impl BucketEntryInner {
RoutingDomain::LocalNetwork => &self.local_network.signed_node_info,
RoutingDomain::PublicInternet => &self.public_internet.signed_node_info,
};
opt_current_sni.as_ref().map(|s| PeerInfo {
node_ids: self.node_ids.clone(),
signed_node_info: *s.clone(),
})
// Peer info includes all node ids, even unvalidated ones
let node_ids = self.node_ids();
opt_current_sni.as_ref().map(|s| PeerInfo::new(
node_ids,
*s.clone(),
))
}
pub fn best_routing_domain(
@ -527,7 +538,7 @@ impl BucketEntryInner {
}
}
pub fn set_our_node_info_ts(&mut self, routing_domain: RoutingDomain, seen_ts: Timestamp) {
pub fn set_seen_our_node_info_ts(&mut self, routing_domain: RoutingDomain, seen_ts: Timestamp) {
match routing_domain {
RoutingDomain::LocalNetwork => {
self.local_network.last_seen_our_node_info_ts = seen_ts;
@ -780,12 +791,14 @@ pub struct BucketEntry {
impl BucketEntry {
pub(super) fn new(first_node_id: TypedKey) -> Self {
let now = get_aligned_timestamp();
let mut node_ids = TypedKeySet::new();
node_ids.add(first_node_id);
// First node id should always be one we support since TypedKeySets are sorted and we must have at least one supported key
assert!(VALID_CRYPTO_KINDS.contains(&first_node_id.kind));
let now = get_aligned_timestamp();
let inner = BucketEntryInner {
node_ids,
validated_node_ids: TypedKeySet::from(first_node_id),
unsupported_node_ids: TypedKeySet::new(),
envelope_support: Vec::new(),
updated_since_last_network_change: false,
last_connections: BTreeMap::new(),

View File

@ -2,28 +2,6 @@ use super::*;
use routing_table::tasks::bootstrap::BOOTSTRAP_TXT_VERSION_0;
impl RoutingTable {
pub(crate) fn debug_info_nodeinfo(&self) -> String {
let mut out = String::new();
let inner = self.inner.read();
out += "Routing Table Info:\n";
out += &format!(" Node Ids: {}\n", self.unlocked_inner.node_ids());
out += &format!(
" Self Latency Stats Accounting: {:#?}\n\n",
inner.self_latency_stats_accounting
);
out += &format!(
" Self Transfer Stats Accounting: {:#?}\n\n",
inner.self_transfer_stats_accounting
);
out += &format!(
" Self Transfer Stats: {:#?}\n\n",
inner.self_transfer_stats
);
out
}
pub(crate) async fn debug_info_txtrecord(&self) -> String {
let mut out = String::new();
@ -71,14 +49,34 @@ impl RoutingTable {
node_ids,
some_hostname.unwrap()
);
for short_url in short_urls {
out += &format!(",{}", short_url);
}
out += &short_urls.join(",");
out += "\n";
}
out
}
pub(crate) fn debug_info_nodeinfo(&self) -> String {
let mut out = String::new();
let inner = self.inner.read();
out += "Routing Table Info:\n";
out += &format!(" Node Ids: {}\n", self.unlocked_inner.node_ids());
out += &format!(
" Self Latency Stats Accounting: {:#?}\n\n",
inner.self_latency_stats_accounting
);
out += &format!(
" Self Transfer Stats Accounting: {:#?}\n\n",
inner.self_transfer_stats_accounting
);
out += &format!(
" Self Transfer Stats: {:#?}\n\n",
inner.self_transfer_stats
);
out
}
pub(crate) fn debug_info_dialinfo(&self) -> String {
let ldis = self.dial_info_details(RoutingDomain::LocalNetwork);
let gdis = self.dial_info_details(RoutingDomain::PublicInternet);
@ -132,13 +130,25 @@ impl RoutingTable {
for e in filtered_entries {
let state = e.1.with(inner, |_rti, e| e.state(cur_ts));
out += &format!(
" {} [{}]\n",
" {} [{}] {}\n",
e.0.encode(),
match state {
BucketEntryState::Reliable => "R",
BucketEntryState::Unreliable => "U",
BucketEntryState::Dead => "D",
}
},
e.1.with(inner, |_rti, e| {
e.peer_stats()
.latency
.as_ref()
.map(|l| {
format!(
"{:.2}ms",
timestamp_to_secs(l.average.as_u64()) * 1000.0
)
})
.unwrap_or_else(|| "???.??ms".to_string())
})
);
}
}

View File

@ -0,0 +1,103 @@
use super::*;
impl RoutingTable {
/// Utility to find all closest nodes to a particular key, including possibly our own node and nodes further away from the key than our own, returning their peer info
pub fn find_all_closest_peers(&self, key: TypedKey) -> NetworkResult<Vec<PeerInfo>> {
let Some(own_peer_info) = self.get_own_peer_info(RoutingDomain::PublicInternet) else {
// Our own node info is not yet available, drop this request.
return NetworkResult::service_unavailable();
};
// find N nodes closest to the target node in our routing table
let filter = Box::new(
move |rti: &RoutingTableInner, opt_entry: Option<Arc<BucketEntry>>| {
// Ensure only things that are valid/signed in the PublicInternet domain are returned
rti.filter_has_valid_signed_node_info(
RoutingDomain::PublicInternet,
true,
opt_entry,
)
},
) as RoutingTableEntryFilter;
let filters = VecDeque::from([filter]);
let node_count = {
let c = self.config.get();
c.network.dht.max_find_node_count as usize
};
let closest_nodes = self.find_closest_nodes(
node_count,
key,
filters,
// transform
|rti, entry| {
rti.transform_to_peer_info(RoutingDomain::PublicInternet, &own_peer_info, entry)
},
);
NetworkResult::value(closest_nodes)
}
/// Utility to find nodes that are closer to a key than our own node, returning their peer info
pub fn find_peers_closer_to_key(&self, key: TypedKey) -> NetworkResult<Vec<PeerInfo>> {
// add node information for the requesting node to our routing table
let crypto_kind = key.kind;
let own_node_id = self.node_id(crypto_kind);
// find N nodes closest to the target node in our routing table
// ensure the nodes returned are only the ones closer to the target node than ourself
let Some(vcrypto) = self.crypto().get(crypto_kind) else {
return NetworkResult::invalid_message("unsupported cryptosystem");
};
let own_distance = vcrypto.distance(&own_node_id.value, &key.value);
let filter = Box::new(
move |rti: &RoutingTableInner, opt_entry: Option<Arc<BucketEntry>>| {
// Exclude our own node
let Some(entry) = opt_entry else {
return false;
};
// Ensure only things that are valid/signed in the PublicInternet domain are returned
if !rti.filter_has_valid_signed_node_info(
RoutingDomain::PublicInternet,
true,
Some(entry.clone()),
) {
return false;
}
// Ensure things further from the key than our own node are not included
let Some(entry_node_id) = entry.with(rti, |_rti, e| e.node_ids().get(crypto_kind)) else {
return false;
};
let entry_distance = vcrypto.distance(&entry_node_id.value, &key.value);
if entry_distance >= own_distance {
return false;
}
true
},
) as RoutingTableEntryFilter;
let filters = VecDeque::from([filter]);
let node_count = {
let c = self.config.get();
c.network.dht.max_find_node_count as usize
};
//
let closest_nodes = self.find_closest_nodes(
node_count,
key,
filters,
// transform
|rti, entry| {
entry.unwrap().with(rti, |_rti, e| {
e.make_peer_info(RoutingDomain::PublicInternet).unwrap()
})
},
);
NetworkResult::value(closest_nodes)
}
}

View File

@ -1,6 +1,7 @@
mod bucket;
mod bucket_entry;
mod debug;
mod find_peers;
mod node_ref;
mod node_ref_filter;
mod privacy;
@ -10,16 +11,21 @@ mod routing_domains;
mod routing_table_inner;
mod stats_accounting;
mod tasks;
mod types;
use crate::*;
pub mod tests;
use super::*;
use crate::crypto::*;
use crate::network_manager::*;
use crate::rpc_processor::*;
use bucket::*;
use hashlink::LruCache;
pub use bucket_entry::*;
pub use debug::*;
use hashlink::LruCache;
pub use find_peers::*;
pub use node_ref::*;
pub use node_ref_filter::*;
pub use privacy::*;
@ -28,6 +34,7 @@ pub use routing_domain_editor::*;
pub use routing_domains::*;
pub use routing_table_inner::*;
pub use stats_accounting::*;
pub use types::*;
//////////////////////////////////////////////////////////////////////////
@ -50,6 +57,8 @@ pub struct LowLevelPortInfo {
}
pub type RoutingTableEntryFilter<'t> =
Box<dyn FnMut(&RoutingTableInner, Option<Arc<BucketEntry>>) -> bool + Send + 't>;
pub type SerializedBuckets = Vec<Vec<u8>>;
pub type SerializedBucketMap = BTreeMap<CryptoKind, SerializedBuckets>;
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct RoutingTableHealth {
@ -208,7 +217,7 @@ impl RoutingTable {
unlocked_inner,
};
this.start_tasks();
this.setup_tasks();
this
}
@ -259,7 +268,7 @@ impl RoutingTable {
debug!("starting routing table terminate");
// Stop tasks
self.stop_tasks().await;
self.cancel_tasks().await;
// Load bucket entries from table db if possible
debug!("saving routing table entries");
@ -285,14 +294,14 @@ impl RoutingTable {
debug!("finished routing table terminate");
}
/// Serialize routing table to table store
async fn save_buckets(&self) -> EyreResult<()> {
/// Serialize the routing table.
fn serialized_buckets(&self) -> EyreResult<(SerializedBucketMap, SerializedBuckets)> {
// Since entries are shared by multiple buckets per cryptokind
// we need to get the list of all unique entries when serializing
let mut all_entries: Vec<Arc<BucketEntry>> = Vec::new();
// Serialize all buckets and get map of entries
let mut serialized_bucket_map: BTreeMap<CryptoKind, Vec<Vec<u8>>> = BTreeMap::new();
let mut serialized_bucket_map: SerializedBucketMap = BTreeMap::new();
{
let mut entry_map: HashMap<*const BucketEntry, u32> = HashMap::new();
let inner = &*self.inner.read();
@ -314,38 +323,55 @@ impl RoutingTable {
all_entry_bytes.push(entry_bytes);
}
Ok((serialized_bucket_map, all_entry_bytes))
}
/// Write the serialized routing table to the table store.
async fn save_buckets(&self) -> EyreResult<()> {
let (serialized_bucket_map, all_entry_bytes) = self.serialized_buckets()?;
let table_store = self.unlocked_inner.network_manager().table_store();
let tdb = table_store.open("routing_table", 1).await?;
let dbx = tdb.transact();
if let Err(e) = dbx.store_rkyv(0, b"serialized_bucket_map", &serialized_bucket_map) {
dbx.rollback();
return Err(e);
return Err(e.into());
}
if let Err(e) = dbx.store_rkyv(0, b"all_entry_bytes", &all_entry_bytes) {
dbx.rollback();
return Err(e);
return Err(e.into());
}
dbx.commit().await?;
Ok(())
}
/// Deserialize routing table from table store
async fn load_buckets(&self) -> EyreResult<()> {
// Deserialize bucket map and all entries from the table store
let tstore = self.unlocked_inner.network_manager().table_store();
let tdb = tstore.open("routing_table", 1).await?;
let Some(serialized_bucket_map): Option<BTreeMap<CryptoKind, Vec<Vec<u8>>>> = tdb.load_rkyv(0, b"serialized_bucket_map")? else {
let Some(serialized_bucket_map): Option<SerializedBucketMap> = tdb.load_rkyv(0, b"serialized_bucket_map").await? else {
log_rtab!(debug "no bucket map in saved routing table");
return Ok(());
};
let Some(all_entry_bytes): Option<Vec<Vec<u8>>> = tdb.load_rkyv(0, b"all_entry_bytes")? else {
let Some(all_entry_bytes): Option<SerializedBuckets> = tdb.load_rkyv(0, b"all_entry_bytes").await? else {
log_rtab!(debug "no all_entry_bytes in saved routing table");
return Ok(());
};
// Reconstruct all entries
let inner = &mut *self.inner.write();
self.populate_routing_table(inner, serialized_bucket_map, all_entry_bytes)?;
Ok(())
}
/// Write the deserialized table store data to the routing table.
pub fn populate_routing_table(
&self,
inner: &mut RoutingTableInner,
serialized_bucket_map: SerializedBucketMap,
all_entry_bytes: SerializedBuckets,
) -> EyreResult<()> {
let mut all_entries: Vec<Arc<BucketEntry>> = Vec::with_capacity(all_entry_bytes.len());
for entry_bytes in all_entry_bytes {
let entryinner =
@ -789,8 +815,8 @@ impl RoutingTable {
e.with(rti, |_rti, e| {
if let Some(ni) = e.node_info(routing_domain) {
let dif = DialInfoFilter::all()
.with_protocol_type_set(ni.outbound_protocols)
.with_address_type_set(ni.address_types);
.with_protocol_type_set(ni.outbound_protocols())
.with_address_type_set(ni.address_types());
if dial_info.matches_filter(&dif) {
return true;
}
@ -848,7 +874,7 @@ impl RoutingTable {
// does it have some dial info we need?
let filter = |n: &NodeInfo| {
let mut keep = false;
for did in &n.dial_info_detail_list {
for did in n.dial_info_detail_list() {
if matches!(did.dial_info.address_type(), AddressType::IPV4) {
for (n, protocol_type) in protocol_types.iter().enumerate() {
if nodes_proto_v4[n] < max_per_type
@ -961,6 +987,16 @@ impl RoutingTable {
.find_closest_nodes(node_count, node_id, filters, transform)
}
pub fn sort_and_clean_closest_noderefs(
&self,
node_id: TypedKey,
closest_nodes: &mut Vec<NodeRef>,
) {
self.inner
.read()
.sort_and_clean_closest_noderefs(node_id, closest_nodes)
}
#[instrument(level = "trace", skip(self), ret)]
pub fn register_find_node_answer(
&self,
@ -971,12 +1007,12 @@ impl RoutingTable {
let mut out = Vec::<NodeRef>::with_capacity(peers.len());
for p in peers {
// Ensure we're getting back nodes we asked for
if !p.node_ids.kinds().contains(&crypto_kind) {
if !p.node_ids().kinds().contains(&crypto_kind) {
continue;
}
// Don't register our own node
if self.matches_own_node_id(&p.node_ids) {
if self.matches_own_node_id(p.node_ids()) {
continue;
}

View File

@ -170,17 +170,17 @@ pub trait NodeRefBase: Sized {
) -> bool {
self.operate(|_rti, e| e.has_seen_our_node_info_ts(routing_domain, our_node_info_ts))
}
fn set_our_node_info_ts(&self, routing_domain: RoutingDomain, seen_ts: Timestamp) {
self.operate_mut(|_rti, e| e.set_our_node_info_ts(routing_domain, seen_ts));
fn set_seen_our_node_info_ts(&self, routing_domain: RoutingDomain, seen_ts: Timestamp) {
self.operate_mut(|_rti, e| e.set_seen_our_node_info_ts(routing_domain, seen_ts));
}
fn network_class(&self, routing_domain: RoutingDomain) -> Option<NetworkClass> {
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.network_class))
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.network_class()))
}
fn outbound_protocols(&self, routing_domain: RoutingDomain) -> Option<ProtocolTypeSet> {
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.outbound_protocols))
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.outbound_protocols()))
}
fn address_types(&self, routing_domain: RoutingDomain) -> Option<AddressTypeSet> {
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.address_types))
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.address_types()))
}
fn node_info_outbound_filter(&self, routing_domain: RoutingDomain) -> DialInfoFilter {
let mut dif = DialInfoFilter::all();
@ -199,7 +199,7 @@ pub trait NodeRefBase: Sized {
.and_then(|rpi| {
// If relay is ourselves, then return None, because we can't relay through ourselves
// and to contact this node we should have had an existing inbound connection
if rti.unlocked_inner.matches_own_node_id(&rpi.node_ids) {
if rti.unlocked_inner.matches_own_node_id(rpi.node_ids()) {
return None;
}

View File

@ -22,6 +22,13 @@ pub enum RouteNode {
}
impl RouteNode {
pub fn validate(&self, crypto: Crypto) -> VeilidAPIResult<()> {
match self {
RouteNode::NodeId(_) => Ok(()),
RouteNode::PeerInfo(pi) => pi.validate(crypto),
}
}
pub fn node_ref(
&self,
routing_table: RoutingTable,
@ -48,10 +55,10 @@ impl RouteNode {
RouteNode::NodeId(id) => {
format!("{}", TypedKey::new(crypto_kind, *id))
}
RouteNode::PeerInfo(pi) => match pi.node_ids.get(crypto_kind) {
RouteNode::PeerInfo(pi) => match pi.node_ids().get(crypto_kind) {
Some(id) => format!("{}", id),
None => {
format!("({})?{}", crypto_kind, pi.node_ids)
format!("({})?{}", crypto_kind, pi.node_ids())
}
},
}
@ -66,6 +73,11 @@ pub struct RouteHop {
/// The encrypted blob to pass to the next hop as its data (None for stubs)
pub next_hop: Option<RouteHopData>,
}
impl RouteHop {
pub fn validate(&self, crypto: Crypto) -> VeilidAPIResult<()> {
self.node.validate(crypto)
}
}
/// The kind of hops a private route can have
#[derive(Clone, Debug)]
@ -78,6 +90,15 @@ pub enum PrivateRouteHops {
Empty,
}
impl PrivateRouteHops {
pub fn validate(&self, crypto: Crypto) -> VeilidAPIResult<()> {
match self {
PrivateRouteHops::FirstHop(rh) => rh.validate(crypto),
PrivateRouteHops::Data(_) => Ok(()),
PrivateRouteHops::Empty => Ok(()),
}
}
}
/// A private route for receiver privacy
#[derive(Clone, Debug)]
pub struct PrivateRoute {
@ -108,6 +129,10 @@ impl PrivateRoute {
}
}
pub fn validate(&self, crypto: Crypto) -> VeilidAPIResult<()> {
self.hops.validate(crypto)
}
/// Check if this is a stub route
pub fn is_stub(&self) -> bool {
if let PrivateRouteHops::FirstHop(first_hop) = &self.hops {
@ -155,7 +180,7 @@ impl PrivateRoute {
// Get the safety route to use from the spec
Some(match &pr_first_hop.node {
RouteNode::NodeId(n) => TypedKey::new(self.public_key.kind, *n),
RouteNode::PeerInfo(p) => p.node_ids.get(self.public_key.kind).unwrap(),
RouteNode::PeerInfo(p) => p.node_ids().get(self.public_key.kind).unwrap(),
})
}
}

View File

@ -16,9 +16,6 @@ pub use route_spec_store_content::*;
pub use route_stats::*;
use crate::veilid_api::*;
use rkyv::{
with::Skip, Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize,
};
/// The size of the remote private route cache
const REMOTE_PRIVATE_ROUTE_CACHE_SIZE: usize = 1024;

View File

@ -6,7 +6,6 @@ pub struct RouteSpecDetail {
/// Crypto kind
pub crypto_kind: CryptoKind,
/// Secret key
#[with(Skip)]
pub secret_key: SecretKey,
/// Route hops (node id keys)
pub hops: Vec<PublicKey>,

View File

@ -115,7 +115,7 @@ impl RouteSpecStore {
dr
};
let update = VeilidUpdate::Route(VeilidStateRoute {
let update = VeilidUpdate::RouteChange(VeilidRouteChange {
dead_routes,
dead_remote_routes,
});
@ -1550,7 +1550,9 @@ impl RouteSpecStore {
.get_root::<veilid_capnp::private_route::Reader>()
.map_err(RPCError::internal)
.wrap_err("failed to make reader for private_route")?;
let private_route = decode_private_route(&pr_reader, crypto.clone()).wrap_err("failed to decode private route")?;
let private_route = decode_private_route(&pr_reader).wrap_err("failed to decode private route")?;
private_route.validate(crypto.clone()).wrap_err("failed to validate private route")?;
out.push(private_route);
}

View File

@ -23,7 +23,7 @@ impl RouteSpecStoreContent {
let table_store = routing_table.network_manager().table_store();
let rsstdb = table_store.open("RouteSpecStore", 1).await?;
let mut content: RouteSpecStoreContent =
rsstdb.load_rkyv(0, b"content")?.unwrap_or_default();
rsstdb.load_rkyv(0, b"content").await?.unwrap_or_default();
// Look up all route hop noderefs since we can't serialize those
let mut dead_ids = Vec::new();
@ -55,47 +55,6 @@ impl RouteSpecStoreContent {
content.remove_detail(&id);
}
// Load secrets from pstore
let pstore = routing_table.network_manager().protected_store();
let secret_key_map: HashMap<PublicKey, SecretKey> = pstore
.load_user_secret_rkyv("RouteSpecStore")
.await?
.unwrap_or_default();
// Ensure we got secret keys for all the public keys
let mut got_secret_key_ids = HashSet::new();
for (rsid, rssd) in content.details.iter_mut() {
let mut found_all = true;
for (pk, rsd) in rssd.iter_route_set_mut() {
if let Some(sk) = secret_key_map.get(pk) {
rsd.secret_key = *sk;
} else {
found_all = false;
break;
}
}
if found_all {
got_secret_key_ids.insert(rsid.clone());
}
}
// If we missed any, nuke those route ids
let dead_ids: Vec<RouteId> = content
.details
.keys()
.filter_map(|id| {
if !got_secret_key_ids.contains(id) {
Some(*id)
} else {
None
}
})
.collect();
for id in dead_ids {
log_rtab!(debug "missing secret key, killing off private route: {}", id);
content.remove_detail(&id);
}
Ok(content)
}
@ -106,18 +65,6 @@ impl RouteSpecStoreContent {
let rsstdb = table_store.open("RouteSpecStore", 1).await?;
rsstdb.store_rkyv(0, b"content", self).await?;
// Keep secrets in protected store as well
let pstore = routing_table.network_manager().protected_store();
let mut out: HashMap<PublicKey, SecretKey> = HashMap::new();
for (_rsid, rssd) in self.details.iter() {
for (pk, rsd) in rssd.iter_route_set() {
out.insert(*pk, rsd.secret_key);
}
}
let _ = pstore.save_user_secret_rkyv("RouteSpecStore", &out).await?; // ignore if this previously existed or not
Ok(())
}

View File

@ -102,14 +102,14 @@ impl RoutingDomainDetailCommon {
}
fn make_peer_info(&self, rti: &RoutingTableInner) -> PeerInfo {
let node_info = NodeInfo {
network_class: self.network_class.unwrap_or(NetworkClass::Invalid),
outbound_protocols: self.outbound_protocols,
address_types: self.address_types,
envelope_support: VALID_ENVELOPE_VERSIONS.to_vec(),
crypto_support: VALID_CRYPTO_KINDS.to_vec(),
dial_info_detail_list: self.dial_info_details.clone(),
};
let node_info = NodeInfo::new(
self.network_class.unwrap_or(NetworkClass::Invalid),
self.outbound_protocols,
self.address_types,
VALID_ENVELOPE_VERSIONS.to_vec(),
VALID_CRYPTO_KINDS.to_vec(),
self.dial_info_details.clone()
);
let relay_info = self
.relay_node
@ -117,8 +117,9 @@ impl RoutingDomainDetailCommon {
.and_then(|rn| {
let opt_relay_pi = rn.locked(rti).make_peer_info(self.routing_domain);
if let Some(relay_pi) = opt_relay_pi {
match relay_pi.signed_node_info {
SignedNodeInfo::Direct(d) => Some((relay_pi.node_ids, d)),
let (relay_ids, relay_sni) = relay_pi.destructure();
match relay_sni {
SignedNodeInfo::Direct(d) => Some((relay_ids, d)),
SignedNodeInfo::Relayed(_) => {
warn!("relay node should not have a relay itself! if this happens, a relay updated its signed node info and became a relay, which should cause the relay to be dropped");
None
@ -230,8 +231,8 @@ fn first_filtered_dial_info_detail(
) -> Option<DialInfoDetail> {
let dial_info_filter = dial_info_filter.clone().filtered(
&DialInfoFilter::all()
.with_address_type_set(from_node.address_types)
.with_protocol_type_set(from_node.outbound_protocols),
.with_address_type_set(from_node.address_types())
.with_protocol_type_set(from_node.outbound_protocols()),
);
// Get first filtered dialinfo
@ -278,18 +279,18 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
sequencing: Sequencing,
) -> ContactMethod {
// Get the nodeinfos for convenience
let node_a = peer_a.signed_node_info.node_info();
let node_b = peer_b.signed_node_info.node_info();
let node_a = peer_a.signed_node_info().node_info();
let node_b = peer_b.signed_node_info().node_info();
// Get the node ids that would be used between these peers
let cck = common_crypto_kinds(&peer_a.node_ids.kinds(), &peer_b.node_ids.kinds());
let cck = common_crypto_kinds(&peer_a.node_ids().kinds(), &peer_b.node_ids().kinds());
let Some(best_ck) = cck.first().copied() else {
// No common crypto kinds between these nodes, can't contact
return ContactMethod::Unreachable;
};
//let node_a_id = peer_a.node_ids.get(best_ck).unwrap();
let node_b_id = peer_b.node_ids.get(best_ck).unwrap();
//let node_a_id = peer_a.node_ids().get(best_ck).unwrap();
let node_b_id = peer_b.node_ids().get(best_ck).unwrap();
// Get the best match dial info for node B if we have it
if let Some(target_did) =
@ -302,17 +303,17 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
}
// Get the target's inbound relay, it must have one or it is not reachable
if let Some(node_b_relay) = peer_b.signed_node_info.relay_info() {
if let Some(node_b_relay) = peer_b.signed_node_info().relay_info() {
// Note that relay_peer_info could be node_a, in which case a connection already exists
// and we only get here if the connection had dropped, in which case node_a is unreachable until
// it gets a new relay connection up
if peer_b.signed_node_info.relay_ids().contains_any(&peer_a.node_ids) {
if peer_b.signed_node_info().relay_ids().contains_any(peer_a.node_ids()) {
return ContactMethod::Existing;
}
// Get best node id to contact relay with
let Some(node_b_relay_id) = peer_b.signed_node_info.relay_ids().get(best_ck) else {
let Some(node_b_relay_id) = peer_b.signed_node_info().relay_ids().get(best_ck) else {
// No best relay id
return ContactMethod::Unreachable;
};
@ -327,7 +328,7 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
.is_some()
{
// Can node A receive anything inbound ever?
if matches!(node_a.network_class, NetworkClass::InboundCapable) {
if matches!(node_a.network_class(), NetworkClass::InboundCapable) {
///////// Reverse connection
// Get the best match dial info for an reverse inbound connection from node B to node A
@ -390,17 +391,17 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
}
}
// If the node B has no direct dial info, it needs to have an inbound relay
else if let Some(node_b_relay) = peer_b.signed_node_info.relay_info() {
else if let Some(node_b_relay) = peer_b.signed_node_info().relay_info() {
// Note that relay_peer_info could be node_a, in which case a connection already exists
// and we only get here if the connection had dropped, in which case node_a is unreachable until
// it gets a new relay connection up
if peer_b.signed_node_info.relay_ids().contains_any(&peer_a.node_ids) {
if peer_b.signed_node_info().relay_ids().contains_any(peer_a.node_ids()) {
return ContactMethod::Existing;
}
// Get best node id to contact relay with
let Some(node_b_relay_id) = peer_b.signed_node_info.relay_ids().get(best_ck) else {
let Some(node_b_relay_id) = peer_b.signed_node_info().relay_ids().get(best_ck) else {
// No best relay id
return ContactMethod::Unreachable;
};
@ -419,7 +420,7 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
}
// If node A can't reach the node by other means, it may need to use its own relay
if let Some(node_a_relay_id) = peer_a.signed_node_info.relay_ids().get(best_ck) {
if let Some(node_a_relay_id) = peer_a.signed_node_info().relay_ids().get(best_ck) {
return ContactMethod::OutboundRelay(node_a_relay_id);
}
@ -484,8 +485,8 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail {
// Scope the filter down to protocols node A can do outbound
let dial_info_filter = dial_info_filter.filtered(
&DialInfoFilter::all()
.with_address_type_set(peer_a.signed_node_info.node_info().address_types)
.with_protocol_type_set(peer_a.signed_node_info.node_info().outbound_protocols),
.with_address_type_set(peer_a.signed_node_info().node_info().address_types())
.with_protocol_type_set(peer_a.signed_node_info().node_info().outbound_protocols()),
);
// Get first filtered dialinfo
@ -509,7 +510,7 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail {
let filter = |did: &DialInfoDetail| did.matches_filter(&dial_info_filter);
let opt_target_did = peer_b.signed_node_info.node_info().first_filtered_dial_info_detail(sort, filter);
let opt_target_did = peer_b.signed_node_info().node_info().first_filtered_dial_info_detail(sort, filter);
if let Some(target_did) = opt_target_did {
return ContactMethod::Direct(target_did.dial_info);
}

View File

@ -171,11 +171,11 @@ impl RoutingTableInner {
node_info: &NodeInfo,
) -> bool {
// Should not be passing around nodeinfo with an invalid network class
if matches!(node_info.network_class, NetworkClass::Invalid) {
if matches!(node_info.network_class(), NetworkClass::Invalid) {
return false;
}
// Ensure all of the dial info works in this routing domain
for did in &node_info.dial_info_detail_list {
for did in node_info.dial_info_detail_list() {
if !self.ensure_dial_info_is_valid(routing_domain, &did.dial_info) {
return false;
}
@ -258,7 +258,7 @@ impl RoutingTableInner {
} else {
Some(
rdd.common()
.with_peer_info(self, |pi| pi.signed_node_info.timestamp()),
.with_peer_info(self, |pi| pi.signed_node_info().timestamp()),
)
}
})
@ -557,11 +557,18 @@ impl RoutingTableInner {
.map(|nr| nr.same_bucket_entry(&entry))
.unwrap_or(false);
if e.needs_ping(cur_ts, is_our_relay) {
debug!("needs_ping: {}", e.best_node_id());
return true;
}
// If we need a ping because this node hasn't seen our latest node info, then do it
if let Some(own_node_info_ts) = own_node_info_ts {
if !e.has_seen_our_node_info_ts(routing_domain, own_node_info_ts) {
//xxx remove this when we fix #208
debug!(
"!has_seen_our_node_info_ts: {} own_node_info_ts={}",
e.best_node_id(),
own_node_info_ts
);
return true;
}
}
@ -803,37 +810,42 @@ impl RoutingTableInner {
peer_info: PeerInfo,
allow_invalid: bool,
) -> Option<NodeRef> {
// if our own node if is in the list then ignore it, as we don't add ourselves to our own routing table
if self.unlocked_inner.matches_own_node_id(&peer_info.node_ids) {
// if our own node is in the list, then ignore it as we don't add ourselves to our own routing table
if self
.unlocked_inner
.matches_own_node_id(peer_info.node_ids())
{
log_rtab!(debug "can't register own node id in routing table");
return None;
}
// node can not be its own relay
let rids = peer_info.signed_node_info.relay_ids();
if self.unlocked_inner.matches_own_node_id(&rids) {
let rids = peer_info.signed_node_info().relay_ids();
let nids = peer_info.node_ids();
if nids.contains_any(&rids) {
log_rtab!(debug "node can not be its own relay");
return None;
}
if !allow_invalid {
// verify signature
if !peer_info.signed_node_info.has_any_signature() {
log_rtab!(debug "signed node info for {:?} has invalid signature", &peer_info.node_ids);
if !peer_info.signed_node_info().has_any_signature() {
log_rtab!(debug "signed node info for {:?} has no valid signature", peer_info.node_ids());
return None;
}
// verify signed node info is valid in this routing domain
if !self.signed_node_info_is_valid_in_routing_domain(
routing_domain,
&peer_info.signed_node_info,
peer_info.signed_node_info(),
) {
log_rtab!(debug "signed node info for {:?} not valid in the {:?} routing domain", peer_info.node_ids, routing_domain);
log_rtab!(debug "signed node info for {:?} not valid in the {:?} routing domain", peer_info.node_ids(), routing_domain);
return None;
}
}
self.create_node_ref(outer_self, &peer_info.node_ids, |_rti, e| {
e.update_signed_node_info(routing_domain, peer_info.signed_node_info);
let (node_ids, signed_node_info) = peer_info.destructure();
self.create_node_ref(outer_self, &node_ids, |_rti, e| {
e.update_signed_node_info(routing_domain, signed_node_info);
})
.map(|mut nr| {
nr.set_filter(Some(
@ -1149,7 +1161,6 @@ impl RoutingTableInner {
let vcrypto = self.unlocked_inner.crypto().get(crypto_kind).unwrap();
// Filter to ensure entries support the crypto kind in use
let filter = Box::new(
move |_rti: &RoutingTableInner, opt_entry: Option<Arc<BucketEntry>>| {
if let Some(entry) = opt_entry {
@ -1205,9 +1216,6 @@ impl RoutingTableInner {
};
// distance is the next metric, closer nodes first
// since multiple cryptosystems are in use, the distance for a key is the shortest
// distance to that key over all supported cryptosystems
let da = vcrypto.distance(&a_key.value, &node_id.value);
let db = vcrypto.distance(&b_key.value, &node_id.value);
da.cmp(&db)
@ -1218,4 +1226,71 @@ impl RoutingTableInner {
log_rtab!(">> find_closest_nodes: node count = {}", out.len());
out
}
pub fn sort_and_clean_closest_noderefs(
&self,
node_id: TypedKey,
closest_nodes: &mut Vec<NodeRef>,
) {
// Lock all noderefs
let kind = node_id.kind;
let mut closest_nodes_locked: Vec<NodeRefLocked> = closest_nodes
.iter()
.filter_map(|x| {
if x.node_ids().kinds().contains(&kind) {
Some(x.locked(self))
} else {
None
}
})
.collect();
// Sort closest
let sort = make_closest_noderef_sort(self.unlocked_inner.crypto(), node_id);
closest_nodes_locked.sort_by(sort);
// Unlock noderefs
*closest_nodes = closest_nodes_locked.iter().map(|x| x.unlocked()).collect();
}
}
fn make_closest_noderef_sort(
crypto: Crypto,
node_id: TypedKey,
) -> impl Fn(&NodeRefLocked, &NodeRefLocked) -> core::cmp::Ordering {
let cur_ts = get_aligned_timestamp();
let kind = node_id.kind;
// Get cryptoversion to check distance with
let vcrypto = crypto.get(kind).unwrap();
move |a: &NodeRefLocked, b: &NodeRefLocked| -> core::cmp::Ordering {
// same nodes are always the same
if a.same_entry(b) {
return core::cmp::Ordering::Equal;
}
// reliable nodes come first, pessimistically treating our own node as unreliable
a.operate(|_rti, a_entry| {
b.operate(|_rti, b_entry| {
let ra = a_entry.check_reliable(cur_ts);
let rb = b_entry.check_reliable(cur_ts);
if ra != rb {
if ra {
return core::cmp::Ordering::Less;
} else {
return core::cmp::Ordering::Greater;
}
}
// get keys
let a_key = a_entry.node_ids().get(kind).unwrap();
let b_key = b_entry.node_ids().get(kind).unwrap();
// distance is the next metric, closer nodes first
let da = vcrypto.distance(&a_key.value, &node_id.value);
let db = vcrypto.distance(&b_key.value, &node_id.value);
da.cmp(&db)
})
})
}
}

View File

@ -329,14 +329,15 @@ impl RoutingTable {
let crypto_support = bsrec.node_ids.kinds();
// Make unsigned SignedNodeInfo
let sni = SignedNodeInfo::Direct(SignedDirectNodeInfo::with_no_signature(NodeInfo {
network_class: NetworkClass::InboundCapable, // Bootstraps are always inbound capable
outbound_protocols: ProtocolTypeSet::only(ProtocolType::UDP), // Bootstraps do not participate in relaying and will not make outbound requests, but will have UDP enabled
address_types: AddressTypeSet::all(), // Bootstraps are always IPV4 and IPV6 capable
envelope_support: bsrec.envelope_support, // Envelope support is as specified in the bootstrap list
let sni =
SignedNodeInfo::Direct(SignedDirectNodeInfo::with_no_signature(NodeInfo::new(
NetworkClass::InboundCapable, // Bootstraps are always inbound capable
ProtocolTypeSet::only(ProtocolType::UDP), // Bootstraps do not participate in relaying and will not make outbound requests, but will have UDP enabled
AddressTypeSet::all(), // Bootstraps are always IPV4 and IPV6 capable
bsrec.envelope_support, // Envelope support is as specified in the bootstrap list
crypto_support, // Crypto support is derived from list of node ids
dial_info_detail_list: bsrec.dial_info_details, // Dial info is as specified in the bootstrap list
}));
bsrec.dial_info_details, // Dial info is as specified in the bootstrap list
)));
let pi = PeerInfo::new(bsrec.node_ids, sni);

View File

@ -9,7 +9,7 @@ pub mod rolling_transfers;
use super::*;
impl RoutingTable {
pub(crate) fn start_tasks(&self) {
pub(crate) fn setup_tasks(&self) {
// Set rolling transfers tick task
{
let this = self.clone();
@ -176,7 +176,7 @@ impl RoutingTable {
Ok(())
}
pub(crate) async fn stop_tasks(&self) {
pub(crate) async fn cancel_tasks(&self) {
// Cancel all tasks being ticked
debug!("stopping rolling transfers task");
if let Err(e) = self.unlocked_inner.rolling_transfers_task.stop().await {

View File

@ -13,8 +13,8 @@ impl RoutingTable {
let Some(own_peer_info) = self.get_own_peer_info(RoutingDomain::PublicInternet) else {
return Ok(());
};
let own_node_info = own_peer_info.signed_node_info.node_info();
let network_class = own_node_info.network_class;
let own_node_info = own_peer_info.signed_node_info().node_info();
let network_class = own_node_info.network_class();
// Get routing domain editor
let mut editor = self.edit_routing_domain(RoutingDomain::PublicInternet);

View File

@ -0,0 +1 @@
pub mod test_serialize;

View File

@ -0,0 +1,84 @@
use crate::*;
fn fake_routing_table() -> routing_table::RoutingTable {
let veilid_config = VeilidConfig::new();
let block_store = BlockStore::new(veilid_config.clone());
let protected_store = ProtectedStore::new(veilid_config.clone());
let table_store = TableStore::new(veilid_config.clone(), protected_store.clone());
let crypto = Crypto::new(veilid_config.clone(), table_store.clone());
let storage_manager = storage_manager::StorageManager::new(
veilid_config.clone(),
crypto.clone(),
protected_store.clone(),
table_store.clone(),
block_store.clone(),
);
let network_manager = network_manager::NetworkManager::new(
veilid_config.clone(),
storage_manager,
protected_store.clone(),
table_store.clone(),
block_store.clone(),
crypto.clone(),
);
routing_table::RoutingTable::new(network_manager)
}
pub async fn test_routingtable_buckets_round_trip() {
let original = fake_routing_table();
let copy = fake_routing_table();
original.init().await.unwrap();
copy.init().await.unwrap();
// Add lots of routes to `original` here to exercise all the various types.
let (serialized_bucket_map, all_entry_bytes) = original.serialized_buckets().unwrap();
copy.populate_routing_table(
&mut copy.inner.write(),
serialized_bucket_map,
all_entry_bytes,
)
.unwrap();
// Wrap to close lifetime of 'inner' which is borrowed here so terminate() can succeed
// (it also .write() locks routing table inner)
{
let original_inner = &*original.inner.read();
let copy_inner = &*copy.inner.read();
let routing_table_keys: Vec<_> = original_inner.buckets.keys().clone().collect();
let copy_keys: Vec<_> = copy_inner.buckets.keys().clone().collect();
assert_eq!(routing_table_keys.len(), copy_keys.len());
for crypto in routing_table_keys {
// The same keys are present in the original and copy RoutingTables.
let original_buckets = original_inner.buckets.get(&crypto).unwrap();
let copy_buckets = copy_inner.buckets.get(&crypto).unwrap();
// Recurse into RoutingTable.inner.buckets
for (left_buckets, right_buckets) in original_buckets.iter().zip(copy_buckets.iter()) {
// Recurse into RoutingTable.inner.buckets.entries
for ((left_crypto, left_entries), (right_crypto, right_entries)) in
left_buckets.entries().zip(right_buckets.entries())
{
assert_eq!(left_crypto, right_crypto);
assert_eq!(
format!("{:?}", left_entries),
format!("{:?}", right_entries)
);
}
}
}
}
// Even if these are mocks, we should still practice good hygiene.
original.terminate().await;
copy.terminate().await;
}
pub async fn test_all() {
test_routingtable_buckets_round_trip().await;
}

View File

@ -0,0 +1,43 @@
use super::*;
// Keep member order appropriate for sorting < preference
#[derive(
Debug,
Clone,
PartialEq,
PartialOrd,
Ord,
Eq,
Hash,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct DialInfoDetail {
pub class: DialInfoClass,
pub dial_info: DialInfo,
}
impl MatchesDialInfoFilter for DialInfoDetail {
fn matches_filter(&self, filter: &DialInfoFilter) -> bool {
self.dial_info.matches_filter(filter)
}
}
impl DialInfoDetail {
pub fn ordered_sequencing_sort(a: &DialInfoDetail, b: &DialInfoDetail) -> core::cmp::Ordering {
if a.class < b.class {
return core::cmp::Ordering::Less;
}
if a.class > b.class {
return core::cmp::Ordering::Greater;
}
DialInfo::ordered_sequencing_sort(&a.dial_info, &b.dial_info)
}
pub const NO_SORT: std::option::Option<
for<'r, 's> fn(&'r DialInfoDetail, &'s DialInfoDetail) -> std::cmp::Ordering,
> = None::<fn(&DialInfoDetail, &DialInfoDetail) -> core::cmp::Ordering>;
}
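
A minimal sketch of the class-first ordering, assuming the dial info types above are in scope:

```rust
// Sketch: a Direct detail sorts ahead of a Blocked one for the same dial info.
let sa = SocketAddress::from_str("10.0.0.1:5150").unwrap();
let direct = DialInfoDetail {
    class: DialInfoClass::Direct,
    dial_info: DialInfo::UDP(DialInfoUDP { socket_address: sa }),
};
let blocked = DialInfoDetail {
    class: DialInfoClass::Blocked,
    dial_info: DialInfo::UDP(DialInfoUDP { socket_address: sa }),
};
assert_eq!(
    DialInfoDetail::ordered_sequencing_sort(&direct, &blocked),
    core::cmp::Ordering::Less
);
```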

View File

@ -0,0 +1,22 @@
use super::*;
#[allow(clippy::derive_hash_xor_eq)]
#[derive(
Debug,
PartialOrd,
Ord,
Hash,
EnumSetType,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[enumset(repr = "u8")]
#[archive_attr(repr(u8), derive(CheckBytes))]
pub enum Direction {
Inbound,
Outbound,
}
pub type DirectionSet = EnumSet<Direction>;

View File

@ -0,0 +1,21 @@
mod dial_info_detail;
mod direction;
mod node_info;
mod node_status;
mod peer_info;
mod routing_domain;
mod signed_direct_node_info;
mod signed_node_info;
mod signed_relayed_node_info;
use super::*;
pub use dial_info_detail::*;
pub use direction::*;
pub use node_info::*;
pub use node_status::*;
pub use peer_info::*;
pub use routing_domain::*;
pub use signed_direct_node_info::*;
pub use signed_node_info::*;
pub use signed_relayed_node_info::*;

View File

@ -0,0 +1,164 @@
use super::*;
#[derive(
Clone, Default, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct NodeInfo {
network_class: NetworkClass,
#[with(RkyvEnumSet)]
outbound_protocols: ProtocolTypeSet,
#[with(RkyvEnumSet)]
address_types: AddressTypeSet,
envelope_support: Vec<u8>,
crypto_support: Vec<CryptoKind>,
dial_info_detail_list: Vec<DialInfoDetail>,
}
impl NodeInfo {
pub fn new(
network_class: NetworkClass,
outbound_protocols: ProtocolTypeSet,
address_types: AddressTypeSet,
envelope_support: Vec<u8>,
crypto_support: Vec<CryptoKind>,
dial_info_detail_list: Vec<DialInfoDetail>,
) -> Self {
Self {
network_class,
outbound_protocols,
address_types,
envelope_support,
crypto_support,
dial_info_detail_list,
}
}
pub fn network_class(&self) -> NetworkClass {
self.network_class
}
pub fn outbound_protocols(&self) -> ProtocolTypeSet {
self.outbound_protocols
}
pub fn address_types(&self) -> AddressTypeSet {
self.address_types
}
pub fn envelope_support(&self) -> &[u8] {
&self.envelope_support
}
pub fn crypto_support(&self) -> &[CryptoKind] {
&self.crypto_support
}
pub fn dial_info_detail_list(&self) -> &[DialInfoDetail] {
&self.dial_info_detail_list
}
pub fn first_filtered_dial_info_detail<S, F>(
&self,
sort: Option<S>,
filter: F,
) -> Option<DialInfoDetail>
where
S: Fn(&DialInfoDetail, &DialInfoDetail) -> std::cmp::Ordering,
F: Fn(&DialInfoDetail) -> bool,
{
if let Some(sort) = sort {
let mut dids = self.dial_info_detail_list.clone();
dids.sort_by(sort);
for did in dids {
if filter(&did) {
return Some(did);
}
}
} else {
for did in &self.dial_info_detail_list {
if filter(did) {
return Some(did.clone());
}
}
};
None
}
pub fn all_filtered_dial_info_details<S, F>(
&self,
sort: Option<S>,
filter: F,
) -> Vec<DialInfoDetail>
where
S: Fn(&DialInfoDetail, &DialInfoDetail) -> std::cmp::Ordering,
F: Fn(&DialInfoDetail) -> bool,
{
let mut dial_info_detail_list = Vec::new();
if let Some(sort) = sort {
let mut dids = self.dial_info_detail_list.clone();
dids.sort_by(sort);
for did in dids {
if filter(&did) {
dial_info_detail_list.push(did);
}
}
} else {
for did in &self.dial_info_detail_list {
if filter(did) {
dial_info_detail_list.push(did.clone());
}
}
};
dial_info_detail_list
}
    /// Does this node have some dial info
pub fn has_dial_info(&self) -> bool {
!self.dial_info_detail_list.is_empty()
}
    /// Is a relay required, either for signalling, inbound relay, or outbound relay?
pub fn requires_relay(&self) -> bool {
match self.network_class {
NetworkClass::InboundCapable => {
for did in &self.dial_info_detail_list {
if did.class.requires_relay() {
return true;
}
}
}
NetworkClass::OutboundOnly => {
return true;
}
NetworkClass::WebApp => {
return true;
}
NetworkClass::Invalid => {}
}
false
}
    /// Can this node assist with signalling? Yes, but only if it doesn't require signalling itself.
pub fn can_signal(&self) -> bool {
// Must be inbound capable
if !matches!(self.network_class, NetworkClass::InboundCapable) {
return false;
}
// Do any of our dial info require signalling? if so, we can't offer signalling
for did in &self.dial_info_detail_list {
if did.class.requires_signal() {
return false;
}
}
true
}
    /// Can this node be an inbound relay?
pub fn can_inbound_relay(&self) -> bool {
// For now this is the same
self.can_signal()
}
/// Is this node capable of validating dial info
pub fn can_validate_dial_info(&self) -> bool {
// For now this is the same
self.can_signal()
}
}
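
A small sketch of dial info selection with `NO_SORT`, assuming these types are in scope:

```rust
// Sketch: pick the first directly-reachable dial info in advertised order.
fn first_direct_dial_info(ni: &NodeInfo) -> Option<DialInfoDetail> {
    ni.first_filtered_dial_info_detail(DialInfoDetail::NO_SORT, |did| {
        matches!(did.class, DialInfoClass::Direct)
    })
}
```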

View File

@ -0,0 +1,66 @@
use super::*;
/// RoutingDomain-specific status for each node, as returned by the StatusA call
/// PublicInternet RoutingDomain Status
#[derive(
Clone, Debug, Default, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct PublicInternetNodeStatus {
pub will_route: bool,
pub will_tunnel: bool,
pub will_signal: bool,
pub will_relay: bool,
pub will_validate_dial_info: bool,
}
#[derive(
Clone, Debug, Default, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct LocalNetworkNodeStatus {
pub will_relay: bool,
pub will_validate_dial_info: bool,
}
#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
#[archive_attr(repr(u8), derive(CheckBytes))]
pub enum NodeStatus {
PublicInternet(PublicInternetNodeStatus),
LocalNetwork(LocalNetworkNodeStatus),
}
impl NodeStatus {
pub fn will_route(&self) -> bool {
match self {
NodeStatus::PublicInternet(pi) => pi.will_route,
NodeStatus::LocalNetwork(_) => false,
}
}
pub fn will_tunnel(&self) -> bool {
match self {
NodeStatus::PublicInternet(pi) => pi.will_tunnel,
NodeStatus::LocalNetwork(_) => false,
}
}
pub fn will_signal(&self) -> bool {
match self {
NodeStatus::PublicInternet(pi) => pi.will_signal,
NodeStatus::LocalNetwork(_) => false,
}
}
pub fn will_relay(&self) -> bool {
match self {
NodeStatus::PublicInternet(pi) => pi.will_relay,
NodeStatus::LocalNetwork(ln) => ln.will_relay,
}
}
pub fn will_validate_dial_info(&self) -> bool {
match self {
NodeStatus::PublicInternet(pi) => pi.will_validate_dial_info,
NodeStatus::LocalNetwork(ln) => ln.will_validate_dial_info,
}
}
}
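// Hypothetical usage sketch (not part of the original commit): a PublicInternet
// status exposes all capability flags, while a LocalNetwork status reports
// `false` for the public-internet-only ones (route/tunnel/signal).
#[allow(dead_code)]
fn example_node_status_flags() -> (bool, bool) {
    let status = NodeStatus::PublicInternet(PublicInternetNodeStatus {
        will_route: true,
        will_tunnel: false,
        will_signal: true,
        will_relay: true,
        will_validate_dial_info: true,
    });
    (status.will_route(), status.will_signal())
}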

View File

@ -0,0 +1,49 @@
use super::*;
#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct PeerInfo {
node_ids: TypedKeySet,
signed_node_info: SignedNodeInfo,
}
impl PeerInfo {
pub fn new(node_ids: TypedKeySet, signed_node_info: SignedNodeInfo) -> Self {
        assert!(!node_ids.is_empty() && node_ids.len() <= MAX_CRYPTO_KINDS);
Self {
node_ids,
signed_node_info,
}
}
pub fn validate(&self, crypto: Crypto) -> VeilidAPIResult<()> {
let validated_node_ids = self.signed_node_info.validate(&self.node_ids, crypto)?;
if validated_node_ids.is_empty() {
// Shouldn't get here because signed node info validation also checks this
apibail_generic!("no validated node ids");
}
Ok(())
}
pub fn node_ids(&self) -> &TypedKeySet {
&self.node_ids
}
pub fn signed_node_info(&self) -> &SignedNodeInfo {
&self.signed_node_info
}
pub fn destructure(self) -> (TypedKeySet, SignedNodeInfo) {
(self.node_ids, self.signed_node_info)
}
pub fn validate_vec(peer_info_vec: &mut Vec<PeerInfo>, crypto: Crypto) {
let mut n = 0usize;
while n < peer_info_vec.len() {
let pi = peer_info_vec.get(n).unwrap();
if pi.validate(crypto.clone()).is_err() {
peer_info_vec.remove(n);
} else {
n += 1;
}
}
}
}
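// Hypothetical usage sketch (not part of the original commit): strip out any
// peers whose signed node info fails validation before using the list.
#[allow(dead_code)]
fn example_keep_valid_peers(mut peers: Vec<PeerInfo>, crypto: Crypto) -> Vec<PeerInfo> {
    PeerInfo::validate_vec(&mut peers, crypto);
    peers
}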

View File

@ -0,0 +1,32 @@
use super::*;
// Routing domains are listed here in order of preference; keep them in order
#[allow(clippy::derive_hash_xor_eq)]
#[derive(
Debug,
Ord,
PartialOrd,
Hash,
EnumSetType,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[enumset(repr = "u8")]
#[archive_attr(repr(u8), derive(CheckBytes))]
pub enum RoutingDomain {
LocalNetwork = 0,
PublicInternet = 1,
}
impl RoutingDomain {
pub const fn count() -> usize {
2
}
pub const fn all() -> [RoutingDomain; RoutingDomain::count()] {
        // Routing domains are listed here in order of preference; keep them in order
[RoutingDomain::LocalNetwork, RoutingDomain::PublicInternet]
}
}
pub type RoutingDomainSet = EnumSet<RoutingDomain>;
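// Hypothetical usage sketch (not part of the original commit): collect every
// routing domain into a RoutingDomainSet, preserving the preference order above.
#[allow(dead_code)]
fn example_all_routing_domains() -> RoutingDomainSet {
    let mut set = RoutingDomainSet::new();
    for rd in RoutingDomain::all() {
        set.insert(rd);
    }
    set
}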

View File

@ -0,0 +1,93 @@
use super::*;
/// Signed NodeInfo that can be passed around amongst peers and is verifiable
#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct SignedDirectNodeInfo {
node_info: NodeInfo,
timestamp: Timestamp,
signatures: Vec<TypedSignature>,
}
impl SignedDirectNodeInfo {
    /// Returns a new SignedDirectNodeInfo.
    /// Signatures are not checked here; call `validate()` to obtain the subset of node ids whose signatures verify.
    /// All signatures are stored regardless, as this can be passed to other nodes that may be able to validate them.
pub fn new(node_info: NodeInfo, timestamp: Timestamp, signatures: Vec<TypedSignature>) -> Self {
Self {
node_info,
timestamp,
signatures,
}
}
pub fn validate(&self, node_ids: &TypedKeySet, crypto: Crypto) -> VeilidAPIResult<TypedKeySet> {
let node_info_bytes = Self::make_signature_bytes(&self.node_info, self.timestamp)?;
// Verify the signatures that we can
let validated_node_ids =
crypto.verify_signatures(node_ids, &node_info_bytes, &self.signatures)?;
        if validated_node_ids.is_empty() {
apibail_generic!("no valid node ids in direct node info");
}
Ok(validated_node_ids)
}
pub fn make_signatures(
crypto: Crypto,
typed_key_pairs: Vec<TypedKeyPair>,
node_info: NodeInfo,
) -> VeilidAPIResult<Self> {
let timestamp = get_aligned_timestamp();
let node_info_bytes = Self::make_signature_bytes(&node_info, timestamp)?;
let typed_signatures =
crypto.generate_signatures(&node_info_bytes, &typed_key_pairs, |kp, s| {
TypedSignature::new(kp.kind, s)
})?;
Ok(Self {
node_info,
timestamp,
signatures: typed_signatures,
})
}
fn make_signature_bytes(
node_info: &NodeInfo,
timestamp: Timestamp,
) -> VeilidAPIResult<Vec<u8>> {
let mut node_info_bytes = Vec::new();
// Add nodeinfo to signature
let mut ni_msg = ::capnp::message::Builder::new_default();
let mut ni_builder = ni_msg.init_root::<veilid_capnp::node_info::Builder>();
encode_node_info(node_info, &mut ni_builder).map_err(VeilidAPIError::internal)?;
node_info_bytes.append(&mut builder_to_vec(ni_msg).map_err(VeilidAPIError::internal)?);
// Add timestamp to signature
node_info_bytes.append(&mut timestamp.as_u64().to_le_bytes().to_vec());
Ok(node_info_bytes)
}
pub fn with_no_signature(node_info: NodeInfo) -> Self {
Self {
node_info,
timestamp: get_aligned_timestamp(),
signatures: Vec::new(),
}
}
pub fn has_any_signature(&self) -> bool {
!self.signatures.is_empty()
}
pub fn node_info(&self) -> &NodeInfo {
&self.node_info
}
pub fn timestamp(&self) -> Timestamp {
self.timestamp
}
pub fn signatures(&self) -> &[TypedSignature] {
&self.signatures
}
}
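// Hypothetical usage sketch (not part of the original commit): sign a NodeInfo
// with one or more typed key pairs, then validate it against the corresponding
// node ids to get back the subset that actually verified.
#[allow(dead_code)]
fn example_sign_then_validate(
    crypto: Crypto,
    key_pairs: Vec<TypedKeyPair>,
    node_ids: &TypedKeySet,
    node_info: NodeInfo,
) -> VeilidAPIResult<TypedKeySet> {
    let sdni = SignedDirectNodeInfo::make_signatures(crypto.clone(), key_pairs, node_info)?;
    sdni.validate(node_ids, crypto)
}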

View File

@ -0,0 +1,96 @@
use super::*;
#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
#[archive_attr(repr(u8), derive(CheckBytes))]
pub enum SignedNodeInfo {
Direct(SignedDirectNodeInfo),
Relayed(SignedRelayedNodeInfo),
}
impl SignedNodeInfo {
pub fn validate(&self, node_ids: &TypedKeySet, crypto: Crypto) -> VeilidAPIResult<TypedKeySet> {
match self {
SignedNodeInfo::Direct(d) => d.validate(node_ids, crypto),
SignedNodeInfo::Relayed(r) => r.validate(node_ids, crypto),
}
}
pub fn has_any_signature(&self) -> bool {
match self {
SignedNodeInfo::Direct(d) => d.has_any_signature(),
SignedNodeInfo::Relayed(r) => r.has_any_signature(),
}
}
pub fn timestamp(&self) -> Timestamp {
match self {
SignedNodeInfo::Direct(d) => d.timestamp(),
SignedNodeInfo::Relayed(r) => r.timestamp(),
}
}
pub fn node_info(&self) -> &NodeInfo {
match self {
            SignedNodeInfo::Direct(d) => d.node_info(),
            SignedNodeInfo::Relayed(r) => r.node_info(),
}
}
pub fn relay_ids(&self) -> TypedKeySet {
match self {
SignedNodeInfo::Direct(_) => TypedKeySet::new(),
SignedNodeInfo::Relayed(r) => r.relay_ids().clone(),
}
}
pub fn relay_info(&self) -> Option<&NodeInfo> {
match self {
SignedNodeInfo::Direct(_) => None,
SignedNodeInfo::Relayed(r) => Some(r.relay_info().node_info()),
}
}
pub fn relay_peer_info(&self) -> Option<PeerInfo> {
match self {
SignedNodeInfo::Direct(_) => None,
SignedNodeInfo::Relayed(r) => Some(PeerInfo::new(
r.relay_ids().clone(),
SignedNodeInfo::Direct(r.relay_info().clone()),
)),
}
}
pub fn has_any_dial_info(&self) -> bool {
self.node_info().has_dial_info()
|| self
.relay_info()
.map(|relay_ni| relay_ni.has_dial_info())
.unwrap_or_default()
}
pub fn has_sequencing_matched_dial_info(&self, sequencing: Sequencing) -> bool {
// Check our dial info
for did in self.node_info().dial_info_detail_list() {
match sequencing {
Sequencing::NoPreference | Sequencing::PreferOrdered => return true,
Sequencing::EnsureOrdered => {
if did.dial_info.protocol_type().is_connection_oriented() {
return true;
}
}
}
}
// Check our relay if we have one
return self
.relay_info()
.map(|relay_ni| {
for did in relay_ni.dial_info_detail_list() {
match sequencing {
Sequencing::NoPreference | Sequencing::PreferOrdered => return true,
Sequencing::EnsureOrdered => {
if did.dial_info.protocol_type().is_connection_oriented() {
return true;
}
}
}
}
false
})
.unwrap_or_default();
}
}
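// Hypothetical usage sketch (not part of the original commit): a node is usable
// for ordered transports only if it (or its relay) advertises dial info and at
// least one of those dial infos is connection-oriented.
#[allow(dead_code)]
fn example_reachable_ordered(sni: &SignedNodeInfo) -> bool {
    sni.has_any_dial_info() && sni.has_sequencing_matched_dial_info(Sequencing::EnsureOrdered)
}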

View File

@ -0,0 +1,138 @@
use super::*;
/// Signed NodeInfo with a relay that can be passed around amongst peers and is verifiable
#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct SignedRelayedNodeInfo {
node_info: NodeInfo,
relay_ids: TypedKeySet,
relay_info: SignedDirectNodeInfo,
timestamp: Timestamp,
signatures: Vec<TypedSignature>,
}
impl SignedRelayedNodeInfo {
    /// Returns a new SignedRelayedNodeInfo.
    /// Signatures are not checked here; call `validate()` to obtain the subset of node ids whose signatures verify.
    /// All signatures are stored regardless, as this can be passed to other nodes that may be able to validate them.
pub fn new(
node_info: NodeInfo,
relay_ids: TypedKeySet,
relay_info: SignedDirectNodeInfo,
timestamp: Timestamp,
signatures: Vec<TypedSignature>,
) -> Self {
Self {
node_info,
relay_ids,
relay_info,
timestamp,
signatures,
}
}
pub fn validate(&self, node_ids: &TypedKeySet, crypto: Crypto) -> VeilidAPIResult<TypedKeySet> {
// Ensure the relay info for the node has a superset of the crypto kinds of the node it is relaying
if common_crypto_kinds(
self.node_info.crypto_support(),
self.relay_info.node_info().crypto_support(),
)
.len()
!= self.node_info.crypto_support().len()
{
apibail_generic!("relay should have superset of node crypto kinds");
}
// Verify signatures
let node_info_bytes = Self::make_signature_bytes(
&self.node_info,
&self.relay_ids,
&self.relay_info,
self.timestamp,
)?;
let validated_node_ids =
crypto.verify_signatures(node_ids, &node_info_bytes, &self.signatures)?;
        if validated_node_ids.is_empty() {
apibail_generic!("no valid node ids in relayed node info");
}
Ok(validated_node_ids)
}
pub fn make_signatures(
crypto: Crypto,
typed_key_pairs: Vec<TypedKeyPair>,
node_info: NodeInfo,
relay_ids: TypedKeySet,
relay_info: SignedDirectNodeInfo,
) -> VeilidAPIResult<Self> {
let timestamp = get_aligned_timestamp();
let node_info_bytes =
Self::make_signature_bytes(&node_info, &relay_ids, &relay_info, timestamp)?;
let typed_signatures =
crypto.generate_signatures(&node_info_bytes, &typed_key_pairs, |kp, s| {
TypedSignature::new(kp.kind, s)
})?;
Ok(Self {
node_info,
relay_ids,
relay_info,
timestamp,
signatures: typed_signatures,
})
}
fn make_signature_bytes(
node_info: &NodeInfo,
relay_ids: &[TypedKey],
relay_info: &SignedDirectNodeInfo,
timestamp: Timestamp,
) -> VeilidAPIResult<Vec<u8>> {
let mut sig_bytes = Vec::new();
// Add nodeinfo to signature
let mut ni_msg = ::capnp::message::Builder::new_default();
let mut ni_builder = ni_msg.init_root::<veilid_capnp::node_info::Builder>();
encode_node_info(node_info, &mut ni_builder).map_err(VeilidAPIError::internal)?;
sig_bytes.append(&mut builder_to_vec(ni_msg).map_err(VeilidAPIError::internal)?);
// Add relay ids to signature
for relay_id in relay_ids {
let mut rid_msg = ::capnp::message::Builder::new_default();
let mut rid_builder = rid_msg.init_root::<veilid_capnp::typed_key::Builder>();
encode_typed_key(relay_id, &mut rid_builder);
sig_bytes.append(&mut builder_to_vec(rid_msg).map_err(VeilidAPIError::internal)?);
}
// Add relay info to signature
let mut ri_msg = ::capnp::message::Builder::new_default();
let mut ri_builder = ri_msg.init_root::<veilid_capnp::signed_direct_node_info::Builder>();
encode_signed_direct_node_info(relay_info, &mut ri_builder)
.map_err(VeilidAPIError::internal)?;
sig_bytes.append(&mut builder_to_vec(ri_msg).map_err(VeilidAPIError::internal)?);
// Add timestamp to signature
sig_bytes.append(&mut timestamp.as_u64().to_le_bytes().to_vec());
Ok(sig_bytes)
}
pub fn has_any_signature(&self) -> bool {
!self.signatures.is_empty()
}
pub fn node_info(&self) -> &NodeInfo {
&self.node_info
}
pub fn timestamp(&self) -> Timestamp {
self.timestamp
}
pub fn relay_ids(&self) -> &TypedKeySet {
&self.relay_ids
}
pub fn relay_info(&self) -> &SignedDirectNodeInfo {
&self.relay_info
}
pub fn signatures(&self) -> &[TypedSignature] {
&self.signatures
}
}
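// Hypothetical usage sketch (not part of the original commit): build relayed
// node info by signing over the node info, the relay ids, and the relay's own
// signed direct node info, so the relay binding itself is covered by the signatures.
#[allow(dead_code)]
fn example_make_relayed(
    crypto: Crypto,
    key_pairs: Vec<TypedKeyPair>,
    node_info: NodeInfo,
    relay_ids: TypedKeySet,
    relay_info: SignedDirectNodeInfo,
) -> VeilidAPIResult<SignedRelayedNodeInfo> {
    SignedRelayedNodeInfo::make_signatures(crypto, key_pairs, node_info, relay_ids, relay_info)
}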

View File

@ -19,11 +19,12 @@ mod signature512;
mod signed_direct_node_info;
mod signed_node_info;
mod signed_relayed_node_info;
mod signed_value_data;
mod signed_value_descriptor;
mod socket_address;
mod tunnel;
mod typed_key;
mod typed_signature;
mod value_data;
pub use address::*;
pub use address_type_set::*;
@ -46,10 +47,24 @@ pub use signature512::*;
pub use signed_direct_node_info::*;
pub use signed_node_info::*;
pub use signed_relayed_node_info::*;
pub use signed_value_data::*;
pub use signed_value_descriptor::*;
pub use socket_address::*;
pub use tunnel::*;
pub use typed_key::*;
pub use typed_signature::*;
pub use value_data::*;
use super::*;
#[derive(Debug, Clone)]
pub enum QuestionContext {
GetValue(ValidateGetValueContext),
SetValue(ValidateSetValueContext),
}
#[derive(Clone)]
pub struct RPCValidateContext {
pub crypto: Crypto,
pub rpc_processor: RPCProcessor,
pub question_context: Option<QuestionContext>,
}
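// Hypothetical usage sketch (not part of the original commit): answers that are
// not part of a GetValue/SetValue exchange carry no question context.
#[allow(dead_code)]
fn example_validate_context(crypto: Crypto, rpc_processor: RPCProcessor) -> RPCValidateContext {
    RPCValidateContext {
        crypto,
        rpc_processor,
        question_context: None,
    }
}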

View File

@ -4,27 +4,27 @@ pub fn encode_node_info(
node_info: &NodeInfo,
builder: &mut veilid_capnp::node_info::Builder,
) -> Result<(), RPCError> {
builder.set_network_class(encode_network_class(node_info.network_class));
builder.set_network_class(encode_network_class(node_info.network_class()));
let mut ps_builder = builder.reborrow().init_outbound_protocols();
encode_protocol_type_set(&node_info.outbound_protocols, &mut ps_builder)?;
encode_protocol_type_set(&node_info.outbound_protocols(), &mut ps_builder)?;
let mut ats_builder = builder.reborrow().init_address_types();
encode_address_type_set(&node_info.address_types, &mut ats_builder)?;
encode_address_type_set(&node_info.address_types(), &mut ats_builder)?;
let mut es_builder = builder
.reborrow()
.init_envelope_support(node_info.envelope_support.len() as u32);
.init_envelope_support(node_info.envelope_support().len() as u32);
if let Some(s) = es_builder.as_slice() {
s.clone_from_slice(&node_info.envelope_support);
s.clone_from_slice(&node_info.envelope_support());
}
let mut cs_builder = builder
.reborrow()
.init_crypto_support(node_info.crypto_support.len() as u32);
.init_crypto_support(node_info.crypto_support().len() as u32);
if let Some(s) = cs_builder.as_slice() {
let csvec: Vec<u32> = node_info
.crypto_support
.crypto_support()
.iter()
.map(|x| u32::from_be_bytes(x.0))
.collect();
@ -33,7 +33,7 @@ pub fn encode_node_info(
let mut didl_builder = builder.reborrow().init_dial_info_detail_list(
node_info
.dial_info_detail_list
.dial_info_detail_list()
.len()
.try_into()
.map_err(RPCError::map_protocol(
@ -41,9 +41,9 @@ pub fn encode_node_info(
))?,
);
for idx in 0..node_info.dial_info_detail_list.len() {
for idx in 0..node_info.dial_info_detail_list().len() {
let mut did_builder = didl_builder.reborrow().get(idx as u32);
encode_dial_info_detail(&node_info.dial_info_detail_list[idx], &mut did_builder)?;
encode_dial_info_detail(&node_info.dial_info_detail_list()[idx], &mut did_builder)?;
}
Ok(())
@ -131,12 +131,12 @@ pub fn decode_node_info(reader: &veilid_capnp::node_info::Reader) -> Result<Node
dial_info_detail_list.push(decode_dial_info_detail(&did)?)
}
Ok(NodeInfo {
Ok(NodeInfo::new(
network_class,
outbound_protocols,
address_types,
envelope_support,
crypto_support,
dial_info_detail_list,
})
))
}

View File

@ -9,18 +9,18 @@ impl RPCAnswer {
pub fn new(detail: RPCAnswerDetail) -> Self {
Self { detail }
}
pub fn into_detail(self) -> RPCAnswerDetail {
self.detail
pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> {
self.detail.validate(validate_context)
}
pub fn desc(&self) -> &'static str {
self.detail.desc()
}
pub fn decode(
reader: &veilid_capnp::answer::Reader,
crypto: Crypto,
) -> Result<RPCAnswer, RPCError> {
pub fn destructure(self) -> RPCAnswerDetail {
self.detail
}
pub fn decode(reader: &veilid_capnp::answer::Reader) -> Result<RPCAnswer, RPCError> {
let d_reader = reader.get_detail();
let detail = RPCAnswerDetail::decode(&d_reader, crypto)?;
let detail = RPCAnswerDetail::decode(&d_reader)?;
Ok(RPCAnswer { detail })
}
pub fn encode(&self, builder: &mut veilid_capnp::answer::Builder) -> Result<(), RPCError> {
@ -60,10 +60,23 @@ impl RPCAnswerDetail {
RPCAnswerDetail::CancelTunnelA(_) => "CancelTunnelA",
}
}
pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> {
match self {
RPCAnswerDetail::StatusA(r) => r.validate(validate_context),
RPCAnswerDetail::FindNodeA(r) => r.validate(validate_context),
RPCAnswerDetail::AppCallA(r) => r.validate(validate_context),
RPCAnswerDetail::GetValueA(r) => r.validate(validate_context),
RPCAnswerDetail::SetValueA(r) => r.validate(validate_context),
RPCAnswerDetail::WatchValueA(r) => r.validate(validate_context),
RPCAnswerDetail::SupplyBlockA(r) => r.validate(validate_context),
RPCAnswerDetail::FindBlockA(r) => r.validate(validate_context),
RPCAnswerDetail::StartTunnelA(r) => r.validate(validate_context),
RPCAnswerDetail::CompleteTunnelA(r) => r.validate(validate_context),
RPCAnswerDetail::CancelTunnelA(r) => r.validate(validate_context),
}
}
pub fn decode(
reader: &veilid_capnp::answer::detail::Reader,
crypto: Crypto,
) -> Result<RPCAnswerDetail, RPCError> {
let which_reader = reader.which().map_err(RPCError::protocol)?;
let out = match which_reader {
@ -74,7 +87,7 @@ impl RPCAnswerDetail {
}
veilid_capnp::answer::detail::FindNodeA(r) => {
let op_reader = r.map_err(RPCError::protocol)?;
let out = RPCOperationFindNodeA::decode(&op_reader, crypto)?;
let out = RPCOperationFindNodeA::decode(&op_reader)?;
RPCAnswerDetail::FindNodeA(out)
}
veilid_capnp::answer::detail::AppCallA(r) => {
@ -84,27 +97,27 @@ impl RPCAnswerDetail {
}
veilid_capnp::answer::detail::GetValueA(r) => {
let op_reader = r.map_err(RPCError::protocol)?;
let out = RPCOperationGetValueA::decode(&op_reader, crypto)?;
let out = RPCOperationGetValueA::decode(&op_reader)?;
RPCAnswerDetail::GetValueA(out)
}
veilid_capnp::answer::detail::SetValueA(r) => {
let op_reader = r.map_err(RPCError::protocol)?;
let out = RPCOperationSetValueA::decode(&op_reader, crypto)?;
let out = RPCOperationSetValueA::decode(&op_reader)?;
RPCAnswerDetail::SetValueA(out)
}
veilid_capnp::answer::detail::WatchValueA(r) => {
let op_reader = r.map_err(RPCError::protocol)?;
let out = RPCOperationWatchValueA::decode(&op_reader, crypto)?;
let out = RPCOperationWatchValueA::decode(&op_reader)?;
RPCAnswerDetail::WatchValueA(out)
}
veilid_capnp::answer::detail::SupplyBlockA(r) => {
let op_reader = r.map_err(RPCError::protocol)?;
let out = RPCOperationSupplyBlockA::decode(&op_reader, crypto)?;
let out = RPCOperationSupplyBlockA::decode(&op_reader)?;
RPCAnswerDetail::SupplyBlockA(out)
}
veilid_capnp::answer::detail::FindBlockA(r) => {
let op_reader = r.map_err(RPCError::protocol)?;
let out = RPCOperationFindBlockA::decode(&op_reader, crypto)?;
let out = RPCOperationFindBlockA::decode(&op_reader)?;
RPCAnswerDetail::FindBlockA(out)
}
veilid_capnp::answer::detail::StartTunnelA(r) => {

View File

@ -16,25 +16,30 @@ impl RPCOperationKind {
}
}
pub fn decode(
kind_reader: &veilid_capnp::operation::kind::Reader,
crypto: Crypto,
) -> Result<Self, RPCError> {
pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> {
match self {
RPCOperationKind::Question(r) => r.validate(validate_context),
RPCOperationKind::Statement(r) => r.validate(validate_context),
RPCOperationKind::Answer(r) => r.validate(validate_context),
}
}
pub fn decode(kind_reader: &veilid_capnp::operation::kind::Reader) -> Result<Self, RPCError> {
let which_reader = kind_reader.which().map_err(RPCError::protocol)?;
let out = match which_reader {
veilid_capnp::operation::kind::Which::Question(r) => {
let q_reader = r.map_err(RPCError::protocol)?;
let out = RPCQuestion::decode(&q_reader, crypto)?;
let out = RPCQuestion::decode(&q_reader)?;
RPCOperationKind::Question(out)
}
veilid_capnp::operation::kind::Which::Statement(r) => {
let q_reader = r.map_err(RPCError::protocol)?;
let out = RPCStatement::decode(&q_reader, crypto)?;
let out = RPCStatement::decode(&q_reader)?;
RPCOperationKind::Statement(out)
}
veilid_capnp::operation::kind::Which::Answer(r) => {
let q_reader = r.map_err(RPCError::protocol)?;
let out = RPCAnswer::decode(&q_reader, crypto)?;
let out = RPCAnswer::decode(&q_reader)?;
RPCOperationKind::Answer(out)
}
};
@ -93,6 +98,17 @@ impl RPCOperation {
}
}
pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> {
// Validate sender peer info
if let Some(sender_peer_info) = &self.opt_sender_peer_info {
sender_peer_info
.validate(validate_context.crypto.clone())
.map_err(RPCError::protocol)?;
}
// Validate operation kind
self.kind.validate(validate_context)
}
pub fn op_id(&self) -> OperationId {
self.op_id
}
@ -108,21 +124,23 @@ impl RPCOperation {
&self.kind
}
pub fn into_kind(self) -> RPCOperationKind {
self.kind
pub fn destructure(self) -> (OperationId, Option<PeerInfo>, Timestamp, RPCOperationKind) {
(
self.op_id,
self.opt_sender_peer_info,
self.target_node_info_ts,
self.kind,
)
}
pub fn decode(
operation_reader: &veilid_capnp::operation::Reader,
crypto: Crypto,
) -> Result<Self, RPCError> {
pub fn decode(operation_reader: &veilid_capnp::operation::Reader) -> Result<Self, RPCError> {
let op_id = OperationId::new(operation_reader.get_op_id());
let sender_peer_info = if operation_reader.has_sender_peer_info() {
let pi_reader = operation_reader
.get_sender_peer_info()
.map_err(RPCError::protocol)?;
let pi = decode_peer_info(&pi_reader, crypto.clone())?;
let pi = decode_peer_info(&pi_reader)?;
Some(pi)
} else {
None
@ -131,7 +149,7 @@ impl RPCOperation {
let target_node_info_ts = Timestamp::new(operation_reader.get_target_node_info_ts());
let kind_reader = operation_reader.get_kind();
let kind = RPCOperationKind::decode(&kind_reader, crypto)?;
let kind = RPCOperationKind::decode(&kind_reader)?;
Ok(RPCOperation {
op_id,

View File

@ -1,16 +1,40 @@
use super::*;
const MAX_APP_CALL_Q_MESSAGE_LEN: usize = 32768;
const MAX_APP_CALL_A_MESSAGE_LEN: usize = 32768;
#[derive(Debug, Clone)]
pub struct RPCOperationAppCallQ {
pub message: Vec<u8>,
message: Vec<u8>,
}
impl RPCOperationAppCallQ {
pub fn decode(
reader: &veilid_capnp::operation_app_call_q::Reader,
) -> Result<RPCOperationAppCallQ, RPCError> {
let message = reader.get_message().map_err(RPCError::protocol)?.to_vec();
Ok(RPCOperationAppCallQ { message })
pub fn new(message: Vec<u8>) -> Result<Self, RPCError> {
if message.len() > MAX_APP_CALL_Q_MESSAGE_LEN {
return Err(RPCError::protocol("AppCallQ message too long to set"));
}
Ok(Self { message })
}
pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> {
Ok(())
}
// pub fn message(&self) -> &[u8] {
// &self.message
// }
pub fn destructure(self) -> Vec<u8> {
self.message
}
pub fn decode(reader: &veilid_capnp::operation_app_call_q::Reader) -> Result<Self, RPCError> {
let mr = reader.get_message().map_err(RPCError::protocol)?;
if mr.len() > MAX_APP_CALL_Q_MESSAGE_LEN {
return Err(RPCError::protocol("AppCallQ message too long to set"));
}
Ok(Self {
message: mr.to_vec(),
})
}
pub fn encode(
&self,
@ -23,15 +47,37 @@ impl RPCOperationAppCallQ {
#[derive(Debug, Clone)]
pub struct RPCOperationAppCallA {
pub message: Vec<u8>,
message: Vec<u8>,
}
impl RPCOperationAppCallA {
pub fn decode(
reader: &veilid_capnp::operation_app_call_a::Reader,
) -> Result<RPCOperationAppCallA, RPCError> {
let message = reader.get_message().map_err(RPCError::protocol)?.to_vec();
Ok(RPCOperationAppCallA { message })
pub fn new(message: Vec<u8>) -> Result<Self, RPCError> {
if message.len() > MAX_APP_CALL_A_MESSAGE_LEN {
return Err(RPCError::protocol("AppCallA message too long to set"));
}
Ok(Self { message })
}
pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> {
Ok(())
}
// pub fn message(&self) -> &[u8] {
// &self.message
// }
pub fn destructure(self) -> Vec<u8> {
self.message
}
pub fn decode(reader: &veilid_capnp::operation_app_call_a::Reader) -> Result<Self, RPCError> {
let mr = reader.get_message().map_err(RPCError::protocol)?;
if mr.len() > MAX_APP_CALL_A_MESSAGE_LEN {
return Err(RPCError::protocol("AppCallA message too long to set"));
}
Ok(Self {
message: mr.to_vec(),
})
}
pub fn encode(
&self,

View File

@ -1,16 +1,39 @@
use super::*;
const MAX_APP_MESSAGE_MESSAGE_LEN: usize = 32768;
#[derive(Debug, Clone)]
pub struct RPCOperationAppMessage {
pub message: Vec<u8>,
message: Vec<u8>,
}
impl RPCOperationAppMessage {
pub fn decode(
reader: &veilid_capnp::operation_app_message::Reader,
) -> Result<RPCOperationAppMessage, RPCError> {
let message = reader.get_message().map_err(RPCError::protocol)?.to_vec();
Ok(RPCOperationAppMessage { message })
pub fn new(message: Vec<u8>) -> Result<Self, RPCError> {
if message.len() > MAX_APP_MESSAGE_MESSAGE_LEN {
return Err(RPCError::protocol("AppMessage message too long to set"));
}
Ok(Self { message })
}
pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> {
Ok(())
}
// pub fn message(&self) -> &[u8] {
// &self.message
// }
pub fn destructure(self) -> Vec<u8> {
self.message
}
pub fn decode(reader: &veilid_capnp::operation_app_message::Reader) -> Result<Self, RPCError> {
let mr = reader.get_message().map_err(RPCError::protocol)?;
if mr.len() > MAX_APP_MESSAGE_MESSAGE_LEN {
return Err(RPCError::protocol("AppMessage message too long to set"));
}
Ok(Self {
message: mr.to_vec(),
})
}
pub fn encode(
&self,

Some files were not shown because too many files have changed in this diff.