commit 1ba0cdb9cf
parent 064e6c018c

checkpoint
@@ -230,8 +230,8 @@ struct NodeInfo @0xe125d847e3f9f419 {
     networkClass @0 :NetworkClass; # network class of this node
     outboundProtocols @1 :ProtocolTypeSet; # protocols that can go outbound
     addressTypes @2 :AddressTypeSet; # address types supported
-    envelopeSupport @3 :List(UInt8); # supported envelope versions
-    cryptoSupport @4 :List(CryptoKind); # maximum protocol version for rpc
+    envelopeSupport @3 :List(UInt8); # supported rpc envelope/receipt versions
+    cryptoSupport @4 :List(CryptoKind); # cryptography systems supported
     dialInfoDetailList @5 :List(DialInfoDetail); # inbound dial info details for this node
 }
veilid-core/src/crypto/dh_cache.rs (new file, 45 lines)
@@ -0,0 +1,45 @@
+use super::*;
+use crate::*;
+
+// Diffie-Hellman key exchange cache
+#[derive(Serialize, Deserialize, PartialEq, Eq, Hash)]
+pub struct DHCacheKey {
+    pub key: PublicKey,
+    pub secret: SecretKey,
+}
+
+#[derive(Serialize, Deserialize)]
+pub struct DHCacheValue {
+    pub shared_secret: SharedSecret,
+}
+
+pub type DHCache = LruCache<DHCacheKey, DHCacheValue>;
+pub const DH_CACHE_SIZE: usize = 4096;
+
+pub fn cache_to_bytes(cache: &DHCache) -> Vec<u8> {
+    let cnt: usize = cache.len();
+    let mut out: Vec<u8> = Vec::with_capacity(cnt * (32 + 32 + 32));
+    for e in cache.iter() {
+        out.extend(&e.0.key.bytes);
+        out.extend(&e.0.secret.bytes);
+        out.extend(&e.1.shared_secret.bytes);
+    }
+    let mut rev: Vec<u8> = Vec::with_capacity(out.len());
+    for d in out.chunks(32 + 32 + 32).rev() {
+        rev.extend(d);
+    }
+    rev
+}
+
+pub fn bytes_to_cache(bytes: &[u8], cache: &mut DHCache) {
+    for d in bytes.chunks(32 + 32 + 32) {
+        let k = DHCacheKey {
+            key: PublicKey::new(d[0..32].try_into().expect("asdf")),
+            secret: SecretKey::new(d[32..64].try_into().expect("asdf")),
+        };
+        let v = DHCacheValue {
+            shared_secret: SharedSecret::new(d[64..96].try_into().expect("asdf")),
+        };
+        cache.insert(k, v);
+    }
+}
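The new module serializes the LRU cache as fixed-width 96-byte records (public key, secret key, shared secret) and reverses the chunk order so that bytes_to_cache re-inserts the oldest entries first, preserving LRU recency after a reload. A minimal round-trip sketch under that reading; the literal byte arrays and the hashlink LruCache::new constructor are illustrative assumptions, not part of this commit:

// Hypothetical round trip for the DH cache, assuming PublicKey/SecretKey/
// SharedSecret wrap plain 32-byte arrays as the ::new() calls above suggest.
fn dh_cache_round_trip() {
    let mut cache: DHCache = DHCache::new(DH_CACHE_SIZE);
    cache.insert(
        DHCacheKey {
            key: PublicKey::new([1u8; 32]),
            secret: SecretKey::new([2u8; 32]),
        },
        DHCacheValue {
            shared_secret: SharedSecret::new([3u8; 32]),
        },
    );

    // Each entry serializes to one 96-byte record.
    let bytes = cache_to_bytes(&cache);
    assert_eq!(bytes.len(), cache.len() * 96);

    // Restoring re-inserts records oldest-first, preserving LRU ordering.
    let mut restored: DHCache = DHCache::new(DH_CACHE_SIZE);
    bytes_to_cache(&bytes, &mut restored);
    assert_eq!(restored.len(), cache.len());
}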
@@ -37,7 +37,7 @@ pub const ENVELOPE_MAGIC: &[u8; 3] = b"VLD";

 #[derive(Debug, Clone, PartialEq, Eq, Default)]
 pub struct Envelope {
-    version: u8,
+    version: EnvelopeVersion,
     crypto_kind: CryptoKind,
     timestamp: Timestamp,
     nonce: Nonce,
@@ -47,15 +47,14 @@ pub struct Envelope {

 impl Envelope {
     pub fn new(
-        version: u8,
+        version: EnvelopeVersion,
         crypto_kind: CryptoKind,
         timestamp: Timestamp,
         nonce: Nonce,
         sender_id: PublicKey,
         recipient_id: PublicKey,
     ) -> Self {
-        assert!(version >= MIN_ENVELOPE_VERSION);
-        assert!(version <= MAX_ENVELOPE_VERSION);
+        assert!(VALID_ENVELOPE_VERSIONS.contains(&version));
         assert!(VALID_CRYPTO_KINDS.contains(&crypto_kind));
         Self {
             version,
@@ -84,7 +83,7 @@ impl Envelope {

         // Check envelope version
         let version = data[0x03];
-        if version > MAX_ENVELOPE_VERSION || version < MIN_ENVELOPE_VERSION {
+        if !VALID_ENVELOPE_VERSIONS.contains(&version) {
             apibail_parse_error!("unsupported envelope version", version);
         }

@@ -1,4 +1,5 @@
 mod byte_array_types;
+mod dh_cache;
 mod envelope;
 mod receipt;
 mod types;
@@ -10,6 +11,7 @@ pub mod vld0;

 pub use byte_array_types::*;
 pub use crypto_system::*;
+pub use dh_cache::*;
 pub use envelope::*;
 pub use receipt::*;
 pub use types::*;
@@ -22,52 +24,20 @@ use hashlink::linked_hash_map::Entry;
 use hashlink::LruCache;
 use serde::{Deserialize, Serialize};

+// Handle to a particular cryptosystem
 pub type CryptoSystemVersion = Arc<dyn CryptoSystem + Send + Sync>;

+/// Crypto kinds in order of preference, best cryptosystem is the first one, worst is the last one
 pub const VALID_CRYPTO_KINDS: [CryptoKind; 1] = [CRYPTO_KIND_VLD0];
-pub const MIN_ENVELOPE_VERSION: u8 = 0u8;
-pub const MAX_ENVELOPE_VERSION: u8 = 0u8;
-
-const DH_CACHE_SIZE: usize = 4096;
-
-#[derive(Serialize, Deserialize, PartialEq, Eq, Hash)]
-struct DHCacheKey {
-    key: PublicKey,
-    secret: SecretKey,
+pub fn best_crypto_kind() -> CryptoKind {
+    VALID_CRYPTO_KINDS[0]
 }
-
-#[derive(Serialize, Deserialize)]
-struct DHCacheValue {
-    shared_secret: SharedSecret,
-}
-type DHCache = LruCache<DHCacheKey, DHCacheValue>;
-
-fn cache_to_bytes(cache: &DHCache) -> Vec<u8> {
-    let cnt: usize = cache.len();
-    let mut out: Vec<u8> = Vec::with_capacity(cnt * (32 + 32 + 32));
-    for e in cache.iter() {
-        out.extend(&e.0.key.bytes);
-        out.extend(&e.0.secret.bytes);
-        out.extend(&e.1.shared_secret.bytes);
-    }
-    let mut rev: Vec<u8> = Vec::with_capacity(out.len());
-    for d in out.chunks(32 + 32 + 32).rev() {
-        rev.extend(d);
-    }
-    rev
-}
-
-fn bytes_to_cache(bytes: &[u8], cache: &mut DHCache) {
-    for d in bytes.chunks(32 + 32 + 32) {
-        let k = DHCacheKey {
-            key: PublicKey::new(d[0..32].try_into().expect("asdf")),
-            secret: SecretKey::new(d[32..64].try_into().expect("asdf")),
-        };
-        let v = DHCacheValue {
-            shared_secret: SharedSecret::new(d[64..96].try_into().expect("asdf")),
-        };
-        cache.insert(k, v);
-    }
+
+/// Envelope versions in order of preference, best envelope version is the first one, worst is the last one
+pub type EnvelopeVersion = u8;
+pub const VALID_ENVELOPE_VERSIONS: [EnvelopeVersion; 1] = [0u8];
+pub fn best_envelope_version() -> EnvelopeVersion {
+    VALID_ENVELOPE_VERSIONS[0]
 }

 struct CryptoInner {
@@ -227,6 +197,11 @@ impl Crypto {
         }
     }

+    // Factory method to get the best crypto version
+    pub fn best(&self) -> CryptoSystemVersion {
+        self.get(best_crypto_kind()).unwrap()
+    }
+
     /// Signature set verification
     /// Returns the set of signature cryptokinds that validate and are supported
     /// If any cryptokinds are supported and do not validate, the whole operation
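Both arrays are preference-ordered, so "best" is simply index 0, and negotiating with a peer reduces to taking the first local entry the peer also supports. A short sketch of that reading; the common_crypto_kind helper and the peer's kinds arriving as a plain slice are assumptions for illustration, not code from this commit:

// The first element of each preference array is the best; best_* are just index 0.
fn preference_invariants() {
    assert_eq!(best_crypto_kind(), VALID_CRYPTO_KINDS[0]);
    assert_eq!(best_envelope_version(), VALID_ENVELOPE_VERSIONS[0]);
}

// Pick the most preferred local crypto kind that a peer also supports;
// None means there is no compatible cryptosystem to talk with.
fn common_crypto_kind(peer_kinds: &[CryptoKind]) -> Option<CryptoKind> {
    VALID_CRYPTO_KINDS.iter().copied().find(|k| peer_kinds.contains(k))
}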
@@ -50,8 +50,7 @@ impl Receipt {
         sender_id: PublicKey,
         extra_data: D,
     ) -> Result<Self, VeilidAPIError> {
-        assert!(version >= MIN_ENVELOPE_VERSION);
-        assert!(version <= MAX_ENVELOPE_VERSION);
+        assert!(VALID_ENVELOPE_VERSIONS.contains(&version));
         assert!(VALID_CRYPTO_KINDS.contains(&crypto_kind));

         if extra_data.as_ref().len() > MAX_EXTRA_DATA_SIZE {
@@ -85,7 +84,7 @@ impl Receipt {

         // Check version
         let version = data[0x03];
-        if version > MAX_ENVELOPE_VERSION || version < MIN_ENVELOPE_VERSION {
+        if !VALID_ENVELOPE_VERSIONS.contains(&version) {
             apibail_parse_error!("unsupported envelope version", version);
         }

@@ -1,6 +1,9 @@
 use super::*;

-pub async fn test_envelope_round_trip(vcrypto: CryptoSystemVersion) {
+pub async fn test_envelope_round_trip(
+    envelope_version: EnvelopeVersion,
+    vcrypto: CryptoSystemVersion,
+) {
     info!("--- test envelope round trip ---");

     // Create envelope
@@ -9,7 +12,7 @@ pub async fn test_envelope_round_trip(vcrypto: CryptoSystemVersion) {
     let (sender_id, sender_secret) = vcrypto.generate_keypair();
     let (recipient_id, recipient_secret) = vcrypto.generate_keypair();
     let envelope = Envelope::new(
-        MAX_ENVELOPE_VERSION,
+        envelope_version,
         vcrypto.kind(),
         ts,
         nonce,
@@ -53,7 +56,10 @@ pub async fn test_envelope_round_trip(vcrypto: CryptoSystemVersion) {
     );
 }

-pub async fn test_receipt_round_trip(vcrypto: CryptoSystemVersion) {
+pub async fn test_receipt_round_trip(
+    envelope_version: EnvelopeVersion,
+    vcrypto: CryptoSystemVersion,
+) {
     info!("--- test receipt round trip ---");
     // Create arbitrary body
     let body = b"This is an arbitrary body";
@@ -61,7 +67,7 @@ pub async fn test_receipt_round_trip(vcrypto: CryptoSystemVersion) {
     // Create receipt
     let nonce = vcrypto.random_nonce();
     let (sender_id, sender_secret) = vcrypto.generate_keypair();
-    let receipt = Receipt::try_new(MAX_ENVELOPE_VERSION, vcrypto.kind(), nonce, sender_id, body)
+    let receipt = Receipt::try_new(envelope_version, vcrypto.kind(), nonce, sender_id, body)
         .expect("should not fail");

     // Serialize to bytes
@@ -87,11 +93,13 @@ pub async fn test_all() {
     let crypto = api.crypto().unwrap();

     // Test versions
+    for ev in VALID_ENVELOPE_VERSIONS {
     for v in VALID_CRYPTO_KINDS {
         let vcrypto = crypto.get(v).unwrap();

-        test_envelope_round_trip(vcrypto.clone()).await;
-        test_receipt_round_trip(vcrypto).await;
+        test_envelope_round_trip(ev, vcrypto.clone()).await;
+        test_receipt_round_trip(ev, vcrypto).await;
+    }
     }

     crypto_tests_shutdown(api.clone()).await;
@@ -10,6 +10,23 @@ use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as
 /// Cryptography version fourcc code
 pub type CryptoKind = FourCC;

+/// Sort best crypto kinds first
+pub fn compare_crypto_kind(a: CryptoKind, b: CryptoKind) -> cmp::Ordering {
+    let a_idx = VALID_CRYPTO_KINDS.iter().position(|&k| k == a);
+    let b_idx = VALID_CRYPTO_KINDS.iter().position(|&k| k == b);
+    if let Some(a_idx) = a_idx {
+        if let Some(b_idx) = b_idx {
+            a_idx.cmp(&b_idx)
+        } else {
+            cmp::Ordering::Less
+        }
+    } else if let Some(b_idx) = b_idx {
+        cmp::Ordering::Greater
+    } else {
+        a.cmp(&b)
+    }
+}
+
 #[derive(
     Clone,
     Copy,
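compare_crypto_kind orders kinds by their index in VALID_CRYPTO_KINDS, treats any supported kind as "less than" (better than) an unsupported one, and only falls back to raw FourCC ordering when neither kind is supported. A small illustration of the resulting sort; the FourCC(*b"NONE") construction and the unknown_kind value are assumptions for illustration only:

// Illustration only: assumes FourCC wraps a 4-byte array, as the
// CRYPTO_KIND_* constants elsewhere in the crate suggest.
fn kind_ordering_example() {
    let unknown_kind: CryptoKind = FourCC(*b"NONE"); // hypothetical, unsupported kind

    // A supported kind sorts ahead of an unsupported one...
    assert_eq!(
        compare_crypto_kind(CRYPTO_KIND_VLD0, unknown_kind),
        cmp::Ordering::Less
    );

    // ...so sorting any list of kinds puts the preferred ones first.
    let mut kinds = vec![unknown_kind, CRYPTO_KIND_VLD0];
    kinds.sort_by(|a, b| compare_crypto_kind(*a, *b));
    assert_eq!(kinds[0], CRYPTO_KIND_VLD0);
}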
@@ -43,8 +60,6 @@ impl KeyPair {
     Debug,
     Serialize,
     Deserialize,
-    PartialOrd,
-    Ord,
     PartialEq,
     Eq,
     Hash,
@@ -63,6 +78,21 @@ impl TypedKey {
         Self { kind, key }
     }
 }
+impl PartialOrd for TypedKey {
+    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for TypedKey {
+    fn cmp(&self, other: &Self) -> cmp::Ordering {
+        let x = compare_crypto_kind(self.kind, other.kind);
+        if x != cmp::Ordering::Equal {
+            return x;
+        }
+        self.key.cmp(&other.key)
+    }
+}
+
 impl fmt::Display for TypedKey {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
@@ -84,13 +114,121 @@ impl FromStr for TypedKey {

 #[derive(
     Clone,
-    Copy,
     Debug,
+    Serialize,
+    Deserialize,
     PartialOrd,
     Ord,
     PartialEq,
     Eq,
     Hash,
+    RkyvArchive,
+    RkyvSerialize,
+    RkyvDeserialize,
+)]
+#[archive_attr(repr(C), derive(CheckBytes))]
+pub struct TypedKeySet {
+    items: Vec<TypedKey>,
+}
+
+impl TypedKeySet {
+    pub fn new() -> Self {
+        Self { items: Vec::new() }
+    }
+    pub fn with_capacity(cap: usize) -> Self {
+        Self {
+            items: Vec::with_capacity(cap),
+        }
+    }
+    pub fn get(&self, kind: CryptoKind) -> Option<TypedKey> {
+        self.items.iter().find(|x| x.kind == kind).copied()
+    }
+    pub fn add(&mut self, typed_key: TypedKey) {
+        for x in &mut self.items {
+            if x.kind == typed_key.kind {
+                *x = typed_key;
+                return;
+            }
+        }
+        self.items.push(typed_key);
+        self.items.sort()
+    }
+    pub fn remove(&self, kind: CryptoKind) {
+        if let Some(idx) = self.items.iter().position(|x| x.kind == kind) {
+            self.items.remove(idx);
+        }
+    }
+    pub fn best(&self) -> Option<TypedKey> {
+        self.items.first().copied()
+    }
+    pub fn len(&self) -> usize {
+        self.items.len()
+    }
+    pub fn iter(&self) -> core::slice::Iter<'_, TypedKey> {
+        self.items.iter()
+    }
+    pub fn contains(&self, typed_key: &TypedKey) -> bool {
+        self.items.contains(typed_key)
+    }
+    pub fn contains_any(&self, typed_keys: &[TypedKey]) -> bool {
+        for typed_key in typed_keys {
+            if self.items.contains(typed_key) {
+                return true;
+            }
+        }
+        false
+    }
+}
+
+impl core::ops::Deref for TypedKeySet {
+    type Target = [TypedKey];
+
+    #[inline]
+    fn deref(&self) -> &[TypedKey] {
+        &self.items
+    }
+}
+
+impl fmt::Display for TypedKeySet {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+        write!(f, "[")?;
+        let mut first = true;
+        for x in &self.items {
+            if !first {
+                write!(f, ",")?;
+                first = false;
+            }
+            write!(f, "{}", x)?;
+        }
+        write!(f, "]")
+    }
+}
+impl FromStr for TypedKeySet {
+    type Err = VeilidAPIError;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let mut items = Vec::new();
+        if s.len() < 2 {
+            apibail_parse_error!("invalid length", s);
+        }
+        if &s[0..1] != "[" || &s[(s.len() - 1)..] != "]" {
+            apibail_parse_error!("invalid format", s);
+        }
+        for x in s[1..s.len() - 1].split(",") {
+            let tk = TypedKey::from_str(x.trim())?;
+            items.push(tk);
+        }
+
+        Ok(Self { items })
+    }
+}
+
+#[derive(
+    Clone,
+    Copy,
+    Debug,
+    PartialEq,
+    Eq,
+    Hash,
     Serialize,
     Deserialize,
     RkyvArchive,
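TypedKeySet keeps at most one key per crypto kind: add() replaces an existing key of the same kind and re-sorts, and because TypedKey orders by compare_crypto_kind first, best() always yields the key of the most preferred kind. A brief usage sketch; the helpers and the keys slice are illustrative assumptions, not part of this commit:

// Collect a node's ids across crypto kinds; `keys` would come from per-kind
// key generation elsewhere, so this helper is illustrative only.
fn collect_node_ids(keys: &[TypedKey]) -> TypedKeySet {
    let mut ids = TypedKeySet::new();
    for k in keys {
        // One key per kind: adding a key of an existing kind replaces it,
        // and the set re-sorts so the most preferred kind stays first.
        ids.add(*k);
    }
    ids
}

// best() is then the id of the most preferred kind, and contains_any() is the
// "is this the same node?" check the network manager changes below rely on.
// (&TypedKeySet deref-coerces to &[TypedKey] through the Deref impl above.)
fn same_node(a: &TypedKeySet, b: &TypedKeySet) -> bool {
    a.contains_any(b)
}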
@@ -110,6 +248,26 @@ impl TypedKeyPair {
     }
 }

+impl PartialOrd for TypedKeyPair {
+    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for TypedKeyPair {
+    fn cmp(&self, other: &Self) -> cmp::Ordering {
+        let x = compare_crypto_kind(self.kind, other.kind);
+        if x != cmp::Ordering::Equal {
+            return x;
+        }
+        let x = self.key.cmp(&other.key);
+        if x != cmp::Ordering::Equal {
+            return x;
+        }
+        self.secret.cmp(&other.secret)
+    }
+}
+
 impl fmt::Display for TypedKeyPair {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
         write!(
@@ -142,8 +300,6 @@ impl FromStr for TypedKeyPair {
     Clone,
     Copy,
     Debug,
-    PartialOrd,
-    Ord,
     PartialEq,
     Eq,
     Hash,
@@ -176,6 +332,22 @@ impl TypedSignature {
     }
 }

+impl PartialOrd for TypedSignature {
+    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for TypedSignature {
+    fn cmp(&self, other: &Self) -> cmp::Ordering {
+        let x = compare_crypto_kind(self.kind, other.kind);
+        if x != cmp::Ordering::Equal {
+            return x;
+        }
+        self.signature.cmp(&other.signature)
+    }
+}
+
 impl fmt::Display for TypedSignature {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
         write!(f, "{}:{}", self.kind, self.signature.encode())
@@ -198,8 +370,6 @@ impl FromStr for TypedSignature {
     Clone,
     Copy,
     Debug,
-    PartialOrd,
-    Ord,
     PartialEq,
     Eq,
     Hash,
@@ -232,6 +402,26 @@ impl TypedKeySignature {
     }
 }

+impl PartialOrd for TypedKeySignature {
+    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for TypedKeySignature {
+    fn cmp(&self, other: &Self) -> cmp::Ordering {
+        let x = compare_crypto_kind(self.kind, other.kind);
+        if x != cmp::Ordering::Equal {
+            return x;
+        }
+        let x = self.key.cmp(&other.key);
+        if x != cmp::Ordering::Equal {
+            return x;
+        }
+        self.signature.cmp(&other.signature)
+    }
+}
+
 impl fmt::Display for TypedKeySignature {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
         write!(
@@ -134,7 +134,7 @@ struct PublicAddressCheckCacheKey(ProtocolType, AddressType);
 // The mutable state of the network manager
 struct NetworkManagerInner {
     stats: NetworkManagerStats,
-    client_whitelist: LruCache<PublicKey, ClientWhitelistEntry>,
+    client_whitelist: LruCache<TypedKey, ClientWhitelistEntry>,
     public_address_check_cache:
         BTreeMap<PublicAddressCheckCacheKey, LruCache<IpAddr, SocketAddress>>,
     public_address_inconsistencies_table:
@@ -396,7 +396,7 @@ impl NetworkManager {
         debug!("finished network manager shutdown");
     }

-    pub fn update_client_whitelist(&self, client: PublicKey) {
+    pub fn update_client_whitelist(&self, client: TypedKey) {
         let mut inner = self.inner.lock();
         match inner.client_whitelist.entry(client) {
             hashlink::lru_cache::Entry::Occupied(mut entry) => {
@@ -411,7 +411,7 @@ impl NetworkManager {
     }

     #[instrument(level = "trace", skip(self), ret)]
-    pub fn check_client_whitelist(&self, client: PublicKey) -> bool {
+    pub fn check_client_whitelist(&self, client: TypedKey) -> bool {
         let mut inner = self.inner.lock();

         match inner.client_whitelist.entry(client) {
@@ -519,12 +519,15 @@ impl NetworkManager {
         let routing_table = self.routing_table();

         // Generate receipt and serialized form to return
-xxx add 'preferred_kind' and propagate envelope changes so we can make recent_peers work with cryptokind
+        let vcrypto = self.crypto().best();

-        let nonce = Crypto::get_random_nonce();
-        let receipt = Receipt::try_new(0, nonce, routing_table.node_id(), extra_data)?;
+        let nonce = vcrypto.random_nonce();
+        let node_id = routing_table.node_id(vcrypto.kind());
+        let node_id_secret = routing_table.node_id_secret(vcrypto.kind());
+
+        let receipt = Receipt::try_new(MAX_ENVELOPE_VERSION, node_id.kind, nonce, node_id.key, extra_data)?;
         let out = receipt
-            .to_signed_data(&routing_table.node_id_secret())
+            .to_signed_data(self.crypto(), &node_id_secret)
             .wrap_err("failed to generate signed receipt")?;

         // Record the receipt for later
@@ -545,10 +548,15 @@ xxx add 'preferred_kind' and propagate envelope changes so we can make recent_pe
         let routing_table = self.routing_table();

         // Generate receipt and serialized form to return
-        let nonce = Crypto::get_random_nonce();
-        let receipt = Receipt::try_new(0, nonce, routing_table.node_id(), extra_data)?;
+        let vcrypto = self.crypto().best();
+
+        let nonce = vcrypto.random_nonce();
+        let node_id = routing_table.node_id(vcrypto.kind());
+        let node_id_secret = routing_table.node_id_secret(vcrypto.kind());
+
+        let receipt = Receipt::try_new(MAX_ENVELOPE_VERSION, node_id.kind, nonce, node_id.key, extra_data)?;
         let out = receipt
-            .to_signed_data(&routing_table.node_id_secret())
+            .to_signed_data(self.crypto(), &node_id_secret)
             .wrap_err("failed to generate signed receipt")?;

         // Record the receipt for later
@@ -568,7 +576,7 @@ xxx add 'preferred_kind' and propagate envelope changes so we can make recent_pe
     ) -> NetworkResult<()> {
         let receipt_manager = self.receipt_manager();

-        let receipt = match Receipt::from_signed_data(receipt_data.as_ref()) {
+        let receipt = match Receipt::from_signed_data(self.crypto(), receipt_data.as_ref()) {
             Err(e) => {
                 return NetworkResult::invalid_message(e.to_string());
             }
@@ -589,7 +597,7 @@ xxx add 'preferred_kind' and propagate envelope changes so we can make recent_pe
     ) -> NetworkResult<()> {
         let receipt_manager = self.receipt_manager();

-        let receipt = match Receipt::from_signed_data(receipt_data.as_ref()) {
+        let receipt = match Receipt::from_signed_data(self.crypto(), receipt_data.as_ref()) {
             Err(e) => {
                 return NetworkResult::invalid_message(e.to_string());
             }
@@ -609,7 +617,7 @@ xxx add 'preferred_kind' and propagate envelope changes so we can make recent_pe
     ) -> NetworkResult<()> {
         let receipt_manager = self.receipt_manager();

-        let receipt = match Receipt::from_signed_data(receipt_data.as_ref()) {
+        let receipt = match Receipt::from_signed_data(self.crypto(), receipt_data.as_ref()) {
             Err(e) => {
                 return NetworkResult::invalid_message(e.to_string());
             }
@@ -626,11 +634,11 @@ xxx add 'preferred_kind' and propagate envelope changes so we can make recent_pe
     pub async fn handle_private_receipt<R: AsRef<[u8]>>(
         &self,
         receipt_data: R,
-        private_route: PublicKey,
+        private_route: TypedKey,
     ) -> NetworkResult<()> {
         let receipt_manager = self.receipt_manager();

-        let receipt = match Receipt::from_signed_data(receipt_data.as_ref()) {
+        let receipt = match Receipt::from_signed_data(self.crypto(), receipt_data.as_ref()) {
             Err(e) => {
                 return NetworkResult::invalid_message(e.to_string());
             }
@@ -653,8 +661,7 @@ xxx add 'preferred_kind' and propagate envelope changes so we can make recent_pe
         // Add the peer info to our routing table
         let peer_nr = match routing_table.register_node_with_peer_info(
             RoutingDomain::PublicInternet,
-            peer_info.node_id.key,
-            peer_info.signed_node_info,
+            peer_info,
             false,
         ) {
             None => {
@@ -677,8 +684,7 @@ xxx add 'preferred_kind' and propagate envelope changes so we can make recent_pe
         // Add the peer info to our routing table
         let mut peer_nr = match routing_table.register_node_with_peer_info(
             RoutingDomain::PublicInternet,
-            peer_info.node_id.key,
-            peer_info.signed_node_info,
+            peer_info,
             false,
         ) {
             None => {
@@ -733,21 +739,25 @@ xxx add 'preferred_kind' and propagate envelope changes so we can make recent_pe
     #[instrument(level = "trace", skip(self, body), err)]
    fn build_envelope<B: AsRef<[u8]>>(
         &self,
-        dest_node_id: PublicKey,
+        dest_node_id: TypedKey,
         version: u8,
         body: B,
     ) -> EyreResult<Vec<u8>> {
         // DH to get encryption key
         let routing_table = self.routing_table();
-        let node_id = routing_table.node_id();
-        let node_id_secret = routing_table.node_id_secret();
+        let Some(vcrypto) = self.crypto().get(dest_node_id.kind) else {
+            bail!("should not have a destination with incompatible crypto here");
+        };
+
+        let node_id = routing_table.node_id(vcrypto.kind());
+        let node_id_secret = routing_table.node_id_secret(vcrypto.kind());

         // Get timestamp, nonce
         let ts = get_aligned_timestamp();
-        let nonce = Crypto::get_random_nonce();
+        let nonce = vcrypto.random_nonce();

         // Encode envelope
-        let envelope = Envelope::new(version, ts, nonce, node_id, dest_node_id);
+        let envelope = Envelope::new(version, node_id.kind, ts, nonce, node_id.key, dest_node_id.key);
         envelope
             .to_encrypted_data(self.crypto(), body.as_ref(), &node_id_secret)
             .wrap_err("envelope failed to encode")
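build_envelope now selects the cryptosystem from the destination key's kind instead of assuming a single global one. The same dispatch shape applies anywhere a TypedKey arrives from the network; a condensed sketch of the pattern, where the helper name, its by-value Crypto argument, and the eyre bail! usage are assumptions for illustration:

// Look up the cryptosystem matching a TypedKey's kind, failing cleanly when
// the kind is unsupported (hypothetical helper, not code from this commit).
fn vcrypto_for(crypto: Crypto, key: TypedKey) -> EyreResult<CryptoSystemVersion> {
    let Some(vcrypto) = crypto.get(key.kind) else {
        bail!("unsupported crypto kind: {}", key.kind);
    };
    Ok(vcrypto)
}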
@@ -755,19 +765,22 @@ xxx add 'preferred_kind' and propagate envelope changes so we can make recent_pe

     /// Called by the RPC handler when we want to issue an RPC request or response
     /// node_ref is the direct destination to which the envelope will be sent
-    /// If 'node_id' is specified, it can be different than node_ref.node_id()
+    /// If 'envelope_node_id' is specified, it can be different than the node_ref being sent to
     /// which will cause the envelope to be relayed
     #[instrument(level = "trace", skip(self, body), ret, err)]
     pub async fn send_envelope<B: AsRef<[u8]>>(
         &self,
         node_ref: NodeRef,
-        envelope_node_id: Option<PublicKey>,
+        envelope_node_id: Option<TypedKey>,
         body: B,
     ) -> EyreResult<NetworkResult<SendDataKind>> {
-        let via_node_id = node_ref.node_id();
-        let envelope_node_id = envelope_node_id.unwrap_or(via_node_id);
+        let via_node_ids = node_ref.node_ids();
+        let Some(best_via_node_id) = via_node_ids.best() else {
+            bail!("should have a best node id");
+        };
+        let envelope_node_id = envelope_node_id.unwrap_or(best_via_node_id);

-        if envelope_node_id != via_node_id {
+        if !via_node_ids.contains(&envelope_node_id) {
             log_net!(
                 "sending envelope to {:?} via {:?}",
                 envelope_node_id,
@@ -776,11 +789,13 @@ xxx add 'preferred_kind' and propagate envelope changes so we can make recent_pe
         } else {
            log_net!("sending envelope to {:?}", node_ref);
        }
-        // Get node's min/max version and see if we can send to it
+
+        // Get node's min/max envelope version and see if we can send to it
         // and if so, get the max version we can use
-        let version = if let Some(min_max_version) = node_ref.min_max_version() {
+        node_ref.envelope_support()
+        let envelope_version = if let Some(min_max_version) = {
             #[allow(clippy::absurd_extreme_comparisons)]
-            if min_max_version.min > MAX_CRYPTO_VERSION || min_max_version.max < MIN_CRYPTO_VERSION
+            if min_max_version.min > MAX_ENVELOPE_VERSION || min_max_version.max < MIN_ENVELOPE_VERSION
             {
                 bail!(
                     "can't talk to this node {} because version is unsupported: ({},{})",
@@ -1374,7 +1389,7 @@ xxx add 'preferred_kind' and propagate envelope changes so we can make recent_pe
         // If the recipient id is not our node id, then it needs relaying
         let sender_id = TypedKey::new(envelope.get_crypto_kind(), envelope.get_sender_id());
         let recipient_id = TypedKey::new(envelope.get_crypto_kind(), envelope.get_recipient_id());
-        if recipient_id != routing_table.node_id() {
+        if !routing_table.matches_own_node_id(&[recipient_id]) {
             // See if the source node is allowed to resolve nodes
             // This is a costly operation, so only outbound-relay permitted
             // nodes are allowed to do this, for example PWA users
@@ -1419,7 +1434,7 @@ xxx add 'preferred_kind' and propagate envelope changes so we can make recent_pe
         }

         // DH to get decryption key (cached)
-        let node_id_secret = routing_table.node_id_secret();
+        let node_id_secret = routing_table.node_id_secret(envelope.get_crypto_kind());

         // Decrypt the envelope body
         let body = match envelope
@@ -1445,7 +1460,7 @@ xxx add 'preferred_kind' and propagate envelope changes so we can make recent_pe
             }
             Some(v) => v,
         };
-        source_noderef.set_min_max_version(envelope.get_min_max_version());
+        source_noderef.add_envelope_version(envelope.get_version());

         // xxx: deal with spoofing and flooding here?

@@ -1539,7 +1554,7 @@ xxx add 'preferred_kind' and propagate envelope changes so we can make recent_pe
             if let Some(nr) = routing_table.lookup_node_ref(k) {
                 let peer_stats = nr.peer_stats();
                 let peer = PeerTableData {
-                    node_ids: k,
+                    node_ids: nr.node_ids(),
                     peer_address: v.last_connection.remote(),
                     peer_stats,
                 };
@@ -112,7 +112,7 @@ impl DiscoveryContext {
         &self,
         protocol_type: ProtocolType,
         address_type: AddressType,
-        ignore_node: Option<PublicKey>,
+        ignore_node_ids: Option<TypedKeySet>,
     ) -> Option<(SocketAddress, NodeRef)> {
         let node_count = {
             let config = self.routing_table.network_manager().config();
@@ -121,7 +121,7 @@ impl DiscoveryContext {
         };

         // Build an filter that matches our protocol and address type
-        // and excludes relays so we can get an accurate external address
+        // and excludes relayed nodes so we can get an accurate external address
         let dial_info_filter = DialInfoFilter::all()
             .with_protocol_type(protocol_type)
             .with_address_type(address_type);
@@ -130,11 +130,11 @@ impl DiscoveryContext {
             dial_info_filter.clone(),
         );
         let disallow_relays_filter = Box::new(
-            move |rti: &RoutingTableInner, _k: PublicKey, v: Option<Arc<BucketEntry>>| {
+            move |rti: &RoutingTableInner, v: Option<Arc<BucketEntry>>| {
                 let v = v.unwrap();
                 v.with(rti, |_rti, e| {
                     if let Some(n) = e.signed_node_info(RoutingDomain::PublicInternet) {
-                        n.relay_id().is_none()
+                        n.relay_ids().is_empty()
                     } else {
                         false
                     }
@@ -158,8 +158,8 @@ impl DiscoveryContext {

         // For each peer, if it's not our ignore-node, ask them for our public address, filtering on desired dial info
         for mut peer in peers {
-            if let Some(ignore_node) = ignore_node {
-                if peer.node_id() == ignore_node {
+            if let Some(ignore_node_ids) = &ignore_node_ids {
+                if peer.node_ids().contains_any(ignore_node_ids) {
                     continue;
                 }
             }
@@ -478,12 +478,12 @@ impl DiscoveryContext {

         // Get our external address from some fast node, that is not node 1, call it node 2
         let (external_2_address, node_2) = match self
-            .discover_external_address(protocol_type, address_type, Some(node_1.node_id()))
+            .discover_external_address(protocol_type, address_type, Some(node_1.node_ids()))
             .await
         {
             None => {
                 // If we can't get an external address, allow retry
-                log_net!(debug "failed to discover external address 2 for {:?}:{:?}, skipping node {:?}", protocol_type, address_type, node_1.node_id());
+                log_net!(debug "failed to discover external address 2 for {:?}:{:?}, skipping node {:?}", protocol_type, address_type, node_1);
                 return Ok(false);
             }
             Some(v) => v,
@@ -11,7 +11,7 @@ pub enum ReceiptEvent {
     ReturnedOutOfBand,
     ReturnedInBand { inbound_noderef: NodeRef },
     ReturnedSafety,
-    ReturnedPrivate { private_route: PublicKey },
+    ReturnedPrivate { private_route: TypedKey },
     Expired,
     Cancelled,
 }
@@ -21,7 +21,7 @@ pub enum ReceiptReturned {
     OutOfBand,
     InBand { inbound_noderef: NodeRef },
     Safety,
-    Private { private_route: PublicKey },
+    Private { private_route: TypedKey },
 }

 pub trait ReceiptCallback: Send + 'static {
@@ -2,18 +2,28 @@ use super::*;
 use core::sync::atomic::Ordering;
 use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};

+/// Routing Table Bucket
+/// Stores map of public keys to entries, which may be in multiple routing tables per crypto kind
+/// Keeps entries at a particular 'dht distance' from this cryptokind's node id
+/// Helps to keep managed lists at particular distances so we can evict nodes by priority
+/// where the priority comes from liveness and age of the entry (older is better)
 pub struct Bucket {
+    /// handle to the routing table
     routing_table: RoutingTable,
-    entries: BTreeMap<PublicKey, Arc<BucketEntry>>,
-    newest_entry: Option<PublicKey>,
+    /// Map of keys to entries for this bucket
+    entries: BTreeMap<TypedKey, Arc<BucketEntry>>,
+    /// The most recent entry in this bucket
+    newest_entry: Option<TypedKey>,
+    /// The crypto kind in use for the public keys in this bucket
+    kind: CryptoKind,
 }
 pub(super) type EntriesIter<'a> =
-    alloc::collections::btree_map::Iter<'a, PublicKey, Arc<BucketEntry>>;
+    alloc::collections::btree_map::Iter<'a, TypedKey, Arc<BucketEntry>>;

 #[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
 #[archive_attr(repr(C), derive(CheckBytes))]
 struct SerializedBucketEntryData {
-    key: PublicKey,
+    key: TypedKey,
     value: u32, // index into serialized entries list
 }

@@ -21,7 +31,7 @@ struct SerializedBucketEntryData {
 #[archive_attr(repr(C), derive(CheckBytes))]
 struct SerializedBucketData {
     entries: Vec<SerializedBucketEntryData>,
-    newest_entry: Option<PublicKey>,
+    newest_entry: Option<TypedKey>,
 }

 fn state_ordering(state: BucketEntryState) -> usize {
@@ -33,11 +43,12 @@ fn state_ordering(state: BucketEntryState) -> usize {
 }

 impl Bucket {
-    pub fn new(routing_table: RoutingTable) -> Self {
+    pub fn new(routing_table: RoutingTable, kind: CryptoKind) -> Self {
         Self {
             routing_table,
             entries: BTreeMap::new(),
             newest_entry: None,
+            kind,
         }
     }

@@ -83,22 +94,43 @@ impl Bucket {
         Ok(out)
     }

-    pub(super) fn add_entry(&mut self, node_id: PublicKey) -> NodeRef {
-        log_rtab!("Node added: {}", node_id.encode());
+    /// Create a new entry with a node_id of this crypto kind and return it
+    pub(super) fn add_entry(&mut self, node_id: TypedKey) -> NodeRef {
+        assert_eq!(node_id.kind, self.kind);
+
+        log_rtab!("Node added: {}", node_id);

         // Add new entry
-        self.entries.insert(node_id, Arc::new(BucketEntry::new()));
+        let entry = Arc::new(BucketEntry::new());
+        entry.with_mut_inner(|e| e.add_node_id(node_id));
+        self.entries.insert(node_id.key, entry.clone());

         // This is now the newest bucket entry
-        self.newest_entry = Some(node_id);
+        self.newest_entry = Some(node_id.key);

-        // Get a node ref to return
-        let entry = self.entries.get(&node_id).unwrap().clone();
-        NodeRef::new(self.routing_table.clone(), node_id, entry, None)
+        // Get a node ref to return since this is new
+        NodeRef::new(self.routing_table.clone(), entry, None)
     }

-    pub(super) fn remove_entry(&mut self, node_id: &PublicKey) {
-        log_rtab!("Node removed: {}", node_id);
+    /// Add an existing entry with a new node_id for this crypto kind
+    pub(super) fn add_existing_entry(&mut self, node_id: TypedKey, entry: Arc<BucketEntry>) {
+        assert_eq!(node_id.kind, self.kind);
+
+        log_rtab!("Existing node added: {}", node_id);
+
+        // Add existing entry
+        entry.with_mut_inner(|e| e.add_node_id(node_id));
+        self.entries.insert(node_id.key, entry);
+
+        // This is now the newest bucket entry
+        self.newest_entry = Some(node_id.key);
+
+        // No need to return a noderef here because the noderef will already exist in the caller
+    }
+
+    /// Remove an entry with a node_id for this crypto kind from the bucket
+    fn remove_entry(&mut self, node_id: &TypedKey) {
+        log_rtab!("Node removed: {}:{}", self.kind, node_id);

         // Remove the entry
         self.entries.remove(node_id);
@@ -106,7 +138,7 @@ impl Bucket {
         // newest_entry is updated by kick_bucket()
     }

-    pub(super) fn entry(&self, key: &PublicKey) -> Option<Arc<BucketEntry>> {
+    pub(super) fn entry(&self, key: &TypedKey) -> Option<Arc<BucketEntry>> {
         self.entries.get(key).cloned()
     }

@@ -114,7 +146,7 @@ impl Bucket {
         self.entries.iter()
     }

-    pub(super) fn kick(&mut self, bucket_depth: usize) -> Option<BTreeSet<PublicKey>> {
+    pub(super) fn kick(&mut self, bucket_depth: usize) -> Option<BTreeSet<TypedKey>> {
         // Get number of entries to attempt to purge from bucket
         let bucket_len = self.entries.len();

@@ -124,11 +156,11 @@ impl Bucket {
         }

         // Try to purge the newest entries that overflow the bucket
-        let mut dead_node_ids: BTreeSet<PublicKey> = BTreeSet::new();
+        let mut dead_node_ids: BTreeSet<TypedKey> = BTreeSet::new();
         let mut extra_entries = bucket_len - bucket_depth;

         // Get the sorted list of entries by their kick order
-        let mut sorted_entries: Vec<(PublicKey, Arc<BucketEntry>)> = self
+        let mut sorted_entries: Vec<(TypedKey, Arc<BucketEntry>)> = self
             .entries
             .iter()
             .map(|(k, v)| (k.clone(), v.clone()))
@@ -68,22 +68,12 @@ pub struct BucketEntryLocalNetwork {
     node_status: Option<LocalNetworkNodeStatus>,
 }

-/// A range of cryptography versions supported by this entry
-#[derive(Copy, Clone, Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
-#[archive_attr(repr(C), derive(CheckBytes))]
-pub struct VersionRange {
-    /// The minimum cryptography version supported by this entry
-    pub min: u8,
-    /// The maximum cryptography version supported by this entry
-    pub max: u8,
-}
-
 /// The data associated with each bucket entry
 #[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct BucketEntryInner {
     /// The node ids matching this bucket entry, with the cryptography versions supported by this node as the 'kind' field
-    node_ids: Vec<TypedKey>,
+    node_ids: TypedKeySet,
     /// The set of envelope versions supported by the node inclusive of the requirements of any relay the node may be using
     envelope_support: Vec<u8>,
     /// If this node has updated it's SignedNodeInfo since our network
@@ -133,9 +123,12 @@ impl BucketEntryInner {
     }

     // Node ids
-    pub fn node_ids(&self) -> Vec<TypedKey> {
+    pub fn node_ids(&self) -> TypedKeySet {
         self.node_ids.clone()
     }
+    pub fn add_node_id(&mut self, node_id: TypedKey) {
+        self.node_ids.add(node_id);
+    }

     // Less is faster
     pub fn cmp_fastest(e1: &Self, e2: &Self) -> std::cmp::Ordering {
@@ -453,12 +446,23 @@ impl BucketEntryInner {
         out
     }

-    pub fn set_min_max_version(&mut self, min_max_version: VersionRange) {
-        self.min_max_version = Some(min_max_version);
+    pub fn add_envelope_version(&mut self, envelope_version: u8) {
+        if self.envelope_support.contains(&envelope_version) {
+            return;
+        }
+        self.envelope_support.push(envelope_version);
+        self.envelope_support.dedup();
+        self.envelope_support.sort();
     }

-    pub fn min_max_version(&self) -> Option<VersionRange> {
-        self.min_max_version
+    pub fn set_envelope_support(&mut self, envelope_support: Vec<u8>) {
+        envelope_support.dedup();
+        envelope_support.sort();
+        self.envelope_support = envelope_support;
+    }
+
+    pub fn envelope_support(&self) -> Vec<u8> {
+        self.envelope_support.clone()
     }

     pub fn state(&self, cur_ts: Timestamp) -> BucketEntryState {
@@ -757,7 +761,8 @@ impl BucketEntry {
         Self {
             ref_count: AtomicU32::new(0),
             inner: RwLock::new(BucketEntryInner {
-                min_max_version: None,
+                node_ids: TypedKeySet::new(),
+                envelope_support: Vec::new(),
                 updated_since_last_network_change: false,
                 last_connections: BTreeMap::new(),
                 local_network: BucketEntryLocalNetwork {
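With the min/max VersionRange replaced by an explicit envelope_support list that the setters above keep sorted, choosing the version to use with a peer becomes an intersection with our own VALID_ENVELOPE_VERSIONS. A sketch of that selection, which the in-progress send_envelope rework earlier in this commit appears to be heading toward; the helper is illustrative, not code from this commit:

// Hypothetical helper: pick the best envelope version shared with a peer,
// given the peer's envelope_support() list.
fn best_common_envelope_version(peer_support: &[u8]) -> Option<EnvelopeVersion> {
    // Our own list is ordered best-first, so the first hit is the best match.
    VALID_ENVELOPE_VERSIONS
        .iter()
        .copied()
        .find(|v| peer_support.contains(v))
}

A caller such as send_envelope would then bail when this returns None, mirroring the old min/max range check it replaces.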
@@ -113,7 +113,7 @@ impl RoutingTable {
         let mut cnt = 0;
         out += &format!("Entries: {}\n", inner.bucket_entry_count);
         while b < blen {
-            let filtered_entries: Vec<(&PublicKey, &Arc<BucketEntry>)> = inner.buckets[b]
+            let filtered_entries: Vec<(&TypedKey, &Arc<BucketEntry>)> = inner.buckets[b]
                 .entries()
                 .filter(|e| {
                     let state = e.1.with(inner, |_rti, e| e.state(cur_ts));
@@ -149,7 +149,7 @@ impl RoutingTable {
         out
     }

-    pub(crate) fn debug_info_entry(&self, node_id: PublicKey) -> String {
+    pub(crate) fn debug_info_entry(&self, node_id: TypedKey) -> String {
         let mut out = String::new();
         out += &format!("Entry {:?}:\n", node_id);
         if let Some(nr) = self.lookup_node_ref(node_id) {
@ -49,7 +49,7 @@ pub struct LowLevelPortInfo {
|
|||||||
pub protocol_to_port: ProtocolToPortMapping,
|
pub protocol_to_port: ProtocolToPortMapping,
|
||||||
}
|
}
|
||||||
pub type RoutingTableEntryFilter<'t> =
|
pub type RoutingTableEntryFilter<'t> =
|
||||||
Box<dyn FnMut(&RoutingTableInner, PublicKey, Option<Arc<BucketEntry>>) -> bool + Send + 't>;
|
Box<dyn FnMut(&RoutingTableInner, Option<Arc<BucketEntry>>) -> bool + Send + 't>;
|
||||||
|
|
||||||
#[derive(Clone, Debug, Default, Eq, PartialEq)]
|
#[derive(Clone, Debug, Default, Eq, PartialEq)]
|
||||||
pub struct RoutingTableHealth {
|
pub struct RoutingTableHealth {
|
||||||
@ -110,7 +110,7 @@ impl RoutingTableUnlockedInner {
|
|||||||
f(&*self.config.get())
|
f(&*self.config.get())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn node_id(&self, kind: CryptoKind) -> PublicKey {
|
pub fn node_id(&self, kind: CryptoKind) -> TypedKey {
|
||||||
self.node_id_keypairs.get(&kind).unwrap().key
|
self.node_id_keypairs.get(&kind).unwrap().key
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -192,6 +192,27 @@ impl RoutingTable {
|
|||||||
this
|
this
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/////////////////////////////////////
|
||||||
|
/// Unlocked passthrough
|
||||||
|
pub fn network_manager(&self) -> NetworkManager {
|
||||||
|
self.unlocked_inner.network_manager()
|
||||||
|
}
|
||||||
|
pub fn crypto(&self) -> Crypto {
|
||||||
|
self.unlocked_inner.crypto()
|
||||||
|
}
|
||||||
|
pub fn rpc_processor(&self) -> RPCProcessor {
|
||||||
|
self.unlocked_inner.rpc_processor()
|
||||||
|
}
|
||||||
|
pub fn node_id(&self, kind: CryptoKind) -> TypedKey {
|
||||||
|
self.unlocked_inner.node_id(kind)
|
||||||
|
}
|
||||||
|
pub fn node_id_secret(&self, kind: CryptoKind) -> SecretKey {
|
||||||
|
self.unlocked_inner.node_id_secret(kind)
|
||||||
|
}
|
||||||
|
pub fn matches_own_node_id(&self, node_ids: &[TypedKey]) -> bool {
|
||||||
|
self.unlocked_inner.matches_own_node_id(node_ids)
|
||||||
|
}
|
||||||
|
|
||||||
/////////////////////////////////////
|
/////////////////////////////////////
|
||||||
/// Initialization
|
/// Initialization
|
||||||
|
|
||||||
@@ -556,14 +577,14 @@ impl RoutingTable {
}

/// Resolve an existing routing table entry and return a reference to it
- pub fn lookup_node_ref(&self, node_id: PublicKey) -> Option<NodeRef> {
+ pub fn lookup_node_ref(&self, node_id: TypedKey) -> Option<NodeRef> {
self.inner.read().lookup_node_ref(self.clone(), node_id)
}

/// Resolve an existing routing table entry and return a filtered reference to it
pub fn lookup_and_filter_noderef(
&self,
- node_id: PublicKey,
+ node_id: TypedKey,
routing_domain_set: RoutingDomainSet,
dial_info_filter: DialInfoFilter,
) -> Option<NodeRef> {
@@ -596,7 +617,7 @@ impl RoutingTable {
/// and add the last peer address we have for it, since that's pretty common
pub fn register_node_with_existing_connection(
&self,
- node_id: PublicKey,
+ node_id: TypedKey,
descriptor: ConnectionDescriptor,
timestamp: Timestamp,
) -> Option<NodeRef> {
@@ -615,7 +636,7 @@ impl RoutingTable {
self.inner.read().get_routing_table_health()
}

- pub fn get_recent_peers(&self) -> Vec<(PublicKey, RecentPeersEntry)> {
+ pub fn get_recent_peers(&self) -> Vec<(TypedKey, RecentPeersEntry)> {
let mut recent_peers = Vec::new();
let mut dead_peers = Vec::new();
let mut out = Vec::new();
@@ -654,7 +675,7 @@ impl RoutingTable {
out
}

- pub fn touch_recent_peer(&self, node_id: PublicKey, last_connection: ConnectionDescriptor) {
+ pub fn touch_recent_peer(&self, node_id: TypedKey, last_connection: ConnectionDescriptor) {
self.inner
.write()
.touch_recent_peer(node_id, last_connection)
@@ -703,7 +724,7 @@ impl RoutingTable {
dial_info_filter: DialInfoFilter,
) -> RoutingTableEntryFilter<'a> {
// does it have matching public dial info?
- Box::new(move |rti, _k, e| {
+ Box::new(move |rti, e| {
if let Some(e) = e {
e.with(rti, |_rti, e| {
if let Some(ni) = e.node_info(routing_domain) {
@@ -731,7 +752,7 @@ impl RoutingTable {
dial_info: DialInfo,
) -> RoutingTableEntryFilter<'a> {
// does the node's outbound capabilities match the dialinfo?
- Box::new(move |rti, _k, e| {
+ Box::new(move |rti, e| {
if let Some(e) = e {
e.with(rti, |_rti, e| {
if let Some(ni) = e.node_info(routing_domain) {
@@ -774,8 +795,8 @@ impl RoutingTable {
let mut nodes_proto_v6 = vec![0usize, 0usize, 0usize, 0usize];

let filter = Box::new(
- move |rti: &RoutingTableInner, _k: PublicKey, v: Option<Arc<BucketEntry>>| {
+ move |rti: &RoutingTableInner, entry: Option<Arc<BucketEntry>>| {
- let entry = v.unwrap();
+ let entry = entry.unwrap();
entry.with(rti, |_rti, e| {
// skip nodes on our local network here
if e.has_node_info(RoutingDomain::LocalNetwork.into()) {
@@ -821,8 +842,8 @@ impl RoutingTable {
self.find_fastest_nodes(
protocol_types_len * 2 * max_per_type,
filters,
- |_rti, k: PublicKey, v: Option<Arc<BucketEntry>>| {
+ |_rti, entry: Option<Arc<BucketEntry>>| {
- NodeRef::new(self.clone(), k, v.unwrap().clone(), None)
+ NodeRef::new(self.clone(), entry.unwrap().clone(), None)
},
)
}
@@ -838,10 +859,10 @@ impl RoutingTable {
where
C: for<'a, 'b> FnMut(
&'a RoutingTableInner,
- &'b (PublicKey, Option<Arc<BucketEntry>>),
+ &'b Option<Arc<BucketEntry>>,
- &'b (PublicKey, Option<Arc<BucketEntry>>),
+ &'b Option<Arc<BucketEntry>>,
) -> core::cmp::Ordering,
- T: for<'r> FnMut(&'r RoutingTableInner, PublicKey, Option<Arc<BucketEntry>>) -> O + Send,
+ T: for<'r> FnMut(&'r RoutingTableInner, Option<Arc<BucketEntry>>) -> O + Send,
{
self.inner
.read()
@@ -855,7 +876,7 @@ impl RoutingTable {
transform: T,
) -> Vec<O>
where
- T: for<'r> FnMut(&'r RoutingTableInner, PublicKey, Option<Arc<BucketEntry>>) -> O + Send,
+ T: for<'r> FnMut(&'r RoutingTableInner, Option<Arc<BucketEntry>>) -> O + Send,
{
self.inner
.read()
@@ -864,12 +885,12 @@ impl RoutingTable {

pub fn find_closest_nodes<'a, T, O>(
&self,
- node_id: PublicKey,
+ node_id: TypedKey,
filters: VecDeque<RoutingTableEntryFilter>,
transform: T,
) -> Vec<O>
where
- T: for<'r> FnMut(&'r RoutingTableInner, PublicKey, Option<Arc<BucketEntry>>) -> O + Send,
+ T: for<'r> FnMut(&'r RoutingTableInner, Option<Arc<BucketEntry>>) -> O + Send,
{
self.inner
.read()
@@ -895,7 +916,7 @@ impl RoutingTable {
pub async fn find_node(
&self,
node_ref: NodeRef,
- node_id: PublicKey,
+ node_id: TypedKey,
) -> EyreResult<NetworkResult<Vec<NodeRef>>> {
let rpc_processor = self.rpc_processor();

@@ -1021,7 +1042,7 @@ impl RoutingTable {
// Go through all entries and find fastest entry that matches filter function
let inner = self.inner.read();
let inner = &*inner;
- let mut best_inbound_relay: Option<(PublicKey, Arc<BucketEntry>)> = None;
+ let mut best_inbound_relay: Option<(TypedKey, Arc<BucketEntry>)> = None;

// Iterate all known nodes for candidates
inner.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, k, v| {
@@ -1054,6 +1075,6 @@ impl RoutingTable {
Option::<()>::None
});
// Return the best inbound relay noderef
- best_inbound_relay.map(|(k, e)| NodeRef::new(self.clone(), k, e, None))
+ best_inbound_relay.map(|(k, e)| NodeRef::new(self.clone(), e, None))
}
}
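Not from the commit itself, but to illustrate why NodeRef::new loses its key parameter above: once a bucket entry stores its own set of node ids, a node reference only needs the entry. A toy model:

use std::sync::Arc;

// Toy stand-ins for BucketEntry/NodeRef; field names and the id strings are illustrative,
// not a real encoding.
#[derive(Debug)]
struct BucketEntry {
    node_ids: Vec<String>, // the real code uses a TypedKeySet
}

#[derive(Clone)]
struct NodeRef {
    entry: Arc<BucketEntry>,
}

impl NodeRef {
    // No separate key argument: the entry already knows every id it answers to.
    fn new(entry: Arc<BucketEntry>) -> Self {
        Self { entry }
    }
    fn node_ids(&self) -> Vec<String> {
        self.entry.node_ids.clone()
    }
}

fn main() {
    let entry = Arc::new(BucketEntry {
        node_ids: vec!["id-under-kind-a".to_string(), "id-under-kind-b".to_string()],
    });
    let nr = NodeRef::new(entry);
    assert_eq!(nr.node_ids().len(), 2);
    println!("node ref sketch ok");
}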
@@ -98,7 +98,7 @@ pub trait NodeRefBase: Sized {
fn routing_table(&self) -> RoutingTable {
self.common().routing_table.clone()
}
- fn node_ids(&self) -> Vec<TypedKey> {
+ fn node_ids(&self) -> TypedKeySet {
self.operate(|_rti, e| e.node_ids())
}
fn has_updated_since_last_network_change(&self) -> bool {
@@ -112,11 +112,14 @@ pub trait NodeRefBase: Sized {
e.update_node_status(node_status);
});
}
- fn min_max_version(&self) -> Option<VersionRange> {
+ fn envelope_support(&self) -> Vec<u8> {
- self.operate(|_rti, e| e.min_max_version())
+ self.operate(|_rti, e| e.envelope_support())
}
- fn set_min_max_version(&self, min_max_version: VersionRange) {
+ fn add_envelope_version(&self, envelope_version: u8) {
- self.operate_mut(|_rti, e| e.set_min_max_version(min_max_version))
+ self.operate_mut(|_rti, e| e.add_envelope_version(envelope_version))
+ }
+ fn set_envelope_support(&self, envelope_support: Vec<u8>) {
+ self.operate_mut(|_rti, e| e.set_envelope_support(envelope_support))
}
fn state(&self, cur_ts: Timestamp) -> BucketEntryState {
self.operate(|_rti, e| e.state(cur_ts))
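A hedged aside on the min_max_version to envelope_support change above: instead of a single version range, a peer now advertises an explicit list of supported envelope versions. One plausible way to pick a common version, which is an assumption for illustration rather than anything this diff shows, is to take the highest version both sides list:

// Pick the highest envelope version present in both support lists.
// This negotiation rule is an assumption; the commit only shows that support is
// now carried as a Vec<u8> instead of a VersionRange.
fn best_common_envelope_version(ours: &[u8], theirs: &[u8]) -> Option<u8> {
    ours.iter()
        .copied()
        .filter(|v| theirs.contains(v))
        .max()
}

fn main() {
    let ours = vec![0u8, 1, 2];
    let theirs = vec![1u8, 2, 3];
    assert_eq!(best_common_envelope_version(&ours, &theirs), Some(2));
    assert_eq!(best_common_envelope_version(&ours, &[4u8]), None);
    println!("envelope version sketch ok");
}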
@@ -117,14 +117,14 @@ impl PrivateRoute {
}
}

- pub fn first_hop_node_id(&self) -> Option<PublicKey> {
+ pub fn first_hop_node_id(&self) -> Option<TypedKey> {
let PrivateRouteHops::FirstHop(pr_first_hop) = &self.hops else {
return None;
};

// Get the safety route to use from the spec
Some(match &pr_first_hop.node {
- RouteNode::NodeId(n) => n.key,
+ RouteNode::NodeId(n) => n,
RouteNode::PeerInfo(p) => p.node_id.key,
})
}
@@ -162,13 +162,13 @@ pub enum SafetyRouteHops {

#[derive(Clone, Debug)]
pub struct SafetyRoute {
- pub public_key: PublicKey,
+ pub public_key: TypedKey,
pub hop_count: u8,
pub hops: SafetyRouteHops,
}

impl SafetyRoute {
- pub fn new_stub(public_key: PublicKey, private_route: PrivateRoute) -> Self {
+ pub fn new_stub(public_key: TypedKey, private_route: PrivateRoute) -> Self {
// First hop should have already been popped off for stubbed safety routes since
// we are sending directly to the first hop
assert!(matches!(private_route.hops, PrivateRouteHops::Data(_)));
@@ -10,13 +10,13 @@ pub enum ContactMethod {
/// Contact the node directly
Direct(DialInfo),
/// Request via signal the node connect back directly (relay, target)
- SignalReverse(PublicKey, PublicKey),
+ SignalReverse(TypedKey, TypedKey),
/// Request via signal the node negotiate a hole punch (relay, target_node)
- SignalHolePunch(PublicKey, PublicKey),
+ SignalHolePunch(TypedKey, TypedKey),
/// Must use an inbound relay to reach the node
- InboundRelay(PublicKey),
+ InboundRelay(TypedKey),
/// Must use outbound relay to reach the node
- OutboundRelay(PublicKey),
+ OutboundRelay(TypedKey),
}

#[derive(Debug)]
@@ -28,7 +28,7 @@ pub struct RoutingTableInner {
/// Statistics about the total bandwidth to/from this node
pub(super) self_transfer_stats: TransferStatsDownUp,
/// Peers we have recently communicated with
- pub(super) recent_peers: LruCache<PublicKey, RecentPeersEntry>,
+ pub(super) recent_peers: LruCache<TypedKey, RecentPeersEntry>,
/// Storage for private/safety RouteSpecs
pub(super) route_spec_store: Option<RouteSpecStore>,
}
@@ -416,7 +416,7 @@ impl RoutingTableInner {

pub fn with_entries<
T,
- F: FnMut(&RoutingTableInner, PublicKey, Arc<BucketEntry>) -> Option<T>,
+ F: FnMut(&RoutingTableInner, TypedKey, Arc<BucketEntry>) -> Option<T>,
>(
&self,
cur_ts: Timestamp,
@@ -442,7 +442,7 @@ impl RoutingTableInner {

pub fn with_entries_mut<
T,
- F: FnMut(&mut RoutingTableInner, PublicKey, Arc<BucketEntry>) -> Option<T>,
+ F: FnMut(&mut RoutingTableInner, TypedKey, Arc<BucketEntry>) -> Option<T>,
>(
&mut self,
cur_ts: Timestamp,
@@ -537,20 +537,53 @@ impl RoutingTableInner {
return None;
}

+ // Look up all bucket entries and make sure we only have zero or one
+ // If we have more than one, pick the one with the best cryptokind to add node ids to
+ let mut best_entry: Option<Arc<BucketEntry>> = None;
+ for node_id in node_ids {
+ if let Some((kind, idx)) = self.unlocked_inner.find_bucket_index(*node_id) {
+ let bucket = &self.buckets[&kind][idx];
+ if let Some(entry) = bucket.entry(&node_id.key) {
+ // Best entry is the first one in sorted order that exists from the node id list
+ // Everything else that matches will be overwritten in the bucket and the
+ // existing noderefs will eventually unref and drop the old unindexed bucketentry
+ // We do this instead of merging for now. We could 'kill' entries and have node_refs
+ // rewrite themselves to point to the merged entry upon dereference. The use case for this
+ // may not be worth the effort.
+ best_entry = Some(entry);
+ break;
+ }
+ };
+ }
+
+ // If the entry does exist already, update it
+ if let Some(best_entry) = best_entry {
+ let nr = best_entry.map(|e| NodeRef::new(outer_self.clone(), best_entry, None));
+
+ // Update the entry with all of the node ids
+ nr.update_node_ids(node_ids);
+
+ // Update the entry with the update func
+ nr.operate_mut(|rti, e| update_func(rti, e));
+
+ return Some(nr);
+ }
+
+ // Find a bucket for the first node id crypto kind we can handle
+ let (node_id, kind, idx) = node_ids.iter().find_map(|x| {
+ self.unlocked_inner
+ .find_bucket_index(*x)
+ .map(|v| (*x, v.0, v.1))
+ })?;
+
// Look up existing entry
- let idx = node_ids
- .iter()
- .find_map(|x| self.unlocked_inner.find_bucket_index(x));
let noderef = {
- let bucket = &self.buckets[idx];
+ let bucket = &self.buckets[&kind][idx];
- let entry = bucket.entry(&node_id);
+ let entry = bucket.entry(&node_id.key);
- entry.map(|e| NodeRef::new(outer_self.clone(), node_id, e, None))
+ entry.map(|e| NodeRef::new(outer_self.clone(), e, None))
};

// If one doesn't exist, insert into bucket, possibly evicting a bucket member
- let noderef = match noderef {
- None => {
- // Make new entry
self.bucket_entry_count += 1;
let cnt = self.bucket_entry_count;
let bucket = &mut self.buckets[idx];
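A standalone sketch of the "best entry" selection the new create_node_ref comments describe: walk the node ids in order, return the first bucket entry that already exists, and only then fall back to creating a new one. Types here are simplified stand-ins, not the veilid-core structures.

use std::collections::HashMap;
use std::sync::Arc;

// Simplified stand-ins: a per-kind bucket keyed by raw key bytes.
type CryptoKind = [u8; 4];
type Key = [u8; 8];

#[derive(Clone, Copy)]
struct TypedKey {
    kind: CryptoKind,
    key: Key,
}

#[derive(Debug)]
struct BucketEntry {
    label: &'static str,
}

struct Buckets {
    by_kind: HashMap<CryptoKind, HashMap<Key, Arc<BucketEntry>>>,
}

impl Buckets {
    // First existing entry in node-id order wins, mirroring the commit's comment that the
    // best entry is "the first one in sorted order that exists from the node id list".
    fn best_existing_entry(&self, node_ids: &[TypedKey]) -> Option<Arc<BucketEntry>> {
        node_ids.iter().find_map(|id| {
            self.by_kind
                .get(&id.kind)
                .and_then(|bucket| bucket.get(&id.key))
                .cloned()
        })
    }
}

fn main() {
    let kind_a: CryptoKind = *b"KNDA";
    let kind_b: CryptoKind = *b"KNDB";
    let mut by_kind = HashMap::new();
    let mut bucket_b = HashMap::new();
    bucket_b.insert([2u8; 8], Arc::new(BucketEntry { label: "existing-under-kind-b" }));
    by_kind.insert(kind_b, bucket_b);
    by_kind.insert(kind_a, HashMap::new());
    let buckets = Buckets { by_kind };

    let ids = [
        TypedKey { kind: kind_a, key: [1u8; 8] }, // no entry for this id
        TypedKey { kind: kind_b, key: [2u8; 8] }, // this one exists
    ];
    let best = buckets.best_existing_entry(&ids).expect("entry found");
    println!("picked {}", best.label);
}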
@@ -564,39 +597,27 @@ impl RoutingTableInner {
self.unlocked_inner.kick_queue.lock().insert(idx);
log_rtab!(debug "Routing table now has {} nodes, {} live", cnt, self.get_entry_count(RoutingDomainSet::all(), BucketEntryState::Unreliable));

- nr
+ Some(NodeRef::new(outer_self.clone(), e, None))
- }
- Some(nr) => {
- // Update the entry
- let bucket = &mut self.buckets[idx];
- let entry = bucket.entry(&node_id).unwrap();
- entry.with_mut(self, update_func);
-
- nr
- }
- };
-
- Some(noderef)
}

/// Resolve an existing routing table entry and return a reference to it
- pub fn lookup_node_ref(&self, outer_self: RoutingTable, node_id: PublicKey) -> Option<NodeRef> {
+ pub fn lookup_node_ref(&self, outer_self: RoutingTable, node_id: TypedKey) -> Option<NodeRef> {
- if node_id == self.unlocked_inner.node_id {
+ if self.unlocked_inner.matches_own_node_id(&[node_id]) {
log_rtab!(error "can't look up own node id in routing table");
return None;
}
- let idx = self.unlocked_inner.find_bucket_index(node_id);
+ let (kind, idx) = self.unlocked_inner.find_bucket_index(node_id)?;
- let bucket = &self.buckets[idx];
+ let bucket = &self.buckets[&kind][idx];
bucket
- .entry(&node_id)
+ .entry(&node_id.key)
- .map(|e| NodeRef::new(outer_self, node_id, e, None))
+ .map(|e| NodeRef::new(outer_self, e, None))
}

/// Resolve an existing routing table entry and return a filtered reference to it
pub fn lookup_and_filter_noderef(
&self,
outer_self: RoutingTable,
- node_id: PublicKey,
+ node_id: TypedKey,
routing_domain_set: RoutingDomainSet,
dial_info_filter: DialInfoFilter,
) -> Option<NodeRef> {
@@ -611,7 +632,7 @@ impl RoutingTableInner {
}

/// Resolve an existing routing table entry and call a function on its entry without using a noderef
- pub fn with_node_entry<F, R>(&self, node_id: PublicKey, f: F) -> Option<R>
+ pub fn with_node_entry<F, R>(&self, node_id: TypedKey, f: F) -> Option<R>
where
F: FnOnce(Arc<BucketEntry>) -> R,
{
@@ -682,11 +703,11 @@ impl RoutingTableInner {
pub fn register_node_with_existing_connection(
&mut self,
outer_self: RoutingTable,
- node_id: PublicKey,
+ node_id: TypedKey,
descriptor: ConnectionDescriptor,
timestamp: Timestamp,
) -> Option<NodeRef> {
- let out = self.create_node_ref(outer_self, node_id, |_rti, e| {
+ let out = self.create_node_ref(outer_self, &[node_id], |_rti, e| {
// this node is live because it literally just connected to us
e.touch_last_seen(timestamp);
});
@@ -743,7 +764,7 @@ impl RoutingTableInner {
}
}

- pub fn touch_recent_peer(&mut self, node_id: PublicKey, last_connection: ConnectionDescriptor) {
+ pub fn touch_recent_peer(&mut self, node_id: TypedKey, last_connection: ConnectionDescriptor) {
self.recent_peers
.insert(node_id, RecentPeersEntry { last_connection });
}
@@ -758,8 +779,7 @@ impl RoutingTableInner {
node_count: usize,
mut filters: VecDeque<RoutingTableEntryFilter>,
) -> Vec<NodeRef> {
- let public_node_filter = Box::new(
+ let public_node_filter = Box::new(|rti: &RoutingTableInner, v: Option<Arc<BucketEntry>>| {
- |rti: &RoutingTableInner, _k: PublicKey, v: Option<Arc<BucketEntry>>| {
let entry = v.unwrap();
entry.with(rti, |_rti, e| {
// skip nodes on local network
@@ -772,15 +792,14 @@ impl RoutingTableInner {
}
true
})
- },
+ }) as RoutingTableEntryFilter;
- ) as RoutingTableEntryFilter;
filters.push_front(public_node_filter);

self.find_fastest_nodes(
node_count,
filters,
- |_rti: &RoutingTableInner, k: PublicKey, v: Option<Arc<BucketEntry>>| {
+ |_rti: &RoutingTableInner, k: TypedKey, v: Option<Arc<BucketEntry>>| {
- NodeRef::new(outer_self.clone(), k, v.unwrap().clone(), None)
+ NodeRef::new(outer_self.clone(), v.unwrap().clone(), None)
},
)
}
@@ -789,9 +808,9 @@ impl RoutingTableInner {
&self,
routing_domain: RoutingDomain,
has_valid_own_node_info: bool,
- v: Option<Arc<BucketEntry>>,
+ entry: Option<Arc<BucketEntry>>,
) -> bool {
- match v {
+ match entry {
None => has_valid_own_node_info,
Some(entry) => entry.with(self, |_rti, e| {
e.signed_node_info(routing_domain.into())
@@ -805,12 +824,11 @@ impl RoutingTableInner {
&self,
routing_domain: RoutingDomain,
own_peer_info: &PeerInfo,
- k: PublicKey,
+ entry: Option<Arc<BucketEntry>>,
- v: Option<Arc<BucketEntry>>,
) -> PeerInfo {
- match v {
+ match entry {
None => own_peer_info.clone(),
- Some(entry) => entry.with(self, |_rti, e| e.make_peer_info(k, routing_domain).unwrap()),
+ Some(entry) => entry.with(self, |_rti, e| e.make_peer_info(routing_domain).unwrap()),
}
}

@@ -825,15 +843,14 @@ impl RoutingTableInner {
where
C: for<'a, 'b> FnMut(
&'a RoutingTableInner,
- &'b (PublicKey, Option<Arc<BucketEntry>>),
+ &'b Option<Arc<BucketEntry>>,
- &'b (PublicKey, Option<Arc<BucketEntry>>),
+ &'b Option<Arc<BucketEntry>>,
) -> core::cmp::Ordering,
- T: for<'r> FnMut(&'r RoutingTableInner, PublicKey, Option<Arc<BucketEntry>>) -> O,
+ T: for<'r> FnMut(&'r RoutingTableInner, TypedKey, Option<Arc<BucketEntry>>) -> O,
{
// collect all the nodes for sorting
- let mut nodes = Vec::<(PublicKey, Option<Arc<BucketEntry>>)>::with_capacity(
+ let mut nodes =
- self.bucket_entry_count + 1,
+ Vec::<(TypedKey, Option<Arc<BucketEntry>>)>::with_capacity(self.bucket_entry_count + 1);
- );

// add our own node (only one of there with the None entry)
let mut filtered = false;
@@ -880,13 +897,13 @@ impl RoutingTableInner {
transform: T,
) -> Vec<O>
where
- T: for<'r> FnMut(&'r RoutingTableInner, PublicKey, Option<Arc<BucketEntry>>) -> O,
+ T: for<'r> FnMut(&'r RoutingTableInner, Option<Arc<BucketEntry>>) -> O,
{
let cur_ts = get_aligned_timestamp();

// Add filter to remove dead nodes always
let filter_dead = Box::new(
- move |rti: &RoutingTableInner, _k: PublicKey, v: Option<Arc<BucketEntry>>| {
+ move |rti: &RoutingTableInner, v: Option<Arc<BucketEntry>>| {
if let Some(entry) = &v {
// always filter out dead nodes
if entry.with(rti, |_rti, e| e.state(cur_ts) == BucketEntryState::Dead) {
@@ -904,8 +921,8 @@ impl RoutingTableInner {

// Fastest sort
let sort = |rti: &RoutingTableInner,
- (a_key, a_entry): &(PublicKey, Option<Arc<BucketEntry>>),
+ (a_key, a_entry): &(TypedKey, Option<Arc<BucketEntry>>),
- (b_key, b_entry): &(PublicKey, Option<Arc<BucketEntry>>)| {
+ (b_key, b_entry): &(TypedKey, Option<Arc<BucketEntry>>)| {
// same nodes are always the same
if a_key == b_key {
return core::cmp::Ordering::Equal;
@@ -960,12 +977,12 @@ impl RoutingTableInner {

pub fn find_closest_nodes<T, O>(
&self,
- node_id: PublicKey,
+ node_id: TypedKey,
filters: VecDeque<RoutingTableEntryFilter>,
transform: T,
) -> Vec<O>
where
- T: for<'r> FnMut(&'r RoutingTableInner, PublicKey, Option<Arc<BucketEntry>>) -> O,
+ T: for<'r> FnMut(&'r RoutingTableInner, TypedKey, Option<Arc<BucketEntry>>) -> O,
{
let cur_ts = get_aligned_timestamp();
let node_count = {
@@ -976,8 +993,8 @@ impl RoutingTableInner {

// closest sort
let sort = |rti: &RoutingTableInner,
- (a_key, a_entry): &(PublicKey, Option<Arc<BucketEntry>>),
+ (a_key, a_entry): &(TypedKey, Option<Arc<BucketEntry>>),
- (b_key, b_entry): &(PublicKey, Option<Arc<BucketEntry>>)| {
+ (b_key, b_entry): &(TypedKey, Option<Arc<BucketEntry>>)| {
// same nodes are always the same
if a_key == b_key {
return core::cmp::Ordering::Equal;
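As a side note on the filter/transform signature changes above: the Option<Arc<BucketEntry>> convention uses None to stand for our own node, so transforms fall back to our own peer info when no entry is present. A minimal stand-in version of that fallback:

use std::sync::Arc;

// Stand-ins; the real PeerInfo/BucketEntry are veilid-core types.
#[derive(Clone, Debug, PartialEq)]
struct PeerInfo {
    name: String,
}

struct BucketEntry {
    peer_info: PeerInfo,
}

// None means "this is us": return our own peer info instead of reading an entry.
fn transform_to_peer_info(own_peer_info: &PeerInfo, entry: Option<Arc<BucketEntry>>) -> PeerInfo {
    match entry {
        None => own_peer_info.clone(),
        Some(e) => e.peer_info.clone(),
    }
}

fn main() {
    let own = PeerInfo { name: "self".into() };
    let other = Arc::new(BucketEntry { peer_info: PeerInfo { name: "other".into() } });
    assert_eq!(transform_to_peer_info(&own, None).name, "self");
    assert_eq!(transform_to_peer_info(&own, Some(other)).name, "other");
    println!("transform sketch ok");
}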
@@ -205,7 +205,7 @@ impl RoutingTable {
for pi in peer_info {
let k = pi.node_id.key;
// Register the node
- if let Some(nr) = self.register_node_with_peer_info(
+ if let Some(nr) = self.register_node_with_signed_node_info(
RoutingDomain::PublicInternet,
k,
pi.signed_node_info,
@@ -301,7 +301,7 @@ impl RoutingTable {
log_rtab!("--- bootstrapping {} with {:?}", k.encode(), &v);

// Make invalid signed node info (no signature)
- if let Some(nr) = self.register_node_with_peer_info(
+ if let Some(nr) = self.register_node_with_signed_node_info(
RoutingDomain::PublicInternet,
k,
SignedNodeInfo::Direct(SignedDirectNodeInfo::with_no_signature(NodeInfo {
@@ -24,7 +24,7 @@ impl RoutingTable {
let noderefs = routing_table.find_fastest_nodes(
min_peer_count,
VecDeque::new(),
- |_rti, k: PublicKey, v: Option<Arc<BucketEntry>>| {
+ |_rti, k: TypedKey, v: Option<Arc<BucketEntry>>| {
NodeRef::new(routing_table.clone(), k, v.unwrap().clone(), None)
},
);
@@ -8,7 +8,7 @@ const BACKGROUND_SAFETY_ROUTE_COUNT: usize = 2;

impl RoutingTable {
/// Fastest routes sort
- fn route_sort_latency_fn(a: &(PublicKey, u64), b: &(PublicKey, u64)) -> cmp::Ordering {
+ fn route_sort_latency_fn(a: &(TypedKey, u64), b: &(TypedKey, u64)) -> cmp::Ordering {
let mut al = a.1;
let mut bl = b.1;
// Treat zero latency as uncalculated
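The route latency sort's "treat zero latency as uncalculated" rule is easy to restate on its own: map 0 to the maximum value before comparing, so untested routes sort last. A small self-contained version of that ordering (stand-in route labels, not veilid-core types):

use std::cmp;

// Zero means "latency not measured yet"; push such routes to the end of the list.
fn route_sort_latency(a: &(&str, u64), b: &(&str, u64)) -> cmp::Ordering {
    let al = if a.1 == 0 { u64::MAX } else { a.1 };
    let bl = if b.1 == 0 { u64::MAX } else { b.1 };
    al.cmp(&bl)
}

fn main() {
    let mut routes = vec![("r1", 0u64), ("r2", 40), ("r3", 12)];
    routes.sort_by(route_sort_latency);
    assert_eq!(routes.iter().map(|r| r.0).collect::<Vec<_>>(), ["r3", "r2", "r1"]);
    println!("latency sort sketch ok");
}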
@@ -35,14 +35,14 @@ impl RoutingTable {
///
/// If a route doesn't 'need_testing', then we neither test nor drop it
#[instrument(level = "trace", skip(self))]
- fn get_allocated_routes_to_test(&self, cur_ts: Timestamp) -> Vec<PublicKey> {
+ fn get_allocated_routes_to_test(&self, cur_ts: Timestamp) -> Vec<TypedKey> {
let default_route_hop_count =
self.with_config(|c| c.network.rpc.default_route_hop_count as usize);

let rss = self.route_spec_store();
- let mut must_test_routes = Vec::<PublicKey>::new();
+ let mut must_test_routes = Vec::<TypedKey>::new();
- let mut unpublished_routes = Vec::<(PublicKey, u64)>::new();
+ let mut unpublished_routes = Vec::<(TypedKey, u64)>::new();
- let mut expired_routes = Vec::<PublicKey>::new();
+ let mut expired_routes = Vec::<TypedKey>::new();
rss.list_allocated_routes(|k, v| {
let stats = v.get_stats();
// Ignore nodes that don't need testing
@@ -95,7 +95,7 @@ impl RoutingTable {
async fn test_route_set(
&self,
stop_token: StopToken,
- routes_needing_testing: Vec<PublicKey>,
+ routes_needing_testing: Vec<TypedKey>,
) -> EyreResult<()> {
if routes_needing_testing.is_empty() {
return Ok(());
@@ -107,7 +107,7 @@ impl RoutingTable {
#[derive(Default, Debug)]
struct TestRouteContext {
failed: bool,
- dead_routes: Vec<PublicKey>,
+ dead_routes: Vec<TypedKey>,
}

let mut unord = FuturesUnordered::new();
@@ -117,7 +117,7 @@ impl RPCOperation {

pub fn decode(
operation_reader: &veilid_capnp::operation::Reader,
- opt_sender_node_id: Option<&PublicKey>,
+ opt_sender_node_id: Option<&TypedKey>,
) -> Result<Self, RPCError> {
let op_id = OperationId::new(operation_reader.get_op_id());

@@ -2,7 +2,7 @@ use super::*;

#[derive(Debug, Clone)]
pub struct RPCOperationFindBlockQ {
- pub block_id: PublicKey,
+ pub block_id: TypedKey,
}

impl RPCOperationFindBlockQ {
@@ -2,7 +2,7 @@ use super::*;

#[derive(Debug, Clone)]
pub struct RPCOperationFindNodeQ {
- pub node_id: PublicKey,
+ pub node_id: TypedKey,
}

impl RPCOperationFindNodeQ {
@@ -39,9 +39,9 @@ pub fn decode_peer_info(
.reborrow()
.get_signed_node_info()
.map_err(RPCError::protocol)?;
- let node_ids = Vec::with_capacity(nids_reader.len() as usize);
+ let node_ids = TypedKeySet::with_capacity(nids_reader.len() as usize);
for nid_reader in nids_reader.iter() {
- node_ids.push(decode_typed_key(&nid_reader)?);
+ node_ids.add(decode_typed_key(&nid_reader)?);
}
let signed_node_info = decode_signed_node_info(&sni_reader, crypto, &node_ids)?;

@@ -67,10 +67,10 @@ pub fn decode_signed_relayed_node_info(
.reborrow()
.get_relay_ids()
.map_err(RPCError::protocol)?;
- let mut relay_ids = Vec::with_capacity(rids_reader.len() as usize);
+ let mut relay_ids = TypedKeySet::with_capacity(rids_reader.len() as usize);
for rid_reader in rids_reader {
let relay_id = decode_typed_key(&rid_reader)?;
- relay_ids.push(relay_id);
+ relay_ids.add(relay_id);
}

let ri_reader = reader
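Illustrative background for the Vec-to-TypedKeySet switch in these decoders: a key set behaves like a small ordered, de-duplicated collection, so decoding the same id twice cannot inflate the list. A minimal stand-in with the same with_capacity/add shape (not veilid-core's actual implementation):

// Minimal stand-in for a typed-key set; veilid-core's TypedKeySet is the real thing.
#[derive(Debug, Default)]
struct TypedKeySet {
    items: Vec<(u32, [u8; 8])>, // (crypto kind, key) pairs for the sketch
}

impl TypedKeySet {
    fn with_capacity(cap: usize) -> Self {
        Self { items: Vec::with_capacity(cap) }
    }
    // Keep the set sorted and ignore duplicates.
    fn add(&mut self, tk: (u32, [u8; 8])) {
        if let Err(pos) = self.items.binary_search(&tk) {
            self.items.insert(pos, tk);
        }
    }
    fn len(&self) -> usize {
        self.items.len()
    }
}

fn main() {
    let mut set = TypedKeySet::with_capacity(3);
    set.add((0, [1u8; 8]));
    set.add((1, [2u8; 8]));
    set.add((0, [1u8; 8])); // duplicate is dropped
    assert_eq!(set.len(), 2);
    println!("typed key set sketch ok");
}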
@@ -15,7 +15,7 @@ pub enum Destination {
/// The relay to send to
relay: NodeRef,
/// The final destination the relay should send to
- target: PublicKey,
+ target: TypedKey,
/// Require safety route or not
safety_selection: SafetySelection,
},
@@ -36,7 +36,7 @@ impl Destination {
safety_selection: SafetySelection::Unsafe(sequencing),
}
}
- pub fn relay(relay: NodeRef, target: PublicKey) -> Self {
+ pub fn relay(relay: NodeRef, target: TypedKey) -> Self {
let sequencing = relay.sequencing();
Self::Relay {
relay,
@@ -520,12 +520,8 @@ impl RPCProcessor {

// Make the routed operation
// xxx: replace MAX_CRYPTO_VERSION with the version from the factory
- let operation = RoutedOperation::new(
+ let operation =
- MAX_CRYPTO_VERSION,
+ RoutedOperation::new(safety_selection.get_sequencing(), nonce, enc_msg_data);
- safety_selection.get_sequencing(),
- nonce,
- enc_msg_data,
- );

// Prepare route operation
let sr_hop_count = compiled_route.safety_route.hop_count;
@@ -1218,7 +1214,7 @@ impl RPCProcessor {
"sender signednodeinfo has invalid peer scope",
));
}
- opt_sender_nr = self.routing_table().register_node_with_peer_info(
+ opt_sender_nr = self.routing_table().register_node_with_signed_node_info(
routing_domain,
sender_node_id,
sender_node_info.clone(),
@@ -100,8 +100,8 @@ impl RPCProcessor {
// find N nodes closest to the target node in our routing table

let filter = Box::new(
- move |rti: &RoutingTableInner, _k: PublicKey, v: Option<Arc<BucketEntry>>| {
+ move |rti: &RoutingTableInner, entry: Option<Arc<BucketEntry>>| {
- rti.filter_has_valid_signed_node_info(RoutingDomain::PublicInternet, true, v)
+ rti.filter_has_valid_signed_node_info(RoutingDomain::PublicInternet, true, entry)
},
) as RoutingTableEntryFilter;
let filters = VecDeque::from([filter]);
@@ -110,8 +110,8 @@ impl RPCProcessor {
find_node_q.node_id,
filters,
// transform
- |rti, k, v| {
+ |rti, entry| {
- rti.transform_to_peer_info(RoutingDomain::PublicInternet, &own_peer_info, k, v)
+ rti.transform_to_peer_info(RoutingDomain::PublicInternet, &own_peer_info, entry)
},
);

@@ -77,7 +77,7 @@ impl RPCProcessor {
&self,
routed_operation: RoutedOperation,
next_route_node: RouteNode,
- safety_route_public_key: PublicKey,
+ safety_route_public_key: TypedKey,
next_private_route: PrivateRoute,
) -> Result<NetworkResult<()>, RPCError> {
// Make sure hop count makes sense
@@ -142,7 +142,7 @@ impl RPCProcessor {
&self,
_detail: RPCMessageHeaderDetailDirect,
routed_operation: RoutedOperation,
- remote_sr_pubkey: PublicKey,
+ remote_sr_pubkey: TypedKey,
) -> Result<NetworkResult<()>, RPCError> {

// Now that things are valid, decrypt the routed operation with DEC(nonce, DH(the SR's public key, the PR's (or node's) secret)
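The DEC(nonce, DH(...)) comment above describes the general pattern: derive a shared secret from the route's public key and this node's secret, then use it with the message nonce to decrypt the routed payload. The sketch below only shows the call shape with placeholder primitives; it is not the veilid-core crypto API, and the XOR functions are toys so the flow runs on its own.

// Placeholder primitives: in the real code these come from the crypto system selected
// by the key's CryptoKind; here they are toy XOR stand-ins so the control flow is runnable.
fn dh(public_key: &[u8; 32], secret_key: &[u8; 32]) -> [u8; 32] {
    let mut out = [0u8; 32];
    for i in 0..32 {
        out[i] = public_key[i] ^ secret_key[i];
    }
    out
}

fn dec(nonce: &[u8; 24], shared_secret: &[u8; 32], data: &[u8]) -> Vec<u8> {
    data.iter()
        .enumerate()
        .map(|(i, b)| b ^ shared_secret[i % 32] ^ nonce[i % 24])
        .collect()
}

// Mirrors the commented flow: body = DEC(nonce, DH(remote safety-route key, our secret)).
fn decrypt_routed_operation(
    remote_sr_pubkey: &[u8; 32],
    node_id_secret: &[u8; 32],
    nonce: &[u8; 24],
    enc_data: &[u8],
) -> Vec<u8> {
    let shared = dh(remote_sr_pubkey, node_id_secret);
    dec(nonce, &shared, enc_data)
}

fn main() {
    let pk = [1u8; 32];
    let sk = [2u8; 32];
    let nonce = [3u8; 24];
    let plain = b"routed operation body".to_vec();
    // With the toy XOR primitives, encryption and decryption are the same operation.
    let enc = decrypt_routed_operation(&pk, &sk, &nonce, &plain);
    let dec_back = decrypt_routed_operation(&pk, &sk, &nonce, &enc);
    assert_eq!(dec_back, plain);
    println!("routed decrypt sketch ok");
}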
@@ -177,8 +177,8 @@ impl RPCProcessor {
&self,
detail: RPCMessageHeaderDetailDirect,
routed_operation: RoutedOperation,
- remote_sr_pubkey: PublicKey,
+ remote_sr_pubkey: TypedKey,
- pr_pubkey: PublicKey,
+ pr_pubkey: TypedKey,
) -> Result<NetworkResult<()>, RPCError> {
// Get sender id
let sender_id = detail.envelope.get_sender_id();
@@ -237,8 +237,8 @@ impl RPCProcessor {
&self,
detail: RPCMessageHeaderDetailDirect,
routed_operation: RoutedOperation,
- remote_sr_pubkey: PublicKey,
+ remote_sr_pubkey: TypedKey,
- pr_pubkey: PublicKey,
+ pr_pubkey: TypedKey,
) -> Result<NetworkResult<()>, RPCError> {

// If the private route public key is our node id, then this was sent via safety route to our node directly
@@ -260,7 +260,7 @@ impl RPCProcessor {
pub(crate) async fn process_private_route_first_hop(
&self,
mut routed_operation: RoutedOperation,
- sr_pubkey: PublicKey,
+ sr_pubkey: TypedKey,
mut private_route: PrivateRoute,
) -> Result<NetworkResult<()>, RPCError> {
let Some(pr_first_hop) = private_route.pop_first_hop() else {
@@ -312,7 +312,7 @@ impl RPCProcessor {
}

/// Decrypt route hop data and sign routed operation
- pub(crate) fn decrypt_private_route_hop_data(&self, route_hop_data: &RouteHopData, pr_pubkey: &PublicKey, route_operation: &mut RoutedOperation) -> Result<NetworkResult<RouteHop>, RPCError>
+ pub(crate) fn decrypt_private_route_hop_data(&self, route_hop_data: &RouteHopData, pr_pubkey: &TypedKey, route_operation: &mut RoutedOperation) -> Result<NetworkResult<RouteHop>, RPCError>
{
// Decrypt the blob with DEC(nonce, DH(the PR's public key, this hop's secret)
let node_id_secret = self.routing_table.node_id_secret();
@@ -102,7 +102,7 @@ impl RPCProcessor {
dial_info.clone(),
);
let will_validate_dial_info_filter = Box::new(
- move |rti: &RoutingTableInner, _k: PublicKey, v: Option<Arc<BucketEntry>>| {
+ move |rti: &RoutingTableInner, _k: TypedKey, v: Option<Arc<BucketEntry>>| {
let entry = v.unwrap();
entry.with(rti, move |_rti, e| {
if let Some(status) = &e.node_status(routing_domain) {
@@ -132,9 +132,9 @@ pub async fn test_frozen(ts: TableStore) {

assert!(db.store_rkyv(0, b"asdf", &dht_key).await.is_ok());

- assert_eq!(db.load_rkyv::<PublicKey>(0, b"qwer").unwrap(), None);
+ assert_eq!(db.load_rkyv::<TypedKey>(0, b"qwer").unwrap(), None);

- let d = match db.load_rkyv::<PublicKey>(0, b"asdf") {
+ let d = match db.load_rkyv::<TypedKey>(0, b"asdf") {
Ok(x) => x,
Err(e) => {
panic!("couldn't decode: {}", e);
@@ -148,7 +148,7 @@ pub async fn test_frozen(ts: TableStore) {
);

assert!(
- db.load_rkyv::<PublicKey>(1, b"foo").is_err(),
+ db.load_rkyv::<TypedKey>(1, b"foo").is_err(),
"should fail to unfreeze"
);
}
@@ -192,7 +192,7 @@ fn config_callback(key: String) -> ConfigCallbackReturn {
"network.client_whitelist_timeout_ms" => Ok(Box::new(300_000u32)),
"network.reverse_connection_receipt_time_ms" => Ok(Box::new(5_000u32)),
"network.hole_punch_receipt_time_ms" => Ok(Box::new(5_000u32)),
- "network.node_id" => Ok(Box::new(Option::<PublicKey>::None)),
+ "network.node_id" => Ok(Box::new(Option::<TypedKey>::None)),
"network.node_id_secret" => Ok(Box::new(Option::<SecretKey>::None)),
"network.bootstrap" => Ok(Box::new(Vec::<String>::new())),
"network.bootstrap_nodes" => Ok(Box::new(Vec::<String>::new())),
@@ -167,7 +167,7 @@ impl VeilidAPI {
// Private route allocation

#[instrument(level = "debug", skip(self))]
- pub async fn new_private_route(&self) -> Result<(PublicKey, Vec<u8>), VeilidAPIError> {
+ pub async fn new_private_route(&self) -> Result<(TypedKey, Vec<u8>), VeilidAPIError> {
self.new_custom_private_route(Stability::default(), Sequencing::default())
.await
}
@@ -177,7 +177,7 @@ impl VeilidAPI {
&self,
stability: Stability,
sequencing: Sequencing,
- ) -> Result<(PublicKey, Vec<u8>), VeilidAPIError> {
+ ) -> Result<(TypedKey, Vec<u8>), VeilidAPIError> {
let default_route_hop_count: usize = {
let config = self.config()?;
let c = config.get();
@@ -223,14 +223,14 @@ impl VeilidAPI {
}

#[instrument(level = "debug", skip(self))]
- pub fn import_remote_private_route(&self, blob: Vec<u8>) -> Result<PublicKey, VeilidAPIError> {
+ pub fn import_remote_private_route(&self, blob: Vec<u8>) -> Result<TypedKey, VeilidAPIError> {
let rss = self.routing_table()?.route_spec_store();
rss.import_remote_private_route(blob)
.map_err(|e| VeilidAPIError::invalid_argument(e, "blob", "private route blob"))
}

#[instrument(level = "debug", skip(self))]
- pub fn release_private_route(&self, key: &PublicKey) -> Result<(), VeilidAPIError> {
+ pub fn release_private_route(&self, key: &TypedKey) -> Result<(), VeilidAPIError> {
let rss = self.routing_table()?.route_spec_store();
if rss.release_route(key) {
Ok(())
@@ -7,7 +7,7 @@ use routing_table::*;

#[derive(Default, Debug)]
struct DebugCache {
- imported_routes: Vec<PublicKey>,
+ imported_routes: Vec<TypedKey>,
}

static DEBUG_CACHE: Mutex<DebugCache> = Mutex::new(DebugCache {
@@ -30,12 +30,12 @@ fn get_string(text: &str) -> Option<String> {
Some(text.to_owned())
}

- fn get_route_id(rss: RouteSpecStore) -> impl Fn(&str) -> Option<PublicKey> {
+ fn get_route_id(rss: RouteSpecStore) -> impl Fn(&str) -> Option<TypedKey> {
return move |text: &str| {
if text.is_empty() {
return None;
}
- match PublicKey::try_decode(text).ok() {
+ match TypedKey::try_decode(text).ok() {
Some(key) => {
let routes = rss.list_allocated_routes(|k, _| Some(*k));
if routes.contains(&key) {
@@ -187,8 +187,8 @@ fn get_destination(routing_table: RoutingTable) -> impl FnOnce(&str) -> Option<D
fn get_number(text: &str) -> Option<usize> {
usize::from_str(text).ok()
}
- fn get_dht_key(text: &str) -> Option<PublicKey> {
+ fn get_dht_key(text: &str) -> Option<TypedKey> {
- PublicKey::try_decode(text).ok()
+ TypedKey::try_decode(text).ok()
}

fn get_node_ref(routing_table: RoutingTable) -> impl FnOnce(&str) -> Option<NodeRef> {
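Hedged usage note on the private route API above: the signatures are taken from this diff, but everything else below is stubbed so the flow compiles on its own. One side allocates a private route and publishes the blob, the other imports it, and whoever allocated or imported a route eventually releases it by its TypedKey.

use std::collections::HashMap;

// Stubbed stand-in for the API surface shown in the diff; errors, keys, and blobs are simplified.
type TypedKey = u64;

#[derive(Default)]
struct FakeRouteApi {
    next_id: TypedKey,
    routes: HashMap<TypedKey, Vec<u8>>,
}

impl FakeRouteApi {
    // new_private_route(): allocate a route, return its id and a shareable blob.
    fn new_private_route(&mut self) -> (TypedKey, Vec<u8>) {
        self.next_id += 1;
        let blob = format!("route-blob-{}", self.next_id).into_bytes();
        self.routes.insert(self.next_id, blob.clone());
        (self.next_id, blob)
    }
    // import_remote_private_route(blob): register someone else's blob, get back its id.
    fn import_remote_private_route(&mut self, blob: Vec<u8>) -> TypedKey {
        self.next_id += 1;
        self.routes.insert(self.next_id, blob);
        self.next_id
    }
    // release_private_route(&key): drop the route when done.
    fn release_private_route(&mut self, key: &TypedKey) -> bool {
        self.routes.remove(key).is_some()
    }
}

fn main() {
    let mut api = FakeRouteApi::default();
    let (route_id, blob) = api.new_private_route();
    let imported_id = api.import_remote_private_route(blob);
    assert!(api.release_private_route(&route_id));
    assert!(api.release_private_route(&imported_id));
    println!("private route flow sketch ok");
}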
@@ -108,11 +108,11 @@ pub enum VeilidAPIError {
#[error("Shutdown")]
Shutdown,
#[error("Key not found: {key}")]
- KeyNotFound { key: PublicKey },
+ KeyNotFound { key: TypedKey },
#[error("No connection: {message}")]
NoConnection { message: String },
#[error("No peer info: {node_id}")]
- NoPeerInfo { node_id: PublicKey },
+ NoPeerInfo { node_id: TypedKey },
#[error("Internal: {message}")]
Internal { message: String },
#[error("Unimplemented: {message}")]
@@ -147,7 +147,7 @@ impl VeilidAPIError {
pub fn shutdown() -> Self {
Self::Shutdown
}
- pub fn key_not_found(key: PublicKey) -> Self {
+ pub fn key_not_found(key: TypedKey) -> Self {
Self::KeyNotFound { key }
}
pub fn no_connection<T: ToString>(msg: T) -> Self {
@@ -155,7 +155,7 @@ impl VeilidAPIError {
message: msg.to_string(),
}
}
- pub fn no_peer_info(node_id: PublicKey) -> Self {
+ pub fn no_peer_info(node_id: TypedKey) -> Self {
Self::NoPeerInfo { node_id }
}
pub fn internal<T: ToString>(msg: T) -> Self {
@@ -4,8 +4,8 @@ use super::*;

#[derive(Clone, Debug)]
pub enum Target {
- NodeId(PublicKey),
+ NodeId(TypedKey),
- PrivateRoute(PublicKey),
+ PrivateRoute(TypedKey),
}

pub struct RoutingContextInner {}
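For context on the Target enum above: a routing context can address either a node id or an imported private route, and both arms now carry a TypedKey. A stand-alone sketch of dispatching on that choice, with a stand-in key type:

// Stand-in TypedKey; the variants mirror the Target enum in the diff.
#[derive(Clone, Copy, Debug)]
struct TypedKey(u64);

#[derive(Clone, Debug)]
enum Target {
    NodeId(TypedKey),
    PrivateRoute(TypedKey),
}

// Illustrative dispatch: describe where a message would be sent.
fn describe(target: &Target) -> String {
    match target {
        Target::NodeId(k) => format!("direct to node {:?}", k),
        Target::PrivateRoute(k) => format!("via private route {:?}", k),
    }
}

fn main() {
    let t1 = Target::NodeId(TypedKey(1));
    let t2 = Target::PrivateRoute(TypedKey(2));
    println!("{}", describe(&t1));
    println!("{}", describe(&t2));
}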
@ -256,7 +256,7 @@ pub struct VeilidStateAttachment {
|
|||||||
)]
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct PeerTableData {
|
pub struct PeerTableData {
|
||||||
pub node_ids: Vec<TypedKey>,
|
pub node_ids: TypedKeySet,
|
||||||
pub peer_address: PeerAddress,
|
pub peer_address: PeerAddress,
|
||||||
pub peer_stats: PeerStats,
|
pub peer_stats: PeerStats,
|
||||||
}
|
}
|
||||||
@ -279,8 +279,8 @@ pub struct VeilidStateNetwork {
|
|||||||
)]
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct VeilidStateRoute {
|
pub struct VeilidStateRoute {
|
||||||
pub dead_routes: Vec<PublicKey>,
|
pub dead_routes: Vec<TypedKey>,
|
||||||
pub dead_remote_routes: Vec<PublicKey>,
|
pub dead_remote_routes: Vec<TypedKey>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(
|
#[derive(
|
||||||
@ -513,7 +513,7 @@ impl SafetySelection {
|
|||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct SafetySpec {
|
pub struct SafetySpec {
|
||||||
/// preferred safety route if it still exists
|
/// preferred safety route if it still exists
|
||||||
pub preferred_route: Option<PublicKey>,
|
pub preferred_route: Option<TypedKey>,
|
||||||
/// must be greater than 0
|
/// must be greater than 0
|
||||||
pub hop_count: usize,
|
pub hop_count: usize,
|
||||||
/// prefer reliability over speed
|
/// prefer reliability over speed
|
||||||
@ -1924,7 +1924,7 @@ impl SignedDirectNodeInfo {
|
|||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct SignedRelayedNodeInfo {
|
pub struct SignedRelayedNodeInfo {
|
||||||
pub node_info: NodeInfo,
|
pub node_info: NodeInfo,
|
||||||
pub relay_ids: Vec<TypedKey>,
|
pub relay_ids: TypedKeySet,
|
||||||
pub relay_info: SignedDirectNodeInfo,
|
pub relay_info: SignedDirectNodeInfo,
|
||||||
pub timestamp: Timestamp,
|
pub timestamp: Timestamp,
|
||||||
pub signatures: Vec<TypedSignature>,
|
pub signatures: Vec<TypedSignature>,
|
||||||
@ -1935,7 +1935,7 @@ impl SignedRelayedNodeInfo {
|
|||||||
crypto: Crypto,
|
crypto: Crypto,
|
||||||
node_ids: &[TypedKey],
|
node_ids: &[TypedKey],
|
||||||
node_info: NodeInfo,
|
node_info: NodeInfo,
|
||||||
relay_ids: Vec<TypedKey>,
|
relay_ids: TypedKeySet,
|
||||||
relay_info: SignedDirectNodeInfo,
|
relay_info: SignedDirectNodeInfo,
|
||||||
timestamp: Timestamp,
|
timestamp: Timestamp,
|
||||||
typed_signatures: Vec<TypedSignature>,
|
typed_signatures: Vec<TypedSignature>,
|
||||||
@ -1955,7 +1955,7 @@ impl SignedRelayedNodeInfo {
|
|||||||
pub fn make_signatures(
|
pub fn make_signatures(
|
||||||
crypto: Crypto,
|
crypto: Crypto,
|
||||||
node_info: NodeInfo,
|
node_info: NodeInfo,
|
||||||
relay_ids: Vec<TypedKey>,
|
relay_ids: TypedKeySet,
|
||||||
relay_info: SignedDirectNodeInfo,
|
relay_info: SignedDirectNodeInfo,
|
||||||
typed_key_pairs: Vec<TypedKeyPair>,
|
typed_key_pairs: Vec<TypedKeyPair>,
|
||||||
) -> Result<Self, VeilidAPIError> {
|
) -> Result<Self, VeilidAPIError> {
|
||||||
@ -2043,9 +2043,9 @@ impl SignedNodeInfo {
|
|||||||
SignedNodeInfo::Relayed(r) => &r.node_info,
|
SignedNodeInfo::Relayed(r) => &r.node_info,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
pub fn relay_ids(&self) -> Vec<TypedKey> {
|
pub fn relay_ids(&self) -> TypedKeySet {
|
||||||
match self {
|
match self {
|
||||||
SignedNodeInfo::Direct(_) => Vec::new(),
|
SignedNodeInfo::Direct(_) => TypedKeySet::new(),
|
||||||
SignedNodeInfo::Relayed(r) => r.relay_ids.clone(),
|
SignedNodeInfo::Relayed(r) => r.relay_ids.clone(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -2107,12 +2107,12 @@ impl SignedNodeInfo {
 #[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct PeerInfo {
-    pub node_ids: Vec<TypedKey>,
+    pub node_ids: TypedKeySet,
     pub signed_node_info: SignedNodeInfo,
 }
 
 impl PeerInfo {
-    pub fn new(node_ids: Vec<TypedKey>, signed_node_info: SignedNodeInfo) -> Self {
+    pub fn new(node_ids: TypedKeySet, signed_node_info: SignedNodeInfo) -> Self {
         Self {
             node_ids,
             signed_node_info,
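
The relay_ids and node_ids fields likewise move from Vec<TypedKey> to TypedKeySet. The sketch below reads that as a small de-duplicated collection of typed keys; the container and its methods are illustrative stand-ins built on the TypedKey sketch above, not the real veilid-core implementation.

// Illustrative stand-in: a set of TypedKey values that keeps at most one copy
// of each key, in insertion order.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct TypedKeySet {
    items: Vec<TypedKey>,
}

impl TypedKeySet {
    pub fn new() -> Self {
        Self { items: Vec::new() }
    }
    /// Add a key if an equal one is not already present.
    pub fn add(&mut self, key: TypedKey) {
        if !self.items.contains(&key) {
            self.items.push(key);
        }
    }
    /// Look up the key for a particular crypto kind, if any.
    pub fn get(&self, kind: CryptoKind) -> Option<TypedKey> {
        self.items.iter().copied().find(|k| k.kind == kind)
    }
    pub fn len(&self) -> usize {
        self.items.len()
    }
    pub fn is_empty(&self) -> bool {
        self.items.is_empty()
    }
}

Under that reading, SignedNodeInfo::relay_ids() returning TypedKeySet::new() for the direct case is simply the empty set taking over the role the empty Vec played before.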
@ -93,7 +93,7 @@ pub struct VeilidFFIConfig {
 
 #[derive(Debug, Deserialize, Serialize)]
 pub struct VeilidFFIKeyBlob {
-    pub key: veilid_core::PublicKey,
+    pub key: veilid_core::TypedKey,
     #[serde(with = "veilid_core::json_as_base64")]
     pub blob: Vec<u8>,
 }
@ -417,7 +417,7 @@ pub extern "C" fn routing_context_with_sequencing(id: u32, sequencing: FfiStr) -
 
 #[no_mangle]
 pub extern "C" fn routing_context_app_call(port: i64, id: u32, target: FfiStr, request: FfiStr) {
-    let target: veilid_core::PublicKey =
+    let target: veilid_core::TypedKey =
         veilid_core::deserialize_opt_json(target.into_opt_string()).unwrap();
     let request: Vec<u8> = data_encoding::BASE64URL_NOPAD
         .decode(
@ -453,7 +453,7 @@ pub extern "C" fn routing_context_app_call(port: i64, id: u32, target: FfiStr, r
 
 #[no_mangle]
 pub extern "C" fn routing_context_app_message(port: i64, id: u32, target: FfiStr, message: FfiStr) {
-    let target: veilid_core::PublicKey =
+    let target: veilid_core::TypedKey =
         veilid_core::deserialize_opt_json(target.into_opt_string()).unwrap();
     let message: Vec<u8> = data_encoding::BASE64URL_NOPAD
         .decode(
@ -539,7 +539,7 @@ pub extern "C" fn import_remote_private_route(port: i64, blob: FfiStr) {
 
 #[no_mangle]
 pub extern "C" fn release_private_route(port: i64, key: FfiStr) {
-    let key: veilid_core::PublicKey =
+    let key: veilid_core::TypedKey =
         veilid_core::deserialize_opt_json(key.into_opt_string()).unwrap();
     DartIsolateWrapper::new(port).spawn_result(async move {
         let veilid_api = get_veilid_api().await?;
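
All three Dart FFI entry points above share one pattern: the caller passes the target or key as a JSON string, and the Rust side deserializes it into a veilid_core::TypedKey before doing anything else. A self-contained sketch of that step follows; deserialize_opt_json here is a hypothetical stand-in for the helper used in the real bindings, and a plain String stands in for the key type.

use serde::de::DeserializeOwned;

// Hypothetical stand-in: parse an optional JSON string into any
// serde-deserializable type, erroring if the string is absent or malformed.
fn deserialize_opt_json<T: DeserializeOwned>(opt: Option<String>) -> Result<T, String> {
    let json = opt.ok_or_else(|| "missing JSON argument".to_string())?;
    serde_json::from_str(&json).map_err(|e| e.to_string())
}

fn main() {
    // In the real bindings the concrete type is veilid_core::TypedKey; a plain
    // String is used here only to show the round trip.
    let decoded: String =
        deserialize_opt_json(Some("\"example-key\"".to_string())).unwrap();
    assert_eq!(decoded, "example-key");
}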
@ -4,7 +4,7 @@ use clap::{Arg, ArgMatches, Command};
 use std::ffi::OsStr;
 use std::path::Path;
 use std::str::FromStr;
-use veilid_core::{PublicKey, SecretKey};
+use veilid_core::{SecretKey, TypedKey};
 
 fn do_clap_matches(default_config_path: &OsStr) -> Result<clap::ArgMatches, clap::Error> {
     let matches = Command::new("veilid-server")
@ -246,11 +246,11 @@ pub fn process_command_line() -> EyreResult<(Settings, ArgMatches)> {
         // Split or get secret
         let (k, s) = if let Some((k, s)) = v.split_once(':') {
             let k =
-                PublicKey::try_decode(k).wrap_err("failed to decode node id from command line")?;
+                TypedKey::try_decode(k).wrap_err("failed to decode node id from command line")?;
             let s = SecretKey::try_decode(s)?;
             (k, s)
         } else {
-            let k = PublicKey::try_decode(v)?;
+            let k = TypedKey::try_decode(v)?;
             let buffer = rpassword::prompt_password("Enter secret key (will not echo): ")
                 .wrap_err("invalid secret key")?;
             let buffer = buffer.trim().to_string();
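
The command-line handling above accepts either a single node-id:secret argument or a bare node id followed by an interactive prompt for the secret, branching on str::split_once. A small generic illustration of that split-or-prompt structure; the identifiers are illustrative and the prompt itself is omitted.

// split_once splits at the first occurrence of the separator only; anything
// after the first ':' is treated as the secret in this simplified version.
fn split_id_and_secret(arg: &str) -> (String, Option<String>) {
    match arg.split_once(':') {
        Some((id, secret)) => (id.to_string(), Some(secret.to_string())),
        None => (arg.to_string(), None), // no secret given; the real code prompts for it
    }
}

fn main() {
    assert_eq!(
        split_id_and_secret("nodeid:secret"),
        ("nodeid".to_string(), Some("secret".to_string()))
    );
    assert_eq!(split_id_and_secret("nodeid"), ("nodeid".to_string(), None));
}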
@ -595,7 +595,7 @@ pub struct Network {
     pub client_whitelist_timeout_ms: u32,
     pub reverse_connection_receipt_time_ms: u32,
     pub hole_punch_receipt_time_ms: u32,
-    pub node_id: Option<veilid_core::PublicKey>,
+    pub node_id: Option<veilid_core::TypedKey>,
     pub node_id_secret: Option<veilid_core::SecretKey>,
     pub bootstrap: Vec<String>,
     pub bootstrap_nodes: Vec<ParsedNodeDialInfo>,
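
Because the Network settings struct above is filled from the server's configuration file, changing node_id to Option<veilid_core::TypedKey> also changes what an operator writes there. The sketch below shows only the general serde mechanics with a trimmed stand-in struct; the TOML shape and the field subset are assumptions, not the documented veilid-server format.

use serde::Deserialize;

// Trimmed stand-in for the Network section above; node_id is kept as a plain
// string here rather than veilid_core::TypedKey to stay self-contained.
#[derive(Debug, Deserialize)]
struct NetworkSection {
    #[serde(default)]
    node_id: Option<String>,
    #[serde(default)]
    node_id_secret: Option<String>,
    #[serde(default)]
    bootstrap: Vec<String>,
}

fn main() {
    // With node_id absent, the Option stays None, the case in which the server
    // has to come up with its own keys.
    let cfg: NetworkSection = toml::from_str(
        r#"
        bootstrap = ["bootstrap.example.net"]
        "#,
    )
    .unwrap();
    assert!(cfg.node_id.is_none() && cfg.node_id_secret.is_none());
    assert_eq!(cfg.bootstrap.len(), 1);
}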
@ -137,7 +137,7 @@ pub struct VeilidWASMConfig {
 
 #[derive(Debug, Deserialize, Serialize)]
 pub struct VeilidKeyBlob {
-    pub key: veilid_core::PublicKey,
+    pub key: veilid_core::TypedKey,
     #[serde(with = "veilid_core::json_as_base64")]
     pub blob: Vec<u8>,
 }
@ -459,7 +459,7 @@ pub fn import_remote_private_route(blob: String) -> Promise {
 
 #[wasm_bindgen()]
 pub fn release_private_route(key: String) -> Promise {
-    let key: veilid_core::PublicKey = veilid_core::deserialize_json(&key).unwrap();
+    let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap();
     wrap_api_future_void(async move {
         let veilid_api = get_veilid_api()?;
         veilid_api.release_private_route(&key)?;
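
The WASM binding applies the same typed-key change and hands its result back to JavaScript through a js_sys::Promise. wrap_api_future_void is an internal helper of this crate; the sketch below shows what such a helper commonly looks like when built on wasm_bindgen_futures, with the name and the error mapping assumed for illustration.

use js_sys::Promise;
use wasm_bindgen::JsValue;
use wasm_bindgen_futures::future_to_promise;

// Illustrative helper: run an async API call and surface it to JavaScript as
// a Promise that resolves to undefined on success or rejects with a string.
pub fn wrap_api_future_void<F, E>(future: F) -> Promise
where
    F: std::future::Future<Output = Result<(), E>> + 'static,
    E: std::fmt::Display + 'static,
{
    future_to_promise(async move {
        match future.await {
            Ok(()) => Ok(JsValue::UNDEFINED),
            Err(e) => Err(JsValue::from_str(&e.to_string())),
        }
    })
}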