Merge branch 'dev' into 'main'

Multi-Crypto support

See merge request veilid/veilid!18
John Smith 2023-03-15 01:59:18 +00:00
commit 1430f3f656
24 changed files with 369 additions and 336 deletions

View File

@@ -43,7 +43,7 @@ flexi_logger = { version = "^0", features = ["use_chrono_for_offset"] }
 thiserror = "^1"
 crossbeam-channel = "^0"
 hex = "^0"
-veilid-core = { path = "../veilid-core", default_features = false }
+veilid-core = { path = "../veilid-core" }
 json = "^0"

 [dev-dependencies]

View File

@@ -52,8 +52,8 @@ impl TableViewItem<PeerTableColumn> for PeerTableData {
         match column {
             PeerTableColumn::NodeId => self
                 .node_ids
-                .best()
-                .map(|n| n.value.encode())
+                .first()
+                .cloned()
                 .unwrap_or_else(|| "???".to_owned()),
             PeerTableColumn::Address => format!(
                 "{:?}:{}",
@@ -78,21 +78,7 @@ impl TableViewItem<PeerTableColumn> for PeerTableData {
         Self: Sized,
     {
         match column {
-            PeerTableColumn::NodeId => {
-                let n1 = self
-                    .node_ids
-                    .best()
-                    .map(|n| n.value.encode())
-                    .unwrap_or_else(|| "???".to_owned());
-                let n2 = other
-                    .node_ids
-                    .best()
-                    .map(|n| n.value.encode())
-                    .unwrap_or_else(|| "???".to_owned());
-                n1.cmp(&n2)
-            }
+            PeerTableColumn::NodeId => self.to_column(column).cmp(&other.to_column(column)),
             PeerTableColumn::Address => self.to_column(column).cmp(&other.to_column(column)),
             PeerTableColumn::LatencyAvg => self
                 .peer_stats

View File

@@ -753,7 +753,7 @@ impl UI {
             .full_screen();
         let peers_table_view = PeersTableView::new()
-            .column(PeerTableColumn::NodeId, "Node Id", |c| c.width(43))
+            .column(PeerTableColumn::NodeId, "Node Id", |c| c.width(48))
             .column(PeerTableColumn::Address, "Address", |c| c)
             .column(PeerTableColumn::LatencyAvg, "Ping", |c| c.width(8))
             .column(PeerTableColumn::TransferDownAvg, "Down", |c| c.width(8))

View File

@@ -11,6 +11,8 @@ crate-type = ["cdylib", "staticlib", "rlib"]

 [features]
 default = [ "enable-crypto-vld0" ]
+crypto-test = [ "enable-crypto-vld0", "enable-crypto-none" ]
+crypto-test-none = [ "enable-crypto-none" ]
 enable-crypto-vld0 = []
 enable-crypto-none = []
 rt-async-std = ["async-std", "async-std-resolver", "async_executors/async_std", "rtnetlink?/smol_socket", "veilid-tools/rt-async-std"]

View File

@@ -1,3 +1,4 @@
+mod blake3digest512;
 mod byte_array_types;
 mod dh_cache;
 mod envelope;
@@ -6,9 +7,13 @@ mod types;
 mod value;

 pub mod crypto_system;
+#[cfg(feature = "enable-crypto-none")]
+pub mod none;
 pub mod tests;
+#[cfg(feature = "enable-crypto-vld0")]
 pub mod vld0;

+pub use blake3digest512::*;
 pub use byte_array_types::*;
 pub use crypto_system::*;
 pub use dh_cache::*;
@@ -16,6 +21,10 @@ pub use envelope::*;
 pub use receipt::*;
 pub use types::*;
 pub use value::*;
+
+#[cfg(feature = "enable-crypto-none")]
+pub use none::*;
+#[cfg(feature = "enable-crypto-vld0")]
 pub use vld0::*;

 use crate::*;
@@ -24,11 +33,26 @@ use hashlink::linked_hash_map::Entry;
 use hashlink::LruCache;
 use serde::{Deserialize, Serialize};

-// Handle to a particular cryptosystem
+/// Handle to a particular cryptosystem
 pub type CryptoSystemVersion = Arc<dyn CryptoSystem + Send + Sync>;

-/// Crypto kinds in order of preference, best cryptosystem is the first one, worst is the last one
-pub const VALID_CRYPTO_KINDS: [CryptoKind; 1] = [CRYPTO_KIND_VLD0];
+cfg_if! {
+    if #[cfg(all(feature = "enable-crypto-none", feature = "enable-crypto-vld0"))] {
+        /// Crypto kinds in order of preference, best cryptosystem is the first one, worst is the last one
+        pub const VALID_CRYPTO_KINDS: [CryptoKind; 2] = [CRYPTO_KIND_VLD0, CRYPTO_KIND_NONE];
+    }
+    else if #[cfg(feature = "enable-crypto-none")] {
+        /// Crypto kinds in order of preference, best cryptosystem is the first one, worst is the last one
+        pub const VALID_CRYPTO_KINDS: [CryptoKind; 1] = [CRYPTO_KIND_NONE];
+    }
+    else if #[cfg(feature = "enable-crypto-vld0")] {
+        /// Crypto kinds in order of preference, best cryptosystem is the first one, worst is the last one
+        pub const VALID_CRYPTO_KINDS: [CryptoKind; 1] = [CRYPTO_KIND_VLD0];
+    }
+    else {
+        compile_error!("No crypto kinds enabled, specify an enable-crypto- feature");
+    }
+}

 /// Number of cryptosystem signatures to keep on structures if many are present beyond the ones we consider valid
 pub const MAX_CRYPTO_KINDS: usize = 3;

 /// Return the best cryptosystem kind we support
@@ -36,7 +60,7 @@ pub fn best_crypto_kind() -> CryptoKind {
     VALID_CRYPTO_KINDS[0]
 }

-// Version number of envelope format
+/// Version number of envelope format
 pub type EnvelopeVersion = u8;

 /// Envelope versions in order of preference, best envelope version is the first one, worst is the last one
@@ -51,7 +75,10 @@ pub fn best_envelope_version() -> EnvelopeVersion {
 struct CryptoInner {
     dh_cache: DHCache,
     flush_future: Option<SendPinBoxFuture<()>>,
+    #[cfg(feature = "enable-crypto-vld0")]
     crypto_vld0: Option<Arc<dyn CryptoSystem + Send + Sync>>,
+    #[cfg(feature = "enable-crypto-none")]
+    crypto_none: Option<Arc<dyn CryptoSystem + Send + Sync>>,
 }

 struct CryptoUnlockedInner {
@@ -72,7 +99,10 @@ impl Crypto {
         CryptoInner {
             dh_cache: DHCache::new(DH_CACHE_SIZE),
             flush_future: None,
+            #[cfg(feature = "enable-crypto-vld0")]
             crypto_vld0: None,
+            #[cfg(feature = "enable-crypto-none")]
+            crypto_none: None,
         }
     }
@@ -90,7 +120,15 @@ impl Crypto {
             inner: Arc::new(Mutex::new(Self::new_inner())),
         };

-        out.inner.lock().crypto_vld0 = Some(Arc::new(vld0::CryptoSystemVLD0::new(out.clone())));
+        #[cfg(feature = "enable-crypto-vld0")]
+        {
+            out.inner.lock().crypto_vld0 = Some(Arc::new(vld0::CryptoSystemVLD0::new(out.clone())));
+        }
+        #[cfg(feature = "enable-crypto-none")]
+        {
+            out.inner.lock().crypto_none = Some(Arc::new(none::CryptoSystemNONE::new(out.clone())));
+        }

         out
     }
@@ -203,7 +241,10 @@ impl Crypto {
     pub fn get(&self, kind: CryptoKind) -> Option<CryptoSystemVersion> {
         let inner = self.inner.lock();
         match kind {
+            #[cfg(feature = "enable-crypto-vld0")]
             CRYPTO_KIND_VLD0 => Some(inner.crypto_vld0.clone().unwrap()),
+            #[cfg(feature = "enable-crypto-none")]
+            CRYPTO_KIND_NONE => Some(inner.crypto_none.clone().unwrap()),
             _ => None,
         }
     }
@@ -262,10 +303,16 @@ impl Crypto {
     /// Generate keypair
     /// Does not require startup/init
     pub fn generate_keypair(crypto_kind: CryptoKind) -> Result<TypedKeyPair, VeilidAPIError> {
+        #[cfg(feature = "enable-crypto-vld0")]
         if crypto_kind == CRYPTO_KIND_VLD0 {
             let kp = vld0_generate_keypair();
             return Ok(TypedKeyPair::new(crypto_kind, kp));
         }
+        #[cfg(feature = "enable-crypto-none")]
+        if crypto_kind == CRYPTO_KIND_NONE {
+            let kp = none_generate_keypair();
+            return Ok(TypedKeyPair::new(crypto_kind, kp));
+        }
         Err(VeilidAPIError::generic("invalid crypto kind"))
     }
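Reviewer note: with the cfg-gated kinds table and the per-kind system handles above, callers resolve a cryptosystem at runtime instead of assuming VLD0. A minimal sketch of that call path inside veilid-core (the helper name sign_with_best is illustrative; Crypto::get, best_crypto_kind, generate_keypair and sign are the APIs visible in this diff):

// Sketch only: assumes the veilid-core crate context and an already
// started-up Crypto handle.
fn sign_with_best(crypto: &Crypto, data: &[u8]) -> Result<Signature, VeilidAPIError> {
    // best_crypto_kind() is VALID_CRYPTO_KINDS[0], so it follows the cfg_if table above
    let vcrypto: CryptoSystemVersion = crypto
        .get(best_crypto_kind())
        .expect("best crypto kind is always registered");
    let keypair = vcrypto.generate_keypair();
    vcrypto.sign(&keypair.key, &keypair.secret, data)
}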

View File

@@ -1,62 +1,68 @@
-pub mod blake3digest512;
-pub use blake3digest512::*;
-
 use super::*;
-use chacha20::cipher::{KeyIvInit, StreamCipher};
-use chacha20::XChaCha20;
-use chacha20poly1305 as ch;
-use chacha20poly1305::aead::{AeadInPlace, NewAead};
-use core::convert::TryInto;
-use curve25519_dalek as cd;
 use digest::Digest;
-use ed25519_dalek as ed;
-use x25519_dalek as xd;
+use rand::RngCore;

-const AEAD_OVERHEAD: usize = 16;
-pub const CRYPTO_KIND_VLD0: CryptoKind = FourCC([b'V', b'L', b'D', b'0']);
+const AEAD_OVERHEAD: usize = PUBLIC_KEY_LENGTH;
+pub const CRYPTO_KIND_NONE: CryptoKind = FourCC([b'N', b'O', b'N', b'E']);

-fn ed25519_to_x25519_pk(key: &ed::PublicKey) -> Result<xd::PublicKey, VeilidAPIError> {
-    let bytes = key.to_bytes();
-    let compressed = cd::edwards::CompressedEdwardsY(bytes);
-    let point = compressed
-        .decompress()
-        .ok_or_else(|| VeilidAPIError::internal("ed25519_to_x25519_pk failed"))?;
-    let mp = point.to_montgomery();
-    Ok(xd::PublicKey::from(mp.to_bytes()))
-}
-
-fn ed25519_to_x25519_sk(key: &ed::SecretKey) -> Result<xd::StaticSecret, VeilidAPIError> {
-    let exp = ed::ExpandedSecretKey::from(key);
-    let bytes: [u8; ed::EXPANDED_SECRET_KEY_LENGTH] = exp.to_bytes();
-    let lowbytes: [u8; 32] = bytes[0..32].try_into().map_err(VeilidAPIError::internal)?;
-    Ok(xd::StaticSecret::from(lowbytes))
-}
-
-pub fn vld0_generate_keypair() -> KeyPair {
+pub fn none_generate_keypair() -> KeyPair {
     let mut csprng = VeilidRng {};
-    let keypair = ed::Keypair::generate(&mut csprng);
-    let dht_key = PublicKey::new(keypair.public.to_bytes());
-    let dht_key_secret = SecretKey::new(keypair.secret.to_bytes());
+    let mut pub_bytes = [0u8; PUBLIC_KEY_LENGTH];
+    let mut sec_bytes = [0u8; SECRET_KEY_LENGTH];
+    csprng.fill_bytes(&mut pub_bytes);
+    for n in 0..PUBLIC_KEY_LENGTH {
+        sec_bytes[n] = !pub_bytes[n];
+    }
+    let dht_key = PublicKey::new(pub_bytes);
+    let dht_key_secret = SecretKey::new(sec_bytes);
     KeyPair::new(dht_key, dht_key_secret)
 }

-/// V0 CryptoSystem
+fn do_xor_32(a: &[u8], b: &[u8]) -> [u8; 32] {
+    let mut out = [0u8; 32];
+    for n in 0..32 {
+        out[n] = a[n] ^ b[n];
+    }
+    out
+}
+
+fn do_xor_inplace(a: &mut [u8], key: &[u8]) {
+    for n in 0..a.len() {
+        a[n] ^= key[n % key.len()];
+    }
+}
+
+fn do_xor_b2b(a: &[u8], b: &mut [u8], key: &[u8]) {
+    for n in 0..a.len() {
+        b[n] = a[n] ^ key[n % key.len()];
+    }
+}
+
+fn is_bytes_eq_32(a: &[u8], v: u8) -> bool {
+    for n in 0..32 {
+        if a[n] != v {
+            return false;
+        }
+    }
+    true
+}
+
+/// None CryptoSystem
 #[derive(Clone)]
-pub struct CryptoSystemVLD0 {
+pub struct CryptoSystemNONE {
     crypto: Crypto,
 }

-impl CryptoSystemVLD0 {
+impl CryptoSystemNONE {
     pub fn new(crypto: Crypto) -> Self {
         Self { crypto }
     }
 }

-impl CryptoSystem for CryptoSystemVLD0 {
+impl CryptoSystem for CryptoSystemNONE {
     // Accessors
     fn kind(&self) -> CryptoKind {
-        CRYPTO_KIND_VLD0
+        CRYPTO_KIND_NONE
     }

     fn crypto(&self) -> Crypto {
@@ -70,17 +76,17 @@ impl CryptoSystem for CryptoSystemVLD0 {
         secret: &SecretKey,
     ) -> Result<SharedSecret, VeilidAPIError> {
         self.crypto
-            .cached_dh_internal::<CryptoSystemVLD0>(self, key, secret)
+            .cached_dh_internal::<CryptoSystemNONE>(self, key, secret)
     }

     // Generation
     fn random_nonce(&self) -> Nonce {
-        let mut nonce = [0u8; 24];
+        let mut nonce = [0u8; NONCE_LENGTH];
         random_bytes(&mut nonce).unwrap();
         Nonce::new(nonce)
     }

     fn random_shared_secret(&self) -> SharedSecret {
-        let mut s = [0u8; 32];
+        let mut s = [0u8; SHARED_SECRET_LENGTH];
         random_bytes(&mut s).unwrap();
         SharedSecret::new(s)
     }
@@ -89,14 +95,11 @@ impl CryptoSystem for CryptoSystemVLD0 {
         key: &PublicKey,
         secret: &SecretKey,
     ) -> Result<SharedSecret, VeilidAPIError> {
-        let pk_ed = ed::PublicKey::from_bytes(&key.bytes).map_err(VeilidAPIError::internal)?;
-        let pk_xd = ed25519_to_x25519_pk(&pk_ed)?;
-        let sk_ed = ed::SecretKey::from_bytes(&secret.bytes).map_err(VeilidAPIError::internal)?;
-        let sk_xd = ed25519_to_x25519_sk(&sk_ed)?;
-        Ok(SharedSecret::new(sk_xd.diffie_hellman(&pk_xd).to_bytes()))
+        let s = do_xor_32(&key.bytes, &secret.bytes);
+        Ok(SharedSecret::new(s))
     }

     fn generate_keypair(&self) -> KeyPair {
-        vld0_generate_keypair()
+        none_generate_keypair()
     }

     fn generate_hash(&self, data: &[u8]) -> PublicKey {
         PublicKey::new(*blake3::hash(data).as_bytes())
@@ -123,7 +126,6 @@ impl CryptoSystem for CryptoSystemVLD0 {
     }

     fn validate_hash(&self, data: &[u8], dht_key: &PublicKey) -> bool {
         let bytes = *blake3::hash(data).as_bytes();
         bytes == dht_key.bytes
     }

     fn validate_hash_reader(
@@ -154,22 +156,21 @@ impl CryptoSystem for CryptoSystemVLD0 {
         dht_key_secret: &SecretKey,
         data: &[u8],
     ) -> Result<Signature, VeilidAPIError> {
-        let mut kpb: [u8; SECRET_KEY_LENGTH + PUBLIC_KEY_LENGTH] =
-            [0u8; SECRET_KEY_LENGTH + PUBLIC_KEY_LENGTH];
-
-        kpb[..SECRET_KEY_LENGTH].copy_from_slice(&dht_key_secret.bytes);
-        kpb[SECRET_KEY_LENGTH..].copy_from_slice(&dht_key.bytes);
-        let keypair = ed::Keypair::from_bytes(&kpb)
-            .map_err(|e| VeilidAPIError::parse_error("Keypair is invalid", e))?;
+        if !is_bytes_eq_32(&do_xor_32(&dht_key.bytes, &dht_key_secret.bytes), 0xFFu8) {
+            return Err(VeilidAPIError::parse_error(
+                "Keypair is invalid",
+                "invalid keys",
+            ));
+        }

         let mut dig = Blake3Digest512::new();
         dig.update(data);
+        let sig = dig.finalize();
+        let in_sig_bytes: [u8; SIGNATURE_LENGTH] = sig.into();
+        let mut sig_bytes = [0u8; SIGNATURE_LENGTH];
+        sig_bytes[0..32].copy_from_slice(&in_sig_bytes[0..32]);
+        sig_bytes[32..64].copy_from_slice(&do_xor_32(&in_sig_bytes[32..64], &dht_key_secret.bytes));

-        let sig = keypair
-            .sign_prehashed(dig, None)
-            .map_err(VeilidAPIError::internal)?;
-
-        let dht_sig = Signature::new(sig.to_bytes());
+        let dht_sig = Signature::new(sig_bytes.into());
         Ok(dht_sig)
     }

     fn verify(
@@ -178,16 +179,29 @@ impl CryptoSystem for CryptoSystemVLD0 {
         data: &[u8],
         signature: &Signature,
     ) -> Result<(), VeilidAPIError> {
-        let pk = ed::PublicKey::from_bytes(&dht_key.bytes)
-            .map_err(|e| VeilidAPIError::parse_error("Public key is invalid", e))?;
-        let sig = ed::Signature::from_bytes(&signature.bytes)
-            .map_err(|e| VeilidAPIError::parse_error("Signature is invalid", e))?;
-
         let mut dig = Blake3Digest512::new();
         dig.update(data);
+        let sig = dig.finalize();
+        let in_sig_bytes: [u8; SIGNATURE_LENGTH] = sig.into();
+        let mut verify_bytes = [0u8; SIGNATURE_LENGTH];
+        verify_bytes[0..32]
+            .copy_from_slice(&do_xor_32(&in_sig_bytes[0..32], &signature.bytes[0..32]));
+        verify_bytes[32..64]
+            .copy_from_slice(&do_xor_32(&in_sig_bytes[32..64], &signature.bytes[32..64]));

-        pk.verify_prehashed(dig, None, &sig)
-            .map_err(|e| VeilidAPIError::parse_error("Verification failed", e))?;
+        if !is_bytes_eq_32(&verify_bytes[0..32], 0u8) {
+            return Err(VeilidAPIError::parse_error(
+                "Verification failed",
+                "signature 0..32 is invalid",
+            ));
+        }
+        if !is_bytes_eq_32(&do_xor_32(&verify_bytes[32..64], &dht_key.bytes), 0xFFu8) {
+            return Err(VeilidAPIError::parse_error(
+                "Verification failed",
+                "signature 32..64 is invalid",
+            ));
+        }

         Ok(())
     }
@@ -200,14 +214,21 @@ impl CryptoSystem for CryptoSystemVLD0 {
         body: &mut Vec<u8>,
         nonce: &Nonce,
         shared_secret: &SharedSecret,
-        associated_data: Option<&[u8]>,
+        _associated_data: Option<&[u8]>,
     ) -> Result<(), VeilidAPIError> {
-        let key = ch::Key::from(shared_secret.bytes);
-        let xnonce = ch::XNonce::from(nonce.bytes);
-        let aead = ch::XChaCha20Poly1305::new(&key);
-        aead.decrypt_in_place(&xnonce, associated_data.unwrap_or(b""), body)
-            .map_err(map_to_string)
-            .map_err(VeilidAPIError::generic)
+        let mut blob = nonce.bytes.to_vec();
+        blob.extend_from_slice(&[0u8; 8]);
+        let blob = do_xor_32(&blob, &shared_secret.bytes);
+
+        if body.len() < AEAD_OVERHEAD {
+            return Err(VeilidAPIError::generic("invalid length"));
+        }
+        if &body[body.len() - AEAD_OVERHEAD..] != &blob {
+            return Err(VeilidAPIError::generic("invalid keyblob"));
+        }
+        body.truncate(body.len() - AEAD_OVERHEAD);
+        do_xor_inplace(body, &blob);
+        Ok(())
     }

     fn decrypt_aead(
@@ -229,15 +250,14 @@ impl CryptoSystem for CryptoSystemVLD0 {
         body: &mut Vec<u8>,
         nonce: &Nonce,
         shared_secret: &SharedSecret,
-        associated_data: Option<&[u8]>,
+        _associated_data: Option<&[u8]>,
     ) -> Result<(), VeilidAPIError> {
-        let key = ch::Key::from(shared_secret.bytes);
-        let xnonce = ch::XNonce::from(nonce.bytes);
-        let aead = ch::XChaCha20Poly1305::new(&key);
-
-        aead.encrypt_in_place(&xnonce, associated_data.unwrap_or(b""), body)
-            .map_err(map_to_string)
-            .map_err(VeilidAPIError::generic)
+        let mut blob = nonce.bytes.to_vec();
+        blob.extend_from_slice(&[0u8; 8]);
+        let blob = do_xor_32(&blob, &shared_secret.bytes);
+        do_xor_inplace(body, &blob);
+        body.append(&mut blob.to_vec());
+        Ok(())
     }

     fn encrypt_aead(
@@ -261,8 +281,10 @@ impl CryptoSystem for CryptoSystemVLD0 {
         nonce: &Nonce,
         shared_secret: &SharedSecret,
     ) {
-        let mut cipher = XChaCha20::new(&shared_secret.bytes.into(), &nonce.bytes.into());
-        cipher.apply_keystream(body);
+        let mut blob = nonce.bytes.to_vec();
+        blob.extend_from_slice(&[0u8; 8]);
+        let blob = do_xor_32(&blob, &shared_secret.bytes);
+        do_xor_inplace(body, &blob);
     }

     fn crypt_b2b_no_auth(
@@ -272,8 +294,10 @@ impl CryptoSystem for CryptoSystemVLD0 {
         nonce: &Nonce,
         shared_secret: &SharedSecret,
     ) {
-        let mut cipher = XChaCha20::new(&shared_secret.bytes.into(), &nonce.bytes.into());
-        cipher.apply_keystream_b2b(in_buf, out_buf).unwrap();
+        let mut blob = nonce.bytes.to_vec();
+        blob.extend_from_slice(&[0u8; 8]);
+        let blob = do_xor_32(&blob, &shared_secret.bytes);
+        do_xor_b2b(in_buf, out_buf, &blob);
     }

     fn crypt_no_auth_aligned_8(
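Reviewer note: CryptoSystemNONE is a test-only stand-in with no real security; keys, shared secrets, and the appended "keyblob" are plain XOR constructions. A self-contained illustration of the two invariants the code above relies on (xor32 here only mirrors do_xor_32; all values are made up):

fn xor32(a: &[u8; 32], b: &[u8; 32]) -> [u8; 32] {
    let mut out = [0u8; 32];
    for i in 0..32 {
        out[i] = a[i] ^ b[i];
    }
    out
}

fn main() {
    // Keypair invariant from none_generate_keypair(): secret = !public,
    // so public XOR secret is 0xFF in every byte (what sign() checks).
    let public = [0x5Au8; 32];
    let secret = public.map(|b| !b);
    assert!(xor32(&public, &secret).iter().all(|&b| b == 0xFF));

    // "Keyblob" from the no-auth encrypt/decrypt hunks above: the 24-byte
    // nonce zero-padded to 32 bytes, XORed with the shared secret; decrypt
    // requires the trailing AEAD_OVERHEAD bytes of the body to equal it.
    let shared_secret = [0x11u8; 32];
    let mut padded_nonce = [0u8; 32];
    padded_nonce[..24].copy_from_slice(&[7u8; 24]);
    let keyblob = xor32(&padded_nonce, &shared_secret);
    assert_eq!(keyblob.len(), 32);
}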

View File

@@ -55,20 +55,14 @@ pub async fn test_sign_and_verify(vcrypto: CryptoSystemVersion) {
     let a2 = vcrypto
         .sign(&dht_key2, &dht_key_secret2, LOREM_IPSUM.as_bytes())
         .unwrap();
-    let b1 = vcrypto
+    let _b1 = vcrypto
         .sign(&dht_key, &dht_key_secret2, LOREM_IPSUM.as_bytes())
-        .unwrap();
-    let b2 = vcrypto
+        .unwrap_err();
+    let _b2 = vcrypto
         .sign(&dht_key2, &dht_key_secret, LOREM_IPSUM.as_bytes())
-        .unwrap();
-    assert_ne!(a1, b1);
-    assert_ne!(a2, b2);
-    assert_ne!(a1, b2);
-    assert_ne!(a2, b1);
+        .unwrap_err();
     assert_ne!(a1, a2);
-    assert_ne!(b1, b2);
-    assert_ne!(a1, b2);
-    assert_ne!(b1, a2);

     assert_eq!(
         vcrypto.verify(&dht_key, LOREM_IPSUM.as_bytes(), &a1),
@@ -79,10 +73,10 @@ pub async fn test_sign_and_verify(vcrypto: CryptoSystemVersion) {
         Ok(())
     );
     assert!(vcrypto
-        .verify(&dht_key, LOREM_IPSUM.as_bytes(), &b1)
+        .verify(&dht_key, LOREM_IPSUM.as_bytes(), &a2)
         .is_err());
     assert!(vcrypto
-        .verify(&dht_key2, LOREM_IPSUM.as_bytes(), &b2)
+        .verify(&dht_key2, LOREM_IPSUM.as_bytes(), &a1)
         .is_err());

     // Try verifications that should work

View File

@@ -195,9 +195,10 @@ where
         write!(f, "[")?;
         let mut first = true;
         for x in &self.items {
-            if !first {
-                write!(f, ",")?;
+            if first {
                 first = false;
+            } else {
+                write!(f, ",")?;
             }
             write!(f, "{}", x)?;
         }
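Reviewer note: the old branch never cleared first, so no separator was ever emitted between set items; reordering the branches fixes the comma placement. A standalone check of the corrected pattern (names and values are illustrative):

fn join_bracketed(items: &[&str]) -> String {
    let mut out = String::from("[");
    let mut first = true;
    for x in items {
        if first {
            first = false;
        } else {
            out.push(',');
        }
        out.push_str(x);
    }
    out.push(']');
    out
}

fn main() {
    assert_eq!(join_bracketed(&["VLD0:aaaa", "NONE:bbbb"]), "[VLD0:aaaa,NONE:bbbb]");
}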

View File

@@ -1,70 +0,0 @@
-use digest::generic_array::typenum::U64;
-use digest::{Digest, Output};
-use generic_array::GenericArray;
-
-pub struct Blake3Digest512 {
-    dig: blake3::Hasher,
-}
-
-impl Digest for Blake3Digest512 {
-    type OutputSize = U64;
-
-    fn new() -> Self {
-        Self {
-            dig: blake3::Hasher::new(),
-        }
-    }
-
-    fn update(&mut self, data: impl AsRef<[u8]>) {
-        self.dig.update(data.as_ref());
-    }
-
-    fn chain(mut self, data: impl AsRef<[u8]>) -> Self
-    where
-        Self: Sized,
-    {
-        self.update(data);
-        self
-    }
-
-    fn finalize(self) -> Output<Self> {
-        let mut b = [0u8; 64];
-        self.dig.finalize_xof().fill(&mut b);
-        let mut out = GenericArray::<u8, U64>::default();
-        for n in 0..64 {
-            out[n] = b[n];
-        }
-        out
-    }
-
-    fn finalize_reset(&mut self) -> Output<Self> {
-        let mut b = [0u8; 64];
-        self.dig.finalize_xof().fill(&mut b);
-        let mut out = GenericArray::<u8, U64>::default();
-        for n in 0..64 {
-            out[n] = b[n];
-        }
-        self.reset();
-        out
-    }
-
-    fn reset(&mut self) {
-        self.dig.reset();
-    }
-
-    fn output_size() -> usize {
-        64
-    }
-
-    fn digest(data: &[u8]) -> Output<Self> {
-        let mut dig = blake3::Hasher::new();
-        dig.update(data);
-        let mut b = [0u8; 64];
-        dig.finalize_xof().fill(&mut b);
-        let mut out = GenericArray::<u8, U64>::default();
-        for n in 0..64 {
-            out[n] = b[n];
-        }
-        out
-    }
-}

View File

@@ -1,6 +1,3 @@
-pub mod blake3digest512;
-pub use blake3digest512::*;
-
 use super::*;
 use chacha20::cipher::{KeyIvInit, StreamCipher};
@@ -75,12 +72,12 @@ impl CryptoSystem for CryptoSystemVLD0 {
     // Generation
     fn random_nonce(&self) -> Nonce {
-        let mut nonce = [0u8; 24];
+        let mut nonce = [0u8; NONCE_LENGTH];
         random_bytes(&mut nonce).unwrap();
         Nonce::new(nonce)
     }

     fn random_shared_secret(&self) -> SharedSecret {
-        let mut s = [0u8; 32];
+        let mut s = [0u8; SHARED_SECRET_LENGTH];
         random_bytes(&mut s).unwrap();
         SharedSecret::new(s)
     }
@@ -165,12 +162,15 @@ impl CryptoSystem for CryptoSystemVLD0 {
         let mut dig = Blake3Digest512::new();
         dig.update(data);

-        let sig = keypair
+        let sig_bytes = keypair
             .sign_prehashed(dig, None)
             .map_err(VeilidAPIError::internal)?;

-        let dht_sig = Signature::new(sig.to_bytes());
-        Ok(dht_sig)
+        let sig = Signature::new(sig_bytes.to_bytes());
+
+        self.verify(dht_key, &data, &sig)?;
+
+        Ok(sig)
     }

     fn verify(
         &self,

View File

@@ -35,7 +35,6 @@ mod veilid_layer_filter;

 pub use self::api_tracing_layer::ApiTracingLayer;
 pub use self::core_context::{api_startup, api_startup_json, UpdateCallback};
-pub use self::crypto::vld0_generate_keypair;
 pub use self::veilid_api::*;
 pub use self::veilid_config::*;
 pub use self::veilid_layer_filter::*;

View File

@@ -1551,7 +1551,7 @@ impl NetworkManager {
             if let Some(nr) = routing_table.lookup_node_ref(k) {
                 let peer_stats = nr.peer_stats();
                 let peer = PeerTableData {
-                    node_ids: nr.node_ids(),
+                    node_ids: nr.node_ids().iter().map(|x| x.to_string()).collect(),
                     peer_address: v.last_connection.remote(),
                     peer_stats,
                 };

View File

@@ -126,19 +126,27 @@ impl BucketEntryInner {
     pub fn node_ids(&self) -> TypedKeySet {
         self.node_ids.clone()
     }

     /// Add a node id for a particular crypto kind.
-    /// Returns any previous existing node id associated with that crypto kind
-    pub fn add_node_id(&mut self, node_id: TypedKey) -> Option<TypedKey> {
+    /// Returns Ok(Some(node)) any previous existing node id associated with that crypto kind
+    /// Returns Ok(None) if no previous existing node id was associated with that crypto kind
+    /// Results Err() if this operation would add more crypto kinds than we support
+    pub fn add_node_id(&mut self, node_id: TypedKey) -> EyreResult<Option<TypedKey>> {
         if let Some(old_node_id) = self.node_ids.get(node_id.kind) {
             // If this was already there we do nothing
             if old_node_id == node_id {
-                return None;
+                return Ok(None);
             }
+            // Won't change number of crypto kinds
             self.node_ids.add(node_id);
-            return Some(old_node_id);
+            return Ok(Some(old_node_id));
         }
+        // Check to ensure we aren't adding more crypto kinds than we support
+        if self.node_ids.len() == MAX_CRYPTO_KINDS {
+            bail!("too many crypto kinds for this node");
+        }
         self.node_ids.add(node_id);
-        None
+        Ok(None)
     }

     pub fn best_node_id(&self) -> TypedKey {
         self.node_ids.best().unwrap()
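Reviewer note: add_node_id is now fallible, so callers must handle the MAX_CRYPTO_KINDS cap as well as the replace case. A simplified, self-contained analog of the new control flow using plain std types instead of TypedKeySet/EyreResult (all names below are illustrative):

use std::collections::BTreeMap;

const MAX_CRYPTO_KINDS: usize = 3;

// Stand-in types: a "kind" is a FourCC-like tag, a "node id" a string.
fn add_node_id(
    ids: &mut BTreeMap<[u8; 4], String>,
    kind: [u8; 4],
    id: String,
) -> Result<Option<String>, String> {
    if let Some(old) = ids.get(&kind) {
        if *old == id {
            return Ok(None); // already present, nothing to do
        }
        // Same kind, different id: replace without changing the kind count
        return Ok(ids.insert(kind, id));
    }
    // New kind: enforce the cap before inserting
    if ids.len() == MAX_CRYPTO_KINDS {
        return Err("too many crypto kinds for this node".to_string());
    }
    ids.insert(kind, id);
    Ok(None)
}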

View File

@@ -108,18 +108,16 @@ impl RoutingTable {
         out
     }

-    pub(crate) fn debug_info_entries(&self, limit: usize, min_state: BucketEntryState) -> String {
+    pub(crate) fn debug_info_entries(&self, min_state: BucketEntryState) -> String {
         let inner = self.inner.read();
         let inner = &*inner;
         let cur_ts = get_aligned_timestamp();

         let mut out = String::new();

-        let mut b = 0;
-        let mut cnt = 0;
         out += &format!("Entries: {}\n", inner.bucket_entry_count());
         for ck in &VALID_CRYPTO_KINDS {
+            let mut b = 0;
             let blen = inner.buckets[ck].len();
             while b < blen {
                 let filtered_entries: Vec<(&PublicKey, &Arc<BucketEntry>)> = inner.buckets[ck][b]
@@ -142,14 +140,6 @@ impl RoutingTable {
                             BucketEntryState::Dead => "D",
                         }
                     );
-                    cnt += 1;
-                    if cnt >= limit {
-                        break;
-                    }
-                }
-                if cnt >= limit {
-                    break;
-                }
                 }
             }
             b += 1;
@@ -174,12 +164,13 @@ impl RoutingTable {
         const COLS: usize = 16;
         out += "Buckets:\n";
         for ck in &VALID_CRYPTO_KINDS {
+            out += &format!(" {}:\n", ck);
             let rows = inner.buckets[ck].len() / COLS;
             let mut r = 0;
             let mut b = 0;
             while r < rows {
                 let mut c = 0;
                 out += format!(" {:>3}: ", b).as_str();
                 while c < COLS {
                     let mut cnt = 0;
                     for e in inner.buckets[ck][b].entries() {

View File

@@ -590,6 +590,12 @@ impl RoutingTable {
     fn queue_bucket_kicks(&self, node_ids: TypedKeySet) {
         for node_id in node_ids.iter() {
+            // Skip node ids we didn't add to buckets
+            if !VALID_CRYPTO_KINDS.contains(&node_id.kind) {
+                continue;
+            }
+
+            // Put it in the kick queue
             let x = self.unlocked_inner.calculate_bucket_index(node_id);
             self.unlocked_inner.kick_queue.lock().insert(x);
         }

View File

@@ -604,21 +604,33 @@ impl RoutingTableInner {
     }

     // Update buckets with new node ids we may have learned belong to this entry
-    fn update_bucket_entries(&mut self, entry: Arc<BucketEntry>, node_ids: &[TypedKey]) {
+    fn update_bucket_entries(
+        &mut self,
+        entry: Arc<BucketEntry>,
+        node_ids: &[TypedKey],
+    ) -> EyreResult<()> {
         entry.with_mut_inner(|e| {
             let existing_node_ids = e.node_ids();
             for node_id in node_ids {
-                if !existing_node_ids.contains(node_id) {
-                    // Add new node id to entry
-                    if let Some(old_node_id) = e.add_node_id(*node_id) {
-                        // Remove any old node id for this crypto kind
+                // Skip node ids that exist already
+                if existing_node_ids.contains(node_id) {
+                    continue;
+                }
+                // Add new node id to entry
+                let ck = node_id.kind;
+                if let Some(old_node_id) = e.add_node_id(*node_id)? {
+                    // Remove any old node id for this crypto kind
+                    if VALID_CRYPTO_KINDS.contains(&ck) {
                         let bucket_index = self.unlocked_inner.calculate_bucket_index(&old_node_id);
                         let bucket = self.get_bucket_mut(bucket_index);
                         bucket.remove_entry(&old_node_id.value);
                         self.unlocked_inner.kick_queue.lock().insert(bucket_index);
                     }
+                }
                 // Bucket the entry appropriately
+                if VALID_CRYPTO_KINDS.contains(&ck) {
                     let bucket_index = self.unlocked_inner.calculate_bucket_index(node_id);
                     let bucket = self.get_bucket_mut(bucket_index);
                     bucket.add_existing_entry(node_id.value, entry.clone());
@@ -627,6 +639,7 @@ impl RoutingTableInner {
                     self.unlocked_inner.kick_queue.lock().insert(bucket_index);
                 }
             }
+            Ok(())
         })
     }
@@ -636,7 +649,7 @@ impl RoutingTableInner {
     fn create_node_ref<F>(
         &mut self,
         outer_self: RoutingTable,
-        node_ids: &[TypedKey],
+        node_ids: &TypedKeySet,
         update_func: F,
     ) -> Option<NodeRef>
     where
@@ -651,11 +664,12 @@ impl RoutingTableInner {
         // Look up all bucket entries and make sure we only have zero or one
        // If we have more than one, pick the one with the best cryptokind to add node ids to
         let mut best_entry: Option<Arc<BucketEntry>> = None;
-        for node_id in node_ids {
+        for node_id in node_ids.iter() {
+            // Ignore node ids we don't support
             if !VALID_CRYPTO_KINDS.contains(&node_id.kind) {
-                log_rtab!(error "can't look up node id with invalid crypto kind");
-                return None;
+                continue;
             }
+            // Find the first in crypto sort order
             let bucket_index = self.unlocked_inner.calculate_bucket_index(node_id);
             let bucket = self.get_bucket(bucket_index);
             if let Some(entry) = bucket.entry(&node_id.value) {
@@ -673,7 +687,10 @@ impl RoutingTableInner {
         // If the entry does exist already, update it
         if let Some(best_entry) = best_entry {
             // Update the entry with all of the node ids
-            self.update_bucket_entries(best_entry.clone(), node_ids);
+            if let Err(e) = self.update_bucket_entries(best_entry.clone(), node_ids) {
+                log_rtab!(debug "Not registering new ids for existing node: {}", e);
+                return None;
+            }

             // Make a noderef to return
             let nr = NodeRef::new(outer_self.clone(), best_entry.clone(), None);
@@ -694,7 +711,10 @@ impl RoutingTableInner {
         self.unlocked_inner.kick_queue.lock().insert(bucket_entry);

         // Update the other bucket entries with the remaining node ids
-        self.update_bucket_entries(new_entry.clone(), node_ids);
+        if let Err(e) = self.update_bucket_entries(new_entry.clone(), node_ids) {
+            log_rtab!(debug "Not registering new node: {}", e);
+            return None;
+        }

         // Make node ref to return
         let nr = NodeRef::new(outer_self.clone(), new_entry.clone(), None);
@@ -832,7 +852,7 @@ impl RoutingTableInner {
         descriptor: ConnectionDescriptor,
         timestamp: Timestamp,
     ) -> Option<NodeRef> {
-        let out = self.create_node_ref(outer_self, &[node_id], |_rti, e| {
+        let out = self.create_node_ref(outer_self, &TypedKeySet::from(node_id), |_rti, e| {
             // this node is live because it literally just connected to us
             e.touch_last_seen(timestamp);
         });

View File

@@ -150,8 +150,10 @@ impl RPCProcessor {
         remote_sr_pubkey: TypedKey,
         pr_pubkey: TypedKey,
     ) -> Result<NetworkResult<()>, RPCError> {
-        // Get sender id
-        let sender_id = detail.envelope.get_sender_id();
+        // Get sender id of the peer with the crypto kind of the route
+        let Some(sender_id) = detail.peer_noderef.node_ids().get(pr_pubkey.kind) else {
+            return Ok(NetworkResult::invalid_message("route node doesnt have a required crypto kind for routed operation"));
+        };

         // Look up the private route and ensure it's one in our spec store
         // Ensure the route is validated, and construct a return safetyspec that matches the inbound preferences
@@ -162,7 +164,7 @@ impl RPCProcessor {
             &pr_pubkey,
             &routed_operation.signatures,
             &routed_operation.data,
-            sender_id,
+            sender_id.value,
             |rssd, rsd| {
                 (
                     rsd.secret_key,

View File

@@ -360,12 +360,9 @@ impl VeilidAPI {
         let args: Vec<String> = args.split_whitespace().map(|s| s.to_owned()).collect();

         let mut min_state = BucketEntryState::Unreliable;
-        let mut limit = 20;
         for arg in args {
             if let Some(ms) = get_bucket_entry_state(&arg) {
                 min_state = ms;
-            } else if let Some(lim) = get_number(&arg) {
-                limit = lim;
             } else {
                 apibail_invalid_argument!("debug_entries", "unknown", arg);
             }
@@ -373,7 +370,7 @@ impl VeilidAPI {

         // Dump routing table entries
         let routing_table = self.network_manager()?.routing_table();
-        Ok(routing_table.debug_info_entries(limit, min_state))
+        Ok(routing_table.debug_info_entries(min_state))
     }

     async fn debug_entry(&self, args: String) -> Result<String, VeilidAPIError> {
@@ -881,7 +878,7 @@ impl VeilidAPI {
 help
 buckets [dead|reliable]
 dialinfo
-entries [dead|reliable] [limit]
+entries [dead|reliable]
 entry <node>
 nodeinfo
 config [key [new value]]

View File

@@ -267,7 +267,7 @@ pub struct VeilidStateAttachment {
 )]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct PeerTableData {
-    pub node_ids: TypedKeySet,
+    pub node_ids: Vec<String>,
     pub peer_address: PeerAddress,
     pub peer_stats: PeerStats,
 }
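Reviewer note: PeerTableData crosses the client API boundary, so node ids are now carried as plain strings, mirroring the conversion in the network_manager hunk above. A self-contained sketch of that flattening (the tuple shape and formatting here are assumptions for illustration only):

// Flattening typed keys into display strings for the client-facing struct.
fn flatten_node_ids(ids: &[([u8; 4], String)]) -> Vec<String> {
    ids.iter()
        .map(|(kind, key)| format!("{}:{}", String::from_utf8_lossy(kind), key))
        .collect()
}

fn main() {
    let ids = vec![(*b"VLD0", "abc123".to_string()), (*b"NONE", "def456".to_string())];
    assert_eq!(flatten_node_ids(&ids), vec!["VLD0:abc123", "NONE:def456"]);
}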

View File

@@ -919,8 +919,99 @@ impl VeilidConfig {
         Ok(())
     }

-    // Get the node id from config if one is specified
-    // Must be done -after- protected store startup
+    #[cfg(not(test))]
+    async fn init_node_id(
+        &self,
+        vcrypto: CryptoSystemVersion,
+        protected_store: intf::ProtectedStore,
+    ) -> Result<(TypedKey, TypedSecret), VeilidAPIError> {
+        let ck = vcrypto.kind();
+        let mut node_id = self.inner.read().network.routing_table.node_id.get(ck);
+        let mut node_id_secret = self
+            .inner
+            .read()
+            .network
+            .routing_table
+            .node_id_secret
+            .get(ck);
+
+        // See if node id was previously stored in the protected store
+        if node_id.is_none() {
+            debug!("pulling node_id_{} from storage", ck);
+            if let Some(s) = protected_store
+                .load_user_secret_string(format!("node_id_{}", ck))
+                .await
+                .map_err(VeilidAPIError::internal)?
+            {
+                debug!("node_id_{} found in storage", ck);
+                node_id = match TypedKey::from_str(s.as_str()) {
+                    Ok(v) => Some(v),
+                    Err(_) => {
+                        debug!("node id in protected store is not valid");
+                        None
+                    }
+                }
+            } else {
+                debug!("node_id_{} not found in storage", ck);
+            }
+        }
+
+        // See if node id secret was previously stored in the protected store
+        if node_id_secret.is_none() {
+            debug!("pulling node id secret from storage");
+            if let Some(s) = protected_store
+                .load_user_secret_string(format!("node_id_secret_{}", ck))
+                .await
+                .map_err(VeilidAPIError::internal)?
+            {
+                debug!("node_id_secret_{} found in storage", ck);
+                node_id_secret = match TypedSecret::from_str(s.as_str()) {
+                    Ok(v) => Some(v),
+                    Err(_) => {
+                        debug!("node id secret in protected store is not valid");
+                        None
+                    }
+                }
+            } else {
+                debug!("node_id_secret_{} not found in storage", ck);
+            }
+        }
+
+        // If we have a node id from storage, check it
+        let (node_id, node_id_secret) =
+            if let (Some(node_id), Some(node_id_secret)) = (node_id, node_id_secret) {
+                // Validate node id
+                if !vcrypto.validate_keypair(&node_id.value, &node_id_secret.value) {
+                    apibail_generic!(format!(
+                        "node_id_secret_{} and node_id_key_{} don't match",
+                        ck, ck
+                    ));
+                }
+                (node_id, node_id_secret)
+            } else {
+                // If we still don't have a valid node id, generate one
+                debug!("generating new node_id_{}", ck);
+                let kp = vcrypto.generate_keypair();
+                (TypedKey::new(ck, kp.key), TypedSecret::new(ck, kp.secret))
+            };
+        info!("Node Id: {}", node_id);
+
+        // Save the node id / secret in storage
+        protected_store
+            .save_user_secret_string(format!("node_id_{}", ck), node_id.to_string())
+            .await
+            .map_err(VeilidAPIError::internal)?;
+        protected_store
+            .save_user_secret_string(format!("node_id_secret_{}", ck), node_id_secret.to_string())
+            .await
+            .map_err(VeilidAPIError::internal)?;
+
+        Ok((node_id, node_id_secret))
+    }
+
+    /// Get the node id from config if one is specified
+    /// Must be done -after- protected store startup
+    #[cfg_attr(test, allow(unused_variables))]
     pub async fn init_node_ids(
         &self,
         crypto: Crypto,
@@ -934,88 +1025,14 @@ impl VeilidConfig {
                 .get(ck)
                 .expect("Valid crypto kind is not actually valid.");

-            let mut node_id = self.inner.read().network.routing_table.node_id.get(ck);
-            let mut node_id_secret = self
-                .inner
-                .read()
-                .network
-                .routing_table
-                .node_id_secret
-                .get(ck);
-
-            // See if node id was previously stored in the protected store
-            if node_id.is_none() {
-                debug!("pulling node_id_{} from storage", ck);
-                if let Some(s) = protected_store
-                    .load_user_secret_string(format!("node_id_{}", ck))
-                    .await
-                    .map_err(VeilidAPIError::internal)?
-                {
-                    debug!("node_id_{} found in storage", ck);
-                    node_id = match TypedKey::from_str(s.as_str()) {
-                        Ok(v) => Some(v),
-                        Err(_) => {
-                            debug!("node id in protected store is not valid");
-                            None
-                        }
-                    }
-                } else {
-                    debug!("node_id_{} not found in storage", ck);
-                }
-            }
-
-            // See if node id secret was previously stored in the protected store
-            if node_id_secret.is_none() {
-                debug!("pulling node id secret from storage");
-                if let Some(s) = protected_store
-                    .load_user_secret_string(format!("node_id_secret_{}", ck))
-                    .await
-                    .map_err(VeilidAPIError::internal)?
-                {
-                    debug!("node_id_secret_{} found in storage", ck);
-                    node_id_secret = match TypedSecret::from_str(s.as_str()) {
-                        Ok(v) => Some(v),
-                        Err(_) => {
-                            debug!("node id secret in protected store is not valid");
-                            None
-                        }
-                    }
-                } else {
-                    debug!("node_id_secret_{} not found in storage", ck);
-                }
-            }
-
-            // If we have a node id from storage, check it
+            #[cfg(test)]
+            let (node_id, node_id_secret) = {
+                let kp = vcrypto.generate_keypair();
+                (TypedKey::new(ck, kp.key), TypedSecret::new(ck, kp.secret))
+            };
+            #[cfg(not(test))]
             let (node_id, node_id_secret) =
-                if let (Some(node_id), Some(node_id_secret)) = (node_id, node_id_secret) {
-                    // Validate node id
-                    if !vcrypto.validate_keypair(&node_id.value, &node_id_secret.value) {
-                        apibail_generic!(format!(
-                            "node_id_secret_{} and node_id_key_{} don't match",
-                            ck, ck
-                        ));
-                    }
-                    (node_id, node_id_secret)
-                } else {
-                    // If we still don't have a valid node id, generate one
-                    debug!("generating new node_id_{}", ck);
-                    let kp = vcrypto.generate_keypair();
-                    (TypedKey::new(ck, kp.key), TypedSecret::new(ck, kp.secret))
-                };
-            info!("Node Id: {}", node_id);
-
-            // Save the node id / secret in storage
-            protected_store
-                .save_user_secret_string(format!("node_id_{}", ck), node_id.to_string())
-                .await
-                .map_err(VeilidAPIError::internal)?;
-            protected_store
-                .save_user_secret_string(
-                    format!("node_id_secret_{}", ck),
-                    node_id_secret.to_string(),
-                )
-                .await
-                .map_err(VeilidAPIError::internal)?;
+                self.init_node_id(vcrypto, protected_store.clone()).await?;

             // Save for config
             out_node_id.add(node_id);

View File

@@ -7,12 +7,13 @@ edition = "2021"
 crate-type = ["cdylib", "staticlib", "rlib"]

 [features]
-default = [ "rt-tokio" ]
+default = [ "rt-tokio", "veilid-core/default" ]
+crypto-test = [ "rt-tokio", "veilid-core/crypto-test"]
 rt-async-std = [ "veilid-core/rt-async-std", "async-std", "opentelemetry/rt-async-std", "opentelemetry-otlp/grpc-sys"]
 rt-tokio = [ "veilid-core/rt-tokio", "tokio", "tokio-stream", "tokio-util", "opentelemetry/rt-tokio"]

 [dependencies]
-veilid-core = { path="../../veilid-core" }
+veilid-core = { path="../../veilid-core", default-features = false }
 tracing = { version = "^0", features = ["log", "attributes"] }
 tracing-subscriber = "^0"
 parking_lot = "^0"

View File

@@ -11,13 +11,16 @@ name = "veilid-server"
 path = "src/main.rs"

 [features]
-default = [ "rt-tokio" ]
+default = [ "rt-tokio", "veilid-core/default" ]
+crypto-test = [ "rt-tokio", "veilid-core/crypto-test"]
+crypto-test-none = [ "rt-tokio", "veilid-core/crypto-test-none"]
 rt-async-std = [ "veilid-core/rt-async-std", "async-std", "opentelemetry/rt-async-std", "opentelemetry-otlp/grpc-sys" ]
 rt-tokio = [ "veilid-core/rt-tokio", "tokio", "tokio-stream", "tokio-util", "opentelemetry/rt-tokio", "console-subscriber" ]
 tracking = [ "veilid-core/tracking" ]

 [dependencies]
-veilid-core = { path = "../veilid-core" }
+veilid-core = { path = "../veilid-core", default-features = false }
 tracing = { version = "^0", features = ["log", "attributes"] }
 tracing-subscriber = { version = "^0", features = ["env-filter"] }
 tracing-appender = "^0"

View File

@@ -8,8 +8,13 @@ license = "LGPL-2.0-or-later OR MPL-2.0 OR (MIT AND BSD-3-Clause)"
 [lib]
 crate-type = ["cdylib", "rlib"]

+[features]
+default = [ "veilid-core/rt-tokio", "veilid-core/default" ]
+crypto-test = [ "veilid-core/rt-tokio", "veilid-core/crypto-test"]
+
 [dependencies]
-veilid-core = { path = "../veilid-core" }
+veilid-core = { path = "../veilid-core", default-features = false }
 tracing = { version = "^0", features = ["log", "attributes"] }
 tracing-wasm = "^0"