eyre work

John Smith 2022-07-06 23:15:51 -04:00
parent 2f05611170
commit cd0cd78e30
21 changed files with 345 additions and 229 deletions

Cargo.lock (generated)

@@ -5047,6 +5047,7 @@ dependencies = [
  "capnpc",
  "cfg-if 1.0.0",
  "clap 3.2.8",
+ "color-eyre",
  "config",
  "ctrlc",
  "daemonize",


@@ -30,3 +30,6 @@ rtnetlink = { path = "./external/netlink/rtnetlink" }
 [profile.release]
 opt-level = "s"
 lto = true
+
+[profile.dev.package.backtrace]
+opt-level = 3
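(A note, not part of the diff: the `[profile.dev.package.backtrace]` override matches the setup color-eyre's documentation suggests — compiling the backtrace crate with optimizations keeps backtrace capture fast in debug builds, which matters once every error report can carry a capture.)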


@@ -222,26 +222,29 @@ impl AttachmentManager {
     #[instrument(level = "debug", skip(self))]
     async fn attachment_maintainer(self) {
-        trace!("attachment starting");
+        debug!("attachment starting");
         let netman = {
             let mut inner = self.inner.lock();
             inner.attach_timestamp = Some(intf::get_timestamp());
             inner.network_manager.clone()
         };
-        let mut started = true;
+        let mut restart;
+        loop {
+            restart = false;
         if let Err(err) = netman.startup().await {
             error!("network startup failed: {}", err);
-            started = false;
+            netman.shutdown().await;
+            break;
         }
-        if started {
-            trace!("started maintaining peers");
+        debug!("started maintaining peers");
         while self.inner.lock().maintain_peers {
             // tick network manager
             if let Err(err) = netman.tick().await {
                 error!("Error in network manager: {}", err);
                 self.inner.lock().maintain_peers = false;
+                restart = true;
                 break;
             }
@@ -250,10 +253,18 @@ impl AttachmentManager {
             // sleep should be at the end in case maintain_peers changes state
             intf::sleep(1000).await;
         }
-        trace!("stopped maintaining peers");
-        trace!("stopping network");
+        debug!("stopped maintaining peers");
+        debug!("stopping network");
         netman.shutdown().await;
+        if !restart {
+            break;
+        }
+        debug!("completely restarting attachment");
+        // chill out for a second first, give network stack time to settle out
+        intf::sleep(1000).await;
         }
         trace!("stopping attachment");
@@ -261,7 +272,7 @@ impl AttachmentManager {
         let _output = attachment_machine
             .consume(&AttachmentInput::AttachmentStopped)
             .await;
-        trace!("attachment stopped");
+        debug!("attachment stopped");
         self.inner.lock().attach_timestamp = None;
     }


@@ -76,7 +76,7 @@ impl ServicesContext {
         // Init node id from config now that protected store is set up
         if let Err(e) = self.config.init_node_id(protected_store.clone()).await {
             self.shutdown().await;
-            return Err(VeilidAPIError::Internal { message: e });
+            return Err(e);
         }
         // Set up tablestore
@@ -177,9 +177,7 @@ impl VeilidCoreContext {
         // Set up config from callback
         trace!("setup config with callback");
         let mut config = VeilidConfig::new();
-        if let Err(e) = config.setup(config_callback) {
-            return Err(VeilidAPIError::Internal { message: e });
-        }
+        config.setup(config_callback)?;
         Self::new_common(update_callback, config).await
     }
@@ -192,9 +190,7 @@
         // Set up config from callback
         trace!("setup config with json");
         let mut config = VeilidConfig::new();
-        if let Err(e) = config.setup_from_json(config_json) {
-            return Err(VeilidAPIError::Internal { message: e });
-        }
+        config.setup_from_json(config_json)?;
         Self::new_common(update_callback, config).await
     }


@@ -1,17 +1,18 @@
+use crate::veilid_rng::*;
 use crate::xx::*;
+use crate::*;
 use core::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd};
 use core::convert::{TryFrom, TryInto};
 use core::fmt;
 use core::hash::{Hash, Hasher};
-use crate::veilid_rng::*;
-use ed25519_dalek::{Keypair, PublicKey, Signature};
-use serde::{Deserialize, Serialize};
 use data_encoding::BASE64URL_NOPAD;
 use digest::generic_array::typenum::U64;
 use digest::{Digest, Output};
+use ed25519_dalek::{Keypair, PublicKey, Signature};
 use generic_array::GenericArray;
+use serde::{Deserialize, Serialize};

 //////////////////////////////////////////////////////////////////////
@@ -71,14 +72,14 @@ macro_rules! byte_array_type {
             Self { bytes, valid: true }
         }

-        pub fn try_from_vec(v: Vec<u8>) -> Result<Self, String> {
+        pub fn try_from_vec(v: Vec<u8>) -> Result<Self, VeilidAPIError> {
             let mut this = Self {
                 bytes: [0u8; $size],
                 valid: true,
             };
             if v.len() != $size {
-                return Err(format!(
+                apibail_generic!(format!(
                     "Expected a Vec of length {} but it was {}",
                     $size,
                     v.len()
@@ -139,25 +140,25 @@ macro_rules! byte_array_type {
             BASE64URL_NOPAD.encode(&self.bytes)
         }

-        pub fn try_decode(input: &str) -> Result<Self, String> {
+        pub fn try_decode(input: &str) -> Result<Self, VeilidAPIError> {
             let mut bytes = [0u8; $size];
             let res = BASE64URL_NOPAD.decode_len(input.len());
             match res {
                 Ok(v) => {
                     if v != $size {
-                        return Err("Incorrect length in decode".to_owned());
+                        apibail_generic!("Incorrect length in decode");
                     }
                 }
                 Err(_) => {
-                    return Err("Failed to decode".to_owned());
+                    apibail_generic!("Failed to decode");
                 }
             }
             let res = BASE64URL_NOPAD.decode_mut(input.as_bytes(), &mut bytes);
             match res {
                 Ok(_) => Ok(Self::new(bytes)),
-                Err(_) => Err("Failed to decode".to_owned()),
+                Err(_) => apierr_generic!("Failed to decode"),
             }
         }
     }
@@ -255,28 +256,28 @@ macro_rules! byte_array_type {
         }

         impl TryFrom<String> for $name {
-            type Error = String;
+            type Error = VeilidAPIError;
             fn try_from(value: String) -> Result<Self, Self::Error> {
                 $name::try_from(value.as_str())
             }
         }

         impl TryFrom<&str> for $name {
-            type Error = String;
+            type Error = VeilidAPIError;
             fn try_from(value: &str) -> Result<Self, Self::Error> {
                 let mut out = $name::default();
                 if value == "" {
                     return Ok(out);
                 }
                 if value.len() != ($size * 2) {
-                    return Err(concat!(stringify!($name), " is incorrect length").to_owned());
+                    apibail_generic!(concat!(stringify!($name), " is incorrect length"));
                 }
                 match hex::decode_to_slice(value, &mut out.bytes) {
                     Ok(_) => {
                         out.valid = true;
                         Ok(out)
                     }
-                    Err(err) => Err(format!("{}", err)),
+                    Err(err) => apierr_generic!(err),
                 }
             }
         }
@@ -372,7 +373,7 @@ pub fn sign(
     dht_key: &DHTKey,
     dht_key_secret: &DHTKeySecret,
     data: &[u8],
-) -> Result<DHTSignature, String> {
+) -> Result<DHTSignature, VeilidAPIError> {
     assert!(dht_key.valid);
     assert!(dht_key_secret.valid);
@@ -381,32 +382,36 @@ pub fn sign(
     kpb[..DHT_KEY_SECRET_LENGTH].copy_from_slice(&dht_key_secret.bytes);
     kpb[DHT_KEY_SECRET_LENGTH..].copy_from_slice(&dht_key.bytes);
-    let keypair = Keypair::from_bytes(&kpb).map_err(|_| "Keypair is invalid".to_owned())?;
+    let keypair = Keypair::from_bytes(&kpb).map_err(mapapierr_parse!("Keypair is invalid"))?;
     let mut dig = Blake3Digest512::new();
     dig.update(data);
     let sig = keypair
         .sign_prehashed(dig, None)
-        .map_err(|_| "Signature failed".to_owned())?;
+        .map_err(VeilidAPIError::internal)?;
     let dht_sig = DHTSignature::new(sig.to_bytes());
     Ok(dht_sig)
 }

-pub fn verify(dht_key: &DHTKey, data: &[u8], signature: &DHTSignature) -> Result<(), String> {
+pub fn verify(
+    dht_key: &DHTKey,
+    data: &[u8],
+    signature: &DHTSignature,
+) -> Result<(), VeilidAPIError> {
     assert!(dht_key.valid);
     assert!(signature.valid);
     let pk =
-        PublicKey::from_bytes(&dht_key.bytes).map_err(|_| "Public key is invalid".to_owned())?;
-    let sig =
-        Signature::from_bytes(&signature.bytes).map_err(|_| "Signature is invalid".to_owned())?;
+        PublicKey::from_bytes(&dht_key.bytes).map_err(mapapierr_parse!("Public key is invalid"))?;
+    let sig = Signature::from_bytes(&signature.bytes)
+        .map_err(mapapierr_parse!("Signature is invalid"))?;
     let mut dig = Blake3Digest512::new();
     dig.update(data);
     pk.verify_prehashed(dig, None, &sig)
-        .map_err(|_| "Verification failed".to_owned())?;
+        .map_err(mapapierr_parse!("Verification failed"))?;
     Ok(())
 }
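For readers skimming the new error plumbing: `mapapierr_parse!` (defined later in this commit) simply builds the closure that `map_err` needs. A rough sketch of what a call site above expands to, using the `VeilidAPIError::parse_error` constructor; names are taken from the surrounding code:

    // Hypothetical expansion of `.map_err(mapapierr_parse!("Public key is invalid"))?`:
    let pk = PublicKey::from_bytes(&dht_key.bytes)
        .map_err(|e| VeilidAPIError::parse_error("Public key is invalid", e))?;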


@@ -57,14 +57,8 @@ pub async fn test_sign_and_verify() {
     assert_eq!(key::verify(&dht_key, LOREM_IPSUM.as_bytes(), &a1), Ok(()));
     assert_eq!(key::verify(&dht_key2, LOREM_IPSUM.as_bytes(), &a2), Ok(()));
-    assert_eq!(
-        key::verify(&dht_key, LOREM_IPSUM.as_bytes(), &b1),
-        Err("Verification failed".to_owned())
-    );
-    assert_eq!(
-        key::verify(&dht_key2, LOREM_IPSUM.as_bytes(), &b2),
-        Err("Verification failed".to_owned())
-    );
+    assert!(key::verify(&dht_key, LOREM_IPSUM.as_bytes(), &b1).is_err());
+    assert!(key::verify(&dht_key2, LOREM_IPSUM.as_bytes(), &b2).is_err());

     // Try verifications that should work
     assert_eq!(
@@ -84,22 +78,10 @@ pub async fn test_sign_and_verify() {
         Ok(())
     );
     // Try verifications that shouldn't work
-    assert_eq!(
-        key::verify(&dht_key2, LOREM_IPSUM.as_bytes(), &dht_sig),
-        Err("Verification failed".to_owned())
-    );
-    assert_eq!(
-        key::verify(&dht_key, LOREM_IPSUM.as_bytes(), &dht_sig2),
-        Err("Verification failed".to_owned())
-    );
-    assert_eq!(
-        key::verify(&dht_key2, CHEEZBURGER.as_bytes(), &dht_sig_c),
-        Err("Verification failed".to_owned())
-    );
-    assert_eq!(
-        key::verify(&dht_key, CHEEZBURGER.as_bytes(), &dht_sig),
-        Err("Verification failed".to_owned())
-    );
+    assert!(key::verify(&dht_key2, LOREM_IPSUM.as_bytes(), &dht_sig).is_err());
+    assert!(key::verify(&dht_key, LOREM_IPSUM.as_bytes(), &dht_sig2).is_err());
+    assert!(key::verify(&dht_key2, CHEEZBURGER.as_bytes(), &dht_sig_c).is_err());
+    assert!(key::verify(&dht_key, CHEEZBURGER.as_bytes(), &dht_sig).is_err());
 }

 pub async fn test_key_conversions() {
@@ -214,9 +196,9 @@ pub async fn test_encode_decode() {
     // Failures
     let f1 = key::DHTKeySecret::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA");
-    assert_eq!(f1, Err("Incorrect length in decode".to_owned()));
+    assert!(f1.is_err());
     let f2 = key::DHTKeySecret::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA&");
-    assert_eq!(f2, Err("Failed to decode".to_owned()));
+    assert!(f2.is_err());
 }

 async fn test_hash() {


@@ -29,6 +29,7 @@ mod receipt_manager;
 mod routing_table;
 mod rpc_processor;
 mod veilid_api;
+#[macro_use]
 mod veilid_config;
 mod veilid_layer_filter;
 mod veilid_rng;


@@ -1227,8 +1227,8 @@ impl NetworkManager {
             // pinging this node regularly to keep itself in the routing table
             routing_table.lookup_node_ref(recipient_id).ok_or_else(|| {
                 format!(
-                    "Inbound relay asked for recipient not in routing table: {}",
-                    recipient_id
+                    "Inbound relay asked for recipient not in routing table: sender_id={:?} recipient={:?}",
+                    sender_id, recipient_id
                 )
             })?
         };


@@ -98,7 +98,7 @@ impl LatencyStatsAccounting {
         self.rolling_latencies.push_back(latency);
         let mut ls = LatencyStats {
-            fastest: 0,
+            fastest: u64::MAX,
             average: 0,
             slowest: 0,
         };
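The `fastest` seed change matters because the field is folded with a running minimum over the rolling latencies; seeded with 0 it could never report anything but 0. A minimal sketch of that fold, assuming the loop shape used by the accounting code (sample values are illustrative):

    let rolling_latencies: Vec<u64> = vec![120, 80, 200]; // hypothetical samples
    let mut fastest = u64::MAX; // seed the running min with the max value so the first sample wins
    let mut slowest = 0u64;
    for latency in rolling_latencies.iter().copied() {
        fastest = fastest.min(latency);
        slowest = slowest.max(latency);
    }
    assert_eq!((fastest, slowest), (80, 200));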


@@ -10,17 +10,20 @@ pub enum RPCError {
     Internal(String),
 }

-pub fn rpc_error_internal<T: AsRef<str>>(x: T) -> RPCError {
-    error!("RPCError Internal: {}", x.as_ref());
-    RPCError::Internal(x.as_ref().to_owned())
+pub fn rpc_error_internal<T: ToString>(x: T) -> RPCError {
+    let x = x.to_string();
+    error!("RPCError Internal: {}", x);
+    RPCError::Internal(x)
 }

-pub fn rpc_error_invalid_format<T: AsRef<str>>(x: T) -> RPCError {
-    error!("RPCError Invalid Format: {}", x.as_ref());
-    RPCError::InvalidFormat(x.as_ref().to_owned())
+pub fn rpc_error_invalid_format<T: ToString>(x: T) -> RPCError {
+    let x = x.to_string();
+    error!("RPCError Invalid Format: {}", x);
+    RPCError::InvalidFormat(x)
 }

-pub fn rpc_error_protocol<T: AsRef<str>>(x: T) -> RPCError {
-    error!("RPCError Protocol: {}", x.as_ref());
-    RPCError::Protocol(x.as_ref().to_owned())
+pub fn rpc_error_protocol<T: ToString>(x: T) -> RPCError {
+    let x = x.to_string();
+    error!("RPCError Protocol: {}", x);
+    RPCError::Protocol(x)
 }

 pub fn rpc_error_capnp_error(e: capnp::Error) -> RPCError {
     error!("RPCError Protocol: capnp error: {}", &e.description);
@@ -30,9 +33,10 @@ pub fn rpc_error_capnp_notinschema(e: capnp::NotInSchema) -> RPCError {
     error!("RPCError Protocol: not in schema: {}", &e.0);
     RPCError::Protocol(format!("not in schema: {}", &e.0))
 }

-pub fn rpc_error_unimplemented<T: AsRef<str>>(x: T) -> RPCError {
-    error!("RPCError Unimplemented: {}", x.as_ref());
-    RPCError::Unimplemented(x.as_ref().to_owned())
+pub fn rpc_error_unimplemented<T: ToString>(x: T) -> RPCError {
+    let x = x.to_string();
+    error!("RPCError Unimplemented: {}", x);
+    RPCError::Unimplemented(x)
 }

 impl fmt::Display for RPCError {
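Switching the bound from `AsRef<str>` to `ToString` lets these constructors take either a plain message or any `Display`-able error value. Illustrative call sites (not from this commit):

    let e1 = rpc_error_internal("tick task panicked"); // &str still works
    let e2 = rpc_error_protocol(std::io::Error::from(std::io::ErrorKind::BrokenPipe)); // error types work too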


@@ -260,7 +260,7 @@ fn config_callback(key: String) -> ConfigCallbackReturn {
         _ => {
             let err = format!("config key '{}' doesn't exist", key);
             debug!("{}", err);
-            Err(err)
+            apierr_internal!(err)
         }
     }
 }


@@ -135,18 +135,14 @@ impl VeilidAPI {
         let config = self.config()?;
         let args = args.trim_start();
         if args.is_empty() {
-            return config
-                .get_key_json("")
-                .map_err(|e| VeilidAPIError::Internal { message: e });
+            return config.get_key_json("");
         }
         let (arg, rest) = args.split_once(' ').unwrap_or((args, ""));
         let rest = rest.trim_start().to_owned();

         // One argument is 'config get'
         if rest.is_empty() {
-            return config
-                .get_key_json(arg)
-                .map_err(|e| VeilidAPIError::Internal { message: e });
+            return config.get_key_json(arg);
         }

         // More than one argument is 'config set'
@@ -156,15 +152,11 @@
             self.get_state().await?.attachment.state,
             AttachmentState::Detached
         ) {
-            return Err(VeilidAPIError::Internal {
-                message: "Must be detached to change config".to_owned(),
-            });
+            apibail_internal!("Must be detached to change config");
         }

         // Change the config key
-        config
-            .set_key_json(arg, &rest)
-            .map_err(|e| VeilidAPIError::Internal { message: e })?;
+        config.set_key_json(arg, &rest)?;
         Ok("Config value set".to_owned())
     }
@@ -177,9 +169,7 @@
             self.get_state().await?.attachment.state,
             AttachmentState::Detached | AttachmentState::Detaching
         ) {
-            return Err(VeilidAPIError::Internal {
-                message: "Must be detached to purge".to_owned(),
-            });
+            apibail_internal!("Must be detached to purge");
         }
         self.network_manager()?.routing_table().purge();
         Ok("Buckets purged".to_owned())
@@ -203,10 +193,8 @@
             self.get_state().await?.attachment.state,
             AttachmentState::Detached
         ) {
-            return Err(VeilidAPIError::Internal {
-                message: "Not detached".to_owned(),
-            });
-        };
+            apibail_internal!("Not detached");
+        }

         self.attach().await?;
@@ -218,9 +206,7 @@
             self.get_state().await?.attachment.state,
             AttachmentState::Detaching
         ) {
-            return Err(VeilidAPIError::Internal {
-                message: "Not attached".to_owned(),
-            });
+            apibail_internal!("Not attached");
         };

         self.detach().await?;


@@ -31,6 +31,62 @@ use xx::*;

 /////////////////////////////////////////////////////////////////////////////////////////////////////

+#[allow(unused_macros)]
+#[macro_export]
+macro_rules! apierr_generic {
+    ($x:expr) => {
+        Err(VeilidAPIError::generic($x))
+    };
+}
+
+#[allow(unused_macros)]
+#[macro_export]
+macro_rules! apierr_internal {
+    ($x:expr) => {
+        Err(VeilidAPIError::internal($x))
+    };
+}
+
+#[allow(unused_macros)]
+#[macro_export]
+macro_rules! apierr_parse {
+    ($x:expr, $y:expr) => {
+        Err(VeilidAPIError::parse_error($x, $y))
+    };
+}
+
+#[allow(unused_macros)]
+#[macro_export]
+macro_rules! mapapierr_parse {
+    ($x:expr) => {
+        |e| VeilidAPIError::parse_error($x, e)
+    };
+}
+
+#[allow(unused_macros)]
+#[macro_export]
+macro_rules! apibail_generic {
+    ($x:expr) => {
+        return Err(VeilidAPIError::generic($x));
+    };
+}
+
+#[allow(unused_macros)]
+#[macro_export]
+macro_rules! apibail_internal {
+    ($x:expr) => {
+        return Err(VeilidAPIError::internal($x));
+    };
+}
+
+#[allow(unused_macros)]
+#[macro_export]
+macro_rules! apibail_parse {
+    ($x:expr, $y:expr) => {
+        return Err(VeilidAPIError::parse_error($x, $y));
+    };
+}
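A quick sketch of how these macros read at a call site; the function below is hypothetical, not part of this commit. `apibail_*` early-returns an `Err`, `apierr_*` builds the `Err` expression in place, and `mapapierr_parse!` produces the closure handed to `map_err`:

    fn parse_port(s: &str) -> Result<u16, VeilidAPIError> {
        if s.is_empty() {
            // expands to: return Err(VeilidAPIError::parse_error("port must not be empty", s));
            apibail_parse!("port must not be empty", s);
        }
        // expands to: .map_err(|e| VeilidAPIError::parse_error("invalid port", e))
        s.parse::<u16>().map_err(mapapierr_parse!("invalid port"))
    }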
 #[derive(Clone, Debug, PartialOrd, PartialEq, Eq, Ord, Serialize, Deserialize)]
 #[serde(tag = "kind")]
 pub enum VeilidAPIError {
@@ -66,8 +122,63 @@ pub enum VeilidAPIError {
         context: String,
         argument: String,
     },
+    Generic {
+        message: String,
+    },
 }

+impl VeilidAPIError {
+    pub fn node_not_found(node_id: NodeId) -> Self {
+        Self::NodeNotFound { node_id }
+    }
+    pub fn no_dial_info(node_id: NodeId) -> Self {
+        Self::NoDialInfo { node_id }
+    }
+    pub fn no_peer_info(node_id: NodeId) -> Self {
+        Self::NoPeerInfo { node_id }
+    }
+    pub fn internal<T: ToString>(msg: T) -> Self {
+        Self::Internal {
+            message: msg.to_string(),
+        }
+    }
+    pub fn unimplemented<T: ToString>(msg: T) -> Self {
+        Self::Unimplemented {
+            message: msg.to_string(),
+        }
+    }
+    pub fn parse_error<T: ToString, S: ToString>(msg: T, value: S) -> Self {
+        Self::ParseError {
+            message: msg.to_string(),
+            value: value.to_string(),
+        }
+    }
+    pub fn invalid_argument<T: ToString, S: ToString, R: ToString>(
+        context: T,
+        argument: S,
+        value: R,
+    ) -> Self {
+        Self::InvalidArgument {
+            context: context.to_string(),
+            argument: argument.to_string(),
+            value: value.to_string(),
+        }
+    }
+    pub fn missing_argument<T: ToString, S: ToString>(context: T, argument: S) -> Self {
+        Self::MissingArgument {
+            context: context.to_string(),
+            argument: argument.to_string(),
+        }
+    }
+    pub fn generic<T: ToString>(msg: T) -> Self {
+        Self::Generic {
+            message: msg.to_string(),
+        }
+    }
+}
+
+impl std::error::Error for VeilidAPIError {}
+
 impl fmt::Display for VeilidAPIError {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
         match self {
@@ -111,6 +222,9 @@ impl fmt::Display for VeilidAPIError {
                     context, argument
                 )
             }
+            VeilidAPIError::Generic { message } => {
+                write!(f, "VeilidAPIError::Generic({})", message)
+            }
         }
     }
 }
@@ -1321,9 +1435,11 @@ impl SignedNodeInfo {
         node_id: NodeId,
         signature: DHTSignature,
         timestamp: u64,
-    ) -> Result<Self, String> {
-        let mut node_info_bytes = serde_cbor::to_vec(&node_info).map_err(map_to_string)?;
-        let mut timestamp_bytes = serde_cbor::to_vec(&timestamp).map_err(map_to_string)?;
+    ) -> Result<Self, VeilidAPIError> {
+        let mut node_info_bytes = serde_cbor::to_vec(&node_info)
+            .map_err(mapapierr_parse!("failed to encode node info as cbor"))?;
+        let mut timestamp_bytes = serde_cbor::to_vec(&timestamp)
+            .map_err(mapapierr_parse!("failed to encode timestamp as cbor"))?;

         node_info_bytes.append(&mut timestamp_bytes);
@@ -1339,11 +1455,13 @@
         node_info: NodeInfo,
         node_id: NodeId,
         secret: &DHTKeySecret,
-    ) -> Result<Self, String> {
+    ) -> Result<Self, VeilidAPIError> {
         let timestamp = intf::get_timestamp();
-        let mut node_info_bytes = serde_cbor::to_vec(&node_info).map_err(map_to_string)?;
-        let mut timestamp_bytes = serde_cbor::to_vec(&timestamp).map_err(map_to_string)?;
+        let mut node_info_bytes = serde_cbor::to_vec(&node_info)
+            .map_err(mapapierr_parse!("failed to encode node info as cbor"))?;
+        let mut timestamp_bytes = serde_cbor::to_vec(&timestamp)
+            .map_err(mapapierr_parse!("failed to encode timestamp as cbor"))?;

         node_info_bytes.append(&mut timestamp_bytes);


@@ -5,11 +5,11 @@ use serde::*;
 ////////////////////////////////////////////////////////////////////////////////////////////////

 cfg_if! {
     if #[cfg(target_arch = "wasm32")] {
-        pub type ConfigCallbackReturn = Result<Box<dyn core::any::Any>, String>;
+        pub type ConfigCallbackReturn = Result<Box<dyn core::any::Any>, VeilidAPIError>;
         pub type ConfigCallback = Arc<dyn Fn(String) -> ConfigCallbackReturn>;
     } else {
-        pub type ConfigCallbackReturn = Result<Box<dyn core::any::Any + Send>, String>;
+        pub type ConfigCallbackReturn = Result<Box<dyn core::any::Any + Send>, VeilidAPIError>;
         pub type ConfigCallback = Arc<dyn Fn(String) -> ConfigCallbackReturn + Send + Sync>;
     }
 }
@@ -281,10 +281,10 @@ impl VeilidConfig {
         }
     }

-    pub fn setup_from_json(&mut self, config: String) -> Result<(), String> {
+    pub fn setup_from_json(&mut self, config: String) -> Result<(), VeilidAPIError> {
         {
             let mut inner = self.inner.write();
-            *inner = serde_json::from_str(&config).map_err(map_to_string)?;
+            *inner = serde_json::from_str(&config).map_err(VeilidAPIError::generic)?;
         }

         // Validate settings
@@ -293,14 +293,14 @@
         Ok(())
     }

-    pub fn setup(&mut self, cb: ConfigCallback) -> Result<(), String> {
+    pub fn setup(&mut self, cb: ConfigCallback) -> Result<(), VeilidAPIError> {
         macro_rules! get_config {
             ($key:expr) => {
                 let keyname = &stringify!($key)[6..];
                 $key = *cb(keyname.to_owned())?.downcast().map_err(|_| {
-                    let err = format!("incorrect type for key: {}", keyname);
+                    let err = format!("incorrect type for key {}", keyname);
                     debug!("{}", err);
-                    err
+                    VeilidAPIError::generic(err)
                 })?;
             };
         }
@@ -411,12 +411,12 @@
         self.inner.write()
     }

-    pub fn get_key_json(&self, key: &str) -> Result<String, String> {
+    pub fn get_key_json(&self, key: &str) -> Result<String, VeilidAPIError> {
         let c = self.get();

         // Generate json from whole config
-        let jc = serde_json::to_string(&*c).map_err(map_to_string)?;
-        let jvc = json::parse(&jc).map_err(map_to_string)?;
+        let jc = serde_json::to_string(&*c).map_err(VeilidAPIError::generic)?;
+        let jvc = json::parse(&jc).map_err(VeilidAPIError::generic)?;

         // Find requested subkey
         if key.is_empty() {
@@ -427,25 +427,25 @@
             let mut out = &jvc;
             for k in keypath {
                 if !out.has_key(k) {
-                    return Err(format!("invalid subkey '{}' in key '{}'", k, key));
+                    apibail_parse!(format!("invalid subkey in key '{}'", key), k);
                 }
                 out = &out[k];
             }
             Ok(out.to_string())
         }
     }

-    pub fn set_key_json(&self, key: &str, value: &str) -> Result<(), String> {
+    pub fn set_key_json(&self, key: &str, value: &str) -> Result<(), VeilidAPIError> {
         let mut c = self.get_mut();

         // Split key into path parts
         let keypath: Vec<&str> = key.split('.').collect();

         // Convert value into jsonvalue
-        let newval = json::parse(value).map_err(map_to_string)?;
+        let newval = json::parse(value).map_err(VeilidAPIError::generic)?;

         // Generate json from whole config
-        let jc = serde_json::to_string(&*c).map_err(map_to_string)?;
-        let mut jvc = json::parse(&jc).map_err(map_to_string)?;
+        let jc = serde_json::to_string(&*c).map_err(VeilidAPIError::generic)?;
+        let mut jvc = json::parse(&jc).map_err(VeilidAPIError::generic)?;

         // Find requested subkey
         let newconfigstring = if let Some((objkeyname, objkeypath)) = keypath.split_last() {
@@ -453,12 +453,12 @@
             let mut out = &mut jvc;
             for k in objkeypath {
                 if !out.has_key(*k) {
-                    return Err(format!("invalid subkey '{}' in key '{}'", *k, key));
+                    apibail_parse!(format!("invalid subkey in key '{}'", key), k);
                 }
                 out = &mut out[*k];
             }
             if !out.has_key(objkeyname) {
-                return Err(format!("invalid subkey '{}' in key '{}'", objkeyname, key));
+                apibail_parse!(format!("invalid subkey in key '{}'", key), objkeyname);
             }
             out[*objkeyname] = newval;
             jvc.to_string()
@@ -473,11 +473,11 @@
         Ok(())
     }

-    fn validate(&self) -> Result<(), String> {
+    fn validate(&self) -> Result<(), VeilidAPIError> {
         let inner = self.inner.read();

         if inner.program_name.is_empty() {
-            return Err("Program name must not be empty in 'program_name'".to_owned());
+            apibail_generic!("Program name must not be empty in 'program_name'");
         }

         // if inner.network.protocol.udp.enabled {
@@ -486,29 +486,29 @@
         if inner.network.protocol.tcp.listen {
             // Validate TCP settings
             if inner.network.protocol.tcp.max_connections == 0 {
-                return Err("TCP max connections must be > 0 in config key 'network.protocol.tcp.max_connections'".to_owned());
+                apibail_generic!("TCP max connections must be > 0 in config key 'network.protocol.tcp.max_connections'");
             }
         }
         if inner.network.protocol.ws.listen {
             // Validate WS settings
             if inner.network.protocol.ws.max_connections == 0 {
-                return Err("WS max connections must be > 0 in config key 'network.protocol.ws.max_connections'".to_owned());
+                apibail_generic!("WS max connections must be > 0 in config key 'network.protocol.ws.max_connections'");
             }
             if inner.network.application.https.enabled
                 && inner.network.application.https.path == inner.network.protocol.ws.path
             {
-                return Err("WS path conflicts with HTTPS application path in config key 'network.protocol.ws.path'".to_owned());
+                apibail_generic!("WS path conflicts with HTTPS application path in config key 'network.protocol.ws.path'");
             }
             if inner.network.application.http.enabled
                 && inner.network.application.http.path == inner.network.protocol.ws.path
             {
-                return Err("WS path conflicts with HTTP application path in config key 'network.protocol.ws.path'".to_owned());
+                apibail_generic!("WS path conflicts with HTTP application path in config key 'network.protocol.ws.path'");
            }
         }
         if inner.network.protocol.wss.listen {
             // Validate WSS settings
             if inner.network.protocol.wss.max_connections == 0 {
-                return Err("WSS max connections must be > 0 in config key 'network.protocol.wss.max_connections'".to_owned());
+                apibail_generic!("WSS max connections must be > 0 in config key 'network.protocol.wss.max_connections'");
             }
             if inner
                 .network
@@ -519,19 +519,19 @@
                 .map(|u| u.is_empty())
                 .unwrap_or_default()
             {
-                return Err(
-                    "WSS URL must be specified in config key 'network.protocol.wss.url'".to_owned(),
+                apibail_generic!(
+                    "WSS URL must be specified in config key 'network.protocol.wss.url'"
                 );
             }
             if inner.network.application.https.enabled
                 && inner.network.application.https.path == inner.network.protocol.wss.path
             {
-                return Err("WSS path conflicts with HTTPS application path in config key 'network.protocol.ws.path'".to_owned());
+                apibail_generic!("WSS path conflicts with HTTPS application path in config key 'network.protocol.ws.path'");
             }
             if inner.network.application.http.enabled
                 && inner.network.application.http.path == inner.network.protocol.wss.path
             {
-                return Err("WSS path conflicts with HTTP application path in config key 'network.protocol.ws.path'".to_owned());
+                apibail_generic!("WSS path conflicts with HTTP application path in config key 'network.protocol.ws.path'");
             }
         }
         if inner.network.application.https.enabled {
@@ -545,9 +545,8 @@
                 .map(|u| u.is_empty())
                 .unwrap_or_default()
             {
-                return Err(
+                apibail_generic!(
                     "HTTPS URL must be specified in config key 'network.application.https.url'"
-                        .to_owned(),
                 );
             }
         }
@@ -556,15 +555,22 @@
     // Get the node id from config if one is specified
     // Must be done -after- protected store startup
-    pub async fn init_node_id(&self, protected_store: intf::ProtectedStore) -> Result<(), String> {
+    pub async fn init_node_id(
+        &self,
+        protected_store: intf::ProtectedStore,
+    ) -> Result<(), VeilidAPIError> {
         let mut node_id = self.inner.read().network.node_id;
         let mut node_id_secret = self.inner.read().network.node_id_secret;
         // See if node id was previously stored in the protected store
         if !node_id.valid {
             debug!("pulling node id from storage");
-            if let Some(s) = protected_store.load_user_secret_string("node_id").await? {
+            if let Some(s) = protected_store
+                .load_user_secret_string("node_id")
+                .await
+                .map_err(VeilidAPIError::internal)?
+            {
                 debug!("node id found in storage");
-                node_id = DHTKey::try_decode(s.as_str())?
+                node_id = DHTKey::try_decode(s.as_str()).map_err(VeilidAPIError::internal)?
             } else {
                 debug!("node id not found in storage");
             }
@@ -575,10 +581,12 @@
             debug!("pulling node id secret from storage");
             if let Some(s) = protected_store
                 .load_user_secret_string("node_id_secret")
-                .await?
+                .await
+                .map_err(VeilidAPIError::internal)?
             {
                 debug!("node id secret found in storage");
-                node_id_secret = DHTKeySecret::try_decode(s.as_str())?
+                node_id_secret =
+                    DHTKeySecret::try_decode(s.as_str()).map_err(VeilidAPIError::internal)?
             } else {
                 debug!("node id secret not found in storage");
             }
@@ -588,7 +596,7 @@
         if node_id.valid && node_id_secret.valid {
             // Validate node id
             if !dht::validate_key(&node_id, &node_id_secret) {
-                return Err("node id secret and node id key don't match".to_owned());
+                apibail_generic!("node id secret and node id key don't match");
            }
         }
@@ -605,10 +613,12 @@
             // Save the node id / secret in storage
             protected_store
                 .save_user_secret_string("node_id", node_id.encode().as_str())
-                .await?;
+                .await
+                .map_err(VeilidAPIError::internal)?;
             protected_store
                 .save_user_secret_string("node_id_secret", node_id_secret.encode().as_str())
-                .await?;
+                .await
+                .map_err(VeilidAPIError::internal)?;
         self.inner.write().network.node_id = node_id;
         self.inner.write().network.node_id_secret = node_id_secret;


@@ -30,6 +30,7 @@ tokio = { version = "^1", features = ["full"], optional = true }
 tokio-stream = { version = "^0", features = ["net"], optional = true }
 tokio-util = { version = "^0", features = ["compat"], optional = true}
 async-tungstenite = { version = "^0", features = ["async-tls"] }
+color-eyre = "^0.6"
 clap = "^3"
 directories = "^4"
 capnp = "^0"


@@ -1,4 +1,5 @@
 use crate::settings::*;
+use crate::*;
 use clap::{Arg, ArgMatches, Command};
 use std::ffi::OsStr;
 use std::path::Path;
@@ -145,11 +146,11 @@ fn do_clap_matches(default_config_path: &OsStr) -> Result<clap::ArgMatches, clap
     Ok(matches.get_matches())
 }

-pub fn process_command_line() -> Result<(Settings, ArgMatches), String> {
+pub fn process_command_line() -> EyreResult<(Settings, ArgMatches)> {
     // Get command line options
     let default_config_path = Settings::get_default_config_path();
     let matches = do_clap_matches(default_config_path.as_os_str())
-        .map_err(|e| format!("failed to parse command line: {}", e))?;
+        .wrap_err("failed to parse command line: {}")?;

     // Check for one-off commands
     #[cfg(debug_assertions)]
@@ -169,8 +170,7 @@ pub fn process_command_line() -> Result<(Settings, ArgMatches), String> {
         None
     };
-    let settings =
-        Settings::new(settings_path).map_err(|e| format!("configuration is invalid: {}", e))?;
+    let settings = Settings::new(settings_path).wrap_err("configuration is invalid")?;

     // write lock the settings
     let mut settingsrw = settings.write();
@@ -185,15 +185,13 @@ pub fn process_command_line() -> Result<(Settings, ArgMatches), String> {
     }
     if matches.occurrences_of("subnode-index") != 0 {
         let subnode_index = match matches.value_of("subnode-index") {
-            Some(x) => x
-                .parse()
-                .map_err(|e| format!("couldn't parse subnode index: {}", e))?,
+            Some(x) => x.parse().wrap_err("couldn't parse subnode index")?,
             None => {
-                return Err("value not specified for subnode-index".to_owned());
+                bail!("value not specified for subnode-index");
             }
         };
         if subnode_index == 0 {
-            return Err("value of subnode_index should be between 1 and 65535".to_owned());
+            bail!("value of subnode_index should be between 1 and 65535");
         }
         settingsrw.testing.subnode_index = subnode_index;
     }
@@ -214,7 +212,7 @@ pub fn process_command_line() -> Result<(Settings, ArgMatches), String> {
                 .expect("should not be null because of default missing value")
                 .to_string(),
         )
-        .map_err(|e| format!("failed to parse OTLP address: {}", e))?;
+        .wrap_err("failed to parse OTLP address")?;
         settingsrw.logging.otlp.level = LogLevel::Trace;
     }
     if matches.is_present("attach") {
@@ -242,13 +240,13 @@ pub fn process_command_line() -> Result<(Settings, ArgMatches), String> {
         // Split or get secret
         let (k, s) = if let Some((k, s)) = v.split_once(':') {
-            let k = DHTKey::try_decode(k)?;
+            let k = DHTKey::try_decode(k).wrap_err("failed to decode node id from command line")?;
             let s = DHTKeySecret::try_decode(s)?;
             (k, s)
         } else {
             let k = DHTKey::try_decode(v)?;
             let buffer = rpassword::prompt_password("Enter secret key (will not echo): ")
-                .map_err(|e| e.to_string())?;
+                .wrap_err("invalid secret key")?;
             let buffer = buffer.trim().to_string();
             let s = DHTKeySecret::try_decode(&buffer)?;
             (k, s)
@@ -270,7 +268,7 @@ pub fn process_command_line() -> Result<(Settings, ArgMatches), String> {
                 out
             }
             None => {
-                return Err("value not specified for bootstrap".to_owned());
+                bail!("value not specified for bootstrap");
             }
         };
         settingsrw.core.network.bootstrap = bootstrap_list;
@@ -284,17 +282,15 @@ pub fn process_command_line() -> Result<(Settings, ArgMatches), String> {
                 for x in x.split(',') {
                     let x = x.trim();
                     println!(" {}", x);
-                    out.push(ParsedNodeDialInfo::from_str(x).map_err(|e| {
-                        format!(
-                            "unable to parse dial info in bootstrap node list: {} for {}",
-                            e, x
-                        )
-                    })?);
+                    out.push(
+                        ParsedNodeDialInfo::from_str(x)
+                            .wrap_err("unable to parse dial info in bootstrap node list")?,
+                    );
                 }
                 out
             }
             None => {
-                return Err("value not specified for bootstrap node list".to_owned());
+                bail!("value not specified for bootstrap node list");
             }
         };
         settingsrw.core.network.bootstrap_nodes = bootstrap_list;
@@ -315,7 +311,7 @@ pub fn process_command_line() -> Result<(Settings, ArgMatches), String> {
     // Apply subnode index if we're testing
     settings
         .apply_subnode_index()
-        .map_err(|_| "failed to apply subnode index".to_owned())?;
+        .wrap_err("failed to apply subnode index")?;

     Ok((settings, matches))
 }
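A reading aid for the `bail!`/`wrap_err` calls above (the helper below is hypothetical, mirroring the subnode-index parsing; `bail!` and `wrap_err` come from the color_eyre::eyre imports added in main.rs):

    use color_eyre::eyre::{bail, Result, WrapErr};

    fn parse_subnode_index(arg: Option<&str>) -> Result<u16> {
        let arg = match arg {
            Some(x) => x,
            None => bail!("value not specified for subnode-index"),
        };
        // wrap_err keeps the underlying ParseIntError as the source and adds context on top
        let idx: u16 = arg.parse().wrap_err("couldn't parse subnode index")?;
        if idx == 0 {
            bail!("value of subnode_index should be between 1 and 65535");
        }
        Ok(idx)
    }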


@@ -14,6 +14,8 @@ mod veilid_logs;
 mod windows;

 use cfg_if::*;
+#[allow(unused_imports)]
+use color_eyre::eyre::{bail, ensure, eyre, Result as EyreResult, WrapErr};
 use server::*;
 use tools::*;
 use tracing::*;
@@ -25,16 +27,17 @@ pub mod veilid_client_capnp {
 }

 #[instrument(err)]
-fn main() -> Result<(), String> {
+fn main() -> EyreResult<()> {
     #[cfg(windows)]
     let _ = ansi_term::enable_ansi_support();
+    color_eyre::install()?;

     let (settings, matches) = cmdline::process_command_line()?;

     // --- Dump Config ---
     if matches.occurrences_of("dump-config") != 0 {
         return serde_yaml::to_writer(std::io::stdout(), &*settings.read())
-            .map_err(|e| e.to_string());
+            .wrap_err("failed to write yaml");
     }

     // --- Generate DHT Key ---
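(A note for readers, not part of the diff: `color_eyre::install()` registers the colorized error and panic report hooks once, and is best called before any fallible work in `main`. After that, returning `EyreResult<()>` from `main` is enough for a failure to be printed with its full context chain — each `wrap_err` message plus the underlying source error.)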


@@ -2,6 +2,7 @@ use crate::client_api;
 use crate::settings::*;
 use crate::tools::*;
 use crate::veilid_logs::*;
+use crate::*;
 use flume::{unbounded, Receiver, Sender};
 use lazy_static::*;
 use parking_lot::Mutex;
@@ -34,16 +35,16 @@ pub async fn run_veilid_server(
     settings: Settings,
     server_mode: ServerMode,
     veilid_logs: VeilidLogs,
-) -> Result<(), String> {
+) -> EyreResult<()> {
     run_veilid_server_internal(settings, server_mode, veilid_logs).await
 }

-#[instrument(err, skip_all)]
+//#[instrument(err, skip_all)]
 pub async fn run_veilid_server_internal(
     settings: Settings,
     server_mode: ServerMode,
     veilid_logs: VeilidLogs,
-) -> Result<(), String> {
+) -> EyreResult<()> {
     trace!(?settings, ?server_mode);

     let settingsr = settings.read();
@@ -65,7 +66,7 @@ pub async fn run_veilid_server_internal(
     // Start Veilid Core and get API
     let veilid_api = veilid_core::api_startup(update_callback, config_callback)
         .await
-        .map_err(|e| format!("VeilidCore startup failed: {}", e))?;
+        .wrap_err("VeilidCore startup failed")?;

     // Start client api if one is requested
     let mut capi = if settingsr.client_api.enabled && matches!(server_mode, ServerMode::Normal) {
@@ -98,9 +99,10 @@ pub async fn run_veilid_server_internal(
     if auto_attach {
         info!("Auto-attach to the Veilid network");
         if let Err(e) = veilid_api.attach().await {
-            let outerr = format!("Auto-attaching to the Veilid network failed: {:?}", e);
-            error!("{}", outerr);
-            out = Err(outerr);
+            out = Err(eyre!(
+                "Auto-attaching to the Veilid network failed: {:?}",
+                e
+            ));
             shutdown();
         }
     }
@@ -116,9 +118,7 @@ pub async fn run_veilid_server_internal(
             }
         }
         Err(e) => {
-            let outerr = format!("Getting state failed: {:?}", e);
-            error!("{}", outerr);
-            out = Err(outerr);
+            out = Err(eyre!("Getting state failed: {:?}", e));
             break;
         }
     }
@@ -129,9 +129,7 @@ pub async fn run_veilid_server_internal(
             print!("{}", v);
         }
         Err(e) => {
-            let outerr = format!("Getting TXT record failed: {:?}", e);
-            error!("{}", outerr);
-            out = Err(outerr);
+            out = Err(eyre!("Getting TXT record failed: {:?}", e));
         }
     };
     shutdown();


@@ -1,5 +1,7 @@
 #![allow(clippy::bool_assert_comparison)]

+use crate::*;
+
 use directories::*;
 use parking_lot::*;
@@ -11,8 +13,9 @@ use std::str::FromStr;
 use std::sync::Arc;
 use url::Url;
 use veilid_core::xx::*;
+use veilid_core::*;

-pub fn load_default_config() -> Result<config::Config, config::ConfigError> {
+pub fn load_default_config() -> EyreResult<config::Config> {
     let default_config = String::from(
         r#"---
daemon:
@@ -172,21 +175,18 @@ core:
         config::FileFormat::Yaml,
     ))
     .build()
+    .wrap_err("failed to parse default config")
 }

-pub fn load_config(
-    cfg: config::Config,
-    config_file: &Path,
-) -> Result<config::Config, config::ConfigError> {
+pub fn load_config(cfg: config::Config, config_file: &Path) -> EyreResult<config::Config> {
     if let Some(config_file_str) = config_file.to_str() {
         config::Config::builder()
             .add_source(cfg)
             .add_source(config::File::new(config_file_str, config::FileFormat::Yaml))
             .build()
+            .wrap_err("failed to load config")
     } else {
-        Err(config::ConfigError::Message(
-            "config file path is not valid UTF-8".to_owned(),
-        ))
+        bail!("config file path is not valid UTF-8")
     }
 }
@@ -254,9 +254,11 @@ pub struct ParsedUrl {
 }

 impl ParsedUrl {
-    pub fn offset_port(&mut self, offset: u16) -> Result<(), ()> {
+    pub fn offset_port(&mut self, offset: u16) -> EyreResult<()> {
         // Bump port on url
-        self.url.set_port(Some(self.url.port().unwrap() + offset))?;
+        self.url
+            .set_port(Some(self.url.port().unwrap() + offset))
+            .map_err(|_| eyre!("failed to set port on url"))?;
         self.urlstring = self.url.to_string();
         Ok(())
     }
@@ -388,16 +390,16 @@ impl serde::Serialize for NamedSocketAddrs {
 }

 impl NamedSocketAddrs {
-    pub fn offset_port(&mut self, offset: u16) -> Result<(), ()> {
+    pub fn offset_port(&mut self, offset: u16) -> EyreResult<()> {
         // Bump port on name
         if let Some(split) = self.name.rfind(':') {
             let hoststr = &self.name[0..split];
             let portstr = &self.name[split + 1..];
-            let port: u16 = portstr.parse::<u16>().map_err(drop)? + offset;
+            let port: u16 = portstr.parse::<u16>().wrap_err("failed to parse port")? + offset;
             self.name = format!("{}:{}", hoststr, port);
         } else {
-            return Err(());
+            bail!("no port specified to offset");
         }

         // Bump port on addresses
@@ -655,7 +657,7 @@ pub struct Settings {
 }

 impl Settings {
-    pub fn new(config_file: Option<&OsStr>) -> Result<Self, config::ConfigError> {
+    pub fn new(config_file: Option<&OsStr>) -> EyreResult<Self> {
         // Load the default config
         let mut cfg = load_default_config()?;
@@ -681,7 +683,7 @@
         self.inner.write()
     }

-    pub fn apply_subnode_index(&self) -> Result<(), ()> {
+    pub fn apply_subnode_index(&self) -> EyreResult<()> {
         let mut settingsrw = self.write();
         let idx = settingsrw.testing.subnode_index;
         if idx == 0 {
@@ -869,7 +871,7 @@
         pk_path
     }

-    pub fn set(&self, key: &str, value: &str) -> Result<(), String> {
+    pub fn set(&self, key: &str, value: &str) -> EyreResult<()> {
         let mut inner = self.inner.write();

         macro_rules! set_config_value {
@@ -882,9 +884,11 @@
                     return Ok(());
                 }
                 Err(e) => {
-                    return Err(format!(
+                    return Err(eyre!(
                         "invalid type for key {}, value: {}: {}",
-                        key, value, e
+                        key,
+                        value,
+                        e
                     ))
                 }
             }
@@ -1005,7 +1009,7 @@
         set_config_value!(inner.core.network.protocol.wss.listen_address, value);
         set_config_value!(inner.core.network.protocol.wss.path, value);
         set_config_value!(inner.core.network.protocol.wss.url, value);
-        Err("settings key not found".to_owned())
+        Err(eyre!("settings key not found"))
     }

     pub fn get_core_config_callback(&self) -> veilid_core::ConfigCallback {
@@ -1013,7 +1017,7 @@
         Arc::new(move |key: String| {
             let inner = inner.read();
-            let out: Result<Box<dyn core::any::Any + Send>, String> = match key.as_str() {
+            let out: ConfigCallbackReturn = match key.as_str() {
                 "program_name" => Ok(Box::new("veilid-server".to_owned())),
                 "namespace" => Ok(Box::new(if inner.testing.subnode_index == 0 {
                     "".to_owned()
@@ -1365,7 +1369,10 @@
                     .as_ref()
                     .map(|a| a.urlstring.clone()),
                 )),
-                _ => Err(format!("config key '{}' doesn't exist", key)),
+                _ => Err(VeilidAPIError::generic(format!(
+                    "config key '{}' doesn't exist",
+                    key
+                ))),
             };
             out
         })


@@ -2,6 +2,7 @@ use crate::server::*;
 use crate::settings::Settings;
 use crate::tools::*;
 use crate::veilid_logs::*;
+use crate::*;
 use clap::ArgMatches;
 use futures_util::StreamExt;
 use signal_hook::consts::signal::*;
@@ -26,7 +27,7 @@ async fn handle_signals(mut signals: Signals) {
 }

 #[instrument(err)]
-pub fn run_daemon(settings: Settings, _matches: ArgMatches) -> Result<(), String> {
+pub fn run_daemon(settings: Settings, _matches: ArgMatches) -> EyreResult<()> {
     let daemon = {
         let mut daemon = daemonize::Daemonize::new();
         let s = settings.read();
@@ -64,10 +65,7 @@ pub fn run_daemon(settings: Settings, _matches: ArgMatches) -> Result<(), String
         }

         let stdout_file = if let Some(stdout_file) = &s.daemon.stdout_file {
-            Some(
-                std::fs::File::create(stdout_file)
-                    .map_err(|e| format!("Failed to create stdio file: {}", e))?,
-            )
+            Some(std::fs::File::create(stdout_file).wrap_err("Failed to create stdio file")?)
         } else {
             None
         };
@@ -79,12 +77,11 @@ pub fn run_daemon(settings: Settings, _matches: ArgMatches) -> Result<(), String
                     .as_ref()
                     .unwrap()
                     .try_clone()
-                    .map_err(|e| format!("Failed to clone stdout file: {}", e))?,
+                    .wrap_err("Failed to clone stdout file")?,
             );
         } else {
             daemon = daemon.stderr(
-                std::fs::File::create(stderr_file)
-                    .map_err(|e| format!("Failed to create stderr file: {}", e))?,
+                std::fs::File::create(stderr_file).wrap_err("Failed to create stderr file")?,
             );
         }
     }
@@ -101,13 +98,11 @@ pub fn run_daemon(settings: Settings, _matches: ArgMatches) -> Result<(), String
     let veilid_logs = VeilidLogs::setup(settings.clone())?;

     // Daemonize
-    daemon
-        .start()
-        .map_err(|e| format!("Failed to daemonize: {}", e))?;
+    daemon.start().wrap_err("Failed to daemonize")?;

     // Catch signals
-    let signals = Signals::new(&[SIGHUP, SIGTERM, SIGINT, SIGQUIT])
-        .map_err(|e| format!("failed to init signals: {}", e))?;
+    let signals =
+        Signals::new(&[SIGHUP, SIGTERM, SIGINT, SIGQUIT]).wrap_err("failed to init signals")?;
     let handle = signals.handle();
     let signals_task = spawn(handle_signals(signals));


@@ -1,4 +1,5 @@
 use crate::settings::*;
+use crate::*;
 use cfg_if::*;
 use opentelemetry::sdk::*;
 use opentelemetry::*;
@@ -22,7 +23,7 @@ pub struct VeilidLogs {
 }

 impl VeilidLogs {
-    pub fn setup(settings: Settings) -> Result<VeilidLogs, String> {
+    pub fn setup(settings: Settings) -> EyreResult<VeilidLogs> {
         let settingsr = settings.read();

         // Set up subscriber and layers
@@ -77,7 +78,7 @@
                 )]),
             ))
             .install_batch(batch)
-            .map_err(|e| format!("failed to install OpenTelemetry tracer: {}", e))?;
+            .wrap_err("failed to install OpenTelemetry tracer")?;

             let filter = veilid_core::VeilidLayerFilter::new(
                 convert_loglevel(settingsr.logging.otlp.level),
@@ -101,13 +102,11 @@
                 .parent()
                 .unwrap_or(Path::new(&MAIN_SEPARATOR.to_string()))
                 .canonicalize()
-                .map_err(|e| {
-                    format!(
-                        "File log path parent does not exist: {} ({})",
-                        settingsr.logging.file.path, e
-                    )
-                })?;
-            let log_filename = full_path.file_name().ok_or(format!(
+                .wrap_err(format!(
+                    "File log path parent does not exist: {}",
+                    settingsr.logging.file.path
+                ))?;
+            let log_filename = full_path.file_name().ok_or(eyre!(
                 "File log filename not specified in path: {}",
                 settingsr.logging.file.path
             ))?;
@@ -149,7 +148,7 @@
                 convert_loglevel(settingsr.logging.system.level),
                 None,
             );
-            let layer = tracing_journald::layer().map_err(|e| format!("failed to set up journald logging: {}", e))?
+            let layer = tracing_journald::layer().wrap_err("failed to set up journald logging")?
                 .with_filter(filter.clone());
             filters.insert("system", filter);
             layers.push(layer.boxed());
@@ -160,7 +159,7 @@
         let subscriber = subscriber.with(layers);
         subscriber
             .try_init()
-            .map_err(|e| format!("failed to initialize logging: {}", e))?;
+            .wrap_err("failed to initialize logging")?;

         Ok(VeilidLogs {
             inner: Arc::new(Mutex::new(VeilidLogsInner {