fix tokio
@@ -68,7 +68,7 @@ pub async fn sleep(millis: u32) {
         }
     } else {
         cfg_if! {
             if #[cfg(feature="rt-async-std")] {
                 async_std::task::sleep(Duration::from_millis(u64::from(millis))).await;
             } else if #[cfg(feature="rt-tokio")] {
                 tokio::time::sleep(Duration::from_millis(u64::from(millis))).await;
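The `cfg_if!` branching above is this crate's runtime-abstraction pattern: each helper compiles down to exactly one runtime's API, selected by the mutually exclusive `rt-async-std` / `rt-tokio` features. A minimal standalone sketch of the same idea (feature names are taken from the diff; the `cfg_if` dependency and the `compile_error!` guard are assumptions, not code from this commit):

use std::time::Duration;
use cfg_if::cfg_if;

// Runtime-agnostic sleep: exactly one branch survives compilation.
pub async fn sleep(millis: u32) {
    cfg_if! {
        if #[cfg(feature = "rt-async-std")] {
            async_std::task::sleep(Duration::from_millis(u64::from(millis))).await;
        } else if #[cfg(feature = "rt-tokio")] {
            tokio::time::sleep(Duration::from_millis(u64::from(millis))).await;
        } else {
            // Assumed guard: fail loudly if no runtime feature is enabled.
            compile_error!("exactly one runtime feature must be enabled");
        }
    }
}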
@@ -61,8 +61,14 @@ pub fn veilid_version() -> (u32, u32, u32) {
 #[cfg(target_os = "android")]
 pub use intf::utils::android::{veilid_core_setup_android, veilid_core_setup_android_no_log};

-pub static DEFAULT_LOG_IGNORE_LIST: [&str; 12] = [
+pub static DEFAULT_LOG_IGNORE_LIST: [&str; 18] = [
     "mio",
+    "h2",
+    "hyper",
+    "tower",
+    "tonic",
+    "tokio_util",
+    "want",
     "serial_test",
     "async_std",
     "async_io",
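The six new entries (`h2`, `hyper`, `tower`, `tonic`, `tokio_util`, `want`) are all tokio/hyper-ecosystem crates that become chatty once `rt-tokio` is enabled. As a hedged sketch of how an ignore list like this is typically consumed, here is one way to turn it into a `tracing_subscriber` target filter (the actual filter mechanism in veilid-core may differ; the function name is illustrative):

use tracing_subscriber::filter::{LevelFilter, Targets};

// Hypothetical sketch: silence every target on the ignore list while
// keeping a default level for everything else.
fn build_filter(ignore_list: &[&str]) -> Targets {
    ignore_list
        .iter()
        .fold(Targets::new().with_default(LevelFilter::DEBUG), |t, target| {
            t.with_target(*target, LevelFilter::OFF)
        })
}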
@@ -46,6 +46,7 @@ impl ConnectionTable {
         let mut unord = FuturesUnordered::new();
         for table in &mut self.conn_by_descriptor {
             for (_, v) in table.drain() {
+                trace!("connection table join: {:?}", v);
                 unord.push(v);
             }
         }
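The pattern here, draining every pending connection future into a `FuturesUnordered` and then awaiting the whole set, lets the table wait for all connections to finish without caring about completion order. A minimal sketch of the join step that presumably follows the loop shown above (the function name is illustrative):

use futures_util::{stream::FuturesUnordered, StreamExt};

// Await every future in the set; the loop ends once the set is empty.
async fn join_all_connections(
    mut unord: FuturesUnordered<impl std::future::Future<Output = ()>>,
) {
    while unord.next().await.is_some() {}
}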
@@ -319,9 +319,9 @@ impl NetworkManager {
         let components = self.inner.lock().components.clone();
         if let Some(components) = components {
             components.net.shutdown().await;
+            components.connection_manager.shutdown().await;
             components.rpc_processor.shutdown().await;
             components.receipt_manager.shutdown().await;
-            components.connection_manager.shutdown().await;
         }

         // reset the state
@@ -561,7 +561,7 @@ impl Network {
             // Drop the stop
             drop(inner.stop_source.take());
         }
-        debug!("stopping {} low level network tasks", unord.len(),);
+        debug!("stopping {} low level network tasks", unord.len());
         // Wait for everything to stop
         while unord.next().await.is_some() {}
@@ -208,6 +208,7 @@ impl Network {
             if #[cfg(feature="rt-async-std")] {
                 let listener = TcpListener::from(std_listener);
             } else if #[cfg(feature="rt-tokio")] {
+                std_listener.set_nonblocking(true).expect("failed to set nonblocking");
                 let listener = TcpListener::from_std(std_listener).map_err(map_to_string)?;
             }
         }
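This one-line addition (repeated for the three `UdpSocket` conversions later in the commit) is the heart of the fix: `tokio::net::TcpListener::from_std` only registers the socket with the reactor, it does not put it into non-blocking mode, and a still-blocking socket will stall tokio's event loop. A hedged sketch of the conversion in isolation (helper name is illustrative):

use std::net::TcpListener as StdTcpListener;
use tokio::net::TcpListener;

// Convert a std listener into a tokio listener. The socket must already be
// non-blocking; note that from_std must be called inside a tokio runtime.
fn into_tokio_listener(std_listener: StdTcpListener) -> std::io::Result<TcpListener> {
    std_listener.set_nonblocking(true)?;
    TcpListener::from_std(std_listener)
}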
@@ -23,7 +23,7 @@ impl Network {
         // Run thread task to process stream of messages
         let this = self.clone();

-        let jh = spawn_with_local_set(async move {
+        let jh = spawn(async move {
             trace!("UDP listener task spawned");

             // Collect all our protocol handlers into a vector
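The recurring `spawn_local` / `spawn_with_local_set` → `spawn` replacements in this commit all share one motivation: on tokio's multi-threaded runtime, `tokio::spawn` requires the future to be `Send + 'static`, and moving everything onto plain `spawn` avoids dragging a `LocalSet` around. A sketch of what a runtime-agnostic spawn wrapper plausibly looks like; the real `intf::spawn` returns a `MustJoinHandle` (seen later in this diff), which this simplified version omits:

use cfg_if::cfg_if;
use core::future::Future;

// Hypothetical sketch: the Send + 'static bounds are what the switch away
// from spawn_local buys, letting any worker thread run the task.
pub fn spawn<F>(future: F)
where
    F: Future<Output = ()> + Send + 'static,
{
    cfg_if! {
        if #[cfg(feature = "rt-async-std")] {
            drop(async_std::task::spawn(future)); // dropping the handle detaches the task
        } else if #[cfg(feature = "rt-tokio")] {
            drop(tokio::task::spawn(future)); // dropping the handle detaches the task
        }
    }
}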
@@ -49,7 +49,7 @@ impl Network {
             for ph in protocol_handlers {
                 let network_manager = network_manager.clone();
                 let stop_token = stop_token.clone();
-                let jh = intf::spawn_local(async move {
+                let ph_future = async move {
                     let mut data = vec![0u8; 65536];

                     loop {
@@ -84,26 +84,18 @@ impl Network {
                             }
                         }
                     }
-                });
+                };

-                protocol_handlers_unordered.push(jh);
+                protocol_handlers_unordered.push(ph_future);
             }
             // Now we wait for join handles to exit,
             // if any error out it indicates an error needing
             // us to completely restart the network
-            loop {
-                match protocol_handlers_unordered.next().await {
-                    Some(v) => {
-                        // true = stopped, false = errored
-                        if !v {
-                            // If any protocol handler fails, our socket died and we need to restart the network
-                            this.inner.lock().network_needs_restart = true;
-                        }
-                    }
-                    None => {
-                        // All protocol handlers exited
-                        break;
-                    }
+            while let Some(v) = protocol_handlers_unordered.next().await {
+                // true = stopped, false = errored
+                if !v {
+                    // If any protocol handler fails, our socket died and we need to restart the network
+                    this.inner.lock().network_needs_restart = true;
+                }
+            }
@@ -138,6 +130,7 @@ impl Network {
             if #[cfg(feature="rt-async-std")] {
                 let udp_socket = UdpSocket::from(std_udp_socket);
             } else if #[cfg(feature="rt-tokio")] {
+                std_udp_socket.set_nonblocking(true).expect("failed to set nonblocking");
                 let udp_socket = UdpSocket::from_std(std_udp_socket).map_err(map_to_string)?;
             }
         }
@@ -158,6 +151,7 @@ impl Network {
             if #[cfg(feature="rt-async-std")] {
                 let udp_socket = UdpSocket::from(std_udp_socket);
             } else if #[cfg(feature="rt-tokio")] {
+                std_udp_socket.set_nonblocking(true).expect("failed to set nonblocking");
                 let udp_socket = UdpSocket::from_std(std_udp_socket).map_err(map_to_string)?;
             }
         }
@@ -184,6 +178,7 @@ impl Network {
             if #[cfg(feature="rt-async-std")] {
                 let udp_socket = UdpSocket::from(std_udp_socket);
             } else if #[cfg(feature="rt-tokio")] {
+                std_udp_socket.set_nonblocking(true).expect("failed to set nonblocking");
                 let udp_socket = UdpSocket::from_std(std_udp_socket).map_err(map_to_string)?;
             }
         }
@@ -137,7 +137,7 @@ impl NetworkConnection {
         let local_stop_token = stop_source.token();

         // Spawn connection processor and pass in protocol connection
-        let processor = intf::spawn_local(Self::process_connection(
+        let processor = intf::spawn(Self::process_connection(
             connection_manager,
             local_stop_token,
             manager_stop_token,
@@ -355,8 +355,20 @@ impl Future for NetworkConnection {
     type Output = ();

     fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> task::Poll<Self::Output> {
+        let mut pending = 0usize;
+
+        // Process all sub-futures, nulling them out when they return ready
         if let Some(mut processor) = self.processor.as_mut() {
-            Pin::new(&mut processor).poll(cx)
+            if Pin::new(&mut processor).poll(cx).is_ready() {
+                self.processor = None;
+            } else {
+                pending += 1
+            }
         }
+
+        // Any sub-futures pending?
+        if pending > 0 {
+            task::Poll::Pending
+        } else {
+            task::Poll::Ready(())
+        }
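The rewritten `poll` replaces "return the processor's poll result directly" with a counting pattern: each optional sub-future is polled, dropped once it completes, and the parent future only resolves when nothing is left pending. A self-contained sketch of that pattern under assumed illustrative types (not veilid-core's own):

use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

// Hypothetical parent future holding one optional sub-future.
struct Parent<F: Future<Output = ()> + Unpin> {
    processor: Option<F>,
}

impl<F: Future<Output = ()> + Unpin> Future for Parent<F> {
    type Output = ();
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        let mut pending = 0usize;
        // Poll the sub-future; null it out when it returns ready.
        if let Some(processor) = self.processor.as_mut() {
            if Pin::new(processor).poll(cx).is_ready() {
                self.processor = None;
            } else {
                pending += 1;
            }
        }
        // Only ready once no sub-future remains pending.
        if pending > 0 { Poll::Pending } else { Poll::Ready(()) }
    }
}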
@@ -369,7 +369,7 @@ impl RoutingTable {
         Self::with_entries(&*inner, cur_ts, BucketEntryState::Unreliable, |k, v| {
             if v.with(|e| e.needs_ping(&k, cur_ts, relay_node_id)) {
                 let nr = NodeRef::new(self.clone(), k, v, None);
-                unord.push(intf::spawn_local(rpc.clone().rpc_call_status(nr)));
+                unord.push(intf::spawn(rpc.clone().rpc_call_status(nr)));
             }
             Option::<()>::None
         });
@@ -59,12 +59,25 @@ impl<T: 'static> Future for MustJoinHandle<T> {
     fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
         match Pin::new(self.join_handle.as_mut().unwrap()).poll(cx) {
             Poll::Ready(t) => {
+                if self.completed {
+                    panic!("should not poll completed join handle");
+                }
+                self.completed = true;
                 cfg_if! {
                     if #[cfg(feature="rt-async-std")] {
                         Poll::Ready(t)
                     } else if #[cfg(feature="rt-tokio")] {
-                        Poll::Ready(t.unwrap())
+                        match t {
+                            Ok(t) => Poll::Ready(t),
+                            Err(e) => {
+                                if e.is_panic() {
+                                    // Resume the panic on the main task
+                                    std::panic::resume_unwind(e.into_panic());
+                                } else {
+                                    panic!("join error was not a panic, should not poll after abort");
+                                }
+                            }
+                        }
                     } else if #[cfg(target_arch = "wasm32")] {
                         Poll::Ready(t)
                     } else {
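The tokio branch can no longer simply `t.unwrap()`: under `rt-tokio` the join handle yields a `Result<T, JoinError>`, and a `JoinError` caused by a panic should re-raise that panic on the joining task rather than surface as an unrelated unwrap failure. A standalone sketch of the same unwrapping logic (`is_panic` and `into_panic` are the real `tokio::task::JoinError` APIs used above; the async wrapper is illustrative):

// Sketch: propagate a panicked tokio task's panic to whoever joins it.
async fn join_propagating_panic<T>(handle: tokio::task::JoinHandle<T>) -> T {
    match handle.await {
        Ok(t) => t,
        Err(e) if e.is_panic() => std::panic::resume_unwind(e.into_panic()),
        Err(_) => panic!("task was cancelled, should not join after abort"),
    }
}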
@@ -202,7 +202,7 @@ cfg_if! {
         }
         // Run if we should do that
         if run {
-            self.unlock(Some(intf::spawn_with_local_set(future)));
+            self.unlock(Some(intf::spawn(future)));
         }
         // Return the prior result if we have one
         Ok((out, run))