checkpoint

This commit is contained in:
John Smith
2022-12-16 20:07:28 -05:00
parent 10a0e3b629
commit 221c09b555
40 changed files with 428 additions and 298 deletions

View File

@@ -120,7 +120,7 @@ impl Bucket {
.iter()
.map(|(k, v)| (k.clone(), v.clone()))
.collect();
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
sorted_entries.sort_by(|a, b| -> core::cmp::Ordering {
if a.0 == b.0 {
return core::cmp::Ordering::Equal;

View File

@@ -51,7 +51,7 @@ pub struct BucketEntryPublicInternet {
/// The PublicInternet node info
signed_node_info: Option<Box<SignedNodeInfo>>,
/// The last node info timestamp of ours that this entry has seen
last_seen_our_node_info_ts: u64,
last_seen_our_node_info_ts: Timestamp,
/// Last known node status
node_status: Option<PublicInternetNodeStatus>,
}
@@ -63,7 +63,7 @@ pub struct BucketEntryLocalNetwork {
/// The LocalNetwork node info
signed_node_info: Option<Box<SignedNodeInfo>>,
/// The last node info timestamp of ours that this entry has seen
last_seen_our_node_info_ts: u64,
last_seen_our_node_info_ts: Timestamp,
/// Last known node status
node_status: Option<LocalNetworkNodeStatus>,
}
@@ -93,7 +93,7 @@ pub struct BucketEntryInner {
updated_since_last_network_change: bool,
/// The last connection descriptors used to contact this node, per protocol type
#[with(Skip)]
last_connections: BTreeMap<LastConnectionKey, (ConnectionDescriptor, u64)>,
last_connections: BTreeMap<LastConnectionKey, (ConnectionDescriptor, Timestamp)>,
/// The node info for this entry on the publicinternet routing domain
public_internet: BucketEntryPublicInternet,
/// The node info for this entry on the localnetwork routing domain
@@ -148,7 +148,7 @@ impl BucketEntryInner {
}
// Less is more reliable than faster
pub fn cmp_fastest_reliable(cur_ts: u64, e1: &Self, e2: &Self) -> std::cmp::Ordering {
pub fn cmp_fastest_reliable(cur_ts: Timestamp, e1: &Self, e2: &Self) -> std::cmp::Ordering {
// Reverse compare so most reliable is at front
let ret = e2.state(cur_ts).cmp(&e1.state(cur_ts));
if ret != std::cmp::Ordering::Equal {
@@ -170,7 +170,7 @@ impl BucketEntryInner {
}
// Less is more reliable than older
pub fn cmp_oldest_reliable(cur_ts: u64, e1: &Self, e2: &Self) -> std::cmp::Ordering {
pub fn cmp_oldest_reliable(cur_ts: Timestamp, e1: &Self, e2: &Self) -> std::cmp::Ordering {
// Reverse compare so most reliable is at front
let ret = e2.state(cur_ts).cmp(&e1.state(cur_ts));
if ret != std::cmp::Ordering::Equal {
@@ -191,7 +191,7 @@ impl BucketEntryInner {
}
}
pub fn sort_fastest_reliable_fn(cur_ts: u64) -> impl FnMut(&Self, &Self) -> std::cmp::Ordering {
pub fn sort_fastest_reliable_fn(cur_ts: Timestamp) -> impl FnMut(&Self, &Self) -> std::cmp::Ordering {
move |e1, e2| Self::cmp_fastest_reliable(cur_ts, e1, e2)
}
@@ -231,7 +231,7 @@ impl BucketEntryInner {
// No need to update the signednodeinfo though since the timestamp is the same
// Touch the node and let it try to live again
self.updated_since_last_network_change = true;
self.touch_last_seen(get_timestamp());
self.touch_last_seen(get_aligned_timestamp());
}
return;
}
@@ -258,7 +258,7 @@ impl BucketEntryInner {
// Update the signed node info
*opt_current_sni = Some(Box::new(signed_node_info));
self.updated_since_last_network_change = true;
self.touch_last_seen(get_timestamp());
self.touch_last_seen(get_aligned_timestamp());
}
pub fn has_node_info(&self, routing_domain_set: RoutingDomainSet) -> bool {
@@ -367,7 +367,7 @@ impl BucketEntryInner {
}
// Stores a connection descriptor in this entry's table of last connections
pub fn set_last_connection(&mut self, last_connection: ConnectionDescriptor, timestamp: u64) {
pub fn set_last_connection(&mut self, last_connection: ConnectionDescriptor, timestamp: Timestamp) {
let key = self.descriptor_to_key(last_connection);
self.last_connections
.insert(key, (last_connection, timestamp));
@@ -431,7 +431,7 @@ impl BucketEntryInner {
} else {
// If this is not connection oriented, then we check our last seen time
// to see if this mapping has expired (beyond our timeout)
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
(v.1 + (CONNECTIONLESS_TIMEOUT_SECS as u64 * 1_000_000u64)) >= cur_ts
};
@@ -455,7 +455,7 @@ impl BucketEntryInner {
self.min_max_version
}
pub fn state(&self, cur_ts: u64) -> BucketEntryState {
pub fn state(&self, cur_ts: Timestamp) -> BucketEntryState {
if self.check_reliable(cur_ts) {
BucketEntryState::Reliable
} else if self.check_dead(cur_ts) {
@@ -494,7 +494,7 @@ impl BucketEntryInner {
}
}
pub fn set_our_node_info_ts(&mut self, routing_domain: RoutingDomain, seen_ts: u64) {
pub fn set_our_node_info_ts(&mut self, routing_domain: RoutingDomain, seen_ts: Timestamp) {
match routing_domain {
RoutingDomain::LocalNetwork => {
self.local_network.last_seen_our_node_info_ts = seen_ts;
@@ -508,7 +508,7 @@ impl BucketEntryInner {
pub fn has_seen_our_node_info_ts(
&self,
routing_domain: RoutingDomain,
our_node_info_ts: u64,
our_node_info_ts: Timestamp,
) -> bool {
match routing_domain {
RoutingDomain::LocalNetwork => {
@@ -530,7 +530,7 @@ impl BucketEntryInner {
///// stats methods
// called every ROLLING_TRANSFERS_INTERVAL_SECS seconds
pub(super) fn roll_transfers(&mut self, last_ts: u64, cur_ts: u64) {
pub(super) fn roll_transfers(&mut self, last_ts: Timestamp, cur_ts: Timestamp) {
self.transfer_stats_accounting.roll_transfers(
last_ts,
cur_ts,
@@ -539,12 +539,12 @@ impl BucketEntryInner {
}
// Called for every round trip packet we receive
fn record_latency(&mut self, latency: u64) {
fn record_latency(&mut self, latency: TimestampDuration) {
self.peer_stats.latency = Some(self.latency_stats_accounting.record_latency(latency));
}
///// state machine handling
pub(super) fn check_reliable(&self, cur_ts: u64) -> bool {
pub(super) fn check_reliable(&self, cur_ts: Timestamp) -> bool {
// If we have had any failures to send, this is not reliable
if self.peer_stats.rpc_stats.failed_to_send > 0 {
return false;
@@ -558,7 +558,7 @@ impl BucketEntryInner {
}
}
}
pub(super) fn check_dead(&self, cur_ts: u64) -> bool {
pub(super) fn check_dead(&self, cur_ts: Timestamp) -> bool {
// If we have failed to send NEVER_REACHED_PING_COUNT times in a row, the node is dead
if self.peer_stats.rpc_stats.failed_to_send >= NEVER_REACHED_PING_COUNT {
return true;
@@ -575,14 +575,14 @@ impl BucketEntryInner {
}
/// Return the last time we either saw a node, or asked it a question
fn latest_contact_time(&self) -> Option<u64> {
fn latest_contact_time(&self) -> Option<Timestamp> {
self.peer_stats
.rpc_stats
.last_seen_ts
.max(self.peer_stats.rpc_stats.last_question)
.max(self.peer_stats.rpc_stats.last_question_ts)
}
fn needs_constant_ping(&self, cur_ts: u64, interval: u64) -> bool {
fn needs_constant_ping(&self, cur_ts: Timestamp, interval: Timestamp) -> bool {
// If we have not seen the node within the last 'interval', we should ping it
let latest_contact_time = self.latest_contact_time();
@@ -596,7 +596,7 @@ impl BucketEntryInner {
}
// Check if this node needs a ping right now to validate it is still reachable
pub(super) fn needs_ping(&self, cur_ts: u64, needs_keepalive: bool) -> bool {
pub(super) fn needs_ping(&self, cur_ts: Timestamp, needs_keepalive: bool) -> bool {
// See which ping pattern we are to use
let state = self.state(cur_ts);
@@ -653,7 +653,7 @@ impl BucketEntryInner {
}
}
pub(super) fn touch_last_seen(&mut self, ts: u64) {
pub(super) fn touch_last_seen(&mut self, ts: Timestamp) {
// Mark the node as seen
if self
.peer_stats
@@ -667,7 +667,7 @@ impl BucketEntryInner {
self.peer_stats.rpc_stats.last_seen_ts = Some(ts);
}
pub(super) fn _state_debug_info(&self, cur_ts: u64) -> String {
pub(super) fn _state_debug_info(&self, cur_ts: Timestamp) -> String {
let first_consecutive_seen_ts = if let Some(first_consecutive_seen_ts) =
self.peer_stats.rpc_stats.first_consecutive_seen_ts
{
@@ -698,26 +698,26 @@ impl BucketEntryInner {
////////////////////////////////////////////////////////////////
/// Called when rpc processor things happen
pub(super) fn question_sent(&mut self, ts: u64, bytes: u64, expects_answer: bool) {
pub(super) fn question_sent(&mut self, ts: Timestamp, bytes: u64, expects_answer: bool) {
self.transfer_stats_accounting.add_up(bytes);
self.peer_stats.rpc_stats.messages_sent += 1;
self.peer_stats.rpc_stats.failed_to_send = 0;
if expects_answer {
self.peer_stats.rpc_stats.questions_in_flight += 1;
self.peer_stats.rpc_stats.last_question = Some(ts);
self.peer_stats.rpc_stats.last_question_ts = Some(ts);
}
}
pub(super) fn question_rcvd(&mut self, ts: u64, bytes: u64) {
pub(super) fn question_rcvd(&mut self, ts: Timestamp, bytes: u64) {
self.transfer_stats_accounting.add_down(bytes);
self.peer_stats.rpc_stats.messages_rcvd += 1;
self.touch_last_seen(ts);
}
pub(super) fn answer_sent(&mut self, bytes: u64) {
pub(super) fn answer_sent(&mut self, bytes: ByteCount) {
self.transfer_stats_accounting.add_up(bytes);
self.peer_stats.rpc_stats.messages_sent += 1;
self.peer_stats.rpc_stats.failed_to_send = 0;
}
pub(super) fn answer_rcvd(&mut self, send_ts: u64, recv_ts: u64, bytes: u64) {
pub(super) fn answer_rcvd(&mut self, send_ts: Timestamp, recv_ts: Timestamp, bytes: ByteCount) {
self.transfer_stats_accounting.add_down(bytes);
self.peer_stats.rpc_stats.messages_rcvd += 1;
self.peer_stats.rpc_stats.questions_in_flight -= 1;
@@ -730,9 +730,9 @@ impl BucketEntryInner {
self.peer_stats.rpc_stats.questions_in_flight -= 1;
self.peer_stats.rpc_stats.recent_lost_answers += 1;
}
pub(super) fn failed_to_send(&mut self, ts: u64, expects_answer: bool) {
pub(super) fn failed_to_send(&mut self, ts: Timestamp, expects_answer: bool) {
if expects_answer {
self.peer_stats.rpc_stats.last_question = Some(ts);
self.peer_stats.rpc_stats.last_question_ts = Some(ts);
}
self.peer_stats.rpc_stats.failed_to_send += 1;
self.peer_stats.rpc_stats.first_consecutive_seen_ts = None;
@@ -747,7 +747,7 @@ pub struct BucketEntry {
impl BucketEntry {
pub(super) fn new() -> Self {
let now = get_timestamp();
let now = get_aligned_timestamp();
Self {
ref_count: AtomicU32::new(0),
inner: RwLock::new(BucketEntryInner {

View File

@@ -104,7 +104,7 @@ impl RoutingTable {
pub(crate) fn debug_info_entries(&self, limit: usize, min_state: BucketEntryState) -> String {
let inner = self.inner.read();
let inner = &*inner;
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
let mut out = String::new();
@@ -164,7 +164,7 @@ impl RoutingTable {
pub(crate) fn debug_info_buckets(&self, min_state: BucketEntryState) -> String {
let inner = self.inner.read();
let inner = &*inner;
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
let mut out = String::new();
const COLS: usize = 16;

View File

@@ -468,14 +468,14 @@ impl RoutingTable {
pub fn get_nodes_needing_ping(
&self,
routing_domain: RoutingDomain,
cur_ts: u64,
cur_ts: Timestamp,
) -> Vec<NodeRef> {
self.inner
.read()
.get_nodes_needing_ping(self.clone(), routing_domain, cur_ts)
}
pub fn get_all_nodes(&self, cur_ts: u64) -> Vec<NodeRef> {
pub fn get_all_nodes(&self, cur_ts: Timestamp) -> Vec<NodeRef> {
let inner = self.inner.read();
inner.get_all_nodes(self.clone(), cur_ts)
}
@@ -542,7 +542,7 @@ impl RoutingTable {
&self,
node_id: DHTKey,
descriptor: ConnectionDescriptor,
timestamp: u64,
timestamp: Timestamp,
) -> Option<NodeRef> {
self.inner.write().register_node_with_existing_connection(
self.clone(),
@@ -774,7 +774,7 @@ impl RoutingTable {
pub fn find_peers_with_sort_and_filter<C, T, O>(
&self,
node_count: usize,
cur_ts: u64,
cur_ts: Timestamp,
filters: VecDeque<RoutingTableEntryFilter>,
compare: C,
transform: T,
@@ -969,7 +969,7 @@ impl RoutingTable {
pub fn find_inbound_relay(
&self,
routing_domain: RoutingDomain,
cur_ts: u64,
cur_ts: Timestamp,
) -> Option<NodeRef> {
// Get relay filter function
let relay_node_filter = match routing_domain {

View File

@@ -119,7 +119,7 @@ pub trait NodeRefBase: Sized {
fn set_min_max_version(&self, min_max_version: VersionRange) {
self.operate_mut(|_rti, e| e.set_min_max_version(min_max_version))
}
fn state(&self, cur_ts: u64) -> BucketEntryState {
fn state(&self, cur_ts: Timestamp) -> BucketEntryState {
self.operate(|_rti, e| e.state(cur_ts))
}
fn peer_stats(&self) -> PeerStats {
@@ -140,21 +140,21 @@ pub trait NodeRefBase: Sized {
.unwrap_or(false)
})
}
fn node_info_ts(&self, routing_domain: RoutingDomain) -> u64 {
fn node_info_ts(&self, routing_domain: RoutingDomain) -> Timestamp {
self.operate(|_rti, e| {
e.signed_node_info(routing_domain)
.map(|sni| sni.timestamp())
.unwrap_or(0u64)
.unwrap_or(0u64.into())
})
}
fn has_seen_our_node_info_ts(
&self,
routing_domain: RoutingDomain,
our_node_info_ts: u64,
our_node_info_ts: Timestamp,
) -> bool {
self.operate(|_rti, e| e.has_seen_our_node_info_ts(routing_domain, our_node_info_ts))
}
fn set_our_node_info_ts(&self, routing_domain: RoutingDomain, seen_ts: u64) {
fn set_our_node_info_ts(&self, routing_domain: RoutingDomain, seen_ts: Timestamp) {
self.operate_mut(|_rti, e| e.set_our_node_info_ts(routing_domain, seen_ts));
}
fn network_class(&self, routing_domain: RoutingDomain) -> Option<NetworkClass> {
@@ -277,7 +277,7 @@ pub trait NodeRefBase: Sized {
self.operate_mut(|_rti, e| e.clear_last_connections())
}
fn set_last_connection(&self, connection_descriptor: ConnectionDescriptor, ts: u64) {
fn set_last_connection(&self, connection_descriptor: ConnectionDescriptor, ts: Timestamp) {
self.operate_mut(|rti, e| {
e.set_last_connection(connection_descriptor, ts);
rti.touch_recent_peer(self.common().node_id, connection_descriptor);
@@ -297,25 +297,25 @@ pub trait NodeRefBase: Sized {
})
}
fn stats_question_sent(&self, ts: u64, bytes: u64, expects_answer: bool) {
fn stats_question_sent(&self, ts: Timestamp, bytes: Timestamp, expects_answer: bool) {
self.operate_mut(|rti, e| {
rti.transfer_stats_accounting().add_up(bytes);
e.question_sent(ts, bytes, expects_answer);
})
}
fn stats_question_rcvd(&self, ts: u64, bytes: u64) {
fn stats_question_rcvd(&self, ts: Timestamp, bytes: ByteCount) {
self.operate_mut(|rti, e| {
rti.transfer_stats_accounting().add_down(bytes);
e.question_rcvd(ts, bytes);
})
}
fn stats_answer_sent(&self, bytes: u64) {
fn stats_answer_sent(&self, bytes: ByteCount) {
self.operate_mut(|rti, e| {
rti.transfer_stats_accounting().add_up(bytes);
e.answer_sent(bytes);
})
}
fn stats_answer_rcvd(&self, send_ts: u64, recv_ts: u64, bytes: u64) {
fn stats_answer_rcvd(&self, send_ts: Timestamp, recv_ts: Timestamp, bytes: ByteCount) {
self.operate_mut(|rti, e| {
rti.transfer_stats_accounting().add_down(bytes);
rti.latency_stats_accounting()
@@ -328,7 +328,7 @@ pub trait NodeRefBase: Sized {
e.question_lost();
})
}
fn stats_failed_to_send(&self, ts: u64, expects_answer: bool) {
fn stats_failed_to_send(&self, ts: Timestamp, expects_answer: bool) {
self.operate_mut(|_rti, e| {
e.failed_to_send(ts, expects_answer);
})

View File

@@ -7,7 +7,7 @@ use rkyv::{
/// The size of the remote private route cache
const REMOTE_PRIVATE_ROUTE_CACHE_SIZE: usize = 1024;
/// Remote private route cache entries expire in 5 minutes if they haven't been used
const REMOTE_PRIVATE_ROUTE_CACHE_EXPIRY: u64 = 300_000_000u64;
const REMOTE_PRIVATE_ROUTE_CACHE_EXPIRY: TimestampDuration = 300_000_000u64.into();
/// Amount of time a route can remain idle before it gets tested
const ROUTE_MIN_IDLE_TIME_MS: u32 = 30_000;
@@ -80,25 +80,25 @@ impl RouteStats {
}
/// Mark a route as having received something
pub fn record_received(&mut self, cur_ts: u64, bytes: u64) {
pub fn record_received(&mut self, cur_ts: Timestamp, bytes: ByteCount) {
self.last_received_ts = Some(cur_ts);
self.last_tested_ts = Some(cur_ts);
self.transfer_stats_accounting.add_down(bytes);
}
/// Mark a route as having been sent to
pub fn record_sent(&mut self, cur_ts: u64, bytes: u64) {
pub fn record_sent(&mut self, cur_ts: Timestamp, bytes: ByteCount) {
self.last_sent_ts = Some(cur_ts);
self.transfer_stats_accounting.add_up(bytes);
}
/// Mark a route as having been sent to
pub fn record_latency(&mut self, latency: u64) {
pub fn record_latency(&mut self, latency: TimestampDuration) {
self.latency_stats = self.latency_stats_accounting.record_latency(latency);
}
/// Mark a route as having been tested
pub fn record_tested(&mut self, cur_ts: u64) {
pub fn record_tested(&mut self, cur_ts: Timestamp) {
self.last_tested_ts = Some(cur_ts);
// Reset question_lost and failed_to_send if we test clean
@@ -107,7 +107,7 @@ impl RouteStats {
}
/// Roll transfers for these route stats
pub fn roll_transfers(&mut self, last_ts: u64, cur_ts: u64) {
pub fn roll_transfers(&mut self, last_ts: Timestamp, cur_ts: Timestamp) {
self.transfer_stats_accounting.roll_transfers(
last_ts,
cur_ts,
@@ -133,7 +133,7 @@ impl RouteStats {
}
/// Check if a route needs testing
pub fn needs_testing(&self, cur_ts: u64) -> bool {
pub fn needs_testing(&self, cur_ts: Timestamp) -> bool {
// Has the route had any failures lately?
if self.questions_lost > 0 || self.failed_to_send > 0 {
// If so, always test
@@ -634,7 +634,7 @@ impl RouteSpecStore {
.map(|nr| nr.node_id());
// Get list of all nodes, and sort them for selection
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
let filter = Box::new(
move |rti: &RoutingTableInner, k: DHTKey, v: Option<Arc<BucketEntry>>| -> bool {
// Exclude our own node from routes
@@ -872,22 +872,25 @@ impl RouteSpecStore {
Ok(Some(public_key))
}
#[instrument(level = "trace", skip(self, data), ret, err)]
#[instrument(level = "trace", skip(self, data), ret)]
pub fn validate_signatures(
&self,
public_key: &DHTKey,
signatures: &[DHTSignature],
data: &[u8],
last_hop_id: DHTKey,
) -> EyreResult<Option<(DHTKeySecret, SafetySpec)>> {
) -> Option<(DHTKeySecret, SafetySpec)> {
let inner = &*self.inner.lock();
let rsd = Self::detail(inner, &public_key).ok_or_else(|| eyre!("route does not exist"))?;
let Some(rsd) = Self::detail(inner, &public_key) else {
log_rpc!(debug "route does not exist: {:?}", public_key);
return None;
};
// Ensure we have the right number of signatures
if signatures.len() != rsd.hops.len() - 1 {
// Wrong number of signatures
log_rpc!(debug "wrong number of signatures ({} should be {}) for routed operation on private route {}", signatures.len(), rsd.hops.len() - 1, public_key);
return Ok(None);
return None;
}
// Validate signatures to ensure the route was handled by the nodes and not messed with
// This is in private route (reverse) order as we are receiving over the route
@@ -897,18 +900,18 @@ impl RouteSpecStore {
// Verify the node we received the routed operation from is the last hop in our route
if *hop_public_key != last_hop_id {
log_rpc!(debug "received routed operation from the wrong hop ({} should be {}) on private route {}", hop_public_key.encode(), last_hop_id.encode(), public_key);
return Ok(None);
return None;
}
} else {
// Verify a signature for a hop node along the route
if let Err(e) = verify(hop_public_key, data, &signatures[hop_n]) {
log_rpc!(debug "failed to verify signature for hop {} at {} on private route {}: {}", hop_n, hop_public_key, public_key, e);
return Ok(None);
return None;
}
}
}
// We got the correct signatures, return a key and response safety spec
Ok(Some((
Some((
rsd.secret_key,
SafetySpec {
preferred_route: Some(*public_key),
@@ -916,7 +919,7 @@ impl RouteSpecStore {
stability: rsd.stability,
sequencing: rsd.sequencing,
},
)))
))
}
#[instrument(level = "trace", skip(self), ret, err)]
@@ -1002,7 +1005,7 @@ impl RouteSpecStore {
pub async fn test_route(&self, key: &DHTKey) -> EyreResult<bool> {
let is_remote = {
let inner = &mut *self.inner.lock();
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
Self::with_peek_remote_private_route(inner, cur_ts, key, |_| {}).is_some()
};
if is_remote {
@@ -1066,7 +1069,7 @@ impl RouteSpecStore {
pub fn release_route(&self, key: &DHTKey) -> bool {
let is_remote = {
let inner = &mut *self.inner.lock();
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
Self::with_peek_remote_private_route(inner, cur_ts, key, |_| {}).is_some()
};
if is_remote {
@@ -1087,7 +1090,7 @@ impl RouteSpecStore {
directions: DirectionSet,
avoid_node_ids: &[DHTKey],
) -> Option<DHTKey> {
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
let mut routes = Vec::new();
@@ -1167,7 +1170,7 @@ impl RouteSpecStore {
/// Get the debug description of a route
pub fn debug_route(&self, key: &DHTKey) -> Option<String> {
let inner = &mut *self.inner.lock();
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
// If this is a remote route, print it
if let Some(s) =
Self::with_peek_remote_private_route(inner, cur_ts, key, |rpi| format!("{:#?}", rpi))
@@ -1570,7 +1573,7 @@ impl RouteSpecStore {
}
// store the private route in our cache
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
let key = Self::with_create_remote_private_route(inner, cur_ts, private_route, |r| {
r.private_route.as_ref().unwrap().public_key.clone()
});
@@ -1593,7 +1596,7 @@ impl RouteSpecStore {
/// Retrieve an imported remote private route by its public key
pub fn get_remote_private_route(&self, key: &DHTKey) -> Option<PrivateRoute> {
let inner = &mut *self.inner.lock();
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
Self::with_get_remote_private_route(inner, cur_ts, key, |r| {
r.private_route.as_ref().unwrap().clone()
})
@@ -1602,7 +1605,7 @@ impl RouteSpecStore {
/// Retrieve an imported remote private route by its public key but don't 'touch' it
pub fn peek_remote_private_route(&self, key: &DHTKey) -> Option<PrivateRoute> {
let inner = &mut *self.inner.lock();
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
Self::with_peek_remote_private_route(inner, cur_ts, key, |r| {
r.private_route.as_ref().unwrap().clone()
})
@@ -1611,7 +1614,7 @@ impl RouteSpecStore {
// get or create a remote private route cache entry
fn with_create_remote_private_route<F, R>(
inner: &mut RouteSpecStoreInner,
cur_ts: u64,
cur_ts: Timestamp,
private_route: PrivateRoute,
f: F,
) -> R
@@ -1660,7 +1663,7 @@ impl RouteSpecStore {
// get a remote private route cache entry
fn with_get_remote_private_route<F, R>(
inner: &mut RouteSpecStoreInner,
cur_ts: u64,
cur_ts: Timestamp,
key: &DHTKey,
f: F,
) -> Option<R>
@@ -1680,7 +1683,7 @@ impl RouteSpecStore {
// peek a remote private route cache entry
fn with_peek_remote_private_route<F, R>(
inner: &mut RouteSpecStoreInner,
cur_ts: u64,
cur_ts: Timestamp,
key: &DHTKey,
f: F,
) -> Option<R>
@@ -1714,7 +1717,7 @@ impl RouteSpecStore {
let opt_rpr_node_info_ts = {
let inner = &mut *self.inner.lock();
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
Self::with_peek_remote_private_route(inner, cur_ts, key, |rpr| {
rpr.last_seen_our_node_info_ts
})
@@ -1736,7 +1739,7 @@ impl RouteSpecStore {
pub fn mark_remote_private_route_seen_our_node_info(
&self,
key: &DHTKey,
cur_ts: u64,
cur_ts: Timestamp,
) -> EyreResult<()> {
let our_node_info_ts = {
let rti = &*self.unlocked_inner.routing_table.inner.read();
@@ -1765,7 +1768,7 @@ impl RouteSpecStore {
}
/// Get the route statistics for any route we know about, local or remote
pub fn with_route_stats<F, R>(&self, cur_ts: u64, key: &DHTKey, f: F) -> Option<R>
pub fn with_route_stats<F, R>(&self, cur_ts: Timestamp, key: &DHTKey, f: F) -> Option<R>
where
F: FnOnce(&mut RouteStats) -> R,
{
@@ -1822,7 +1825,7 @@ impl RouteSpecStore {
}
/// Process transfer statistics to get averages
pub fn roll_transfers(&self, last_ts: u64, cur_ts: u64) {
pub fn roll_transfers(&self, last_ts: Timestamp, cur_ts: Timestamp) {
let inner = &mut *self.inner.lock();
// Roll transfers for locally allocated routes

View File

@@ -227,7 +227,7 @@ impl RoutingTableInner {
}
pub fn reset_all_updated_since_last_network_change(&mut self) {
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, _, v| {
v.with_mut(rti, |_rti, e| {
e.set_updated_since_last_network_change(false)
@@ -347,7 +347,7 @@ impl RoutingTableInner {
// If the local network topology has changed, nuke the existing local node info and let new local discovery happen
if changed {
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, _, e| {
e.with_mut(rti, |_rti, e| {
e.clear_signed_node_info(RoutingDomain::LocalNetwork);
@@ -426,7 +426,7 @@ impl RoutingTableInner {
min_state: BucketEntryState,
) -> usize {
let mut count = 0usize;
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
self.with_entries(cur_ts, min_state, |rti, _, e| {
if e.with(rti, |rti, e| e.best_routing_domain(rti, routing_domain_set))
.is_some()
@@ -466,7 +466,7 @@ impl RoutingTableInner {
F: FnMut(&mut RoutingTableInner, DHTKey, Arc<BucketEntry>) -> Option<T>,
>(
&mut self,
cur_ts: u64,
cur_ts: Timestamp,
min_state: BucketEntryState,
mut f: F,
) -> Option<T> {
@@ -491,7 +491,7 @@ impl RoutingTableInner {
&self,
outer_self: RoutingTable,
routing_domain: RoutingDomain,
cur_ts: u64,
cur_ts: Timestamp,
) -> Vec<NodeRef> {
// Collect relay nodes
let opt_relay_id = self.with_routing_domain(routing_domain, |rd| {
@@ -531,7 +531,7 @@ impl RoutingTableInner {
node_refs
}
pub fn get_all_nodes(&self, outer_self: RoutingTable, cur_ts: u64) -> Vec<NodeRef> {
pub fn get_all_nodes(&self, outer_self: RoutingTable, cur_ts: Timestamp) -> Vec<NodeRef> {
let mut node_refs = Vec::<NodeRef>::with_capacity(self.bucket_entry_count);
self.with_entries(cur_ts, BucketEntryState::Unreliable, |_rti, k, v| {
node_refs.push(NodeRef::new(outer_self.clone(), k, v, None));
@@ -700,7 +700,7 @@ impl RoutingTableInner {
outer_self: RoutingTable,
node_id: DHTKey,
descriptor: ConnectionDescriptor,
timestamp: u64,
timestamp: Timestamp,
) -> Option<NodeRef> {
let out = self.create_node_ref(outer_self, node_id, |_rti, e| {
// this node is live because it literally just connected to us
@@ -719,7 +719,7 @@ impl RoutingTableInner {
pub fn get_routing_table_health(&self) -> RoutingTableHealth {
let mut health = RoutingTableHealth::default();
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
for bucket in &self.buckets {
for (_, v) in bucket.entries() {
match v.with(self, |_rti, e| e.state(cur_ts)) {
@@ -876,7 +876,7 @@ impl RoutingTableInner {
where
T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
{
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
// Add filter to remove dead nodes always
let filter_dead = Box::new(
@@ -961,7 +961,7 @@ impl RoutingTableInner {
where
T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
{
let cur_ts = get_timestamp();
let cur_ts = get_aligned_timestamp();
let node_count = {
let config = self.config();
let c = config.get();

View File

@@ -13,8 +13,8 @@ pub const ROLLING_TRANSFERS_INTERVAL_SECS: u32 = 1;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct TransferCount {
down: u64,
up: u64,
down: ByteCount,
up: ByteCount,
}
#[derive(Debug, Clone, Default)]
@@ -31,18 +31,18 @@ impl TransferStatsAccounting {
}
}
pub fn add_down(&mut self, bytes: u64) {
pub fn add_down(&mut self, bytes: ByteCount) {
self.current_transfer.down += bytes;
}
pub fn add_up(&mut self, bytes: u64) {
pub fn add_up(&mut self, bytes: ByteCount) {
self.current_transfer.up += bytes;
}
pub fn roll_transfers(
&mut self,
last_ts: u64,
cur_ts: u64,
last_ts: Timestamp,
cur_ts: Timestamp,
transfer_stats: &mut TransferStatsDownUp,
) {
let dur_ms = cur_ts.saturating_sub(last_ts) / 1000u64;
@@ -80,7 +80,7 @@ impl TransferStatsAccounting {
#[derive(Debug, Clone, Default)]
pub struct LatencyStatsAccounting {
rolling_latencies: VecDeque<u64>,
rolling_latencies: VecDeque<TimestampDuration>,
}
impl LatencyStatsAccounting {
@@ -90,7 +90,7 @@ impl LatencyStatsAccounting {
}
}
pub fn record_latency(&mut self, latency: u64) -> veilid_api::LatencyStats {
pub fn record_latency(&mut self, latency: TimestampDuration) -> veilid_api::LatencyStats {
while self.rolling_latencies.len() >= ROLLING_LATENCIES_SIZE {
self.rolling_latencies.pop_front();
}

View File

@@ -7,8 +7,8 @@ impl RoutingTable {
pub(crate) async fn kick_buckets_task_routine(
self,
_stop_token: StopToken,
_last_ts: u64,
cur_ts: u64,
_last_ts: Timestamp,
cur_ts: Timestamp,
) -> EyreResult<()> {
let kick_queue: Vec<usize> = core::mem::take(&mut *self.unlocked_inner.kick_queue.lock())
.into_iter()

View File

@@ -10,7 +10,7 @@ impl RoutingTable {
#[instrument(level = "trace", skip(self), err)]
fn ping_validator_public_internet(
&self,
cur_ts: u64,
cur_ts: Timestamp,
unord: &mut FuturesUnordered<
SendPinBoxFuture<Result<NetworkResult<Answer<Option<SenderInfo>>>, RPCError>>,
>,

View File

@@ -72,8 +72,8 @@ impl RoutingTable {
pub(crate) async fn private_route_management_task_routine(
self,
stop_token: StopToken,
_last_ts: u64,
cur_ts: u64,
_last_ts: Timestamp,
cur_ts: Timestamp,
) -> EyreResult<()> {
// Get our node's current node info and network class and do the right thing
let network_class = self

View File

@@ -6,8 +6,8 @@ impl RoutingTable {
pub(crate) async fn relay_management_task_routine(
self,
_stop_token: StopToken,
_last_ts: u64,
cur_ts: u64,
_last_ts: Timestamp,
cur_ts: Timestamp,
) -> EyreResult<()> {
// Get our node's current node info and network class and do the right thing
let Some(own_peer_info) = self.get_own_peer_info(RoutingDomain::PublicInternet) else {

View File

@@ -6,8 +6,8 @@ impl RoutingTable {
pub(crate) async fn rolling_transfers_task_routine(
self,
_stop_token: StopToken,
last_ts: u64,
cur_ts: u64,
last_ts: Timestamp,
cur_ts: Timestamp,
) -> EyreResult<()> {
// log_rtab!("--- rolling_transfers task");
{