Radish alpha
h
rad:z3gqcJUoA1n9HaHKufZs5FCSGazv5
Radicle Heartwood Protocol & Stack
Radicle
Git
radicle-protocol: A Sans I/O Implementation of the Network Protocol
Merged lorenz opened 10 months ago

As of 2025-06 this is just an early draft.

The radicle-protocol crate was initialized by moving files out of radicle-node and adjusting the `use` declarations accordingly.

radicle-node and radicle-protocol use radicle::node::Link instead of netservices::LinkDirection, to remove radicle-protocol's dependency on netservices. To allow a “drop-in” replacement, a few backward-compatible extensions were added to radicle::node::Link.

The encoding and decoding layer of radicle-node was refactored to use the `bytes` crate (which does not depend on std::io::Error) instead of byteorder and std::io::Error. The dependency on byteorder was not completely removed from the Cargo workspace, as other crates still use it elsewhere.

56 files changed +9127 -9020 408d4f27 010d5134
modified Cargo.lock
@@ -350,6 +350,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"

[[package]]
+
name = "bytes"
+
version = "1.10.1"
+
source = "registry+https://github.com/rust-lang/crates.io-index"
+
checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
+

+
[[package]]
name = "bytesize"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2643,7 +2649,7 @@ dependencies = [
 "amplify",
 "anyhow",
 "bloomy",
-
 "byteorder",
+
 "bytes",
 "chrono",
 "colored",
 "crossbeam-channel",
@@ -2662,6 +2668,7 @@ dependencies = [
 "radicle-crypto",
 "radicle-fetch",
 "radicle-git-ext",
+
 "radicle-protocol",
 "radicle-signals",
 "radicle-systemd",
 "scrypt",
@@ -2675,6 +2682,31 @@ dependencies = [
]

[[package]]
+
name = "radicle-protocol"
+
version = "0.1.0"
+
dependencies = [
+
 "bloomy",
+
 "bytes",
+
 "crossbeam-channel",
+
 "cyphernet",
+
 "fastrand",
+
 "localtime",
+
 "log",
+
 "nonempty 0.9.0",
+
 "qcheck",
+
 "qcheck-macros",
+
 "radicle",
+
 "radicle-crypto",
+
 "radicle-fetch",
+
 "radicle-git-ext",
+
 "scrypt",
+
 "serde",
+
 "serde_json",
+
 "sqlite",
+
 "thiserror 1.0.69",
+
]
+

+
[[package]]
name = "radicle-remote-helper"
version = "0.11.0"
dependencies = [
modified Cargo.toml
@@ -22,7 +22,7 @@ rust-version = "1.81.0"
amplify = { version = "4.0.0", default-features = false }
anyhow = "1"
bstr = "1.3"
-
byteorder = "1.4"
+
bytes = "1"
chrono = { version = "0.4.26", default-features = false }
colored = "2.1.0"
crossbeam-channel = "0.5.6"
@@ -47,6 +47,7 @@ radicle-dag = { version = "0.10", path = "crates/radicle-dag" }
radicle-fetch = { version = "0.12", path = "crates/radicle-fetch" }
radicle-git-ext = { version = "0.8", default-features = false }
radicle-node = { version = "0.12", path = "crates/radicle-node" }
+
radicle-protocol = { version = "0.1", path = "crates/radicle-protocol" }
radicle-signals = { version = "0.11", path = "crates/radicle-signals" }
radicle-ssh = { version = "0.9", path = "crates/radicle-ssh", default-features = false }
radicle-systemd = { version = "0.9", path = "crates/radicle-systemd" }
modified crates/radicle-cli/tests/commands.rs
@@ -7,6 +7,8 @@ use radicle::node;
use radicle::node::address::Store as _;
use radicle::node::config::seeds::RADICLE_NODE_BOOTSTRAP_IRIS;
use radicle::node::config::DefaultSeedingPolicy;
+
use radicle::node::events::Event;
+
use radicle::node::policy::Scope;
use radicle::node::routing::Store as _;
use radicle::node::UserAgent;
use radicle::node::{Address, Alias, Config, Handle as _, DEFAULT_TIMEOUT};
@@ -16,8 +18,6 @@ use radicle::profile::Home;
use radicle::storage::{ReadStorage, RefUpdate, RemoteRepository};
use radicle::test::fixtures;

-
use radicle_node::service::policy::Scope;
-
use radicle_node::service::Event;
#[allow(unused_imports)]
use radicle_node::test::logger;
use radicle_node::test::node::Node;
modified crates/radicle-node/Cargo.toml
@@ -12,13 +12,13 @@ rust-version.workspace = true
[features]
default = ["systemd"]
systemd = ["dep:radicle-systemd"]
-
test = ["radicle/test", "radicle-crypto/test", "radicle-crypto/cyphernet", "qcheck", "snapbox"]
+
test = ["radicle/test", "radicle-crypto/test", "radicle-crypto/cyphernet", "radicle-protocol/test", "qcheck", "snapbox"]

[dependencies]
amplify = { workspace = true }
anyhow = { workspace = true }
bloomy = "1.2"
-
byteorder = { workspace = true }
+
bytes = { workspace = true }
chrono = { workspace = true, features = ["clock"] }
colored = { workspace = true }
crossbeam-channel = { workspace = true }
@@ -37,6 +37,7 @@ radicle-fetch = { workspace = true }
# N.b. this is required to use macros, even though it's re-exported
# through radicle
radicle-git-ext = { workspace = true, features = ["serde"] }
+
radicle-protocol = { workspace = true }
radicle-signals = { workspace = true }
radicle-systemd = { workspace = true, optional = true }
sqlite = { workspace = true, features = ["bundled"] }
@@ -53,4 +54,4 @@ qcheck = { workspace = true }
qcheck-macros = { workspace = true }
radicle = { workspace = true, features = ["test"] }
radicle-crypto = { workspace = true, features = ["test", "cyphernet"] }
-
snapbox = { workspace = true }

\ No newline at end of file
+
snapbox = { workspace = true }
deleted crates/radicle-node/src/bounded.rs
@@ -1,241 +0,0 @@
-
use std::{
-
    collections::BTreeSet,
-
    ops::{self, RangeBounds},
-
};
-

-
#[derive(thiserror::Error, Debug)]
-
pub enum Error {
-
    #[error("invalid size: expected {expected}, got {actual}")]
-
    InvalidSize { expected: usize, actual: usize },
-
}
-

-
/// A vector with an upper limit on its size using type level constants.
-
#[derive(Default, Clone, PartialEq, Eq)]
-
pub struct BoundedVec<T, const N: usize> {
-
    v: Vec<T>,
-
}
-

-
impl<T, const N: usize> BoundedVec<T, N> {
-
    /// Create a new empty `BoundedVec<T,N>`.
-
    pub fn new() -> Self {
-
        BoundedVec {
-
            v: Vec::with_capacity(N),
-
        }
-
    }
-

-
    /// Build a `BoundedVec` by consuming from the given iterator up to its limit.
-
    ///
-
    /// # Examples
-
    ///
-
    /// ```
-
    /// use radicle_node::bounded;
-
    ///
-
    /// let mut iter = (0..4).into_iter();
-
    /// let bounded: bounded::BoundedVec<i32,3> = bounded::BoundedVec::collect_from(&mut iter);
-
    ///
-
    /// assert_eq!(bounded.len(), 3);
-
    /// assert_eq!(iter.count(), 1);
-
    /// ```
-
    pub fn collect_from<I: IntoIterator<Item = T>>(iter: I) -> Self {
-
        BoundedVec {
-
            v: iter.into_iter().take(N).collect(),
-
        }
-
    }
-

-
    /// Create a new `BoundedVec<T,N>` which takes upto the first N values of its argument, taking
-
    /// ownership.
-
    ///
-
    /// # Examples
-
    ///
-
    /// ```
-
    /// use radicle_node::bounded;
-
    ///
-
    /// let mut vec = vec![1, 2, 3];
-
    /// let bounded = bounded::BoundedVec::<_, 2>::truncate(vec);
-
    /// assert_eq!(bounded.len(), 2);
-
    /// ```
-
    pub fn truncate(mut v: Vec<T>) -> Self {
-
        v.truncate(N);
-
        BoundedVec { v }
-
    }
-

-
    /// Like [`Vec::with_capacity`] but returns an error if the allocation size exceeds the limit.
-
    ///
-
    /// # Examples
-
    ///
-
    /// ```
-
    /// use radicle_node::bounded;
-
    ///
-
    /// let vec = bounded::BoundedVec::<i32, 11>::with_capacity(10).unwrap();
-
    ///
-
    /// // The vector contains no items, even though it has capacity for more
-
    /// assert_eq!(vec.len(), 0);
-
    /// assert!(vec.capacity() >= 10);
-
    ///
-
    /// // A vector with a capacity over its limit will result in error.
-
    /// let vec_res = bounded::BoundedVec::<i32, 10>::with_capacity(11);
-
    /// assert!(vec_res.is_err());
-
    /// ```
-
    #[inline]
-
    pub fn with_capacity(capacity: usize) -> Result<Self, Error> {
-
        if capacity > N {
-
            return Err(Error::InvalidSize {
-
                expected: N,
-
                actual: capacity,
-
            });
-
        }
-
        Ok(Self {
-
            v: Vec::with_capacity(capacity),
-
        })
-
    }
-

-
    /// Return the maximum number of elements BoundedVec can contain.
-
    ///
-
    /// # Examples
-
    ///
-
    /// ```
-
    /// use radicle_node::bounded;
-
    ///
-
    /// type Inventory = bounded::BoundedVec<(), 10>;
-
    /// assert_eq!(Inventory::max(), 10);
-
    /// ```
-
    #[inline]
-
    pub fn max() -> usize {
-
        N
-
    }
-

-
    /// Extracts a slice containing the entire bounded vector.
-
    #[inline]
-
    pub fn as_slice(&self) -> &[T] {
-
        self.v.as_slice()
-
    }
-

-
    /// Returns the number of elements the bounded vector can hold without reallocating.
-
    pub fn capacity(&self) -> usize {
-
        self.v.capacity()
-
    }
-

-
    /// Like [`Vec::push`] but returns an error if the limit is exceeded.
-
    ///
-
    /// # Examples
-
    ///
-
    /// ```
-
    /// use radicle_node::bounded;
-
    ///
-
    /// let mut vec: bounded::BoundedVec<_,3> = vec![1, 2].try_into().unwrap();
-
    /// vec.push(3).expect("within limit");
-
    /// assert_eq!(vec, vec![1, 2, 3].try_into().unwrap());
-
    ///
-
    /// // ...but this will exceed its limit, returning an error.
-
    /// vec.push(4).expect_err("limit exceeded");
-
    /// assert_eq!(vec.len(), 3);
-
    /// ```
-
    #[inline]
-
    pub fn push(&mut self, item: T) -> Result<(), Error> {
-
        if self.len() >= N {
-
            return Err(Error::InvalidSize {
-
                expected: N,
-
                actual: N + 1,
-
            });
-
        }
-
        self.v.push(item);
-
        Ok(())
-
    }
-

-
    /// Return the underlying vector without an upper limit.
-
    ///
-
    /// # Examples
-
    ///
-
    /// ```
-
    /// use radicle_node::bounded;
-
    ///
-
    /// let mut bounded: bounded::BoundedVec<_,3> = vec![1, 2, 3].try_into().unwrap();
-
    /// let mut vec = bounded.unbound();
-
    ///
-
    /// vec.push(4);
-
    /// assert_eq!(vec.len(), 4);
-
    /// ```
-
    pub fn unbound(self) -> Vec<T> {
-
        self.v
-
    }
-

-
    /// Calls [`std::vec::Drain`].
-
    pub fn drain<R: RangeBounds<usize>>(&mut self, range: R) -> std::vec::Drain<T> {
-
        self.v.drain(range)
-
    }
-
}
-

-
impl<T: Clone, const N: usize> BoundedVec<T, N> {
-
    /// Like [`Vec::extend_from_slice`] but returns an error if out of bounds.
-
    pub fn extend_from_slice(&mut self, slice: &[T]) -> Result<(), Error> {
-
        if self.len() + slice.len() > N {
-
            return Err(Error::InvalidSize {
-
                expected: N,
-
                actual: self.len() + slice.len(),
-
            });
-
        }
-
        self.v.extend_from_slice(slice);
-

-
        Ok(())
-
    }
-
}
-

-
impl<T, const N: usize> ops::Deref for BoundedVec<T, N> {
-
    type Target = [T];
-

-
    fn deref(&self) -> &Self::Target {
-
        self.v.as_slice()
-
    }
-
}
-

-
impl<T, const N: usize> From<Option<T>> for BoundedVec<T, N> {
-
    fn from(value: Option<T>) -> Self {
-
        let v = match value {
-
            None => vec![],
-
            Some(v) => vec![v],
-
        };
-
        BoundedVec { v }
-
    }
-
}
-

-
impl<T, const N: usize> TryFrom<Vec<T>> for BoundedVec<T, N> {
-
    type Error = Error;
-

-
    fn try_from(value: Vec<T>) -> Result<Self, Self::Error> {
-
        if value.len() > N {
-
            return Err(Error::InvalidSize {
-
                expected: N,
-
                actual: value.len(),
-
            });
-
        }
-
        Ok(BoundedVec { v: value })
-
    }
-
}
-

-
impl<T, const N: usize> TryFrom<BTreeSet<T>> for BoundedVec<T, N> {
-
    type Error = Error;
-

-
    fn try_from(value: BTreeSet<T>) -> Result<Self, Self::Error> {
-
        if value.len() > N {
-
            return Err(Error::InvalidSize {
-
                expected: N,
-
                actual: value.len(),
-
            });
-
        }
-
        Ok(BoundedVec {
-
            v: value.into_iter().collect(),
-
        })
-
    }
-
}
-

-
impl<T, const N: usize> From<BoundedVec<T, N>> for Vec<T> {
-
    fn from(value: BoundedVec<T, N>) -> Self {
-
        value.v
-
    }
-
}
-

-
impl<T: std::fmt::Debug, const N: usize> std::fmt::Debug for BoundedVec<T, N> {
-
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-
        self.v.fmt(f)
-
    }
-
}
modified crates/radicle-node/src/control.rs
@@ -246,9 +246,9 @@ mod tests {

    use super::*;
    use crate::identity::RepoId;
+
    use crate::node::policy::Scope;
    use crate::node::Handle;
    use crate::node::{Alias, Node, NodeId};
-
    use crate::service::policy::Scope;
    use crate::test;

    #[test]
deleted crates/radicle-node/src/deserializer.rs
@@ -1,163 +0,0 @@
-
use std::io;
-
use std::marker::PhantomData;
-

-
use crate::bounded;
-
use crate::prelude::BoundedVec;
-
use crate::service::message::Message;
-
use crate::wire;
-

-
/// Message stream deserializer.
-
///
-
/// Used to for example turn a byte stream into network messages.
-
#[derive(Debug)]
-
pub struct Deserializer<const B: usize, D = Message> {
-
    unparsed: BoundedVec<u8, B>,
-
    item: PhantomData<D>,
-
}
-

-
impl<const B: usize, D: wire::Decode> Default for Deserializer<B, D> {
-
    fn default() -> Self {
-
        Self::new(wire::Size::MAX as usize + 1)
-
    }
-
}
-

-
impl<const B: usize, D> TryFrom<Vec<u8>> for Deserializer<B, D> {
-
    type Error = bounded::Error;
-

-
    fn try_from(unparsed: Vec<u8>) -> Result<Self, Self::Error> {
-
        BoundedVec::try_from(unparsed).map(|unparsed| Self {
-
            unparsed,
-
            item: PhantomData,
-
        })
-
    }
-
}
-

-
impl<const B: usize, D: wire::Decode> Deserializer<B, D> {
-
    /// Create a new stream decoder.
-
    pub fn new(capacity: usize) -> Self {
-
        Self {
-
            unparsed: BoundedVec::with_capacity(capacity)
-
                .expect("Deserializer::new: capacity exceeds maximum"),
-
            item: PhantomData,
-
        }
-
    }
-

-
    /// Input bytes into the decoder.
-
    pub fn input(&mut self, bytes: &[u8]) -> Result<(), bounded::Error> {
-
        self.unparsed.extend_from_slice(bytes)
-
    }
-

-
    /// Decode and return the next message. Returns [`None`] if nothing was decoded.
-
    pub fn deserialize_next(&mut self) -> Result<Option<D>, wire::Error> {
-
        let mut reader = io::Cursor::new(self.unparsed.as_slice());
-

-
        match D::decode(&mut reader) {
-
            Ok(msg) => {
-
                let pos = reader.position() as usize;
-
                self.unparsed.drain(..pos);
-

-
                Ok(Some(msg))
-
            }
-
            Err(err) if err.is_eof() => Ok(None),
-
            Err(err) => Err(err),
-
        }
-
    }
-

-
    /// Drain the unparsed buffer.
-
    pub fn unparsed(&mut self) -> impl ExactSizeIterator<Item = u8> + '_ {
-
        self.unparsed.drain(..)
-
    }
-

-
    /// Return whether there are unparsed bytes.
-
    pub fn is_empty(&self) -> bool {
-
        self.unparsed.is_empty()
-
    }
-

-
    /// Return the size of the unparsed data.
-
    pub fn len(&self) -> usize {
-
        self.unparsed.len()
-
    }
-
}
-

-
impl<const B: usize, D: wire::Decode> io::Write for Deserializer<B, D> {
-
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
-
        self.input(buf).map_err(|_| io::ErrorKind::OutOfMemory)?;
-

-
        Ok(buf.len())
-
    }
-

-
    fn flush(&mut self) -> io::Result<()> {
-
        Ok(())
-
    }
-
}
-

-
impl<const B: usize, D: wire::Decode> Iterator for Deserializer<B, D> {
-
    type Item = Result<D, wire::Error>;
-

-
    fn next(&mut self) -> Option<Self::Item> {
-
        self.deserialize_next().transpose()
-
    }
-
}
-

-
#[cfg(test)]
-
mod test {
-
    use super::*;
-
    use qcheck_macros::quickcheck;
-

-
    use crate::test::assert_matches;
-

-
    const MSG_HELLO: &[u8] = &[5, b'h', b'e', b'l', b'l', b'o'];
-
    const MSG_BYE: &[u8] = &[3, b'b', b'y', b'e'];
-

-
    #[test]
-
    fn test_decode_next() {
-
        let mut decoder = Deserializer::<1024, String>::new(8);
-

-
        decoder.input(&[3, b'b']).unwrap();
-
        assert_matches!(decoder.deserialize_next(), Ok(None));
-
        assert_eq!(decoder.unparsed.len(), 2);
-

-
        decoder.input(&[b'y']).unwrap();
-
        assert_matches!(decoder.deserialize_next(), Ok(None));
-
        assert_eq!(decoder.unparsed.len(), 3);
-

-
        decoder.input(&[b'e']).unwrap();
-
        assert_matches!(decoder.deserialize_next(), Ok(Some(s)) if s.as_str() == "bye");
-
        assert_eq!(decoder.unparsed.len(), 0);
-
        assert!(decoder.is_empty());
-
    }
-

-
    #[test]
-
    fn test_unparsed() {
-
        let mut decoder = Deserializer::<1024, String>::new(8);
-

-
        decoder.input(&[3, b'b', b'y']).unwrap();
-
        assert_eq!(decoder.unparsed().collect::<Vec<_>>(), vec![3, b'b', b'y']);
-
        assert!(decoder.is_empty());
-
    }
-

-
    #[quickcheck]
-
    fn prop_decode_next(chunk_size: usize) {
-
        let mut bytes = vec![];
-
        let mut msgs = vec![];
-
        let mut decoder = Deserializer::<1024, String>::new(8);
-

-
        let chunk_size = 1 + chunk_size % MSG_HELLO.len() + MSG_BYE.len();
-

-
        bytes.extend_from_slice(MSG_HELLO);
-
        bytes.extend_from_slice(MSG_BYE);
-

-
        for chunk in bytes.as_slice().chunks(chunk_size) {
-
            decoder.input(chunk).unwrap();
-

-
            while let Some(msg) = decoder.deserialize_next().unwrap() {
-
                msgs.push(msg);
-
            }
-
        }
-

-
        assert_eq!(decoder.unparsed.len(), 0);
-
        assert_eq!(msgs.len(), 2);
-
        assert_eq!(msgs[0], String::from("hello"));
-
        assert_eq!(msgs[1], String::from("bye"));
-
    }
-
}
modified crates/radicle-node/src/lib.rs
@@ -3,11 +3,12 @@
// suggestions did not make sense.
#![allow(clippy::byte_char_slices)]

-
pub mod bounded;
+
use std::str::FromStr;
+
use std::sync::LazyLock;
+

pub mod control;
-
pub mod deserializer;
pub mod runtime;
-
pub mod service;
+
pub(crate) use radicle_protocol::service;
#[cfg(any(test, feature = "test"))]
pub mod test;
#[cfg(test)]
@@ -18,7 +19,8 @@ pub mod worker;
use radicle::version::Version;

pub use localtime::{LocalDuration, LocalTime};
-
pub use netservices::Direction as Link;
+
pub use radicle::node::Link;
+
pub use radicle::node::UserAgent;
pub use radicle::node::PROTOCOL_VERSION;
pub use radicle::prelude::Timestamp;
pub use radicle::{collections, crypto, git, identity, node, profile, rad, storage};
@@ -32,14 +34,18 @@ pub const VERSION: Version = Version {
    timestamp: env!("SOURCE_DATE_EPOCH"),
};

+
/// This node's user agent string.
+
pub static USER_AGENT: LazyLock<UserAgent> = LazyLock::new(|| {
+
    FromStr::from_str(format!("/radicle:{}/", VERSION.version).as_str())
+
        .expect("user agent is valid")
+
});
+

pub mod prelude {
-
    pub use crate::bounded::BoundedVec;
    pub use crate::crypto::{PublicKey, Signature};
-
    pub use crate::deserializer::Deserializer;
    pub use crate::identity::{Did, RepoId};
-
    pub use crate::node::Address;
+
    pub use crate::node::{config::Network, Address, Event, NodeId};
    pub use crate::service::filter::Filter;
-
    pub use crate::service::{DisconnectReason, Event, Message, Network, NodeId};
+
    pub use crate::service::{DisconnectReason, Message};
    pub use crate::storage::refs::Refs;
    pub use crate::storage::WriteStorage;
    pub use crate::{LocalDuration, LocalTime, Timestamp};
modified crates/radicle-node/src/runtime.rs
@@ -21,6 +21,8 @@ use radicle::node;
use radicle::node::address;
use radicle::node::address::Store as _;
use radicle::node::notifications;
+
use radicle::node::policy::config as policy;
+
use radicle::node::Event;
use radicle::node::Handle as _;
use radicle::node::UserAgent;
use radicle::profile::Home;
@@ -29,7 +31,7 @@ use radicle::{cob, git, storage, Storage};
use crate::control;
use crate::node::{routing, NodeId};
use crate::service::message::NodeAnnouncement;
-
use crate::service::{gossip, policy, Event, INITIAL_SUBSCRIBE_BACKLOG_DELTA};
+
use crate::service::{gossip, INITIAL_SUBSCRIBE_BACKLOG_DELTA};
use crate::wire;
use crate::wire::{Decode, Wire};
use crate::worker;
@@ -117,7 +119,7 @@ impl Runtime {
    /// This function spawns threads.
    pub fn init<G>(
        home: Home,
-
        config: service::Config,
+
        config: radicle::node::Config,
        listen: Vec<net::SocketAddr>,
        signals: chan::Receiver<Signal>,
        signer: Device<G>,
modified crates/radicle-node/src/runtime/handle.rs
@@ -5,8 +5,10 @@ use std::sync::Arc;
use std::{fmt, io, time};

use crossbeam_channel as chan;
+
use radicle::node::events::{Event, Events};
+
use radicle::node::policy;
+
use radicle::node::{Config, NodeId};
use radicle::node::{ConnectOptions, ConnectResult, Seeds};
-
use radicle::storage::refs::RefsAt;
use reactor::poller::popol::PopolWaker;
use serde_json::json;
use thiserror::Error;
@@ -16,10 +18,8 @@ use crate::node::{Alias, Command, FetchResult};
use crate::profile::Home;
use crate::runtime::Emitter;
use crate::service;
-
use crate::service::policy;
-
use crate::service::NodeId;
-
use crate::service::{CommandError, Config, QueryState};
-
use crate::service::{Event, Events};
+
use crate::service::{CommandError, QueryState};
+
use crate::storage::refs::RefsAt;
use crate::wire;
use crate::wire::StreamId;
use crate::worker::TaskResult;
deleted crates/radicle-node/src/service.rs
@@ -1,2858 +0,0 @@
-
#![allow(clippy::too_many_arguments)]
-
#![allow(clippy::collapsible_match)]
-
#![allow(clippy::collapsible_if)]
-
#![warn(clippy::unwrap_used)]
-
pub mod filter;
-
pub mod gossip;
-
pub mod io;
-
pub mod limiter;
-
pub mod message;
-
pub mod session;
-

-
use std::collections::hash_map::Entry;
-
use std::collections::{BTreeSet, HashMap, HashSet};
-
use std::net::IpAddr;
-
use std::ops::{Deref, DerefMut};
-
use std::sync::Arc;
-
use std::{fmt, net, time};
-

-
use crossbeam_channel as chan;
-
use fastrand::Rng;
-
use localtime::{LocalDuration, LocalTime};
-
use log::*;
-
use nonempty::NonEmpty;
-

-
use radicle::identity::Doc;
-
use radicle::node;
-
use radicle::node::address;
-
use radicle::node::address::Store as _;
-
use radicle::node::address::{AddressBook, AddressType, KnownAddress};
-
use radicle::node::config::PeerConfig;
-
use radicle::node::device::Device;
-
use radicle::node::refs::Store as _;
-
use radicle::node::routing::Store as _;
-
use radicle::node::seed;
-
use radicle::node::seed::Store as _;
-
use radicle::node::{ConnectOptions, Penalty, Severity};
-
use radicle::storage::refs::SIGREFS_BRANCH;
-
use radicle::storage::RepositoryError;
-
use radicle_fetch::policy::SeedingPolicy;
-

-
use crate::identity::RepoId;
-
use crate::node::routing;
-
use crate::node::routing::InsertResult;
-
use crate::node::{
-
    Address, Alias, Features, FetchResult, HostName, Seed, Seeds, SyncStatus, SyncedAt,
-
};
-
use crate::prelude::*;
-
use crate::runtime::Emitter;
-
use crate::service::gossip::Store as _;
-
use crate::service::message::{
-
    Announcement, AnnouncementMessage, Info, NodeAnnouncement, Ping, RefsAnnouncement, RefsStatus,
-
};
-
use crate::service::policy::{store::Write, Scope};
-
use crate::storage;
-
use crate::storage::{refs::RefsAt, Namespaces, ReadStorage};
-
use crate::worker::fetch;
-
use crate::worker::FetchError;
-
use crate::Link;
-
use crate::{crypto, PROTOCOL_VERSION};
-

-
pub use crate::node::events::{Event, Events};
-
pub use crate::node::{config::Network, Config, NodeId};
-
pub use crate::service::message::{Message, ZeroBytes};
-
pub use crate::service::session::{QueuedFetch, Session};
-

-
pub use radicle::node::policy::config as policy;
-

-
use self::io::Outbox;
-
use self::limiter::RateLimiter;
-
use self::message::InventoryAnnouncement;
-
use self::policy::NamespacesError;
-

-
/// How often to run the "idle" task.
-
pub const IDLE_INTERVAL: LocalDuration = LocalDuration::from_secs(30);
-
/// How often to run the "gossip" task.
-
pub const GOSSIP_INTERVAL: LocalDuration = LocalDuration::from_secs(6);
-
/// How often to run the "announce" task.
-
pub const ANNOUNCE_INTERVAL: LocalDuration = LocalDuration::from_mins(60);
-
/// How often to run the "sync" task.
-
pub const SYNC_INTERVAL: LocalDuration = LocalDuration::from_secs(60);
-
/// How often to run the "prune" task.
-
pub const PRUNE_INTERVAL: LocalDuration = LocalDuration::from_mins(30);
-
/// Duration to wait on an unresponsive peer before dropping its connection.
-
pub const STALE_CONNECTION_TIMEOUT: LocalDuration = LocalDuration::from_mins(2);
-
/// How much time should pass after a peer was last active for a *ping* to be sent.
-
pub const KEEP_ALIVE_DELTA: LocalDuration = LocalDuration::from_mins(1);
-
/// Maximum number of latency values to keep for a session.
-
pub const MAX_LATENCIES: usize = 16;
-
/// Maximum time difference between the local time, and an announcement timestamp.
-
pub const MAX_TIME_DELTA: LocalDuration = LocalDuration::from_mins(60);
-
/// Maximum attempts to connect to a peer before we give up.
-
pub const MAX_CONNECTION_ATTEMPTS: usize = 3;
-
/// How far back from the present time should we request gossip messages when connecting to a peer,
-
/// when we come online for the first time.
-
pub const INITIAL_SUBSCRIBE_BACKLOG_DELTA: LocalDuration = LocalDuration::from_mins(60 * 24);
-
/// When subscribing, what margin of error do we give ourselves. A igher delta means we ask for
-
/// messages further back than strictly necessary, to account for missed messages.
-
pub const SUBSCRIBE_BACKLOG_DELTA: LocalDuration = LocalDuration::from_mins(3);
-
/// Minimum amount of time to wait before reconnecting to a peer.
-
pub const MIN_RECONNECTION_DELTA: LocalDuration = LocalDuration::from_secs(3);
-
/// Maximum amount of time to wait before reconnecting to a peer.
-
pub const MAX_RECONNECTION_DELTA: LocalDuration = LocalDuration::from_mins(60);
-
/// Connection retry delta used for ephemeral peers that failed to connect previously.
-
pub const CONNECTION_RETRY_DELTA: LocalDuration = LocalDuration::from_mins(10);
-
/// How long to wait for a fetch to stall before aborting, default is 3s.
-
pub const FETCH_TIMEOUT: time::Duration = time::Duration::from_secs(3);
-
/// Target number of peers to maintain connections to.
-
pub const TARGET_OUTBOUND_PEERS: usize = 8;
-

-
/// Maximum external address limit imposed by message size limits.
-
pub use message::ADDRESS_LIMIT;
-
/// Maximum inventory limit imposed by message size limits.
-
pub use message::INVENTORY_LIMIT;
-
/// Maximum number of project git references imposed by message size limits.
-
pub use message::REF_REMOTE_LIMIT;
-

-
/// Metrics we track.
-
#[derive(Clone, Debug, Default, serde::Serialize)]
-
#[serde(rename_all = "camelCase")]
-
pub struct Metrics {
-
    /// Metrics for each peer.
-
    pub peers: HashMap<NodeId, PeerMetrics>,
-
    /// Tasks queued in worker queue.
-
    pub worker_queue_size: usize,
-
    /// Current open channel count.
-
    pub open_channels: usize,
-
}
-

-
impl Metrics {
-
    /// Get metrics for the given peer.
-
    pub fn peer(&mut self, nid: NodeId) -> &mut PeerMetrics {
-
        self.peers.entry(nid).or_default()
-
    }
-
}
-

-
/// Per-peer metrics we track.
-
#[derive(Clone, Debug, Default, serde::Serialize)]
-
#[serde(rename_all = "camelCase")]
-
pub struct PeerMetrics {
-
    pub received_git_bytes: usize,
-
    pub received_fetch_requests: usize,
-
    pub received_bytes: usize,
-
    pub received_gossip_messages: usize,
-
    pub sent_bytes: usize,
-
    pub sent_fetch_requests: usize,
-
    pub sent_git_bytes: usize,
-
    pub sent_gossip_messages: usize,
-
    pub streams_opened: usize,
-
    pub inbound_connection_attempts: usize,
-
    pub outbound_connection_attempts: usize,
-
    pub disconnects: usize,
-
}
-

-
/// Result of syncing our routing table with a node's inventory.
-
#[derive(Default)]
-
struct SyncedRouting {
-
    /// Repo entries added.
-
    added: Vec<RepoId>,
-
    /// Repo entries removed.
-
    removed: Vec<RepoId>,
-
    /// Repo entries updated (time).
-
    updated: Vec<RepoId>,
-
}
-

-
impl SyncedRouting {
-
    fn is_empty(&self) -> bool {
-
        self.added.is_empty() && self.removed.is_empty() && self.updated.is_empty()
-
    }
-
}
-

-
/// A peer we can connect to.
-
#[derive(Debug, Clone)]
-
struct Peer {
-
    nid: NodeId,
-
    addresses: Vec<KnownAddress>,
-
    penalty: Penalty,
-
}
-

-
/// General service error.
-
#[derive(thiserror::Error, Debug)]
-
pub enum Error {
-
    #[error(transparent)]
-
    Git(#[from] radicle::git::raw::Error),
-
    #[error(transparent)]
-
    GitExt(#[from] radicle::git::ext::Error),
-
    #[error(transparent)]
-
    Storage(#[from] storage::Error),
-
    #[error(transparent)]
-
    Gossip(#[from] gossip::Error),
-
    #[error(transparent)]
-
    Refs(#[from] storage::refs::Error),
-
    #[error(transparent)]
-
    Routing(#[from] routing::Error),
-
    #[error(transparent)]
-
    Address(#[from] address::Error),
-
    #[error(transparent)]
-
    Database(#[from] node::db::Error),
-
    #[error(transparent)]
-
    Seeds(#[from] seed::Error),
-
    #[error(transparent)]
-
    Policy(#[from] policy::Error),
-
    #[error(transparent)]
-
    Repository(#[from] radicle::storage::RepositoryError),
-
    #[error("namespaces error: {0}")]
-
    Namespaces(#[from] NamespacesError),
-
}
-

-
#[derive(thiserror::Error, Debug)]
-
pub enum ConnectError {
-
    #[error("attempted connection to peer {nid} which already has a session")]
-
    SessionExists { nid: NodeId },
-
    #[error("attempted connection to self")]
-
    SelfConnection,
-
    #[error("outbound connection limit reached when attempting {nid} ({addr})")]
-
    LimitReached { nid: NodeId, addr: Address },
-
}
-

-
/// A store for all node data.
-
pub trait Store:
-
    address::Store + gossip::Store + routing::Store + seed::Store + node::refs::Store
-
{
-
}
-

-
impl Store for node::Database {}
-

-
/// Function used to query internal service state.
-
pub type QueryState = dyn Fn(&dyn ServiceState) -> Result<(), CommandError> + Send + Sync;
-

-
/// Commands sent to the service by the operator.
-
pub enum Command {
-
    /// Announce repository references for given repository to peers.
-
    AnnounceRefs(RepoId, chan::Sender<RefsAt>),
-
    /// Announce local repositories to peers.
-
    AnnounceInventory,
-
    /// Add repository to local inventory.
-
    AddInventory(RepoId, chan::Sender<bool>),
-
    /// Connect to node with the given address.
-
    Connect(NodeId, Address, ConnectOptions),
-
    /// Disconnect from node.
-
    Disconnect(NodeId),
-
    /// Get the node configuration.
-
    Config(chan::Sender<Config>),
-
    /// Get the node's listen addresses.
-
    ListenAddrs(chan::Sender<Vec<std::net::SocketAddr>>),
-
    /// Lookup seeds for the given repository in the routing table.
-
    Seeds(RepoId, chan::Sender<Seeds>),
-
    /// Fetch the given repository from the network.
-
    Fetch(RepoId, NodeId, time::Duration, chan::Sender<FetchResult>),
-
    /// Seed the given repository.
-
    Seed(RepoId, Scope, chan::Sender<bool>),
-
    /// Unseed the given repository.
-
    Unseed(RepoId, chan::Sender<bool>),
-
    /// Follow the given node.
-
    Follow(NodeId, Option<Alias>, chan::Sender<bool>),
-
    /// Unfollow the given node.
-
    Unfollow(NodeId, chan::Sender<bool>),
-
    /// Query the internal service state.
-
    QueryState(Arc<QueryState>, chan::Sender<Result<(), CommandError>>),
-
}
-

-
impl fmt::Debug for Command {
-
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-
        match self {
-
            Self::AnnounceRefs(id, _) => write!(f, "AnnounceRefs({id})"),
-
            Self::AnnounceInventory => write!(f, "AnnounceInventory"),
-
            Self::AddInventory(rid, _) => write!(f, "AddInventory({rid})"),
-
            Self::Connect(id, addr, opts) => write!(f, "Connect({id}, {addr}, {opts:?})"),
-
            Self::Disconnect(id) => write!(f, "Disconnect({id})"),
-
            Self::Config(_) => write!(f, "Config"),
-
            Self::ListenAddrs(_) => write!(f, "ListenAddrs"),
-
            Self::Seeds(id, _) => write!(f, "Seeds({id})"),
-
            Self::Fetch(id, node, _, _) => write!(f, "Fetch({id}, {node})"),
-
            Self::Seed(id, scope, _) => write!(f, "Seed({id}, {scope})"),
-
            Self::Unseed(id, _) => write!(f, "Unseed({id})"),
-
            Self::Follow(id, _, _) => write!(f, "Follow({id})"),
-
            Self::Unfollow(id, _) => write!(f, "Unfollow({id})"),
-
            Self::QueryState { .. } => write!(f, "QueryState(..)"),
-
        }
-
    }
-
}
-

-
/// Command-related errors.
///
/// All variants are `transparent`: the source error's message is used as-is.
#[derive(thiserror::Error, Debug)]
pub enum CommandError {
    /// Error in the storage layer.
    #[error(transparent)]
    Storage(#[from] storage::Error),
    /// Error in the routing table.
    #[error(transparent)]
    Routing(#[from] routing::Error),
    /// Error in the policy configuration.
    #[error(transparent)]
    Policy(#[from] policy::Error),
}
-

-
/// Error returned by [`Service::try_fetch`].
#[derive(thiserror::Error, Debug)]
enum TryFetchError<'a> {
    /// A fetch for this repository is already in progress; carries the
    /// existing state so the caller can subscribe to it or queue behind it.
    #[error("ongoing fetch for repository exists")]
    AlreadyFetching(&'a mut FetchState),
    /// The target session is unknown or not in a connected state.
    #[error("peer is not connected; cannot initiate fetch")]
    SessionNotConnected,
    /// The target session has reached its concurrent-fetch limit.
    #[error("peer fetch capacity reached; cannot initiate fetch")]
    SessionCapacityReached,
    #[error(transparent)]
    Namespaces(#[from] NamespacesError),
}
-

-
/// Fetch state for an ongoing fetch.
///
/// Tracked per repository in `Service::fetching`; removed again when the
/// fetch result comes in (see `Service::fetched`).
#[derive(Debug)]
pub struct FetchState {
    /// Node we're fetching from.
    pub from: NodeId,
    /// What refs we're fetching.
    pub refs_at: Vec<RefsAt>,
    /// Channels waiting for fetch results.
    pub subscribers: Vec<chan::Sender<FetchResult>>,
}
-

-
impl FetchState {
-
    /// Add a subscriber to this fetch.
-
    fn subscribe(&mut self, c: chan::Sender<FetchResult>) {
-
        if !self.subscribers.iter().any(|s| s.same_channel(&c)) {
-
            self.subscribers.push(c);
-
        }
-
    }
-
}
-

-
/// Holds all node stores.
///
/// Thin newtype over a single database handle; individual stores are
/// exposed through narrow accessor methods rather than the raw handle.
#[derive(Debug)]
pub struct Stores<D>(D);
-

-
impl<D> Stores<D>
-
where
-
    D: Store,
-
{
-
    /// Get the database as a routing store.
-
    pub fn routing(&self) -> &impl routing::Store {
-
        &self.0
-
    }
-

-
    /// Get the database as a routing store, mutably.
-
    pub fn routing_mut(&mut self) -> &mut impl routing::Store {
-
        &mut self.0
-
    }
-

-
    /// Get the database as an address store.
-
    pub fn addresses(&self) -> &impl address::Store {
-
        &self.0
-
    }
-

-
    /// Get the database as an address store, mutably.
-
    pub fn addresses_mut(&mut self) -> &mut impl address::Store {
-
        &mut self.0
-
    }
-

-
    /// Get the database as a gossip store.
-
    pub fn gossip(&self) -> &impl gossip::Store {
-
        &self.0
-
    }
-

-
    /// Get the database as a gossip store, mutably.
-
    pub fn gossip_mut(&mut self) -> &mut impl gossip::Store {
-
        &mut self.0
-
    }
-

-
    /// Get the database as a seed store.
-
    pub fn seeds(&self) -> &impl seed::Store {
-
        &self.0
-
    }
-

-
    /// Get the database as a seed store, mutably.
-
    pub fn seeds_mut(&mut self) -> &mut impl seed::Store {
-
        &mut self.0
-
    }
-

-
    /// Get the database as a refs db.
-
    pub fn refs(&self) -> &impl node::refs::Store {
-
        &self.0
-
    }
-

-
    /// Get the database as a refs db, mutably.
-
    pub fn refs_mut(&mut self) -> &mut impl node::refs::Store {
-
        &mut self.0
-
    }
-
}
-

-
impl<D> AsMut<D> for Stores<D> {
-
    fn as_mut(&mut self) -> &mut D {
-
        &mut self.0
-
    }
-
}
-

-
impl<D> From<D> for Stores<D> {
-
    fn from(db: D) -> Self {
-
        Self(db)
-
    }
-
}
-

-
/// The node service.
///
/// Owns the in-memory state of a running node: configuration, signer,
/// storage and database handles, peer sessions, periodic-task timestamps,
/// and the I/O outbox that the runtime drains via `Service::next`.
#[derive(Debug)]
pub struct Service<D, S, G> {
    /// Service configuration.
    config: Config,
    /// Our cryptographic signer and key.
    signer: Device<G>,
    /// Project storage.
    storage: S,
    /// Node database.
    db: Stores<D>,
    /// Policy configuration.
    policies: policy::Config<Write>,
    /// Peer sessions, currently or recently connected.
    sessions: Sessions,
    /// Clock. Tells the time.
    clock: LocalTime,
    /// Who relayed what announcement to us. We keep track of this to ensure that
    /// we don't relay messages to nodes that already know about these messages.
    relayed_by: HashMap<gossip::AnnouncementId, Vec<NodeId>>,
    /// I/O outbox.
    outbox: Outbox,
    /// Cached local node announcement.
    node: NodeAnnouncement,
    /// Cached local inventory announcement.
    inventory: InventoryAnnouncement,
    /// Source of entropy.
    rng: Rng,
    /// Ongoing fetches.
    fetching: HashMap<RepoId, FetchState>,
    /// Request/connection rate limiter.
    limiter: RateLimiter,
    /// Current seeded repositories bloom filter.
    filter: Filter,
    /// Last time the service was idle.
    last_idle: LocalTime,
    /// Last time the gossip messages were relayed.
    last_gossip: LocalTime,
    /// Last time the service synced.
    last_sync: LocalTime,
    /// Last time the service routing table was pruned.
    last_prune: LocalTime,
    /// Last time the announcement task was run.
    last_announce: LocalTime,
    /// Timestamp of last local inventory announced.
    last_inventory: LocalTime,
    /// Last timestamp used for announcements.
    last_timestamp: Timestamp,
    /// Time when the service was initialized, or `None` if it wasn't initialized.
    started_at: Option<LocalTime>,
    /// Time when the service was last online, or `None` if this is the first time.
    last_online_at: Option<LocalTime>,
    /// Publishes events to subscribers.
    emitter: Emitter<Event>,
    /// Local listening addresses.
    listening: Vec<net::SocketAddr>,
    /// Latest metrics for all nodes connected to since the last start.
    metrics: Metrics,
}
-

-
impl<D, S, G> Service<D, S, G> {
-
    /// Get the local node id.
-
    pub fn node_id(&self) -> NodeId {
-
        *self.signer.public_key()
-
    }
-

-
    /// Get the local service time.
-
    pub fn local_time(&self) -> LocalTime {
-
        self.clock
-
    }
-

-
    pub fn emitter(&self) -> Emitter<Event> {
-
        self.emitter.clone()
-
    }
-
}
-

-
impl<D, S, G> Service<D, S, G>
-
where
-
    D: Store,
-
    S: ReadStorage + 'static,
-
    G: crypto::signature::Signer<crypto::Signature>,
-
{
-
    pub fn new(
-
        config: Config,
-
        db: Stores<D>,
-
        storage: S,
-
        policies: policy::Config<Write>,
-
        signer: Device<G>,
-
        rng: Rng,
-
        node: NodeAnnouncement,
-
        emitter: Emitter<Event>,
-
    ) -> Self {
-
        let sessions = Sessions::new(rng.clone());
-
        let limiter = RateLimiter::new(config.peers());
-
        let last_timestamp = node.timestamp;
-
        let clock = LocalTime::default(); // Updated on initialize.
-
        let inventory = gossip::inventory(clock.into(), []); // Updated on initialize.
-

-
        Self {
-
            config,
-
            storage,
-
            policies,
-
            signer,
-
            rng,
-
            inventory,
-
            node,
-
            clock,
-
            db,
-
            outbox: Outbox::default(),
-
            limiter,
-
            sessions,
-
            fetching: HashMap::new(),
-
            filter: Filter::empty(),
-
            relayed_by: HashMap::default(),
-
            last_idle: LocalTime::default(),
-
            last_gossip: LocalTime::default(),
-
            last_sync: LocalTime::default(),
-
            last_prune: LocalTime::default(),
-
            last_timestamp,
-
            last_announce: LocalTime::default(),
-
            last_inventory: LocalTime::default(),
-
            started_at: None,     // Updated on initialize.
-
            last_online_at: None, // Updated on initialize.
-
            emitter,
-
            listening: vec![],
-
            metrics: Metrics::default(),
-
        }
-
    }
-

-
    /// Whether the service was started (initialized) and if so, at what time.
-
    pub fn started(&self) -> Option<LocalTime> {
-
        self.started_at
-
    }
-

-
    /// Return the next i/o action to execute.
-
    #[allow(clippy::should_implement_trait)]
-
    pub fn next(&mut self) -> Option<io::Io> {
-
        self.outbox.next()
-
    }
-

-
    /// Seed a repository.
-
    /// Returns whether or not the repo policy was updated.
-
    pub fn seed(&mut self, id: &RepoId, scope: Scope) -> Result<bool, policy::Error> {
-
        let updated = self.policies.seed(id, scope)?;
-
        self.filter.insert(id);
-

-
        Ok(updated)
-
    }
-

-
    /// Unseed a repository.
    /// Returns whether or not the repo policy was updated.
    /// Note that when unseeding, we don't announce anything to the network. This is because by
    /// simply not announcing it anymore, it will eventually be pruned by nodes.
    pub fn unseed(&mut self, id: &RepoId) -> Result<bool, policy::Error> {
        let updated = self.policies.unseed(id)?;

        if updated {
            // Nb. This is potentially slow if we have lots of repos. We should probably
            // only re-compute the filter when we've unseeded a certain amount of repos
            // and the filter is really out of date.
            //
            // TODO: Share this code with initialization code.
            // NOTE(review): rebuild iterates every seed policy — O(#policies).
            self.filter = Filter::new(
                self.policies
                    .seed_policies()?
                    .filter_map(|t| (t.policy.is_allow()).then_some(t.rid)),
            );
            // Update and announce new inventory.
            // A failure here is logged but does not roll back the policy change.
            if let Err(e) = self.remove_inventory(id) {
                error!(target: "service", "Error updating inventory after unseed: {e}");
            }
        }
        Ok(updated)
    }
-

-
    /// Find the closest `n` peers by proximity in seeding graphs.
    /// Returns a sorted list from the closest peer to the furthest.
    /// Peers with more seedings in common score higher.
    ///
    /// NOTE(review): not yet implemented — calling this panics via `todo!`.
    #[allow(unused)]
    pub fn closest_peers(&self, n: usize) -> Vec<NodeId> {
        todo!()
    }
-

-
    /// Get the database.
-
    pub fn database(&self) -> &Stores<D> {
-
        &self.db
-
    }
-

-
    /// Get the mutable database.
-
    pub fn database_mut(&mut self) -> &mut Stores<D> {
-
        &mut self.db
-
    }
-

-
    /// Get the storage instance.
-
    pub fn storage(&self) -> &S {
-
        &self.storage
-
    }
-

-
    /// Get the mutable storage instance.
-
    pub fn storage_mut(&mut self) -> &mut S {
-
        &mut self.storage
-
    }
-

-
    /// Get the node policies.
-
    pub fn policies(&self) -> &policy::Config<Write> {
-
        &self.policies
-
    }
-

-
    /// Get the local signer.
-
    pub fn signer(&self) -> &Device<G> {
-
        &self.signer
-
    }
-

-
    /// Subscriber to inner `Emitter` events.
-
    pub fn events(&mut self) -> Events {
-
        Events::from(self.emitter.subscribe())
-
    }
-

-
    /// Get I/O outbox.
-
    pub fn outbox(&mut self) -> &mut Outbox {
-
        &mut self.outbox
-
    }
-

-
    /// Get configuration.
-
    pub fn config(&self) -> &Config {
-
        &self.config
-
    }
-

-
    /// Lookup a repository, both locally and in the routing table.
    ///
    /// The local node is filtered out of the remote seed list, since the
    /// local copy (if any) is reported separately via `local`.
    pub fn lookup(&self, rid: RepoId) -> Result<Lookup, LookupError> {
        let this = self.nid();
        let local = self.storage.get(rid)?;
        let remote = self
            .db
            .routing()
            .get(&rid)?
            .iter()
            .filter(|nid| nid != &this)
            .cloned()
            .collect();

        Ok(Lookup { local, remote })
    }
-

-
    /// Initialize service with current time. Call this once.
-
    pub fn initialize(&mut self, time: LocalTime) -> Result<(), Error> {
-
        debug!(target: "service", "Init @{}", time.as_millis());
-
        assert_ne!(time, LocalTime::default());
-

-
        let nid = self.node_id();
-

-
        self.clock = time;
-
        self.started_at = Some(time);
-
        self.last_online_at = match self.db.gossip().last() {
-
            Ok(Some(last)) => Some(last.to_local_time()),
-
            Ok(None) => None,
-
            Err(e) => {
-
                error!(target: "service", "Error getting the lastest gossip message from db: {e}");
-
                None
-
            }
-
        };
-

-
        // Populate refs database. This is only useful as part of the upgrade process for nodes
-
        // that have been online since before the refs database was created.
-
        match self.db.refs().count() {
-
            Ok(0) => {
-
                info!(target: "service", "Empty refs database, populating from storage..");
-
                if let Err(e) = self.db.refs_mut().populate(&self.storage) {
-
                    error!(target: "service", "Failed to populate refs database: {e}");
-
                }
-
            }
-
            Ok(n) => debug!(target: "service", "Refs database has {n} cached references"),
-
            Err(e) => error!(target: "service", "Error checking refs database: {e}"),
-
        }
-

-
        let announced = self
-
            .db
-
            .seeds()
-
            .seeded_by(&nid)?
-
            .collect::<Result<HashMap<_, _>, _>>()?;
-
        let mut inventory = BTreeSet::new();
-
        let mut private = BTreeSet::new();
-

-
        for repo in self.storage.repositories()? {
-
            let rid = repo.rid;
-

-
            // If we're not seeding this repo, just skip it.
-
            if !self.policies.is_seeding(&rid)? {
-
                warn!(target: "service", "Local repository {rid} is not seeded");
-
                continue;
-
            }
-
            // Add public repositories to inventory.
-
            if repo.doc.is_public() {
-
                inventory.insert(rid);
-
            } else {
-
                private.insert(rid);
-
            }
-
            // If we have no owned refs for this repo, then there's nothing to announce.
-
            let Some(updated_at) = repo.synced_at else {
-
                continue;
-
            };
-
            // Skip this repo if the sync status matches what we have in storage.
-
            if let Some(announced) = announced.get(&rid) {
-
                if updated_at.oid == announced.oid {
-
                    continue;
-
                }
-
            }
-
            // Make sure our local node's sync status is up to date with storage.
-
            if self.db.seeds_mut().synced(
-
                &rid,
-
                &nid,
-
                updated_at.oid,
-
                updated_at.timestamp.into(),
-
            )? {
-
                debug!(target: "service", "Saved local sync status for {rid}..");
-
            }
-
            // If we got here, it likely means a repo was updated while the node was stopped.
-
            // Therefore, we pre-load a refs announcement for this repo, so that it is included in
-
            // the historical gossip messages when a node connects and subscribes to this repo.
-
            if let Ok((ann, _)) = self.refs_announcement_for(rid, [nid]) {
-
                debug!(target: "service", "Adding refs announcement for {rid} to historical gossip messages..");
-
                self.db.gossip_mut().announced(&nid, &ann)?;
-
            }
-
        }
-

-
        // Ensure that our inventory is recorded in our routing table, and we are seeding
-
        // all of it. It can happen that inventory is not properly seeded if for eg. the
-
        // user creates a new repository while the node is stopped.
-
        self.db
-
            .routing_mut()
-
            .add_inventory(inventory.iter(), nid, time.into())?;
-
        self.inventory = gossip::inventory(self.timestamp(), inventory);
-

-
        // Ensure that private repositories are not in our inventory. It's possible that
-
        // a repository was public and then it was made private.
-
        self.db
-
            .routing_mut()
-
            .remove_inventories(private.iter(), &nid)?;
-

-
        // Setup subscription filter for seeded repos.
-
        self.filter = Filter::new(
-
            self.policies
-
                .seed_policies()?
-
                .filter_map(|t| (t.policy.is_allow()).then_some(t.rid)),
-
        );
-
        // Connect to configured peers.
-
        let addrs = self.config.connect.clone();
-
        for (id, addr) in addrs.into_iter().map(|ca| ca.into()) {
-
            if let Err(e) = self.connect(id, addr) {
-
                error!(target: "service", "Service::initialization connection error: {e}");
-
            }
-
        }
-
        // Try to establish some connections.
-
        self.maintain_connections();
-
        // Start periodic tasks.
-
        self.outbox.wakeup(IDLE_INTERVAL);
-
        self.outbox.wakeup(GOSSIP_INTERVAL);
-

-
        Ok(())
-
    }
-

-
    /// Update the service clock and node metrics. Called on every runtime tick.
    pub fn tick(&mut self, now: LocalTime, metrics: &Metrics) {
        trace!(
            target: "service",
            "Tick +{}",
            now - self.started_at.expect("Service::tick: service must be initialized")
        );
        // Only ever move the clock forward; a backwards system clock would
        // otherwise confuse the interval-based tasks in `wake`.
        if now >= self.clock {
            self.clock = now;
        } else {
            // Nb. In tests, we often move the clock forwards in time to test different behaviors,
            // so this warning isn't applicable there.
            #[cfg(not(test))]
            warn!(
                target: "service",
                "System clock is not monotonic: {now} is not greater or equal to {}", self.clock
            );
        }
        self.metrics = metrics.clone();
    }
-

-
    /// Run any periodic tasks whose interval has elapsed, and schedule each
    /// task's next wakeup. Persistent-peer reconnection is checked on every
    /// call, regardless of intervals.
    pub fn wake(&mut self) {
        let now = self.clock;

        trace!(
            target: "service",
            "Wake +{}",
            now - self.started_at.expect("Service::wake: service must be initialized")
        );

        // Idle task: connection housekeeping and queued fetches.
        if now - self.last_idle >= IDLE_INTERVAL {
            trace!(target: "service", "Running 'idle' task...");

            self.keep_alive(&now);
            self.disconnect_unresponsive_peers(&now);
            self.idle_connections();
            self.maintain_connections();
            self.dequeue_fetches();
            self.outbox.wakeup(IDLE_INTERVAL);
            self.last_idle = now;
        }
        // Gossip task: relay stored announcements to peers.
        if now - self.last_gossip >= GOSSIP_INTERVAL {
            trace!(target: "service", "Running 'gossip' task...");

            if let Err(e) = self.relay_announcements() {
                error!(target: "service", "Error relaying stored announcements: {e}");
            }
            self.outbox.wakeup(GOSSIP_INTERVAL);
            self.last_gossip = now;
        }
        // Sync task: fetch repositories we should have but don't.
        if now - self.last_sync >= SYNC_INTERVAL {
            trace!(target: "service", "Running 'sync' task...");

            if let Err(e) = self.fetch_missing_repositories() {
                error!(target: "service", "Error fetching missing inventory: {e}");
            }
            self.outbox.wakeup(SYNC_INTERVAL);
            self.last_sync = now;
        }
        // Announce task: re-announce our inventory.
        if now - self.last_announce >= ANNOUNCE_INTERVAL {
            trace!(target: "service", "Running 'announce' task...");

            self.announce_inventory();
            self.outbox.wakeup(ANNOUNCE_INTERVAL);
            self.last_announce = now;
        }
        // Prune task: drop stale routing and gossip entries.
        if now - self.last_prune >= PRUNE_INTERVAL {
            trace!(target: "service", "Running 'prune' task...");

            if let Err(err) = self.prune_routing_entries(&now) {
                error!(target: "service", "Error pruning routing entries: {err}");
            }
            if let Err(err) = self
                .db
                .gossip_mut()
                .prune((now - self.config.limits.gossip_max_age).into())
            {
                error!(target: "service", "Error pruning gossip entries: {err}");
            }

            self.outbox.wakeup(PRUNE_INTERVAL);
            self.last_prune = now;
        }

        // Always check whether there are persistent peers that need reconnecting.
        self.maintain_persistent();
    }
-

-
    /// Handle a [`Command`] sent to the service by the operator.
    ///
    /// Replies, where applicable, are sent over the channel carried inside
    /// the command; send failures (a hung-up receiver) are deliberately
    /// ignored via `.ok()`. NOTE(review): the policy-changing commands
    /// (`Seed`, `Unseed`, `Follow`, `Unfollow`) panic on policy-store errors
    /// via `expect`.
    pub fn command(&mut self, cmd: Command) {
        info!(target: "service", "Received command {:?}", cmd);

        match cmd {
            Command::Connect(nid, addr, opts) => {
                // Persistent peers are remembered in the config so they get
                // reconnected by `maintain_persistent`.
                if opts.persistent {
                    self.config.connect.insert((nid, addr.clone()).into());
                }
                if let Err(e) = self.connect(nid, addr) {
                    match e {
                        ConnectError::SessionExists { nid } => {
                            self.emitter.emit(Event::PeerConnected { nid });
                        }
                        e => {
                            // N.b. using the fact that the call to connect waits for an event
                            self.emitter.emit(Event::PeerDisconnected {
                                nid,
                                reason: e.to_string(),
                            });
                        }
                    }
                }
            }
            Command::Disconnect(nid) => {
                self.outbox.disconnect(nid, DisconnectReason::Command);
            }
            Command::Config(resp) => {
                resp.send(self.config.clone()).ok();
            }
            Command::ListenAddrs(resp) => {
                resp.send(self.listening.clone()).ok();
            }
            Command::Seeds(rid, resp) => match self.seeds(&rid) {
                Ok(seeds) => {
                    let (connected, disconnected) = seeds.partition();
                    debug!(
                        target: "service",
                        "Found {} connected seed(s) and {} disconnected seed(s) for {}",
                        connected.len(), disconnected.len(),  rid
                    );
                    resp.send(seeds).ok();
                }
                Err(e) => {
                    error!(target: "service", "Error getting seeds for {rid}: {e}");
                }
            },
            Command::Fetch(rid, seed, timeout, resp) => {
                self.fetch(rid, seed, timeout, Some(resp));
            }
            Command::Seed(rid, scope, resp) => {
                // Update our seeding policy.
                let seeded = self
                    .seed(&rid, scope)
                    .expect("Service::command: error seeding repository");
                resp.send(seeded).ok();

                // Let all our peers know that we're interested in this repo from now on.
                self.outbox.broadcast(
                    Message::subscribe(self.filter(), self.clock.into(), Timestamp::MAX),
                    self.sessions.connected().map(|(_, s)| s),
                );
            }
            Command::Unseed(id, resp) => {
                let updated = self
                    .unseed(&id)
                    .expect("Service::command: error unseeding repository");
                resp.send(updated).ok();
            }
            Command::Follow(id, alias, resp) => {
                let seeded = self
                    .policies
                    .follow(&id, alias.as_ref())
                    .expect("Service::command: error following node");
                resp.send(seeded).ok();
            }
            Command::Unfollow(id, resp) => {
                let updated = self
                    .policies
                    .unfollow(&id)
                    .expect("Service::command: error unfollowing node");
                resp.send(updated).ok();
            }
            Command::AnnounceRefs(id, resp) => {
                let doc = match self.storage.get(id) {
                    Ok(Some(doc)) => doc,
                    Ok(None) => {
                        error!(target: "service", "Error announcing refs: repository {id} not found");
                        return;
                    }
                    Err(e) => {
                        error!(target: "service", "Error announcing refs: doc error: {e}");
                        return;
                    }
                };

                match self.announce_own_refs(id, doc) {
                    Ok(refs) => match refs.as_slice() {
                        &[refs] => {
                            resp.send(refs).ok();
                        }
                        // SAFETY: Since we passed in one NID, we should get exactly one item back.
                        [..] => panic!("Service::command: unexpected refs returned"),
                    },
                    Err(err) => {
                        error!(target: "service", "Error announcing refs: {err}");
                    }
                }
            }
            Command::AnnounceInventory => {
                self.announce_inventory();
            }
            Command::AddInventory(rid, resp) => match self.add_inventory(rid) {
                Ok(updated) => {
                    resp.send(updated).ok();
                }
                Err(e) => {
                    error!(target: "service", "Error adding {rid} to inventory: {e}");
                }
            },
            Command::QueryState(query, sender) => {
                sender.send(query(self)).ok();
            }
        }
    }
-

-
    /// Initiate an outgoing fetch for some repository, based on another node's announcement.
-
    /// Returns `true` if the fetch was initiated and `false` if it was skipped.
-
    fn fetch_refs_at(
-
        &mut self,
-
        rid: RepoId,
-
        from: NodeId,
-
        refs: NonEmpty<RefsAt>,
-
        scope: Scope,
-
        timeout: time::Duration,
-
        channel: Option<chan::Sender<FetchResult>>,
-
    ) -> bool {
-
        match self.refs_status_of(rid, refs, &scope) {
-
            Ok(status) => {
-
                if status.want.is_empty() {
-
                    debug!(target: "service", "Skipping fetch for {rid}, all refs are already in storage");
-
                } else {
-
                    return self._fetch(rid, from, status.want, timeout, channel);
-
                }
-
            }
-
            Err(e) => {
-
                error!(target: "service", "Error getting the refs status of {rid}: {e}");
-
            }
-
        }
-
        // We didn't try to fetch anything.
-
        false
-
    }
-

-
    /// Initiate an outgoing fetch for some repository.
-
    fn fetch(
-
        &mut self,
-
        rid: RepoId,
-
        from: NodeId,
-
        timeout: time::Duration,
-
        channel: Option<chan::Sender<FetchResult>>,
-
    ) -> bool {
-
        self._fetch(rid, from, vec![], timeout, channel)
-
    }
-

-
    /// Common implementation behind `fetch` and `fetch_refs_at`.
    ///
    /// Tries to initiate the fetch immediately; on contention (a fetch for
    /// this repo already running, or the session at capacity) the request is
    /// queued or merged into the ongoing fetch instead. Returns `true` only
    /// when a fetch was actually initiated.
    fn _fetch(
        &mut self,
        rid: RepoId,
        from: NodeId,
        refs_at: Vec<RefsAt>,
        timeout: time::Duration,
        channel: Option<chan::Sender<FetchResult>>,
    ) -> bool {
        match self.try_fetch(rid, &from, refs_at.clone(), timeout) {
            Ok(fetching) => {
                if let Some(c) = channel {
                    fetching.subscribe(c);
                }
                return true;
            }
            Err(TryFetchError::AlreadyFetching(fetching)) => {
                // If we're already fetching the same refs from the requested peer, there's nothing
                // to do, we simply add the supplied channel to the list of subscribers so that it
                // is notified on completion. Otherwise, we queue a fetch with the requested peer.
                if fetching.from == from && fetching.refs_at == refs_at {
                    debug!(target: "service", "Ignoring redundant fetch of {rid} from {from}");

                    if let Some(c) = channel {
                        fetching.subscribe(c);
                    }
                } else {
                    let fetch = QueuedFetch {
                        rid,
                        refs_at,
                        from,
                        timeout,
                        channel,
                    };
                    debug!(target: "service", "Queueing fetch for {rid} with {from} (already fetching)..");

                    self.queue_fetch(fetch);
                }
            }
            Err(TryFetchError::SessionCapacityReached) => {
                debug!(target: "service", "Fetch capacity reached for {from}, queueing {rid}..");
                self.queue_fetch(QueuedFetch {
                    rid,
                    refs_at,
                    from,
                    timeout,
                    channel,
                });
            }
            Err(e) => {
                // Unrecoverable for this request: report the failure to the
                // caller if a reply channel was supplied.
                if let Some(c) = channel {
                    c.send(FetchResult::Failed {
                        reason: e.to_string(),
                    })
                    .ok();
                }
            }
        }
        false
    }
-

-
    fn queue_fetch(&mut self, fetch: QueuedFetch) {
-
        let Some(s) = self.sessions.get_mut(&fetch.from) else {
-
            log::error!(target: "service", "Cannot queue fetch for unknown session {}", fetch.from);
-
            return;
-
        };
-
        if let Err(e) = s.queue_fetch(fetch) {
-
            let fetch = e.inner();
-
            log::debug!(target: "service", "Unable to queue fetch for {} with {}: {e}", &fetch.rid, &fetch.from);
-
        }
-
    }
-

-
    // TODO: Buffer/throttle fetches.
    /// Try to initiate a fetch of `rid` from the session identified by `from`.
    ///
    /// Fails if the session is unknown or not connected, if a fetch for this
    /// repository is already ongoing (returning the existing state), or if
    /// the session has reached its concurrent-fetch capacity. On success,
    /// records the new [`FetchState`] and emits the fetch on the outbox.
    fn try_fetch(
        &mut self,
        rid: RepoId,
        from: &NodeId,
        refs_at: Vec<RefsAt>,
        timeout: time::Duration,
    ) -> Result<&mut FetchState, TryFetchError> {
        let from = *from;
        let Some(session) = self.sessions.get_mut(&from) else {
            return Err(TryFetchError::SessionNotConnected);
        };
        let fetching = self.fetching.entry(rid);

        trace!(target: "service", "Trying to fetch {refs_at:?} for {rid}..");

        let fetching = match fetching {
            Entry::Vacant(fetching) => fetching,
            Entry::Occupied(fetching) => {
                // We're already fetching this repo from some peer.
                return Err(TryFetchError::AlreadyFetching(fetching.into_mut()));
            }
        };
        // Sanity check: We shouldn't be fetching from this session, since we return above if we're
        // fetching from any session.
        debug_assert!(!session.is_fetching(&rid));

        if !session.is_connected() {
            // This can happen if a session disconnects in the time between asking for seeds to
            // fetch from, and initiating the fetch from one of those seeds.
            return Err(TryFetchError::SessionNotConnected);
        }
        if session.is_at_capacity() {
            // If we're already fetching multiple repos from this peer.
            return Err(TryFetchError::SessionCapacityReached);
        }

        let fetching = fetching.insert(FetchState {
            from,
            refs_at: refs_at.clone(),
            subscribers: vec![],
        });
        self.outbox.fetch(
            session,
            rid,
            refs_at,
            timeout,
            self.config.limits.fetch_pack_receive,
        );

        Ok(fetching)
    }
-

-
    /// Handle the result of a completed fetch of `rid` from `remote`.
    ///
    /// Clears the in-flight fetch state, notifies all subscribers of the
    /// outcome, and on success updates routing, emits events, and announces
    /// inventory/refs as appropriate. On a timeout failure the remote is
    /// disconnected. Finally, queued fetches are given a chance to run.
    pub fn fetched(
        &mut self,
        rid: RepoId,
        remote: NodeId,
        result: Result<fetch::FetchResult, FetchError>,
    ) {
        // Remove the in-flight state; an unknown RID means we never initiated
        // (or already resolved) this fetch.
        let Some(fetching) = self.fetching.remove(&rid) else {
            error!(target: "service", "Received unexpected fetch result for {rid}, from {remote}");
            return;
        };
        debug_assert_eq!(fetching.from, remote);

        if let Some(s) = self.sessions.get_mut(&remote) {
            // Mark this RID as fetched for this session.
            s.fetched(rid);
        }

        // Notify all fetch subscribers of the fetch result. This is used when the user requests
        // a fetch via the CLI, for example.
        for sub in &fetching.subscribers {
            debug!(target: "service", "Found existing fetch request from {remote}, sending result..");

            let result = match &result {
                Ok(success) => FetchResult::Success {
                    updated: success.updated.clone(),
                    namespaces: success.namespaces.clone(),
                    clone: success.clone,
                },
                Err(e) => FetchResult::Failed {
                    reason: e.to_string(),
                },
            };
            if sub.send(result).is_err() {
                error!(target: "service", "Error sending fetch result for {rid} from {remote}..");
            } else {
                debug!(target: "service", "Sent fetch result for {rid} from {remote}..");
            }
        }

        match result {
            Ok(fetch::FetchResult {
                updated,
                namespaces,
                clone,
                doc,
            }) => {
                info!(target: "service", "Fetched {rid} from {remote} successfully");
                // Update our routing table in case this fetch was user-initiated and doesn't
                // come from an announcement.
                self.seed_discovered(rid, remote, self.clock.into());

                for update in &updated {
                    if update.is_skipped() {
                        trace!(target: "service", "Ref skipped: {update} for {rid}");
                    } else {
                        debug!(target: "service", "Ref updated: {update} for {rid}");
                    }
                }
                self.emitter.emit(Event::RefsFetched {
                    remote,
                    rid,
                    updated: updated.clone(),
                });

                // Announce our new inventory if this fetch was a full clone.
                // Only update and announce inventory for public repositories.
                if clone && doc.is_public() {
                    debug!(target: "service", "Updating and announcing inventory for cloned repository {rid}..");

                    if let Err(e) = self.add_inventory(rid) {
                        error!(target: "service", "Error announcing inventory for {rid}: {e}");
                    }
                }

                // It's possible for a fetch to succeed but nothing was updated.
                if updated.is_empty() || updated.iter().all(|u| u.is_skipped()) {
                    debug!(target: "service", "Nothing to announce, no refs were updated..");
                } else {
                    // Finally, announce the refs. This is useful for nodes to know what we've synced,
                    // beyond just knowing that we have added an item to our inventory.
                    if let Err(e) = self.announce_refs(rid, doc.into(), namespaces) {
                        error!(target: "service", "Failed to announce new refs: {e}");
                    }
                }
            }
            Err(err) => {
                error!(target: "service", "Fetch failed for {rid} from {remote}: {err}");

                // For now, we only disconnect the remote in case of timeout. In the future,
                // there may be other reasons to disconnect.
                if err.is_timeout() {
                    self.outbox.disconnect(remote, DisconnectReason::Fetch(err));
                }
            }
        }
        // We can now try to dequeue more fetches.
        self.dequeue_fetches();
    }
-

-
    /// Attempt to dequeue fetches from all peers.
    /// At most one fetch is dequeued per peer. If the fetch cannot be processed,
    /// it is put back on the queue for that peer.
    ///
    /// Fetches are queued for two reasons:
    /// 1. The RID was already being fetched.
    /// 2. The session was already at fetch capacity.
    pub fn dequeue_fetches(&mut self) {
        // Snapshot the session keys in shuffled order so no single peer is
        // systematically favored, and so we can mutably borrow sessions below.
        let sessions = self
            .sessions
            .shuffled()
            .map(|(k, _)| *k)
            .collect::<Vec<_>>();

        // Try to dequeue once per session.
        for nid in sessions {
            // SAFETY: All the keys we are iterating on exist.
            #[allow(clippy::unwrap_used)]
            let sess = self.sessions.get_mut(&nid).unwrap();
            if !sess.is_connected() || sess.is_at_capacity() {
                continue;
            }

            if let Some(QueuedFetch {
                rid,
                from,
                refs_at,
                timeout,
                channel,
            }) = sess.dequeue_fetch()
            {
                debug!(target: "service", "Dequeued fetch for {rid} from session {from}..");

                if let Some(refs) = NonEmpty::from_vec(refs_at) {
                    let repo_entry = self.policies.seed_policy(&rid).expect(
                        "Service::dequeue_fetch: error accessing repo seeding configuration",
                    );
                    let SeedingPolicy::Allow { scope } = repo_entry.policy else {
                        // NOTE(review): the queued fetch's `channel` is dropped here
                        // without sending a result — subscribers of a fetch whose repo
                        // was un-seeded while queued are never notified. Confirm this
                        // is intended (callers may rely on channel-drop as the signal).
                        debug!(target: "service", "Repository {rid} is no longer seeded, skipping..");
                        continue;
                    };
                    self.fetch_refs_at(rid, from, refs, scope, timeout, channel);
                } else {
                    // If no refs are specified, always do a full fetch.
                    self.fetch(rid, from, timeout, channel);
                }
            }
        }
    }
-

-
    /// Inbound connection attempt.
-
    pub fn accepted(&mut self, ip: IpAddr) -> bool {
-
        // Always accept localhost connections, even if we already reached
-
        // our inbound connection limit.
-
        if ip.is_loopback() || ip.is_unspecified() {
-
            return true;
-
        }
-
        // Check for inbound connection limit.
-
        if self.sessions.inbound().count() >= self.config.limits.connection.inbound {
-
            return false;
-
        }
-
        match self.db.addresses().is_ip_banned(ip) {
-
            Ok(banned) => {
-
                if banned {
-
                    debug!(target: "service", "Rejecting inbound connection from banned ip {ip}");
-
                    return false;
-
                }
-
            }
-
            Err(e) => error!(target: "service", "Error querying ban status for {ip}: {e}"),
-
        }
-
        let host: HostName = ip.into();
-

-
        if self.limiter.limit(
-
            host.clone(),
-
            None,
-
            &self.config.limits.rate.inbound,
-
            self.clock,
-
        ) {
-
            trace!(target: "service", "Rate limiting inbound connection from {host}..");
-
            return false;
-
        }
-
        true
-
    }
-

-
    /// Record that an outbound connection to `nid` at `addr` was attempted,
    /// transitioning the session to the "attempted" state.
    pub fn attempted(&mut self, nid: NodeId, addr: Address) {
        debug!(target: "service", "Attempted connection to {nid} ({addr})");

        match self.sessions.get_mut(&nid) {
            Some(sess) => sess.to_attempted(),
            None => {
                // An attempt for a session we don't know about is a logic error;
                // surface it loudly in debug builds only.
                #[cfg(debug_assertions)]
                panic!("Service::attempted: unknown session {nid}@{addr}");
            }
        }
    }
-

-
    pub fn listening(&mut self, local_addr: net::SocketAddr) {
-
        info!(target: "node", "Listening on {local_addr}..");
-

-
        self.listening.push(local_addr);
-
    }
-

-
    /// Handle a newly established connection to `remote` over `link`.
    ///
    /// Emits a [`Event::PeerConnected`] event and sends our initial message
    /// set to the peer. Outbound connections transition an existing session
    /// to connected; inbound connections either update an existing session
    /// or create a new one (recording the peer's IP for non-local addresses).
    pub fn connected(&mut self, remote: NodeId, addr: Address, link: Link) {
        info!(target: "service", "Connected to {remote} ({addr}) ({link:?})");
        self.emitter.emit(Event::PeerConnected { nid: remote });

        // The initial batch of messages we send on any new connection.
        let msgs = self.initial(link);

        if link.is_outbound() {
            // Outbound: we initiated, so a session should already exist.
            if let Some(peer) = self.sessions.get_mut(&remote) {
                peer.to_connected(self.clock);
                self.outbox.write_all(peer, msgs);
            }
        } else {
            match self.sessions.entry(remote) {
                Entry::Occupied(mut e) => {
                    // In this scenario, it's possible that our peer is persistent, and
                    // disconnected. We get an inbound connection before we attempt a re-connection,
                    // and therefore we treat it as a regular inbound connection.
                    //
                    // It's also possible that a disconnection hasn't gone through yet and our
                    // peer is still in connected state here, while a new inbound connection from
                    // that same peer is made. This results in a new connection from a peer that is
                    // already connected from the perspective of the service. This appears to be
                    // a bug in the underlying networking library.
                    let peer = e.get_mut();
                    debug!(
                        target: "service",
                        "Connecting peer {remote} already has a session open ({peer})"
                    );
                    peer.link = link;
                    peer.to_connected(self.clock);
                    self.outbox.write_all(peer, msgs);
                }
                Entry::Vacant(e) => {
                    // Remember the peer's IP address, but only for routable
                    // (non-local) addresses.
                    if let HostName::Ip(ip) = addr.host {
                        if !address::is_local(&ip) {
                            if let Err(e) =
                                self.db
                                    .addresses_mut()
                                    .record_ip(&remote, ip, self.clock.into())
                            {
                                log::error!(target: "service", "Error recording IP address for {remote}: {e}");
                            }
                        }
                    }
                    let peer = e.insert(Session::inbound(
                        remote,
                        addr,
                        self.config.is_persistent(&remote),
                        self.rng.clone(),
                        self.clock,
                        self.config.limits.clone(),
                    ));
                    self.outbox.write_all(peer, msgs);
                }
            }
        }
    }
-

-
    /// Handle a disconnection of `remote` over `link` with the given `reason`.
    ///
    /// Fails any in-flight fetches from that peer, schedules a reconnection
    /// for persistent peers (with exponential backoff), and otherwise removes
    /// the session, records the disconnect severity in the address store, and
    /// tops up outbound connections if needed. Queued fetches are retried last.
    pub fn disconnected(&mut self, remote: NodeId, link: Link, reason: &DisconnectReason) {
        let since = self.local_time();
        let Some(session) = self.sessions.get_mut(&remote) else {
            // Since we sometimes disconnect the service eagerly, it's not unusual to get a second
            // disconnection event once the transport is dropped.
            trace!(target: "service", "Redundant disconnection for {} ({})", remote, reason);
            return;
        };
        // In cases of connection conflicts, there may be disconnections of one of the two
        // connections. In that case we don't want the service to remove the session.
        if session.link != link {
            return;
        }

        info!(target: "service", "Disconnected from {} ({})", remote, reason);
        self.emitter.emit(Event::PeerDisconnected {
            nid: remote,
            reason: reason.to_string(),
        });

        // Captured before the session may be removed below.
        let link = session.link;
        let addr = session.addr.clone();

        self.fetching.retain(|_, fetching| {
            if fetching.from != remote {
                return true;
            }
            // Remove and fail any pending fetches from this remote node.
            for resp in &fetching.subscribers {
                resp.send(FetchResult::Failed {
                    reason: format!("disconnected: {reason}"),
                })
                .ok();
            }
            false
        });

        // Attempt to re-connect to persistent peers.
        if self.config.peer(&remote).is_some() {
            // Exponential backoff on the attempt count, clamped to a sane range.
            let delay = LocalDuration::from_secs(2u64.saturating_pow(session.attempts() as u32))
                .clamp(MIN_RECONNECTION_DELTA, MAX_RECONNECTION_DELTA);

            // Nb. We always try to reconnect to persistent peers, even when the error appears
            // to not be transient.
            session.to_disconnected(since, since + delay);

            debug!(target: "service", "Reconnecting to {remote} in {delay}..");

            self.outbox.wakeup(delay);
        } else {
            debug!(target: "service", "Dropping peer {remote}..");
            self.sessions.remove(&remote);

            // Grade the disconnect so the address store can score the peer.
            let severity = match reason {
                DisconnectReason::Dial(_)
                | DisconnectReason::Fetch(_)
                | DisconnectReason::Connection(_) => {
                    if self.is_online() {
                        // If we're "online", there's something wrong with this
                        // peer connection specifically.
                        Severity::Medium
                    } else {
                        Severity::Low
                    }
                }
                DisconnectReason::Session(e) => e.severity(),
                DisconnectReason::Command
                | DisconnectReason::Conflict
                | DisconnectReason::SelfConnection => Severity::Low,
            };

            if let Err(e) = self
                .db
                .addresses_mut()
                .disconnected(&remote, &addr, severity)
            {
                error!(target: "service", "Error updating address store: {e}");
            }
            // Only re-attempt outbound connections, since we don't care if an inbound connection
            // is dropped.
            if link.is_outbound() {
                self.maintain_connections();
            }
        }
        self.dequeue_fetches();
    }
-

-
    pub fn received_message(&mut self, remote: NodeId, message: Message) {
-
        if let Err(err) = self.handle_message(&remote, message) {
-
            // If there's an error, stop processing messages from this peer.
-
            // However, we still relay messages returned up to this point.
-
            self.outbox
-
                .disconnect(remote, DisconnectReason::Session(err));
-

-
            // FIXME: The peer should be set in a state such that we don't
-
            // process further messages.
-
        }
-
    }
-

-
    /// Handle an announcement message.
    ///
    /// Returns `true` if this announcement should be stored and relayed to connected peers,
    /// and `false` if it should not.
    pub fn handle_announcement(
        &mut self,
        relayer: &NodeId,
        relayer_addr: &Address,
        announcement: &Announcement,
    ) -> Result<Option<gossip::AnnouncementId>, session::Error> {
        // An announcement with an invalid signature is a protocol violation.
        if !announcement.verify() {
            return Err(session::Error::Misbehavior);
        }
        let Announcement {
            node: announcer,
            message,
            ..
        } = announcement;

        // Ignore our own announcements, in case the relayer sent one by mistake.
        if announcer == self.nid() {
            return Ok(None);
        }
        let now = self.clock;
        let timestamp = message.timestamp();

        // Don't allow messages from too far in the future.
        if timestamp.saturating_sub(now.as_millis()) > MAX_TIME_DELTA.as_millis() as u64 {
            return Err(session::Error::InvalidTimestamp(timestamp));
        }

        // We don't process announcements from nodes we don't know, since the node announcement is
        // what provides DoS protection.
        //
        // Note that it's possible to *not* receive the node announcement, but receive the
        // subsequent announcements of a node in the case of historical gossip messages requested
        // from the `subscribe` message. This can happen if the cut-off time is after the node
        // announcement timestamp, but before the other announcements. In that case, we simply
        // ignore all announcements of that node until we get a node announcement.
        if let AnnouncementMessage::Inventory(_) | AnnouncementMessage::Refs(_) = message {
            match self.db.addresses().get(announcer) {
                Ok(node) => {
                    if node.is_none() {
                        debug!(target: "service", "Ignoring announcement from unknown node {announcer} (t={timestamp})");
                        return Ok(None);
                    }
                }
                Err(e) => {
                    error!(target: "service", "Error looking up node in address book: {e}");
                    return Ok(None);
                }
            }
        }

        // Discard announcement messages we've already seen, otherwise update our last seen time.
        // `relay` holds the announcement id if (and only if) the message should be re-broadcast.
        let relay = match self.db.gossip_mut().announced(announcer, announcement) {
            Ok(Some(id)) => {
                log::debug!(
                    target: "service",
                    "Stored announcement from {announcer} to be broadcast in {} (t={timestamp})",
                    (self.last_gossip + GOSSIP_INTERVAL) - self.clock
                );
                // Keep track of who relayed the message for later.
                self.relayed_by.entry(id).or_default().push(*relayer);

                // Decide whether or not to relay this message, if it's fresh.
                // To avoid spamming peers on startup with historical gossip messages,
                // don't relay messages that are too old. We make an exception for node announcements,
                // since they are cached, and will hence often carry old timestamps.
                let relay = message.is_node_announcement()
                    || now - timestamp.to_local_time() <= MAX_TIME_DELTA;
                relay.then_some(id)
            }
            Ok(None) => {
                // FIXME: Still mark as relayed by this peer.
                // FIXME: Refs announcements should not be delayed, since they are only sent
                // to subscribers.
                debug!(target: "service", "Ignoring stale announcement from {announcer} (t={timestamp})");
                return Ok(None);
            }
            Err(e) => {
                error!(target: "service", "Error updating gossip entry from {announcer}: {e}");
                return Ok(None);
            }
        };

        match message {
            // Process a peer inventory update announcement by (maybe) fetching.
            AnnouncementMessage::Inventory(message) => {
                self.emitter.emit(Event::InventoryAnnounced {
                    nid: *announcer,
                    inventory: message.inventory.to_vec(),
                    timestamp: message.timestamp,
                });
                // Merge the announced inventory into our routing table; if nothing
                // changed, there is nothing new to relay.
                match self.sync_routing(
                    message.inventory.iter().cloned(),
                    *announcer,
                    message.timestamp,
                ) {
                    Ok(synced) => {
                        if synced.is_empty() {
                            trace!(target: "service", "No routes updated by inventory announcement from {announcer}");
                            return Ok(None);
                        }
                    }
                    Err(e) => {
                        error!(target: "service", "Error processing inventory from {announcer}: {e}");
                        return Ok(None);
                    }
                }
                let mut missing = Vec::new();
                let nid = *self.nid();

                // Here we handle the special case where the inventory we received is that of
                // a connected peer, as opposed to being relayed to us.
                if let Some(sess) = self.sessions.get_mut(announcer) {
                    for id in message.inventory.as_slice() {
                        // If we are connected to the announcer of this inventory, update the peer's
                        // subscription filter to include all inventory items. This way, we'll
                        // relay messages relating to the peer's inventory.
                        if let Some(sub) = &mut sess.subscribe {
                            sub.filter.insert(id);
                        }

                        // If we're seeding and connected to the announcer, and we don't have
                        // the inventory, fetch it from the announcer.
                        if self.policies.is_seeding(id).expect(
                            "Service::handle_announcement: error accessing seeding configuration",
                        ) {
                            // Only if we do not have the repository locally do we fetch here.
                            // If we do have it, only fetch after receiving a ref announcement.
                            match self.db.routing().entry(id, &nid) {
                                Ok(entry) => {
                                    if entry.is_none() {
                                        missing.push(*id);
                                    }
                                }
                                Err(e) => error!(
                                    target: "service",
                                    "Error checking local inventory for {id}: {e}"
                                ),
                            }
                        }
                    }
                }
                // Since we have limited fetch capacity, it may be that we can't fetch an entire
                // inventory from a peer. Therefore we randomize the order of the RIDs to fetch
                // different RIDs from different peers in case multiple peers announce the same
                // RIDs.
                self.rng.shuffle(&mut missing);

                for rid in missing {
                    debug!(target: "service", "Missing seeded inventory {rid}; initiating fetch..");
                    self.fetch(rid, *announcer, FETCH_TIMEOUT, None);
                }
                return Ok(relay);
            }
            AnnouncementMessage::Refs(message) => {
                self.emitter.emit(Event::RefsAnnounced {
                    nid: *announcer,
                    rid: message.rid,
                    refs: message.refs.to_vec(),
                    timestamp: message.timestamp,
                });
                // Empty announcements can be safely ignored.
                let Some(refs) = NonEmpty::from_vec(message.refs.to_vec()) else {
                    debug!(target: "service", "Skipping fetch, no refs in announcement for {} (t={timestamp})", message.rid);
                    return Ok(None);
                };
                // We update inventories when receiving ref announcements, as these could come
                // from a new repository being initialized.
                self.seed_discovered(message.rid, *announcer, message.timestamp);

                // Update sync status of announcer for this repo.
                if let Some(refs) = refs.iter().find(|r| &r.remote == self.nid()) {
                    debug!(
                        target: "service",
                        "Refs announcement of {announcer} for {} contains our own remote at {} (t={})",
                        message.rid, refs.at, message.timestamp
                    );
                    match self.db.seeds_mut().synced(
                        &message.rid,
                        announcer,
                        refs.at,
                        message.timestamp,
                    ) {
                        Ok(updated) => {
                            if updated {
                                debug!(
                                    target: "service",
                                    "Updating sync status of {announcer} for {} to {}",
                                    message.rid, refs.at
                                );
                                self.emitter.emit(Event::RefsSynced {
                                    rid: message.rid,
                                    remote: *announcer,
                                    at: refs.at,
                                });
                            } else {
                                debug!(
                                    target: "service",
                                    "Sync status of {announcer} was not updated for {}",
                                    message.rid,
                                );
                            }
                        }
                        Err(e) => {
                            error!(target: "service", "Error updating sync status for {}: {e}", message.rid);
                        }
                    }
                }
                let repo_entry = self.policies.seed_policy(&message.rid).expect(
                    "Service::handle_announcement: error accessing repo seeding configuration",
                );
                let SeedingPolicy::Allow { scope } = repo_entry.policy else {
                    debug!(
                        target: "service",
                        "Ignoring refs announcement from {announcer}: repository {} isn't seeded (t={timestamp})",
                        message.rid
                    );
                    return Ok(None);
                };
                // Refs can be relayed by peers who don't have the data in storage,
                // therefore we only check whether we are connected to the *announcer*,
                // which is required by the protocol to only announce refs it has.
                let Some(remote) = self.sessions.get(announcer).cloned() else {
                    trace!(
                        target: "service",
                        "Skipping fetch of {}, no sessions connected to {announcer}",
                        message.rid
                    );
                    return Ok(relay);
                };
                // Finally, start the fetch.
                self.fetch_refs_at(message.rid, remote.id, refs, scope, FETCH_TIMEOUT, None);

                return Ok(relay);
            }
            AnnouncementMessage::Node(
                ann @ NodeAnnouncement {
                    features,
                    addresses,
                    ..
                },
            ) => {
                self.emitter.emit(Event::NodeAnnounced {
                    nid: *announcer,
                    alias: ann.alias.clone(),
                    timestamp: ann.timestamp,
                    features: *features,
                    addresses: addresses.to_vec(),
                });
                // If this node isn't a seed, we're not interested in adding it
                // to our address book, but other nodes may be, so we relay the message anyway.
                if !features.has(Features::SEED) {
                    return Ok(relay);
                }

                match self.db.addresses_mut().insert(
                    announcer,
                    ann.version,
                    ann.features,
                    &ann.alias,
                    ann.work(),
                    &ann.agent,
                    timestamp,
                    addresses
                        .iter()
                        // Ignore non-routable addresses unless received from a local network
                        // peer. This allows the node to function in a local network.
                        .filter(|a| a.is_routable() || relayer_addr.is_local())
                        .map(|a| KnownAddress::new(a.clone(), address::Source::Peer)),
                ) {
                    Ok(updated) => {
                        // Only relay if we received new information.
                        if updated {
                            debug!(
                                target: "service",
                                "Address store entry for node {announcer} updated at {timestamp}"
                            );
                            return Ok(relay);
                        }
                    }
                    Err(err) => {
                        // An error here is due to a fault in our address store.
                        error!(target: "service", "Error processing node announcement from {announcer}: {err}");
                    }
                }
            }
        }
        // Node announcements that added no new information are not relayed.
        Ok(None)
    }
-

-
    pub fn handle_info(&mut self, remote: NodeId, info: &Info) -> Result<(), session::Error> {
-
        match info {
-
            // Nb. We don't currently send this message.
-
            Info::RefsAlreadySynced { rid, at } => {
-
                debug!(target: "service", "Refs already synced for {rid} by {remote}");
-
                self.emitter.emit(Event::RefsSynced {
-
                    rid: *rid,
-
                    remote,
-
                    at: *at,
-
                });
-
            }
-
        }
-

-
        Ok(())
-
    }
-

-
    /// Handle a single wire message received from `remote`.
    ///
    /// Applies per-link rate limiting, reconciles the session state with the wire
    /// state, then dispatches on the message kind (announcement, subscription,
    /// info, ping/pong). Unknown or untimely messages are dropped without error.
    pub fn handle_message(
        &mut self,
        remote: &NodeId,
        message: Message,
    ) -> Result<(), session::Error> {
        let local = self.node_id();
        let relay = self.config.is_relay();
        // Messages from peers without a session are dropped, not treated as errors.
        let Some(peer) = self.sessions.get_mut(remote) else {
            warn!(target: "service", "Session not found for {remote}");
            return Ok(());
        };
        peer.last_active = self.clock;

        // Inbound and outbound links have separate rate budgets.
        let limit = match peer.link {
            Link::Outbound => &self.config.limits.rate.outbound,
            Link::Inbound => &self.config.limits.rate.inbound,
        };
        if self
            .limiter
            .limit(peer.addr.clone().into(), Some(remote), limit, self.clock)
        {
            debug!(target: "service", "Rate limiting message from {remote} ({})", peer.addr);
            return Ok(());
        }
        message.log(log::Level::Debug, remote, Link::Inbound);

        // `connected` holds the ping state and latency history, but only for peers
        // that are in the connected state; other states yield `None`.
        let connected = match &mut peer.state {
            session::State::Disconnected { .. } => {
                debug!(target: "service", "Ignoring message from disconnected peer {}", peer.id);
                return Ok(());
            }
            // In case of a discrepancy between the service state and the state of the underlying
            // wire protocol, we may receive a message from a peer that we consider not fully connected
            // at the service level. To remedy this, we simply transition the peer to a connected state.
            //
            // This is not ideal, but until the wire protocol and service are unified, it's the simplest
            // solution to converge towards the same state.
            session::State::Attempted { .. } | session::State::Initial => {
                debug!(target: "service", "Received unexpected message from connecting peer {}", peer.id);
                debug!(target: "service", "Transitioning peer {} to 'connected' state", peer.id);

                peer.to_connected(self.clock);

                None
            }
            session::State::Connected {
                ping, latencies, ..
            } => Some((ping, latencies)),
        };

        trace!(target: "service", "Received message {message:?} from {remote}");

        match message {
            // Process a peer announcement.
            Message::Announcement(ann) => {
                let relayer = remote;
                let relayer_addr = peer.addr.clone();

                // `handle_announcement` returns an id only when the announcement is fresh
                // and worth relaying further.
                if let Some(id) = self.handle_announcement(relayer, &relayer_addr, &ann)? {
                    if self.config.is_relay() {
                        if let AnnouncementMessage::Inventory(_) = ann.message {
                            // Inventory announcements are relayed later, via the stored
                            // gossip table; just flag them for relay here.
                            if let Err(e) = self
                                .database_mut()
                                .gossip_mut()
                                .set_relay(id, gossip::RelayStatus::Relay)
                            {
                                error!(target: "service", "Error setting relay flag for message: {e}");
                                return Ok(());
                            }
                        } else {
                            self.relay(id, ann);
                        }
                    }
                }
            }
            Message::Subscribe(subscribe) => {
                // Filter announcements by interest.
                match self
                    .db
                    .gossip()
                    .filtered(&subscribe.filter, subscribe.since, subscribe.until)
                {
                    Ok(anns) => {
                        for ann in anns {
                            let ann = match ann {
                                Ok(a) => a,
                                Err(e) => {
                                    error!(target: "service", "Error reading gossip message from store: {e}");
                                    continue;
                                }
                            };
                            // Don't send announcements authored by the remote, back to the remote.
                            if ann.node == *remote {
                                continue;
                            }
                            // Only send messages if we're a relay, or it's our own messages.
                            if relay || ann.node == local {
                                self.outbox.write(peer, ann.into());
                            }
                        }
                    }
                    Err(e) => {
                        error!(target: "service", "Error querying gossip messages from store: {e}");
                    }
                }
                peer.subscribe = Some(subscribe);
            }
            Message::Info(info) => {
                self.handle_info(*remote, &info)?;
            }
            Message::Ping(Ping { ponglen, .. }) => {
                // Ignore pings which ask for too much data.
                if ponglen > Ping::MAX_PONG_ZEROES {
                    return Ok(());
                }
                self.outbox.write(
                    peer,
                    Message::Pong {
                        zeroes: ZeroBytes::new(ponglen),
                    },
                );
            }
            Message::Pong { zeroes } => {
                // Only meaningful for fully connected peers with an outstanding ping.
                if let Some((ping, latencies)) = connected {
                    if let session::PingState::AwaitingResponse {
                        len: ponglen,
                        since,
                    } = *ping
                    {
                        // The pong must echo back exactly the requested payload length.
                        if (ponglen as usize) == zeroes.len() {
                            *ping = session::PingState::Ok;
                            // Keep track of peer latency.
                            latencies.push_back(self.clock - since);
                            if latencies.len() > MAX_LATENCIES {
                                latencies.pop_front();
                            }
                        }
                    }
                }
            }
        }
        Ok(())
    }

-
    /// A convenient method to check if we should fetch from a `RefsAnnouncement` with `scope`.
-
    fn refs_status_of(
-
        &self,
-
        rid: RepoId,
-
        refs: NonEmpty<RefsAt>,
-
        scope: &policy::Scope,
-
    ) -> Result<RefsStatus, Error> {
-
        let mut refs = RefsStatus::new(rid, refs, self.db.refs())?;
-
        // Check that there's something we want.
-
        if refs.want.is_empty() {
-
            return Ok(refs);
-
        }
-
        // Check scope.
-
        let mut refs = match scope {
-
            policy::Scope::All => refs,
-
            policy::Scope::Followed => match self.policies.namespaces_for(&self.storage, &rid) {
-
                Ok(Namespaces::All) => refs,
-
                Ok(Namespaces::Followed(followed)) => {
-
                    refs.want.retain(|r| followed.contains(&r.remote));
-
                    refs
-
                }
-
                Err(e) => return Err(e.into()),
-
            },
-
        };
-
        // Remove our own remote, we don't want to fetch that.
-
        refs.want.retain(|r| r.remote != self.node_id());
-

-
        Ok(refs)
-
    }
-

-
    /// Add a seed to our routing table.
-
    fn seed_discovered(&mut self, rid: RepoId, nid: NodeId, time: Timestamp) {
-
        if let Ok(result) = self.db.routing_mut().add_inventory([&rid], nid, time) {
-
            if let &[(_, InsertResult::SeedAdded)] = result.as_slice() {
-
                self.emitter.emit(Event::SeedDiscovered { rid, nid });
-
                info!(target: "service", "Routing table updated for {} with seed {nid}", rid);
-
            }
-
        }
-
    }
-

-
    /// Set of initial messages to send to a peer.
-
    fn initial(&mut self, _link: Link) -> Vec<Message> {
-
        let now = self.clock();
-
        let filter = self.filter();
-

-
        // TODO: Only subscribe to outbound connections, otherwise we will consume too
-
        // much bandwidth.
-

-
        // If we've been previously connected to the network, we'll have received gossip messages.
-
        // Instead of simply taking the last timestamp we try to ensure we don't miss any
-
        // messages due un-synchronized clocks.
-
        //
-
        // If this is our first connection to the network, we just ask for a fixed backlog
-
        // of messages to get us started.
-
        let since = if let Some(last) = self.last_online_at {
-
            Timestamp::from(last - SUBSCRIBE_BACKLOG_DELTA)
-
        } else {
-
            (*now - INITIAL_SUBSCRIBE_BACKLOG_DELTA).into()
-
        };
-
        debug!(target: "service", "Subscribing to messages since timestamp {since}..");
-

-
        vec![
-
            Message::node(self.node.clone(), &self.signer),
-
            Message::inventory(self.inventory.clone(), &self.signer),
-
            Message::subscribe(filter, since, Timestamp::MAX),
-
        ]
-
    }
-

-
    /// Try to guess whether we're online or not.
-
    fn is_online(&self) -> bool {
-
        self.sessions
-
            .connected()
-
            .filter(|(_, s)| s.addr.is_routable() && s.last_active >= self.clock - IDLE_INTERVAL)
-
            .count()
-
            > 0
-
    }
-

-
    /// Remove a local repository from our inventory.
-
    fn remove_inventory(&mut self, rid: &RepoId) -> Result<bool, Error> {
-
        let node = self.node_id();
-
        let now = self.timestamp();
-

-
        let removed = self.db.routing_mut().remove_inventory(rid, &node)?;
-
        if removed {
-
            self.refresh_and_announce_inventory(now)?;
-
        }
-
        Ok(removed)
-
    }
-

-
    /// Add a local repository to our inventory.
-
    fn add_inventory(&mut self, rid: RepoId) -> Result<bool, Error> {
-
        let node = self.node_id();
-
        let now = self.timestamp();
-

-
        if !self.storage.contains(&rid)? {
-
            error!(target: "service", "Attempt to add non-existing inventory {rid}: repository not found in storage");
-
            return Ok(false);
-
        }
-
        // Add to our local inventory.
-
        let updates = self.db.routing_mut().add_inventory([&rid], node, now)?;
-
        let updated = !updates.is_empty();
-

-
        if updated {
-
            self.refresh_and_announce_inventory(now)?;
-
        }
-
        Ok(updated)
-
    }
-

-
    /// Update cached inventory message, and announce new inventory to peers.
-
    fn refresh_and_announce_inventory(&mut self, time: Timestamp) -> Result<(), Error> {
-
        let inventory = self.inventory()?;
-

-
        self.inventory = gossip::inventory(time, inventory);
-
        self.announce_inventory();
-

-
        Ok(())
-
    }
-

-
    /// Get our local inventory.
-
    ///
-
    /// A node's inventory is the advertized list of repositories offered by a node.
-
    ///
-
    /// A node's inventory consists of *public* repositories that are seeded and available locally
-
    /// in the node's storage. We use the routing table as the canonical state of all inventories,
-
    /// including the local node's.
-
    ///
-
    /// When a repository is unseeded, it is also removed from the inventory. Private repositories
-
    /// are *not* part of a node's inventory.
-
    fn inventory(&self) -> Result<HashSet<RepoId>, Error> {
-
        self.db
-
            .routing()
-
            .get_inventory(self.nid())
-
            .map_err(Error::from)
-
    }
-

-
    /// Process a peer inventory announcement by updating our routing table.
    /// This function expects the peer's full inventory, and prunes entries that are not in the
    /// given inventory.
    ///
    /// Returns a summary of which repositories were added, refreshed and removed.
    fn sync_routing(
        &mut self,
        inventory: impl IntoIterator<Item = RepoId>,
        from: NodeId,
        timestamp: Timestamp,
    ) -> Result<SyncedRouting, Error> {
        let mut synced = SyncedRouting::default();
        let included = inventory.into_iter().collect::<BTreeSet<_>>();

        // Phase 1: insert (or refresh) every repository in the announced inventory.
        for (rid, result) in
            self.db
                .routing_mut()
                .add_inventory(included.iter(), from, timestamp)?
        {
            match result {
                InsertResult::SeedAdded => {
                    info!(target: "service", "Routing table updated for {rid} with seed {from}");
                    self.emitter.emit(Event::SeedDiscovered { rid, nid: from });

                    if self
                        .policies
                        .is_seeding(&rid)
                        .expect("Service::process_inventory: error accessing seeding configuration")
                    {
                        // TODO: We should fetch here if we're already connected, case this seed has
                        // refs we don't have.
                    }
                    synced.added.push(rid);
                }
                InsertResult::TimeUpdated => {
                    synced.updated.push(rid);
                }
                InsertResult::NotUpdated => {}
            }
        }
        // Phase 2: prune. Anything we previously attributed to `from` that is absent
        // from the announced inventory is no longer seeded by that node.
        for rid in self.db.routing().get_inventory(&from)?.into_iter() {
            if !included.contains(&rid) {
                if self.db.routing_mut().remove_inventory(&rid, &from)? {
                    synced.removed.push(rid);
                    self.emitter.emit(Event::SeedDropped { rid, nid: from });
                }
            }
        }
        Ok(synced)
    }

-
    /// Return a refs announcement including the given remotes.
-
    fn refs_announcement_for(
-
        &mut self,
-
        rid: RepoId,
-
        remotes: impl IntoIterator<Item = NodeId>,
-
    ) -> Result<(Announcement, Vec<RefsAt>), Error> {
-
        let repo = self.storage.repository(rid)?;
-
        let timestamp = self.timestamp();
-
        let mut refs = BoundedVec::<_, REF_REMOTE_LIMIT>::new();
-

-
        for remote_id in remotes.into_iter() {
-
            let refs_at = RefsAt::new(&repo, remote_id)?;
-

-
            if refs.push(refs_at).is_err() {
-
                warn!(
-
                    target: "service",
-
                    "refs announcement limit ({}) exceeded, peers will see only some of your repository references",
-
                    REF_REMOTE_LIMIT,
-
                );
-
                break;
-
            }
-
        }
-

-
        let msg = AnnouncementMessage::from(RefsAnnouncement {
-
            rid,
-
            refs: refs.clone(),
-
            timestamp,
-
        });
-
        Ok((msg.signed(&self.signer), refs.into()))
-
    }
-

-
    /// Announce our own refs for the given repo.
-
    fn announce_own_refs(&mut self, rid: RepoId, doc: Doc) -> Result<Vec<RefsAt>, Error> {
-
        let (refs, timestamp) = self.announce_refs(rid, doc, [self.node_id()])?;
-

-
        // Update refs database with our signed refs branches.
-
        // This isn't strictly necessary for now, as we only use the database for fetches, and
-
        // we don't fetch our own refs that are announced, but it's for good measure.
-
        if let &[r] = refs.as_slice() {
-
            self.emitter.emit(Event::LocalRefsAnnounced {
-
                rid,
-
                refs: r,
-
                timestamp,
-
            });
-
            if let Err(e) = self.database_mut().refs_mut().set(
-
                &rid,
-
                &r.remote,
-
                &SIGREFS_BRANCH,
-
                r.at,
-
                timestamp.to_local_time(),
-
            ) {
-
                error!(
-
                    target: "service",
-
                    "Error updating refs database for `rad/sigrefs` of {} in {rid}: {e}",
-
                    r.remote
-
                );
-
            }
-
        }
-
        Ok(refs)
-
    }
-

-
    /// Announce local refs for given repo.
    ///
    /// Returns the refs that were included in the announcement together with the
    /// announcement timestamp.
    fn announce_refs(
        &mut self,
        rid: RepoId,
        doc: Doc,
        remotes: impl IntoIterator<Item = NodeId>,
    ) -> Result<(Vec<RefsAt>, Timestamp), Error> {
        let (ann, refs) = self.refs_announcement_for(rid, remotes)?;
        let timestamp = ann.timestamp();
        let peers = self.sessions.connected().map(|(_, p)| p);

        // Update our sync status for our own refs. This is useful for determining if refs were
        // updated while the node was stopped.
        if let Some(refs) = refs.iter().find(|r| r.remote == ann.node) {
            info!(
                target: "service",
                "Announcing own refs for {rid} to peers ({}) (t={timestamp})..",
                refs.at
            );
            // Update our local node's sync status to mark the refs as announced.
            // A failure here is logged but doesn't stop the announcement.
            if let Err(e) = self
                .db
                .seeds_mut()
                .synced(&rid, &ann.node, refs.at, timestamp)
            {
                error!(target: "service", "Error updating sync status for local node: {e}");
            } else {
                debug!(target: "service", "Saved local sync status for {rid}..");
            }
        }

        self.outbox.announce(
            ann,
            peers.filter(|p| {
                // Only announce to peers who are allowed to view this repo.
                doc.is_visible_to(&p.id.into())
            }),
            self.db.gossip_mut(),
        );
        Ok((refs, timestamp))
    }

-
    fn reconnect(&mut self, nid: NodeId, addr: Address) -> bool {
-
        if let Some(sess) = self.sessions.get_mut(&nid) {
-
            sess.to_initial();
-
            self.outbox.connect(nid, addr);
-

-
            return true;
-
        }
-
        false
-
    }
-

-
    fn connect(&mut self, nid: NodeId, addr: Address) -> Result<(), ConnectError> {
-
        debug!(target: "service", "Connecting to {nid} ({addr})..");
-

-
        if nid == self.node_id() {
-
            return Err(ConnectError::SelfConnection);
-
        }
-
        if self.sessions.contains_key(&nid) {
-
            return Err(ConnectError::SessionExists { nid });
-
        }
-
        if self.sessions.outbound().count() >= self.config.limits.connection.outbound {
-
            return Err(ConnectError::LimitReached { nid, addr });
-
        }
-
        let persistent = self.config.is_persistent(&nid);
-
        let timestamp: Timestamp = self.clock.into();
-

-
        if let Err(e) = self.db.addresses_mut().attempted(&nid, &addr, timestamp) {
-
            error!(target: "service", "Error updating address book with connection attempt: {e}");
-
        }
-
        self.sessions.insert(
-
            nid,
-
            Session::outbound(
-
                nid,
-
                addr.clone(),
-
                persistent,
-
                self.rng.clone(),
-
                self.config.limits.clone(),
-
            ),
-
        );
-
        self.outbox.connect(nid, addr);
-

-
        Ok(())
-
    }
-

-
    /// Look up all known seeds for `rid`.
    ///
    /// Seeds learned from ref announcements (with sync status) take precedence over
    /// seeds only known via inventory announcements (no sync status).
    fn seeds(&self, rid: &RepoId) -> Result<Seeds, Error> {
        let mut seeds = Seeds::new(self.rng.clone());

        // First build a list from peers that have synced our own refs, if any.
        // This step is skipped if we don't have the repository yet, or don't have
        // our own refs.
        if let Ok(repo) = self.storage.repository(*rid) {
            if let Ok(local) = RefsAt::new(&repo, self.node_id()) {
                for seed in self.db.seeds().seeds_for(rid)? {
                    let seed = seed?;
                    let state = self.sessions.get(&seed.nid).map(|s| s.state.clone());
                    // A seed is in sync when it has our latest `rad/sigrefs` head.
                    let synced = if local.at == seed.synced_at.oid {
                        SyncStatus::Synced { at: seed.synced_at }
                    } else {
                        let local = SyncedAt::new(local.at, &repo)?;

                        SyncStatus::OutOfSync {
                            local,
                            remote: seed.synced_at,
                        }
                    };
                    seeds.insert(Seed::new(seed.nid, seed.addresses, state, Some(synced)));
                }
            }
        }

        // Then, add peers we know about but have no information about the sync status.
        // These peers have announced that they seed the repository via an inventory
        // announcement, but we haven't received any ref announcements from them.
        for nid in self.db.routing().get(rid)? {
            if nid == self.node_id() {
                continue;
            }
            if seeds.contains(&nid) {
                // We already have a richer entry for this node.
                continue;
            }
            let addrs = self.db.addresses().addresses_of(&nid)?;
            let state = self.sessions.get(&nid).map(|s| s.state.clone());

            seeds.insert(Seed::new(nid, addrs, state, None));
        }
        Ok(seeds)
    }

-
    /// Return a new filter object, based on our seeding policy.
-
    fn filter(&self) -> Filter {
-
        if self.config.seeding_policy.is_allow() {
-
            // TODO: Remove bits for blocked repos.
-
            Filter::default()
-
        } else {
-
            self.filter.clone()
-
        }
-
    }
-

-
    /// Get a timestamp for using in announcements.
-
    /// Never returns the same timestamp twice.
-
    fn timestamp(&mut self) -> Timestamp {
-
        let now = Timestamp::from(self.clock);
-
        if *now > *self.last_timestamp {
-
            self.last_timestamp = now;
-
        } else {
-
            self.last_timestamp = self.last_timestamp + 1;
-
        }
-
        self.last_timestamp
-
    }
-

-
    /// Relay an announcement to eligible connected peers.
    ///
    /// `id` identifies the stored gossip message, and is used to look up which peers
    /// already relayed this message to us.
    fn relay(&mut self, id: gossip::AnnouncementId, ann: Announcement) {
        let announcer = ann.node;
        let relayed_by = self.relayed_by.get(&id);
        // Refs announcements concern a specific repository; other announcements don't.
        let rid = if let AnnouncementMessage::Refs(RefsAnnouncement { rid, .. }) = ann.message {
            Some(rid)
        } else {
            None
        };
        // Choose peers we should relay this message to.
        // 1. Don't relay to a peer who sent us this message.
        // 2. Don't relay to the peer who signed this announcement.
        let relay_to = self
            .sessions
            .connected()
            .filter(|(id, _)| {
                relayed_by
                    .map(|relayers| !relayers.contains(id))
                    .unwrap_or(true) // If there are no relayers we let it through.
            })
            .filter(|(id, _)| **id != announcer)
            .filter(|(id, _)| {
                if let Some(rid) = rid {
                    // Only relay this message if the peer is allowed to know about the
                    // repository. If we don't have the repository, return `false` because
                    // we can't determine if it's private or public.
                    self.storage
                        .get(rid)
                        .ok()
                        .flatten()
                        .map(|doc| doc.is_visible_to(&(*id).into()))
                        .unwrap_or(false)
                } else {
                    // Announcement doesn't concern a specific repository, let it through.
                    true
                }
            })
            .map(|(_, p)| p);

        self.outbox.relay(ann, relay_to);
    }

-
    ////////////////////////////////////////////////////////////////////////////
-
    // Periodic tasks
-
    ////////////////////////////////////////////////////////////////////////////
-

-
    fn relay_announcements(&mut self) -> Result<(), Error> {
-
        let now = self.clock.into();
-
        let rows = self.database_mut().gossip_mut().relays(now)?;
-
        let local = self.node_id();
-

-
        for (id, msg) in rows {
-
            let announcer = msg.node;
-
            if announcer == local {
-
                // Don't relay our own stored gossip messages.
-
                continue;
-
            }
-
            self.relay(id, msg);
-
        }
-
        Ok(())
-
    }
-

-
    /// Announce our inventory to all connected peers, unless it was already announced.
-
    fn announce_inventory(&mut self) {
-
        let timestamp = self.inventory.timestamp.to_local_time();
-

-
        if self.last_inventory == timestamp {
-
            debug!(target: "service", "Skipping redundant inventory announcement (t={})", self.inventory.timestamp);
-
            return;
-
        }
-
        let msg = AnnouncementMessage::from(self.inventory.clone());
-

-
        self.outbox.announce(
-
            msg.signed(&self.signer),
-
            self.sessions.connected().map(|(_, p)| p),
-
            self.db.gossip_mut(),
-
        );
-
        self.last_inventory = timestamp;
-
    }
-

-
    fn prune_routing_entries(&mut self, now: &LocalTime) -> Result<(), routing::Error> {
-
        let count = self.db.routing().len()?;
-
        if count <= self.config.limits.routing_max_size {
-
            return Ok(());
-
        }
-

-
        let delta = count - self.config.limits.routing_max_size;
-
        let nid = self.node_id();
-
        self.db.routing_mut().prune(
-
            (*now - self.config.limits.routing_max_age).into(),
-
            Some(delta),
-
            &nid,
-
        )?;
-
        Ok(())
-
    }
-

-
    fn disconnect_unresponsive_peers(&mut self, now: &LocalTime) {
-
        let stale = self
-
            .sessions
-
            .connected()
-
            .filter(|(_, session)| *now - session.last_active >= STALE_CONNECTION_TIMEOUT);
-

-
        for (_, session) in stale {
-
            debug!(target: "service", "Disconnecting unresponsive peer {}..", session.id);
-

-
            // TODO: Should we switch the session state to "disconnected" even before receiving
-
            // an official "disconnect"? Otherwise we keep pinging until we get the disconnection.
-

-
            self.outbox.disconnect(
-
                session.id,
-
                DisconnectReason::Session(session::Error::Timeout),
-
            );
-
        }
-
    }
-

-
    /// Ensure connection health by pinging connected peers.
-
    fn keep_alive(&mut self, now: &LocalTime) {
-
        let inactive_sessions = self
-
            .sessions
-
            .connected_mut()
-
            .filter(|(_, session)| *now - session.last_active >= KEEP_ALIVE_DELTA)
-
            .map(|(_, session)| session);
-
        for session in inactive_sessions {
-
            session.ping(self.clock, &mut self.outbox).ok();
-
        }
-
    }
-

-
    /// Get a list of peers available to connect to, sorted by lowest penalty.
-
    fn available_peers(&mut self) -> Vec<Peer> {
-
        match self.db.addresses().entries() {
-
            Ok(entries) => {
-
                // Nb. we don't want to connect to any peers that already have a session with us,
-
                // even if it's in a disconnected state. Those sessions are re-attempted automatically.
-
                let mut peers = entries
-
                    .filter(|entry| entry.version == PROTOCOL_VERSION)
-
                    .filter(|entry| !entry.address.banned)
-
                    .filter(|entry| !entry.penalty.is_connect_threshold_reached())
-
                    .filter(|entry| !self.sessions.contains_key(&entry.node))
-
                    .filter(|entry| !self.config.external_addresses.contains(&entry.address.addr))
-
                    .filter(|entry| &entry.node != self.nid())
-
                    .fold(HashMap::new(), |mut acc, entry| {
-
                        acc.entry(entry.node)
-
                            .and_modify(|e: &mut Peer| e.addresses.push(entry.address.clone()))
-
                            .or_insert_with(|| Peer {
-
                                nid: entry.node,
-
                                addresses: vec![entry.address],
-
                                penalty: entry.penalty,
-
                            });
-
                        acc
-
                    })
-
                    .into_values()
-
                    .collect::<Vec<_>>();
-
                peers.sort_by_key(|p| p.penalty);
-
                peers
-
            }
-
            Err(e) => {
-
                error!(target: "service", "Unable to lookup available peers in address book: {e}");
-
                Vec::new()
-
            }
-
        }
-
    }
-

-
    /// Fetch all repositories that are seeded but missing from storage.
-
    fn fetch_missing_repositories(&mut self) -> Result<(), Error> {
-
        // TODO(finto): could filter the policies based on the continue checks
-
        // below, but `storage.contains` is fallible
-
        let policies = self.policies.seed_policies()?.collect::<Vec<_>>();
-
        for policy in policies {
-
            let rid = policy.rid;
-

-
            if !policy.is_allow() {
-
                continue;
-
            }
-
            if self.storage.contains(&rid)? {
-
                continue;
-
            }
-
            match self.seeds(&rid) {
-
                Ok(seeds) => {
-
                    if let Some(connected) = NonEmpty::from_vec(seeds.connected().collect()) {
-
                        for seed in connected {
-
                            self.fetch(rid, seed.nid, FETCH_TIMEOUT, None);
-
                        }
-
                    } else {
-
                        // TODO: We should make sure that this fetch is retried later, either
-
                        // when we connect to a seed, or when we discover a new seed.
-
                        // Since new connections and routing table updates are both conditions for
-
                        // fetching, we should trigger fetches when those conditions appear.
-
                        // Another way to handle this would be to update our database, saying
-
                        // that we're trying to fetch a certain repo. We would then just
-
                        // iterate over those entries in the above circumstances. This is
-
                        // merely an optimization though, we can also iterate over all seeded
-
                        // repos and check which ones are not in our inventory.
-
                        debug!(target: "service", "No connected seeds found for {rid}..");
-
                    }
-
                }
-
                Err(e) => {
-
                    error!(target: "service", "Couldn't fetch missing repo {rid}: failed to lookup seeds: {e}");
-
                }
-
            }
-
        }
-
        Ok(())
-
    }
-

-
    /// Run idle task for all connections.
-
    fn idle_connections(&mut self) {
-
        for (_, sess) in self.sessions.iter_mut() {
-
            sess.idle(self.clock);
-

-
            if sess.is_stable() {
-
                // Mark as connected once connection is stable.
-
                if let Err(e) =
-
                    self.db
-
                        .addresses_mut()
-
                        .connected(&sess.id, &sess.addr, self.clock.into())
-
                {
-
                    error!(target: "service", "Error updating address book with connection: {e}");
-
                }
-
            }
-
        }
-
    }
-

-
    /// Try to maintain a target number of connections.
    ///
    /// No-op unless the peer configuration is dynamic. Attempts at most
    /// `TARGET_OUTBOUND_PEERS - current outbound` new connections per run.
    fn maintain_connections(&mut self) {
        let PeerConfig::Dynamic = self.config.peers else {
            return;
        };
        trace!(target: "service", "Maintaining connections..");

        let target = TARGET_OUTBOUND_PEERS;
        let now = self.clock;
        // Count outbound sessions that are connected or in the process of connecting.
        let outbound = self
            .sessions
            .values()
            .filter(|s| s.link.is_outbound())
            .filter(|s| s.is_connected() || s.is_connecting())
            .count();
        let wanted = target.saturating_sub(outbound);

        // Don't connect to more peers than needed.
        if wanted == 0 {
            return;
        }

        // Peers available to connect to: for each candidate peer, pick the first
        // address that is currently worth attempting.
        let available = self
            .available_peers()
            .into_iter()
            .filter_map(|peer| {
                peer.addresses
                    .into_iter()
                    .find(|ka| match (ka.last_success, ka.last_attempt) {
                        // If we succeeded the last time we tried, this is a good address.
                        // If it's been long enough that we failed to connect, we also try again.
                        (Some(success), Some(attempt)) => {
                            success >= attempt || now - attempt >= CONNECTION_RETRY_DELTA
                        }
                        // If we haven't succeeded yet, and we waited long enough, we can try this address.
                        (None, Some(attempt)) => now - attempt >= CONNECTION_RETRY_DELTA,
                        // If we have no failed attempts for this address, it's worth a try.
                        (_, None) => true,
                    })
                    .map(|ka| (peer.nid, ka))
            })
            .filter(|(_, ka)| match AddressType::from(&ka.addr) {
                // Only consider onion addresses if configured.
                AddressType::Onion => self.config.onion.is_some(),
                AddressType::Dns | AddressType::Ipv4 | AddressType::Ipv6 => true,
            });

        // Peers we are going to attempt connections to.
        let connect = available.take(wanted).collect::<Vec<_>>();
        if connect.len() < wanted {
            log::debug!(
                target: "service",
                "Not enough available peers to connect to (available={}, wanted={wanted})",
                connect.len()
            );
        }
        for (id, ka) in connect {
            if let Err(e) = self.connect(id, ka.addr.clone()) {
                error!(target: "service", "Service::maintain_connections connection error: {e}");
            }
        }
    }

-
    /// Maintain persistent peer connections.
-
    fn maintain_persistent(&mut self) {
-
        trace!(target: "service", "Maintaining persistent peers..");
-

-
        let now = self.local_time();
-
        let mut reconnect = Vec::new();
-

-
        for (nid, session) in self.sessions.iter_mut() {
-
            if let Some(addr) = self.config.peer(nid) {
-
                if let session::State::Disconnected { retry_at, .. } = &mut session.state {
-
                    // TODO: Try to reconnect only if the peer was attempted. A disconnect without
-
                    // even a successful attempt means that we're unlikely to be able to reconnect.
-

-
                    if now >= *retry_at {
-
                        reconnect.push((*nid, addr.clone(), session.attempts()));
-
                    }
-
                }
-
            }
-
        }
-

-
        for (nid, addr, attempts) in reconnect {
-
            if self.reconnect(nid, addr) {
-
                debug!(target: "service", "Reconnecting to {nid} (attempts={attempts})...");
-
            }
-
        }
-
    }
-
}
-

-
/// Gives read access to the service state.
pub trait ServiceState {
    /// Get the Node ID of the local node.
    fn nid(&self) -> &NodeId;
    /// Get the existing sessions.
    fn sessions(&self) -> &Sessions;
    /// Get fetch state, keyed by the repository being fetched.
    fn fetching(&self) -> &HashMap<RepoId, FetchState>;
    /// Get the outbox of pending I/O operations.
    fn outbox(&self) -> &Outbox;
    /// Get rate limiter.
    fn limiter(&self) -> &RateLimiter;
    /// Get event emitter.
    fn emitter(&self) -> &Emitter<Event>;
    /// Get a repository's identity document from storage, if present.
    fn get(&self, rid: RepoId) -> Result<Option<Doc>, RepositoryError>;
    /// Get the clock.
    fn clock(&self) -> &LocalTime;
    /// Get the clock mutably.
    fn clock_mut(&mut self) -> &mut LocalTime;
    /// Get service configuration.
    fn config(&self) -> &Config;
    /// Get service metrics.
    fn metrics(&self) -> &Metrics;
}
-

-
// Trivial accessors: the service state is exposed read-only (plus a mutable
// clock) through the `ServiceState` trait.
impl<D, S, G> ServiceState for Service<D, S, G>
where
    D: routing::Store,
    G: crypto::signature::Signer<crypto::Signature>,
    S: ReadStorage,
{
    // The node ID is the public key of the service's signer.
    fn nid(&self) -> &NodeId {
        self.signer.public_key()
    }

    fn sessions(&self) -> &Sessions {
        &self.sessions
    }

    fn fetching(&self) -> &HashMap<RepoId, FetchState> {
        &self.fetching
    }

    fn outbox(&self) -> &Outbox {
        &self.outbox
    }

    fn limiter(&self) -> &RateLimiter {
        &self.limiter
    }

    fn emitter(&self) -> &Emitter<Event> {
        &self.emitter
    }

    fn get(&self, rid: RepoId) -> Result<Option<Doc>, RepositoryError> {
        self.storage.get(rid)
    }

    fn clock(&self) -> &LocalTime {
        &self.clock
    }

    fn clock_mut(&mut self) -> &mut LocalTime {
        &mut self.clock
    }

    fn config(&self) -> &Config {
        &self.config
    }

    fn metrics(&self) -> &Metrics {
        &self.metrics
    }
}
-

-
/// Disconnect reason.
#[derive(Debug)]
pub enum DisconnectReason {
    /// Error while dialing the remote. This error occurs before a connection is
    /// even established. Errors of this kind are usually not transient.
    Dial(Arc<dyn std::error::Error + Sync + Send>),
    /// Error with an underlying established connection. Sometimes, reconnecting
    /// after such an error is possible.
    Connection(Arc<dyn std::error::Error + Sync + Send>),
    /// Error with a fetch.
    Fetch(FetchError),
    /// Session error.
    Session(session::Error),
    /// Session conflicts with existing session.
    Conflict,
    /// Connection to self.
    SelfConnection,
    /// User requested disconnect.
    Command,
}
-

-
impl DisconnectReason {
-
    pub fn is_dial_err(&self) -> bool {
-
        matches!(self, Self::Dial(_))
-
    }
-

-
    pub fn is_connection_err(&self) -> bool {
-
        matches!(self, Self::Connection(_))
-
    }
-

-
    pub fn connection() -> Self {
-
        DisconnectReason::Connection(Arc::new(std::io::Error::from(
-
            std::io::ErrorKind::ConnectionReset,
-
        )))
-
    }
-
}
-

-
impl fmt::Display for DisconnectReason {
-
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-
        match self {
-
            Self::Dial(err) => write!(f, "{err}"),
-
            Self::Connection(err) => write!(f, "{err}"),
-
            Self::Command => write!(f, "command"),
-
            Self::SelfConnection => write!(f, "self-connection"),
-
            Self::Conflict => write!(f, "conflict"),
-
            Self::Session(err) => write!(f, "{err}"),
-
            Self::Fetch(err) => write!(f, "fetch: {err}"),
-
        }
-
    }
-
}
-

-
/// Result of a project lookup.
#[derive(Debug)]
pub struct Lookup {
    /// The project's identity document, if the project was found locally;
    /// `None` otherwise.
    pub local: Option<Doc>,
    /// A list of remote peers on which the project is known to exist.
    pub remote: Vec<NodeId>,
}
-

-
/// Errors that can occur during a project lookup.
#[derive(thiserror::Error, Debug)]
pub enum LookupError {
    /// Error querying the routing table.
    #[error(transparent)]
    Routing(#[from] routing::Error),
    /// Error accessing the repository.
    #[error(transparent)]
    Repository(#[from] RepositoryError),
}
-

-
#[derive(Debug, Clone)]
/// Holds currently (or recently) connected peers.
/// Derefs to the underlying [`AddressBook`] for map-like access.
pub struct Sessions(AddressBook<NodeId, Session>);
-

-
impl Sessions {
-
    pub fn new(rng: Rng) -> Self {
-
        Self(AddressBook::new(rng))
-
    }
-

-
    /// Iterator over fully connected peers.
-
    pub fn connected(&self) -> impl Iterator<Item = (&NodeId, &Session)> + Clone {
-
        self.0
-
            .iter()
-
            .filter_map(move |(id, sess)| match &sess.state {
-
                session::State::Connected { .. } => Some((id, sess)),
-
                _ => None,
-
            })
-
    }
-

-
    /// Iterator over connected inbound peers.
-
    pub fn inbound(&self) -> impl Iterator<Item = (&NodeId, &Session)> + Clone {
-
        self.connected().filter(|(_, s)| s.link.is_inbound())
-
    }
-

-
    /// Iterator over outbound peers.
-
    pub fn outbound(&self) -> impl Iterator<Item = (&NodeId, &Session)> + Clone {
-
        self.connected().filter(|(_, s)| s.link.is_outbound())
-
    }
-

-
    /// Iterator over mutable fully connected peers.
-
    pub fn connected_mut(&mut self) -> impl Iterator<Item = (&NodeId, &mut Session)> {
-
        self.0.iter_mut().filter(move |(_, s)| s.is_connected())
-
    }
-

-
    /// Iterator over disconnected peers.
-
    pub fn disconnected_mut(&mut self) -> impl Iterator<Item = (&NodeId, &mut Session)> {
-
        self.0.iter_mut().filter(move |(_, s)| s.is_disconnected())
-
    }
-

-
    /// Return whether this node has a fully established session.
-
    pub fn is_connected(&self, id: &NodeId) -> bool {
-
        self.0.get(id).map(|s| s.is_connected()).unwrap_or(false)
-
    }
-

-
    /// Return whether this node can be connected to.
-
    pub fn is_disconnected(&self, id: &NodeId) -> bool {
-
        self.0.get(id).map(|s| s.is_disconnected()).unwrap_or(true)
-
    }
-
}
-

-
// NOTE(review): `Deref` exposes the whole `AddressBook` API on `Sessions`;
// convenient, though it blurs the newtype boundary.
impl Deref for Sessions {
    type Target = AddressBook<NodeId, Session>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
-

-
// Mutable counterpart of the `Deref` impl above the newtype's inner map.
impl DerefMut for Sessions {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
deleted crates/radicle-node/src/service/filter.rs
@@ -1,153 +0,0 @@
-
#![allow(clippy::identity_op)]
-
use std::ops::{Deref, DerefMut};
-

-
pub use bloomy::BloomFilter;
-

-
use crate::identity::RepoId;
-

-
/// Size in bytes of *large* bloom filter.
/// It can store about 13'675 items with a false positive rate of 1%.
pub const FILTER_SIZE_L: usize = 16 * 1024;
/// Size in bytes of *medium* bloom filter.
/// It can store about 3'419 items with a false positive rate of 1%.
pub const FILTER_SIZE_M: usize = 4 * 1024;
/// Size in bytes of *small* bloom filter.
/// It can store about 855 items with a false positive rate of 1%.
// Written as `1 * 1024` for symmetry with the other size constants.
pub const FILTER_SIZE_S: usize = 1 * 1024;

/// Valid filter sizes.
pub const FILTER_SIZES: [usize; 3] = [FILTER_SIZE_S, FILTER_SIZE_M, FILTER_SIZE_L];

/// Target false positive rate of filter.
pub const FILTER_FP_RATE: f64 = 0.01;
/// Number of hashes used for bloom filter.
// The capacity/hash-count figures above are verified in the test module below.
pub const FILTER_HASHES: usize = 7;
-

-
/// Inventory filter used for subscriptions and inventory comparison.
/// Wraps a [`BloomFilter`] keyed by [`RepoId`].
///
/// The [`Default`] instance has all bits set to `1`, ie. it will match
/// everything.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Filter(BloomFilter<RepoId>);
-

-
impl Default for Filter {
-
    fn default() -> Self {
-
        Self(BloomFilter::from(vec![0xff; FILTER_SIZE_S]))
-
    }
-
}
-

-
impl Filter {
-
    /// Create a new filter with the given items.
-
    ///
-
    /// Uses the iterator's size hint to determine the size of the filter.
-
    pub fn new(ids: impl IntoIterator<Item = RepoId>) -> Self {
-
        let iterator = ids.into_iter();
-
        let (min, _) = iterator.size_hint();
-
        let size = bloomy::bloom::optimal_bits(min, FILTER_FP_RATE) / 8;
-
        let size = if size > FILTER_SIZE_M {
-
            FILTER_SIZE_L
-
        } else if size > FILTER_SIZE_S {
-
            FILTER_SIZE_M
-
        } else {
-
            FILTER_SIZE_S
-
        };
-
        let mut bloom = BloomFilter::with_size(size);
-

-
        for id in iterator {
-
            bloom.insert(&id);
-
        }
-
        Self(bloom)
-
    }
-

-
    /// Empty filter with nothing set.
-
    pub fn empty() -> Self {
-
        Self(BloomFilter::from(vec![0x0; FILTER_SIZE_S]))
-
    }
-

-
    /// Size in bytes.
-
    pub fn size(&self) -> usize {
-
        self.0.bits() / 8
-
    }
-
}
-

-
// Expose the inner bloom filter's read API (`contains`, `bits`, ...) directly.
impl Deref for Filter {
    type Target = BloomFilter<RepoId>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
-

-
// Mutable access to the inner bloom filter, e.g. for `insert`.
impl DerefMut for Filter {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
-

-
impl From<BloomFilter<RepoId>> for Filter {
    /// Wrap an existing bloom filter without resizing it.
    fn from(bloom: BloomFilter<RepoId>) -> Self {
        Self(bloom)
    }
}
-

-
#[cfg(test)]
mod test {
    use super::*;
    use crate::test::arbitrary;

    // Sanity-check that the size and hash-count constants above agree with
    // `bloomy`'s optimal-parameter formulas.
    #[test]
    fn test_parameters() {
        // To store 10'000 items with a false positive rate of 1%, we need about 12KB.
        assert_eq!(bloomy::bloom::optimal_bits(10_000, 0.01) / 8, 11_981);
        // To store 1'000 items with a false positive rate of 1%, we need about 1KB.
        assert_eq!(bloomy::bloom::optimal_bits(1_000, 0.01) / 8, 1198);
        // To store 100 items with a false positive rate of 1%, we need about 120B.
        assert_eq!(bloomy::bloom::optimal_bits(100, 0.01) / 8, 119);

        // With 16KB, we can store 13'675 items with a false positive rate of 1%.
        assert_eq!(
            bloomy::bloom::optimal_capacity(FILTER_SIZE_L * 8, FILTER_FP_RATE),
            13_675
        );
        // With 4KB, we can store 3'419 items with a false positive rate of 1%.
        assert_eq!(
            bloomy::bloom::optimal_capacity(FILTER_SIZE_M * 8, FILTER_FP_RATE),
            3419
        );
        // With 1KB, we can store 855 items with a false positive rate of 1%.
        assert_eq!(
            bloomy::bloom::optimal_capacity(FILTER_SIZE_S * 8, FILTER_FP_RATE),
            855
        );

        assert_eq!(
            bloomy::bloom::optimal_hashes(FILTER_SIZE_L * 8, 13_675),
            FILTER_HASHES
        );
        assert_eq!(
            bloomy::bloom::optimal_hashes(FILTER_SIZE_M * 8, 3419),
            FILTER_HASHES
        );
        assert_eq!(
            bloomy::bloom::optimal_hashes(FILTER_SIZE_S * 8, 855),
            FILTER_HASHES
        );
    }

    // `Filter::new` should pick the filter size from the iterator's size hint.
    #[test]
    fn test_sizes() {
        let ids = arbitrary::vec::<RepoId>(3420);
        let f = Filter::new(ids.iter().cloned().take(10));
        assert_eq!(f.size(), FILTER_SIZE_S);

        let f = Filter::new(ids.iter().cloned().take(1000));
        assert_eq!(f.size(), FILTER_SIZE_M);

        let f = Filter::new(ids.iter().cloned());
        assert_eq!(f.size(), FILTER_SIZE_L);

        // Just checking that iterators over hash sets give correct size hints.
        let hs = arbitrary::set::<RepoId>(42..=42);
        assert_eq!(hs.iter().size_hint(), (42, Some(42)));
    }
}
deleted crates/radicle-node/src/service/gossip.rs
@@ -1,57 +0,0 @@
-
pub mod store;
-

-
use std::str::FromStr;
-
use std::sync::LazyLock;
-

-
use super::*;
-
use crate::{PROTOCOL_VERSION, VERSION};
-
use radicle::node::UserAgent;
-

-
pub use store::{AnnouncementId, Error, RelayStatus, Store};
-

-
/// This node's user agent string.
-
pub static USER_AGENT: LazyLock<UserAgent> = LazyLock::new(|| {
-
    FromStr::from_str(format!("/radicle:{}/", VERSION.version).as_str())
-
        .expect("user agent is valid")
-
});
-

-
pub fn node(config: &Config, timestamp: Timestamp) -> NodeAnnouncement {
-
    let features = config.features();
-
    let alias = config.alias.clone();
-
    let addresses: BoundedVec<_, ADDRESS_LIMIT> = config
-
        .external_addresses
-
        .clone()
-
        .try_into()
-
        .expect("external addresses are within the limit");
-
    let agent = USER_AGENT.clone();
-
    let version = PROTOCOL_VERSION;
-

-
    NodeAnnouncement {
-
        features,
-
        version,
-
        timestamp,
-
        alias,
-
        addresses,
-
        nonce: 0,
-
        agent,
-
    }
-
}
-

-
pub fn inventory(
-
    timestamp: Timestamp,
-
    inventory: impl IntoIterator<Item = RepoId>,
-
) -> InventoryAnnouncement {
-
    let inventory = inventory.into_iter().collect::<Vec<_>>();
-
    if inventory.len() > INVENTORY_LIMIT {
-
        error!(
-
            target: "service",
-
            "inventory announcement limit ({}) exceeded, other nodes will see only some of your projects",
-
            inventory.len()
-
        );
-
    }
-

-
    InventoryAnnouncement {
-
        inventory: BoundedVec::truncate(inventory),
-
        timestamp,
-
    }
-
}
deleted crates/radicle-node/src/service/gossip/store.rs
@@ -1,446 +0,0 @@
-
use std::num::TryFromIntError;
-
use std::{fmt, io};
-

-
use radicle::crypto::Signature;
-
use sqlite as sql;
-
use thiserror::Error;
-

-
use crate::node::{Database, NodeId};
-
use crate::prelude::{Filter, Timestamp};
-
use crate::service::message::{
-
    Announcement, AnnouncementMessage, InventoryAnnouncement, NodeAnnouncement, RefsAnnouncement,
-
};
-
use crate::wire;
-
use crate::wire::Decode;
-

-
#[derive(Error, Debug)]
-
pub enum Error {
-
    /// An Internal error.
-
    #[error("internal error: {0}")]
-
    Internal(#[from] sql::Error),
-
    /// Unit overflow.
-
    #[error("unit overflow:: {0}")]
-
    UnitOverflow(#[from] TryFromIntError),
-
}
-

-
/// Unique announcement identifier.
///
/// Corresponds to the announcement's SQLite `rowid`.
pub type AnnouncementId = u64;
-

-
/// A database that has access to historical gossip messages.
/// Keeps track of the latest received gossip messages for each node.
/// Grows linearly with the number of nodes on the network.
pub trait Store {
    /// Prune announcements older than the cutoff time.
    /// Returns the number of pruned announcements.
    fn prune(&mut self, cutoff: Timestamp) -> Result<usize, Error>;

    /// Get the timestamp of the last announcement in the store.
    fn last(&self) -> Result<Option<Timestamp>, Error>;

    /// Process an announcement for the given node.
    /// Returns the announcement's id if the timestamp was updated or the
    /// announcement wasn't there before, and `None` otherwise.
    fn announced(
        &mut self,
        nid: &NodeId,
        ann: &Announcement,
    ) -> Result<Option<AnnouncementId>, Error>;

    /// Set whether a message should be relayed or not.
    fn set_relay(&mut self, id: AnnouncementId, relay: RelayStatus) -> Result<(), Error>;

    /// Return messages that should be relayed, marking them as relayed at `now`.
    fn relays(&mut self, now: Timestamp) -> Result<Vec<(AnnouncementId, Announcement)>, Error>;

    /// Get all the latest gossip messages of all nodes, filtered by inventory filter and
    /// announcement timestamps (`from` inclusive, `to` exclusive).
    ///
    /// # Panics
    ///
    /// Panics if `from` > `to`.
    ///
    fn filtered<'a>(
        &'a self,
        filter: &'a Filter,
        from: Timestamp,
        to: Timestamp,
    ) -> Result<Box<dyn Iterator<Item = Result<Announcement, Error>> + 'a>, Error>;
}
-

-
impl Store for Database {
    // Delete all announcements strictly older than `cutoff` and report how
    // many rows were removed.
    fn prune(&mut self, cutoff: Timestamp) -> Result<usize, Error> {
        let mut stmt = self
            .db
            .prepare("DELETE FROM `announcements` WHERE timestamp < ?1")?;

        stmt.bind((1, &cutoff))?;
        stmt.next()?;

        Ok(self.db.change_count())
    }

    // `MAX(timestamp)` yields a single row whose column is NULL when the
    // table is empty; that case maps to `Ok(None)`.
    fn last(&self) -> Result<Option<Timestamp>, Error> {
        let stmt = self
            .db
            .prepare("SELECT MAX(timestamp) AS latest FROM `announcements`")?;

        if let Some(Ok(row)) = stmt.into_iter().next() {
            return match row.try_read::<Option<i64>, _>(0)? {
                Some(i) => Ok(Some(Timestamp::try_from(i)?)),
                None => Ok(None),
            };
        }
        Ok(None)
    }

    // Upsert an announcement. The `WHERE timestamp < ?6` clause makes the
    // update a no-op for stale or equal timestamps, in which case no row is
    // returned and the result is `Ok(None)`.
    fn announced(
        &mut self,
        nid: &NodeId,
        ann: &Announcement,
    ) -> Result<Option<AnnouncementId>, Error> {
        assert_ne!(
            ann.timestamp(),
            Timestamp::MIN,
            "Timestamp of {ann:?} must not be zero"
        );
        let mut stmt = self.db.prepare(
            "INSERT INTO `announcements` (node, repo, type, message, signature, timestamp)
             VALUES (?1, ?2, ?3, ?4, ?5, ?6)
             ON CONFLICT DO UPDATE
             SET message = ?4, signature = ?5, timestamp = ?6
             WHERE timestamp < ?6
             RETURNING rowid",
        )?;
        stmt.bind((1, nid))?;

        // The `repo` column holds the repository id for refs announcements,
        // and the empty string for node and inventory announcements.
        match &ann.message {
            AnnouncementMessage::Node(msg) => {
                stmt.bind((2, sql::Value::String(String::new())))?;
                stmt.bind((3, &GossipType::Node))?;
                stmt.bind((4, msg))?;
            }
            AnnouncementMessage::Refs(msg) => {
                stmt.bind((2, &msg.rid))?;
                stmt.bind((3, &GossipType::Refs))?;
                stmt.bind((4, msg))?;
            }
            AnnouncementMessage::Inventory(msg) => {
                stmt.bind((2, sql::Value::String(String::new())))?;
                stmt.bind((3, &GossipType::Inventory))?;
                stmt.bind((4, msg))?;
            }
        }
        stmt.bind((5, &ann.signature))?;
        stmt.bind((6, &ann.message.timestamp()))?;

        if let Some(row) = stmt.into_iter().next() {
            let row = row?;
            let id = row.read::<i64, _>("rowid");

            Ok(Some(id as AnnouncementId))
        } else {
            Ok(None)
        }
    }

    fn set_relay(&mut self, id: AnnouncementId, relay: RelayStatus) -> Result<(), Error> {
        let mut stmt = self.db.prepare(
            "UPDATE announcements
             SET relay = ?1
             WHERE rowid = ?2",
        )?;
        stmt.bind((1, relay))?;
        stmt.bind((2, id as i64))?;
        stmt.next()?;

        Ok(())
    }

    // Atomically flip all pending relays (`relay IS NULL`, see the
    // `RelayStatus` binding) to "relayed at `now`" and return them, so each
    // message is relayed at most once.
    fn relays(&mut self, now: Timestamp) -> Result<Vec<(AnnouncementId, Announcement)>, Error> {
        let mut stmt = self.db.prepare(
            "UPDATE announcements
             SET relay = ?1
             WHERE relay IS ?2
             RETURNING rowid, node, type, message, signature, timestamp",
        )?;
        stmt.bind((1, RelayStatus::RelayedAt(now)))?;
        stmt.bind((2, RelayStatus::Relay))?;

        let mut rows = stmt
            .into_iter()
            .map(|row| {
                let row = row?;
                parse::announcement(row)
            })
            .collect::<Result<Vec<_>, _>>()?;

        // Nb. Manually sort by insertion order, because we can't use `ORDER BY` with `RETURNING`
        // as of SQLite 3.45.
        rows.sort_by_key(|(id, _)| *id);

        Ok(rows)
    }

    // The time window is applied in SQL; the inventory filter is applied in
    // Rust on the decoded announcements. Decoding errors are passed through
    // to the caller rather than silently dropped.
    fn filtered<'a>(
        &'a self,
        filter: &'a Filter,
        from: Timestamp,
        to: Timestamp,
    ) -> Result<Box<dyn Iterator<Item = Result<Announcement, Error>> + 'a>, Error> {
        let mut stmt = self.db.prepare(
            "SELECT rowid, node, type, message, signature, timestamp
             FROM announcements
             WHERE timestamp >= ?1 and timestamp < ?2
             ORDER BY timestamp, node, type",
        )?;
        assert!(*from <= *to);

        stmt.bind((1, &from))?;
        stmt.bind((2, &to))?;

        Ok(Box::new(
            stmt.into_iter()
                .map(|row| {
                    let row = row?;
                    let (_, ann) = parse::announcement(row)?;

                    Ok(ann)
                })
                .filter(|ann| match ann {
                    Ok(a) => a.matches(filter),
                    Err(_) => true,
                }),
        ))
    }
}
-

-
impl TryFrom<&sql::Value> for NodeAnnouncement {
-
    type Error = sql::Error;
-

-
    fn try_from(value: &sql::Value) -> Result<Self, Self::Error> {
-
        match value {
-
            sql::Value::Binary(bytes) => {
-
                let mut reader = io::Cursor::new(bytes);
-
                NodeAnnouncement::decode(&mut reader).map_err(wire::Error::into)
-
            }
-
            _ => Err(sql::Error {
-
                code: None,
-
                message: Some("sql: invalid type for node announcement".to_owned()),
-
            }),
-
        }
-
    }
-
}
-

-
// Node announcements are stored in their binary wire encoding.
impl sql::BindableWithIndex for &NodeAnnouncement {
    fn bind<I: sql::ParameterIndex>(self, stmt: &mut sql::Statement<'_>, i: I) -> sql::Result<()> {
        wire::serialize(self).bind(stmt, i)
    }
}
-

-
impl TryFrom<&sql::Value> for RefsAnnouncement {
-
    type Error = sql::Error;
-

-
    fn try_from(value: &sql::Value) -> Result<Self, Self::Error> {
-
        match value {
-
            sql::Value::Binary(bytes) => {
-
                let mut reader = io::Cursor::new(bytes);
-
                RefsAnnouncement::decode(&mut reader).map_err(wire::Error::into)
-
            }
-
            _ => Err(sql::Error {
-
                code: None,
-
                message: Some("sql: invalid type for refs announcement".to_owned()),
-
            }),
-
        }
-
    }
-
}
-

-
// Refs announcements are stored in their binary wire encoding.
impl sql::BindableWithIndex for &RefsAnnouncement {
    fn bind<I: sql::ParameterIndex>(self, stmt: &mut sql::Statement<'_>, i: I) -> sql::Result<()> {
        wire::serialize(self).bind(stmt, i)
    }
}
-

-
impl TryFrom<&sql::Value> for InventoryAnnouncement {
-
    type Error = sql::Error;
-

-
    fn try_from(value: &sql::Value) -> Result<Self, Self::Error> {
-
        match value {
-
            sql::Value::Binary(bytes) => {
-
                let mut reader = io::Cursor::new(bytes);
-
                InventoryAnnouncement::decode(&mut reader).map_err(wire::Error::into)
-
            }
-
            _ => Err(sql::Error {
-
                code: None,
-
                message: Some("sql: invalid type for inventory announcement".to_owned()),
-
            }),
-
        }
-
    }
-
}
-

-
// Inventory announcements are stored in their binary wire encoding.
impl sql::BindableWithIndex for &InventoryAnnouncement {
    fn bind<I: sql::ParameterIndex>(self, stmt: &mut sql::Statement<'_>, i: I) -> sql::Result<()> {
        wire::serialize(self).bind(stmt, i)
    }
}
-

-
impl From<wire::Error> for sql::Error {
-
    fn from(other: wire::Error) -> Self {
-
        sql::Error {
-
            code: None,
-
            message: Some(other.to_string()),
-
        }
-
    }
-
}
-

-
/// Message relay status.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RelayStatus {
    /// The message is pending relay.
    Relay,
    /// The message should not be relayed.
    DontRelay,
    /// The message was relayed at the given time.
    RelayedAt(Timestamp),
}
-

-
impl sql::BindableWithIndex for RelayStatus {
    // Column encoding: `NULL` means "pending relay", `-1` means "don't
    // relay", and any other integer is the timestamp of the relay.
    fn bind<I: sql::ParameterIndex>(self, stmt: &mut sql::Statement<'_>, i: I) -> sql::Result<()> {
        match self {
            Self::Relay => sql::Value::Null.bind(stmt, i),
            Self::DontRelay => sql::Value::Integer(-1).bind(stmt, i),
            Self::RelayedAt(t) => t.bind(stmt, i),
        }
    }
}
-

-
/// Type of gossip message.
/// Stored in the `type` column as the string produced by its `Display` impl.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum GossipType {
    /// A refs announcement.
    Refs,
    /// A node announcement.
    Node,
    /// An inventory announcement.
    Inventory,
}
-

-
impl fmt::Display for GossipType {
-
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-
        match self {
-
            Self::Refs => write!(f, "refs"),
-
            Self::Node => write!(f, "node"),
-
            Self::Inventory => write!(f, "inventory"),
-
        }
-
    }
-
}
-

-
// Bound as the `Display` string ("refs", "node" or "inventory").
impl sql::BindableWithIndex for &GossipType {
    fn bind<I: sql::ParameterIndex>(self, stmt: &mut sql::Statement<'_>, i: I) -> sql::Result<()> {
        self.to_string().as_str().bind(stmt, i)
    }
}
-

-
impl TryFrom<&sql::Value> for GossipType {
-
    type Error = sql::Error;
-

-
    fn try_from(value: &sql::Value) -> Result<Self, Self::Error> {
-
        match value {
-
            sql::Value::String(s) => match s.as_str() {
-
                "refs" => Ok(Self::Refs),
-
                "node" => Ok(Self::Node),
-
                "inventory" => Ok(Self::Inventory),
-
                other => Err(sql::Error {
-
                    code: None,
-
                    message: Some(format!("unknown gossip type '{other}'")),
-
                }),
-
            },
-
            _ => Err(sql::Error {
-
                code: None,
-
                message: Some("sql: invalid type for gossip type".to_owned()),
-
            }),
-
        }
-
    }
-
}
-

-
mod parse {
    use super::*;

    /// Parse an announcement and its id from a row containing the `rowid`,
    /// `node`, `type`, `message`, `signature` and `timestamp` columns.
    pub fn announcement(row: sql::Row) -> Result<(AnnouncementId, Announcement), Error> {
        let id = row.read::<i64, _>("rowid") as AnnouncementId;
        let node = row.read::<NodeId, _>("node");
        let gt = row.read::<GossipType, _>("type");
        // The binary `message` column is decoded according to the stored
        // gossip type.
        let message = match gt {
            GossipType::Refs => {
                let ann = row.try_read::<RefsAnnouncement, _>("message")?;
                AnnouncementMessage::Refs(ann)
            }
            GossipType::Inventory => {
                let ann = row.try_read::<InventoryAnnouncement, _>("message")?;
                AnnouncementMessage::Inventory(ann)
            }
            GossipType::Node => {
                let ann = row.try_read::<NodeAnnouncement, _>("message")?;
                AnnouncementMessage::Node(ann)
            }
        };
        let signature = row.read::<Signature, _>("signature");
        let timestamp = row.read::<Timestamp, _>("timestamp");

        // The `timestamp` column is expected to mirror the timestamp embedded
        // in the message itself.
        debug_assert_eq!(timestamp, message.timestamp());

        Ok((
            id,
            Announcement {
                node,
                message,
                signature,
            },
        ))
    }
}
-

-
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod test {
    use super::*;
    use crate::prelude::{BoundedVec, RepoId};
    use crate::test::arbitrary;
    use localtime::LocalTime;
    use radicle::assert_matches;
    use radicle::node::device::Device;

    // Exercises the announce / set-relay / relay lifecycle against an
    // in-memory database.
    #[test]
    fn test_announced() {
        let mut db = Database::memory().unwrap();
        let nid = arbitrary::gen::<NodeId>(1);
        let rid = arbitrary::gen::<RepoId>(1);
        let timestamp = LocalTime::now().into();
        let signer = Device::mock();
        let refs = AnnouncementMessage::Refs(RefsAnnouncement {
            rid,
            refs: BoundedVec::new(),
            timestamp,
        })
        .signed(&signer);
        let inv = AnnouncementMessage::Inventory(InventoryAnnouncement {
            inventory: BoundedVec::new(),
            timestamp,
        })
        .signed(&signer);

        // Only the first announcement of each type is recognized as new.
        let id1 = db.announced(&nid, &refs).unwrap().unwrap();
        assert!(db.announced(&nid, &refs).unwrap().is_none());

        let id2 = db.announced(&nid, &inv).unwrap().unwrap();
        assert!(db.announced(&nid, &inv).unwrap().is_none());

        // Nothing was set to be relayed.
        assert_eq!(db.relays(LocalTime::now().into()).unwrap().len(), 0);

        // Set the messages to be relayed.
        db.set_relay(id1, RelayStatus::Relay).unwrap();
        db.set_relay(id2, RelayStatus::Relay).unwrap();

        // Now they are returned.
        assert_matches!(
            db.relays(LocalTime::now().into()).unwrap().as_slice(),
            &[(id1_, _), (id2_, _)]
            if id1_ == id1 && id2_ == id2
        );
        // But only once.
        assert_matches!(db.relays(LocalTime::now().into()).unwrap().as_slice(), &[]);
    }
}
deleted crates/radicle-node/src/service/io.rs
@@ -1,203 +0,0 @@
-
use std::collections::VecDeque;
-
use std::time;
-

-
use log::*;
-
use radicle::node::config::FetchPackSizeLimit;
-
use radicle::storage::refs::RefsAt;
-

-
use crate::prelude::*;
-
use crate::service::session::Session;
-
use crate::service::Link;
-

-
use super::gossip;
-
use super::message::{Announcement, AnnouncementMessage};
-

-
/// I/O operation to execute at the network/wire level.
#[derive(Debug)]
pub enum Io {
    /// There are some messages ready to be sent to a peer.
    Write(NodeId, Vec<Message>),
    /// Connect to a peer at the given address.
    Connect(NodeId, Address),
    /// Disconnect from a peer, with the given reason.
    Disconnect(NodeId, DisconnectReason),
    /// Fetch repository data from a peer.
    Fetch {
        /// Repo being fetched.
        rid: RepoId,
        /// Remote node being fetched from.
        remote: NodeId,
        /// If the node is fetching specific `rad/sigrefs`.
        refs_at: Option<Vec<RefsAt>>,
        /// Fetch timeout.
        timeout: time::Duration,
        /// Limit the number of bytes fetched.
        reader_limit: FetchPackSizeLimit,
    },
    /// Ask for a wakeup in a specified amount of time.
    Wakeup(LocalDuration),
}
-

-
/// Interface to the network.
///
/// Queues [`Io`] operations to be executed by the wire layer.
#[derive(Debug, Default)]
pub struct Outbox {
    /// Outgoing I/O queue, drained in FIFO order.
    io: VecDeque<Io>,
}
-

-
impl Outbox {
-
    /// Connect to a peer.
-
    pub fn connect(&mut self, id: NodeId, addr: Address) {
-
        self.io.push_back(Io::Connect(id, addr));
-
    }
-

-
    /// Disconnect a peer.
-
    pub fn disconnect(&mut self, id: NodeId, reason: DisconnectReason) {
-
        self.io.push_back(Io::Disconnect(id, reason));
-
    }
-

-
    pub fn write(&mut self, remote: &Session, msg: Message) {
-
        msg.log(log::Level::Debug, &remote.id, Link::Outbound);
-
        trace!(target: "service", "Write {:?} to {}", &msg, remote);
-

-
        self.io.push_back(Io::Write(remote.id, vec![msg]));
-
    }
-

-
    /// Announce something to a peer. This is meant for our own announcement messages.
-
    pub fn announce<'a>(
-
        &mut self,
-
        ann: Announcement,
-
        peers: impl Iterator<Item = &'a Session>,
-
        gossip: &mut impl gossip::Store,
-
    ) {
-
        // Store our announcement so that it can be retrieved from us later, just like
-
        // announcements we receive from peers.
-
        if let Err(e) = gossip.announced(&ann.node, &ann) {
-
            error!(target: "service", "Error updating our gossip store with announced message: {e}");
-
        }
-

-
        for peer in peers {
-
            if let AnnouncementMessage::Refs(refs) = &ann.message {
-
                if let Some(subscribe) = &peer.subscribe {
-
                    if subscribe.filter.contains(&refs.rid) {
-
                        self.write(peer, ann.clone().into());
-
                    } else {
-
                        debug!(
-
                            target: "service",
-
                            "Skipping refs announcement relay to {peer}: peer isn't subscribed to {}",
-
                            refs.rid
-
                        );
-
                    }
-
                } else {
-
                    debug!(
-
                        target: "service",
-
                        "Skipping refs announcement relay to {peer}: peer didn't send a subscription filter"
-
                    );
-
                }
-
            } else {
-
                self.write(peer, ann.clone().into());
-
            }
-
        }
-
    }
-

-
    pub fn write_all(&mut self, remote: &Session, msgs: impl IntoIterator<Item = Message>) {
-
        let msgs = msgs.into_iter().collect::<Vec<_>>();
-

-
        for (ix, msg) in msgs.iter().enumerate() {
-
            trace!(
-
                target: "service",
-
                "Write {:?} to {} ({}/{})",
-
                msg,
-
                remote,
-
                ix + 1,
-
                msgs.len()
-
            );
-
            msg.log(log::Level::Trace, &remote.id, Link::Outbound);
-
        }
-
        self.io.push_back(Io::Write(remote.id, msgs));
-
    }
-

-
    pub fn wakeup(&mut self, after: LocalDuration) {
-
        self.io.push_back(Io::Wakeup(after));
-
    }
-

-
    pub fn fetch(
-
        &mut self,
-
        peer: &mut Session,
-
        rid: RepoId,
-
        refs_at: Vec<RefsAt>,
-
        timeout: time::Duration,
-
        reader_limit: FetchPackSizeLimit,
-
    ) {
-
        peer.fetching(rid);
-

-
        let refs_at = (!refs_at.is_empty()).then_some(refs_at);
-

-
        if let Some(refs_at) = &refs_at {
-
            debug!(
-
                target: "service",
-
                "Fetch initiated for {rid} with {peer} ({} remote(s))..", refs_at.len()
-
            );
-
        } else {
-
            debug!(target: "service", "Fetch initiated for {rid} with {peer} (all remotes)..");
-
        }
-

-
        self.io.push_back(Io::Fetch {
-
            rid,
-
            refs_at,
-
            remote: peer.id,
-
            timeout,
-
            reader_limit,
-
        });
-
    }
-

-
    /// Broadcast a message to a list of peers.
-
    pub fn broadcast<'a>(
-
        &mut self,
-
        msg: impl Into<Message>,
-
        peers: impl IntoIterator<Item = &'a Session>,
-
    ) {
-
        let msg = msg.into();
-
        for peer in peers {
-
            self.write(peer, msg.clone());
-
        }
-
    }
-

-
    /// Relay a message to interested peers.
-
    pub fn relay<'a>(&mut self, ann: Announcement, peers: impl IntoIterator<Item = &'a Session>) {
-
        if let AnnouncementMessage::Refs(msg) = &ann.message {
-
            let id = msg.rid;
-
            let peers = peers.into_iter().filter(|p| {
-
                if let Some(subscribe) = &p.subscribe {
-
                    subscribe.filter.contains(&id)
-
                } else {
-
                    // If the peer did not send us a `subscribe` message, we don't
-
                    // relay any messages to them.
-
                    false
-
                }
-
            });
-
            self.broadcast(ann, peers);
-
        } else {
-
            self.broadcast(ann, peers);
-
        }
-
    }
-

-
    /// Number of items in outbox.
-
    #[allow(clippy::len_without_is_empty)]
-
    pub fn len(&self) -> usize {
-
        self.io.len()
-
    }
-

-
    #[cfg(any(test, feature = "test"))]
-
    pub(crate) fn queue(&mut self) -> &mut VecDeque<Io> {
-
        &mut self.io
-
    }
-
}
-

-
impl Iterator for Outbox {
-
    type Item = Io;
-

-
    fn next(&mut self) -> Option<Self::Item> {
-
        self.io.pop_front()
-
    }
-
}
deleted crates/radicle-node/src/service/limiter.rs
@@ -1,210 +0,0 @@
-
use std::collections::{HashMap, HashSet};
-

-
use localtime::LocalTime;
-
use radicle::node::{address, config, HostName, NodeId};
-

-
/// Peer rate limiter.
-
///
-
/// Uses a token bucket algorithm, where each address starts with a certain amount of tokens,
-
/// and every request from that address consumes one token. Tokens refill at a predefined
-
/// rate. This mechanism allows for consistent request rates with potential bursts up to the
-
/// bucket's capacity.
-
#[derive(Debug, Default)]
-
pub struct RateLimiter {
-
    pub buckets: HashMap<HostName, TokenBucket>,
-
    pub bypass: HashSet<NodeId>,
-
}
-

-
impl RateLimiter {
-
    /// Create a new rate limiter with a bypass list. Nodes in the bypass list are not limited.
-
    pub fn new(bypass: impl IntoIterator<Item = NodeId>) -> Self {
-
        Self {
-
            buckets: HashMap::default(),
-
            bypass: bypass.into_iter().collect(),
-
        }
-
    }
-

-
    /// Call this when the address has performed some rate-limited action.
-
    /// Returns whether the action is rate-limited or not.
-
    ///
-
    /// Supplying a different amount of tokens per address is useful if for eg. a peer
-
    /// is outbound vs. inbound.
-
    pub fn limit<T: AsTokens>(
-
        &mut self,
-
        addr: HostName,
-
        nid: Option<&NodeId>,
-
        tokens: &T,
-
        now: LocalTime,
-
    ) -> bool {
-
        if let Some(nid) = nid {
-
            if self.bypass.contains(nid) {
-
                return false;
-
            }
-
        }
-
        if let HostName::Ip(ip) = addr {
-
            // Don't limit LAN addresses.
-
            if !address::is_routable(&ip) {
-
                return false;
-
            }
-
        }
-
        !self
-
            .buckets
-
            .entry(addr)
-
            .or_insert_with(|| TokenBucket::new(tokens.capacity(), tokens.rate(), now))
-
            .take(now)
-
    }
-
}
-

-
/// Any type that can be assigned a number of rate-limit tokens.
-
pub trait AsTokens {
-
    /// Get the token capacity for this object.
-
    fn capacity(&self) -> usize;
-
    /// Get the refill rate for this object.
-
    /// A rate of `1.0` means one token per second.
-
    fn rate(&self) -> f64;
-
}
-

-
impl AsTokens for config::RateLimit {
-
    fn rate(&self) -> f64 {
-
        self.fill_rate
-
    }
-

-
    fn capacity(&self) -> usize {
-
        self.capacity
-
    }
-
}
-

-
#[derive(Debug, serde::Serialize)]
-
#[serde(rename_all = "camelCase")]
-
pub struct TokenBucket {
-
    /// Token refill rate per second.
-
    rate: f64,
-
    /// Token capacity.
-
    capacity: f64,
-
    /// Tokens remaining.
-
    tokens: f64,
-
    /// Time of last token refill.
-
    refilled_at: LocalTime,
-
}
-

-
impl TokenBucket {
-
    fn new(tokens: usize, rate: f64, now: LocalTime) -> Self {
-
        Self {
-
            rate,
-
            capacity: tokens as f64,
-
            tokens: tokens as f64,
-
            refilled_at: now,
-
        }
-
    }
-

-
    fn refill(&mut self, now: LocalTime) {
-
        let elapsed = now.duration_since(self.refilled_at);
-
        let tokens = elapsed.as_secs() as f64 * self.rate;
-

-
        self.tokens = (self.tokens + tokens).min(self.capacity);
-
        self.refilled_at = now;
-
    }
-

-
    fn take(&mut self, now: LocalTime) -> bool {
-
        self.refill(now);
-

-
        if self.tokens >= 1.0 {
-
            self.tokens -= 1.0;
-
            true
-
        } else {
-
            false
-
        }
-
    }
-
}
-

-
#[cfg(test)]
-
#[allow(clippy::bool_assert_comparison, clippy::redundant_clone)]
-
mod test {
-
    use radicle::test::arbitrary;
-

-
    use super::*;
-

-
    impl AsTokens for (usize, f64) {
-
        fn capacity(&self) -> usize {
-
            self.0
-
        }
-

-
        fn rate(&self) -> f64 {
-
            self.1
-
        }
-
    }
-

-
    #[test]
-
    fn test_limitter_refill() {
-
        let mut r = RateLimiter::default();
-
        let t = (3, 0.2); // Three tokens burst. One token every 5 seconds.
-
        let a = HostName::Dns(String::from("seed.radicle.example.com"));
-
        let n = arbitrary::gen::<NodeId>(1);
-
        let n = Some(&n);
-

-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(0)), false); // Burst capacity
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(1)), false); // Burst capacity
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(2)), false); // Burst capacity
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(3)), true); // Limited
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(4)), true); // Limited
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(5)), false); // Refilled (1)
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(6)), true); // Limited
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(7)), true); // Limited
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(8)), true); // Limited
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(9)), true); // Limited
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(10)), false); // Refilled (1)
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(11)), true); // Limited
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(12)), true); // Limited
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(13)), true); // Limited
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(14)), true); // Limited
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(15)), false); // Refilled (1)
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(16)), true); // Limited
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(60)), false); // Refilled (3)
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(60)), false); // Burst capacity
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(60)), false); // Burst capacity
-
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(60)), true); // Limited
-
    }
-

-
    #[test]
-
    #[rustfmt::skip]
-
    fn test_limitter_multi() {
-
        let t = (1, 1.0); // One token per second. One token burst.
-
        let n = arbitrary::gen::<NodeId>(1);
-
        let n = Some(&n);
-
        let mut r = RateLimiter::default();
-
        let addr1 = HostName::Dns(String::from("seed.radicle.example.com"));
-
        let addr2 = HostName::Dns(String::from("seed.radicle.example.net"));
-

-
        assert_eq!(r.limit(addr1.clone(), n, &t, LocalTime::from_secs(0)), false);
-
        assert_eq!(r.limit(addr1.clone(), n, &t, LocalTime::from_secs(0)), true);
-
        assert_eq!(r.limit(addr2.clone(), n, &t, LocalTime::from_secs(0)), false);
-
        assert_eq!(r.limit(addr2.clone(), n, &t, LocalTime::from_secs(0)), true);
-
        assert_eq!(r.limit(addr1.clone(), n, &t, LocalTime::from_secs(1)), false);
-
        assert_eq!(r.limit(addr1.clone(), n, &t, LocalTime::from_secs(1)), true);
-
        assert_eq!(r.limit(addr2.clone(), n, &t, LocalTime::from_secs(1)), false);
-
        assert_eq!(r.limit(addr2.clone(), n, &t, LocalTime::from_secs(1)), true);
-
    }
-

-
    #[test]
-
    #[rustfmt::skip]
-
    fn test_limitter_different_rates() {
-
        let t1 = (1, 1.0); // One token per second. One token burst.
-
        let t2 = (2, 2.0); // Two tokens per second. Two token burst.
-
        let n = arbitrary::gen::<NodeId>(1);
-
        let n = Some(&n);
-
        let mut r = RateLimiter::default();
-
        let addr1 = HostName::Dns(String::from("seed.radicle.example.com"));
-
        let addr2 = HostName::Dns(String::from("seed.radicle.example.net"));
-

-
        assert_eq!(r.limit(addr1.clone(), n, &t1, LocalTime::from_secs(0)), false);
-
        assert_eq!(r.limit(addr1.clone(), n, &t1, LocalTime::from_secs(0)), true);
-
        assert_eq!(r.limit(addr2.clone(), n, &t2, LocalTime::from_secs(0)), false);
-
        assert_eq!(r.limit(addr2.clone(), n, &t2, LocalTime::from_secs(0)), false);
-
        assert_eq!(r.limit(addr2.clone(), n, &t2, LocalTime::from_secs(0)), true);
-
        assert_eq!(r.limit(addr1.clone(), n, &t1, LocalTime::from_secs(1)), false); // Refilled (1)
-
        assert_eq!(r.limit(addr1.clone(), n, &t1, LocalTime::from_secs(1)), true);
-
        assert_eq!(r.limit(addr2.clone(), n, &t2, LocalTime::from_secs(1)), false); // Refilled (2)
-
        assert_eq!(r.limit(addr2.clone(), n, &t2, LocalTime::from_secs(1)), false);
-
        assert_eq!(r.limit(addr2.clone(), n, &t2, LocalTime::from_secs(1)), true);
-
    }
-
}
deleted crates/radicle-node/src/service/message.rs
@@ -1,706 +0,0 @@
-
use std::{fmt, io, mem};
-

-
use nonempty::NonEmpty;
-
use radicle::git;
-
use radicle::node::device::Device;
-
use radicle::storage::refs::RefsAt;
-

-
use crate::crypto;
-
use crate::identity::RepoId;
-
use crate::node;
-
use crate::node::{Address, Alias, UserAgent};
-
use crate::prelude::BoundedVec;
-
use crate::service::filter::Filter;
-
use crate::service::{Link, NodeId, Timestamp};
-
use crate::storage;
-
use crate::wire;
-

-
/// Maximum number of addresses which can be announced to other nodes.
-
pub const ADDRESS_LIMIT: usize = 16;
-
/// Maximum number of repository remotes that can be included in a [`RefsAnnouncement`] message.
-
pub const REF_REMOTE_LIMIT: usize = 1024;
-
/// Maximum number of inventory which can be announced to other nodes.
-
pub const INVENTORY_LIMIT: usize = 2973;
-

-
#[derive(Debug, Clone, PartialEq, Eq)]
-
pub struct Subscribe {
-
    /// Subscribe to events matching this filter.
-
    pub filter: Filter,
-
    /// Request messages since this time.
-
    pub since: Timestamp,
-
    /// Request messages until this time.
-
    pub until: Timestamp,
-
}
-

-
impl Subscribe {
-
    pub fn all() -> Self {
-
        Self {
-
            filter: Filter::default(),
-
            since: Timestamp::MIN,
-
            until: Timestamp::MAX,
-
        }
-
    }
-
}
-

-
/// Node announcing itself to the network.
-
#[derive(Debug, Clone, PartialEq, Eq)]
-
pub struct NodeAnnouncement {
-
    /// Supported protocol version.
-
    pub version: u8,
-
    /// Advertized features.
-
    pub features: node::Features,
-
    /// Monotonic timestamp.
-
    pub timestamp: Timestamp,
-
    /// Non-unique alias.
-
    pub alias: Alias,
-
    /// Announced addresses.
-
    pub addresses: BoundedVec<Address, ADDRESS_LIMIT>,
-
    /// Nonce used for announcement proof-of-work.
-
    pub nonce: u64,
-
    /// User-agent string.
-
    pub agent: UserAgent,
-
}
-

-
impl NodeAnnouncement {
-
    /// Calculate the amount of work that went into creating this announcement.
-
    ///
-
    /// Proof-of-work uses the [`scrypt`] algorithm with the parameters in
-
    /// [`Announcement::POW_PARAMS`]. The "work" is calculated by counting the number of leading
-
    /// zero bits after running `scrypt` on a serialized [`NodeAnnouncement`] using
-
    /// [`wire::serialize`].
-
    ///
-
    /// In other words, `work = leading-zeros(scrypt(serialize(announcement)))`.
-
    ///
-
    /// Higher numbers mean higher difficulty. For each increase in work, difficulty is doubled.
-
    /// For instance, an output of `7` is *four* times more work than an output of `5`.
-
    ///
-
    pub fn work(&self) -> u32 {
-
        let (n, r, p) = Announcement::POW_PARAMS;
-
        let params = scrypt::Params::new(n, r, p, 32).expect("proof-of-work parameters are valid");
-
        let mut output = [0u8; 32];
-

-
        scrypt::scrypt(
-
            wire::serialize(self).as_ref(),
-
            Announcement::POW_SALT,
-
            &params,
-
            &mut output,
-
        )
-
        .expect("proof-of-work output vector is a valid length");
-

-
        // Calculate the number of leading zero bits in the output vector.
-
        if let Some((zero_bytes, non_zero)) = output.iter().enumerate().find(|(_, &x)| x != 0) {
-
            zero_bytes as u32 * 8 + non_zero.leading_zeros()
-
        } else {
-
            output.len() as u32 * 8
-
        }
-
    }
-

-
    /// Solve the proof-of-work of a node announcement for the given target, by iterating through
-
    /// different nonces.
-
    ///
-
    /// If the given difficulty target is too high, there may not be a result. In that case, `None`
-
    /// is returned.
-
    pub fn solve(mut self, target: u32) -> Option<Self> {
-
        loop {
-
            if let Some(nonce) = self.nonce.checked_add(1) {
-
                self.nonce = nonce;
-

-
                if self.work() >= target {
-
                    break;
-
                }
-
            } else {
-
                return None;
-
            }
-
        }
-
        Some(self)
-
    }
-
}
-

-
impl wire::Encode for NodeAnnouncement {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        let mut n = 0;
-

-
        n += self.version.encode(writer)?;
-
        n += self.features.encode(writer)?;
-
        n += self.timestamp.encode(writer)?;
-
        n += self.alias.encode(writer)?;
-
        n += self.addresses.encode(writer)?;
-
        n += self.nonce.encode(writer)?;
-
        n += self.agent.encode(writer)?;
-

-
        Ok(n)
-
    }
-
}
-

-
impl wire::Decode for NodeAnnouncement {
-
    fn decode<R: std::io::Read + ?Sized>(reader: &mut R) -> Result<Self, wire::Error> {
-
        let version = u8::decode(reader)?;
-
        let features = node::Features::decode(reader)?;
-
        let timestamp = Timestamp::decode(reader)?;
-
        let alias = wire::Decode::decode(reader)?;
-
        let addresses = BoundedVec::<Address, ADDRESS_LIMIT>::decode(reader)?;
-
        let nonce = u64::decode(reader)?;
-
        let agent = match UserAgent::decode(reader) {
-
            Ok(ua) => ua,
-
            Err(e) if e.is_eof() => UserAgent::default(),
-
            Err(e) => return Err(e),
-
        };
-

-
        Ok(Self {
-
            version,
-
            features,
-
            timestamp,
-
            alias,
-
            addresses,
-
            nonce,
-
            agent,
-
        })
-
    }
-
}
-

-
/// Node announcing project refs being created or updated.
-
#[derive(Debug, Clone, PartialEq, Eq)]
-
pub struct RefsAnnouncement {
-
    /// Repository identifier.
-
    pub rid: RepoId,
-
    /// Updated `rad/sigrefs`.
-
    pub refs: BoundedVec<RefsAt, REF_REMOTE_LIMIT>,
-
    /// Time of announcement.
-
    pub timestamp: Timestamp,
-
}
-

-
/// Track the status of `RefsAt` within a given repository.
-
#[derive(Default)]
-
pub struct RefsStatus {
-
    /// The `rad/sigrefs` was missing or it's ahead of the local
-
    /// `rad/sigrefs`. We want it.
-
    pub want: Vec<RefsAt>,
-
    /// The `rad/sigrefs` has been seen before. We already have it.
-
    pub have: Vec<RefsAt>,
-
}
-

-
impl RefsStatus {
-
    /// Get the set of `want` and `have` `RefsAt`'s for the given
-
    /// announcement.
-
    ///
-
    /// Nb. We use the refs database as a cache for quick lookups. This does *not* check
-
    /// for ancestry matches, since we don't cache the whole history (only the tips).
-
    /// This, however, is not a problem because the signed refs branch is fast-forward only,
-
    /// and old refs announcements will be discarded due to their lower timestamps.
-
    pub fn new<D: node::refs::Store>(
-
        rid: RepoId,
-
        refs: NonEmpty<RefsAt>,
-
        db: &D,
-
    ) -> Result<RefsStatus, storage::Error> {
-
        let mut status = RefsStatus::default();
-
        for theirs in refs.iter() {
-
            status.insert(&rid, *theirs, db)?;
-
        }
-
        Ok(status)
-
    }
-

-
    fn insert<D: node::refs::Store>(
-
        &mut self,
-
        repo: &RepoId,
-
        theirs: RefsAt,
-
        db: &D,
-
    ) -> Result<(), storage::Error> {
-
        match db.get(repo, &theirs.remote, &storage::refs::SIGREFS_BRANCH) {
-
            Ok(Some((ours, _))) => {
-
                if theirs.at != ours {
-
                    self.want.push(theirs);
-
                } else {
-
                    self.have.push(theirs);
-
                }
-
            }
-
            Ok(None) => {
-
                self.want.push(theirs);
-
            }
-
            Err(e) => {
-
                log::warn!(
-
                    target: "service",
-
                    "Error getting cached ref of {repo} for refs status: {e}"
-
                );
-
            }
-
        }
-
        Ok(())
-
    }
-
}
-

-
/// Node announcing its inventory to the network.
-
/// This should be the whole inventory every time.
-
#[derive(Debug, Clone, PartialEq, Eq)]
-
pub struct InventoryAnnouncement {
-
    /// Node inventory.
-
    pub inventory: BoundedVec<RepoId, INVENTORY_LIMIT>,
-
    /// Time of announcement.
-
    pub timestamp: Timestamp,
-
}
-

-
/// Node announcing information to a connected peer.
-
///
-
/// This should not be relayed and should be used to send an
-
/// informational message a peer.
-
#[derive(Debug, Clone, PartialEq, Eq)]
-
pub enum Info {
-
    /// Tell a node that sent a refs announcement that it was already synced at the given `Oid`,
-
    /// for this particular `rid`.
-
    RefsAlreadySynced { rid: RepoId, at: git::Oid },
-
}
-

-
/// Announcement messages are messages that are relayed between peers.
-
#[derive(Clone, PartialEq, Eq)]
-
pub enum AnnouncementMessage {
-
    /// Inventory announcement.
-
    Inventory(InventoryAnnouncement),
-
    /// Node announcement.
-
    Node(NodeAnnouncement),
-
    /// Refs announcement.
-
    Refs(RefsAnnouncement),
-
}
-

-
impl AnnouncementMessage {
-
    /// Sign this announcement message.
-
    pub fn signed<G>(self, signer: &Device<G>) -> Announcement
-
    where
-
        G: crypto::signature::Signer<crypto::Signature>,
-
    {
-
        use crypto::signature::Signer as _;
-

-
        let msg = wire::serialize(&self);
-
        let signature = signer.sign(&msg);
-

-
        Announcement {
-
            node: *signer.public_key(),
-
            message: self,
-
            signature,
-
        }
-
    }
-

-
    pub fn timestamp(&self) -> Timestamp {
-
        match self {
-
            Self::Inventory(InventoryAnnouncement { timestamp, .. }) => *timestamp,
-
            Self::Refs(RefsAnnouncement { timestamp, .. }) => *timestamp,
-
            Self::Node(NodeAnnouncement { timestamp, .. }) => *timestamp,
-
        }
-
    }
-

-
    pub fn is_node_announcement(&self) -> bool {
-
        matches!(self, Self::Node(_))
-
    }
-
}
-

-
impl From<NodeAnnouncement> for AnnouncementMessage {
-
    fn from(ann: NodeAnnouncement) -> Self {
-
        Self::Node(ann)
-
    }
-
}
-

-
impl From<InventoryAnnouncement> for AnnouncementMessage {
-
    fn from(ann: InventoryAnnouncement) -> Self {
-
        Self::Inventory(ann)
-
    }
-
}
-

-
impl From<RefsAnnouncement> for AnnouncementMessage {
-
    fn from(ann: RefsAnnouncement) -> Self {
-
        Self::Refs(ann)
-
    }
-
}
-

-
impl fmt::Debug for AnnouncementMessage {
-
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-
        match self {
-
            Self::Node(message) => write!(f, "Node({})", message.timestamp),
-
            Self::Inventory(message) => {
-
                write!(
-
                    f,
-
                    "Inventory([{}], {})",
-
                    message
-
                        .inventory
-
                        .iter()
-
                        .map(|i| i.to_string())
-
                        .collect::<Vec<String>>()
-
                        .join(", "),
-
                    message.timestamp
-
                )
-
            }
-
            Self::Refs(message) => {
-
                write!(
-
                    f,
-
                    "Refs({}, {}, {:?})",
-
                    message.rid, message.timestamp, message.refs
-
                )
-
            }
-
        }
-
    }
-
}
-

-
#[derive(Debug, Clone, PartialEq, Eq)]
-
pub struct Announcement {
-
    /// Node identifier.
-
    pub node: NodeId,
-
    /// Signature over the announcement.
-
    pub signature: crypto::Signature,
-
    /// Unsigned node announcement.
-
    pub message: AnnouncementMessage,
-
}
-

-
impl Announcement {
-
    /// Proof-of-work parameters for announcements.
-
    ///
-
    /// These parameters are fed into `scrypt`.
-
    /// They represent the `log2(N)`, `r`, `p` parameters, respectively.
-
    ///
-
    /// * log2(N) – iterations count (affects memory and CPU usage), e.g. 15
-
    /// * r – block size (affects memory and CPU usage), e.g. 8
-
    /// * p – parallelism factor (threads to run in parallel - affects the memory, CPU usage), usually 1
-
    ///
-
    /// `15, 8, 1` are usually the recommended parameters.
-
    ///
-
    #[cfg(debug_assertions)]
-
    pub const POW_PARAMS: (u8, u32, u32) = (1, 1, 1);
-
    #[cfg(not(debug_assertions))]
-
    pub const POW_PARAMS: (u8, u32, u32) = (15, 8, 1);
-
    /// Salt used for generating PoW.
-
    pub const POW_SALT: &'static [u8] = &[b'r', b'a', b'd'];
-

-
    /// Verify this announcement's signature.
-
    pub fn verify(&self) -> bool {
-
        let msg = wire::serialize(&self.message);
-
        self.node.verify(msg, &self.signature).is_ok()
-
    }
-

-
    pub fn matches(&self, filter: &Filter) -> bool {
-
        match &self.message {
-
            AnnouncementMessage::Inventory(_) => true,
-
            AnnouncementMessage::Node(_) => true,
-
            AnnouncementMessage::Refs(RefsAnnouncement { rid, .. }) => filter.contains(rid),
-
        }
-
    }
-

-
    /// Check whether this announcement is of the same variant as another.
-
    pub fn variant_eq(&self, other: &Self) -> bool {
-
        std::mem::discriminant(&self.message) == std::mem::discriminant(&other.message)
-
    }
-

-
    /// Get the announcement timestamp.
-
    pub fn timestamp(&self) -> Timestamp {
-
        self.message.timestamp()
-
    }
-
}
-

-
/// Message payload.
-
/// These are the messages peers send to each other.
-
#[derive(Clone, PartialEq, Eq)]
-
pub enum Message {
-
    /// Subscribe to gossip messages matching the filter and time range.
-
    Subscribe(Subscribe),
-

-
    /// Gossip announcement. These messages are relayed to peers, and filtered
-
    /// using [`Message::Subscribe`].
-
    Announcement(Announcement),
-

-
    /// Informational message. These messages are sent between peers for information
-
    /// and do not need to be acted upon. They can be safely ignored, though handling
-
    /// them can be useful for the user.
-
    Info(Info),
-

-
    /// Ask a connected peer for a Pong.
-
    ///
-
    /// Used to check if the remote peer is responsive, or a side-effect free way to keep a
-
    /// connection alive.
-
    Ping(Ping),
-

-
    /// Response to `Ping` message.
-
    Pong {
-
        /// The pong payload.
-
        zeroes: ZeroBytes,
-
    },
-
}
-

-
impl PartialOrd for Message {
-
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
-
        Some(self.cmp(other))
-
    }
-
}
-

-
impl Ord for Message {
-
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
-
        let this = wire::serialize(self);
-
        let other = wire::serialize(other);
-

-
        this.cmp(&other)
-
    }
-
}
-

-
impl Message {
-
    pub fn announcement(
-
        node: NodeId,
-
        message: impl Into<AnnouncementMessage>,
-
        signature: crypto::Signature,
-
    ) -> Self {
-
        Announcement {
-
            node,
-
            signature,
-
            message: message.into(),
-
        }
-
        .into()
-
    }
-

-
    pub fn node<G: crypto::signature::Signer<crypto::Signature>>(
-
        message: NodeAnnouncement,
-
        signer: &Device<G>,
-
    ) -> Self {
-
        AnnouncementMessage::from(message).signed(signer).into()
-
    }
-

-
    pub fn inventory<G: crypto::signature::Signer<crypto::Signature>>(
-
        message: InventoryAnnouncement,
-
        signer: &Device<G>,
-
    ) -> Self {
-
        AnnouncementMessage::from(message).signed(signer).into()
-
    }
-

-
    pub fn subscribe(filter: Filter, since: Timestamp, until: Timestamp) -> Self {
-
        Self::Subscribe(Subscribe {
-
            filter,
-
            since,
-
            until,
-
        })
-
    }
-

-
    pub fn log(&self, level: log::Level, remote: &NodeId, link: Link) {
-
        if !log::log_enabled!(level) {
-
            return;
-
        }
-
        let (verb, prep) = if link.is_inbound() {
-
            ("Received", "from")
-
        } else {
-
            ("Sending", "to")
-
        };
-
        let msg = match self {
-
            Self::Announcement(Announcement { node, message, .. }) => match message {
-
                AnnouncementMessage::Node(NodeAnnouncement { addresses, timestamp, .. }) => format!(
-
                    "{verb} node announcement of {node} with {} address(es) {prep} {remote} (t={timestamp})",
-
                    addresses.len()
-
                ),
-
                AnnouncementMessage::Refs(RefsAnnouncement { rid, refs, timestamp }) => format!(
-
                    "{verb} refs announcement of {node} for {rid} with {} remote(s) {prep} {remote} (t={timestamp})",
-
                    refs.len()
-
                ),
-
                AnnouncementMessage::Inventory(InventoryAnnouncement { inventory, timestamp }) => {
-
                    format!(
-
                        "{verb} inventory announcement of {node} with {} item(s) {prep} {remote} (t={timestamp})",
-
                        inventory.len()
-
                    )
-
                }
-
            },
-
            Self::Info(Info::RefsAlreadySynced { rid,  .. }) => {
-
                format!(
-
                    "{verb} `refs-already-synced` info {prep} {remote} for {rid}"
-
                )
-
            },
-
            Self::Ping { .. } => format!("{verb} ping {prep} {remote}"),
-
            Self::Pong { .. } => format!("{verb} pong {prep} {remote}"),
-
            Self::Subscribe(Subscribe { .. }) => {
-
                format!("{verb} subscription filter {prep} {remote}")
-
            }
-
        };
-
        log::log!(target: "service", level, "{msg}");
-
    }
-
}
-

-
/// A ping message.
-
#[derive(Debug, PartialEq, Eq, Clone)]
-
pub struct Ping {
-
    /// The requested length of the pong message.
-
    pub ponglen: wire::Size,
-
    /// Zero bytes (ignored).
-
    pub zeroes: ZeroBytes,
-
}
-

-
impl Ping {
-
    /// Maximum number of zero bytes in a ping message.
-
    pub const MAX_PING_ZEROES: wire::Size = Message::MAX_SIZE // Message size without the type.
-
        - mem::size_of::<wire::Size>() as wire::Size // Account for pong length.
-
        - mem::size_of::<wire::Size>() as wire::Size; // Account for zeroes length prefix.
-

-
    /// Maximum number of zero bytes in a pong message.
-
    pub const MAX_PONG_ZEROES: wire::Size =
-
        Message::MAX_SIZE - mem::size_of::<wire::Size>() as wire::Size; // Account for zeroes length
-
                                                                        // prefix.
-

-
    pub fn new(rng: &mut fastrand::Rng) -> Self {
-
        let ponglen = rng.u16(0..Self::MAX_PONG_ZEROES);
-

-
        Ping {
-
            ponglen,
-
            zeroes: ZeroBytes::new(rng.u16(0..Self::MAX_PING_ZEROES)),
-
        }
-
    }
-
}
-

-
impl From<Announcement> for Message {
-
    fn from(ann: Announcement) -> Self {
-
        Self::Announcement(ann)
-
    }
-
}
-

-
impl From<Info> for Message {
-
    fn from(info: Info) -> Self {
-
        Self::Info(info)
-
    }
-
}
-

-
impl fmt::Debug for Message {
-
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-
        match self {
-
            Self::Subscribe(Subscribe { since, until, .. }) => {
-
                write!(f, "Subscribe({since}..{until})")
-
            }
-
            Self::Announcement(Announcement { node, message, .. }) => {
-
                write!(f, "Announcement({node}, {message:?})")
-
            }
-
            Self::Info(info) => {
-
                write!(f, "Info({info:?})")
-
            }
-
            Self::Ping(Ping { ponglen, zeroes }) => write!(f, "Ping({ponglen}, {zeroes:?})"),
-
            Self::Pong { zeroes } => write!(f, "Pong({zeroes:?})"),
-
        }
-
    }
-
}
-

-
/// Represents a vector of zeroes of a certain length.
-
#[derive(Clone, Debug, PartialEq, Eq)]
-
pub struct ZeroBytes(wire::Size);
-

-
impl ZeroBytes {
-
    pub fn new(size: wire::Size) -> Self {
-
        ZeroBytes(size)
-
    }
-

-
    pub fn is_empty(&self) -> bool {
-
        self.0 == 0
-
    }
-

-
    pub fn len(&self) -> usize {
-
        self.0.into()
-
    }
-
}
-

-
#[cfg(test)]
-
#[allow(clippy::unwrap_used)]
-
mod tests {
-
    use std::str::FromStr;
-

-
    use fastrand;
-
    use qcheck_macros::quickcheck;
-
    use radicle::git::raw;
-

-
    use super::*;
-
    use crate::prelude::*;
-
    use crate::test::arbitrary;
-
    use crate::wire::Encode;
-

-
    #[test]
-
    fn test_ref_remote_limit() {
-
        let mut refs = BoundedVec::<_, REF_REMOTE_LIMIT>::new();
-
        let signer = Device::mock();
-
        let at = raw::Oid::zero().into();
-

-
        assert_eq!(refs.capacity(), REF_REMOTE_LIMIT);
-

-
        for _ in 0..refs.capacity() {
-
            refs.push(RefsAt {
-
                remote: *signer.public_key(),
-
                at,
-
            })
-
            .unwrap();
-
        }
-

-
        let msg: Message = AnnouncementMessage::from(RefsAnnouncement {
-
            rid: arbitrary::gen(1),
-
            refs,
-
            timestamp: LocalTime::now().into(),
-
        })
-
        .signed(&Device::mock())
-
        .into();
-

-
        let mut buf: Vec<u8> = Vec::new();
-
        assert!(msg.encode(&mut buf).is_ok());
-

-
        let decoded = wire::deserialize(buf.as_slice());
-
        assert!(decoded.is_ok());
-
        assert_eq!(msg, decoded.unwrap());
-
    }
-

-
    #[test]
-
    fn test_inventory_limit() {
-
        let msg = Message::inventory(
-
            InventoryAnnouncement {
-
                inventory: arbitrary::vec(INVENTORY_LIMIT)
-
                    .try_into()
-
                    .expect("size within bounds limit"),
-
                timestamp: LocalTime::now().into(),
-
            },
-
            &Device::mock(),
-
        );
-
        let mut buf: Vec<u8> = Vec::new();
-
        assert!(
-
            msg.encode(&mut buf).is_ok(),
-
            "INVENTORY_LIMIT is a valid limit for encoding",
-
        );
-

-
        let decoded = wire::deserialize(buf.as_slice());
-
        assert!(
-
            decoded.is_ok(),
-
            "INVENTORY_LIMIT is a valid limit for decoding"
-
        );
-
        assert_eq!(
-
            msg,
-
            decoded.unwrap(),
-
            "encoding and decoding should be safe for message at INVENTORY_LIMIT",
-
        );
-
    }
-

-
    #[quickcheck]
-
    fn prop_refs_announcement_signing(rid: RepoId) {
-
        let signer = Device::mock_rng(&mut fastrand::Rng::new());
-
        let timestamp = Timestamp::EPOCH;
-
        let at = raw::Oid::zero().into();
-
        let refs = BoundedVec::collect_from(
-
            &mut [RefsAt {
-
                remote: *signer.public_key(),
-
                at,
-
            }]
-
            .into_iter(),
-
        );
-
        let message = AnnouncementMessage::Refs(RefsAnnouncement {
-
            rid,
-
            refs,
-
            timestamp,
-
        });
-
        let ann = message.signed(&signer);
-

-
        assert!(ann.verify());
-
    }
-

-
    #[test]
-
    fn test_node_announcement_validate() {
-
        let ann = NodeAnnouncement {
-
            version: 1,
-
            features: node::Features::SEED,
-
            timestamp: Timestamp::try_from(42491841u64).unwrap(),
-
            alias: Alias::new("alice"),
-
            addresses: BoundedVec::new(),
-
            nonce: 0,
-
            agent: UserAgent::from_str("/heartwood:1.0.0/").unwrap(),
-
        };
-

-
        assert_eq!(ann.work(), 1);
-
        assert_eq!(ann.clone().solve(1).unwrap().work(), 1);
-
        assert_eq!(ann.clone().solve(8).unwrap().work(), 10);
-
        assert_eq!(ann.solve(14).unwrap().work(), 14);
-
    }
-
}
deleted crates/radicle-node/src/service/session.rs
@@ -1,361 +0,0 @@
-
use std::collections::{HashSet, VecDeque};
-
use std::{fmt, time};
-

-
use crossbeam_channel as chan;
-

-
use crate::node::config::Limits;
-
use crate::node::{FetchResult, Severity};
-
use crate::service::message;
-
use crate::service::message::Message;
-
use crate::service::{Address, LocalDuration, LocalTime, NodeId, Outbox, RepoId, Rng};
-
use crate::storage::refs::RefsAt;
-
use crate::{Link, Timestamp};
-

-
pub use crate::node::{PingState, State};
-

-
/// Time after which a connection is considered stable.
-
pub const CONNECTION_STABLE_THRESHOLD: LocalDuration = LocalDuration::from_mins(1);
-
/// Maximum items in the fetch queue.
-
pub const MAX_FETCH_QUEUE_SIZE: usize = 128;
-

-
#[derive(thiserror::Error, Debug, Clone, Copy)]
-
pub enum Error {
-
    /// The remote peer sent an invalid announcement timestamp,
-
    /// for eg. a timestamp far in the future.
-
    #[error("invalid announcement timestamp: {0}")]
-
    InvalidTimestamp(Timestamp),
-
    /// The remote peer sent git protocol messages while we were expecting
-
    /// gossip messages. Or vice-versa.
-
    #[error("protocol mismatch")]
-
    ProtocolMismatch,
-
    /// The remote peer did something that violates the protocol rules.
-
    #[error("peer misbehaved")]
-
    Misbehavior,
-
    /// The remote peer timed out.
-
    #[error("peer timed out")]
-
    Timeout,
-
}
-

-
impl Error {
-
    /// Return the severity for this error.
-
    pub fn severity(&self) -> Severity {
-
        match self {
-
            Self::InvalidTimestamp(_) => Severity::High,
-
            Self::ProtocolMismatch => Severity::High,
-
            Self::Misbehavior => Severity::High,
-
            Self::Timeout => Severity::Low,
-
        }
-
    }
-
}
-

-
/// Error when trying to queue a fetch.
-
#[derive(thiserror::Error, Debug, Clone)]
-
pub enum QueueError {
-
    /// The item already exists in the queue.
-
    #[error("item is already queued")]
-
    Duplicate(QueuedFetch),
-
    /// The queue is at capacity.
-
    #[error("queue capacity reached")]
-
    CapacityReached(QueuedFetch),
-
}
-

-
impl QueueError {
-
    /// Get the inner [`QueuedFetch`].
-
    pub fn inner(&self) -> &QueuedFetch {
-
        match self {
-
            Self::Duplicate(f) => f,
-
            Self::CapacityReached(f) => f,
-
        }
-
    }
-
}
-

-
/// Fetch waiting to be processed, in the fetch queue.
-
#[derive(Debug, Clone)]
-
pub struct QueuedFetch {
-
    /// Repo being fetched.
-
    pub rid: RepoId,
-
    /// Peer being fetched from.
-
    pub from: NodeId,
-
    /// Refs being fetched.
-
    pub refs_at: Vec<RefsAt>,
-
    /// The timeout given for the fetch request.
-
    pub timeout: time::Duration,
-
    /// Result channel.
-
    pub channel: Option<chan::Sender<FetchResult>>,
-
}
-

-
impl PartialEq for QueuedFetch {
-
    fn eq(&self, other: &Self) -> bool {
-
        self.rid == other.rid
-
            && self.from == other.from
-
            && self.refs_at == other.refs_at
-
            && self.channel.is_none()
-
            && other.channel.is_none()
-
    }
-
}
-

-
/// A peer session. Each connected peer will have one session.
-
#[derive(Debug, Clone)]
-
pub struct Session {
-
    /// Peer id.
-
    pub id: NodeId,
-
    /// Peer address.
-
    pub addr: Address,
-
    /// Connection direction.
-
    pub link: Link,
-
    /// Whether we should attempt to re-connect
-
    /// to this peer upon disconnection.
-
    pub persistent: bool,
-
    /// Peer connection state.
-
    pub state: State,
-
    /// Peer subscription.
-
    pub subscribe: Option<message::Subscribe>,
-
    /// Last time a message was received from the peer.
-
    pub last_active: LocalTime,
-
    /// Fetch queue.
-
    pub queue: VecDeque<QueuedFetch>,
-

-
    /// Connection attempts. For persistent peers, Tracks
-
    /// how many times we've attempted to connect. We reset this to zero
-
    /// upon successful connection, once the connection is stable.
-
    attempts: usize,
-
    /// Source of entropy.
-
    rng: Rng,
-
    /// Protocol limits.
-
    limits: Limits,
-
}
-

-
impl fmt::Display for Session {
-
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-
        let mut attrs = Vec::new();
-
        let state = self.state.to_string();
-

-
        if self.link.is_inbound() {
-
            attrs.push("inbound");
-
        } else {
-
            attrs.push("outbound");
-
        }
-
        if self.persistent {
-
            attrs.push("persistent");
-
        }
-
        attrs.push(state.as_str());
-

-
        write!(f, "{} [{}]", self.id, attrs.join(" "))
-
    }
-
}
-

-
impl From<&Session> for radicle::node::Session {
-
    fn from(s: &Session) -> Self {
-
        Self {
-
            nid: s.id,
-
            link: if s.link.is_inbound() {
-
                radicle::node::Link::Inbound
-
            } else {
-
                radicle::node::Link::Outbound
-
            },
-
            addr: s.addr.clone(),
-
            state: s.state.clone(),
-
        }
-
    }
-
}
-

-
impl Session {
-
    pub fn outbound(id: NodeId, addr: Address, persistent: bool, rng: Rng, limits: Limits) -> Self {
-
        Self {
-
            id,
-
            addr,
-
            state: State::Initial,
-
            link: Link::Outbound,
-
            subscribe: None,
-
            persistent,
-
            last_active: LocalTime::default(),
-
            queue: VecDeque::with_capacity(MAX_FETCH_QUEUE_SIZE),
-
            attempts: 1,
-
            rng,
-
            limits,
-
        }
-
    }
-

-
    pub fn inbound(
-
        id: NodeId,
-
        addr: Address,
-
        persistent: bool,
-
        rng: Rng,
-
        time: LocalTime,
-
        limits: Limits,
-
    ) -> Self {
-
        Self {
-
            id,
-
            addr,
-
            state: State::Connected {
-
                since: time,
-
                ping: PingState::default(),
-
                fetching: HashSet::default(),
-
                latencies: VecDeque::default(),
-
                stable: false,
-
            },
-
            link: Link::Inbound,
-
            subscribe: None,
-
            persistent,
-
            last_active: time,
-
            queue: VecDeque::new(),
-
            attempts: 0,
-
            rng,
-
            limits,
-
        }
-
    }
-

-
    pub fn is_connecting(&self) -> bool {
-
        matches!(self.state, State::Attempted { .. })
-
    }
-

-
    pub fn is_stable(&self) -> bool {
-
        matches!(self.state, State::Connected { stable: true, .. })
-
    }
-

-
    pub fn is_connected(&self) -> bool {
-
        self.state.is_connected()
-
    }
-

-
    pub fn is_disconnected(&self) -> bool {
-
        matches!(self.state, State::Disconnected { .. })
-
    }
-

-
    pub fn is_initial(&self) -> bool {
-
        matches!(self.state, State::Initial)
-
    }
-

-
    pub fn is_at_capacity(&self) -> bool {
-
        if let State::Connected { fetching, .. } = &self.state {
-
            if fetching.len() >= self.limits.fetch_concurrency {
-
                return true;
-
            }
-
        }
-
        false
-
    }
-

-
    pub fn is_fetching(&self, rid: &RepoId) -> bool {
-
        if let State::Connected { fetching, .. } = &self.state {
-
            return fetching.contains(rid);
-
        }
-
        false
-
    }
-

-
    /// Queue a fetch. Returns `true` if it was added to the queue, and `false` if
-
    /// it already was present in the queue.
-
    pub fn queue_fetch(&mut self, fetch: QueuedFetch) -> Result<(), QueueError> {
-
        assert_eq!(fetch.from, self.id);
-

-
        if self.queue.len() >= MAX_FETCH_QUEUE_SIZE {
-
            return Err(QueueError::CapacityReached(fetch));
-
        } else if self.queue.contains(&fetch) {
-
            return Err(QueueError::Duplicate(fetch));
-
        }
-
        self.queue.push_back(fetch);
-

-
        Ok(())
-
    }
-

-
    pub fn dequeue_fetch(&mut self) -> Option<QueuedFetch> {
-
        self.queue.pop_front()
-
    }
-

-
    pub fn attempts(&self) -> usize {
-
        self.attempts
-
    }
-

-
    /// Run 'idle' task for session.
-
    pub fn idle(&mut self, now: LocalTime) {
-
        if let State::Connected {
-
            since,
-
            ref mut stable,
-
            ..
-
        } = self.state
-
        {
-
            if now >= since && now.duration_since(since) >= CONNECTION_STABLE_THRESHOLD {
-
                *stable = true;
-
                // Reset number of attempts for stable connections.
-
                self.attempts = 0;
-
            }
-
        }
-
    }
-

-
    /// Mark this session as fetching the given RID.
-
    ///
-
    /// # Panics
-
    ///
-
    /// If it is already fetching that RID, or the session is disconnected.
-
    pub fn fetching(&mut self, rid: RepoId) {
-
        if let State::Connected { fetching, .. } = &mut self.state {
-
            assert!(
-
                fetching.insert(rid),
-
                "Session must not already be fetching {rid}"
-
            );
-
        } else {
-
            panic!(
-
                "Attempting to fetch {rid} from disconnected session {}",
-
                self.id
-
            );
-
        }
-
    }
-

-
    pub fn fetched(&mut self, rid: RepoId) {
-
        if let State::Connected { fetching, .. } = &mut self.state {
-
            if !fetching.remove(&rid) {
-
                log::warn!(target: "service", "Fetched unknown repository {rid}");
-
            }
-
        }
-
    }
-

-
    pub fn to_attempted(&mut self) {
-
        assert!(
-
            self.is_initial(),
-
            "Can only transition to 'attempted' state from 'initial' state"
-
        );
-
        self.state = State::Attempted;
-
        self.attempts += 1;
-
    }
-

-
    pub fn to_connected(&mut self, since: LocalTime) {
-
        self.last_active = since;
-

-
        if let State::Connected { .. } = &self.state {
-
            log::error!(target: "service", "Session {} is already in 'connected' state, resetting..", self.id);
-
        };
-
        self.state = State::Connected {
-
            since,
-
            ping: PingState::default(),
-
            fetching: HashSet::default(),
-
            latencies: VecDeque::default(),
-
            stable: false,
-
        };
-
    }
-

-
    /// Move the session state to "disconnected". Returns any pending RID
-
    /// that was requested.
-
    pub fn to_disconnected(&mut self, since: LocalTime, retry_at: LocalTime) {
-
        self.state = State::Disconnected { since, retry_at };
-
    }
-

-
    /// Return to initial state from disconnected state. This state transition
-
    /// happens when we attempt to re-connect to a disconnected peer.
-
    pub fn to_initial(&mut self) {
-
        assert!(
-
            self.is_disconnected(),
-
            "Can only transition to 'initial' state from 'disconnected' state"
-
        );
-
        self.state = State::Initial;
-
    }
-

-
    pub fn ping(&mut self, since: LocalTime, reactor: &mut Outbox) -> Result<(), Error> {
-
        if let State::Connected { ping, .. } = &mut self.state {
-
            let msg = message::Ping::new(&mut self.rng);
-
            *ping = PingState::AwaitingResponse {
-
                len: msg.ponglen,
-
                since,
-
            };
-
            reactor.write(self, Message::Ping(msg));
-
        }
-
        Ok(())
-
    }
-
}
modified crates/radicle-node/src/test.rs
@@ -1,4 +1,3 @@
-
pub mod arbitrary;
pub mod gossip;
pub mod handle;
pub mod node;
deleted crates/radicle-node/src/test/arbitrary.rs
@@ -1,142 +0,0 @@
-
use std::collections::HashSet;
-

-
use bloomy::BloomFilter;
-
use qcheck::Arbitrary;
-
use radicle::node::UserAgent;
-

-
use crate::crypto;
-
use crate::identity::DocAt;
-
use crate::node::Alias;
-
use crate::prelude::{BoundedVec, NodeId, RepoId, Timestamp};
-
use crate::service::filter::{Filter, FILTER_SIZE_L, FILTER_SIZE_M, FILTER_SIZE_S};
-
use crate::service::message::{
-
    Announcement, Info, InventoryAnnouncement, Message, NodeAnnouncement, Ping, RefsAnnouncement,
-
    Subscribe, ZeroBytes,
-
};
-
use crate::wire::MessageType;
-
use crate::worker::fetch::FetchResult;
-

-
pub use radicle::test::arbitrary::*;
-

-
impl Arbitrary for Filter {
-
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
-
        let size = *g
-
            .choose(&[FILTER_SIZE_S, FILTER_SIZE_M, FILTER_SIZE_L])
-
            .unwrap();
-
        let mut bytes = vec![0; size];
-
        for _ in 0..64 {
-
            let index = usize::arbitrary(g) % bytes.len();
-
            bytes[index] = u8::arbitrary(g);
-
        }
-
        Self::from(BloomFilter::from(bytes))
-
    }
-
}
-

-
impl Arbitrary for FetchResult {
-
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
-
        FetchResult {
-
            updated: vec![],
-
            namespaces: HashSet::arbitrary(g),
-
            clone: bool::arbitrary(g),
-
            doc: DocAt::arbitrary(g),
-
        }
-
    }
-
}
-

-
impl Arbitrary for Message {
-
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
-
        let type_id = g
-
            .choose(&[
-
                MessageType::InventoryAnnouncement,
-
                MessageType::NodeAnnouncement,
-
                MessageType::RefsAnnouncement,
-
                MessageType::Info,
-
                MessageType::Subscribe,
-
                MessageType::Ping,
-
                MessageType::Pong,
-
            ])
-
            .unwrap();
-

-
        match type_id {
-
            MessageType::InventoryAnnouncement => Announcement {
-
                node: NodeId::arbitrary(g),
-
                message: InventoryAnnouncement {
-
                    inventory: BoundedVec::arbitrary(g),
-
                    timestamp: Timestamp::arbitrary(g),
-
                }
-
                .into(),
-
                signature: crypto::Signature::from(<[u8; 64]>::arbitrary(g)),
-
            }
-
            .into(),
-
            MessageType::RefsAnnouncement => Announcement {
-
                node: NodeId::arbitrary(g),
-
                message: RefsAnnouncement {
-
                    rid: RepoId::arbitrary(g),
-
                    refs: BoundedVec::arbitrary(g),
-
                    timestamp: Timestamp::arbitrary(g),
-
                }
-
                .into(),
-
                signature: crypto::Signature::from(<[u8; 64]>::arbitrary(g)),
-
            }
-
            .into(),
-
            MessageType::NodeAnnouncement => {
-
                let message = NodeAnnouncement {
-
                    version: u8::arbitrary(g),
-
                    features: u64::arbitrary(g).into(),
-
                    timestamp: Timestamp::arbitrary(g),
-
                    alias: Alias::arbitrary(g),
-
                    addresses: Arbitrary::arbitrary(g),
-
                    nonce: u64::arbitrary(g),
-
                    agent: UserAgent::arbitrary(g),
-
                }
-
                .into();
-
                let bytes: [u8; 64] = Arbitrary::arbitrary(g);
-
                let signature = crypto::Signature::from(bytes);
-

-
                Announcement {
-
                    node: NodeId::arbitrary(g),
-
                    signature,
-
                    message,
-
                }
-
                .into()
-
            }
-
            MessageType::Info => {
-
                let message = Info::RefsAlreadySynced {
-
                    rid: RepoId::arbitrary(g),
-
                    at: oid(),
-
                };
-
                Self::Info(message)
-
            }
-
            MessageType::Subscribe => Self::Subscribe(Subscribe {
-
                filter: Filter::arbitrary(g),
-
                since: Timestamp::arbitrary(g),
-
                until: Timestamp::arbitrary(g),
-
            }),
-
            MessageType::Ping => {
-
                let mut rng = fastrand::Rng::with_seed(u64::arbitrary(g));
-

-
                Self::Ping(Ping::new(&mut rng))
-
            }
-
            MessageType::Pong => Self::Pong {
-
                zeroes: ZeroBytes::new(u16::arbitrary(g).min(Ping::MAX_PONG_ZEROES)),
-
            },
-
        }
-
    }
-
}
-

-
impl Arbitrary for ZeroBytes {
-
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
-
        ZeroBytes::new(u16::arbitrary(g))
-
    }
-
}
-

-
impl<T, const N: usize> Arbitrary for BoundedVec<T, N>
-
where
-
    T: Arbitrary + Eq,
-
{
-
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
-
        let mut v: Vec<T> = Arbitrary::arbitrary(g);
-
        v.truncate(N);
-
        v.try_into().expect("size within bounds")
-
    }
-
}
modified crates/radicle-node/src/test/environment.rs
@@ -19,8 +19,10 @@ use radicle::git::refname;
use radicle::identity::{RepoId, Visibility};
use radicle::node::config::ConnectAddress;
use radicle::node::device::Device;
+
use radicle::node::events::Event;
use radicle::node::policy::store as policy;
use radicle::node::seed::Store as _;
+
pub use radicle::node::Config;
use radicle::node::{Alias, Database, UserAgent, POLICIES_DB_FILE};
use radicle::node::{ConnectOptions, Handle as _};
use radicle::profile;
@@ -34,12 +36,9 @@ use radicle::{cob, explorer};
use radicle::{git, web};

use crate::node::NodeId;
-
use crate::service::Event;
use crate::storage::git::transport;
use crate::{runtime, runtime::Handle, service, Runtime};

-
pub use service::Config;
-

/// Test environment.
pub struct Environment {
    tempdir: tempfile::TempDir,
modified crates/radicle-node/src/test/handle.rs
@@ -9,8 +9,8 @@ use radicle::storage::refs::RefsAt;
use crate::identity::RepoId;
use crate::node::{Alias, Config, ConnectOptions, ConnectResult, Event, FetchResult, Seeds};
use crate::runtime::HandleError;
-
use crate::service::policy;
-
use crate::service::NodeId;
+
use radicle::node::policy;
+
use radicle::node::NodeId;

#[derive(Default, Clone)]
pub struct Handle {
modified crates/radicle-node/src/test/node.rs
@@ -22,6 +22,7 @@ use radicle::node::config::ConnectAddress;
use radicle::node::policy::store as policy;
use radicle::node::seed::Store as _;
use radicle::node::Config;
+
use radicle::node::Event;
use radicle::node::{self, Alias};
use radicle::node::{ConnectOptions, Handle as _};
use radicle::node::{Database, POLICIES_DB_FILE};
@@ -33,7 +34,6 @@ use radicle::Storage;

use crate::node::device::Device;
use crate::node::NodeId;
-
use crate::service::Event;
use crate::storage::git::transport;
use crate::{runtime, runtime::Handle, service, Runtime};

modified crates/radicle-node/src/test/peer.rs
@@ -28,7 +28,6 @@ use crate::runtime::Emitter;
use crate::service;
use crate::service::io::Io;
use crate::service::message::*;
-
use crate::service::policy::{Scope, SeedingPolicy};
use crate::service::*;
use crate::storage::git::transport::remote;
use crate::storage::{RemoteId, WriteStorage};
@@ -36,6 +35,10 @@ use crate::test::storage::MockStorage;
use crate::test::{arbitrary, fixtures, simulator};
use crate::wire::MessageType;
use crate::{Link, LocalDuration, LocalTime, PROTOCOL_VERSION};
+
use radicle::node::events::Events;
+
use radicle::node::policy::config as policy;
+
use radicle::node::policy::{Scope, SeedingPolicy};
+
use radicle_protocol::bounded::BoundedVec;

/// Service instantiation used for testing.
pub type Service<S, G> = service::Service<Database, S, G>;
@@ -100,7 +103,7 @@ where
}

pub struct Config<G: crypto::signature::Signer<crypto::Signature> + 'static> {
-
    pub config: service::Config,
+
    pub config: radicle::node::Config,
    pub local_time: LocalTime,
    pub policy: SeedingPolicy,
    pub signer: Device<G>,
@@ -113,7 +116,7 @@ impl Default for Config<MockSigner> {
        let mut rng = fastrand::Rng::new();
        let signer = Device::mock_rng(&mut rng);
        let tmp = tempfile::TempDir::new().unwrap();
-
        let config = service::Config::test(Alias::from_str("mocky").unwrap());
+
        let config = radicle::node::Config::test(Alias::from_str("mocky").unwrap());

        Config {
            config,
modified crates/radicle-node/src/test/simulator.rs
@@ -13,16 +13,19 @@ use std::{fmt, io, net};

use localtime::{LocalDuration, LocalTime};
use log::*;
+
use radicle::node::events::Event;
+
use radicle::node::NodeId;
+
use radicle_protocol::worker::FetchError;

use crate::crypto;
use crate::prelude::{Address, RepoId};
use crate::service::io::Io;
-
use crate::service::{DisconnectReason, Event, Message, Metrics, NodeId};
+
use crate::service::{DisconnectReason, Message, Metrics};
use crate::storage::Namespaces;
use crate::storage::{ReadRepository, WriteStorage};
use crate::test::arbitrary;
use crate::test::peer::Service;
-
use crate::worker::{fetch, FetchError};
+
use crate::worker::fetch;
use crate::Link;

/// Minimum latency between peers.
modified crates/radicle-node/src/tests.rs
@@ -9,17 +9,19 @@ use std::sync::LazyLock;
use std::time;

use crossbeam_channel as chan;
-
use netservices::Direction as Link;
use radicle::identity::Visibility;
use radicle::node::address::Store as _;
use radicle::node::device::Device;
+
use radicle::node::policy;
use radicle::node::refs::Store as _;
use radicle::node::routing::Store as _;
+
use radicle::node::Link;
use radicle::node::{ConnectOptions, DEFAULT_TIMEOUT};
use radicle::storage::refs::RefsAt;
use radicle::storage::RefUpdate;
use radicle::test::arbitrary::gen;
use radicle::test::storage::MockRepository;
+
use radicle_protocol::bounded::BoundedVec;

use crate::collections::{RandomMap, RandomSet};
use crate::identity::RepoId;
@@ -49,7 +51,6 @@ use crate::test::simulator::{Peer as _, Simulation};
use crate::test::storage::MockStorage;
use crate::wire::Decode;
use crate::wire::Encode;
-
use crate::worker;
use crate::worker::fetch;
use crate::LocalTime;
use crate::{git, identity, rad, runtime, service, test};
@@ -78,8 +79,8 @@ fn test_inventory_decode() {
    let timestamp: Timestamp = LocalTime::now().into();

    let mut buf = Vec::new();
-
    inventory.as_slice().encode(&mut buf).unwrap();
-
    timestamp.encode(&mut buf).unwrap();
+
    inventory.as_slice().encode(&mut buf);
+
    timestamp.encode(&mut buf);

    let m = InventoryAnnouncement::decode(&mut buf.as_slice()).expect("message decodes");
    assert_eq!(inventory.as_slice(), m.inventory.as_slice());
@@ -1462,7 +1463,7 @@ fn test_fetch_missing_inventory_on_schedule() {
    alice.fetched(
        rid,
        bob.id,
-
        Err(worker::FetchError::Io(
+
        Err(radicle_protocol::worker::FetchError::Io(
            io::ErrorKind::ConnectionReset.into(),
        )),
    );
@@ -1806,7 +1807,7 @@ fn test_init_and_seed() {
        .find(|e| {
            matches!(
                e,
-
                service::Event::RefsFetched { remote, .. }
+
                radicle::node::events::Event::RefsFetched { remote, .. }
                if *remote == eve.node_id()
            )
        })
modified crates/radicle-node/src/tests/e2e.rs
@@ -1,6 +1,8 @@
use std::{collections::HashSet, thread, time};

use radicle::node::device::Device;
+
use radicle::node::policy::Scope;
+
use radicle::node::Event;
use radicle::node::{Alias, ConnectResult, FetchResult, Handle as _, DEFAULT_TIMEOUT};
use radicle::storage::{
    ReadRepository, ReadStorage, RefUpdate, RemoteRepository, SignRepository, ValidateRepository,
@@ -13,7 +15,6 @@ use radicle::{git, issue};
use crate::node::config::Limits;
use crate::node::{Config, ConnectOptions};
use crate::service;
-
use crate::service::policy::Scope;
use crate::storage::git::transport;
use crate::test::logger;
use crate::test::node::{converge, Node};
@@ -688,8 +689,7 @@ fn test_large_fetch() {
    bob_events
        .wait(
            |e| {
-
                matches!(e, service::Event::RefsFetched { updated, .. } if !updated.is_empty())
-
                    .then_some(())
+
                matches!(e, Event::RefsFetched { updated, .. } if !updated.is_empty()).then_some(())
            },
            time::Duration::from_secs(9 * scale as u64),
        )
@@ -717,7 +717,7 @@ fn test_concurrent_fetches() {
    let mut alice_repos = HashSet::new();
    let mut alice = Node::init(
        tmp.path(),
-
        service::Config {
+
        radicle::node::config::Config {
            limits: limits.clone(),
            relay: radicle::node::config::Relay::Always,
            ..config::relay("alice")
@@ -725,7 +725,7 @@ fn test_concurrent_fetches() {
    );
    let mut bob = Node::init(
        tmp.path(),
-
        service::Config {
+
        radicle::node::config::Config {
            limits,
            relay: radicle::node::config::Relay::Always,
            ..config::relay("bob")
@@ -766,7 +766,7 @@ fn test_concurrent_fetches() {

    while !bob_repos.is_empty() {
        match alice_events.recv().unwrap() {
-
            service::Event::RefsFetched { rid, updated, .. } if !updated.is_empty() => {
+
            Event::RefsFetched { rid, updated, .. } if !updated.is_empty() => {
                bob_repos.remove(&rid);
                log::debug!(target: "test", "{} fetched {rid} ({} left)",alice.id, bob_repos.len());
            }
@@ -776,7 +776,7 @@ fn test_concurrent_fetches() {

    while !alice_repos.is_empty() {
        match bob_events.recv().unwrap() {
-
            service::Event::RefsFetched { rid, updated, .. } if !updated.is_empty() => {
+
            Event::RefsFetched { rid, updated, .. } if !updated.is_empty() => {
                alice_repos.remove(&rid);
                log::debug!(target: "test", "{} fetched {rid} ({} left)", bob.id, alice_repos.len());
            }
@@ -1255,8 +1255,7 @@ fn missing_delegate_default_branch() {
    bob_events
        .wait(
            |e| {
-
                matches!(e, service::Event::RefsFetched { updated, .. } if !updated.is_empty())
-
                    .then_some(())
+
                matches!(e, Event::RefsFetched { updated, .. } if !updated.is_empty()).then_some(())
            },
            DEFAULT_TIMEOUT,
        )
@@ -1416,7 +1415,7 @@ fn test_background_foreground_fetch() {
    bob.handle.announce_refs(rid).unwrap();
    alice_events
        .wait(
-
            |e| matches!(e, service::Event::RefsAnnounced { .. }).then_some(()),
+
            |e| matches!(e, Event::RefsAnnounced { .. }).then_some(()),
            DEFAULT_TIMEOUT,
        )
        .unwrap();
modified crates/radicle-node/src/wire.rs
@@ -1,717 +1,1380 @@
-
mod frame;
-
mod message;
-
mod protocol;
-
mod varint;
-

-
pub use frame::StreamId;
-
pub use message::{AddressType, MessageType};
-
pub use protocol::{Control, Wire, WireReader, WireSession, WireWriter};
-
use radicle::node::UserAgent;
-

-
use std::collections::BTreeMap;
-
use std::convert::TryFrom;
-
use std::ops::Deref;
-
use std::str::FromStr;
-
use std::string::FromUtf8Error;
-
use std::{io, mem};
-

-
use byteorder::{NetworkEndian, ReadBytesExt, WriteBytesExt};
-
use cyphernet::addr::tor;
-

-
use crate::crypto::{PublicKey, Signature, Unverified};
-
use crate::git;
-
use crate::git::fmt;
-
use crate::identity::RepoId;
-
use crate::node;
-
use crate::node::Alias;
-
use crate::prelude::*;
-
use crate::service::filter;
-
use crate::storage::refs::Refs;
-
use crate::storage::refs::RefsAt;
-
use crate::storage::refs::SignedRefs;
-
use crate::Timestamp;
-

-
/// The default type we use to represent sizes on the wire.
-
///
-
/// Since wire messages are limited to 64KB by the transport layer,
-
/// two bytes is enough to represent any message.
-
///
-
/// Note that in certain cases, we may use a smaller type.
-
pub type Size = u16;
-

-
#[derive(thiserror::Error, Debug)]
-
pub enum Error {
-
    #[error("i/o: {0}")]
-
    Io(#[from] io::Error),
-
    #[error("UTF-8 error: {0}")]
-
    FromUtf8(#[from] FromUtf8Error),
-
    #[error("invalid size: expected {expected}, got {actual}")]
-
    InvalidSize { expected: usize, actual: usize },
-
    #[error("invalid filter size: {0}")]
-
    InvalidFilterSize(usize),
-
    #[error("invalid channel type {0:x}")]
-
    InvalidStreamKind(u8),
-
    #[error(transparent)]
-
    InvalidRefName(#[from] fmt::Error),
-
    #[error(transparent)]
-
    InvalidAlias(#[from] node::AliasError),
-
    #[error("invalid user agent string: {0:?}")]
-
    InvalidUserAgent(String),
-
    #[error("invalid control message with type `{0}`")]
-
    InvalidControlMessage(u8),
-
    #[error("invalid protocol version header `{0:x?}`")]
-
    InvalidProtocolVersion([u8; 4]),
-
    #[error("invalid onion address: {0}")]
-
    InvalidOnionAddr(#[from] tor::OnionAddrDecodeError),
-
    #[error("invalid timestamp: {0}")]
-
    InvalidTimestamp(u64),
-
    #[error("wrong protocol version `{0}`")]
-
    WrongProtocolVersion(u8),
-
    #[error("unknown address type `{0}`")]
-
    UnknownAddressType(u8),
-
    #[error("unknown message type `{0}`")]
-
    UnknownMessageType(u16),
-
    #[error("unknown info type `{0}`")]
-
    UnknownInfoType(u16),
-
    #[error("unexpected bytes")]
-
    UnexpectedBytes,
+
//! Implementation of the transport protocol.
+
//!
+
//! We use the Noise XK handshake pattern to establish an encrypted stream with a remote peer.
+
//! The handshake itself is implemented in the external [`cyphernet`] and [`netservices`] crates.
+
use std::collections::hash_map::Entry;
+
use std::collections::VecDeque;
+
use std::os::unix::io::{AsRawFd, RawFd};
+
use std::sync::Arc;
+
use std::{io, net, time};
+

+
use amplify::Wrapper as _;
+
use crossbeam_channel as chan;
+
use cyphernet::addr::{HostName, InetHost, NetAddr};
+
use cyphernet::encrypt::noise::{HandshakePattern, Keyset, NoiseState};
+
use cyphernet::proxy::socks5;
+
use cyphernet::{Digest, EcSk, Ecdh, Sha256};
+
use localtime::LocalTime;
+
use netservices::resource::{ListenerEvent, NetAccept, NetTransport, SessionEvent};
+
use netservices::session::{NoiseSession, ProtocolArtifact, Socks5Session};
+
use netservices::{NetConnection, NetReader, NetWriter};
+
use radicle::node::device::Device;
+
use reactor::{ResourceId, ResourceType, Timestamp};
+

+
use radicle::collections::RandomMap;
+
use radicle::crypto;
+
use radicle::node::config::AddressConfig;
+
use radicle::node::Link;
+
use radicle::node::NodeId;
+
use radicle::storage::WriteStorage;
+
use radicle_protocol::deserializer::Deserializer;
+
pub use radicle_protocol::wire::frame;
+
pub use radicle_protocol::wire::frame::{Frame, FrameData, StreamId};
+
pub use radicle_protocol::wire::*;
+
use radicle_protocol::worker::{FetchRequest, FetchResult};
+

+
use crate::service;
+
use crate::service::io::Io;
+
use crate::service::FETCH_TIMEOUT;
+
use crate::service::{session, DisconnectReason, Metrics, Service};
+
use crate::worker;
+
use crate::worker::{ChannelEvent, ChannelsConfig};
+
use crate::worker::{Task, TaskResult};
+

+
/// NoiseXK handshake pattern.
+
pub const NOISE_XK: HandshakePattern = HandshakePattern {
+
    initiator: cyphernet::encrypt::noise::InitiatorPattern::Xmitted,
+
    responder: cyphernet::encrypt::noise::OneWayPattern::Known,
+
};
+

+
/// Default time to wait until a network connection is considered inactive.
+
pub const DEFAULT_CONNECTION_TIMEOUT: time::Duration = time::Duration::from_secs(6);
+

+
/// Default time to wait when dialing a connection, before the remote is considered unreachable.
+
pub const DEFAULT_DIAL_TIMEOUT: time::Duration = time::Duration::from_secs(6);
+

+
/// Maximum size of a peer inbox, in bytes.
+
pub const MAX_INBOX_SIZE: usize = 1024 * 1024 * 2;
+

+
/// Control message used internally between workers, users, and the service.
+
#[allow(clippy::large_enum_variant)]
+
#[derive(Debug)]
+
pub enum Control {
+
    /// Message from the user to the service.
+
    User(service::Command),
+
    /// Message from a worker to the service.
+
    Worker(TaskResult),
+
    /// Flush data in the given stream to the remote.
+
    Flush { remote: NodeId, stream: StreamId },
}

-
impl Error {
-
    /// Whether we've reached the end of file. This will be true when we fail to decode
-
    /// a message because there's not enough data in the stream.
-
    pub fn is_eof(&self) -> bool {
-
        matches!(self, Self::Io(err) if err.kind() == io::ErrorKind::UnexpectedEof)
-
    }
-
}
-

-
/// Things that can be encoded as binary.
-
pub trait Encode {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error>;
+
/// Peer session type.
+
pub type WireSession<G> = NoiseSession<G, Sha256, Socks5Session<net::TcpStream>>;
+
/// Peer session type (read-only).
+
pub type WireReader = NetReader<Socks5Session<net::TcpStream>>;
+
/// Peer session type (write-only).
+
pub type WireWriter<G> = NetWriter<NoiseState<G, Sha256>, Socks5Session<net::TcpStream>>;
+

+
/// Reactor action.
+
type Action<G> = reactor::Action<NetAccept<WireSession<G>>, NetTransport<WireSession<G>>>;
+

+
/// A worker stream.
+
struct Stream {
+
    /// Channels.
+
    channels: worker::Channels,
+
    /// Data sent.
+
    sent_bytes: usize,
+
    /// Data received.
+
    received_bytes: usize,
}

-
/// Things that can be decoded from binary.
-
pub trait Decode: Sized {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error>;
+
impl Stream {
+
    fn new(channels: worker::Channels) -> Self {
+
        Self {
+
            channels,
+
            sent_bytes: 0,
+
            received_bytes: 0,
+
        }
+
    }
}

-
/// Encode an object into a byte vector.
-
///
-
/// # Panics
-
///
-
/// If the encoded object exceeds [`Size::MAX`].
-
pub fn serialize<T: Encode + ?Sized>(data: &T) -> Vec<u8> {
-
    let mut buffer = Vec::new();
-
    // SAFETY: We expect this to panic if the user passes
-
    // in data that exceeds the maximum allowed size.
-
    #[allow(clippy::unwrap_used)]
-
    let len = data.encode(&mut buffer).unwrap();
-

-
    debug_assert_eq!(len, buffer.len());
-

-
    buffer
+
/// Streams associated with a connected peer.
+
struct Streams {
+
    /// Active streams and their associated worker channels.
+
    /// Note that the gossip and control streams are not included here as they are always
+
    /// implied to exist.
+
    streams: RandomMap<StreamId, Stream>,
+
    /// Connection direction.
+
    link: Link,
+
    /// Sequence number used to compute the next stream id.
+
    seq: u64,
}

-
/// Decode an object from a vector.
-
pub fn deserialize<T: Decode>(data: &[u8]) -> Result<T, Error> {
-
    let mut cursor = io::Cursor::new(data);
-
    let obj = T::decode(&mut cursor)?;
-

-
    if cursor.position() as usize != cursor.get_ref().len() {
-
        return Err(Error::UnexpectedBytes);
+
impl Streams {
+
    /// Create a new [`Streams`] object, passing the connection link.
+
    fn new(link: Link) -> Self {
+
        Self {
+
            streams: RandomMap::default(),
+
            link,
+
            seq: 0,
+
        }
    }
-
    Ok(obj)
-
}
-

-
impl Encode for u8 {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        writer.write_u8(*self)?;

-
        Ok(mem::size_of::<Self>())
+
    /// Get a known stream.
+
    fn get(&self, stream: &StreamId) -> Option<&Stream> {
+
        self.streams.get(stream)
    }
-
}
-

-
impl Encode for u16 {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        writer.write_u16::<NetworkEndian>(*self)?;

-
        Ok(mem::size_of::<Self>())
+
    /// Get a known stream, mutably.
+
    fn get_mut(&mut self, stream: &StreamId) -> Option<&mut Stream> {
+
        self.streams.get_mut(stream)
    }
-
}
-

-
impl Encode for u32 {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        writer.write_u32::<NetworkEndian>(*self)?;

-
        Ok(mem::size_of::<Self>())
-
    }
-
}
+
    /// Open a new stream.
+
    fn open(&mut self, config: ChannelsConfig) -> (StreamId, worker::Channels) {
+
        self.seq += 1;

-
impl Encode for u64 {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        writer.write_u64::<NetworkEndian>(*self)?;
+
        let id = StreamId::git(self.link)
+
            .nth(self.seq)
+
            .expect("Streams::open: too many streams");
+
        let channels = self
+
            .register(id, config)
+
            .expect("Streams::open: stream was already open");

-
        Ok(mem::size_of::<Self>())
+
        (id, channels)
    }
-
}

-
impl Encode for PublicKey {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        self.deref().encode(writer)
-
    }
-
}
+
    /// Register an open stream.
+
    fn register(&mut self, stream: StreamId, config: ChannelsConfig) -> Option<worker::Channels> {
+
        let (wire, worker) = worker::Channels::pair(config)
+
            .expect("Streams::register: fatal: unable to create channels");

-
impl<const T: usize> Encode for &[u8; T] {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        writer.write_all(&**self)?;
-
        Ok(mem::size_of::<Self>())
+
        match self.streams.entry(stream) {
+
            Entry::Vacant(e) => {
+
                e.insert(Stream::new(worker));
+
                Some(wire)
+
            }
+
            Entry::Occupied(_) => None,
+
        }
    }
-
}

-
impl<const T: usize> Encode for [u8; T] {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        writer.write_all(self)?;
-

-
        Ok(mem::size_of::<Self>())
+
    /// Unregister an open stream.
+
    fn unregister(&mut self, stream: &StreamId) -> Option<Stream> {
+
        self.streams.remove(stream)
    }
-
}

-
impl<T> Encode for &[T]
-
where
-
    T: Encode,
-
{
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        let mut n = (self.len() as Size).encode(writer)?;
-

-
        for item in self.iter() {
-
            n += item.encode(writer)?;
+
    /// Close all streams.
+
    fn shutdown(&mut self) {
+
        for (sid, stream) in self.streams.drain() {
+
            log::debug!(target: "wire", "Closing worker stream {sid}");
+
            stream.channels.close().ok();
        }
-
        Ok(n)
    }
}

-
impl<T, const N: usize> Encode for BoundedVec<T, N>
-
where
-
    T: Encode,
-
{
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        self.as_slice().encode(writer)
-
    }
+
/// The initial state of an outbound peer before handshake is completed.
+
#[derive(Debug)]
+
struct Outbound {
+
    /// Resource ID, if registered.
+
    id: Option<ResourceId>,
+
    /// Remote address.
+
    addr: NetAddr<HostName>,
+
    /// Remote Node ID.
+
    nid: NodeId,
}

-
impl Encode for &str {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        assert!(self.len() <= u8::MAX as usize);
-

-
        let n = (self.len() as u8).encode(writer)?;
-
        let bytes = self.as_bytes();
-

-
        // Nb. Don't use the [`Encode`] instance here for &[u8], because we are prefixing the
-
        // length ourselves.
-
        writer.write_all(bytes)?;
-

-
        Ok(n + bytes.len())
-
    }
+
/// The initial state of an inbound peer before handshake is completed.
+
#[derive(Debug)]
+
struct Inbound {
+
    /// Resource ID, if registered.
+
    id: Option<ResourceId>,
+
    /// Remote address.
+
    addr: NetAddr<HostName>,
}

-
impl Encode for String {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        self.as_str().encode(writer)
-
    }
+
/// Peer connection state machine.
+
enum Peer {
+
    /// The state after handshake is completed.
+
    /// Peers in this state are handled by the underlying service.
+
    Connected {
+
        #[allow(dead_code)]
+
        addr: NetAddr<HostName>,
+
        link: Link,
+
        nid: NodeId,
+
        inbox: Deserializer<MAX_INBOX_SIZE, Frame>,
+
        streams: Streams,
+
    },
+
    /// The peer was scheduled for disconnection. Once the transport is handed over
+
    /// by the reactor, we can consider it disconnected.
+
    Disconnecting {
+
        link: Link,
+
        nid: Option<NodeId>,
+
        reason: DisconnectReason,
+
    },
}

-
impl Encode for git::Url {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        self.to_string().encode(writer)
+
impl std::fmt::Debug for Peer {
+
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+
        match self {
+
            Self::Connected { link, nid, .. } => write!(f, "Connected({link:?}, {nid})"),
+
            Self::Disconnecting { .. } => write!(f, "Disconnecting"),
+
        }
    }
}

-
impl Encode for RepoId {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        self.deref().encode(writer)
+
impl Peer {
+
    /// Return the peer's id, if any.
+
    fn id(&self) -> Option<&NodeId> {
+
        match self {
+
            Peer::Connected { nid, .. } | Peer::Disconnecting { nid: Some(nid), .. } => Some(nid),
+
            Peer::Disconnecting { nid: None, .. } => None,
+
        }
    }
-
}

-
impl Encode for Refs {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        let len: Size = self
-
            .len()
-
            .try_into()
-
            .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?;
-
        let mut n = len.encode(writer)?;
-

-
        for (name, oid) in self.iter() {
-
            n += name.as_str().encode(writer)?;
-
            n += oid.encode(writer)?;
+
    fn link(&self) -> Link {
+
        match self {
+
            Peer::Connected { link, .. } => *link,
+
            Peer::Disconnecting { link, .. } => *link,
        }
-
        Ok(n)
    }
-
}

-
impl Encode for cyphernet::addr::tor::OnionAddrV3 {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        self.into_raw_bytes().encode(writer)
+
    /// Connected peer.
+
    fn connected(nid: NodeId, addr: NetAddr<HostName>, link: Link) -> Self {
+
        Self::Connected {
+
            link,
+
            addr,
+
            nid,
+
            inbox: Deserializer::default(),
+
            streams: Streams::new(link),
+
        }
    }
}

-
impl Encode for UserAgent {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        self.as_ref().encode(writer)
-
    }
-
}
+
/// Holds connected peers.
+
struct Peers(RandomMap<ResourceId, Peer>);

-
impl Encode for Alias {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        self.as_ref().encode(writer)
+
impl Peers {
+
    fn get_mut(&mut self, id: &ResourceId) -> Option<&mut Peer> {
+
        self.0.get_mut(id)
    }
-
}

-
impl<A, B> Encode for (A, B)
-
where
-
    A: Encode,
-
    B: Encode,
-
{
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        let mut n = self.0.encode(writer)?;
-
        n += self.1.encode(writer)?;
-
        Ok(n)
+
    fn entry(&mut self, id: ResourceId) -> Entry<ResourceId, Peer> {
+
        self.0.entry(id)
    }
-
}

-
impl Encode for git::RefString {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        self.as_str().encode(writer)
+
    fn insert(&mut self, id: ResourceId, peer: Peer) {
+
        if self.0.insert(id, peer).is_some() {
+
            log::warn!(target: "wire", "Replacing existing peer id={id}");
+
        }
    }
-
}

-
impl Encode for Signature {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        self.deref().encode(writer)
+
    fn remove(&mut self, id: &ResourceId) -> Option<Peer> {
+
        self.0.remove(id)
    }
-
}

-
impl Encode for git::Oid {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        // Nb. We use length-encoding here to support future SHA-2 object ids.
-
        self.as_bytes().encode(writer)
+
    fn lookup(&self, node_id: &NodeId) -> Option<(ResourceId, &Peer)> {
+
        self.0
+
            .iter()
+
            .find(|(_, peer)| peer.id() == Some(node_id))
+
            .map(|(fd, peer)| (*fd, peer))
    }
-
}
-

-
////////////////////////////////////////////////////////////////////////////////
-

-
impl Decode for PublicKey {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        let buf: [u8; 32] = Decode::decode(reader)?;

-
        Ok(PublicKey::from(buf))
+
    fn lookup_mut(&mut self, node_id: &NodeId) -> Option<(ResourceId, &mut Peer)> {
+
        self.0
+
            .iter_mut()
+
            .find(|(_, peer)| peer.id() == Some(node_id))
+
            .map(|(fd, peer)| (*fd, peer))
    }
-
}
-

-
impl Decode for Refs {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        let len = Size::decode(reader)?;
-
        let mut refs = BTreeMap::new();
-

-
        for _ in 0..len {
-
            let name = String::decode(reader)?;
-
            let name = git::RefString::try_from(name).map_err(Error::from)?;
-
            let oid = git::Oid::decode(reader)?;

-
            refs.insert(name, oid);
-
        }
-
        Ok(refs.into())
+
    fn active(&self) -> impl Iterator<Item = (ResourceId, &NodeId, Link)> {
+
        self.0.iter().filter_map(|(id, peer)| match peer {
+
            Peer::Connected { nid, link, .. } => Some((*id, nid, *link)),
+
            Peer::Disconnecting { .. } => None,
+
        })
    }
-
}

-
impl Decode for git::RefString {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        let ref_str = String::decode(reader)?;
-
        git::RefString::try_from(ref_str).map_err(Error::from)
+
    fn connected(&self) -> impl Iterator<Item = (ResourceId, &NodeId)> {
+
        self.0.iter().filter_map(|(id, peer)| {
+
            if let Peer::Connected { nid, .. } = peer {
+
                Some((*id, nid))
+
            } else {
+
                None
+
            }
+
        })
    }
-
}

-
impl Decode for UserAgent {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        String::decode(reader)
-
            .and_then(|s| UserAgent::from_str(&s).map_err(Error::InvalidUserAgent))
+
    fn iter(&self) -> impl Iterator<Item = &Peer> {
+
        self.0.values()
    }
}

-
impl Decode for Alias {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        String::decode(reader).and_then(|s| Alias::from_str(&s).map_err(Error::from))
-
    }
+
/// Wire protocol implementation for a set of peers.
+
pub struct Wire<D, S, G: crypto::signature::Signer<crypto::Signature> + Ecdh> {
+
    /// Backing service instance.
+
    service: Service<D, S, G>,
+
    /// Worker pool interface.
+
    worker: chan::Sender<Task>,
+
    /// Used for authentication.
+
    signer: Device<G>,
+
    /// Node metrics.
+
    metrics: service::Metrics,
+
    /// Internal queue of actions to send to the reactor.
+
    actions: VecDeque<Action<G>>,
+
    /// Outbound attempted peers without a session.
+
    outbound: RandomMap<RawFd, Outbound>,
+
    /// Inbound peers without a session.
+
    inbound: RandomMap<RawFd, Inbound>,
+
    /// Listening addresses that are not yet registered.
+
    listening: RandomMap<RawFd, net::SocketAddr>,
+
    /// Peer (established) sessions.
+
    peers: Peers,
}

-
impl<A, B> Decode for (A, B)
+
impl<D, S, G> Wire<D, S, G>
where
-
    A: Decode,
-
    B: Decode,
+
    D: service::Store,
+
    S: WriteStorage + 'static,
+
    G: crypto::signature::Signer<crypto::Signature> + Ecdh<Pk = NodeId>,
{
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        let a = A::decode(reader)?;
-
        let b = B::decode(reader)?;
-
        Ok((a, b))
-
    }
-
}
-

-
impl Decode for git::Oid {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        let len = Size::decode(reader)? as usize;
-
        #[allow(non_upper_case_globals)]
-
        const expected: usize = mem::size_of::<git::raw::Oid>();
-

-
        if len != expected {
-
            return Err(Error::InvalidSize {
-
                expected,
-
                actual: len,
-
            });
+
    pub fn new(service: Service<D, S, G>, worker: chan::Sender<Task>, signer: Device<G>) -> Self {
+
        assert!(service.started().is_some(), "Service must be initialized");
+

+
        Self {
+
            service,
+
            worker,
+
            signer,
+
            metrics: Metrics::default(),
+
            actions: VecDeque::new(),
+
            inbound: RandomMap::default(),
+
            outbound: RandomMap::default(),
+
            listening: RandomMap::default(),
+
            peers: Peers(RandomMap::default()),
        }
-

-
        let buf: [u8; expected] = Decode::decode(reader)?;
-
        let oid = git::raw::Oid::from_bytes(&buf).expect("the buffer is exactly the right size");
-
        let oid = git::Oid::from(oid);
-

-
        Ok(oid)
-
    }
-
}
-

-
impl Decode for Signature {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        let bytes: [u8; 64] = Decode::decode(reader)?;
-

-
        Ok(Signature::from(bytes))
    }
-
}

-
impl Decode for u8 {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        reader.read_u8().map_err(Error::from)
+
    pub fn listen(&mut self, socket: NetAccept<WireSession<G>>) {
+
        self.listening
+
            .insert(socket.as_raw_fd(), socket.local_addr());
+
        self.actions.push_back(Action::RegisterListener(socket));
+
    }
+

+
    fn disconnect(&mut self, id: ResourceId, reason: DisconnectReason) -> Option<(NodeId, Link)> {
+
        match self.peers.entry(id) {
+
            Entry::Vacant(_) => {
+
                // Connecting peer with no session.
+
                log::debug!(target: "wire", "Disconnecting pending peer with id={id}: {reason}");
+
                self.actions.push_back(Action::UnregisterTransport(id));
+

+
                // Check for attempted outbound connections. Unestablished inbound connections don't
+
                // have an NID yet.
+
                self.outbound
+
                    .values()
+
                    .find(|o| o.id == Some(id))
+
                    .map(|o| (o.nid, Link::Outbound))
+
            }
+
            Entry::Occupied(mut e) => match e.get_mut() {
+
                Peer::Disconnecting { nid, link, .. } => {
+
                    log::error!(target: "wire", "Peer with id={id} is already disconnecting");
+

+
                    nid.map(|n| (n, *link))
+
                }
+
                Peer::Connected {
+
                    nid, streams, link, ..
+
                } => {
+
                    log::debug!(target: "wire", "Disconnecting peer with id={id}: {reason}");
+
                    let nid = *nid;
+
                    let link = *link;
+

+
                    streams.shutdown();
+
                    e.insert(Peer::Disconnecting {
+
                        nid: Some(nid),
+
                        link,
+
                        reason,
+
                    });
+
                    self.actions.push_back(Action::UnregisterTransport(id));
+

+
                    Some((nid, link))
+
                }
+
            },
+
        }
    }
-
}

-
impl Decode for u16 {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        reader.read_u16::<NetworkEndian>().map_err(Error::from)
-
    }
-
}
+
    fn worker_result(&mut self, task: TaskResult) {
+
        log::debug!(
+
            target: "wire",
+
            "Received fetch result from worker for stream {}, remote {}: {:?}",
+
            task.stream, task.remote, task.result
+
        );

-
impl Decode for u32 {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        reader.read_u32::<NetworkEndian>().map_err(Error::from)
+
        let nid = task.remote;
+
        let Some((fd, peer)) = self.peers.lookup_mut(&nid) else {
+
            log::warn!(target: "wire", "Peer {nid} not found; ignoring fetch result");
+
            return;
+
        };
+

+
        if let Peer::Connected { link, streams, .. } = peer {
+
            // Nb. It's possible that the stream would already be unregistered if we received an
+
            // early "close" from the remote. Otherwise, we unregister it here and send the "close"
+
            // ourselves.
+
            if let Some(s) = streams.unregister(&task.stream) {
+
                log::debug!(
+
                    target: "wire", "Stream {} of {} closing with {} byte(s) sent and {} byte(s) received",
+
                    task.stream, task.remote, s.sent_bytes, s.received_bytes
+
                );
+
                let frame = Frame::<service::Message>::control(
+
                    *link,
+
                    frame::Control::Close {
+
                        stream: task.stream,
+
                    },
+
                );
+
                self.actions.push_back(Action::Send(fd, frame.to_bytes()));
+
            }
+
        } else {
+
            // If the peer disconnected, we'll get here, but we still want to let the service know
+
            // about the fetch result, so we don't return here.
+
            log::warn!(target: "wire", "Peer {nid} is not connected; ignoring fetch result");
+
            return;
+
        };
+

+
        // Only call into the service if we initiated this fetch.
+
        match task.result {
+
            FetchResult::Initiator { rid, result } => {
+
                self.service.fetched(rid, nid, result);
+
            }
+
            FetchResult::Responder { rid, result } => {
+
                if let Some(rid) = rid {
+
                    if let Some(err) = result.err() {
+
                        log::info!(target: "wire", "Peer {nid} failed to fetch {rid} from us: {err}");
+
                    } else {
+
                        log::info!(target: "wire", "Peer {nid} fetched {rid} from us successfully");
+
                    }
+
                }
+
            }
+
        }
    }
-
}

-
impl Decode for u64 {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        reader.read_u64::<NetworkEndian>().map_err(Error::from)
+
    fn flush(&mut self, remote: NodeId, stream: StreamId) {
+
        let Some((fd, peer)) = self.peers.lookup_mut(&remote) else {
+
            log::warn!(target: "wire", "Peer {remote} is not known; ignoring flush");
+
            return;
+
        };
+
        let Peer::Connected { streams, link, .. } = peer else {
+
            log::warn!(target: "wire", "Peer {remote} is not connected; ignoring flush");
+
            return;
+
        };
+
        let Some(s) = streams.get_mut(&stream) else {
+
            log::debug!(target: "wire", "Stream {stream} cannot be found; ignoring flush");
+
            return;
+
        };
+
        let metrics = self.metrics.peer(remote);
+

+
        for data in s.channels.try_iter() {
+
            let frame = match data {
+
                ChannelEvent::Data(data) => {
+
                    metrics.sent_git_bytes += data.len();
+
                    metrics.sent_bytes += data.len();
+
                    Frame::<service::Message>::git(stream, data)
+
                }
+
                ChannelEvent::Close => Frame::control(*link, frame::Control::Close { stream }),
+
                ChannelEvent::Eof => Frame::control(*link, frame::Control::Eof { stream }),
+
            };
+
            self.actions
+
                .push_back(reactor::Action::Send(fd, frame.to_bytes()));
+
        }
    }
-
}
-

-
impl<const N: usize> Decode for [u8; N] {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        let mut ary = [0; N];
-
        reader.read_exact(&mut ary)?;

-
        Ok(ary)
+
    fn cleanup(&mut self, id: ResourceId, fd: RawFd) {
+
        if self.inbound.remove(&fd).is_some() {
+
            log::debug!(target: "wire", "Cleaning up inbound peer state with id={id} (fd={fd})");
+
        } else if let Some(outbound) = self.outbound.remove(&fd) {
+
            log::debug!(target: "wire", "Cleaning up outbound peer state with id={id} (fd={fd})");
+
            self.service.disconnected(
+
                outbound.nid,
+
                Link::Outbound,
+
                &DisconnectReason::connection(),
+
            );
+
        } else {
+
            log::debug!(target: "wire", "Tried to cleanup unknown peer with id={id} (fd={fd})");
+
        }
    }
}

-
impl<T, const N: usize> Decode for BoundedVec<T, N>
+
impl<D, S, G> reactor::Handler for Wire<D, S, G>
where
-
    T: Decode,
+
    D: service::Store + Send,
+
    S: WriteStorage + Send + 'static,
+
    G: crypto::signature::Signer<crypto::Signature> + Ecdh<Pk = NodeId> + Clone + Send,
{
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        let len: usize = Size::decode(reader)? as usize;
-
        let mut items = Self::with_capacity(len).map_err(|_| Error::InvalidSize {
-
            expected: Self::max(),
-
            actual: len,
-
        })?;
-

-
        for _ in 0..items.capacity() {
-
            let item = T::decode(reader)?;
-
            items.push(item).ok();
-
        }
-
        Ok(items)
-
    }
-
}
-

-
impl Decode for String {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        let len = u8::decode(reader)?;
-
        let mut bytes = vec![0; len as usize];
-

-
        reader.read_exact(&mut bytes)?;
-

-
        let string = String::from_utf8(bytes)?;
-

-
        Ok(string)
-
    }
-
}
-

-
impl Decode for RepoId {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        let oid: git::Oid = Decode::decode(reader)?;
-

-
        Ok(Self::from(oid))
-
    }
-
}
-

-
impl Encode for filter::Filter {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        let mut n = 0;
-

-
        n += self.deref().as_bytes().encode(writer)?;
-

-
        Ok(n)
+
    type Listener = NetAccept<WireSession<G>>;
+
    type Transport = NetTransport<WireSession<G>>;
+
    type Command = Control;
+

+
    fn tick(&mut self, time: Timestamp) {
+
        self.metrics.open_channels = self
+
            .peers
+
            .iter()
+
            .filter_map(|p| {
+
                if let Peer::Connected { streams, .. } = p {
+
                    Some(streams.streams.len())
+
                } else {
+
                    None
+
                }
+
            })
+
            .sum();
+
        self.metrics.worker_queue_size = self.worker.len();
+
        self.service.tick(
+
            LocalTime::from_millis(time.as_millis() as u128),
+
            &self.metrics,
+
        );
    }
-
}

-
impl Decode for filter::Filter {
-
    fn decode<R: std::io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        let size: usize = Size::decode(reader)? as usize;
-
        if !filter::FILTER_SIZES.contains(&size) {
-
            return Err(Error::InvalidFilterSize(size));
+
    fn handle_timer(&mut self) {
+
        self.service.wake();
+
    }
+

+
    fn handle_listener_event(
+
        &mut self,
+
        _: ResourceId, // Nb. This is the ID of the listener socket.
+
        event: ListenerEvent<WireSession<G>>,
+
        _: Timestamp,
+
    ) {
+
        match event {
+
            ListenerEvent::Accepted(connection) => {
+
                let Ok(remote) = connection.remote_addr() else {
+
                    log::warn!(target: "wire", "Accepted connection doesn't have remote address; dropping..");
+
                    drop(connection);
+

+
                    return;
+
                };
+
                let InetHost::Ip(ip) = remote.host else {
+
                    log::error!(target: "wire", "Unexpected host type for inbound connection {remote}; dropping..");
+
                    drop(connection);
+

+
                    return;
+
                };
+
                let fd = connection.as_raw_fd();
+
                log::debug!(target: "wire", "Inbound connection from {remote} (fd={fd})..");
+

+
                // If the service doesn't want to accept this connection,
+
                // we drop the connection here, which disconnects the socket.
+
                if !self.service.accepted(ip) {
+
                    log::debug!(target: "wire", "Rejecting inbound connection from {ip} (fd={fd})..");
+
                    drop(connection);
+

+
                    return;
+
                }
+

+
                let session = match accept::<G>(
+
                    remote.clone().into(),
+
                    connection,
+
                    self.signer.clone().into_inner(),
+
                ) {
+
                    Ok(s) => s,
+
                    Err(e) => {
+
                        log::error!(target: "wire", "Error creating session for {ip}: {e}");
+
                        return;
+
                    }
+
                };
+
                let transport = match NetTransport::with_session(
+
                    session,
+
                    netservices::Direction::Inbound,
+
                ) {
+
                    Ok(transport) => transport,
+
                    Err(err) => {
+
                        log::error!(target: "wire", "Failed to create transport for accepted connection: {err}");
+
                        return;
+
                    }
+
                };
+
                log::debug!(target: "wire", "Accepted inbound connection from {remote} (fd={fd})..");
+

+
                self.inbound.insert(
+
                    fd,
+
                    Inbound {
+
                        id: None,
+
                        addr: remote.into(),
+
                    },
+
                );
+
                self.actions
+
                    .push_back(reactor::Action::RegisterTransport(transport))
+
            }
+
            ListenerEvent::Failure(err) => {
+
                log::error!(target: "wire", "Error listening for inbound connections: {err}");
+
            }
        }
-

-
        let mut bytes = vec![0; size];
-
        reader.read_exact(&mut bytes[..])?;
-

-
        let f = filter::BloomFilter::from(bytes);
-
        debug_assert_eq!(f.hashes(), filter::FILTER_HASHES);
-

-
        Ok(Self::from(f))
    }
-
}
-

-
impl<V> Encode for SignedRefs<V> {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        let mut n = 0;
-

-
        n += self.id.encode(writer)?;
-
        n += self.refs.encode(writer)?;
-
        n += self.signature.encode(writer)?;

-
        Ok(n)
+
    fn handle_registered(&mut self, fd: RawFd, id: ResourceId, typ: ResourceType) {
+
        match typ {
+
            ResourceType::Listener => {
+
                if let Some(local_addr) = self.listening.remove(&fd) {
+
                    self.service.listening(local_addr);
+
                }
+
            }
+
            ResourceType::Transport => {
+
                if let Some(outbound) = self.outbound.get_mut(&fd) {
+
                    log::debug!(target: "wire", "Outbound peer resource registered for {} with id={id} (fd={fd})", outbound.nid);
+
                    outbound.id = Some(id);
+
                } else if let Some(inbound) = self.inbound.get_mut(&fd) {
+
                    log::debug!(target: "wire", "Inbound peer resource registered with id={id} (fd={fd})");
+
                    inbound.id = Some(id);
+
                } else {
+
                    log::warn!(target: "wire", "Unknown peer registered with fd={fd} and id={id}");
+
                }
+
            }
+
        }
    }
-
}
-

-
impl Decode for SignedRefs<Unverified> {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        let id = NodeId::decode(reader)?;
-
        let refs = Refs::decode(reader)?;
-
        let signature = Signature::decode(reader)?;

-
        Ok(Self::new(refs, id, signature))
+
    fn handle_transport_event(
+
        &mut self,
+
        id: ResourceId,
+
        event: SessionEvent<WireSession<G>>,
+
        _: Timestamp,
+
    ) {
+
        match event {
+
            SessionEvent::Established(fd, ProtocolArtifact { state, .. }) => {
+
                // SAFETY: With the NoiseXK protocol, there is always a remote static key.
+
                let nid: NodeId = state.remote_static_key.unwrap();
+
                // Make sure we don't try to connect to ourselves by mistake.
+
                if &nid == self.signer.public_key() {
+
                    log::error!(target: "wire", "Self-connection detected, disconnecting..");
+
                    self.disconnect(id, DisconnectReason::SelfConnection);
+

+
                    return;
+
                }
+
                let (addr, link) = if let Some(peer) = self.inbound.remove(&fd) {
+
                    self.metrics.peer(nid).inbound_connection_attempts += 1;
+
                    (peer.addr, Link::Inbound)
+
                } else if let Some(peer) = self.outbound.remove(&fd) {
+
                    assert_eq!(nid, peer.nid);
+
                    (peer.addr, Link::Outbound)
+
                } else {
+
                    log::error!(target: "wire", "Session for {nid} (id={id}) not found");
+
                    return;
+
                };
+
                log::debug!(
+
                    target: "wire",
+
                    "Session established with {nid} (id={id}) (fd={fd}) ({})",
+
                    if link.is_inbound() { "inbound" } else { "outbound" }
+
                );
+

+
                // Connections to close.
+
                let mut disconnect = Vec::new();
+

+
                // Handle conflicting connections.
+
                // This is typical when nodes have mutually configured their nodes to connect to
+
                // each other on startup. We handle this by deterministically choosing one node
+
                // whose outbound connection is the one that is kept. The other connections are
+
                // dropped.
+
                {
+
                    // Whether we have precedence in case of conflicting connections.
+
                    // Having precedence means that our outbound connection will win over
+
                    // the other node's outbound connection.
+
                    let precedence = *self.signer.public_key() > nid;
+

+
                    // Pre-existing connections that conflict with this newly established session.
+
                    // Note that we can't know whether a connection is conflicting before we get the
+
                    // remote static key.
+
                    let mut conflicting = Vec::new();
+

+
                    // Active sessions with the same NID but a different Resource ID are conflicting.
+
                    conflicting.extend(
+
                        self.peers
+
                            .active()
+
                            .filter(|(c_id, d, _)| **d == nid && *c_id != id)
+
                            .map(|(c_id, _, link)| (c_id, link)),
+
                    );
+

+
                    // Outbound connection attempts with the same remote key but a different file
+
                    // descriptor are conflicting.
+
                    conflicting.extend(self.outbound.iter().filter_map(|(c_fd, other)| {
+
                        if other.nid == nid && *c_fd != fd {
+
                            other.id.map(|c_id| (c_id, Link::Outbound))
+
                        } else {
+
                            None
+
                        }
+
                    }));
+

+
                    for (c_id, c_link) in conflicting {
+
                        // If we have precedence, the inbound connection is closed.
+
                        // In the case where both connections are inbound or outbound,
+
                        // we close the newer connection, ie. the one with the higher
+
                        // resource id.
+
                        let close = match (link, c_link) {
+
                            (Link::Inbound, Link::Outbound) => {
+
                                if precedence {
+
                                    id
+
                                } else {
+
                                    c_id
+
                                }
+
                            }
+
                            (Link::Outbound, Link::Inbound) => {
+
                                if precedence {
+
                                    c_id
+
                                } else {
+
                                    id
+
                                }
+
                            }
+
                            (Link::Inbound, Link::Inbound) => id.max(c_id),
+
                            (Link::Outbound, Link::Outbound) => id.max(c_id),
+
                        };
+

+
                        log::warn!(
+
                            target: "wire", "Established session (id={id}) conflicts with existing session for {nid} (id={c_id})"
+
                        );
+
                        disconnect.push(close);
+
                    }
+
                }
+
                for id in &disconnect {
+
                    log::warn!(
+
                        target: "wire", "Closing conflicting session (id={id}) with {nid}.."
+
                    );
+
                    // Disconnect and return the associated NID of the peer, if available.
+
                    if let Some((nid, link)) = self.disconnect(*id, DisconnectReason::Conflict) {
+
                        // We disconnect the session eagerly because otherwise we will get the new
+
                        // `connected` event before the `disconnect`, resulting in a duplicate
+
                        // connection.
+
                        self.service
+
                            .disconnected(nid, link, &DisconnectReason::Conflict);
+
                    }
+
                }
+
                if !disconnect.contains(&id) {
+
                    self.peers
+
                        .insert(id, Peer::connected(nid, addr.clone(), link));
+
                    self.service.connected(nid, addr.into(), link);
+
                }
+
            }
+
            SessionEvent::Data(data) => {
+
                if let Some(Peer::Connected {
+
                    nid,
+
                    inbox,
+
                    streams,
+
                    ..
+
                }) = self.peers.get_mut(&id)
+
                {
+
                    let metrics = self.metrics.peer(*nid);
+
                    metrics.received_bytes += data.len();
+

+
                    if inbox.input(&data).is_err() {
+
                        log::error!(target: "wire", "Maximum inbox size ({MAX_INBOX_SIZE}) reached for peer {nid}");
+
                        log::error!(target: "wire", "Unable to process messages fast enough for peer {nid}; disconnecting..");
+
                        self.disconnect(id, DisconnectReason::Session(session::Error::Misbehavior));
+

+
                        return;
+
                    }
+

+
                    loop {
+
                        match inbox.deserialize_next() {
+
                            Ok(Some(Frame {
+
                                data: FrameData::Control(frame::Control::Open { stream }),
+
                                ..
+
                            })) => {
+
                                log::debug!(target: "wire", "Received `open` command for stream {stream} from {nid}");
+
                                metrics.streams_opened += 1;
+
                                metrics.received_fetch_requests += 1;
+
                                let reader_limit = self.service.config().limits.fetch_pack_receive;
+
                                let Some(channels) = streams.register(
+
                                    stream,
+
                                    ChannelsConfig::new(FETCH_TIMEOUT)
+
                                        .with_reader_limit(reader_limit),
+
                                ) else {
+
                                    log::warn!(target: "wire", "Peer attempted to open already-open stream stream {stream}");
+
                                    continue;
+
                                };
+

+
                                let task = Task {
+
                                    fetch: FetchRequest::Responder {
+
                                        remote: *nid,
+
                                        emitter: self.service.emitter(),
+
                                    },
+
                                    stream,
+
                                    channels,
+
                                };
+
                                if let Err(e) = self.worker.try_send(task) {
+
                                    log::error!(
+
                                        target: "wire",
+
                                        "Worker pool failed to accept incoming fetch request: {e}"
+
                                    );
+
                                }
+
                            }
+
                            Ok(Some(Frame {
+
                                data: FrameData::Control(frame::Control::Eof { stream }),
+
                                ..
+
                            })) => {
+
                                if let Some(s) = streams.get(&stream) {
+
                                    log::debug!(target: "wire", "Received `end-of-file` on stream {stream} from {nid}");
+

+
                                    if s.channels.send(ChannelEvent::Eof).is_err() {
+
                                        log::error!(target: "wire", "Worker is disconnected; cannot send `EOF`");
+
                                    }
+
                                } else {
+
                                    log::debug!(target: "wire", "Ignoring frame on closed or unknown stream {stream}");
+
                                }
+
                            }
+
                            Ok(Some(Frame {
+
                                data: FrameData::Control(frame::Control::Close { stream }),
+
                                ..
+
                            })) => {
+
                                log::debug!(target: "wire", "Received `close` command for stream {stream} from {nid}");
+

+
                                if let Some(s) = streams.unregister(&stream) {
+
                                    log::debug!(
+
                                        target: "wire",
+
                                        "Stream {stream} of {nid} closed with {} byte(s) sent and {} byte(s) received",
+
                                        s.sent_bytes, s.received_bytes
+
                                    );
+
                                    s.channels.close().ok();
+
                                }
+
                            }
+
                            Ok(Some(Frame {
+
                                data: FrameData::Gossip(msg),
+
                                ..
+
                            })) => {
+
                                metrics.received_gossip_messages += 1;
+
                                self.service.received_message(*nid, msg);
+
                            }
+
                            Ok(Some(Frame {
+
                                stream,
+
                                data: FrameData::Git(data),
+
                                ..
+
                            })) => {
+
                                if let Some(s) = streams.get_mut(&stream) {
+
                                    metrics.received_git_bytes += data.len();
+

+
                                    if s.channels.send(ChannelEvent::Data(data)).is_err() {
+
                                        log::error!(target: "wire", "Worker is disconnected; cannot send data");
+
                                    }
+
                                } else {
+
                                    log::debug!(target: "wire", "Ignoring frame on closed or unknown stream {stream}");
+
                                }
+
                            }
+
                            Ok(None) => {
+
                                // Buffer is empty, or message isn't complete.
+
                                break;
+
                            }
+
                            Err(e) => {
+
                                log::error!(target: "wire", "Invalid gossip message from {nid}: {e}");
+

+
                                if !inbox.is_empty() {
+
                                    log::debug!(target: "wire", "Dropping read buffer for {nid} with {} bytes", inbox.len());
+
                                }
+
                                self.disconnect(
+
                                    id,
+
                                    DisconnectReason::Session(session::Error::Misbehavior),
+
                                );
+
                                break;
+
                            }
+
                        }
+
                    }
+
                } else {
+
                    log::warn!(target: "wire", "Dropping message from unconnected peer (id={id})");
+
                }
+
            }
+
            SessionEvent::Terminated(err) => {
+
                self.disconnect(id, DisconnectReason::Connection(Arc::new(err)));
+
            }
+
        }
    }
-
}
-

-
impl Encode for RefsAt {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        let mut n = 0;

-
        n += self.remote.encode(writer)?;
-
        n += self.at.encode(writer)?;
-

-
        Ok(n)
+
    fn handle_command(&mut self, cmd: Self::Command) {
+
        match cmd {
+
            Control::User(cmd) => self.service.command(cmd),
+
            Control::Worker(result) => self.worker_result(result),
+
            Control::Flush { remote, stream } => self.flush(remote, stream),
+
        }
    }
-
}

-
impl Decode for RefsAt {
-
    fn decode<R: std::io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        let remote = NodeId::decode(reader)?;
-
        let at = git::Oid::decode(reader)?;
-
        Ok(Self { remote, at })
+
    fn handle_error(
+
        &mut self,
+
        err: reactor::Error<NetAccept<WireSession<G>>, NetTransport<WireSession<G>>>,
+
    ) {
+
        match err {
+
            reactor::Error::Poll(err) => {
+
                // TODO: This should be a fatal error, there's nothing we can do here.
+
                log::error!(target: "wire", "Can't poll connections: {err}");
+
            }
+
            reactor::Error::ListenerDisconnect(id, _) => {
+
                // TODO: This should be a fatal error, there's nothing we can do here.
+
                log::error!(target: "wire", "Listener {id} disconnected");
+
            }
+
            reactor::Error::TransportDisconnect(id, transport) => {
+
                let fd = transport.as_raw_fd();
+
                log::error!(target: "wire", "Peer id={id} (fd={fd}) disconnected");
+

+
                // We're dropping the TCP connection here.
+
                drop(transport);
+

+
                // The peer transport is already disconnected and removed from the reactor;
+
                // therefore there is no need to initiate a disconnection. We simply remove
+
                // the peer from the map.
+
                match self.peers.remove(&id) {
+
                    Some(mut peer) => {
+
                        if let Peer::Connected { streams, .. } = &mut peer {
+
                            streams.shutdown();
+
                        }
+

+
                        if let Some(id) = peer.id() {
+
                            self.service.disconnected(
+
                                *id,
+
                                peer.link(),
+
                                &DisconnectReason::connection(),
+
                            );
+
                        } else {
+
                            log::debug!(target: "wire", "Inbound disconnection before handshake; ignoring..")
+
                        }
+
                    }
+
                    None => self.cleanup(id, fd),
+
                }
+
            }
+
        }
    }
-
}

-
impl Encode for node::Features {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        self.deref().encode(writer)
+
    fn handover_listener(&mut self, id: ResourceId, _listener: Self::Listener) {
+
        log::error!(target: "wire", "Listener handover is not supported (id={id})");
+
    }
+

+
    fn handover_transport(&mut self, id: ResourceId, transport: Self::Transport) {
+
        let fd = transport.as_raw_fd();
+

+
        match self.peers.entry(id) {
+
            Entry::Occupied(e) => {
+
                match e.get() {
+
                    Peer::Disconnecting {
+
                        nid, reason, link, ..
+
                    } => {
+
                        log::debug!(target: "wire", "Transport handover for disconnecting peer with id={id} (fd={fd})");
+

+
                        // Disconnect TCP stream.
+
                        drop(transport);
+

+
                        // If there is no NID, the service is not aware of the peer.
+
                        if let Some(nid) = nid {
+
                            // In the case of a conflicting connection, there will be two resources
+
                            // for the peer. However, at the service level, there is only one, and
+
                            // it is identified by NID.
+
                            //
+
                            // Therefore, we specify which of the connections we're closing by
+
                            // passing the `link`.
+
                            self.service.disconnected(*nid, *link, reason);
+
                        }
+
                        e.remove();
+
                    }
+
                    Peer::Connected { nid, .. } => {
+
                        panic!("Wire::handover_transport: Unexpected handover of connected peer {} with id={id} (fd={fd})", nid);
+
                    }
+
                }
+
            }
+
            Entry::Vacant(_) => self.cleanup(id, fd),
+
        }
    }
}

-
impl Decode for node::Features {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        let features = u64::decode(reader)?;
-

-
        Ok(Self::from(features))
+
impl<D, S, G> Iterator for Wire<D, S, G>
+
where
+
    D: service::Store,
+
    S: WriteStorage + 'static,
+
    G: crypto::signature::Signer<crypto::Signature> + Ecdh<Pk = NodeId> + Clone,
+
{
+
    type Item = Action<G>;
+

+
    fn next(&mut self) -> Option<Self::Item> {
+
        while let Some(ev) = self.service.next() {
+
            match ev {
+
                Io::Write(node_id, msgs) => {
+
                    let (fd, link) = match self.peers.lookup(&node_id) {
+
                        Some((fd, Peer::Connected { link, .. })) => (fd, *link),
+
                        Some((_, peer)) => {
+
                            // If the peer is disconnected by the wire protocol, the service may
+
                            // not be aware of this yet, and may continue to write messages to it.
+
                            log::debug!(target: "wire", "Dropping {} message(s) to {node_id} ({peer:?})", msgs.len());
+
                            continue;
+
                        }
+
                        None => {
+
                            log::error!(target: "wire", "Dropping {} message(s) to {node_id}: unknown peer", msgs.len());
+
                            continue;
+
                        }
+
                    };
+
                    log::trace!(
+
                        target: "wire", "Writing {} message(s) to {}", msgs.len(), node_id
+
                    );
+
                    let mut data = Vec::new();
+
                    let metrics = self.metrics.peer(node_id);
+
                    metrics.sent_gossip_messages += msgs.len();
+

+
                    for msg in msgs {
+
                        Frame::gossip(link, msg).encode(&mut data);
+
                    }
+
                    metrics.sent_bytes += data.len();
+

+
                    self.actions.push_back(reactor::Action::Send(fd, data));
+
                }
+
                Io::Connect(node_id, addr) => {
+
                    if self.peers.connected().any(|(_, id)| id == &node_id) {
+
                        log::error!(
+
                            target: "wire",
+
                            "Attempt to connect to already connected peer {node_id}"
+
                        );
+
                        // FIXME: The problem here is the session will stay in "initial" state,
+
                        // because it can't transition to attempted.
+
                        continue;
+
                    }
+
                    self.service.attempted(node_id, addr.clone());
+
                    self.metrics.peer(node_id).outbound_connection_attempts += 1;
+

+
                    match dial::<G>(
+
                        addr.to_inner(),
+
                        node_id,
+
                        self.signer.clone().into_inner(),
+
                        self.service.config(),
+
                    )
+
                    .and_then(|session| {
+
                        NetTransport::<WireSession<G>>::with_session(
+
                            session,
+
                            netservices::Direction::Outbound,
+
                        )
+
                    }) {
+
                        Ok(transport) => {
+
                            self.outbound.insert(
+
                                transport.as_raw_fd(),
+
                                Outbound {
+
                                    id: None,
+
                                    nid: node_id,
+
                                    addr: addr.to_inner(),
+
                                },
+
                            );
+
                            log::debug!(
+
                                target: "wire",
+
                                "Registering outbound transport for {node_id} (fd={})..",
+
                                transport.as_raw_fd()
+
                            );
+
                            self.actions
+
                                .push_back(reactor::Action::RegisterTransport(transport));
+
                        }
+
                        Err(err) => {
+
                            log::error!(target: "wire", "Error establishing connection to {addr}: {err}");
+

+
                            self.service.disconnected(
+
                                node_id,
+
                                Link::Outbound,
+
                                &DisconnectReason::Dial(Arc::new(err)),
+
                            );
+
                        }
+
                    }
+
                }
+
                Io::Disconnect(nid, reason) => {
+
                    if let Some((id, Peer::Connected { .. })) = self.peers.lookup(&nid) {
+
                        if let Some((nid, _)) = self.disconnect(id, reason) {
+
                            self.metrics.peer(nid).disconnects += 1;
+
                        }
+
                    } else {
+
                        log::warn!(target: "wire", "Peer {nid} is not connected: ignoring disconnect");
+
                    }
+
                }
+
                Io::Wakeup(d) => {
+
                    self.actions.push_back(reactor::Action::SetTimer(d.into()));
+
                }
+
                Io::Fetch {
+
                    rid,
+
                    remote,
+
                    timeout,
+
                    reader_limit,
+
                    refs_at,
+
                } => {
+
                    log::trace!(target: "wire", "Processing fetch for {rid} from {remote}..");
+

+
                    let Some((fd, Peer::Connected { link, streams, .. })) =
+
                        self.peers.lookup_mut(&remote)
+
                    else {
+
                        // Nb. It's possible that a peer is disconnected while an `Io::Fetch`
+
                        // is in the service's i/o buffer. Since the service may not purge the
+
                        // buffer on disconnect, we should just ignore i/o actions that don't
+
                        // have a connected peer.
+
                        log::error!(target: "wire", "Peer {remote} is not connected: dropping fetch");
+
                        continue;
+
                    };
+
                    let (stream, channels) =
+
                        streams.open(ChannelsConfig::new(timeout).with_reader_limit(reader_limit));
+

+
                    log::debug!(target: "wire", "Opened new stream with id {stream} for {rid} and remote {remote}");
+

+
                    let link = *link;
+
                    let task = Task {
+
                        fetch: FetchRequest::Initiator {
+
                            rid,
+
                            remote,
+
                            refs_at,
+
                        },
+
                        stream,
+
                        channels,
+
                    };
+

+
                    if !self.worker.is_empty() {
+
                        log::warn!(
+
                            target: "wire",
+
                            "Worker pool is busy: {} tasks pending, fetch requests may be delayed", self.worker.len()
+
                        );
+
                    }
+
                    if let Err(e) = self.worker.try_send(task) {
+
                        log::error!(
+
                            target: "wire",
+
                            "Worker pool failed to accept outgoing fetch request: {e}"
+
                        );
+
                    }
+
                    let metrics = self.metrics.peer(remote);
+
                    metrics.streams_opened += 1;
+
                    metrics.sent_fetch_requests += 1;
+

+
                    self.actions.push_back(Action::Send(
+
                        fd,
+
                        Frame::<service::Message>::control(link, frame::Control::Open { stream })
+
                            .to_bytes(),
+
                    ));
+
                }
+
            }
+
        }
+
        self.actions.pop_front()
    }
}

-
impl Decode for tor::OnionAddrV3 {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        let bytes: [u8; tor::ONION_V3_RAW_LEN] = Decode::decode(reader)?;
-
        let addr = tor::OnionAddrV3::from_raw_bytes(bytes)?;
-

-
        Ok(addr)
-
    }
+
/// Establish a new outgoing connection.
///
/// Resolves the transport address to connect to from `remote_addr` and the
/// node's proxy/onion configuration, opens a non-blocking TCP connection to
/// it, and returns a [`WireSession`] that will perform the Noise handshake
/// as initiator, pinned to `remote_id`.
///
/// # Errors
///
/// Returns an [`io::ErrorKind::Unsupported`] error if the remote is a
/// `.onion` address and no onion configuration is present, or if the
/// address type is not one of IP, DNS or Tor. Propagates any error from
/// establishing the TCP connection.
pub fn dial<G: Ecdh<Pk = NodeId>>(
    remote_addr: NetAddr<HostName>,
    remote_id: <G as EcSk>::Pk,
    signer: G,
    config: &radicle::node::Config,
) -> io::Result<WireSession<G>> {
    // Determine what address to establish a TCP connection with, given the remote peer
    // address and our node configuration.
    let inet_addr: NetAddr<InetHost> = match (&remote_addr.host, config.proxy) {
        // For IP and DNS addresses, use the global proxy if set, otherwise use the address as-is.
        (HostName::Ip(_), Some(proxy)) => proxy.into(),
        (HostName::Ip(ip), None) => NetAddr::new(InetHost::Ip(*ip), remote_addr.port),
        (HostName::Dns(_), Some(proxy)) => proxy.into(),
        (HostName::Dns(dns), None) => NetAddr::new(InetHost::Dns(dns.clone()), remote_addr.port),
        // For onion addresses, handle with care.
        (HostName::Tor(onion), proxy) => match config.onion {
            // In onion proxy mode, simply use the configured proxy address.
            // This takes precedence over any global proxy.
            Some(AddressConfig::Proxy { address }) => address.into(),
            // In "forward" mode, if a global proxy is set, we use that, otherwise
            // we treat `.onion` addresses as regular DNS names.
            Some(AddressConfig::Forward) => {
                if let Some(proxy) = proxy {
                    proxy.into()
                } else {
                    NetAddr::new(InetHost::Dns(onion.to_string()), remote_addr.port)
                }
            }
            // If onion address support isn't configured, refuse to connect.
            None => {
                return Err(io::Error::new(
                    io::ErrorKind::Unsupported,
                    "no configuration found for .onion addresses",
                ));
            }
        },
        // NOTE(review): `HostName` is `non_exhaustive` upstream, so a catch-all
        // is required here; any future host kinds are rejected until supported.
        _ => {
            return Err(io::Error::new(
                io::ErrorKind::Unsupported,
                "unsupported remote address type",
            ));
        }
    };
    // Nb. This timeout is currently not used by the underlying library due to the
    // `socket2` library not supporting non-blocking connect with timeout.
    let connection = net::TcpStream::connect_nonblocking(inet_addr, DEFAULT_DIAL_TIMEOUT)?;
    // Whether to tunnel regular connections through the proxy.
    let force_proxy = config.proxy.is_some();

    session::<G>(
        remote_addr,
        Some(remote_id),
        connection,
        force_proxy,
        signer,
    )
}

-
impl Encode for Timestamp {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        self.deref().encode(writer)
-
    }
+
/// Accept a new connection.
///
/// Builds a responder-side [`WireSession`] over an already-established
/// inbound `connection`. No remote key is pinned (`remote_id` is `None`):
/// the peer identifies itself during the Noise handshake. Proxy tunneling
/// is disabled, since the connection was initiated by the remote.
pub fn accept<G: Ecdh<Pk = NodeId>>(
    remote_addr: NetAddr<HostName>,
    connection: net::TcpStream,
    signer: G,
) -> io::Result<WireSession<G>> {
    session::<G>(remote_addr, None, connection, false, signer)
}

-
impl Decode for Timestamp {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, Error> {
-
        let millis = u64::decode(reader)?;
-
        let ts = Timestamp::try_from(millis).map_err(Error::InvalidTimestamp)?;
-

-
        Ok(ts)
-
    }
+
/// Create a new [`WireSession`].
///
/// Configures the TCP socket (TCP_NODELAY, read/write timeouts, keep-alive),
/// wraps it in a SOCKS5 layer — a pass-through unless `force_proxy` is set —
/// and initializes the Noise XK handshake state with a fresh ephemeral key.
/// `remote_id` is `Some` for outgoing connections (we act as handshake
/// initiator) and `None` for incoming ones (we act as responder).
///
/// # Errors
///
/// Fails if the read or write timeout cannot be set on the socket. Failures
/// to set TCP_NODELAY or keep-alive are logged but not fatal.
fn session<G: Ecdh<Pk = NodeId>>(
    remote_addr: NetAddr<HostName>,
    remote_id: Option<NodeId>,
    connection: net::TcpStream,
    force_proxy: bool,
    signer: G,
) -> io::Result<WireSession<G>> {
    // There are issues with setting TCP_NODELAY on WSL. Not a big deal.
    if let Err(e) = connection.set_nodelay(true) {
        log::warn!(target: "wire", "Unable to set TCP_NODELAY on fd {}: {e}", connection.as_raw_fd());
    }
    connection.set_read_timeout(Some(DEFAULT_CONNECTION_TIMEOUT))?;
    connection.set_write_timeout(Some(DEFAULT_CONNECTION_TIMEOUT))?;

    // Keep-alive: first probe after 30s of idleness, then every 10s,
    // giving up after 3 failed probes.
    let sock = socket2::Socket::from(connection);
    let ka = socket2::TcpKeepalive::new()
        .with_time(time::Duration::from_secs(30))
        .with_interval(time::Duration::from_secs(10))
        .with_retries(3);
    if let Err(e) = sock.set_tcp_keepalive(&ka) {
        log::warn!(target: "wire", "Unable to set TCP_KEEPALIVE on fd {}: {e}", sock.as_raw_fd());
    }

    // SOCKS5 framing is a no-op unless `force_proxy` tunnels the stream.
    let socks5 = socks5::Socks5::with(remote_addr, force_proxy);
    let proxy = Socks5Session::with(sock.into(), socks5);
    // Fresh ephemeral keypair for this session; the static key is `signer`.
    let pair = G::generate_keypair();
    let keyset = Keyset {
        e: pair.0,
        s: Some(signer),
        re: None,
        rs: remote_id,
    };
    // Noise XK: the initiator (remote_id.is_some()) knows the responder's
    // static key ahead of time; no prologue data is used.
    let noise = NoiseState::initialize::<{ Sha256::OUTPUT_LEN }>(
        NOISE_XK,
        remote_id.is_some(),
        &[],
        keyset,
    );
    Ok(WireSession::with(proxy, noise))
}

#[cfg(test)]
mod test {
    use super::*;

    use crate::service::{Message, ZeroBytes};
    use crate::wire;
    use crate::wire::varint;

    /// A gossip frame whose payload carries unknown trailing (extension)
    /// data after the `Pong` message must still decode, with the trailing
    /// bytes silently dropped — this is the forward-compatibility contract
    /// of the gossip payload format.
    #[test]
    fn test_pong_message_with_extension() {
        use radicle_protocol::deserializer;

        let mut stream = Vec::new();
        let pong = Message::Pong {
            zeroes: ZeroBytes::new(42),
        };
        // Hand-build the frame header: version string, then gossip stream id.
        frame::PROTOCOL_VERSION_STRING.encode(&mut stream);
        frame::StreamId::gossip(Link::Outbound).encode(&mut stream);

        // Serialize gossip message with some extension fields.
        let mut gossip = wire::serialize(&pong);
        String::from("extra").encode(&mut gossip);
        48u8.encode(&mut gossip);

        // Encode gossip message using the varint-prefix format into the stream.
        varint::payload::encode(&gossip, &mut stream);

        let mut de = deserializer::Deserializer::<1024, Frame>::new(1024);
        de.input(&stream).unwrap();

        // The "pong" message decodes successfully, even though there is trailing data.
        assert_eq!(
            de.deserialize_next().unwrap().unwrap(),
            Frame::gossip(Link::Outbound, pong)
        );
        // The extension bytes were consumed with the frame: nothing is left over.
        assert!(de.deserialize_next().unwrap().is_none());
        assert!(de.is_empty());
    }

    /// Round-trips an announcement carrying an extension field, decoded both
    /// by an extension-aware message type and by the plain `Message` type:
    /// both must succeed on the same byte stream, proving old and new peers
    /// can interoperate.
    #[test]
    fn test_inventory_ann_with_extension() {
        use radicle_protocol::deserializer;

        // A `Message` extended with one extra trailing field, standing in
        // for a future protocol revision.
        #[derive(Debug)]
        struct MessageWithExt {
            msg: Message,
            ext: String,
        }

        impl wire::Encode for MessageWithExt {
            fn encode(&self, writer: &mut impl bytes::BufMut) {
                self.msg.encode(writer);
                self.ext.encode(writer);
            }
        }

        impl wire::Decode for MessageWithExt {
            fn decode(reader: &mut impl bytes::Buf) -> Result<Self, wire::Error> {
                let msg = Message::decode(reader)?;
                // The extension is optional: absent bytes decode to "".
                let ext = String::decode(reader).unwrap_or_default();

                Ok(MessageWithExt { msg, ext })
            }
        }

        let rid = radicle::test::arbitrary::gen(1);
        let pk = radicle::test::arbitrary::gen(1);
        let sig: [u8; 64] = radicle::test::arbitrary::gen(1);

        // Message with extension.
        let mut stream = Vec::new();
        let ann = Message::announcement(
            pk,
            service::gossip::inventory(radicle::node::Timestamp::MAX, [rid]),
            radicle::crypto::Signature::from(sig),
        );
        let pong = Message::Pong {
            zeroes: ZeroBytes::new(42),
        };
        // Framed message with extension.
        frame::Frame::gossip(
            Link::Outbound,
            MessageWithExt {
                msg: ann.clone(),
                ext: String::from("extra"),
            },
        )
        .encode(&mut stream);
        // Pong message that comes after, without extension.
        frame::Frame::gossip(Link::Outbound, pong.clone()).encode(&mut stream);

        // First test deserializing using the message with extension type.
        {
            let mut de = deserializer::Deserializer::<1024, Frame<MessageWithExt>>::new(1024);
            de.input(&stream).unwrap();

            radicle::assert_matches!(
                de.deserialize_next().unwrap().unwrap().data,
                FrameData::Gossip(MessageWithExt {
                    msg,
                    ext,
                }) if msg == ann && ext == *"extra"
            );
            radicle::assert_matches!(
                de.deserialize_next().unwrap().unwrap().data,
                FrameData::Gossip(MessageWithExt {
                    msg,
                    ext,
                }) if msg == pong && ext.is_empty()
            );
            assert!(de.deserialize_next().unwrap().is_none());
            assert!(de.is_empty());
        }

        // Then test deserializing using the current message type without the extension.
        {
            let mut de = deserializer::Deserializer::<1024, Frame<Message>>::new(1024);
            de.input(&stream).unwrap();

            radicle::assert_matches!(
                de.deserialize_next().unwrap().unwrap().data,
                FrameData::Gossip(msg)
                if msg == ann
            );
            radicle::assert_matches!(
                de.deserialize_next().unwrap().unwrap().data,
                FrameData::Gossip(msg)
                if msg == pong
            );
            assert!(de.deserialize_next().unwrap().is_none());
            assert!(de.is_empty());
        }
    }
}
deleted crates/radicle-node/src/wire/frame.rs
@@ -1,395 +0,0 @@
-
//! Framing protocol.
-
#![warn(clippy::missing_docs_in_private_items)]
-
use std::{fmt, io};
-

-
use crate::{wire, wire::varint, wire::varint::VarInt, wire::Message, Link, PROTOCOL_VERSION};
-

-
/// Protocol version strings all start with the magic sequence `rad`, followed
-
/// by a version number.
-
pub const PROTOCOL_VERSION_STRING: Version = Version([b'r', b'a', b'd', PROTOCOL_VERSION]);
-

-
/// Control open byte.
-
const CONTROL_OPEN: u8 = 0;
-
/// Control close byte.
-
const CONTROL_CLOSE: u8 = 1;
-
/// Control EOF byte.
-
const CONTROL_EOF: u8 = 2;
-

-
/// Protocol version.
-
#[derive(Debug, PartialEq, Eq)]
-
pub struct Version([u8; 4]);
-

-
impl Version {
-
    /// Version number.
-
    pub fn number(&self) -> u8 {
-
        self.0[3]
-
    }
-
}
-

-
impl wire::Encode for Version {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        writer.write_all(&PROTOCOL_VERSION_STRING.0)?;
-

-
        Ok(PROTOCOL_VERSION_STRING.0.len())
-
    }
-
}
-

-
impl wire::Decode for Version {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, wire::Error> {
-
        let mut version = [0u8; 4];
-
        reader.read_exact(&mut version[..])?;
-

-
        if version != PROTOCOL_VERSION_STRING.0 {
-
            return Err(wire::Error::InvalidProtocolVersion(version));
-
        }
-
        Ok(Self(version))
-
    }
-
}
-

-
/// Identifies a (multiplexed) stream.
-
///
-
/// Stream IDs are variable-length integers with the least significant 3 bits
-
/// denoting the stream type and initiator.
-
///
-
/// The first bit denotes the initiator (outbound or inbound), while the second
-
/// and third bit denote the stream type. See `StreamKind`.
-
///
-
/// In a situation where Alice connects to Bob, Alice will have the initiator
-
/// bit set to `1` for all streams she creates, while Bob will have it set to `0`.
-
///
-
/// This ensures that Stream IDs never collide.
-
/// Additionally, Stream IDs must never be re-used within a connection.
-
///
-
/// +=======+==================================+
-
/// | Bits  | Stream Type                      |
-
/// +=======+==================================+
-
/// | 0b000 | Outbound Control stream          |
-
/// +-------+----------------------------------+
-
/// | 0b001 | Inbound Control stream           |
-
/// +-------+----------------------------------+
-
/// | 0b010 | Outbound Gossip stream           |
-
/// +-------+----------------------------------+
-
/// | 0b011 | Inbound Gossip stream            |
-
/// +-------+----------------------------------+
-
/// | 0b100 | Outbound Git stream              |
-
/// +-------+----------------------------------+
-
/// | 0b101 | Inbound Git stream               |
-
/// +-------+----------------------------------+
-
///
-
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
-
pub struct StreamId(VarInt);
-

-
impl StreamId {
-
    /// Get the initiator of this stream.
-
    pub fn link(&self) -> Link {
-
        let n = *self.0;
-
        if 0b1 & n == 0 {
-
            Link::Outbound
-
        } else {
-
            Link::Inbound
-
        }
-
    }
-

-
    /// Get the kind of stream this is.
-
    pub fn kind(&self) -> Result<StreamKind, u8> {
-
        let id = *self.0;
-
        let kind = ((id >> 1) & 0b11) as u8;
-

-
        StreamKind::try_from(kind)
-
    }
-

-
    /// Create a control identifier.
-
    pub fn control(link: Link) -> Self {
-
        let link = if link.is_outbound() { 0 } else { 1 };
-
        Self(VarInt::from(((StreamKind::Control as u8) << 1) | link))
-
    }
-

-
    /// Create a gossip identifier.
-
    pub fn gossip(link: Link) -> Self {
-
        let link = if link.is_outbound() { 0 } else { 1 };
-
        Self(VarInt::from(((StreamKind::Gossip as u8) << 1) | link))
-
    }
-

-
    /// Create a git identifier.
-
    pub fn git(link: Link) -> Self {
-
        let link = if link.is_outbound() { 0 } else { 1 };
-
        Self(VarInt::from(((StreamKind::Git as u8) << 1) | link))
-
    }
-

-
    /// Get the nth identifier while preserving the stream type and initiator.
-
    pub fn nth(self, n: u64) -> Result<Self, varint::BoundsExceeded> {
-
        let id = *self.0 + (n << 3);
-
        VarInt::new(id).map(Self)
-
    }
-
}
-

-
impl From<StreamId> for u64 {
-
    fn from(value: StreamId) -> Self {
-
        *value.0
-
    }
-
}
-

-
impl From<StreamId> for VarInt {
-
    fn from(value: StreamId) -> Self {
-
        value.0
-
    }
-
}
-

-
impl fmt::Display for StreamId {
-
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-
        write!(f, "{}", *self.0)
-
    }
-
}
-

-
impl wire::Decode for StreamId {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, wire::Error> {
-
        let id = VarInt::decode(reader)?;
-
        Ok(Self(id))
-
    }
-
}
-

-
impl wire::Encode for StreamId {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        self.0.encode(writer)
-
    }
-
}
-

-
/// Type of stream.
-
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
-
#[repr(u8)]
-
pub enum StreamKind {
-
    /// Control stream, used to open and close streams.
-
    Control = 0b00,
-
    /// Gossip stream, used to exchange messages.
-
    Gossip = 0b01,
-
    /// Git stream, used for replication.
-
    Git = 0b10,
-
}
-

-
impl TryFrom<u8> for StreamKind {
-
    type Error = u8;
-

-
    fn try_from(value: u8) -> Result<Self, Self::Error> {
-
        match value {
-
            0b00 => Ok(StreamKind::Control),
-
            0b01 => Ok(StreamKind::Gossip),
-
            0b10 => Ok(StreamKind::Git),
-
            n => Err(n),
-
        }
-
    }
-
}
-

-
/// Protocol frame.
-
///
-
///  0                   1                   2                   3
-
///  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
/// |      'r'      |      'a'      |      'd'      |      0x1      | Version
-
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
/// |                     Stream ID                           |TTT|I| Stream ID with Stream [T]ype and [I]nitiator bits
-
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
/// |                     Data                                   ...| Data (variable size)
-
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
#[derive(Debug, PartialEq, Eq)]
-
pub struct Frame<M = Message> {
-
    /// The protocol version.
-
    pub version: Version,
-
    /// The stream identifier.
-
    pub stream: StreamId,
-
    /// The frame payload.
-
    pub data: FrameData<M>,
-
}
-

-
impl<M> Frame<M> {
-
    /// Create a 'git' protocol frame.
-
    pub fn git(stream: StreamId, data: Vec<u8>) -> Self {
-
        Self {
-
            version: PROTOCOL_VERSION_STRING,
-
            stream,
-
            data: FrameData::Git(data),
-
        }
-
    }
-

-
    /// Create a 'control' protocol frame.
-
    pub fn control(link: Link, ctrl: Control) -> Self {
-
        Self {
-
            version: PROTOCOL_VERSION_STRING,
-
            stream: StreamId::control(link),
-
            data: FrameData::Control(ctrl),
-
        }
-
    }
-

-
    /// Create a 'gossip' protocol frame.
-
    pub fn gossip(link: Link, msg: M) -> Self {
-
        Self {
-
            version: PROTOCOL_VERSION_STRING,
-
            stream: StreamId::gossip(link),
-
            data: FrameData::Gossip(msg),
-
        }
-
    }
-
}
-

-
impl<M: wire::Encode> Frame<M> {
-
    /// Serialize frame to bytes.
-
    pub fn to_bytes(&self) -> Vec<u8> {
-
        wire::serialize(self)
-
    }
-
}
-

-
/// Frame payload.
-
#[derive(Debug, PartialEq, Eq)]
-
pub enum FrameData<M> {
-
    /// Control frame payload.
-
    Control(Control),
-
    /// Gossip frame payload.
-
    Gossip(M),
-
    /// Git frame payload. May contain packet-lines as well as packfile data.
-
    Git(Vec<u8>),
-
}
-

-
/// A control message sent over a control stream.
-
#[derive(Debug, PartialEq, Eq)]
-
pub enum Control {
-
    /// Open a new stream.
-
    Open {
-
        /// The stream to open.
-
        stream: StreamId,
-
    },
-
    /// Close an existing stream.
-
    Close {
-
        /// The stream to close.
-
        stream: StreamId,
-
    },
-
    /// Signal an end-of-file. This can be used to simulate connections terminating
-
    /// without having to close the connection. These control messages are turned into
-
    /// [`io::ErrorKind::UnexpectedEof`] errors on read.
-
    Eof {
-
        /// The stream to send an EOF on.
-
        stream: StreamId,
-
    },
-
}
-

-
impl wire::Decode for Control {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, wire::Error> {
-
        let command = u8::decode(reader)?;
-
        match command {
-
            CONTROL_OPEN => {
-
                let stream = StreamId::decode(reader)?;
-
                Ok(Control::Open { stream })
-
            }
-
            CONTROL_CLOSE => {
-
                let stream = StreamId::decode(reader)?;
-
                Ok(Control::Close { stream })
-
            }
-
            CONTROL_EOF => {
-
                let stream = StreamId::decode(reader)?;
-
                Ok(Control::Eof { stream })
-
            }
-
            other => Err(wire::Error::InvalidControlMessage(other)),
-
        }
-
    }
-
}
-

-
impl wire::Encode for Control {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        let mut n = 0;
-

-
        match self {
-
            Self::Open { stream: id } => {
-
                n += CONTROL_OPEN.encode(writer)?;
-
                n += id.encode(writer)?;
-
            }
-
            Self::Eof { stream: id } => {
-
                n += CONTROL_EOF.encode(writer)?;
-
                n += id.encode(writer)?;
-
            }
-
            Self::Close { stream: id } => {
-
                n += CONTROL_CLOSE.encode(writer)?;
-
                n += id.encode(writer)?;
-
            }
-
        }
-
        Ok(n)
-
    }
-
}
-

-
impl<M: wire::Decode> wire::Decode for Frame<M> {
-
    fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, wire::Error> {
-
        let version = Version::decode(reader)?;
-
        if version.number() != PROTOCOL_VERSION {
-
            return Err(wire::Error::WrongProtocolVersion(version.number()));
-
        }
-
        let stream = StreamId::decode(reader)?;
-

-
        match stream.kind() {
-
            Ok(StreamKind::Control) => {
-
                let ctrl = Control::decode(reader)?;
-
                let frame = Frame {
-
                    version,
-
                    stream,
-
                    data: FrameData::Control(ctrl),
-
                };
-
                Ok(frame)
-
            }
-
            Ok(StreamKind::Gossip) => {
-
                let data = varint::payload::decode(reader)?;
-
                let mut cursor = io::Cursor::new(data);
-
                let msg = M::decode(&mut cursor)?;
-
                let frame = Frame {
-
                    version,
-
                    stream,
-
                    data: FrameData::Gossip(msg),
-
                };
-

-
                // Nb. If there is data after the `Message` that is not decoded,
-
                // it is simply dropped here.
-

-
                Ok(frame)
-
            }
-
            Ok(StreamKind::Git { .. }) => {
-
                let data = varint::payload::decode(reader)?;
-
                Ok(Frame::git(stream, data))
-
            }
-
            Err(n) => Err(wire::Error::InvalidStreamKind(n)),
-
        }
-
    }
-
}
-

-
impl<M: wire::Encode> wire::Encode for Frame<M> {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        let mut n = 0;
-

-
        n += self.version.encode(writer)?;
-
        n += self.stream.encode(writer)?;
-
        n += match &self.data {
-
            FrameData::Control(ctrl) => ctrl.encode(writer)?,
-
            FrameData::Git(data) => varint::payload::encode(data, writer)?,
-
            FrameData::Gossip(msg) => varint::payload::encode(&wire::serialize(msg), writer)?,
-
        };
-

-
        Ok(n)
-
    }
-
}
-

-
#[cfg(test)]
-
mod test {
-
    use super::*;
-

-
    #[test]
-
    fn test_stream_id() {
-
        assert_eq!(StreamId(VarInt(0b000)).kind().unwrap(), StreamKind::Control);
-
        assert_eq!(StreamId(VarInt(0b010)).kind().unwrap(), StreamKind::Gossip);
-
        assert_eq!(StreamId(VarInt(0b100)).kind().unwrap(), StreamKind::Git);
-
        assert_eq!(StreamId(VarInt(0b001)).link(), Link::Inbound);
-
        assert_eq!(StreamId(VarInt(0b000)).link(), Link::Outbound);
-
        assert_eq!(StreamId(VarInt(0b101)).link(), Link::Inbound);
-
        assert_eq!(StreamId(VarInt(0b100)).link(), Link::Outbound);
-

-
        assert_eq!(StreamId::git(Link::Outbound), StreamId(VarInt(0b100)));
-
        assert_eq!(StreamId::control(Link::Outbound), StreamId(VarInt(0b000)));
-
        assert_eq!(StreamId::gossip(Link::Outbound), StreamId(VarInt(0b010)));
-

-
        assert_eq!(StreamId::git(Link::Inbound), StreamId(VarInt(0b101)));
-
        assert_eq!(StreamId::control(Link::Inbound), StreamId(VarInt(0b001)));
-
        assert_eq!(StreamId::gossip(Link::Inbound), StreamId(VarInt(0b011)));
-
    }
-
}
deleted crates/radicle-node/src/wire/message.rs
@@ -1,600 +0,0 @@
-
use std::{io, mem, net};
-

-
use byteorder::{NetworkEndian, ReadBytesExt};
-
use cyphernet::addr::{tor, Addr, HostName, NetAddr};
-
use radicle::git::Oid;
-
use radicle::node::Address;
-

-
use crate::prelude::*;
-
use crate::service::message::*;
-
use crate::wire;
-
use crate::wire::{Decode, Encode};
-

-
/// Message type.
-
#[repr(u16)]
-
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-
pub enum MessageType {
-
    NodeAnnouncement = 2,
-
    InventoryAnnouncement = 4,
-
    RefsAnnouncement = 6,
-
    Subscribe = 8,
-
    Ping = 10,
-
    Pong = 12,
-
    Info = 14,
-
}
-

-
impl From<MessageType> for u16 {
-
    fn from(other: MessageType) -> Self {
-
        other as u16
-
    }
-
}
-

-
impl TryFrom<u16> for MessageType {
-
    type Error = u16;
-

-
    fn try_from(other: u16) -> Result<Self, Self::Error> {
-
        match other {
-
            2 => Ok(MessageType::NodeAnnouncement),
-
            4 => Ok(MessageType::InventoryAnnouncement),
-
            6 => Ok(MessageType::RefsAnnouncement),
-
            8 => Ok(MessageType::Subscribe),
-
            10 => Ok(MessageType::Ping),
-
            12 => Ok(MessageType::Pong),
-
            14 => Ok(MessageType::Info),
-
            _ => Err(other),
-
        }
-
    }
-
}
-

-
impl Message {
-
    /// The maximum supported message size in bytes.
-
    pub const MAX_SIZE: wire::Size =
-
        wire::Size::MAX - (mem::size_of::<MessageType>() as wire::Size);
-

-
    pub fn type_id(&self) -> u16 {
-
        match self {
-
            Self::Subscribe { .. } => MessageType::Subscribe,
-
            Self::Announcement(Announcement { message, .. }) => match message {
-
                AnnouncementMessage::Node(_) => MessageType::NodeAnnouncement,
-
                AnnouncementMessage::Inventory(_) => MessageType::InventoryAnnouncement,
-
                AnnouncementMessage::Refs(_) => MessageType::RefsAnnouncement,
-
            },
-
            Self::Info(_) => MessageType::Info,
-
            Self::Ping { .. } => MessageType::Ping,
-
            Self::Pong { .. } => MessageType::Pong,
-
        }
-
        .into()
-
    }
-
}
-

-
impl netservices::Frame for Message {
-
    type Error = wire::Error;
-

-
    fn unmarshall(mut reader: impl io::Read) -> Result<Option<Self>, Self::Error> {
-
        match Message::decode(&mut reader) {
-
            Ok(msg) => Ok(Some(msg)),
-
            Err(wire::Error::Io(_)) => Ok(None),
-
            Err(err) => Err(err),
-
        }
-
    }
-

-
    fn marshall(&self, mut writer: impl io::Write) -> Result<usize, Self::Error> {
-
        self.encode(&mut writer).map_err(wire::Error::from)
-
    }
-
}
-

-
/// Address type.
-
#[repr(u8)]
-
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-
pub enum AddressType {
-
    Ipv4 = 1,
-
    Ipv6 = 2,
-
    Dns = 3,
-
    Onion = 4,
-
}
-

-
impl From<AddressType> for u8 {
-
    fn from(other: AddressType) -> Self {
-
        other as u8
-
    }
-
}
-

-
impl From<&Address> for AddressType {
-
    fn from(a: &Address) -> Self {
-
        match a.host {
-
            HostName::Ip(net::IpAddr::V4(_)) => AddressType::Ipv4,
-
            HostName::Ip(net::IpAddr::V6(_)) => AddressType::Ipv6,
-
            HostName::Dns(_) => AddressType::Dns,
-
            HostName::Tor(_) => AddressType::Onion,
-
            _ => todo!(), // FIXME(cloudhead): Maxim will remove `non-exhaustive`
-
        }
-
    }
-
}
-

-
impl TryFrom<u8> for AddressType {
-
    type Error = u8;
-

-
    fn try_from(other: u8) -> Result<Self, Self::Error> {
-
        match other {
-
            1 => Ok(AddressType::Ipv4),
-
            2 => Ok(AddressType::Ipv6),
-
            3 => Ok(AddressType::Dns),
-
            4 => Ok(AddressType::Onion),
-
            _ => Err(other),
-
        }
-
    }
-
}
-

-
impl wire::Encode for AnnouncementMessage {
-
    fn encode<W: std::io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, std::io::Error> {
-
        match self {
-
            Self::Node(ann) => ann.encode(writer),
-
            Self::Inventory(ann) => ann.encode(writer),
-
            Self::Refs(ann) => ann.encode(writer),
-
        }
-
    }
-
}
-

-
impl wire::Encode for RefsAnnouncement {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        let mut n = 0;
-

-
        n += self.rid.encode(writer)?;
-
        n += self.refs.encode(writer)?;
-
        n += self.timestamp.encode(writer)?;
-

-
        Ok(n)
-
    }
-
}
-

-
impl wire::Decode for RefsAnnouncement {
-
    fn decode<R: std::io::Read + ?Sized>(reader: &mut R) -> Result<Self, wire::Error> {
-
        let rid = RepoId::decode(reader)?;
-
        let refs = BoundedVec::<_, REF_REMOTE_LIMIT>::decode(reader)?;
-
        let timestamp = Timestamp::decode(reader)?;
-

-
        Ok(Self {
-
            rid,
-
            refs,
-
            timestamp,
-
        })
-
    }
-
}
-

-
impl wire::Encode for InventoryAnnouncement {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        let mut n = 0;
-

-
        n += self.inventory.encode(writer)?;
-
        n += self.timestamp.encode(writer)?;
-

-
        Ok(n)
-
    }
-
}
-

-
impl wire::Decode for InventoryAnnouncement {
-
    fn decode<R: std::io::Read + ?Sized>(reader: &mut R) -> Result<Self, wire::Error> {
-
        let inventory = BoundedVec::decode(reader)?;
-
        let timestamp = Timestamp::decode(reader)?;
-

-
        Ok(Self {
-
            inventory,
-
            timestamp,
-
        })
-
    }
-
}
-

-
/// The type tracking the different variants of [`Info`] for encoding and
/// decoding purposes.
///
/// On the wire this tag is carried as a `u16`, even though the discriminant
/// fits a `u8`.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum InfoType {
    RefsAlreadySynced = 1,
}

impl From<InfoType> for u16 {
    /// Returns the wire tag for this info type.
    fn from(ty: InfoType) -> Self {
        ty as u16
    }
}

impl TryFrom<u16> for InfoType {
    type Error = u16;

    /// Parses a wire info-type tag, returning the raw value on failure.
    fn try_from(value: u16) -> Result<Self, Self::Error> {
        if value == 1 {
            Ok(Self::RefsAlreadySynced)
        } else {
            Err(value)
        }
    }
}
-

-
impl From<Info> for InfoType {
-
    fn from(info: Info) -> Self {
-
        (&info).into()
-
    }
-
}
-

-
impl From<&Info> for InfoType {
-
    fn from(info: &Info) -> Self {
-
        match info {
-
            Info::RefsAlreadySynced { .. } => Self::RefsAlreadySynced,
-
        }
-
    }
-
}
-

-
impl wire::Encode for Info {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        let mut n = 0;
-
        n += u16::from(InfoType::from(self)).encode(writer)?;
-
        match self {
-
            Info::RefsAlreadySynced { rid, at } => {
-
                n += rid.encode(writer)?;
-
                n += at.encode(writer)?;
-
            }
-
        }
-

-
        Ok(n)
-
    }
-
}
-

-
impl wire::Decode for Info {
-
    fn decode<R: std::io::Read + ?Sized>(reader: &mut R) -> Result<Self, wire::Error> {
-
        let info_type = reader.read_u16::<NetworkEndian>()?;
-

-
        match InfoType::try_from(info_type) {
-
            Ok(InfoType::RefsAlreadySynced) => {
-
                let rid = RepoId::decode(reader)?;
-
                let at = Oid::decode(reader)?;
-

-
                Ok(Self::RefsAlreadySynced { rid, at })
-
            }
-
            Err(other) => Err(wire::Error::UnknownInfoType(other)),
-
        }
-
    }
-
}
-

-
impl wire::Encode for Message {
    /// Encodes the message type id followed by the type-specific payload,
    /// returning the total number of bytes written.
    ///
    /// Fails with `InvalidData` if the encoded size exceeds the maximum wire
    /// message size (`wire::Size::MAX`).
    fn encode<W: std::io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, std::io::Error> {
        // The type id always comes first, so the decoder can dispatch on it.
        let mut n = self.type_id().encode(writer)?;

        match self {
            Self::Subscribe(Subscribe {
                filter,
                since,
                until,
            }) => {
                n += filter.encode(writer)?;
                n += since.encode(writer)?;
                n += until.encode(writer)?;
            }
            Self::Announcement(Announcement {
                node,
                message,
                signature,
            }) => {
                // NOTE: the wire order is node, signature, message — it
                // differs from the struct's field order and must match the
                // decoder exactly.
                n += node.encode(writer)?;
                n += signature.encode(writer)?;
                n += message.encode(writer)?;
            }
            Self::Info(info) => {
                n += info.encode(writer)?;
            }
            Self::Ping(Ping { ponglen, zeroes }) => {
                n += ponglen.encode(writer)?;
                n += zeroes.encode(writer)?;
            }
            Self::Pong { zeroes } => {
                n += zeroes.encode(writer)?;
            }
        }

        // Enforce the maximum framable message size after the payload is
        // written; oversized messages are a protocol error.
        if n > wire::Size::MAX as usize {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "Message exceeds maximum size",
            ));
        }
        Ok(n)
    }
}
-

-
impl wire::Decode for Message {
-
    fn decode<R: std::io::Read + ?Sized>(reader: &mut R) -> Result<Self, wire::Error> {
-
        let type_id = reader.read_u16::<NetworkEndian>()?;
-

-
        match MessageType::try_from(type_id) {
-
            Ok(MessageType::Subscribe) => {
-
                let filter = Filter::decode(reader)?;
-
                let since = Timestamp::decode(reader)?;
-
                let until = Timestamp::decode(reader)?;
-

-
                Ok(Self::Subscribe(Subscribe {
-
                    filter,
-
                    since,
-
                    until,
-
                }))
-
            }
-
            Ok(MessageType::NodeAnnouncement) => {
-
                let node = NodeId::decode(reader)?;
-
                let signature = Signature::decode(reader)?;
-
                let message = NodeAnnouncement::decode(reader)?.into();
-

-
                Ok(Announcement {
-
                    node,
-
                    message,
-
                    signature,
-
                }
-
                .into())
-
            }
-
            Ok(MessageType::InventoryAnnouncement) => {
-
                let node = NodeId::decode(reader)?;
-
                let signature = Signature::decode(reader)?;
-
                let message = InventoryAnnouncement::decode(reader)?.into();
-

-
                Ok(Announcement {
-
                    node,
-
                    message,
-
                    signature,
-
                }
-
                .into())
-
            }
-
            Ok(MessageType::RefsAnnouncement) => {
-
                let node = NodeId::decode(reader)?;
-
                let signature = Signature::decode(reader)?;
-
                let message = RefsAnnouncement::decode(reader)?.into();
-

-
                Ok(Announcement {
-
                    node,
-
                    message,
-
                    signature,
-
                }
-
                .into())
-
            }
-
            Ok(MessageType::Info) => {
-
                let info = Info::decode(reader)?;
-
                Ok(Self::Info(info))
-
            }
-
            Ok(MessageType::Ping) => {
-
                let ponglen = u16::decode(reader)?;
-
                let zeroes = ZeroBytes::decode(reader)?;
-
                Ok(Self::Ping(Ping { ponglen, zeroes }))
-
            }
-
            Ok(MessageType::Pong) => {
-
                let zeroes = ZeroBytes::decode(reader)?;
-
                Ok(Self::Pong { zeroes })
-
            }
-
            Err(other) => Err(wire::Error::UnknownMessageType(other)),
-
        }
-
    }
-
}
-

-
impl wire::Encode for Address {
    /// Encodes an address as a one-byte [`AddressType`] tag, the host
    /// payload, and a trailing `u16` port.
    fn encode<W: std::io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, std::io::Error> {
        let mut n = 0;

        match self.host {
            HostName::Ip(net::IpAddr::V4(ip)) => {
                n += u8::from(AddressType::Ipv4).encode(writer)?;
                n += ip.octets().encode(writer)?;
            }
            HostName::Ip(net::IpAddr::V6(ip)) => {
                n += u8::from(AddressType::Ipv6).encode(writer)?;
                n += ip.octets().encode(writer)?;
            }
            HostName::Dns(ref dns) => {
                n += u8::from(AddressType::Dns).encode(writer)?;
                n += dns.encode(writer)?;
            }
            HostName::Tor(addr) => {
                n += u8::from(AddressType::Onion).encode(writer)?;
                n += addr.encode(writer)?;
            }
            _ => {
                // `HostName` is non-exhaustive upstream; variants without a
                // wire representation cannot be encoded.
                return Err(io::ErrorKind::Unsupported.into());
            }
        }
        // The port is written after the host payload, regardless of type.
        n += self.port().encode(writer)?;

        Ok(n)
    }
}
-

-
impl wire::Decode for Address {
-
    fn decode<R: std::io::Read + ?Sized>(reader: &mut R) -> Result<Self, wire::Error> {
-
        let addrtype = reader.read_u8()?;
-
        let host = match AddressType::try_from(addrtype) {
-
            Ok(AddressType::Ipv4) => {
-
                let octets: [u8; 4] = wire::Decode::decode(reader)?;
-
                let ip = net::Ipv4Addr::from(octets);
-

-
                HostName::Ip(net::IpAddr::V4(ip))
-
            }
-
            Ok(AddressType::Ipv6) => {
-
                let octets: [u8; 16] = wire::Decode::decode(reader)?;
-
                let ip = net::Ipv6Addr::from(octets);
-

-
                HostName::Ip(net::IpAddr::V6(ip))
-
            }
-
            Ok(AddressType::Dns) => {
-
                let dns: String = wire::Decode::decode(reader)?;
-

-
                HostName::Dns(dns)
-
            }
-
            Ok(AddressType::Onion) => {
-
                let onion: tor::OnionAddrV3 = wire::Decode::decode(reader)?;
-

-
                HostName::Tor(onion)
-
            }
-
            Err(other) => return Err(wire::Error::UnknownAddressType(other)),
-
        };
-
        let port = u16::decode(reader)?;
-

-
        Ok(Self::from(NetAddr { host, port }))
-
    }
-
}
-

-
impl wire::Encode for ZeroBytes {
-
    fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
        let mut n = (self.len() as u16).encode(writer)?;
-
        for _ in 0..self.len() {
-
            n += 0u8.encode(writer)?;
-
        }
-
        Ok(n)
-
    }
-
}
-

-
impl wire::Decode for ZeroBytes {
    /// Decodes a `u16` length prefix and consumes that many padding bytes.
    fn decode<R: std::io::Read + ?Sized>(reader: &mut R) -> Result<Self, wire::Error> {
        let zeroes = u16::decode(reader)?;
        // Consume the padding. NOTE: the byte *values* are not validated to
        // actually be zero — any content of the stated length is accepted
        // and discarded.
        for _ in 0..zeroes {
            _ = u8::decode(reader)?;
        }
        Ok(ZeroBytes::new(zeroes))
    }
}
-

-
#[cfg(test)]
mod tests {
    use super::*;
    use qcheck_macros::quickcheck;
    use radicle::node::device::Device;
    use radicle::node::UserAgent;
    use radicle::storage::refs::RefsAt;

    use crate::deserializer::Deserializer;
    use crate::test::arbitrary;
    use crate::wire::{self, Encode};

    // A refs announcement filled to `REF_REMOTE_LIMIT` must still fit in a
    // single wire message.
    #[test]
    fn test_refs_ann_max_size() {
        let signer = Device::mock();
        let refs: [RefsAt; REF_REMOTE_LIMIT] = arbitrary::gen(1);
        let ann = AnnouncementMessage::Refs(RefsAnnouncement {
            rid: arbitrary::gen(1),
            refs: BoundedVec::collect_from(&mut refs.into_iter()),
            timestamp: arbitrary::gen(1),
        });
        let ann = ann.signed(&signer);
        let msg = Message::Announcement(ann);
        let data = wire::serialize(&msg);

        assert!(data.len() < wire::Size::MAX as usize);
    }

    // An inventory announcement filled to `INVENTORY_LIMIT` must still fit in
    // a single wire message.
    #[test]
    fn test_inv_ann_max_size() {
        let signer = Device::mock();
        let inv: [RepoId; INVENTORY_LIMIT] = arbitrary::gen(1);
        let ann = AnnouncementMessage::Inventory(InventoryAnnouncement {
            inventory: BoundedVec::collect_from(&mut inv.into_iter()),
            timestamp: arbitrary::gen(1),
        });
        let ann = ann.signed(&signer);
        let msg = Message::Announcement(ann);
        let data = wire::serialize(&msg);

        assert!(data.len() < wire::Size::MAX as usize);
    }

    // A node announcement maximized along every axis (addresses, alias
    // length, nonce) must still fit in a single wire message.
    #[test]
    fn test_node_ann_max_size() {
        let signer = Device::mock();
        let addrs: [Address; ADDRESS_LIMIT] = arbitrary::gen(1);
        let alias = ['@'; radicle::node::MAX_ALIAS_LENGTH];
        let ann = AnnouncementMessage::Node(NodeAnnouncement {
            version: 1,
            features: Default::default(),
            alias: radicle::node::Alias::new(String::from_iter(alias)),
            addresses: BoundedVec::collect_from(&mut addrs.into_iter()),
            timestamp: arbitrary::gen(1),
            nonce: u64::MAX,
            agent: UserAgent::default(),
        });
        let ann = ann.signed(&signer);
        let msg = Message::Announcement(ann);
        let data = wire::serialize(&msg);

        assert!(data.len() < wire::Size::MAX as usize);
    }

    // Pings/pongs at exactly the maximum allowed padding must encode.
    #[test]
    fn test_pingpong_encode_max_size() {
        let mut buf = Vec::new();

        let ping = Message::Ping(Ping {
            ponglen: 0,
            zeroes: ZeroBytes::new(Ping::MAX_PING_ZEROES),
        });
        ping.encode(&mut buf)
            .expect("ping should be within max message size");

        let pong = Message::Pong {
            zeroes: ZeroBytes::new(Ping::MAX_PONG_ZEROES),
        };
        pong.encode(&mut buf)
            .expect("pong should be within max message size");
    }

    // One byte over the maximum padding must be rejected by the encoder's
    // size check.
    #[test]
    fn test_pingpong_encode_size_overflow() {
        let ping = Message::Ping(Ping {
            ponglen: 0,
            zeroes: ZeroBytes::new(Ping::MAX_PING_ZEROES + 1),
        });

        let mut buf = Vec::new();
        ping.encode(&mut buf)
            .expect_err("ping should exceed max message size");

        let pong = Message::Pong {
            zeroes: ZeroBytes::new(Ping::MAX_PONG_ZEROES + 1),
        };

        let mut buf = Vec::new();
        pong.encode(&mut buf)
            .expect_err("pong should exceed max message size");
    }

    // Round-trip property: decode(encode(m)) == m for arbitrary messages.
    #[quickcheck]
    fn prop_message_encode_decode(message: Message) {
        let encoded = &wire::serialize(&message);
        let decoded = wire::deserialize::<Message>(encoded).unwrap();

        assert_eq!(message, decoded);
    }

    // The streaming deserializer must yield back exactly the sequence of
    // messages that was encoded into it.
    #[test]
    fn prop_message_decoder() {
        fn property(items: Vec<Message>) {
            let mut decoder = Deserializer::<1048576, Message>::new(8);

            for item in &items {
                item.encode(&mut decoder).unwrap();
            }
            for item in items {
                assert_eq!(decoder.next().unwrap().unwrap(), item);
            }
        }

        qcheck::QuickCheck::new()
            .gen(qcheck::Gen::new(16))
            .quickcheck(property as fn(items: Vec<Message>));
    }

    // Round-trip property for the zero-padding encoding.
    #[quickcheck]
    fn prop_zero_bytes_encode_decode(zeroes: ZeroBytes) {
        assert_eq!(
            wire::deserialize::<ZeroBytes>(&wire::serialize(&zeroes)).unwrap(),
            zeroes
        );
    }

    // Round-trip property for the address encoding.
    #[quickcheck]
    fn prop_addr(addr: Address) {
        assert_eq!(
            wire::deserialize::<Address>(&wire::serialize(&addr)).unwrap(),
            addr
        );
    }
}
deleted crates/radicle-node/src/wire/protocol.rs
@@ -1,1381 +0,0 @@
-
//! Implementation of the transport protocol.
-
//!
-
//! We use the Noise XK handshake pattern to establish an encrypted stream with a remote peer.
-
//! The handshake itself is implemented in the external [`cyphernet`] and [`netservices`] crates.
-
use std::collections::hash_map::Entry;
-
use std::collections::VecDeque;
-
use std::os::unix::io::{AsRawFd, RawFd};
-
use std::sync::Arc;
-
use std::{io, net, time};
-

-
use amplify::Wrapper as _;
-
use crossbeam_channel as chan;
-
use cyphernet::addr::{HostName, InetHost, NetAddr};
-
use cyphernet::encrypt::noise::{HandshakePattern, Keyset, NoiseState};
-
use cyphernet::proxy::socks5;
-
use cyphernet::{Digest, EcSk, Ecdh, Sha256};
-
use localtime::LocalTime;
-
use netservices::resource::{ListenerEvent, NetAccept, NetTransport, SessionEvent};
-
use netservices::session::{NoiseSession, ProtocolArtifact, Socks5Session};
-
use netservices::{NetConnection, NetReader, NetWriter};
-
use radicle::node::device::Device;
-
use reactor::{ResourceId, ResourceType, Timestamp};
-

-
use radicle::collections::RandomMap;
-
use radicle::crypto;
-
use radicle::node::config::AddressConfig;
-
use radicle::node::NodeId;
-
use radicle::storage::WriteStorage;
-

-
use crate::prelude::Deserializer;
-
use crate::service;
-
use crate::service::io::Io;
-
use crate::service::FETCH_TIMEOUT;
-
use crate::service::{session, DisconnectReason, Metrics, Service};
-
use crate::wire::frame;
-
use crate::wire::frame::{Frame, FrameData, StreamId};
-
use crate::wire::Encode;
-
use crate::worker;
-
use crate::worker::{ChannelEvent, ChannelsConfig, FetchRequest, FetchResult, Task, TaskResult};
-
use crate::Link;
-

-
/// NoiseXK handshake pattern.
///
/// XK: the initiator transmits its static key during the handshake, while
/// the responder's static key is known to the initiator beforehand.
pub const NOISE_XK: HandshakePattern = HandshakePattern {
    initiator: cyphernet::encrypt::noise::InitiatorPattern::Xmitted,
    responder: cyphernet::encrypt::noise::OneWayPattern::Known,
};

/// Default time to wait until a network connection is considered inactive.
pub const DEFAULT_CONNECTION_TIMEOUT: time::Duration = time::Duration::from_secs(6);

/// Default time to wait when dialing a connection, before the remote is considered unreachable.
pub const DEFAULT_DIAL_TIMEOUT: time::Duration = time::Duration::from_secs(6);

/// Maximum size of a peer inbox, in bytes (2 MiB).
pub const MAX_INBOX_SIZE: usize = 1024 * 1024 * 2;
-

-
/// Control message used internally between workers, users, and the service.
// The `service::Command` variant is large relative to the others; boxing it
// would change the public API, so the lint is allowed instead.
#[allow(clippy::large_enum_variant)]
#[derive(Debug)]
pub enum Control {
    /// Message from the user to the service.
    User(service::Command),
    /// Message from a worker to the service.
    Worker(TaskResult),
    /// Flush data in the given stream to the remote.
    Flush { remote: NodeId, stream: StreamId },
}
-

-
/// Peer session type: a Noise-encrypted session over an (optionally
/// SOCKS5-proxied) TCP stream.
pub type WireSession<G> = NoiseSession<G, Sha256, Socks5Session<net::TcpStream>>;
/// Peer session type (read-only).
pub type WireReader = NetReader<Socks5Session<net::TcpStream>>;
/// Peer session type (write-only).
pub type WireWriter<G> = NetWriter<NoiseState<G, Sha256>, Socks5Session<net::TcpStream>>;

/// Reactor action over listener and transport resources for [`WireSession`]s.
type Action<G> = reactor::Action<NetAccept<WireSession<G>>, NetTransport<WireSession<G>>>;
-

-
/// A worker stream.
struct Stream {
    /// Channels used to communicate with the worker.
    channels: worker::Channels,
    /// Data sent, in bytes.
    sent_bytes: usize,
    /// Data received, in bytes.
    received_bytes: usize,
}

impl Stream {
    /// Create a stream over the given channels, with zeroed transfer
    /// counters.
    fn new(channels: worker::Channels) -> Self {
        Self {
            channels,
            sent_bytes: 0,
            received_bytes: 0,
        }
    }
}
-

-
/// Streams associated with a connected peer.
struct Streams {
    /// Active streams and their associated worker channels.
    /// Note that the gossip and control streams are not included here as they are always
    /// implied to exist.
    streams: RandomMap<StreamId, Stream>,
    /// Connection direction.
    link: Link,
    /// Sequence number used to compute the next stream id.
    // Monotonically increasing; incremented before each `open`.
    seq: u64,
}
-

-
impl Streams {
    /// Create a new [`Streams`] object, passing the connection link.
    fn new(link: Link) -> Self {
        Self {
            streams: RandomMap::default(),
            link,
            seq: 0,
        }
    }

    /// Get a known stream.
    fn get(&self, stream: &StreamId) -> Option<&Stream> {
        self.streams.get(stream)
    }

    /// Get a known stream, mutably.
    fn get_mut(&mut self, stream: &StreamId) -> Option<&mut Stream> {
        self.streams.get_mut(stream)
    }

    /// Open a new stream.
    ///
    /// Panics if the stream-id space is exhausted, or if the freshly derived
    /// id is already registered (both are internal invariant violations:
    /// `seq` only ever increases).
    fn open(&mut self, config: ChannelsConfig) -> (StreamId, worker::Channels) {
        self.seq += 1;

        // Derive the next stream id from the connection direction and the
        // sequence number.
        let id = StreamId::git(self.link)
            .nth(self.seq)
            .expect("Streams::open: too many streams");
        let channels = self
            .register(id, config)
            .expect("Streams::open: stream was already open");

        (id, channels)
    }

    /// Register an open stream.
    ///
    /// Returns the wire-side channel half, or `None` if a stream with this id
    /// is already registered (the worker-side half is kept in the map).
    fn register(&mut self, stream: StreamId, config: ChannelsConfig) -> Option<worker::Channels> {
        let (wire, worker) = worker::Channels::pair(config)
            .expect("Streams::register: fatal: unable to create channels");

        match self.streams.entry(stream) {
            Entry::Vacant(e) => {
                e.insert(Stream::new(worker));
                Some(wire)
            }
            Entry::Occupied(_) => None,
        }
    }

    /// Unregister an open stream.
    fn unregister(&mut self, stream: &StreamId) -> Option<Stream> {
        self.streams.remove(stream)
    }

    /// Close all streams.
    fn shutdown(&mut self) {
        for (sid, stream) in self.streams.drain() {
            log::debug!(target: "wire", "Closing worker stream {sid}");
            // Best-effort close: a failure here means the worker end is
            // already gone, which is fine during shutdown.
            stream.channels.close().ok();
        }
    }
}
-

-
/// The initial state of an outbound peer before handshake is completed.
#[derive(Debug)]
struct Outbound {
    /// Resource ID, if registered.
    id: Option<ResourceId>,
    /// Remote address.
    addr: NetAddr<HostName>,
    /// Remote Node ID.
    // Known up-front for outbound connections, since we dialed the peer.
    nid: NodeId,
}

/// The initial state of an inbound peer before handshake is completed.
// Unlike [`Outbound`], no `NodeId` is known yet: the remote identifies
// itself during the handshake.
#[derive(Debug)]
struct Inbound {
    /// Resource ID, if registered.
    id: Option<ResourceId>,
    /// Remote address.
    addr: NetAddr<HostName>,
}
-

-
/// Peer connection state machine.
enum Peer {
    /// The state after handshake is completed.
    /// Peers in this state are handled by the underlying service.
    Connected {
        #[allow(dead_code)]
        addr: NetAddr<HostName>,
        /// Connection direction.
        link: Link,
        /// Remote node id, established by the handshake.
        nid: NodeId,
        /// Buffered incoming frames, bounded by `MAX_INBOX_SIZE`.
        inbox: Deserializer<MAX_INBOX_SIZE, Frame>,
        /// Open worker streams for this connection.
        streams: Streams,
    },
    /// The peer was scheduled for disconnection. Once the transport is handed over
    /// by the reactor, we can consider it disconnected.
    Disconnecting {
        link: Link,
        /// Remote node id, if the handshake had completed before disconnect.
        nid: Option<NodeId>,
        reason: DisconnectReason,
    },
}
-

-
impl std::fmt::Debug for Peer {
-
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-
        match self {
-
            Self::Connected { link, nid, .. } => write!(f, "Connected({link:?}, {nid})"),
-
            Self::Disconnecting { .. } => write!(f, "Disconnecting"),
-
        }
-
    }
-
}
-

-
impl Peer {
-
    /// Return the peer's id, if any.
-
    fn id(&self) -> Option<&NodeId> {
-
        match self {
-
            Peer::Connected { nid, .. } | Peer::Disconnecting { nid: Some(nid), .. } => Some(nid),
-
            Peer::Disconnecting { nid: None, .. } => None,
-
        }
-
    }
-

-
    fn link(&self) -> Link {
-
        match self {
-
            Peer::Connected { link, .. } => *link,
-
            Peer::Disconnecting { link, .. } => *link,
-
        }
-
    }
-

-
    /// Connected peer.
-
    fn connected(nid: NodeId, addr: NetAddr<HostName>, link: Link) -> Self {
-
        Self::Connected {
-
            link,
-
            addr,
-
            nid,
-
            inbox: Deserializer::default(),
-
            streams: Streams::new(link),
-
        }
-
    }
-
}
-

-
/// Holds connected peers.
-
struct Peers(RandomMap<ResourceId, Peer>);
-

-
impl Peers {
-
    fn get_mut(&mut self, id: &ResourceId) -> Option<&mut Peer> {
-
        self.0.get_mut(id)
-
    }
-

-
    fn entry(&mut self, id: ResourceId) -> Entry<ResourceId, Peer> {
-
        self.0.entry(id)
-
    }
-

-
    fn insert(&mut self, id: ResourceId, peer: Peer) {
-
        if self.0.insert(id, peer).is_some() {
-
            log::warn!(target: "wire", "Replacing existing peer id={id}");
-
        }
-
    }
-

-
    fn remove(&mut self, id: &ResourceId) -> Option<Peer> {
-
        self.0.remove(id)
-
    }
-

-
    fn lookup(&self, node_id: &NodeId) -> Option<(ResourceId, &Peer)> {
-
        self.0
-
            .iter()
-
            .find(|(_, peer)| peer.id() == Some(node_id))
-
            .map(|(fd, peer)| (*fd, peer))
-
    }
-

-
    fn lookup_mut(&mut self, node_id: &NodeId) -> Option<(ResourceId, &mut Peer)> {
-
        self.0
-
            .iter_mut()
-
            .find(|(_, peer)| peer.id() == Some(node_id))
-
            .map(|(fd, peer)| (*fd, peer))
-
    }
-

-
    fn active(&self) -> impl Iterator<Item = (ResourceId, &NodeId, Link)> {
-
        self.0.iter().filter_map(|(id, peer)| match peer {
-
            Peer::Connected { nid, link, .. } => Some((*id, nid, *link)),
-
            Peer::Disconnecting { .. } => None,
-
        })
-
    }
-

-
    fn connected(&self) -> impl Iterator<Item = (ResourceId, &NodeId)> {
-
        self.0.iter().filter_map(|(id, peer)| {
-
            if let Peer::Connected { nid, .. } = peer {
-
                Some((*id, nid))
-
            } else {
-
                None
-
            }
-
        })
-
    }
-

-
    fn iter(&self) -> impl Iterator<Item = &Peer> {
-
        self.0.values()
-
    }
-
}
-

-
/// Wire protocol implementation for a set of peers.
///
/// Bridges the reactor layer (listeners, transports, raw bytes) and the
/// higher-level [`Service`]: connection state is tracked per file descriptor
/// before a session is established (`inbound`/`outbound`), and per resource
/// id afterwards (`peers`).
pub struct Wire<D, S, G: crypto::signature::Signer<crypto::Signature> + Ecdh> {
    /// Backing service instance.
    service: Service<D, S, G>,
    /// Worker pool interface.
    worker: chan::Sender<Task>,
    /// Used for authentication.
    signer: Device<G>,
    /// Node metrics.
    metrics: service::Metrics,
    /// Internal queue of actions to send to the reactor.
    /// Drained via the `Iterator` implementation.
    actions: VecDeque<Action<G>>,
    /// Outbound attempted peers without a session.
    outbound: RandomMap<RawFd, Outbound>,
    /// Inbound peers without a session.
    inbound: RandomMap<RawFd, Inbound>,
    /// Listening addresses that are not yet registered.
    listening: RandomMap<RawFd, net::SocketAddr>,
    /// Peer (established) sessions.
    peers: Peers,
}
impl<D, S, G> Wire<D, S, G>
where
    D: service::Store,
    S: WriteStorage + 'static,
    G: crypto::signature::Signer<crypto::Signature> + Ecdh<Pk = NodeId>,
{
    /// Create a new wire handler around an already-initialized service.
    ///
    /// # Panics
    ///
    /// Panics if the service has not been started (`service.started()` is `None`).
    pub fn new(service: Service<D, S, G>, worker: chan::Sender<Task>, signer: Device<G>) -> Self {
        assert!(service.started().is_some(), "Service must be initialized");

        Self {
            service,
            worker,
            signer,
            metrics: Metrics::default(),
            actions: VecDeque::new(),
            inbound: RandomMap::default(),
            outbound: RandomMap::default(),
            listening: RandomMap::default(),
            peers: Peers(RandomMap::default()),
        }
    }

    /// Queue a listener socket for registration with the reactor.
    ///
    /// The local address is remembered in `listening` so the service can be
    /// notified once the reactor confirms registration.
    pub fn listen(&mut self, socket: NetAccept<WireSession<G>>) {
        self.listening
            .insert(socket.as_raw_fd(), socket.local_addr());
        self.actions.push_back(Action::RegisterListener(socket));
    }

    /// Begin disconnecting the transport identified by `id`, queueing an
    /// `UnregisterTransport` action for the reactor.
    ///
    /// Returns the node id and link direction of the affected peer when known,
    /// so the caller can inform the service. Established peers transition to
    /// the `Disconnecting` state and have their streams shut down.
    fn disconnect(&mut self, id: ResourceId, reason: DisconnectReason) -> Option<(NodeId, Link)> {
        match self.peers.entry(id) {
            Entry::Vacant(_) => {
                // Connecting peer with no session.
                log::debug!(target: "wire", "Disconnecting pending peer with id={id}: {reason}");
                self.actions.push_back(Action::UnregisterTransport(id));

                // Check for attempted outbound connections. Unestablished inbound connections don't
                // have an NID yet.
                self.outbound
                    .values()
                    .find(|o| o.id == Some(id))
                    .map(|o| (o.nid, Link::Outbound))
            }
            Entry::Occupied(mut e) => match e.get_mut() {
                Peer::Disconnecting { nid, link, .. } => {
                    // Already in progress; don't queue another unregister.
                    log::error!(target: "wire", "Peer with id={id} is already disconnecting");

                    nid.map(|n| (n, *link))
                }
                Peer::Connected {
                    nid, streams, link, ..
                } => {
                    log::debug!(target: "wire", "Disconnecting peer with id={id}: {reason}");
                    let nid = *nid;
                    let link = *link;

                    streams.shutdown();
                    e.insert(Peer::Disconnecting {
                        nid: Some(nid),
                        link,
                        reason,
                    });
                    self.actions.push_back(Action::UnregisterTransport(id));

                    Some((nid, link))
                }
            },
        }
    }

    /// Handle a fetch result delivered by the worker pool: close the stream
    /// towards the peer if still open, and report initiator-side results to
    /// the service.
    fn worker_result(&mut self, task: TaskResult) {
        log::debug!(
            target: "wire",
            "Received fetch result from worker for stream {}, remote {}: {:?}",
            task.stream, task.remote, task.result
        );

        let nid = task.remote;
        let Some((fd, peer)) = self.peers.lookup_mut(&nid) else {
            log::warn!(target: "wire", "Peer {nid} not found; ignoring fetch result");
            return;
        };

        if let Peer::Connected { link, streams, .. } = peer {
            // Nb. It's possible that the stream would already be unregistered if we received an
            // early "close" from the remote. Otherwise, we unregister it here and send the "close"
            // ourselves.
            if let Some(s) = streams.unregister(&task.stream) {
                log::debug!(
                    target: "wire", "Stream {} of {} closing with {} byte(s) sent and {} byte(s) received",
                    task.stream, task.remote, s.sent_bytes, s.received_bytes
                );
                let frame = Frame::<service::Message>::control(
                    *link,
                    frame::Control::Close {
                        stream: task.stream,
                    },
                );
                self.actions.push_back(Action::Send(fd, frame.to_bytes()));
            }
        } else {
            // NOTE(review): the previous comment here claimed we "don't return here" so that
            // the service still learns about the fetch result, but the code *does* return,
            // skipping the `task.result` handling below. Confirm which behavior is intended.
            log::warn!(target: "wire", "Peer {nid} is not connected; ignoring fetch result");
            return;
        };

        // Only call into the service if we initiated this fetch.
        match task.result {
            FetchResult::Initiator { rid, result } => {
                self.service.fetched(rid, nid, result);
            }
            FetchResult::Responder { rid, result } => {
                if let Some(rid) = rid {
                    if let Some(err) = result.err() {
                        log::info!(target: "wire", "Peer {nid} failed to fetch {rid} from us: {err}");
                    } else {
                        log::info!(target: "wire", "Peer {nid} fetched {rid} from us successfully");
                    }
                }
            }
        }
    }

    /// Drain events queued by the worker on the given stream's channel and
    /// translate them into wire frames (git data, `Eof`, `Close`) sent to the
    /// remote via the reactor. Updates the per-peer sent-byte metrics.
    fn flush(&mut self, remote: NodeId, stream: StreamId) {
        let Some((fd, peer)) = self.peers.lookup_mut(&remote) else {
            log::warn!(target: "wire", "Peer {remote} is not known; ignoring flush");
            return;
        };
        let Peer::Connected { streams, link, .. } = peer else {
            log::warn!(target: "wire", "Peer {remote} is not connected; ignoring flush");
            return;
        };
        let Some(s) = streams.get_mut(&stream) else {
            log::debug!(target: "wire", "Stream {stream} cannot be found; ignoring flush");
            return;
        };
        let metrics = self.metrics.peer(remote);

        for data in s.channels.try_iter() {
            let frame = match data {
                ChannelEvent::Data(data) => {
                    metrics.sent_git_bytes += data.len();
                    metrics.sent_bytes += data.len();
                    Frame::<service::Message>::git(stream, data)
                }
                ChannelEvent::Close => Frame::control(*link, frame::Control::Close { stream }),
                ChannelEvent::Eof => Frame::control(*link, frame::Control::Eof { stream }),
            };
            self.actions
                .push_back(reactor::Action::Send(fd, frame.to_bytes()));
        }
    }

    /// Drop pre-session (handshake) state for a peer identified by its file
    /// descriptor. For an attempted outbound connection, the service is told
    /// the connection failed; inbound peers have no NID yet, so the service
    /// is not informed.
    fn cleanup(&mut self, id: ResourceId, fd: RawFd) {
        if self.inbound.remove(&fd).is_some() {
            log::debug!(target: "wire", "Cleaning up inbound peer state with id={id} (fd={fd})");
        } else if let Some(outbound) = self.outbound.remove(&fd) {
            log::debug!(target: "wire", "Cleaning up outbound peer state with id={id} (fd={fd})");
            self.service.disconnected(
                outbound.nid,
                Link::Outbound,
                &DisconnectReason::connection(),
            );
        } else {
            log::debug!(target: "wire", "Tried to cleanup unknown peer with id={id} (fd={fd})");
        }
    }
}
impl<D, S, G> reactor::Handler for Wire<D, S, G>
where
    D: service::Store + Send,
    S: WriteStorage + Send + 'static,
    G: crypto::signature::Signer<crypto::Signature> + Ecdh<Pk = NodeId> + Clone + Send,
{
    type Listener = NetAccept<WireSession<G>>;
    type Transport = NetTransport<WireSession<G>>;
    type Command = Control;

    /// Periodic reactor tick: refresh the open-channel and worker-queue
    /// metrics, then forward the current time and metrics to the service.
    fn tick(&mut self, time: Timestamp) {
        self.metrics.open_channels = self
            .peers
            .iter()
            .filter_map(|p| {
                if let Peer::Connected { streams, .. } = p {
                    Some(streams.streams.len())
                } else {
                    None
                }
            })
            .sum();
        self.metrics.worker_queue_size = self.worker.len();
        self.service.tick(
            LocalTime::from_millis(time.as_millis() as u128),
            &self.metrics,
        );
    }

    /// A scheduled timer fired: wake the service.
    fn handle_timer(&mut self) {
        self.service.wake();
    }

    /// Handle inbound connection attempts on a listener socket: vet them with
    /// the service, create a session and transport, and register the
    /// pre-session state under the connection's file descriptor.
    fn handle_listener_event(
        &mut self,
        _: ResourceId, // Nb. This is the ID of the listener socket.
        event: ListenerEvent<WireSession<G>>,
        _: Timestamp,
    ) {
        match event {
            ListenerEvent::Accepted(connection) => {
                let Ok(remote) = connection.remote_addr() else {
                    log::warn!(target: "wire", "Accepted connection doesn't have remote address; dropping..");
                    drop(connection);

                    return;
                };
                let InetHost::Ip(ip) = remote.host else {
                    log::error!(target: "wire", "Unexpected host type for inbound connection {remote}; dropping..");
                    drop(connection);

                    return;
                };
                let fd = connection.as_raw_fd();
                log::debug!(target: "wire", "Inbound connection from {remote} (fd={fd})..");

                // If the service doesn't want to accept this connection,
                // we drop the connection here, which disconnects the socket.
                if !self.service.accepted(ip) {
                    log::debug!(target: "wire", "Rejecting inbound connection from {ip} (fd={fd})..");
                    drop(connection);

                    return;
                }

                let session = match accept::<G>(
                    remote.clone().into(),
                    connection,
                    self.signer.clone().into_inner(),
                ) {
                    Ok(s) => s,
                    Err(e) => {
                        log::error!(target: "wire", "Error creating session for {ip}: {e}");
                        return;
                    }
                };
                let transport = match NetTransport::with_session(session, Link::Inbound) {
                    Ok(transport) => transport,
                    Err(err) => {
                        log::error!(target: "wire", "Failed to create transport for accepted connection: {err}");
                        return;
                    }
                };
                log::debug!(target: "wire", "Accepted inbound connection from {remote} (fd={fd})..");

                self.inbound.insert(
                    fd,
                    Inbound {
                        id: None,
                        addr: remote.into(),
                    },
                );
                self.actions
                    .push_back(reactor::Action::RegisterTransport(transport))
            }
            ListenerEvent::Failure(err) => {
                log::error!(target: "wire", "Error listening for inbound connections: {err}");
            }
        }
    }

    /// A resource was registered with the reactor: record the assigned
    /// resource id against the matching pre-session state (keyed by fd), or
    /// notify the service for a newly registered listener.
    fn handle_registered(&mut self, fd: RawFd, id: ResourceId, typ: ResourceType) {
        match typ {
            ResourceType::Listener => {
                if let Some(local_addr) = self.listening.remove(&fd) {
                    self.service.listening(local_addr);
                }
            }
            ResourceType::Transport => {
                if let Some(outbound) = self.outbound.get_mut(&fd) {
                    log::debug!(target: "wire", "Outbound peer resource registered for {} with id={id} (fd={fd})", outbound.nid);
                    outbound.id = Some(id);
                } else if let Some(inbound) = self.inbound.get_mut(&fd) {
                    log::debug!(target: "wire", "Inbound peer resource registered with id={id} (fd={fd})");
                    inbound.id = Some(id);
                } else {
                    log::warn!(target: "wire", "Unknown peer registered with fd={fd} and id={id}");
                }
            }
        }
    }

    /// Main per-transport event dispatch: session establishment (including
    /// conflicting-connection resolution), inbound data framing, and
    /// termination.
    fn handle_transport_event(
        &mut self,
        id: ResourceId,
        event: SessionEvent<WireSession<G>>,
        _: Timestamp,
    ) {
        match event {
            SessionEvent::Established(fd, ProtocolArtifact { state, .. }) => {
                // SAFETY: With the NoiseXK protocol, there is always a remote static key.
                let nid: NodeId = state.remote_static_key.unwrap();
                // Make sure we don't try to connect to ourselves by mistake.
                if &nid == self.signer.public_key() {
                    log::error!(target: "wire", "Self-connection detected, disconnecting..");
                    self.disconnect(id, DisconnectReason::SelfConnection);

                    return;
                }
                // Promote the pre-session state (keyed by fd) into an
                // established peer, deriving the link direction from which
                // map the fd was found in.
                let (addr, link) = if let Some(peer) = self.inbound.remove(&fd) {
                    self.metrics.peer(nid).inbound_connection_attempts += 1;
                    (peer.addr, Link::Inbound)
                } else if let Some(peer) = self.outbound.remove(&fd) {
                    assert_eq!(nid, peer.nid);
                    (peer.addr, Link::Outbound)
                } else {
                    log::error!(target: "wire", "Session for {nid} (id={id}) not found");
                    return;
                };
                log::debug!(
                    target: "wire",
                    "Session established with {nid} (id={id}) (fd={fd}) ({})",
                    if link.is_inbound() { "inbound" } else { "outbound" }
                );

                // Connections to close.
                let mut disconnect = Vec::new();

                // Handle conflicting connections.
                // This is typical when nodes have mutually configured their nodes to connect to
                // each other on startup. We handle this by deterministically choosing one node
                // whose outbound connection is the one that is kept. The other connections are
                // dropped.
                {
                    // Whether we have precedence in case of conflicting connections.
                    // Having precedence means that our outbound connection will win over
                    // the other node's outbound connection.
                    let precedence = *self.signer.public_key() > nid;

                    // Pre-existing connections that conflict with this newly established session.
                    // Note that we can't know whether a connection is conflicting before we get the
                    // remote static key.
                    let mut conflicting = Vec::new();

                    // Active sessions with the same NID but a different Resource ID are conflicting.
                    conflicting.extend(
                        self.peers
                            .active()
                            .filter(|(c_id, d, _)| **d == nid && *c_id != id)
                            .map(|(c_id, _, link)| (c_id, link)),
                    );

                    // Outbound connection attempts with the same remote key but a different file
                    // descriptor are conflicting.
                    conflicting.extend(self.outbound.iter().filter_map(|(c_fd, other)| {
                        if other.nid == nid && *c_fd != fd {
                            other.id.map(|c_id| (c_id, Link::Outbound))
                        } else {
                            None
                        }
                    }));

                    for (c_id, c_link) in conflicting {
                        // If we have precedence, the inbound connection is closed.
                        // In the case where both connections are inbound or outbound,
                        // we close the newer connection, ie. the one with the higher
                        // resource id.
                        let close = match (link, c_link) {
                            (Link::Inbound, Link::Outbound) => {
                                if precedence {
                                    id
                                } else {
                                    c_id
                                }
                            }
                            (Link::Outbound, Link::Inbound) => {
                                if precedence {
                                    c_id
                                } else {
                                    id
                                }
                            }
                            (Link::Inbound, Link::Inbound) => id.max(c_id),
                            (Link::Outbound, Link::Outbound) => id.max(c_id),
                        };

                        log::warn!(
                            target: "wire", "Established session (id={id}) conflicts with existing session for {nid} (id={c_id})"
                        );
                        disconnect.push(close);
                    }
                }
                for id in &disconnect {
                    log::warn!(
                        target: "wire", "Closing conflicting session (id={id}) with {nid}.."
                    );
                    // Disconnect and return the associated NID of the peer, if available.
                    if let Some((nid, link)) = self.disconnect(*id, DisconnectReason::Conflict) {
                        // We disconnect the session eagerly because otherwise we will get the new
                        // `connected` event before the `disconnect`, resulting in a duplicate
                        // connection.
                        self.service
                            .disconnected(nid, link, &DisconnectReason::Conflict);
                    }
                }
                // Only register the new session if it wasn't itself chosen
                // for closure during conflict resolution.
                if !disconnect.contains(&id) {
                    self.peers
                        .insert(id, Peer::connected(nid, addr.clone(), link));
                    self.service.connected(nid, addr.into(), link);
                }
            }
            SessionEvent::Data(data) => {
                if let Some(Peer::Connected {
                    nid,
                    inbox,
                    streams,
                    ..
                }) = self.peers.get_mut(&id)
                {
                    let metrics = self.metrics.peer(*nid);
                    metrics.received_bytes += data.len();

                    // Feed raw bytes into the peer's inbox; a full inbox means
                    // we can't keep up, and the peer is disconnected.
                    if inbox.input(&data).is_err() {
                        log::error!(target: "wire", "Maximum inbox size ({MAX_INBOX_SIZE}) reached for peer {nid}");
                        log::error!(target: "wire", "Unable to process messages fast enough for peer {nid}; disconnecting..");
                        self.disconnect(id, DisconnectReason::Session(session::Error::Misbehavior));

                        return;
                    }

                    // Drain all complete frames currently in the inbox.
                    loop {
                        match inbox.deserialize_next() {
                            Ok(Some(Frame {
                                data: FrameData::Control(frame::Control::Open { stream }),
                                ..
                            })) => {
                                log::debug!(target: "wire", "Received `open` command for stream {stream} from {nid}");
                                metrics.streams_opened += 1;
                                metrics.received_fetch_requests += 1;
                                let reader_limit = self.service.config().limits.fetch_pack_receive;
                                let Some(channels) = streams.register(
                                    stream,
                                    ChannelsConfig::new(FETCH_TIMEOUT)
                                        .with_reader_limit(reader_limit),
                                ) else {
                                    log::warn!(target: "wire", "Peer attempted to open already-open stream stream {stream}");
                                    continue;
                                };

                                // Hand the new stream to the worker pool as a
                                // responder-side fetch.
                                let task = Task {
                                    fetch: FetchRequest::Responder {
                                        remote: *nid,
                                        emitter: self.service.emitter(),
                                    },
                                    stream,
                                    channels,
                                };
                                if let Err(e) = self.worker.try_send(task) {
                                    log::error!(
                                        target: "wire",
                                        "Worker pool failed to accept incoming fetch request: {e}"
                                    );
                                }
                            }
                            Ok(Some(Frame {
                                data: FrameData::Control(frame::Control::Eof { stream }),
                                ..
                            })) => {
                                if let Some(s) = streams.get(&stream) {
                                    log::debug!(target: "wire", "Received `end-of-file` on stream {stream} from {nid}");

                                    if s.channels.send(ChannelEvent::Eof).is_err() {
                                        log::error!(target: "wire", "Worker is disconnected; cannot send `EOF`");
                                    }
                                } else {
                                    log::debug!(target: "wire", "Ignoring frame on closed or unknown stream {stream}");
                                }
                            }
                            Ok(Some(Frame {
                                data: FrameData::Control(frame::Control::Close { stream }),
                                ..
                            })) => {
                                log::debug!(target: "wire", "Received `close` command for stream {stream} from {nid}");

                                if let Some(s) = streams.unregister(&stream) {
                                    log::debug!(
                                        target: "wire",
                                        "Stream {stream} of {nid} closed with {} byte(s) sent and {} byte(s) received",
                                        s.sent_bytes, s.received_bytes
                                    );
                                    s.channels.close().ok();
                                }
                            }
                            Ok(Some(Frame {
                                data: FrameData::Gossip(msg),
                                ..
                            })) => {
                                metrics.received_gossip_messages += 1;
                                self.service.received_message(*nid, msg);
                            }
                            Ok(Some(Frame {
                                stream,
                                data: FrameData::Git(data),
                                ..
                            })) => {
                                if let Some(s) = streams.get_mut(&stream) {
                                    metrics.received_git_bytes += data.len();

                                    if s.channels.send(ChannelEvent::Data(data)).is_err() {
                                        log::error!(target: "wire", "Worker is disconnected; cannot send data");
                                    }
                                } else {
                                    log::debug!(target: "wire", "Ignoring frame on closed or unknown stream {stream}");
                                }
                            }
                            Ok(None) => {
                                // Buffer is empty, or message isn't complete.
                                break;
                            }
                            Err(e) => {
                                log::error!(target: "wire", "Invalid gossip message from {nid}: {e}");

                                if !inbox.is_empty() {
                                    log::debug!(target: "wire", "Dropping read buffer for {nid} with {} bytes", inbox.len());
                                }
                                self.disconnect(
                                    id,
                                    DisconnectReason::Session(session::Error::Misbehavior),
                                );
                                break;
                            }
                        }
                    }
                } else {
                    log::warn!(target: "wire", "Dropping message from unconnected peer (id={id})");
                }
            }
            SessionEvent::Terminated(err) => {
                self.disconnect(id, DisconnectReason::Connection(Arc::new(err)));
            }
        }
    }

    /// Dispatch an external command to the appropriate handler.
    fn handle_command(&mut self, cmd: Self::Command) {
        match cmd {
            Control::User(cmd) => self.service.command(cmd),
            Control::Worker(result) => self.worker_result(result),
            Control::Flush { remote, stream } => self.flush(remote, stream),
        }
    }

    /// Handle reactor-level errors. Transport disconnects tear down the peer
    /// state and inform the service; poll and listener errors are currently
    /// only logged (see TODOs).
    fn handle_error(
        &mut self,
        err: reactor::Error<NetAccept<WireSession<G>>, NetTransport<WireSession<G>>>,
    ) {
        match err {
            reactor::Error::Poll(err) => {
                // TODO: This should be a fatal error, there's nothing we can do here.
                log::error!(target: "wire", "Can't poll connections: {err}");
            }
            reactor::Error::ListenerDisconnect(id, _) => {
                // TODO: This should be a fatal error, there's nothing we can do here.
                log::error!(target: "wire", "Listener {id} disconnected");
            }
            reactor::Error::TransportDisconnect(id, transport) => {
                let fd = transport.as_raw_fd();
                log::error!(target: "wire", "Peer id={id} (fd={fd}) disconnected");

                // We're dropping the TCP connection here.
                drop(transport);

                // The peer transport is already disconnected and removed from the reactor;
                // therefore there is no need to initiate a disconnection. We simply remove
                // the peer from the map.
                match self.peers.remove(&id) {
                    Some(mut peer) => {
                        if let Peer::Connected { streams, .. } = &mut peer {
                            streams.shutdown();
                        }

                        if let Some(id) = peer.id() {
                            self.service.disconnected(
                                *id,
                                peer.link(),
                                &DisconnectReason::connection(),
                            );
                        } else {
                            log::debug!(target: "wire", "Inbound disconnection before handshake; ignoring..")
                        }
                    }
                    None => self.cleanup(id, fd),
                }
            }
        }
    }

    /// Listener handover is not supported; log and drop.
    fn handover_listener(&mut self, id: ResourceId, _listener: Self::Listener) {
        log::error!(target: "wire", "Listener handover is not supported (id={id})");
    }

    /// Receive a transport back from the reactor after unregistration.
    ///
    /// Expected only for peers in the `Disconnecting` state (or with no peer
    /// entry at all); a handover for a still-connected peer is a logic error
    /// and panics.
    fn handover_transport(&mut self, id: ResourceId, transport: Self::Transport) {
        let fd = transport.as_raw_fd();

        match self.peers.entry(id) {
            Entry::Occupied(e) => {
                match e.get() {
                    Peer::Disconnecting {
                        nid, reason, link, ..
                    } => {
                        log::debug!(target: "wire", "Transport handover for disconnecting peer with id={id} (fd={fd})");

                        // Disconnect TCP stream.
                        drop(transport);

                        // If there is no NID, the service is not aware of the peer.
                        if let Some(nid) = nid {
                            // In the case of a conflicting connection, there will be two resources
                            // for the peer. However, at the service level, there is only one, and
                            // it is identified by NID.
                            //
                            // Therefore, we specify which of the connections we're closing by
                            // passing the `link`.
                            self.service.disconnected(*nid, *link, reason);
                        }
                        e.remove();
                    }
                    Peer::Connected { nid, .. } => {
                        panic!("Wire::handover_transport: Unexpected handover of connected peer {} with id={id} (fd={fd})", nid);
                    }
                }
            }
            Entry::Vacant(_) => self.cleanup(id, fd),
        }
    }
}
impl<D, S, G> Iterator for Wire<D, S, G>
-
where
-
    D: service::Store,
-
    S: WriteStorage + 'static,
-
    G: crypto::signature::Signer<crypto::Signature> + Ecdh<Pk = NodeId> + Clone,
-
{
-
    type Item = Action<G>;
-

-
    fn next(&mut self) -> Option<Self::Item> {
-
        while let Some(ev) = self.service.next() {
-
            match ev {
-
                Io::Write(node_id, msgs) => {
-
                    let (fd, link) = match self.peers.lookup(&node_id) {
-
                        Some((fd, Peer::Connected { link, .. })) => (fd, *link),
-
                        Some((_, peer)) => {
-
                            // If the peer is disconnected by the wire protocol, the service may
-
                            // not be aware of this yet, and may continue to write messages to it.
-
                            log::debug!(target: "wire", "Dropping {} message(s) to {node_id} ({peer:?})", msgs.len());
-
                            continue;
-
                        }
-
                        None => {
-
                            log::error!(target: "wire", "Dropping {} message(s) to {node_id}: unknown peer", msgs.len());
-
                            continue;
-
                        }
-
                    };
-
                    log::trace!(
-
                        target: "wire", "Writing {} message(s) to {}", msgs.len(), node_id
-
                    );
-
                    let mut data = Vec::new();
-
                    let metrics = self.metrics.peer(node_id);
-
                    metrics.sent_gossip_messages += msgs.len();
-

-
                    for msg in msgs {
-
                        Frame::gossip(link, msg)
-
                            .encode(&mut data)
-
                            .expect("in-memory writes never fail");
-
                    }
-
                    metrics.sent_bytes += data.len();
-

-
                    self.actions.push_back(reactor::Action::Send(fd, data));
-
                }
-
                Io::Connect(node_id, addr) => {
-
                    if self.peers.connected().any(|(_, id)| id == &node_id) {
-
                        log::error!(
-
                            target: "wire",
-
                            "Attempt to connect to already connected peer {node_id}"
-
                        );
-
                        // FIXME: The problem here is the session will stay in "initial" state,
-
                        // because it can't transition to attempted.
-
                        continue;
-
                    }
-
                    self.service.attempted(node_id, addr.clone());
-
                    self.metrics.peer(node_id).outbound_connection_attempts += 1;
-

-
                    match dial::<G>(
-
                        addr.to_inner(),
-
                        node_id,
-
                        self.signer.clone().into_inner(),
-
                        self.service.config(),
-
                    )
-
                    .and_then(|session| {
-
                        NetTransport::<WireSession<G>>::with_session(session, Link::Outbound)
-
                    }) {
-
                        Ok(transport) => {
-
                            self.outbound.insert(
-
                                transport.as_raw_fd(),
-
                                Outbound {
-
                                    id: None,
-
                                    nid: node_id,
-
                                    addr: addr.to_inner(),
-
                                },
-
                            );
-
                            log::debug!(
-
                                target: "wire",
-
                                "Registering outbound transport for {node_id} (fd={})..",
-
                                transport.as_raw_fd()
-
                            );
-
                            self.actions
-
                                .push_back(reactor::Action::RegisterTransport(transport));
-
                        }
-
                        Err(err) => {
-
                            log::error!(target: "wire", "Error establishing connection to {addr}: {err}");
-

-
                            self.service.disconnected(
-
                                node_id,
-
                                Link::Outbound,
-
                                &DisconnectReason::Dial(Arc::new(err)),
-
                            );
-
                        }
-
                    }
-
                }
-
                Io::Disconnect(nid, reason) => {
-
                    if let Some((id, Peer::Connected { .. })) = self.peers.lookup(&nid) {
-
                        if let Some((nid, _)) = self.disconnect(id, reason) {
-
                            self.metrics.peer(nid).disconnects += 1;
-
                        }
-
                    } else {
-
                        log::warn!(target: "wire", "Peer {nid} is not connected: ignoring disconnect");
-
                    }
-
                }
-
                Io::Wakeup(d) => {
-
                    self.actions.push_back(reactor::Action::SetTimer(d.into()));
-
                }
-
                Io::Fetch {
-
                    rid,
-
                    remote,
-
                    timeout,
-
                    reader_limit,
-
                    refs_at,
-
                } => {
-
                    log::trace!(target: "wire", "Processing fetch for {rid} from {remote}..");
-

-
                    let Some((fd, Peer::Connected { link, streams, .. })) =
-
                        self.peers.lookup_mut(&remote)
-
                    else {
-
                        // Nb. It's possible that a peer is disconnected while an `Io::Fetch`
-
                        // is in the service's i/o buffer. Since the service may not purge the
-
                        // buffer on disconnect, we should just ignore i/o actions that don't
-
                        // have a connected peer.
-
                        log::error!(target: "wire", "Peer {remote} is not connected: dropping fetch");
-
                        continue;
-
                    };
-
                    let (stream, channels) =
-
                        streams.open(ChannelsConfig::new(timeout).with_reader_limit(reader_limit));
-

-
                    log::debug!(target: "wire", "Opened new stream with id {stream} for {rid} and remote {remote}");
-

-
                    let link = *link;
-
                    let task = Task {
-
                        fetch: FetchRequest::Initiator {
-
                            rid,
-
                            remote,
-
                            refs_at,
-
                        },
-
                        stream,
-
                        channels,
-
                    };
-

-
                    if !self.worker.is_empty() {
-
                        log::warn!(
-
                            target: "wire",
-
                            "Worker pool is busy: {} tasks pending, fetch requests may be delayed", self.worker.len()
-
                        );
-
                    }
-
                    if let Err(e) = self.worker.try_send(task) {
-
                        log::error!(
-
                            target: "wire",
-
                            "Worker pool failed to accept outgoing fetch request: {e}"
-
                        );
-
                    }
-
                    let metrics = self.metrics.peer(remote);
-
                    metrics.streams_opened += 1;
-
                    metrics.sent_fetch_requests += 1;
-

-
                    self.actions.push_back(Action::Send(
-
                        fd,
-
                        Frame::<service::Message>::control(link, frame::Control::Open { stream })
-
                            .to_bytes(),
-
                    ));
-
                }
-
            }
-
        }
-
        self.actions.pop_front()
-
    }
-
}
-

-
/// Establish a new outgoing connection.
-
pub fn dial<G: Ecdh<Pk = NodeId>>(
-
    remote_addr: NetAddr<HostName>,
-
    remote_id: <G as EcSk>::Pk,
-
    signer: G,
-
    config: &service::Config,
-
) -> io::Result<WireSession<G>> {
-
    // Determine what address to establish a TCP connection with, given the remote peer
-
    // address and our node configuration.
-
    let inet_addr: NetAddr<InetHost> = match (&remote_addr.host, config.proxy) {
-
        // For IP and DNS addresses, use the global proxy if set, otherwise use the address as-is.
-
        (HostName::Ip(_), Some(proxy)) => proxy.into(),
-
        (HostName::Ip(ip), None) => NetAddr::new(InetHost::Ip(*ip), remote_addr.port),
-
        (HostName::Dns(_), Some(proxy)) => proxy.into(),
-
        (HostName::Dns(dns), None) => NetAddr::new(InetHost::Dns(dns.clone()), remote_addr.port),
-
        // For onion addresses, handle with care.
-
        (HostName::Tor(onion), proxy) => match config.onion {
-
            // In onion proxy mode, simply use the configured proxy address.
-
            // This takes precedence over any global proxy.
-
            Some(AddressConfig::Proxy { address }) => address.into(),
-
            // In "forward" mode, if a global proxy is set, we use that, otherwise
-
            // we treat `.onion` addresses as regular DNS names.
-
            Some(AddressConfig::Forward) => {
-
                if let Some(proxy) = proxy {
-
                    proxy.into()
-
                } else {
-
                    NetAddr::new(InetHost::Dns(onion.to_string()), remote_addr.port)
-
                }
-
            }
-
            // If onion address support isn't configured, refuse to connect.
-
            None => {
-
                return Err(io::Error::new(
-
                    io::ErrorKind::Unsupported,
-
                    "no configuration found for .onion addresses",
-
                ));
-
            }
-
        },
-
        _ => {
-
            return Err(io::Error::new(
-
                io::ErrorKind::Unsupported,
-
                "unsupported remote address type",
-
            ));
-
        }
-
    };
-
    // Nb. This timeout is currently not used by the underlying library due to the
-
    // `socket2` library not supporting non-blocking connect with timeout.
-
    let connection = net::TcpStream::connect_nonblocking(inet_addr, DEFAULT_DIAL_TIMEOUT)?;
-
    // Whether to tunnel regular connections through the proxy.
-
    let force_proxy = config.proxy.is_some();
-

-
    session::<G>(
-
        remote_addr,
-
        Some(remote_id),
-
        connection,
-
        force_proxy,
-
        signer,
-
    )
-
}
-

-
/// Accept a new connection.
-
pub fn accept<G: Ecdh<Pk = NodeId>>(
-
    remote_addr: NetAddr<HostName>,
-
    connection: net::TcpStream,
-
    signer: G,
-
) -> io::Result<WireSession<G>> {
-
    session::<G>(remote_addr, None, connection, false, signer)
-
}
-

-
/// Create a new [`WireSession`].
-
fn session<G: Ecdh<Pk = NodeId>>(
-
    remote_addr: NetAddr<HostName>,
-
    remote_id: Option<NodeId>,
-
    connection: net::TcpStream,
-
    force_proxy: bool,
-
    signer: G,
-
) -> io::Result<WireSession<G>> {
-
    // There are issues with setting TCP_NODELAY on WSL. Not a big deal.
-
    if let Err(e) = connection.set_nodelay(true) {
-
        log::warn!(target: "wire", "Unable to set TCP_NODELAY on fd {}: {e}", connection.as_raw_fd());
-
    }
-
    connection.set_read_timeout(Some(DEFAULT_CONNECTION_TIMEOUT))?;
-
    connection.set_write_timeout(Some(DEFAULT_CONNECTION_TIMEOUT))?;
-

-
    let sock = socket2::Socket::from(connection);
-
    let ka = socket2::TcpKeepalive::new()
-
        .with_time(time::Duration::from_secs(30))
-
        .with_interval(time::Duration::from_secs(10))
-
        .with_retries(3);
-
    if let Err(e) = sock.set_tcp_keepalive(&ka) {
-
        log::warn!(target: "wire", "Unable to set TCP_KEEPALIVE on fd {}: {e}", sock.as_raw_fd());
-
    }
-

-
    let socks5 = socks5::Socks5::with(remote_addr, force_proxy);
-
    let proxy = Socks5Session::with(sock.into(), socks5);
-
    let pair = G::generate_keypair();
-
    let keyset = Keyset {
-
        e: pair.0,
-
        s: Some(signer),
-
        re: None,
-
        rs: remote_id,
-
    };
-
    let noise = NoiseState::initialize::<{ Sha256::OUTPUT_LEN }>(
-
        NOISE_XK,
-
        remote_id.is_some(),
-
        &[],
-
        keyset,
-
    );
-
    Ok(WireSession::with(proxy, noise))
-
}
-

-
#[cfg(test)]
-
mod test {
-
    use super::*;
-
    use crate::service::{Message, ZeroBytes};
-
    use crate::wire;
-
    use crate::wire::varint;
-

-
    #[test]
-
    fn test_pong_message_with_extension() {
-
        use crate::deserializer;
-

-
        let mut stream = Vec::new();
-
        let pong = Message::Pong {
-
            zeroes: ZeroBytes::new(42),
-
        };
-
        frame::PROTOCOL_VERSION_STRING.encode(&mut stream).unwrap();
-
        frame::StreamId::gossip(Link::Outbound)
-
            .encode(&mut stream)
-
            .unwrap();
-

-
        // Serialize gossip message with some extension fields.
-
        let mut gossip = wire::serialize(&pong);
-
        String::from("extra").encode(&mut gossip).unwrap();
-
        48u8.encode(&mut gossip).unwrap();
-

-
        // Encode gossip message using the varint-prefix format into the stream.
-
        varint::payload::encode(&gossip, &mut stream).unwrap();
-

-
        let mut de = deserializer::Deserializer::<1024, Frame>::new(1024);
-
        de.input(&stream).unwrap();
-

-
        // The "pong" message decodes successfully, even though there is trailing data.
-
        assert_eq!(
-
            de.deserialize_next().unwrap().unwrap(),
-
            Frame::gossip(Link::Outbound, pong)
-
        );
-
        assert!(de.deserialize_next().unwrap().is_none());
-
        assert!(de.is_empty());
-
    }
-

-
    #[test]
-
    fn test_inventory_ann_with_extension() {
-
        use crate::deserializer;
-

-
        #[derive(Debug)]
-
        struct MessageWithExt {
-
            msg: Message,
-
            ext: String,
-
        }
-

-
        impl wire::Encode for MessageWithExt {
-
            fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
-
                let mut n = self.msg.encode(writer)?;
-
                n += self.ext.encode(writer)?;
-

-
                Ok(n)
-
            }
-
        }
-

-
        impl wire::Decode for MessageWithExt {
-
            fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, wire::Error> {
-
                let msg = Message::decode(reader)?;
-
                let ext = String::decode(reader).unwrap_or_default();
-

-
                Ok(MessageWithExt { msg, ext })
-
            }
-
        }
-

-
        let rid = radicle::test::arbitrary::gen(1);
-
        let pk = radicle::test::arbitrary::gen(1);
-
        let sig: [u8; 64] = radicle::test::arbitrary::gen(1);
-

-
        // Message with extension.
-
        let mut stream = Vec::new();
-
        let ann = Message::announcement(
-
            pk,
-
            service::gossip::inventory(radicle::node::Timestamp::MAX, [rid]),
-
            radicle::crypto::Signature::from(sig),
-
        );
-
        let pong = Message::Pong {
-
            zeroes: ZeroBytes::new(42),
-
        };
-
        // Framed message with extension.
-
        frame::Frame::gossip(
-
            Link::Outbound,
-
            MessageWithExt {
-
                msg: ann.clone(),
-
                ext: String::from("extra"),
-
            },
-
        )
-
        .encode(&mut stream)
-
        .unwrap();
-
        // Pong message that comes after, without extension.
-
        frame::Frame::gossip(Link::Outbound, pong.clone())
-
            .encode(&mut stream)
-
            .unwrap();
-

-
        // First test deserializing using the message with extension type.
-
        {
-
            let mut de = deserializer::Deserializer::<1024, Frame<MessageWithExt>>::new(1024);
-
            de.input(&stream).unwrap();
-

-
            radicle::assert_matches!(
-
                de.deserialize_next().unwrap().unwrap().data,
-
                FrameData::Gossip(MessageWithExt {
-
                    msg,
-
                    ext,
-
                }) if msg == ann && ext == *"extra"
-
            );
-
            radicle::assert_matches!(
-
                de.deserialize_next().unwrap().unwrap().data,
-
                FrameData::Gossip(MessageWithExt {
-
                    msg,
-
                    ext,
-
                }) if msg == pong && ext.is_empty()
-
            );
-
            assert!(de.deserialize_next().unwrap().is_none());
-
            assert!(de.is_empty());
-
        }
-

-
        // Then test deserializing using the current message type without the extension.
-
        {
-
            let mut de = deserializer::Deserializer::<1024, Frame<Message>>::new(1024);
-
            de.input(&stream).unwrap();
-

-
            radicle::assert_matches!(
-
                de.deserialize_next().unwrap().unwrap().data,
-
                FrameData::Gossip(msg)
-
                if msg == ann
-
            );
-
            radicle::assert_matches!(
-
                de.deserialize_next().unwrap().unwrap().data,
-
                FrameData::Gossip(msg)
-
                if msg == pong
-
            );
-
            assert!(de.deserialize_next().unwrap().is_none());
-
            assert!(de.is_empty());
-
        }
-
    }
-
}
deleted crates/radicle-node/src/wire/varint.rs
@@ -1,254 +0,0 @@
-
//! Variable-length integer implementation based on QUIC.
-
#![warn(clippy::missing_docs_in_private_items)]
-

-
// This implementation is largely based on the `quinn` crate.
-
// Copyright (c) 2018 The quinn developers.
-
use std::{fmt, io, ops};
-

-
use byteorder::ReadBytesExt;
-
use thiserror::Error;
-

-
use crate::wire;
-
use crate::wire::{Decode, Encode};
-

-
/// An integer less than 2^62
-
///
-
/// Based on QUIC variable-length integers (RFC 9000).
-
///
-
/// > The QUIC variable-length integer encoding reserves the two most significant bits of the first
-
/// > byte to encode the base-2 logarithm of the integer encoding length in bytes. The integer value is
-
/// > encoded on the remaining bits, in network byte order. This means that integers are encoded on 1,
-
/// > 2, 4, or 8 bytes and can encode 6-, 14-, 30-, or 62-bit values, respectively. Table 4 summarizes
-
/// > the encoding properties.
-
///
-
/// ```text
-
/// MSB   Length   Usable Bits   Range
-
/// ----------------------------------------------------
-
/// 00    1        6             0 - 63
-
/// 01    2        14            0 - 16383
-
/// 10    4        30            0 - 1073741823
-
/// 11    8        62            0 - 4611686018427387903
-
/// ```
-
#[derive(Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
-
pub struct VarInt(pub(crate) u64);
-

-
impl VarInt {
-
    /// The largest representable value.
-
    pub const MAX: VarInt = VarInt((1 << 62) - 1);
-

-
    /// Succeeds iff `x` < 2^62.
-
    pub fn new(x: u64) -> Result<Self, BoundsExceeded> {
-
        if x <= Self::MAX.0 {
-
            Ok(Self(x))
-
        } else {
-
            Err(BoundsExceeded)
-
        }
-
    }
-
}
-

-
impl ops::Deref for VarInt {
-
    type Target = u64;
-

-
    fn deref(&self) -> &Self::Target {
-
        &self.0
-
    }
-
}
-

-
impl From<u8> for VarInt {
-
    fn from(x: u8) -> Self {
-
        VarInt(x.into())
-
    }
-
}
-

-
impl From<u16> for VarInt {
-
    fn from(x: u16) -> Self {
-
        VarInt(x.into())
-
    }
-
}
-

-
impl From<u32> for VarInt {
-
    fn from(x: u32) -> Self {
-
        VarInt(x.into())
-
    }
-
}
-

-
impl std::convert::TryFrom<u64> for VarInt {
-
    type Error = BoundsExceeded;
-
    /// Succeeds iff `x` < 2^62.
-
    fn try_from(x: u64) -> Result<Self, BoundsExceeded> {
-
        VarInt::new(x)
-
    }
-
}
-

-
impl fmt::Debug for VarInt {
-
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-
        self.0.fmt(f)
-
    }
-
}
-

-
impl fmt::Display for VarInt {
-
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-
        self.0.fmt(f)
-
    }
-
}
-

-
/// Error returned when constructing a `VarInt` from a value >= 2^62.
-
#[derive(Debug, Copy, Clone, Eq, PartialEq, Error)]
-
#[error("value too large for varint encoding")]
-
pub struct BoundsExceeded;
-

-
impl Decode for VarInt {
-
    fn decode<R: io::Read + ?Sized>(r: &mut R) -> Result<Self, wire::Error> {
-
        let mut buf = [0; 8];
-
        buf[0] = r.read_u8()?;
-

-
        // Integer length.
-
        let tag = buf[0] >> 6;
-
        buf[0] &= 0b0011_1111;
-

-
        let x = match tag {
-
            0b00 => u64::from(buf[0]),
-
            0b01 => {
-
                r.read_exact(&mut buf[1..2])?;
-
                u64::from(u16::from_be_bytes([buf[0], buf[1]]))
-
            }
-
            0b10 => {
-
                r.read_exact(&mut buf[1..4])?;
-
                u64::from(u32::from_be_bytes([buf[0], buf[1], buf[2], buf[3]]))
-
            }
-
            0b11 => {
-
                r.read_exact(&mut buf[1..8])?;
-
                u64::from_be_bytes(buf)
-
            }
-
            // SAFETY: It should be obvious that we can't have any other bit pattern
-
            // than the above, since all other bits are zeroed.
-
            _ => unreachable! {},
-
        };
-
        Ok(Self(x))
-
    }
-
}
-

-
impl Encode for VarInt {
-
    fn encode<W: io::Write + ?Sized>(&self, w: &mut W) -> io::Result<usize> {
-
        let x: u64 = self.0;
-

-
        if x < 2u64.pow(6) {
-
            (x as u8).encode(w)
-
        } else if x < 2u64.pow(14) {
-
            ((0b01 << 14) | x as u16).encode(w)
-
        } else if x < 2u64.pow(30) {
-
            ((0b10 << 30) | x as u32).encode(w)
-
        } else if x < 2u64.pow(62) {
-
            ((0b11 << 62) | x).encode(w)
-
        } else {
-
            panic!("VarInt::encode: integer overflow");
-
        }
-
    }
-
}
-

-
/// Encoding and decoding varint-prefixed payloads.
-
pub mod payload {
-
    use super::*;
-

-
    /// Encode varint-prefixed data payload.
-
    pub fn encode<W: io::Write + ?Sized>(payload: &[u8], writer: &mut W) -> io::Result<usize> {
-
        let mut n = 0;
-
        let len = payload.len();
-
        let varint =
-
            VarInt::new(len as u64).map_err(|_| io::Error::from(io::ErrorKind::InvalidInput))?;
-

-
        n += varint.encode(writer)?; // The length of the payload length.
-
        n += len; // The length of the data payload itself.
-

-
        writer.write_all(payload)?;
-

-
        Ok(n)
-
    }
-

-
    /// Decode varint-prefixed data payload.
-
    pub fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Vec<u8>, wire::Error> {
-
        let size = VarInt::decode(reader)?;
-
        let mut data = vec![0; *size as usize];
-
        reader.read_exact(&mut data[..])?;
-

-
        Ok(data)
-
    }
-
}
-

-
#[cfg(test)]
-
mod test {
-
    use super::*;
-
    use qcheck_macros::quickcheck;
-

-
    impl qcheck::Arbitrary for VarInt {
-
        fn arbitrary(g: &mut qcheck::Gen) -> Self {
-
            let a = u16::arbitrary(g) as u64;
-
            let b = u32::arbitrary(g) as u64;
-
            let n = g
-
                .choose(&[
-
                    0,
-
                    1,
-
                    3,
-
                    7,
-
                    13,
-
                    37,
-
                    255,
-
                    4931,
-
                    54019,
-
                    69149,
-
                    151288809941952652,
-
                    u8::MAX as u64,
-
                    u16::MAX as u64,
-
                    u16::MAX as u64 - 1,
-
                    u32::MAX as u64,
-
                    u32::MAX as u64 - 1,
-
                    *Self::MAX,
-
                    a,
-
                    b,
-
                ])
-
                .copied()
-
                .unwrap();
-

-
            Self(n)
-
        }
-
    }
-

-
    #[quickcheck]
-
    fn prop_encode_decode(input: VarInt) {
-
        let encoded = wire::serialize(&input);
-
        let decoded: VarInt = wire::deserialize(&encoded).unwrap();
-

-
        assert_eq!(decoded, input);
-
    }
-

-
    #[test]
-
    #[should_panic]
-
    fn test_encode_overflow() {
-
        wire::serialize(&VarInt(u64::MAX));
-
    }
-

-
    #[test]
-
    fn test_encoding() {
-
        assert_eq!(wire::serialize(&VarInt(0)), vec![0x0]);
-
        assert_eq!(wire::serialize(&VarInt(1)), vec![0x01]);
-
        assert_eq!(wire::serialize(&VarInt(10)), vec![0x0a]);
-
        assert_eq!(wire::serialize(&VarInt(37)), vec![0x25]);
-
        assert_eq!(
-
            wire::deserialize::<VarInt>(&[0x40, 0x25]).unwrap(),
-
            VarInt(37)
-
        );
-
        assert_eq!(wire::serialize(&VarInt(15293)), vec![0x7b, 0xbd]);
-
        assert_eq!(
-
            wire::serialize(&VarInt(494878333)),
-
            vec![0x9d, 0x7f, 0x3e, 0x7d],
-
        );
-
        assert_eq!(
-
            wire::serialize(&VarInt(151288809941952652)),
-
            vec![0xc2, 0x19, 0x7c, 0x5e, 0xff, 0x14, 0xe8, 0x8c]
-
        );
-
        assert_eq!(
-
            wire::serialize(&VarInt(10000000000)),
-
            vec![0xc0, 0x00, 0x00, 0x02, 0x54, 0x0b, 0xe4, 0x00],
-
        );
-
    }
-
}
modified crates/radicle-node/src/worker.rs
@@ -5,22 +5,25 @@ mod upload_pack;
pub mod fetch;
pub mod garbage;

-
use std::io;
use std::path::PathBuf;

use crossbeam_channel as chan;

use radicle::identity::RepoId;
-
use radicle::node::{notifications, Event};
+
use radicle::node::notifications;
+
use radicle::node::policy::config as policy;
+
use radicle::node::policy::config::SeedingPolicy;
use radicle::prelude::NodeId;
use radicle::storage::refs::RefsAt;
use radicle::storage::{ReadRepository, ReadStorage};
use radicle::{cob, crypto, Storage};
use radicle_fetch::FetchLimit;

-
use crate::runtime::{thread, Emitter, Handle};
-
use crate::service::policy;
-
use crate::service::policy::SeedingPolicy;
+
pub use radicle_protocol::worker::{
+
    AuthorizationError, FetchError, FetchRequest, FetchResult, UploadError,
+
};
+

+
use crate::runtime::{thread, Handle};
use crate::wire::StreamId;

pub use channels::{ChannelEvent, Channels, ChannelsConfig};
@@ -39,110 +42,6 @@ pub struct Config {
    pub policies_db: PathBuf,
}

-
/// Error returned by fetch.
-
#[derive(thiserror::Error, Debug)]
-
pub enum FetchError {
-
    #[error("the 'git fetch' command failed with exit code '{code}'")]
-
    CommandFailed { code: i32 },
-
    #[error(transparent)]
-
    Io(#[from] io::Error),
-
    #[error(transparent)]
-
    Fetch(#[from] fetch::error::Fetch),
-
    #[error(transparent)]
-
    Handle(#[from] fetch::error::Handle),
-
    #[error(transparent)]
-
    Storage(#[from] radicle::storage::Error),
-
    #[error(transparent)]
-
    PolicyStore(#[from] radicle::node::policy::store::Error),
-
    #[error(transparent)]
-
    Policy(#[from] radicle_fetch::policy::error::Policy),
-
    #[error(transparent)]
-
    Blocked(#[from] radicle_fetch::policy::error::Blocked),
-
}
-

-
impl FetchError {
-
    /// Check if it's a timeout error.
-
    pub fn is_timeout(&self) -> bool {
-
        matches!(self, FetchError::Io(e) if e.kind() == io::ErrorKind::TimedOut)
-
    }
-
}
-

-
/// Error returned by fetch responder.
-
#[derive(thiserror::Error, Debug)]
-
pub enum UploadError {
-
    #[error("error parsing git command packet-line: {0}")]
-
    PacketLine(io::Error),
-
    #[error("error while performing git upload-pack: {0}")]
-
    UploadPack(io::Error),
-
    #[error(transparent)]
-
    Authorization(#[from] AuthorizationError),
-
}
-

-
#[derive(thiserror::Error, Debug)]
-
pub enum AuthorizationError {
-
    #[error("{0} is not authorized to fetch {1}")]
-
    Unauthorized(NodeId, RepoId),
-
    #[error(transparent)]
-
    PolicyStore(#[from] radicle::node::policy::store::Error),
-
    #[error(transparent)]
-
    Repository(#[from] radicle::storage::RepositoryError),
-
}
-

-
impl UploadError {
-
    /// Check if it's an end-of-file error.
-
    pub fn is_eof(&self) -> bool {
-
        matches!(self, UploadError::UploadPack(e) if e.kind() == io::ErrorKind::UnexpectedEof)
-
    }
-
}
-

-
/// Fetch job sent to worker thread.
-
#[derive(Debug, Clone)]
-
pub enum FetchRequest {
-
    /// Client is initiating a fetch for the repository identified by
-
    /// `rid` from the peer identified by `remote`.
-
    Initiator {
-
        /// Repo to fetch.
-
        rid: RepoId,
-
        /// Remote peer we are interacting with.
-
        remote: NodeId,
-
        /// If this fetch is for a particular set of `rad/sigrefs`.
-
        refs_at: Option<Vec<RefsAt>>,
-
    },
-
    /// Server is responding to a fetch request by uploading the
-
    /// specified `refspecs` sent by the client.
-
    Responder {
-
        /// Remote peer we are interacting with.
-
        remote: NodeId,
-
        /// Reporter for upload-pack progress.
-
        emitter: Emitter<Event>,
-
    },
-
}
-

-
impl FetchRequest {
-
    pub fn remote(&self) -> NodeId {
-
        match self {
-
            Self::Initiator { remote, .. } | Self::Responder { remote, .. } => *remote,
-
        }
-
    }
-
}
-

-
/// Fetch result of an upload or fetch.
-
#[derive(Debug)]
-
pub enum FetchResult {
-
    Initiator {
-
        /// Repo fetched.
-
        rid: RepoId,
-
        /// Fetch result, including remotes fetched.
-
        result: Result<fetch::FetchResult, FetchError>,
-
    },
-
    Responder {
-
        /// Repo requested.
-
        rid: Option<RepoId>,
-
        /// Upload result.
-
        result: Result<(), UploadError>,
-
    },
-
}
-

/// Task to be accomplished on a worker thread.
/// This is either going to be an outgoing or incoming fetch.
pub struct Task {
modified crates/radicle-node/src/worker/fetch.rs
@@ -1,6 +1,5 @@
-
pub mod error;
+
pub(crate) use radicle_protocol::worker::fetch::error;

-
use std::collections::HashSet;
use std::str::FromStr;

use localtime::LocalTime;
@@ -8,7 +7,6 @@ use localtime::LocalTime;
use radicle::cob::TypedId;
use radicle::crypto::PublicKey;
use radicle::identity::crefs::GetCanonicalRefs as _;
-
use radicle::identity::DocAt;
use radicle::prelude::NodeId;
use radicle::prelude::RepoId;
use radicle::storage::git::Repository;
@@ -20,32 +18,10 @@ use radicle::storage::{
use radicle::{cob, git, node, Storage};
use radicle_fetch::git::refs::Applied;
use radicle_fetch::{Allowed, BlockList, FetchLimit};
+
pub use radicle_protocol::worker::fetch::FetchResult;

use super::channels::ChannelsFlush;

-
#[derive(Debug, Clone)]
-
pub struct FetchResult {
-
    /// The set of updated references.
-
    pub updated: Vec<RefUpdate>,
-
    /// The set of remote namespaces that were updated.
-
    pub namespaces: HashSet<PublicKey>,
-
    /// The fetch was a full clone.
-
    pub clone: bool,
-
    /// Identity doc of fetched repo.
-
    pub doc: DocAt,
-
}
-

-
impl FetchResult {
-
    pub fn new(doc: DocAt) -> Self {
-
        Self {
-
            updated: vec![],
-
            namespaces: HashSet::new(),
-
            clone: false,
-
            doc,
-
        }
-
    }
-
}
-

pub enum Handle {
    Clone {
        handle: radicle_fetch::Handle<ChannelsFlush>,
deleted crates/radicle-node/src/worker/fetch/error.rs
@@ -1,75 +0,0 @@
-
use std::io;
-

-
use thiserror::Error;
-

-
use radicle::{cob, git, identity, storage};
-
use radicle_fetch as fetch;
-

-
#[derive(Debug, Error)]
-
pub enum Fetch {
-
    #[error(transparent)]
-
    Run(#[from] fetch::Error),
-
    #[error(transparent)]
-
    Git(#[from] git::raw::Error),
-
    #[error(transparent)]
-
    Storage(#[from] storage::Error),
-
    #[error(transparent)]
-
    StorageCopy(#[from] io::Error),
-
    #[error(transparent)]
-
    Repository(#[from] radicle::storage::RepositoryError),
-
    #[error(transparent)]
-
    RefsDb(#[from] radicle::node::refs::Error),
-
    #[error("validation of the storage repository failed: the delegates {delegates:?} failed to validate to meet a threshold of {threshold}")]
-
    Validation {
-
        threshold: usize,
-
        delegates: Vec<String>,
-
    },
-
    #[error(transparent)]
-
    Cache(#[from] Cache),
-
}
-

-
#[derive(Debug, Error)]
-
pub enum Cache {
-
    #[error(transparent)]
-
    Parse(#[from] cob::ParseIdentifierError),
-
    #[error(transparent)]
-
    Repository(#[from] storage::RepositoryError),
-
    #[error("failed to remove {type_name} '{id}' from cache: {err}")]
-
    Remove {
-
        id: cob::ObjectId,
-
        type_name: cob::TypeName,
-
        #[source]
-
        err: Box<dyn std::error::Error + Send + Sync + 'static>,
-
    },
-
    #[error(transparent)]
-
    Store(#[from] cob::store::Error),
-
    #[error("failed to update {type_name} '{id}' in cache: {err}")]
-
    Update {
-
        id: cob::ObjectId,
-
        type_name: cob::TypeName,
-
        #[source]
-
        err: Box<dyn std::error::Error + Send + Sync + 'static>,
-
    },
-
}
-

-
#[derive(Debug, Error)]
-
pub enum Handle {
-
    #[error(transparent)]
-
    Doc(#[from] identity::DocError),
-
    #[error(transparent)]
-
    Io(#[from] io::Error),
-
    #[error(transparent)]
-
    Init(#[from] fetch::handle::error::Init),
-
    #[error(transparent)]
-
    Storage(#[from] storage::Error),
-
    #[error(transparent)]
-
    Repository(#[from] radicle::storage::RepositoryError),
-
}
-

-
#[derive(Debug, Error)]
-
pub enum Canonical {
-
    #[error(transparent)]
-
    Identity(#[from] radicle::storage::RepositoryError),
-
    #[error(transparent)]
-
    CanonicalRefs(#[from] radicle::identity::doc::CanonicalRefsError),
-
}
added crates/radicle-protocol/Cargo.toml
@@ -0,0 +1,37 @@
+
[package]
name = "radicle-protocol"
description = "The Radicle Protocol"
homepage.workspace = true
license.workspace = true
version = "0.1.0"
authors = ["Radicle Team <team@radicle.xyz>"]
edition.workspace = true
rust-version.workspace = true

[features]
# Test utilities: pulls in the optional `qcheck` dependency and enables the
# test features of `radicle` and `radicle-crypto`.
test = ["radicle/test", "radicle-crypto/test", "radicle-crypto/cyphernet", "qcheck"]

[dependencies]
bloomy = "1.2"
bytes = { workspace = true }
crossbeam-channel = { workspace = true }
cyphernet = { workspace = true, features = ["tor"] }
fastrand = { workspace = true }
log = { workspace = true, features = ["std"] }
localtime = { workspace = true }
nonempty = { workspace = true, features = ["serialize"] }
# Optional: only built when the `test` feature is enabled.
qcheck = { workspace = true, optional = true }
radicle = { workspace = true, features = ["logger"] }
radicle-fetch = { workspace = true }
radicle-git-ext = { workspace = true, features = ["serde"] }
sqlite = { workspace = true, features = ["bundled"] }
scrypt = { version = "0.11.0", default-features = false }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true, features = ["preserve_order"] }
thiserror = { workspace = true }

[dev-dependencies]
qcheck = { workspace = true }
qcheck-macros = { workspace = true }
radicle = { workspace = true, features = ["test"] }
radicle-crypto = { workspace = true, features = ["test", "cyphernet"] }
added crates/radicle-protocol/src/bounded.rs
@@ -0,0 +1,298 @@
+
use std::{
+
    collections::BTreeSet,
+
    ops::{self, RangeBounds},
+
};
+

+
/// Errors returned by the bounded collections in this module.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    /// A size or length exceeded the container's upper bound.
    #[error("invalid size: expected {expected}, got {actual}")]
    InvalidSize { expected: usize, actual: usize },
}
+

+
/// A vector with an upper limit on its size using type level constants.
///
/// The bound `N` is enforced by all fallible constructors and mutators, so
/// the wrapped vector is never grown past `N` elements through this API.
#[derive(Default, Clone, PartialEq, Eq)]
pub struct BoundedVec<T, const N: usize> {
    // Invariant: `v.len() <= N`.
    v: Vec<T>,
}
+

+
impl<T, const N: usize> BoundedVec<T, N> {
    /// Create a new empty `BoundedVec<T,N>`.
    ///
    /// Storage for `N` elements is allocated upfront, so pushes within the
    /// bound never reallocate.
    pub fn new() -> Self {
        BoundedVec {
            v: Vec::with_capacity(N),
        }
    }

    /// Build a `BoundedVec` by consuming from the given iterator up to its limit.
    ///
    /// Items beyond the limit are left in the iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use radicle_protocol::bounded;
    ///
    /// let mut iter = 0..4;
    /// let bounded: bounded::BoundedVec<i32,3> = bounded::BoundedVec::collect_from(&mut iter);
    ///
    /// assert_eq!(bounded.len(), 3);
    /// assert_eq!(iter.count(), 1);
    /// ```
    pub fn collect_from<I: IntoIterator<Item = T>>(iter: I) -> Self {
        BoundedVec {
            v: iter.into_iter().take(N).collect(),
        }
    }

    /// Create a new `BoundedVec<T,N>` which takes up to the first N values of its argument,
    /// taking ownership. Excess elements are dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// use radicle_protocol::bounded;
    ///
    /// let vec = vec![1, 2, 3];
    /// let bounded = bounded::BoundedVec::<_, 2>::truncate(vec);
    /// assert_eq!(bounded.len(), 2);
    /// ```
    pub fn truncate(mut v: Vec<T>) -> Self {
        v.truncate(N);
        BoundedVec { v }
    }

    /// Like [`Vec::with_capacity`] but returns an error if the allocation size exceeds the limit.
    ///
    /// # Examples
    ///
    /// ```
    /// use radicle_protocol::bounded;
    ///
    /// let vec = bounded::BoundedVec::<i32, 11>::with_capacity(10).unwrap();
    ///
    /// // The vector contains no items, even though it has capacity for more
    /// assert_eq!(vec.len(), 0);
    /// assert!(vec.capacity() >= 10);
    ///
    /// // A vector with a capacity over its limit will result in error.
    /// let vec_res = bounded::BoundedVec::<i32, 10>::with_capacity(11);
    /// assert!(vec_res.is_err());
    /// ```
    #[inline]
    pub fn with_capacity(capacity: usize) -> Result<Self, Error> {
        if capacity > N {
            return Err(Error::InvalidSize {
                expected: N,
                actual: capacity,
            });
        }
        Ok(Self {
            v: Vec::with_capacity(capacity),
        })
    }

    /// Return the maximum number of elements BoundedVec can contain.
    ///
    /// # Examples
    ///
    /// ```
    /// use radicle_protocol::bounded;
    ///
    /// type Inventory = bounded::BoundedVec<(), 10>;
    /// assert_eq!(Inventory::max(), 10);
    /// ```
    #[inline]
    pub fn max() -> usize {
        N
    }

    /// Extracts a slice containing the entire bounded vector.
    #[inline]
    pub fn as_slice(&self) -> &[T] {
        self.v.as_slice()
    }

    /// Returns the number of elements the bounded vector can hold without reallocating.
    pub fn capacity(&self) -> usize {
        self.v.capacity()
    }

    /// Like [`Vec::push`] but returns an error if the limit is exceeded.
    /// On error, the item is dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// use radicle_protocol::bounded;
    ///
    /// let mut vec: bounded::BoundedVec<_,3> = vec![1, 2].try_into().unwrap();
    /// vec.push(3).expect("within limit");
    /// assert_eq!(vec, vec![1, 2, 3].try_into().unwrap());
    ///
    /// // ...but this will exceed its limit, returning an error.
    /// vec.push(4).expect_err("limit exceeded");
    /// assert_eq!(vec.len(), 3);
    /// ```
    #[inline]
    pub fn push(&mut self, item: T) -> Result<(), Error> {
        if self.len() >= N {
            return Err(Error::InvalidSize {
                expected: N,
                actual: N + 1,
            });
        }
        self.v.push(item);
        Ok(())
    }

    /// Return the underlying vector without an upper limit.
    ///
    /// # Examples
    ///
    /// ```
    /// use radicle_protocol::bounded;
    ///
    /// let bounded: bounded::BoundedVec<_,3> = vec![1, 2, 3].try_into().unwrap();
    /// let mut vec = bounded.unbound();
    ///
    /// vec.push(4);
    /// assert_eq!(vec.len(), 4);
    /// ```
    pub fn unbound(self) -> Vec<T> {
        self.v
    }

    /// Remove and return the elements in `range` as an iterator; see
    /// [`Vec::drain`]. Panics if the range is out of bounds.
    pub fn drain<R: RangeBounds<usize>>(&mut self, range: R) -> std::vec::Drain<T> {
        self.v.drain(range)
    }
}
+

+
impl<T: Clone, const N: usize> BoundedVec<T, N> {
+
    /// Like [`Vec::extend_from_slice`] but returns an error if out of bounds.
+
    pub fn extend_from_slice(&mut self, slice: &[T]) -> Result<(), Error> {
+
        if self.len() + slice.len() > N {
+
            return Err(Error::InvalidSize {
+
                expected: N,
+
                actual: self.len() + slice.len(),
+
            });
+
        }
+
        self.v.extend_from_slice(slice);
+

+
        Ok(())
+
    }
+
}
+

+
impl<T, const N: usize> ops::Deref for BoundedVec<T, N> {
+
    type Target = [T];
+

+
    fn deref(&self) -> &Self::Target {
+
        self.v.as_slice()
+
    }
+
}
+

+
impl<T, const N: usize> From<Option<T>> for BoundedVec<T, N> {
+
    fn from(value: Option<T>) -> Self {
+
        let v = match value {
+
            None => vec![],
+
            Some(v) => vec![v],
+
        };
+
        BoundedVec { v }
+
    }
+
}
+

+
impl<T, const N: usize> TryFrom<Vec<T>> for BoundedVec<T, N> {
+
    type Error = Error;
+

+
    fn try_from(value: Vec<T>) -> Result<Self, Self::Error> {
+
        if value.len() > N {
+
            return Err(Error::InvalidSize {
+
                expected: N,
+
                actual: value.len(),
+
            });
+
        }
+
        Ok(BoundedVec { v: value })
+
    }
+
}
+

+
impl<T, const N: usize> TryFrom<BTreeSet<T>> for BoundedVec<T, N> {
+
    type Error = Error;
+

+
    fn try_from(value: BTreeSet<T>) -> Result<Self, Self::Error> {
+
        if value.len() > N {
+
            return Err(Error::InvalidSize {
+
                expected: N,
+
                actual: value.len(),
+
            });
+
        }
+
        Ok(BoundedVec {
+
            v: value.into_iter().collect(),
+
        })
+
    }
+
}
+

+
impl<T, const N: usize> From<BoundedVec<T, N>> for Vec<T> {
    /// Unwraps the inner vector, discarding the bound.
    fn from(value: BoundedVec<T, N>) -> Self {
        value.unbound()
    }
}
+

+
impl<T: std::fmt::Debug, const N: usize> std::fmt::Debug for BoundedVec<T, N> {
+
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+
        self.v.fmt(f)
+
    }
+
}
+

+
// SAFETY: the buffer is backed by `Vec`; `advance_mut` only extends the
// length over bytes the caller obtained (and initialized) via `chunk_mut`.
unsafe impl<const N: usize> bytes::BufMut for BoundedVec<u8, N> {
    fn remaining_mut(&self) -> usize {
        N - self.v.len()
    }

    unsafe fn advance_mut(&mut self, cnt: usize) {
        let len = {
            let len = self.v.len();
            let remaining = N - len;

            if remaining >= cnt {
                len + cnt
            } else {
                panic!("advance out of bounds: have {remaining} remaining, but advancing by {cnt}",);
            }
        };

        debug_assert!(len <= N);

        // Addition will not overflow since the sum is at most the capacity.
        self.v.set_len(len);
    }

    fn chunk_mut(&mut self) -> &mut bytes::buf::UninitSlice {
        let len = self.v.len();

        // If the vector is full, grow it -- roughly doubling -- but never
        // beyond the limit. We request at least one byte so that a vector
        // starting with zero capacity (e.g. constructed via `Default`) still
        // makes progress: `BufMut` requires a non-empty chunk whenever
        // `remaining_mut() > 0`. (Previously, `reserve(min(0, N))` was a
        // no-op, returning an empty chunk forever.)
        if self.v.capacity() == len && len < N {
            self.v.reserve(std::cmp::min(std::cmp::max(len, 1), N - len));
        }

        // `Vec::reserve` may round the allocation up past `N`; clamp so we
        // never hand out more writable bytes than the bound allows.
        let cap = self.v.capacity().min(N);

        debug_assert!(len <= cap);

        let ptr = self.v.as_mut_ptr();

        // SAFETY: Since `ptr` is valid for at least `cap` bytes, `ptr.add(len)`
        // must be valid for `cap - len` bytes. The subtraction will not
        // underflow since `len <= cap`.
        unsafe { bytes::buf::UninitSlice::from_raw_parts_mut(ptr.add(len), cap - len) }
    }
}
+

+
#[cfg(any(test, feature = "test"))]
impl<T, const N: usize> qcheck::Arbitrary for BoundedVec<T, N>
where
    T: qcheck::Arbitrary + Eq,
{
    /// Generates an arbitrary vector and truncates it to the bound `N`.
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
        let items: Vec<T> = qcheck::Arbitrary::arbitrary(g);

        Self::truncate(items)
    }
}
added crates/radicle-protocol/src/deserializer.rs
@@ -0,0 +1,177 @@
+
use std::io;
+
use std::marker::PhantomData;
+

+
use crate::bounded;
+
use crate::bounded::BoundedVec;
+
use crate::service::message::Message;
+
use crate::wire;
+

+
/// Message stream deserializer.
///
/// Used to for example turn a byte stream into network messages.
///
/// `B` is the maximum number of unparsed bytes buffered at any time, and `D`
/// is the item type decoded from the stream ([`Message`] by default).
#[derive(Debug)]
pub struct Deserializer<const B: usize, D = Message> {
    // Bytes received but not yet decoded into a `D`.
    unparsed: BoundedVec<u8, B>,
    // Marker for the decoded item type; no `D` values are stored.
    item: PhantomData<D>,
}
+

+
impl<const B: usize, D: wire::Decode> Default for Deserializer<B, D> {
    /// Creates a deserializer sized for one maximum-size wire frame.
    ///
    /// NOTE(review): this panics (via [`Deserializer::new`]) if
    /// `wire::Size::MAX as usize + 1` exceeds the bound `B` — confirm all
    /// instantiations pick a sufficiently large `B`.
    fn default() -> Self {
        Self::new(wire::Size::MAX as usize + 1)
    }
}
+

+
impl<const B: usize, D> TryFrom<Vec<u8>> for Deserializer<B, D> {
+
    type Error = bounded::Error;
+

+
    fn try_from(unparsed: Vec<u8>) -> Result<Self, Self::Error> {
+
        BoundedVec::try_from(unparsed).map(|unparsed| Self {
+
            unparsed,
+
            item: PhantomData,
+
        })
+
    }
+
}
+

+
impl<const B: usize, D: wire::Decode> Deserializer<B, D> {
    /// Create a new stream decoder.
    ///
    /// # Panics
    ///
    /// Panics if `capacity` exceeds the buffer bound `B`.
    pub fn new(capacity: usize) -> Self {
        Self {
            unparsed: BoundedVec::with_capacity(capacity)
                .expect("Deserializer::new: capacity exceeds maximum"),
            item: PhantomData,
        }
    }

    /// Input bytes into the decoder.
    ///
    /// Fails without buffering anything if the unparsed buffer would grow
    /// beyond its bound `B`.
    pub fn input(&mut self, bytes: &[u8]) -> Result<(), bounded::Error> {
        self.unparsed.extend_from_slice(bytes)
    }

    /// Decode and return the next message. Returns [`None`] if nothing was decoded.
    ///
    /// Bytes are removed from the buffer only once a full message decodes; a
    /// partial message ([`wire::Error::UnexpectedEnd`]) leaves the buffer
    /// intact so decoding can resume when more input arrives.
    pub fn deserialize_next(&mut self) -> Result<Option<D>, wire::Error> {
        let mut reader = io::Cursor::new(self.unparsed.as_slice());

        match D::decode(&mut reader) {
            Ok(msg) => {
                // Drop exactly the bytes the decoder consumed.
                let pos = reader.position() as usize;
                self.unparsed.drain(..pos);

                Ok(Some(msg))
            }
            Err(wire::Error::UnexpectedEnd { .. }) => Ok(None),
            Err(err) => Err(err),
        }
    }

    /// Drain the unparsed buffer, leaving it empty.
    pub fn unparsed(&mut self) -> impl ExactSizeIterator<Item = u8> + '_ {
        self.unparsed.drain(..)
    }

    /// Return whether the unparsed buffer is empty.
    pub fn is_empty(&self) -> bool {
        self.unparsed.is_empty()
    }

    /// Return the size of the unparsed data.
    pub fn len(&self) -> usize {
        self.unparsed.len()
    }
}
+

+
// SAFETY: all methods delegate directly to the inner `BoundedVec<u8, B>`
// buffer, which upholds the `BufMut` contract.
unsafe impl<const B: usize, D: wire::Decode> bytes::BufMut for Deserializer<B, D> {
    fn remaining_mut(&self) -> usize {
        self.unparsed.remaining_mut()
    }

    unsafe fn advance_mut(&mut self, cnt: usize) {
        self.unparsed.advance_mut(cnt);
    }

    fn chunk_mut(&mut self) -> &mut bytes::buf::UninitSlice {
        self.unparsed.chunk_mut()
    }
}
+

+
impl<const B: usize, D: wire::Decode> io::Write for Deserializer<B, D> {
    /// Feeds `buf` into the deserializer's internal buffer.
    ///
    /// Returns an error of kind [`io::ErrorKind::OutOfMemory`] when the input
    /// would overflow the bounded buffer. The bounds error is kept as the
    /// error's source for diagnostics (it was previously discarded).
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.input(buf)
            .map_err(|e| io::Error::new(io::ErrorKind::OutOfMemory, e))?;

        Ok(buf.len())
    }

    /// No-op: bytes are buffered in memory and consumed via deserialization.
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
+

+
impl<const B: usize, D: wire::Decode> Iterator for Deserializer<B, D> {
+
    type Item = Result<D, wire::Error>;
+

+
    fn next(&mut self) -> Option<Self::Item> {
+
        self.deserialize_next().transpose()
+
    }
+
}
+

+
#[cfg(test)]
mod test {
    use super::*;
    use qcheck_macros::quickcheck;

    use radicle::assert_matches;

    // Length-prefixed test messages: one length byte followed by the payload.
    const MSG_HELLO: &[u8] = &[5, b'h', b'e', b'l', b'l', b'o'];
    const MSG_BYE: &[u8] = &[3, b'b', b'y', b'e'];

    #[test]
    fn test_decode_next() {
        let mut decoder = Deserializer::<1024, String>::new(8);

        // Partial message: nothing decodes and the bytes are retained.
        decoder.input(&[3, b'b']).unwrap();
        assert_matches!(decoder.deserialize_next(), Ok(None));
        assert_eq!(decoder.unparsed.len(), 2);

        decoder.input(b"y").unwrap();
        assert_matches!(decoder.deserialize_next(), Ok(None));
        assert_eq!(decoder.unparsed.len(), 3);

        // The final byte completes the message and empties the buffer.
        decoder.input(b"e").unwrap();
        assert_matches!(decoder.deserialize_next(), Ok(Some(s)) if s.as_str() == "bye");
        assert_eq!(decoder.unparsed.len(), 0);
        assert!(decoder.is_empty());
    }

    #[test]
    fn test_unparsed() {
        let mut decoder = Deserializer::<1024, String>::new(8);

        decoder.input(&[3, b'b', b'y']).unwrap();
        assert_eq!(decoder.unparsed().collect::<Vec<_>>(), vec![3, b'b', b'y']);
        assert!(decoder.is_empty());
    }

    #[quickcheck]
    fn prop_decode_next(chunk_size: usize) {
        let mut bytes = vec![];
        let mut msgs = vec![];
        let mut decoder = Deserializer::<1024, String>::new(8);

        // Map the arbitrary input onto `1..=total` so every split point is
        // exercised. The parentheses matter: previously the modulo bound only
        // `MSG_HELLO.len()`, so chunk sizes smaller than
        // `1 + MSG_BYE.len()` were never generated.
        let chunk_size = 1 + chunk_size % (MSG_HELLO.len() + MSG_BYE.len());

        bytes.extend_from_slice(MSG_HELLO);
        bytes.extend_from_slice(MSG_BYE);

        for chunk in bytes.as_slice().chunks(chunk_size) {
            decoder.input(chunk).unwrap();

            while let Some(msg) = decoder.deserialize_next().unwrap() {
                msgs.push(msg);
            }
        }

        assert_eq!(decoder.unparsed.len(), 0);
        assert_eq!(msgs.len(), 2);
        assert_eq!(msgs[0], String::from("hello"));
        assert_eq!(msgs[1], String::from("bye"));
    }
}
added crates/radicle-protocol/src/lib.rs
@@ -0,0 +1,8 @@
+
//! A sans-I/O implementation of the Radicle peer-to-peer network protocol.

/// Collections with a compile-time upper bound on their size.
pub mod bounded;
/// Incremental decoding of messages from a byte stream.
pub mod deserializer;
/// The protocol service state machine and its messages.
pub mod service;
/// Wire encoding and decoding.
pub mod wire;
/// Worker-related types (e.g. fetch errors).
pub mod worker;

/// Peer-to-peer protocol version.
pub const PROTOCOL_VERSION: u8 = 1;
added crates/radicle-protocol/src/service.rs
@@ -0,0 +1,2862 @@
+
#![allow(clippy::too_many_arguments)]
+
#![allow(clippy::collapsible_match)]
+
#![allow(clippy::collapsible_if)]
+
#![warn(clippy::unwrap_used)]
+
pub mod filter;
+
pub mod gossip;
+
pub mod io;
+
pub mod limiter;
+
pub mod message;
+
pub mod session;
+

+
use std::collections::hash_map::Entry;
+
use std::collections::{BTreeSet, HashMap, HashSet};
+
use std::net::IpAddr;
+
use std::ops::{Deref, DerefMut};
+
use std::sync::Arc;
+
use std::{fmt, net, time};
+

+
use crossbeam_channel as chan;
+
use fastrand::Rng;
+
use localtime::{LocalDuration, LocalTime};
+
use log::*;
+
use nonempty::NonEmpty;
+

+
use radicle::identity::Doc;
+
use radicle::node;
+
use radicle::node::address;
+
use radicle::node::address::Store as _;
+
use radicle::node::address::{AddressBook, AddressType, KnownAddress};
+
use radicle::node::config::PeerConfig;
+
use radicle::node::device::Device;
+
use radicle::node::refs::Store as _;
+
use radicle::node::routing::Store as _;
+
use radicle::node::seed;
+
use radicle::node::seed::Store as _;
+
use radicle::node::{ConnectOptions, Penalty, Severity};
+
use radicle::storage::refs::SIGREFS_BRANCH;
+
use radicle::storage::RepositoryError;
+
use radicle_fetch::policy::SeedingPolicy;
+

+
use crate::service::gossip::Store as _;
+
use crate::service::message::{
+
    Announcement, AnnouncementMessage, Info, NodeAnnouncement, Ping, RefsAnnouncement, RefsStatus,
+
};
+
use crate::service::policy::{store::Write, Scope};
+
use radicle::identity::RepoId;
+
use radicle::node::events::Emitter;
+
use radicle::node::routing;
+
use radicle::node::routing::InsertResult;
+
use radicle::node::{
+
    Address, Alias, Features, FetchResult, HostName, Seed, Seeds, SyncStatus, SyncedAt,
+
};
+
use radicle::prelude::*;
+
use radicle::storage;
+
use radicle::storage::{refs::RefsAt, Namespaces, ReadStorage};
+
// use radicle::worker::fetch;
+
// use crate::worker::FetchError;
+
use radicle::crypto;
+
use radicle::node::Link;
+
use radicle::node::PROTOCOL_VERSION;
+

+
use crate::bounded::BoundedVec;
+
use crate::service::filter::Filter;
+
pub use crate::service::message::{Message, ZeroBytes};
+
pub use crate::service::session::{QueuedFetch, Session};
+
use crate::worker::FetchError;
+
use radicle::node::events::{Event, Events};
+
use radicle::node::{Config, NodeId};
+

+
use radicle::node::policy::config as policy;
+

+
use self::io::Outbox;
+
use self::limiter::RateLimiter;
+
use self::message::InventoryAnnouncement;
+
use self::policy::NamespacesError;
+

+
/// How often to run the "idle" task.
pub const IDLE_INTERVAL: LocalDuration = LocalDuration::from_secs(30);
/// How often to run the "gossip" task.
pub const GOSSIP_INTERVAL: LocalDuration = LocalDuration::from_secs(6);
/// How often to run the "announce" task.
pub const ANNOUNCE_INTERVAL: LocalDuration = LocalDuration::from_mins(60);
/// How often to run the "sync" task.
pub const SYNC_INTERVAL: LocalDuration = LocalDuration::from_secs(60);
/// How often to run the "prune" task.
pub const PRUNE_INTERVAL: LocalDuration = LocalDuration::from_mins(30);
/// Duration to wait on an unresponsive peer before dropping its connection.
pub const STALE_CONNECTION_TIMEOUT: LocalDuration = LocalDuration::from_mins(2);
/// How much time should pass after a peer was last active for a *ping* to be sent.
pub const KEEP_ALIVE_DELTA: LocalDuration = LocalDuration::from_mins(1);
/// Maximum number of latency values to keep for a session.
pub const MAX_LATENCIES: usize = 16;
/// Maximum time difference between the local time, and an announcement timestamp.
pub const MAX_TIME_DELTA: LocalDuration = LocalDuration::from_mins(60);
/// Maximum attempts to connect to a peer before we give up.
pub const MAX_CONNECTION_ATTEMPTS: usize = 3;
/// How far back from the present time should we request gossip messages when connecting to a peer,
/// when we come online for the first time.
pub const INITIAL_SUBSCRIBE_BACKLOG_DELTA: LocalDuration = LocalDuration::from_mins(60 * 24);
/// When subscribing, what margin of error do we give ourselves. A higher delta means we ask for
/// messages further back than strictly necessary, to account for missed messages.
pub const SUBSCRIBE_BACKLOG_DELTA: LocalDuration = LocalDuration::from_mins(3);
/// Minimum amount of time to wait before reconnecting to a peer.
pub const MIN_RECONNECTION_DELTA: LocalDuration = LocalDuration::from_secs(3);
/// Maximum amount of time to wait before reconnecting to a peer.
pub const MAX_RECONNECTION_DELTA: LocalDuration = LocalDuration::from_mins(60);
/// Connection retry delta used for ephemeral peers that failed to connect previously.
pub const CONNECTION_RETRY_DELTA: LocalDuration = LocalDuration::from_mins(10);
/// How long to wait for a fetch to stall before aborting (3s).
pub const FETCH_TIMEOUT: time::Duration = time::Duration::from_secs(3);
/// Target number of peers to maintain connections to.
pub const TARGET_OUTBOUND_PEERS: usize = 8;

/// Maximum external address limit imposed by message size limits.
pub use message::ADDRESS_LIMIT;
/// Maximum inventory limit imposed by message size limits.
pub use message::INVENTORY_LIMIT;
/// Maximum number of project git references imposed by message size limits.
pub use message::REF_REMOTE_LIMIT;
+

+
/// Metrics we track.
#[derive(Clone, Debug, Default, serde::Serialize)]
#[serde(rename_all = "camelCase")]
pub struct Metrics {
    /// Metrics for each peer, keyed by node ID.
    pub peers: HashMap<NodeId, PeerMetrics>,
    /// Tasks queued in worker queue.
    pub worker_queue_size: usize,
    /// Current open channel count.
    pub open_channels: usize,
}
+

+
impl Metrics {
+
    /// Get metrics for the given peer.
+
    pub fn peer(&mut self, nid: NodeId) -> &mut PeerMetrics {
+
        self.peers.entry(nid).or_default()
+
    }
+
}
+

+
/// Per-peer metrics we track.
#[derive(Clone, Debug, Default, serde::Serialize)]
#[serde(rename_all = "camelCase")]
pub struct PeerMetrics {
    /// Git data bytes received from this peer.
    pub received_git_bytes: usize,
    /// Fetch requests received from this peer.
    pub received_fetch_requests: usize,
    /// Total bytes received from this peer.
    pub received_bytes: usize,
    /// Gossip messages received from this peer.
    pub received_gossip_messages: usize,
    /// Total bytes sent to this peer.
    pub sent_bytes: usize,
    /// Fetch requests sent to this peer.
    pub sent_fetch_requests: usize,
    /// Git data bytes sent to this peer.
    pub sent_git_bytes: usize,
    /// Gossip messages sent to this peer.
    pub sent_gossip_messages: usize,
    /// Streams opened with this peer.
    pub streams_opened: usize,
    /// Inbound connection attempts from this peer.
    pub inbound_connection_attempts: usize,
    /// Outbound connection attempts to this peer.
    pub outbound_connection_attempts: usize,
    /// Times this peer was disconnected.
    pub disconnects: usize,
}
+

+
/// Result of syncing our routing table with a node's inventory.
#[derive(Default)]
struct SyncedRouting {
    /// Repo entries added.
    added: Vec<RepoId>,
    /// Repo entries removed.
    removed: Vec<RepoId>,
    /// Repo entries updated (time).
    updated: Vec<RepoId>,
}
+

+
impl SyncedRouting {
+
    fn is_empty(&self) -> bool {
+
        self.added.is_empty() && self.removed.is_empty() && self.updated.is_empty()
+
    }
+
}
+

+
/// A peer we can connect to.
#[derive(Debug, Clone)]
struct Peer {
    /// The peer's node ID.
    nid: NodeId,
    /// Known addresses for this peer.
    addresses: Vec<KnownAddress>,
    /// Accumulated penalty for this peer.
    penalty: Penalty,
}
+

+
/// General service error.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    /// Low-level git error.
    #[error(transparent)]
    Git(#[from] radicle::git::raw::Error),
    /// Git extensions error.
    #[error(transparent)]
    GitExt(#[from] radicle::git::ext::Error),
    /// Storage error.
    #[error(transparent)]
    Storage(#[from] storage::Error),
    /// Gossip store error.
    #[error(transparent)]
    Gossip(#[from] gossip::Error),
    /// Signed refs error.
    #[error(transparent)]
    Refs(#[from] storage::refs::Error),
    /// Routing table error.
    #[error(transparent)]
    Routing(#[from] routing::Error),
    /// Address store error.
    #[error(transparent)]
    Address(#[from] address::Error),
    /// Node database error.
    #[error(transparent)]
    Database(#[from] node::db::Error),
    /// Seed store error.
    #[error(transparent)]
    Seeds(#[from] seed::Error),
    /// Policy configuration error.
    #[error(transparent)]
    Policy(#[from] policy::Error),
    /// Repository error.
    #[error(transparent)]
    Repository(#[from] radicle::storage::RepositoryError),
    /// Namespaces error.
    #[error("namespaces error: {0}")]
    Namespaces(#[from] NamespacesError),
}

/// Errors that can occur when attempting to connect to a peer.
#[derive(thiserror::Error, Debug)]
pub enum ConnectError {
    /// A session for the target peer already exists.
    #[error("attempted connection to peer {nid} which already has a session")]
    SessionExists { nid: NodeId },
    /// The target node ID is our own.
    #[error("attempted connection to self")]
    SelfConnection,
    /// No outbound connection slots are left.
    #[error("outbound connection limit reached when attempting {nid} ({addr})")]
    LimitReached { nid: NodeId, addr: Address },
}
+

+
/// A store for all node data.
///
/// Umbrella trait: anything implementing all of the individual store traits
/// can back the service.
pub trait Store:
    address::Store + gossip::Store + routing::Store + seed::Store + node::refs::Store
{
}

impl Store for radicle::node::Database {}

/// Function used to query internal service state.
///
/// Runs against a [`ServiceState`] and reports failures as [`CommandError`].
pub type QueryState = dyn Fn(&dyn ServiceState) -> Result<(), CommandError> + Send + Sync;
+

+
/// Commands sent to the service by the operator.
///
/// Variants carrying a [`chan::Sender`] include a channel on which the
/// service can send back a response.
pub enum Command {
    /// Announce repository references for given repository to peers.
    AnnounceRefs(RepoId, chan::Sender<RefsAt>),
    /// Announce local repositories to peers.
    AnnounceInventory,
    /// Add repository to local inventory.
    AddInventory(RepoId, chan::Sender<bool>),
    /// Connect to node with the given address.
    Connect(NodeId, Address, ConnectOptions),
    /// Disconnect from node.
    Disconnect(NodeId),
    /// Get the node configuration.
    Config(chan::Sender<Config>),
    /// Get the node's listen addresses.
    ListenAddrs(chan::Sender<Vec<std::net::SocketAddr>>),
    /// Lookup seeds for the given repository in the routing table.
    Seeds(RepoId, chan::Sender<Seeds>),
    /// Fetch the given repository from the network.
    Fetch(RepoId, NodeId, time::Duration, chan::Sender<FetchResult>),
    /// Seed the given repository.
    Seed(RepoId, Scope, chan::Sender<bool>),
    /// Unseed the given repository.
    Unseed(RepoId, chan::Sender<bool>),
    /// Follow the given node.
    Follow(NodeId, Option<Alias>, chan::Sender<bool>),
    /// Unfollow the given node.
    Unfollow(NodeId, chan::Sender<bool>),
    /// Query the internal service state.
    QueryState(Arc<QueryState>, chan::Sender<Result<(), CommandError>>),
}
+

+
impl fmt::Debug for Command {
+
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+
        match self {
+
            Self::AnnounceRefs(id, _) => write!(f, "AnnounceRefs({id})"),
+
            Self::AnnounceInventory => write!(f, "AnnounceInventory"),
+
            Self::AddInventory(rid, _) => write!(f, "AddInventory({rid})"),
+
            Self::Connect(id, addr, opts) => write!(f, "Connect({id}, {addr}, {opts:?})"),
+
            Self::Disconnect(id) => write!(f, "Disconnect({id})"),
+
            Self::Config(_) => write!(f, "Config"),
+
            Self::ListenAddrs(_) => write!(f, "ListenAddrs"),
+
            Self::Seeds(id, _) => write!(f, "Seeds({id})"),
+
            Self::Fetch(id, node, _, _) => write!(f, "Fetch({id}, {node})"),
+
            Self::Seed(id, scope, _) => write!(f, "Seed({id}, {scope})"),
+
            Self::Unseed(id, _) => write!(f, "Unseed({id})"),
+
            Self::Follow(id, _, _) => write!(f, "Follow({id})"),
+
            Self::Unfollow(id, _) => write!(f, "Unfollow({id})"),
+
            Self::QueryState { .. } => write!(f, "QueryState(..)"),
+
        }
+
    }
+
}
+

+
/// Command-related errors.
#[derive(thiserror::Error, Debug)]
pub enum CommandError {
    /// Storage error.
    #[error(transparent)]
    Storage(#[from] storage::Error),
    /// Routing table error.
    #[error(transparent)]
    Routing(#[from] routing::Error),
    /// Policy configuration error.
    #[error(transparent)]
    Policy(#[from] policy::Error),
}

/// Error returned by [`Service::try_fetch`].
#[derive(thiserror::Error, Debug)]
enum TryFetchError<'a> {
    /// A fetch for this repository is already in progress; carries the
    /// existing state so the caller can subscribe to it.
    #[error("ongoing fetch for repository exists")]
    AlreadyFetching(&'a mut FetchState),
    /// The session with the peer is not connected.
    #[error("peer is not connected; cannot initiate fetch")]
    SessionNotConnected,
    /// The peer's fetch capacity is exhausted.
    #[error("peer fetch capacity reached; cannot initiate fetch")]
    SessionCapacityReached,
    /// Namespaces error.
    #[error(transparent)]
    Namespaces(#[from] NamespacesError),
}
+

+
/// Fetch state for an ongoing fetch.
#[derive(Debug)]
pub struct FetchState {
    /// Node we're fetching from.
    pub from: NodeId,
    /// What refs we're fetching.
    pub refs_at: Vec<RefsAt>,
    /// Channels waiting for fetch results.
    pub subscribers: Vec<chan::Sender<FetchResult>>,
}
+

+
impl FetchState {
+
    /// Add a subscriber to this fetch.
+
    fn subscribe(&mut self, c: chan::Sender<FetchResult>) {
+
        if !self.subscribers.iter().any(|s| s.same_channel(&c)) {
+
            self.subscribers.push(c);
+
        }
+
    }
+
}
+

+
/// Holds all node stores.
///
/// Newtype over a single database handle `D`, exposed as each of the store
/// traits it implements through the view methods below.
#[derive(Debug)]
pub struct Stores<D>(D);

impl<D> Stores<D>
where
    D: Store,
{
    /// Get the database as a routing store.
    pub fn routing(&self) -> &impl routing::Store {
        &self.0
    }

    /// Get the database as a routing store, mutably.
    pub fn routing_mut(&mut self) -> &mut impl routing::Store {
        &mut self.0
    }

    /// Get the database as an address store.
    pub fn addresses(&self) -> &impl address::Store {
        &self.0
    }

    /// Get the database as an address store, mutably.
    pub fn addresses_mut(&mut self) -> &mut impl address::Store {
        &mut self.0
    }

    /// Get the database as a gossip store.
    pub fn gossip(&self) -> &impl gossip::Store {
        &self.0
    }

    /// Get the database as a gossip store, mutably.
    pub fn gossip_mut(&mut self) -> &mut impl gossip::Store {
        &mut self.0
    }

    /// Get the database as a seed store.
    pub fn seeds(&self) -> &impl seed::Store {
        &self.0
    }

    /// Get the database as a seed store, mutably.
    pub fn seeds_mut(&mut self) -> &mut impl seed::Store {
        &mut self.0
    }

    /// Get the database as a refs db.
    pub fn refs(&self) -> &impl node::refs::Store {
        &self.0
    }

    /// Get the database as a refs db, mutably.
    pub fn refs_mut(&mut self) -> &mut impl node::refs::Store {
        &mut self.0
    }
}

impl<D> AsMut<D> for Stores<D> {
    /// Mutable access to the underlying database.
    fn as_mut(&mut self) -> &mut D {
        &mut self.0
    }
}

impl<D> From<D> for Stores<D> {
    /// Wrap a database handle in a `Stores`.
    fn from(db: D) -> Self {
        Self(db)
    }
}
+

+
/// The node service.
///
/// Holds all of the service's state: configuration, the cryptographic
/// signer, storage and database handles, peer sessions, gossip and fetch
/// bookkeeping, the timestamps of the periodic tasks, and the I/O outbox
/// through which all network actions are emitted.
#[derive(Debug)]
pub struct Service<D, S, G> {
    /// Service configuration.
    config: Config,
    /// Our cryptographic signer and key.
    signer: Device<G>,
    /// Project storage.
    storage: S,
    /// Node database.
    db: Stores<D>,
    /// Policy configuration.
    policies: policy::Config<Write>,
    /// Peer sessions, currently or recently connected.
    sessions: Sessions,
    /// Clock. Tells the time.
    clock: LocalTime,
    /// Who relayed what announcement to us. We keep track of this to ensure that
    /// we don't relay messages to nodes that already know about these messages.
    relayed_by: HashMap<gossip::AnnouncementId, Vec<NodeId>>,
    /// I/O outbox.
    outbox: Outbox,
    /// Cached local node announcement.
    node: NodeAnnouncement,
    /// Cached local inventory announcement.
    inventory: InventoryAnnouncement,
    /// Source of entropy.
    rng: Rng,
    /// Ongoing fetches.
    fetching: HashMap<RepoId, FetchState>,
    /// Request/connection rate limiter.
    limiter: RateLimiter,
    /// Current seeded repositories bloom filter.
    filter: Filter,
    /// Last time the service was idle.
    last_idle: LocalTime,
    /// Last time the gossip messages were relayed.
    last_gossip: LocalTime,
    /// Last time the service synced.
    last_sync: LocalTime,
    /// Last time the service routing table was pruned.
    last_prune: LocalTime,
    /// Last time the announcement task was run.
    last_announce: LocalTime,
    /// Timestamp of last local inventory announced.
    last_inventory: LocalTime,
    /// Last timestamp used for announcements.
    last_timestamp: Timestamp,
    /// Time when the service was initialized, or `None` if it wasn't initialized.
    started_at: Option<LocalTime>,
    /// Time when the service was last online, or `None` if this is the first time.
    last_online_at: Option<LocalTime>,
    /// Publishes events to subscribers.
    emitter: Emitter<Event>,
    /// Local listening addresses.
    listening: Vec<net::SocketAddr>,
    /// Latest metrics for all nodes connected to since the last start.
    metrics: Metrics,
}
+

+
impl<D, S, G> Service<D, S, G> {
    /// Get the local node id.
    pub fn node_id(&self) -> NodeId {
        *self.signer.public_key()
    }

    /// Get the local service time.
    pub fn local_time(&self) -> LocalTime {
        self.clock
    }

    /// Get a handle to the event emitter.
    ///
    /// The returned clone publishes to the same subscribers.
    pub fn emitter(&self) -> Emitter<Event> {
        self.emitter.clone()
    }
}
+

+
impl<D, S, G> Service<D, S, G>
+
where
+
    D: Store,
+
    S: ReadStorage + 'static,
+
    G: crypto::signature::Signer<crypto::Signature>,
+
{
+
    pub fn new(
+
        config: Config,
+
        db: Stores<D>,
+
        storage: S,
+
        policies: policy::Config<Write>,
+
        signer: Device<G>,
+
        rng: Rng,
+
        node: NodeAnnouncement,
+
        emitter: Emitter<Event>,
+
    ) -> Self {
+
        let sessions = Sessions::new(rng.clone());
+
        let limiter = RateLimiter::new(config.peers());
+
        let last_timestamp = node.timestamp;
+
        let clock = LocalTime::default(); // Updated on initialize.
+
        let inventory = gossip::inventory(clock.into(), []); // Updated on initialize.
+

+
        Self {
+
            config,
+
            storage,
+
            policies,
+
            signer,
+
            rng,
+
            inventory,
+
            node,
+
            clock,
+
            db,
+
            outbox: Outbox::default(),
+
            limiter,
+
            sessions,
+
            fetching: HashMap::new(),
+
            filter: Filter::empty(),
+
            relayed_by: HashMap::default(),
+
            last_idle: LocalTime::default(),
+
            last_gossip: LocalTime::default(),
+
            last_sync: LocalTime::default(),
+
            last_prune: LocalTime::default(),
+
            last_timestamp,
+
            last_announce: LocalTime::default(),
+
            last_inventory: LocalTime::default(),
+
            started_at: None,     // Updated on initialize.
+
            last_online_at: None, // Updated on initialize.
+
            emitter,
+
            listening: vec![],
+
            metrics: Metrics::default(),
+
        }
+
    }
+

+
    /// Whether the service was started (initialized) and if so, at what time.
    ///
    /// Returns `None` until `initialize` has been called.
    pub fn started(&self) -> Option<LocalTime> {
        self.started_at
    }
+

+
    /// Return the next i/o action to execute.
    ///
    /// Drains the service's I/O outbox, one item per call.
    #[allow(clippy::should_implement_trait)]
    pub fn next(&mut self) -> Option<io::Io> {
        self.outbox.next()
    }
+

+
    /// Seed a repository.
+
    /// Returns whether or not the repo policy was updated.
+
    pub fn seed(&mut self, id: &RepoId, scope: Scope) -> Result<bool, policy::Error> {
+
        let updated = self.policies.seed(id, scope)?;
+
        self.filter.insert(id);
+

+
        Ok(updated)
+
    }
+

+
    /// Unseed a repository.
    /// Returns whether or not the repo policy was updated.
    /// Note that when unseeding, we don't announce anything to the network. This is because by
    /// simply not announcing it anymore, it will eventually be pruned by nodes.
    pub fn unseed(&mut self, id: &RepoId) -> Result<bool, policy::Error> {
        let updated = self.policies.unseed(id)?;

        if updated {
            // Nb. This is potentially slow if we have lots of repos. We should probably
            // only re-compute the filter when we've unseeded a certain amount of repos
            // and the filter is really out of date.
            //
            // TODO: Share this code with initialization code.
            //
            // NOTE(review): the filter is rebuilt from scratch here — presumably
            // because the subscription filter doesn't support removal; confirm.
            self.filter = Filter::new(
                self.policies
                    .seed_policies()?
                    .filter_map(|t| (t.policy.is_allow()).then_some(t.rid)),
            );
            // Update and announce new inventory.
            if let Err(e) = self.remove_inventory(id) {
                error!(target: "service", "Error updating inventory after unseed: {e}");
            }
        }
        Ok(updated)
    }
+

+
    /// Find the closest `n` peers by proximity in seeding graphs.
    /// Returns a sorted list from the closest peer to the furthest.
    /// Peers with more seedings in common score higher.
    //
    // Not yet implemented: calling this panics via `todo!`.
    #[allow(unused)]
    pub fn closest_peers(&self, n: usize) -> Vec<NodeId> {
        todo!()
    }
+

+
    /// Get the database.
    ///
    /// Returns a shared reference to the node's [`Stores`].
    pub fn database(&self) -> &Stores<D> {
        &self.db
    }
+

+
    /// Get the mutable database.
    ///
    /// Returns an exclusive reference to the node's [`Stores`].
    pub fn database_mut(&mut self) -> &mut Stores<D> {
        &mut self.db
    }
+

+
    /// Get the storage instance.
    ///
    /// This is the project (repository) storage, not the node database.
    pub fn storage(&self) -> &S {
        &self.storage
    }
+

+
    /// Get the mutable storage instance.
    ///
    /// This is the project (repository) storage, not the node database.
    pub fn storage_mut(&mut self) -> &mut S {
        &mut self.storage
    }
+

+
    /// Get the node policies.
    ///
    /// Covers both repository seeding and node follow policies.
    pub fn policies(&self) -> &policy::Config<Write> {
        &self.policies
    }
+

+
    /// Get the local signer.
    ///
    /// The signer also holds the node's public key (see [`Service::node_id`]).
    pub fn signer(&self) -> &Device<G> {
        &self.signer
    }
+

+
    /// Subscriber to inner `Emitter` events.
+
    pub fn events(&mut self) -> Events {
+
        Events::from(self.emitter.subscribe())
+
    }
+

+
    /// Get I/O outbox.
    ///
    /// Actions queued here are drained by [`Service::next`].
    pub fn outbox(&mut self) -> &mut Outbox {
        &mut self.outbox
    }
+

+
    /// Get configuration.
    ///
    /// Returns the service configuration this node was created with.
    pub fn config(&self) -> &Config {
        &self.config
    }
+

+
    /// Lookup a repository, both locally and in the routing table.
+
    pub fn lookup(&self, rid: RepoId) -> Result<Lookup, LookupError> {
+
        let this = self.nid();
+
        let local = self.storage.get(rid)?;
+
        let remote = self
+
            .db
+
            .routing()
+
            .get(&rid)?
+
            .iter()
+
            .filter(|nid| nid != &this)
+
            .cloned()
+
            .collect();
+

+
        Ok(Lookup { local, remote })
+
    }
+

+
    /// Initialize service with current time. Call this once.
+
    pub fn initialize(&mut self, time: LocalTime) -> Result<(), Error> {
+
        debug!(target: "service", "Init @{}", time.as_millis());
+
        assert_ne!(time, LocalTime::default());
+

+
        let nid = self.node_id();
+

+
        self.clock = time;
+
        self.started_at = Some(time);
+
        self.last_online_at = match self.db.gossip().last() {
+
            Ok(Some(last)) => Some(last.to_local_time()),
+
            Ok(None) => None,
+
            Err(e) => {
+
                error!(target: "service", "Error getting the lastest gossip message from db: {e}");
+
                None
+
            }
+
        };
+

+
        // Populate refs database. This is only useful as part of the upgrade process for nodes
+
        // that have been online since before the refs database was created.
+
        match self.db.refs().count() {
+
            Ok(0) => {
+
                info!(target: "service", "Empty refs database, populating from storage..");
+
                if let Err(e) = self.db.refs_mut().populate(&self.storage) {
+
                    error!(target: "service", "Failed to populate refs database: {e}");
+
                }
+
            }
+
            Ok(n) => debug!(target: "service", "Refs database has {n} cached references"),
+
            Err(e) => error!(target: "service", "Error checking refs database: {e}"),
+
        }
+

+
        let announced = self
+
            .db
+
            .seeds()
+
            .seeded_by(&nid)?
+
            .collect::<Result<HashMap<_, _>, _>>()?;
+
        let mut inventory = BTreeSet::new();
+
        let mut private = BTreeSet::new();
+

+
        for repo in self.storage.repositories()? {
+
            let rid = repo.rid;
+

+
            // If we're not seeding this repo, just skip it.
+
            if !self.policies.is_seeding(&rid)? {
+
                warn!(target: "service", "Local repository {rid} is not seeded");
+
                continue;
+
            }
+
            // Add public repositories to inventory.
+
            if repo.doc.is_public() {
+
                inventory.insert(rid);
+
            } else {
+
                private.insert(rid);
+
            }
+
            // If we have no owned refs for this repo, then there's nothing to announce.
+
            let Some(updated_at) = repo.synced_at else {
+
                continue;
+
            };
+
            // Skip this repo if the sync status matches what we have in storage.
+
            if let Some(announced) = announced.get(&rid) {
+
                if updated_at.oid == announced.oid {
+
                    continue;
+
                }
+
            }
+
            // Make sure our local node's sync status is up to date with storage.
+
            if self.db.seeds_mut().synced(
+
                &rid,
+
                &nid,
+
                updated_at.oid,
+
                updated_at.timestamp.into(),
+
            )? {
+
                debug!(target: "service", "Saved local sync status for {rid}..");
+
            }
+
            // If we got here, it likely means a repo was updated while the node was stopped.
+
            // Therefore, we pre-load a refs announcement for this repo, so that it is included in
+
            // the historical gossip messages when a node connects and subscribes to this repo.
+
            if let Ok((ann, _)) = self.refs_announcement_for(rid, [nid]) {
+
                debug!(target: "service", "Adding refs announcement for {rid} to historical gossip messages..");
+
                self.db.gossip_mut().announced(&nid, &ann)?;
+
            }
+
        }
+

+
        // Ensure that our inventory is recorded in our routing table, and we are seeding
+
        // all of it. It can happen that inventory is not properly seeded if for eg. the
+
        // user creates a new repository while the node is stopped.
+
        self.db
+
            .routing_mut()
+
            .add_inventory(inventory.iter(), nid, time.into())?;
+
        self.inventory = gossip::inventory(self.timestamp(), inventory);
+

+
        // Ensure that private repositories are not in our inventory. It's possible that
+
        // a repository was public and then it was made private.
+
        self.db
+
            .routing_mut()
+
            .remove_inventories(private.iter(), &nid)?;
+

+
        // Setup subscription filter for seeded repos.
+
        self.filter = Filter::new(
+
            self.policies
+
                .seed_policies()?
+
                .filter_map(|t| (t.policy.is_allow()).then_some(t.rid)),
+
        );
+
        // Connect to configured peers.
+
        let addrs = self.config.connect.clone();
+
        for (id, addr) in addrs.into_iter().map(|ca| ca.into()) {
+
            if let Err(e) = self.connect(id, addr) {
+
                error!(target: "service", "Service::initialization connection error: {e}");
+
            }
+
        }
+
        // Try to establish some connections.
+
        self.maintain_connections();
+
        // Start periodic tasks.
+
        self.outbox.wakeup(IDLE_INTERVAL);
+
        self.outbox.wakeup(GOSSIP_INTERVAL);
+

+
        Ok(())
+
    }
+

+
    /// Advance the service clock to `now` and record the latest `metrics`.
    ///
    /// The clock only ever moves forward: if `now` is earlier than the
    /// current clock, the time is kept and a warning is logged (outside of
    /// tests).
    ///
    /// # Panics
    ///
    /// Panics if the service was not initialized.
    pub fn tick(&mut self, now: LocalTime, metrics: &Metrics) {
        trace!(
            target: "service",
            "Tick +{}",
            now - self.started_at.expect("Service::tick: service must be initialized")
        );
        if now >= self.clock {
            self.clock = now;
        } else {
            // Nb. In tests, we often move the clock forwards in time to test different behaviors,
            // so this warning isn't applicable there.
            #[cfg(not(test))]
            warn!(
                target: "service",
                "System clock is not monotonic: {now} is not greater or equal to {}", self.clock
            );
        }
        self.metrics = metrics.clone();
    }
+

+
    /// Run any periodic tasks that are due.
    ///
    /// Checks, in order, the 'idle', 'gossip', 'sync', 'announce' and 'prune'
    /// tasks, running each one whose interval has elapsed since its last run,
    /// and always maintains persistent peer connections at the end. Each task
    /// schedules its own next wakeup via the outbox.
    ///
    /// # Panics
    ///
    /// Panics if the service was not initialized.
    pub fn wake(&mut self) {
        let now = self.clock;

        trace!(
            target: "service",
            "Wake +{}",
            now - self.started_at.expect("Service::wake: service must be initialized")
        );

        // Idle task: connection upkeep and fetch dequeueing.
        if now - self.last_idle >= IDLE_INTERVAL {
            trace!(target: "service", "Running 'idle' task...");

            self.keep_alive(&now);
            self.disconnect_unresponsive_peers(&now);
            self.idle_connections();
            self.maintain_connections();
            self.dequeue_fetches();
            self.outbox.wakeup(IDLE_INTERVAL);
            self.last_idle = now;
        }
        // Gossip task: relay stored announcements.
        if now - self.last_gossip >= GOSSIP_INTERVAL {
            trace!(target: "service", "Running 'gossip' task...");

            if let Err(e) = self.relay_announcements() {
                error!(target: "service", "Error relaying stored announcements: {e}");
            }
            self.outbox.wakeup(GOSSIP_INTERVAL);
            self.last_gossip = now;
        }
        // Sync task: fetch repositories we're missing.
        if now - self.last_sync >= SYNC_INTERVAL {
            trace!(target: "service", "Running 'sync' task...");

            if let Err(e) = self.fetch_missing_repositories() {
                error!(target: "service", "Error fetching missing inventory: {e}");
            }
            self.outbox.wakeup(SYNC_INTERVAL);
            self.last_sync = now;
        }
        // Announce task: re-announce our inventory.
        if now - self.last_announce >= ANNOUNCE_INTERVAL {
            trace!(target: "service", "Running 'announce' task...");

            self.announce_inventory();
            self.outbox.wakeup(ANNOUNCE_INTERVAL);
            self.last_announce = now;
        }
        // Prune task: expire stale routing and gossip entries.
        if now - self.last_prune >= PRUNE_INTERVAL {
            trace!(target: "service", "Running 'prune' task...");

            if let Err(err) = self.prune_routing_entries(&now) {
                error!(target: "service", "Error pruning routing entries: {err}");
            }
            if let Err(err) = self
                .db
                .gossip_mut()
                .prune((now - self.config.limits.gossip_max_age).into())
            {
                error!(target: "service", "Error pruning gossip entries: {err}");
            }

            self.outbox.wakeup(PRUNE_INTERVAL);
            self.last_prune = now;
        }

        // Always check whether there are persistent peers that need reconnecting.
        self.maintain_persistent();
    }
+

+
    /// Handle an incoming client [`Command`].
    ///
    /// # Panics
    ///
    /// Panics if seeding, unseeding, following or unfollowing fails, or if
    /// `AnnounceRefs` returns more than one set of refs for our own node id.
    pub fn command(&mut self, cmd: Command) {
        info!(target: "service", "Received command {:?}", cmd);

        match cmd {
            Command::Connect(nid, addr, opts) => {
                // Persistent peers are added to the config so they are reconnected.
                if opts.persistent {
                    self.config.connect.insert((nid, addr.clone()).into());
                }
                if let Err(e) = self.connect(nid, addr) {
                    match e {
                        ConnectError::SessionExists { nid } => {
                            self.emitter.emit(Event::PeerConnected { nid });
                        }
                        e => {
                            // N.b. using the fact that the call to connect waits for an event
                            self.emitter.emit(Event::PeerDisconnected {
                                nid,
                                reason: e.to_string(),
                            });
                        }
                    }
                }
            }
            Command::Disconnect(nid) => {
                self.outbox.disconnect(nid, DisconnectReason::Command);
            }
            Command::Config(resp) => {
                resp.send(self.config.clone()).ok();
            }
            Command::ListenAddrs(resp) => {
                resp.send(self.listening.clone()).ok();
            }
            Command::Seeds(rid, resp) => match self.seeds(&rid) {
                Ok(seeds) => {
                    let (connected, disconnected) = seeds.partition();
                    debug!(
                        target: "service",
                        "Found {} connected seed(s) and {} disconnected seed(s) for {}",
                        connected.len(), disconnected.len(),  rid
                    );
                    resp.send(seeds).ok();
                }
                Err(e) => {
                    error!(target: "service", "Error getting seeds for {rid}: {e}");
                }
            },
            Command::Fetch(rid, seed, timeout, resp) => {
                self.fetch(rid, seed, timeout, Some(resp));
            }
            Command::Seed(rid, scope, resp) => {
                // Update our seeding policy.
                let seeded = self
                    .seed(&rid, scope)
                    .expect("Service::command: error seeding repository");
                resp.send(seeded).ok();

                // Let all our peers know that we're interested in this repo from now on.
                self.outbox.broadcast(
                    Message::subscribe(self.filter(), self.clock.into(), Timestamp::MAX),
                    self.sessions.connected().map(|(_, s)| s),
                );
            }
            Command::Unseed(id, resp) => {
                let updated = self
                    .unseed(&id)
                    .expect("Service::command: error unseeding repository");
                resp.send(updated).ok();
            }
            Command::Follow(id, alias, resp) => {
                let seeded = self
                    .policies
                    .follow(&id, alias.as_ref())
                    .expect("Service::command: error following node");
                resp.send(seeded).ok();
            }
            Command::Unfollow(id, resp) => {
                let updated = self
                    .policies
                    .unfollow(&id)
                    .expect("Service::command: error unfollowing node");
                resp.send(updated).ok();
            }
            Command::AnnounceRefs(id, resp) => {
                let doc = match self.storage.get(id) {
                    Ok(Some(doc)) => doc,
                    Ok(None) => {
                        error!(target: "service", "Error announcing refs: repository {id} not found");
                        return;
                    }
                    Err(e) => {
                        error!(target: "service", "Error announcing refs: doc error: {e}");
                        return;
                    }
                };

                match self.announce_own_refs(id, doc) {
                    Ok(refs) => match refs.as_slice() {
                        &[refs] => {
                            resp.send(refs).ok();
                        }
                        // SAFETY: Since we passed in one NID, we should get exactly one item back.
                        [..] => panic!("Service::command: unexpected refs returned"),
                    },
                    Err(err) => {
                        error!(target: "service", "Error announcing refs: {err}");
                    }
                }
            }
            Command::AnnounceInventory => {
                self.announce_inventory();
            }
            Command::AddInventory(rid, resp) => match self.add_inventory(rid) {
                Ok(updated) => {
                    resp.send(updated).ok();
                }
                Err(e) => {
                    error!(target: "service", "Error adding {rid} to inventory: {e}");
                }
            },
            Command::QueryState(query, sender) => {
                sender.send(query(self)).ok();
            }
        }
    }
+

+
    /// Initiate an outgoing fetch for some repository, based on another node's announcement.
+
    /// Returns `true` if the fetch was initiated and `false` if it was skipped.
+
    fn fetch_refs_at(
+
        &mut self,
+
        rid: RepoId,
+
        from: NodeId,
+
        refs: NonEmpty<RefsAt>,
+
        scope: Scope,
+
        timeout: time::Duration,
+
        channel: Option<chan::Sender<FetchResult>>,
+
    ) -> bool {
+
        match self.refs_status_of(rid, refs, &scope) {
+
            Ok(status) => {
+
                if status.want.is_empty() {
+
                    debug!(target: "service", "Skipping fetch for {rid}, all refs are already in storage");
+
                } else {
+
                    return self._fetch(rid, from, status.want, timeout, channel);
+
                }
+
            }
+
            Err(e) => {
+
                error!(target: "service", "Error getting the refs status of {rid}: {e}");
+
            }
+
        }
+
        // We didn't try to fetch anything.
+
        false
+
    }
+

+
    /// Initiate an outgoing fetch for some repository.
    ///
    /// Delegates to `_fetch` with an empty `refs_at` list.
    /// NOTE(review): an empty list presumably means "fetch without pinning
    /// specific refs" — confirm against the fetch worker.
    fn fetch(
        &mut self,
        rid: RepoId,
        from: NodeId,
        timeout: time::Duration,
        channel: Option<chan::Sender<FetchResult>>,
    ) -> bool {
        self._fetch(rid, from, vec![], timeout, channel)
    }
+

+
    /// Shared implementation behind `fetch` and `fetch_refs_at`.
    ///
    /// Returns `true` if a fetch was initiated, and `false` if it was queued,
    /// skipped as redundant, or failed to start (in which case the failure is
    /// reported through `channel`, if any).
    fn _fetch(
        &mut self,
        rid: RepoId,
        from: NodeId,
        refs_at: Vec<RefsAt>,
        timeout: time::Duration,
        channel: Option<chan::Sender<FetchResult>>,
    ) -> bool {
        match self.try_fetch(rid, &from, refs_at.clone(), timeout) {
            Ok(fetching) => {
                // Fetch started: notify the caller via `channel` on completion.
                if let Some(c) = channel {
                    fetching.subscribe(c);
                }
                return true;
            }
            Err(TryFetchError::AlreadyFetching(fetching)) => {
                // If we're already fetching the same refs from the requested peer, there's nothing
                // to do, we simply add the supplied channel to the list of subscribers so that it
                // is notified on completion. Otherwise, we queue a fetch with the requested peer.
                if fetching.from == from && fetching.refs_at == refs_at {
                    debug!(target: "service", "Ignoring redundant fetch of {rid} from {from}");

                    if let Some(c) = channel {
                        fetching.subscribe(c);
                    }
                } else {
                    let fetch = QueuedFetch {
                        rid,
                        refs_at,
                        from,
                        timeout,
                        channel,
                    };
                    debug!(target: "service", "Queueing fetch for {rid} with {from} (already fetching)..");

                    self.queue_fetch(fetch);
                }
            }
            Err(TryFetchError::SessionCapacityReached) => {
                // The peer is busy with other fetches; try again later.
                debug!(target: "service", "Fetch capacity reached for {from}, queueing {rid}..");
                self.queue_fetch(QueuedFetch {
                    rid,
                    refs_at,
                    from,
                    timeout,
                    channel,
                });
            }
            Err(e) => {
                // The fetch could not be started or queued; report the
                // failure to the caller, if any.
                if let Some(c) = channel {
                    c.send(FetchResult::Failed {
                        reason: e.to_string(),
                    })
                    .ok();
                }
            }
        }
        false
    }
+

+
    fn queue_fetch(&mut self, fetch: QueuedFetch) {
+
        let Some(s) = self.sessions.get_mut(&fetch.from) else {
+
            log::error!(target: "service", "Cannot queue fetch for unknown session {}", fetch.from);
+
            return;
+
        };
+
        if let Err(e) = s.queue_fetch(fetch) {
+
            let fetch = e.inner();
+
            log::debug!(target: "service", "Unable to queue fetch for {} with {}: {e}", &fetch.rid, &fetch.from);
+
        }
+
    }
+

+
    // TODO: Buffer/throttle fetches.
    /// Try to start a fetch of `rid` from the connected peer `from`.
    ///
    /// On success, registers and returns the new [`FetchState`] and emits the
    /// fetch via the outbox. Fails if the session is unknown or disconnected,
    /// if the repo is already being fetched from any peer, or if the session
    /// is at fetch capacity.
    fn try_fetch(
        &mut self,
        rid: RepoId,
        from: &NodeId,
        refs_at: Vec<RefsAt>,
        timeout: time::Duration,
    ) -> Result<&mut FetchState, TryFetchError> {
        let from = *from;
        let Some(session) = self.sessions.get_mut(&from) else {
            return Err(TryFetchError::SessionNotConnected);
        };
        let fetching = self.fetching.entry(rid);

        trace!(target: "service", "Trying to fetch {refs_at:?} for {rid}..");

        let fetching = match fetching {
            Entry::Vacant(fetching) => fetching,
            Entry::Occupied(fetching) => {
                // We're already fetching this repo from some peer.
                return Err(TryFetchError::AlreadyFetching(fetching.into_mut()));
            }
        };
        // Sanity check: We shouldn't be fetching from this session, since we return above if we're
        // fetching from any session.
        debug_assert!(!session.is_fetching(&rid));

        if !session.is_connected() {
            // This can happen if a session disconnects in the time between asking for seeds to
            // fetch from, and initiating the fetch from one of those seeds.
            return Err(TryFetchError::SessionNotConnected);
        }
        if session.is_at_capacity() {
            // If we're already fetching multiple repos from this peer.
            return Err(TryFetchError::SessionCapacityReached);
        }

        // Record the fetch state before emitting the fetch request.
        let fetching = fetching.insert(FetchState {
            from,
            refs_at: refs_at.clone(),
            subscribers: vec![],
        });
        self.outbox.fetch(
            session,
            rid,
            refs_at,
            timeout,
            self.config.limits.fetch_pack_receive,
        );

        Ok(fetching)
    }
+

+
    /// Process the result of a completed fetch of `rid` from `remote`.
    ///
    /// Clears the fetch state, notifies all subscribers of the result, and on
    /// success updates routing state, emits a `RefsFetched` event, and may
    /// announce inventory and refs. Finally, tries to dequeue further fetches.
    pub fn fetched(
        &mut self,
        rid: RepoId,
        remote: NodeId,
        result: Result<crate::worker::fetch::FetchResult, crate::worker::FetchError>,
    ) {
        let Some(fetching) = self.fetching.remove(&rid) else {
            error!(target: "service", "Received unexpected fetch result for {rid}, from {remote}");
            return;
        };
        debug_assert_eq!(fetching.from, remote);

        if let Some(s) = self.sessions.get_mut(&remote) {
            // Mark this RID as fetched for this session.
            s.fetched(rid);
        }

        // Notify all fetch subscribers of the fetch result. This is used when the user requests
        // a fetch via the CLI, for example.
        for sub in &fetching.subscribers {
            debug!(target: "service", "Found existing fetch request from {remote}, sending result..");

            let result = match &result {
                Ok(success) => FetchResult::Success {
                    updated: success.updated.clone(),
                    namespaces: success.namespaces.clone(),
                    clone: success.clone,
                },
                Err(e) => FetchResult::Failed {
                    reason: e.to_string(),
                },
            };
            if sub.send(result).is_err() {
                error!(target: "service", "Error sending fetch result for {rid} from {remote}..");
            } else {
                debug!(target: "service", "Sent fetch result for {rid} from {remote}..");
            }
        }

        match result {
            Ok(crate::worker::fetch::FetchResult {
                updated,
                namespaces,
                clone,
                doc,
            }) => {
                info!(target: "service", "Fetched {rid} from {remote} successfully");
                // Update our routing table in case this fetch was user-initiated and doesn't
                // come from an announcement.
                self.seed_discovered(rid, remote, self.clock.into());

                for update in &updated {
                    if update.is_skipped() {
                        trace!(target: "service", "Ref skipped: {update} for {rid}");
                    } else {
                        debug!(target: "service", "Ref updated: {update} for {rid}");
                    }
                }
                self.emitter.emit(Event::RefsFetched {
                    remote,
                    rid,
                    updated: updated.clone(),
                });

                // Announce our new inventory if this fetch was a full clone.
                // Only update and announce inventory for public repositories.
                if clone && doc.is_public() {
                    debug!(target: "service", "Updating and announcing inventory for cloned repository {rid}..");

                    if let Err(e) = self.add_inventory(rid) {
                        error!(target: "service", "Error announcing inventory for {rid}: {e}");
                    }
                }

                // It's possible for a fetch to succeed but nothing was updated.
                if updated.is_empty() || updated.iter().all(|u| u.is_skipped()) {
                    debug!(target: "service", "Nothing to announce, no refs were updated..");
                } else {
                    // Finally, announce the refs. This is useful for nodes to know what we've synced,
                    // beyond just knowing that we have added an item to our inventory.
                    if let Err(e) = self.announce_refs(rid, doc.into(), namespaces) {
                        error!(target: "service", "Failed to announce new refs: {e}");
                    }
                }
            }
            Err(err) => {
                error!(target: "service", "Fetch failed for {rid} from {remote}: {err}");

                // For now, we only disconnect the remote in case of timeout. In the future,
                // there may be other reasons to disconnect.
                if err.is_timeout() {
                    self.outbox.disconnect(remote, DisconnectReason::Fetch(err));
                }
            }
        }
        // We can now try to dequeue more fetches.
        self.dequeue_fetches();
    }
+

+
    /// Attempt to dequeue fetches from all peers.
+
    /// At most one fetch is dequeued per peer. If the fetch cannot be processed,
+
    /// it is put back on the queue for that peer.
+
    ///
+
    /// Fetches are queued for two reasons:
+
    /// 1. The RID was already being fetched.
+
    /// 2. The session was already at fetch capacity.
+
    pub fn dequeue_fetches(&mut self) {
+
        let sessions = self
+
            .sessions
+
            .shuffled()
+
            .map(|(k, _)| *k)
+
            .collect::<Vec<_>>();
+

+
        // Try to dequeue once per session.
+
        for nid in sessions {
+
            // SAFETY: All the keys we are iterating on exist.
+
            #[allow(clippy::unwrap_used)]
+
            let sess = self.sessions.get_mut(&nid).unwrap();
+
            if !sess.is_connected() || sess.is_at_capacity() {
+
                continue;
+
            }
+

+
            if let Some(QueuedFetch {
+
                rid,
+
                from,
+
                refs_at,
+
                timeout,
+
                channel,
+
            }) = sess.dequeue_fetch()
+
            {
+
                debug!(target: "service", "Dequeued fetch for {rid} from session {from}..");
+

+
                if let Some(refs) = NonEmpty::from_vec(refs_at) {
+
                    let repo_entry = self.policies.seed_policy(&rid).expect(
+
                        "Service::dequeue_fetch: error accessing repo seeding configuration",
+
                    );
+
                    let SeedingPolicy::Allow { scope } = repo_entry.policy else {
+
                        debug!(target: "service", "Repository {rid} is no longer seeded, skipping..");
+
                        continue;
+
                    };
+
                    self.fetch_refs_at(rid, from, refs, scope, timeout, channel);
+
                } else {
+
                    // If no refs are specified, always do a full fetch.
+
                    self.fetch(rid, from, timeout, channel);
+
                }
+
            }
+
        }
+
    }
+

+
    /// Inbound connection attempt.
+
    pub fn accepted(&mut self, ip: IpAddr) -> bool {
+
        // Always accept localhost connections, even if we already reached
+
        // our inbound connection limit.
+
        if ip.is_loopback() || ip.is_unspecified() {
+
            return true;
+
        }
+
        // Check for inbound connection limit.
+
        if self.sessions.inbound().count() >= self.config.limits.connection.inbound {
+
            return false;
+
        }
+
        match self.db.addresses().is_ip_banned(ip) {
+
            Ok(banned) => {
+
                if banned {
+
                    debug!(target: "service", "Rejecting inbound connection from banned ip {ip}");
+
                    return false;
+
                }
+
            }
+
            Err(e) => error!(target: "service", "Error querying ban status for {ip}: {e}"),
+
        }
+
        let host: HostName = ip.into();
+

+
        if self.limiter.limit(
+
            host.clone(),
+
            None,
+
            &self.config.limits.rate.inbound,
+
            self.clock,
+
        ) {
+
            trace!(target: "service", "Rate limiting inbound connection from {host}..");
+
            return false;
+
        }
+
        true
+
    }
+

+
    pub fn attempted(&mut self, nid: NodeId, addr: Address) {
+
        debug!(target: "service", "Attempted connection to {nid} ({addr})");
+

+
        if let Some(sess) = self.sessions.get_mut(&nid) {
+
            sess.to_attempted();
+
        } else {
+
            #[cfg(debug_assertions)]
+
            panic!("Service::attempted: unknown session {nid}@{addr}");
+
        }
+
    }
+

+
    pub fn listening(&mut self, local_addr: net::SocketAddr) {
+
        info!(target: "node", "Listening on {local_addr}..");
+

+
        self.listening.push(local_addr);
+
    }
+

+
    pub fn connected(&mut self, remote: NodeId, addr: Address, link: Link) {
+
        info!(target: "service", "Connected to {remote} ({addr}) ({link:?})");
+
        self.emitter.emit(Event::PeerConnected { nid: remote });
+

+
        let msgs = self.initial(link);
+

+
        if link.is_outbound() {
+
            if let Some(peer) = self.sessions.get_mut(&remote) {
+
                peer.to_connected(self.clock);
+
                self.outbox.write_all(peer, msgs);
+
            }
+
        } else {
+
            match self.sessions.entry(remote) {
+
                Entry::Occupied(mut e) => {
+
                    // In this scenario, it's possible that our peer is persistent, and
+
                    // disconnected. We get an inbound connection before we attempt a re-connection,
+
                    // and therefore we treat it as a regular inbound connection.
+
                    //
+
                    // It's also possible that a disconnection hasn't gone through yet and our
+
                    // peer is still in connected state here, while a new inbound connection from
+
                    // that same peer is made. This results in a new connection from a peer that is
+
                    // already connected from the perspective of the service. This appears to be
+
                    // a bug in the underlying networking library.
+
                    let peer = e.get_mut();
+
                    debug!(
+
                        target: "service",
+
                        "Connecting peer {remote} already has a session open ({peer})"
+
                    );
+
                    peer.link = link;
+
                    peer.to_connected(self.clock);
+
                    self.outbox.write_all(peer, msgs);
+
                }
+
                Entry::Vacant(e) => {
+
                    if let HostName::Ip(ip) = addr.host {
+
                        if !address::is_local(&ip) {
+
                            if let Err(e) =
+
                                self.db
+
                                    .addresses_mut()
+
                                    .record_ip(&remote, ip, self.clock.into())
+
                            {
+
                                log::error!(target: "service", "Error recording IP address for {remote}: {e}");
+
                            }
+
                        }
+
                    }
+
                    let peer = e.insert(Session::inbound(
+
                        remote,
+
                        addr,
+
                        self.config.is_persistent(&remote),
+
                        self.rng.clone(),
+
                        self.clock,
+
                        self.config.limits.clone(),
+
                    ));
+
                    self.outbox.write_all(peer, msgs);
+
                }
+
            }
+
        }
+
    }
+

+
    /// Handle a peer disconnection: fail any pending fetches from that peer,
    /// then either schedule a reconnection (for configured persistent peers)
    /// or drop the session and record the disconnect in the address store.
    pub fn disconnected(&mut self, remote: NodeId, link: Link, reason: &DisconnectReason) {
        let since = self.local_time();
        let Some(session) = self.sessions.get_mut(&remote) else {
            // Since we sometimes disconnect the service eagerly, it's not unusual to get a second
            // disconnection event once the transport is dropped.
            trace!(target: "service", "Redundant disconnection for {} ({})", remote, reason);
            return;
        };
        // In cases of connection conflicts, there may be disconnections of one of the two
        // connections. In that case we don't want the service to remove the session.
        if session.link != link {
            return;
        }

        info!(target: "service", "Disconnected from {} ({})", remote, reason);
        self.emitter.emit(Event::PeerDisconnected {
            nid: remote,
            reason: reason.to_string(),
        });

        // Capture these before the session borrow is needed again below.
        let link = session.link;
        let addr = session.addr.clone();

        self.fetching.retain(|_, fetching| {
            if fetching.from != remote {
                return true;
            }
            // Remove and fail any pending fetches from this remote node.
            for resp in &fetching.subscribers {
                resp.send(FetchResult::Failed {
                    reason: format!("disconnected: {reason}"),
                })
                .ok();
            }
            false
        });

        // Attempt to re-connect to persistent peers.
        if self.config.peer(&remote).is_some() {
            // Exponential backoff based on the number of connection attempts,
            // clamped to a fixed [MIN, MAX] reconnection window.
            let delay = LocalDuration::from_secs(2u64.saturating_pow(session.attempts() as u32))
                .clamp(MIN_RECONNECTION_DELTA, MAX_RECONNECTION_DELTA);

            // Nb. We always try to reconnect to persistent peers, even when the error appears
            // to not be transient.
            session.to_disconnected(since, since + delay);

            debug!(target: "service", "Reconnecting to {remote} in {delay}..");

            self.outbox.wakeup(delay);
        } else {
            debug!(target: "service", "Dropping peer {remote}..");
            self.sessions.remove(&remote);

            // Rate the disconnect so the address store can de-prioritize
            // unreliable addresses.
            let severity = match reason {
                DisconnectReason::Dial(_)
                | DisconnectReason::Fetch(_)
                | DisconnectReason::Connection(_) => {
                    if self.is_online() {
                        // If we're "online", there's something wrong with this
                        // peer connection specifically.
                        Severity::Medium
                    } else {
                        Severity::Low
                    }
                }
                DisconnectReason::Session(e) => e.severity(),
                DisconnectReason::Command
                | DisconnectReason::Conflict
                | DisconnectReason::SelfConnection => Severity::Low,
            };

            if let Err(e) = self
                .db
                .addresses_mut()
                .disconnected(&remote, &addr, severity)
            {
                error!(target: "service", "Error updating address store: {e}");
            }
            // Only re-attempt outbound connections, since we don't care if an inbound connection
            // is dropped.
            if link.is_outbound() {
                self.maintain_connections();
            }
        }
        self.dequeue_fetches();
    }
+

+
    pub fn received_message(&mut self, remote: NodeId, message: Message) {
+
        if let Err(err) = self.handle_message(&remote, message) {
+
            // If there's an error, stop processing messages from this peer.
+
            // However, we still relay messages returned up to this point.
+
            self.outbox
+
                .disconnect(remote, DisconnectReason::Session(err));
+

+
            // FIXME: The peer should be set in a state such that we don't
+
            // process further messages.
+
        }
+
    }
+

+
    /// Handle an announcement message.
    ///
    /// Returns `Ok(Some(id))` with the stored announcement's id if this announcement
    /// should be relayed to connected peers, and `Ok(None)` if it should not.
    /// Returns an error if the announcement is invalid (bad signature or a
    /// timestamp too far in the future).
    pub fn handle_announcement(
        &mut self,
        relayer: &NodeId,
        relayer_addr: &Address,
        announcement: &Announcement,
    ) -> Result<Option<gossip::AnnouncementId>, session::Error> {
        // A failed signature check is a protocol violation.
        if !announcement.verify() {
            return Err(session::Error::Misbehavior);
        }
        let Announcement {
            node: announcer,
            message,
            ..
        } = announcement;

        // Ignore our own announcements, in case the relayer sent one by mistake.
        if announcer == self.nid() {
            return Ok(None);
        }
        let now = self.clock;
        let timestamp = message.timestamp();

        // Don't allow messages from too far in the future.
        if timestamp.saturating_sub(now.as_millis()) > MAX_TIME_DELTA.as_millis() as u64 {
            return Err(session::Error::InvalidTimestamp(timestamp));
        }

        // We don't process announcements from nodes we don't know, since the node announcement is
        // what provides DoS protection.
        //
        // Note that it's possible to *not* receive the node announcement, but receive the
        // subsequent announcements of a node in the case of historical gossip messages requested
        // from the `subscribe` message. This can happen if the cut-off time is after the node
        // announcement timestamp, but before the other announcements. In that case, we simply
        // ignore all announcements of that node until we get a node announcement.
        if let AnnouncementMessage::Inventory(_) | AnnouncementMessage::Refs(_) = message {
            match self.db.addresses().get(announcer) {
                Ok(node) => {
                    if node.is_none() {
                        debug!(target: "service", "Ignoring announcement from unknown node {announcer} (t={timestamp})");
                        return Ok(None);
                    }
                }
                Err(e) => {
                    error!(target: "service", "Error looking up node in address book: {e}");
                    return Ok(None);
                }
            }
        }

        // Discard announcement messages we've already seen, otherwise update our last seen time.
        let relay = match self.db.gossip_mut().announced(announcer, announcement) {
            Ok(Some(id)) => {
                log::debug!(
                    target: "service",
                    "Stored announcement from {announcer} to be broadcast in {} (t={timestamp})",
                    (self.last_gossip + GOSSIP_INTERVAL) - self.clock
                );
                // Keep track of who relayed the message for later.
                self.relayed_by.entry(id).or_default().push(*relayer);

                // Decide whether or not to relay this message, if it's fresh.
                // To avoid spamming peers on startup with historical gossip messages,
                // don't relay messages that are too old. We make an exception for node announcements,
                // since they are cached, and will hence often carry old timestamps.
                let relay = message.is_node_announcement()
                    || now - timestamp.to_local_time() <= MAX_TIME_DELTA;
                relay.then_some(id)
            }
            Ok(None) => {
                // FIXME: Still mark as relayed by this peer.
                // FIXME: Refs announcements should not be delayed, since they are only sent
                // to subscribers.
                debug!(target: "service", "Ignoring stale announcement from {announcer} (t={timestamp})");
                return Ok(None);
            }
            Err(e) => {
                error!(target: "service", "Error updating gossip entry from {announcer}: {e}");
                return Ok(None);
            }
        };

        // Dispatch on the announcement payload.
        match message {
            // Process a peer inventory update announcement by (maybe) fetching.
            AnnouncementMessage::Inventory(message) => {
                self.emitter.emit(Event::InventoryAnnounced {
                    nid: *announcer,
                    inventory: message.inventory.to_vec(),
                    timestamp: message.timestamp,
                });
                match self.sync_routing(
                    message.inventory.iter().cloned(),
                    *announcer,
                    message.timestamp,
                ) {
                    Ok(synced) => {
                        if synced.is_empty() {
                            trace!(target: "service", "No routes updated by inventory announcement from {announcer}");
                            return Ok(None);
                        }
                    }
                    Err(e) => {
                        error!(target: "service", "Error processing inventory from {announcer}: {e}");
                        return Ok(None);
                    }
                }
                let mut missing = Vec::new();
                let nid = *self.nid();

                // Here we handle the special case where the inventory we received is that of
                // a connected peer, as opposed to being relayed to us.
                if let Some(sess) = self.sessions.get_mut(announcer) {
                    for id in message.inventory.as_slice() {
                        // If we are connected to the announcer of this inventory, update the peer's
                        // subscription filter to include all inventory items. This way, we'll
                        // relay messages relating to the peer's inventory.
                        if let Some(sub) = &mut sess.subscribe {
                            sub.filter.insert(id);
                        }

                        // If we're seeding and connected to the announcer, and we don't have
                        // the inventory, fetch it from the announcer.
                        if self.policies.is_seeding(id).expect(
                            "Service::handle_announcement: error accessing seeding configuration",
                        ) {
                            // Only if we do not have the repository locally do we fetch here.
                            // If we do have it, only fetch after receiving a ref announcement.
                            match self.db.routing().entry(id, &nid) {
                                Ok(entry) => {
                                    if entry.is_none() {
                                        missing.push(*id);
                                    }
                                }
                                Err(e) => error!(
                                    target: "service",
                                    "Error checking local inventory for {id}: {e}"
                                ),
                            }
                        }
                    }
                }
                // Since we have limited fetch capacity, it may be that we can't fetch an entire
                // inventory from a peer. Therefore we randomize the order of the RIDs to fetch
                // different RIDs from different peers in case multiple peers announce the same
                // RIDs.
                self.rng.shuffle(&mut missing);

                for rid in missing {
                    debug!(target: "service", "Missing seeded inventory {rid}; initiating fetch..");
                    self.fetch(rid, *announcer, FETCH_TIMEOUT, None);
                }
                return Ok(relay);
            }
            AnnouncementMessage::Refs(message) => {
                self.emitter.emit(Event::RefsAnnounced {
                    nid: *announcer,
                    rid: message.rid,
                    refs: message.refs.to_vec(),
                    timestamp: message.timestamp,
                });
                // Empty announcements can be safely ignored.
                let Some(refs) = NonEmpty::from_vec(message.refs.to_vec()) else {
                    debug!(target: "service", "Skipping fetch, no refs in announcement for {} (t={timestamp})", message.rid);
                    return Ok(None);
                };
                // We update inventories when receiving ref announcements, as these could come
                // from a new repository being initialized.
                self.seed_discovered(message.rid, *announcer, message.timestamp);

                // Update sync status of announcer for this repo.
                if let Some(refs) = refs.iter().find(|r| &r.remote == self.nid()) {
                    debug!(
                        target: "service",
                        "Refs announcement of {announcer} for {} contains our own remote at {} (t={})",
                        message.rid, refs.at, message.timestamp
                    );
                    match self.db.seeds_mut().synced(
                        &message.rid,
                        announcer,
                        refs.at,
                        message.timestamp,
                    ) {
                        Ok(updated) => {
                            if updated {
                                debug!(
                                    target: "service",
                                    "Updating sync status of {announcer} for {} to {}",
                                    message.rid, refs.at
                                );
                                self.emitter.emit(Event::RefsSynced {
                                    rid: message.rid,
                                    remote: *announcer,
                                    at: refs.at,
                                });
                            } else {
                                debug!(
                                    target: "service",
                                    "Sync status of {announcer} was not updated for {}",
                                    message.rid,
                                );
                            }
                        }
                        Err(e) => {
                            error!(target: "service", "Error updating sync status for {}: {e}", message.rid);
                        }
                    }
                }
                let repo_entry = self.policies.seed_policy(&message.rid).expect(
                    "Service::handle_announcement: error accessing repo seeding configuration",
                );
                let SeedingPolicy::Allow { scope } = repo_entry.policy else {
                    debug!(
                        target: "service",
                        "Ignoring refs announcement from {announcer}: repository {} isn't seeded (t={timestamp})",
                        message.rid
                    );
                    return Ok(None);
                };
                // Refs can be relayed by peers who don't have the data in storage,
                // therefore we only check whether we are connected to the *announcer*,
                // which is required by the protocol to only announce refs it has.
                let Some(remote) = self.sessions.get(announcer).cloned() else {
                    trace!(
                        target: "service",
                        "Skipping fetch of {}, no sessions connected to {announcer}",
                        message.rid
                    );
                    return Ok(relay);
                };
                // Finally, start the fetch.
                self.fetch_refs_at(message.rid, remote.id, refs, scope, FETCH_TIMEOUT, None);

                return Ok(relay);
            }
            AnnouncementMessage::Node(
                ann @ NodeAnnouncement {
                    features,
                    addresses,
                    ..
                },
            ) => {
                self.emitter.emit(Event::NodeAnnounced {
                    nid: *announcer,
                    alias: ann.alias.clone(),
                    timestamp: ann.timestamp,
                    features: *features,
                    addresses: addresses.to_vec(),
                });
                // If this node isn't a seed, we're not interested in adding it
                // to our address book, but other nodes may be, so we relay the message anyway.
                if !features.has(Features::SEED) {
                    return Ok(relay);
                }

                match self.db.addresses_mut().insert(
                    announcer,
                    ann.version,
                    ann.features,
                    &ann.alias,
                    ann.work(),
                    &ann.agent,
                    timestamp,
                    addresses
                        .iter()
                        // Ignore non-routable addresses unless received from a local network
                        // peer. This allows the node to function in a local network.
                        .filter(|a| a.is_routable() || relayer_addr.is_local())
                        .map(|a| KnownAddress::new(a.clone(), address::Source::Peer)),
                ) {
                    Ok(updated) => {
                        // Only relay if we received new information.
                        if updated {
                            debug!(
                                target: "service",
                                "Address store entry for node {announcer} updated at {timestamp}"
                            );
                            return Ok(relay);
                        }
                    }
                    Err(err) => {
                        // An error here is due to a fault in our address store.
                        error!(target: "service", "Error processing node announcement from {announcer}: {err}");
                    }
                }
            }
        }
        // Reached for node announcements that added no new information.
        Ok(None)
    }
+

+
    pub fn handle_info(&mut self, remote: NodeId, info: &Info) -> Result<(), session::Error> {
+
        match info {
+
            // Nb. We don't currently send this message.
+
            Info::RefsAlreadySynced { rid, at } => {
+
                debug!(target: "service", "Refs already synced for {rid} by {remote}");
+
                self.emitter.emit(Event::RefsSynced {
+
                    rid: *rid,
+
                    remote,
+
                    at: *at,
+
                });
+
            }
+
        }
+

+
        Ok(())
+
    }
+

+
    /// Handle a single wire message from `remote`.
    ///
    /// Recoverable problems (rate limiting, store errors, unknown session) are
    /// logged and swallowed; an `Err` is only returned for session errors that
    /// should terminate the peer connection (propagated from announcement and
    /// info handling).
    pub fn handle_message(
        &mut self,
        remote: &NodeId,
        message: Message,
    ) -> Result<(), session::Error> {
        let local = self.node_id();
        let relay = self.config.is_relay();
        let Some(peer) = self.sessions.get_mut(remote) else {
            warn!(target: "service", "Session not found for {remote}");
            return Ok(());
        };
        // Any message counts as activity for keep-alive purposes.
        peer.last_active = self.clock;

        // Rate-limit by link direction, keyed on the peer's address.
        let limit = match peer.link {
            Link::Outbound => &self.config.limits.rate.outbound,
            Link::Inbound => &self.config.limits.rate.inbound,
        };
        if self
            .limiter
            .limit(peer.addr.clone().into(), Some(remote), limit, self.clock)
        {
            debug!(target: "service", "Rate limiting message from {remote} ({})", peer.addr);
            return Ok(());
        }
        message.log(log::Level::Debug, remote, Link::Inbound);

        // `connected` carries mutable access to the session's ping state and
        // latency history, used only when handling `Pong` below.
        let connected = match &mut peer.state {
            session::State::Disconnected { .. } => {
                debug!(target: "service", "Ignoring message from disconnected peer {}", peer.id);
                return Ok(());
            }
            // In case of a discrepancy between the service state and the state of the underlying
            // wire protocol, we may receive a message from a peer that we consider not fully connected
            // at the service level. To remedy this, we simply transition the peer to a connected state.
            //
            // This is not ideal, but until the wire protocol and service are unified, it's the simplest
            // solution to converge towards the same state.
            session::State::Attempted { .. } | session::State::Initial => {
                debug!(target: "service", "Received unexpected message from connecting peer {}", peer.id);
                debug!(target: "service", "Transitioning peer {} to 'connected' state", peer.id);

                peer.to_connected(self.clock);

                None
            }
            session::State::Connected {
                ping, latencies, ..
            } => Some((ping, latencies)),
        };

        trace!(target: "service", "Received message {message:?} from {remote}");

        match message {
            // Process a peer announcement.
            Message::Announcement(ann) => {
                let relayer = remote;
                let relayer_addr = peer.addr.clone();

                if let Some(id) = self.handle_announcement(relayer, &relayer_addr, &ann)? {
                    if self.config.is_relay() {
                        // Inventory announcements are relayed lazily via the gossip
                        // store's relay flag; everything else is relayed directly.
                        if let AnnouncementMessage::Inventory(_) = ann.message {
                            if let Err(e) = self
                                .database_mut()
                                .gossip_mut()
                                .set_relay(id, gossip::RelayStatus::Relay)
                            {
                                error!(target: "service", "Error setting relay flag for message: {e}");
                                return Ok(());
                            }
                        } else {
                            self.relay(id, ann);
                        }
                    }
                }
            }
            Message::Subscribe(subscribe) => {
                // Filter announcements by interest.
                match self
                    .db
                    .gossip()
                    .filtered(&subscribe.filter, subscribe.since, subscribe.until)
                {
                    Ok(anns) => {
                        for ann in anns {
                            let ann = match ann {
                                Ok(a) => a,
                                Err(e) => {
                                    error!(target: "service", "Error reading gossip message from store: {e}");
                                    continue;
                                }
                            };
                            // Don't send announcements authored by the remote, back to the remote.
                            if ann.node == *remote {
                                continue;
                            }
                            // Only send messages if we're a relay, or it's our own messages.
                            if relay || ann.node == local {
                                self.outbox.write(peer, ann.into());
                            }
                        }
                    }
                    Err(e) => {
                        error!(target: "service", "Error querying gossip messages from store: {e}");
                    }
                }
                // Remember the subscription for future relaying.
                peer.subscribe = Some(subscribe);
            }
            Message::Info(info) => {
                self.handle_info(*remote, &info)?;
            }
            Message::Ping(Ping { ponglen, .. }) => {
                // Ignore pings which ask for too much data.
                if ponglen > Ping::MAX_PONG_ZEROES {
                    return Ok(());
                }
                self.outbox.write(
                    peer,
                    Message::Pong {
                        zeroes: ZeroBytes::new(ponglen),
                    },
                );
            }
            Message::Pong { zeroes } => {
                if let Some((ping, latencies)) = connected {
                    if let session::PingState::AwaitingResponse {
                        len: ponglen,
                        since,
                    } = *ping
                    {
                        // Only accept a pong that matches the requested length.
                        if (ponglen as usize) == zeroes.len() {
                            *ping = session::PingState::Ok;
                            // Keep track of peer latency.
                            latencies.push_back(self.clock - since);
                            if latencies.len() > MAX_LATENCIES {
                                latencies.pop_front();
                            }
                        }
                    }
                }
            }
        }
        Ok(())
    }
+

+
    /// A convenient method to check if we should fetch from a `RefsAnnouncement` with `scope`.
    ///
    /// Builds a [`RefsStatus`] for the announced refs and then narrows its
    /// `want` set: when the scope is `Followed`, refs from remotes outside the
    /// followed namespaces are dropped; our own remote is always excluded.
    fn refs_status_of(
        &self,
        rid: RepoId,
        refs: NonEmpty<RefsAt>,
        scope: &policy::Scope,
    ) -> Result<RefsStatus, Error> {
        let mut refs = RefsStatus::new(rid, refs, self.db.refs())?;
        // Check that there's something we want.
        if refs.want.is_empty() {
            return Ok(refs);
        }
        // Check scope.
        let mut refs = match scope {
            policy::Scope::All => refs,
            policy::Scope::Followed => match self.policies.namespaces_for(&self.storage, &rid) {
                Ok(Namespaces::All) => refs,
                Ok(Namespaces::Followed(followed)) => {
                    refs.want.retain(|r| followed.contains(&r.remote));
                    refs
                }
                Err(e) => return Err(e.into()),
            },
        };
        // Remove our own remote, we don't want to fetch that.
        refs.want.retain(|r| r.remote != self.node_id());

        Ok(refs)
    }
+

+
    /// Add a seed to our routing table.
+
    fn seed_discovered(&mut self, rid: RepoId, nid: NodeId, time: Timestamp) {
+
        if let Ok(result) = self.db.routing_mut().add_inventory([&rid], nid, time) {
+
            if let &[(_, InsertResult::SeedAdded)] = result.as_slice() {
+
                self.emitter.emit(Event::SeedDiscovered { rid, nid });
+
                info!(target: "service", "Routing table updated for {} with seed {nid}", rid);
+
            }
+
        }
+
    }
+

+
    /// Set of initial messages to send to a peer.
    ///
    /// Consists of our own node announcement, our inventory announcement, and
    /// a subscription request covering a backlog of gossip messages.
    fn initial(&mut self, _link: Link) -> Vec<Message> {
        let now = self.clock();
        let filter = self.filter();

        // TODO: Only subscribe to outbound connections, otherwise we will consume too
        // much bandwidth.

        // If we've been previously connected to the network, we'll have received gossip messages.
        // Instead of simply taking the last timestamp we try to ensure we don't miss any
        // messages due un-synchronized clocks.
        //
        // If this is our first connection to the network, we just ask for a fixed backlog
        // of messages to get us started.
        let since = if let Some(last) = self.last_online_at {
            Timestamp::from(last - SUBSCRIBE_BACKLOG_DELTA)
        } else {
            (*now - INITIAL_SUBSCRIBE_BACKLOG_DELTA).into()
        };
        debug!(target: "service", "Subscribing to messages since timestamp {since}..");

        vec![
            Message::node(self.node.clone(), &self.signer),
            Message::inventory(self.inventory.clone(), &self.signer),
            Message::subscribe(filter, since, Timestamp::MAX),
        ]
    }
+

+
    /// Try to guess whether we're online or not.
+
    fn is_online(&self) -> bool {
+
        self.sessions
+
            .connected()
+
            .filter(|(_, s)| s.addr.is_routable() && s.last_active >= self.clock - IDLE_INTERVAL)
+
            .count()
+
            > 0
+
    }
+

+
    /// Remove a local repository from our inventory.
+
    fn remove_inventory(&mut self, rid: &RepoId) -> Result<bool, Error> {
+
        let node = self.node_id();
+
        let now = self.timestamp();
+

+
        let removed = self.db.routing_mut().remove_inventory(rid, &node)?;
+
        if removed {
+
            self.refresh_and_announce_inventory(now)?;
+
        }
+
        Ok(removed)
+
    }
+

+
    /// Add a local repository to our inventory.
    ///
    /// Returns `Ok(false)` when the repository is missing from storage, or
    /// when the routing table already contained it. Returns `Ok(true)` when
    /// the inventory was updated, in which case it is also re-announced.
    fn add_inventory(&mut self, rid: RepoId) -> Result<bool, Error> {
        let node = self.node_id();
        let now = self.timestamp();

        if !self.storage.contains(&rid)? {
            error!(target: "service", "Attempt to add non-existing inventory {rid}: repository not found in storage");
            return Ok(false);
        }
        // Add to our local inventory.
        let updates = self.db.routing_mut().add_inventory([&rid], node, now)?;
        let updated = !updates.is_empty();

        if updated {
            self.refresh_and_announce_inventory(now)?;
        }
        Ok(updated)
    }
+

+
    /// Update cached inventory message, and announce new inventory to peers.
+
    fn refresh_and_announce_inventory(&mut self, time: Timestamp) -> Result<(), Error> {
+
        let inventory = self.inventory()?;
+

+
        self.inventory = gossip::inventory(time, inventory);
+
        self.announce_inventory();
+

+
        Ok(())
+
    }
+

+
    /// Get our local inventory.
+
    ///
+
    /// A node's inventory is the advertized list of repositories offered by a node.
+
    ///
+
    /// A node's inventory consists of *public* repositories that are seeded and available locally
+
    /// in the node's storage. We use the routing table as the canonical state of all inventories,
+
    /// including the local node's.
+
    ///
+
    /// When a repository is unseeded, it is also removed from the inventory. Private repositories
+
    /// are *not* part of a node's inventory.
+
    fn inventory(&self) -> Result<HashSet<RepoId>, Error> {
+
        self.db
+
            .routing()
+
            .get_inventory(self.nid())
+
            .map_err(Error::from)
+
    }
+

+
    /// Process a peer inventory announcement by updating our routing table.
    /// This function expects the peer's full inventory, and prunes entries that are not in the
    /// given inventory.
    ///
    /// Returns a summary of which repositories were added, time-updated, or
    /// removed for this peer.
    fn sync_routing(
        &mut self,
        inventory: impl IntoIterator<Item = RepoId>,
        from: NodeId,
        timestamp: Timestamp,
    ) -> Result<SyncedRouting, Error> {
        let mut synced = SyncedRouting::default();
        let included = inventory.into_iter().collect::<BTreeSet<_>>();

        // First, insert (or refresh) every repository the peer advertises.
        for (rid, result) in
            self.db
                .routing_mut()
                .add_inventory(included.iter(), from, timestamp)?
        {
            match result {
                InsertResult::SeedAdded => {
                    info!(target: "service", "Routing table updated for {rid} with seed {from}");
                    self.emitter.emit(Event::SeedDiscovered { rid, nid: from });

                    if self
                        .policies
                        .is_seeding(&rid)
                        .expect("Service::process_inventory: error accessing seeding configuration")
                    {
                        // TODO: We should fetch here if we're already connected, case this seed has
                        // refs we don't have.
                    }
                    synced.added.push(rid);
                }
                InsertResult::TimeUpdated => {
                    synced.updated.push(rid);
                }
                InsertResult::NotUpdated => {}
            }
        }
        // Then, prune entries the peer no longer advertises: anything in our
        // routing table for this peer that isn't in the announced inventory.
        for rid in self.db.routing().get_inventory(&from)?.into_iter() {
            if !included.contains(&rid) {
                if self.db.routing_mut().remove_inventory(&rid, &from)? {
                    synced.removed.push(rid);
                    self.emitter.emit(Event::SeedDropped { rid, nid: from });
                }
            }
        }
        Ok(synced)
    }
+

+
    /// Return a refs announcement including the given remotes.
    ///
    /// At most `REF_REMOTE_LIMIT` remotes are included; any excess is dropped
    /// with a warning. Returns the signed announcement together with the list
    /// of refs it contains.
    fn refs_announcement_for(
        &mut self,
        rid: RepoId,
        remotes: impl IntoIterator<Item = NodeId>,
    ) -> Result<(Announcement, Vec<RefsAt>), Error> {
        let repo = self.storage.repository(rid)?;
        let timestamp = self.timestamp();
        let mut refs = BoundedVec::<_, REF_REMOTE_LIMIT>::new();

        for remote_id in remotes.into_iter() {
            let refs_at = RefsAt::new(&repo, remote_id)?;

            if refs.push(refs_at).is_err() {
                warn!(
                    target: "service",
                    "refs announcement limit ({}) exceeded, peers will see only some of your repository references",
                    REF_REMOTE_LIMIT,
                );
                break;
            }
        }

        let msg = AnnouncementMessage::from(RefsAnnouncement {
            rid,
            refs: refs.clone(),
            timestamp,
        });
        Ok((msg.signed(&self.signer), refs.into()))
    }
+

+
    /// Announce our own refs for the given repo.
    ///
    /// Also records the announced `rad/sigrefs` head in the local refs
    /// database and emits [`Event::LocalRefsAnnounced`]. The database update
    /// is best effort: failures are logged but not propagated.
    fn announce_own_refs(&mut self, rid: RepoId, doc: Doc) -> Result<Vec<RefsAt>, Error> {
        let (refs, timestamp) = self.announce_refs(rid, doc, [self.node_id()])?;

        // Update refs database with our signed refs branches.
        // This isn't strictly necessary for now, as we only use the database for fetches, and
        // we don't fetch our own refs that are announced, but it's for good measure.
        if let &[r] = refs.as_slice() {
            self.emitter.emit(Event::LocalRefsAnnounced {
                rid,
                refs: r,
                timestamp,
            });
            if let Err(e) = self.database_mut().refs_mut().set(
                &rid,
                &r.remote,
                &SIGREFS_BRANCH,
                r.at,
                timestamp.to_local_time(),
            ) {
                error!(
                    target: "service",
                    "Error updating refs database for `rad/sigrefs` of {} in {rid}: {e}",
                    r.remote
                );
            }
        }
        Ok(refs)
    }
+

+
    /// Announce local refs for given repo.
    ///
    /// Builds a signed refs announcement for the given remotes, updates our
    /// own sync status when our refs are included, and broadcasts the
    /// announcement to all connected peers allowed to see the repository.
    /// Returns the announced refs and the announcement timestamp.
    fn announce_refs(
        &mut self,
        rid: RepoId,
        doc: Doc,
        remotes: impl IntoIterator<Item = NodeId>,
    ) -> Result<(Vec<RefsAt>, Timestamp), Error> {
        let (ann, refs) = self.refs_announcement_for(rid, remotes)?;
        let timestamp = ann.timestamp();
        let peers = self.sessions.connected().map(|(_, p)| p);

        // Update our sync status for our own refs. This is useful for determining if refs were
        // updated while the node was stopped.
        if let Some(refs) = refs.iter().find(|r| r.remote == ann.node) {
            info!(
                target: "service",
                "Announcing own refs for {rid} to peers ({}) (t={timestamp})..",
                refs.at
            );
            // Update our local node's sync status to mark the refs as announced.
            if let Err(e) = self
                .db
                .seeds_mut()
                .synced(&rid, &ann.node, refs.at, timestamp)
            {
                error!(target: "service", "Error updating sync status for local node: {e}");
            } else {
                debug!(target: "service", "Saved local sync status for {rid}..");
            }
        }

        self.outbox.announce(
            ann,
            peers.filter(|p| {
                // Only announce to peers who are allowed to view this repo.
                doc.is_visible_to(&p.id.into())
            }),
            self.db.gossip_mut(),
        );
        Ok((refs, timestamp))
    }
+

+
    /// Attempt to re-establish a connection for an existing session.
    ///
    /// Resets the session state and dials `addr`. Returns `false` when no
    /// session exists for `nid`, in which case nothing is done.
    fn reconnect(&mut self, nid: NodeId, addr: Address) -> bool {
        let Some(sess) = self.sessions.get_mut(&nid) else {
            return false;
        };
        sess.to_initial();
        self.outbox.connect(nid, addr);

        true
    }
+

+
    /// Open an outbound connection to `nid` at `addr`.
    ///
    /// Fails when attempting to connect to ourselves, when a session for this
    /// node already exists, or when the outbound connection limit is reached.
    /// The attempt is recorded in the address book before dialing.
    fn connect(&mut self, nid: NodeId, addr: Address) -> Result<(), ConnectError> {
        debug!(target: "service", "Connecting to {nid} ({addr})..");

        if nid == self.node_id() {
            return Err(ConnectError::SelfConnection);
        }
        if self.sessions.contains_key(&nid) {
            return Err(ConnectError::SessionExists { nid });
        }
        if self.sessions.outbound().count() >= self.config.limits.connection.outbound {
            return Err(ConnectError::LimitReached { nid, addr });
        }
        let persistent = self.config.is_persistent(&nid);
        let timestamp: Timestamp = self.clock.into();

        // Best effort: a failed address-book update doesn't abort the dial.
        if let Err(e) = self.db.addresses_mut().attempted(&nid, &addr, timestamp) {
            error!(target: "service", "Error updating address book with connection attempt: {e}");
        }
        self.sessions.insert(
            nid,
            Session::outbound(
                nid,
                addr.clone(),
                persistent,
                self.rng.clone(),
                self.config.limits.clone(),
            ),
        );
        self.outbox.connect(nid, addr);

        Ok(())
    }
+

+
    /// Compute the set of known seeds for the given repository.
    ///
    /// Seeds that have synced our own refs are added with a sync status
    /// (`Synced` or `OutOfSync`); seeds only known via inventory
    /// announcements are added without one.
    fn seeds(&self, rid: &RepoId) -> Result<Seeds, Error> {
        let mut seeds = Seeds::new(self.rng.clone());

        // First build a list from peers that have synced our own refs, if any.
        // This step is skipped if we don't have the repository yet, or don't have
        // our own refs.
        if let Ok(repo) = self.storage.repository(*rid) {
            if let Ok(local) = RefsAt::new(&repo, self.node_id()) {
                for seed in self.db.seeds().seeds_for(rid)? {
                    let seed = seed?;
                    let state = self.sessions.get(&seed.nid).map(|s| s.state.clone());
                    let synced = if local.at == seed.synced_at.oid {
                        SyncStatus::Synced { at: seed.synced_at }
                    } else {
                        let local = SyncedAt::new(local.at, &repo)?;

                        SyncStatus::OutOfSync {
                            local,
                            remote: seed.synced_at,
                        }
                    };
                    seeds.insert(Seed::new(seed.nid, seed.addresses, state, Some(synced)));
                }
            }
        }

        // Then, add peers we know about but have no information about the sync status.
        // These peers have announced that they seed the repository via an inventory
        // announcement, but we haven't received any ref announcements from them.
        for nid in self.db.routing().get(rid)? {
            if nid == self.node_id() {
                continue;
            }
            if seeds.contains(&nid) {
                // We already have a richer entry for this node.
                continue;
            }
            let addrs = self.db.addresses().addresses_of(&nid)?;
            let state = self.sessions.get(&nid).map(|s| s.state.clone());

            seeds.insert(Seed::new(nid, addrs, state, None));
        }
        Ok(seeds)
    }
+

+
    /// Return a new filter object, based on our seeding policy.
+
    fn filter(&self) -> Filter {
+
        if self.config.seeding_policy.is_allow() {
+
            // TODO: Remove bits for blocked repos.
+
            Filter::default()
+
        } else {
+
            self.filter.clone()
+
        }
+
    }
+

+
    /// Get a timestamp for using in announcements.
+
    /// Never returns the same timestamp twice.
+
    fn timestamp(&mut self) -> Timestamp {
+
        let now = Timestamp::from(self.clock);
+
        if *now > *self.last_timestamp {
+
            self.last_timestamp = now;
+
        } else {
+
            self.last_timestamp = self.last_timestamp + 1;
+
        }
+
        self.last_timestamp
+
    }
+

+
    /// Relay a gossip announcement to eligible connected peers.
    ///
    /// Excludes the peer that authored the announcement, peers that already
    /// relayed this message to us, and — for refs announcements — peers not
    /// allowed to see the repository.
    fn relay(&mut self, id: gossip::AnnouncementId, ann: Announcement) {
        let announcer = ann.node;
        let relayed_by = self.relayed_by.get(&id);
        let rid = if let AnnouncementMessage::Refs(RefsAnnouncement { rid, .. }) = ann.message {
            Some(rid)
        } else {
            None
        };
        // Choose peers we should relay this message to.
        // 1. Don't relay to a peer who sent us this message.
        // 2. Don't relay to the peer who signed this announcement.
        let relay_to = self
            .sessions
            .connected()
            .filter(|(id, _)| {
                relayed_by
                    .map(|relayers| !relayers.contains(id))
                    .unwrap_or(true) // If there are no relayers we let it through.
            })
            .filter(|(id, _)| **id != announcer)
            .filter(|(id, _)| {
                if let Some(rid) = rid {
                    // Only relay this message if the peer is allowed to know about the
                    // repository. If we don't have the repository, return `false` because
                    // we can't determine if it's private or public.
                    self.storage
                        .get(rid)
                        .ok()
                        .flatten()
                        .map(|doc| doc.is_visible_to(&(*id).into()))
                        .unwrap_or(false)
                } else {
                    // Announcement doesn't concern a specific repository, let it through.
                    true
                }
            })
            .map(|(_, p)| p);

        self.outbox.relay(ann, relay_to);
    }
+

+
    ////////////////////////////////////////////////////////////////////////////
+
    // Periodic tasks
+
    ////////////////////////////////////////////////////////////////////////////
+

+
    /// Relay stored gossip announcements that are due for relaying.
    ///
    /// Announcements authored by the local node are skipped.
    fn relay_announcements(&mut self) -> Result<(), Error> {
        let now = self.clock.into();
        let rows = self.database_mut().gossip_mut().relays(now)?;
        let local = self.node_id();

        for (id, msg) in rows {
            // Don't relay our own stored gossip messages.
            if msg.node == local {
                continue;
            }
            self.relay(id, msg);
        }
        Ok(())
    }
+

+
    /// Announce our inventory to all connected peers, unless it was already announced.
    ///
    /// The cached inventory message's timestamp is compared against the last
    /// announced one to detect and skip redundant announcements.
    fn announce_inventory(&mut self) {
        let timestamp = self.inventory.timestamp.to_local_time();

        if self.last_inventory == timestamp {
            debug!(target: "service", "Skipping redundant inventory announcement (t={})", self.inventory.timestamp);
            return;
        }
        let msg = AnnouncementMessage::from(self.inventory.clone());

        self.outbox.announce(
            msg.signed(&self.signer),
            self.sessions.connected().map(|(_, p)| p),
            self.db.gossip_mut(),
        );
        self.last_inventory = timestamp;
    }
+

+
    /// Prune the routing table when it exceeds the configured maximum size.
    ///
    /// Removes entries older than `routing_max_age`, capped at the overshoot
    /// (`len - routing_max_size`), never pruning our own node's entries.
    fn prune_routing_entries(&mut self, now: &LocalTime) -> Result<(), routing::Error> {
        let count = self.db.routing().len()?;
        let max = self.config.limits.routing_max_size;

        if count > max {
            let nid = self.node_id();
            let cutoff = (*now - self.config.limits.routing_max_age).into();

            self.db.routing_mut().prune(cutoff, Some(count - max), &nid)?;
        }
        Ok(())
    }
+

+
    fn disconnect_unresponsive_peers(&mut self, now: &LocalTime) {
+
        let stale = self
+
            .sessions
+
            .connected()
+
            .filter(|(_, session)| *now - session.last_active >= STALE_CONNECTION_TIMEOUT);
+

+
        for (_, session) in stale {
+
            debug!(target: "service", "Disconnecting unresponsive peer {}..", session.id);
+

+
            // TODO: Should we switch the session state to "disconnected" even before receiving
+
            // an official "disconnect"? Otherwise we keep pinging until we get the disconnection.
+

+
            self.outbox.disconnect(
+
                session.id,
+
                DisconnectReason::Session(session::Error::Timeout),
+
            );
+
        }
+
    }
+

+
    /// Ensure connection health by pinging connected peers.
+
    fn keep_alive(&mut self, now: &LocalTime) {
+
        let inactive_sessions = self
+
            .sessions
+
            .connected_mut()
+
            .filter(|(_, session)| *now - session.last_active >= KEEP_ALIVE_DELTA)
+
            .map(|(_, session)| session);
+
        for session in inactive_sessions {
+
            session.ping(self.clock, &mut self.outbox).ok();
+
        }
+
    }
+

+
    /// Get a list of peers available to connect to, sorted by lowest penalty.
    ///
    /// Filters out protocol-incompatible, banned, penalized, already-known
    /// (session exists), local, and self addresses, then groups the remaining
    /// addresses per node. On a database error, logs and returns an empty list.
    fn available_peers(&mut self) -> Vec<Peer> {
        match self.db.addresses().entries() {
            Ok(entries) => {
                // Nb. we don't want to connect to any peers that already have a session with us,
                // even if it's in a disconnected state. Those sessions are re-attempted automatically.
                let mut peers = entries
                    .filter(|entry| entry.version == PROTOCOL_VERSION)
                    .filter(|entry| !entry.address.banned)
                    .filter(|entry| !entry.penalty.is_connect_threshold_reached())
                    .filter(|entry| !self.sessions.contains_key(&entry.node))
                    .filter(|entry| !self.config.external_addresses.contains(&entry.address.addr))
                    .filter(|entry| &entry.node != self.nid())
                    .fold(HashMap::new(), |mut acc, entry| {
                        acc.entry(entry.node)
                            .and_modify(|e: &mut Peer| e.addresses.push(entry.address.clone()))
                            .or_insert_with(|| Peer {
                                nid: entry.node,
                                addresses: vec![entry.address],
                                penalty: entry.penalty,
                            });
                        acc
                    })
                    .into_values()
                    .collect::<Vec<_>>();
                peers.sort_by_key(|p| p.penalty);
                peers
            }
            Err(e) => {
                error!(target: "service", "Unable to lookup available peers in address book: {e}");
                Vec::new()
            }
        }
    }
+

+
    /// Fetch all repositories that are seeded but missing from storage.
    ///
    /// For each repository with an "allow" seeding policy that isn't in
    /// storage, fetches from every currently connected seed. Seed-lookup
    /// failures are logged per repository and don't abort the whole run.
    fn fetch_missing_repositories(&mut self) -> Result<(), Error> {
        // TODO(finto): could filter the policies based on the continue checks
        // below, but `storage.contains` is fallible
        let policies = self.policies.seed_policies()?.collect::<Vec<_>>();
        for policy in policies {
            let rid = policy.rid;

            if !policy.is_allow() {
                continue;
            }
            if self.storage.contains(&rid)? {
                continue;
            }
            match self.seeds(&rid) {
                Ok(seeds) => {
                    if let Some(connected) = NonEmpty::from_vec(seeds.connected().collect()) {
                        for seed in connected {
                            self.fetch(rid, seed.nid, FETCH_TIMEOUT, None);
                        }
                    } else {
                        // TODO: We should make sure that this fetch is retried later, either
                        // when we connect to a seed, or when we discover a new seed.
                        // Since new connections and routing table updates are both conditions for
                        // fetching, we should trigger fetches when those conditions appear.
                        // Another way to handle this would be to update our database, saying
                        // that we're trying to fetch a certain repo. We would then just
                        // iterate over those entries in the above circumstances. This is
                        // merely an optimization though, we can also iterate over all seeded
                        // repos and check which ones are not in our inventory.
                        debug!(target: "service", "No connected seeds found for {rid}..");
                    }
                }
                Err(e) => {
                    error!(target: "service", "Couldn't fetch missing repo {rid}: failed to lookup seeds: {e}");
                }
            }
        }
        Ok(())
    }
+

+
    /// Run idle task for all connections.
+
    fn idle_connections(&mut self) {
+
        for (_, sess) in self.sessions.iter_mut() {
+
            sess.idle(self.clock);
+

+
            if sess.is_stable() {
+
                // Mark as connected once connection is stable.
+
                if let Err(e) =
+
                    self.db
+
                        .addresses_mut()
+
                        .connected(&sess.id, &sess.addr, self.clock.into())
+
                {
+
                    error!(target: "service", "Error updating address book with connection: {e}");
+
                }
+
            }
+
        }
+
    }
+

+
    /// Try to maintain a target number of connections.
    ///
    /// Only applies when the peer configuration is dynamic. Selects
    /// lowest-penalty available peers with a dialable address until
    /// `TARGET_OUTBOUND_PEERS` outbound connections exist or are in progress.
    fn maintain_connections(&mut self) {
        let PeerConfig::Dynamic = self.config.peers else {
            return;
        };
        trace!(target: "service", "Maintaining connections..");

        let target = TARGET_OUTBOUND_PEERS;
        let now = self.clock;
        let outbound = self
            .sessions
            .values()
            .filter(|s| s.link.is_outbound())
            .filter(|s| s.is_connected() || s.is_connecting())
            .count();
        let wanted = target.saturating_sub(outbound);

        // Don't connect to more peers than needed.
        if wanted == 0 {
            return;
        }

        // Peers available to connect to.
        let available = self
            .available_peers()
            .into_iter()
            .filter_map(|peer| {
                // Pick the first address of each peer that is worth retrying.
                peer.addresses
                    .into_iter()
                    .find(|ka| match (ka.last_success, ka.last_attempt) {
                        // If we succeeded the last time we tried, this is a good address.
                        // If it's been long enough that we failed to connect, we also try again.
                        (Some(success), Some(attempt)) => {
                            success >= attempt || now - attempt >= CONNECTION_RETRY_DELTA
                        }
                        // If we haven't succeeded yet, and we waited long enough, we can try this address.
                        (None, Some(attempt)) => now - attempt >= CONNECTION_RETRY_DELTA,
                        // If we have no failed attempts for this address, it's worth a try.
                        (_, None) => true,
                    })
                    .map(|ka| (peer.nid, ka))
            })
            .filter(|(_, ka)| match AddressType::from(&ka.addr) {
                // Only consider onion addresses if configured.
                AddressType::Onion => self.config.onion.is_some(),
                AddressType::Dns | AddressType::Ipv4 | AddressType::Ipv6 => true,
            });

        // Peers we are going to attempt connections to.
        let connect = available.take(wanted).collect::<Vec<_>>();
        if connect.len() < wanted {
            log::debug!(
                target: "service",
                "Not enough available peers to connect to (available={}, wanted={wanted})",
                connect.len()
            );
        }
        for (id, ka) in connect {
            if let Err(e) = self.connect(id, ka.addr.clone()) {
                error!(target: "service", "Service::maintain_connections connection error: {e}");
            }
        }
    }
+

+
    /// Maintain persistent peer connections.
    ///
    /// Re-attempts connections to configured persistent peers whose sessions
    /// are disconnected and whose retry time has elapsed. Candidates are
    /// collected into a vector first so the session borrow is released
    /// before calling `reconnect`.
    fn maintain_persistent(&mut self) {
        trace!(target: "service", "Maintaining persistent peers..");

        let now = self.local_time();
        let mut reconnect = Vec::new();

        for (nid, session) in self.sessions.iter_mut() {
            if let Some(addr) = self.config.peer(nid) {
                if let session::State::Disconnected { retry_at, .. } = &mut session.state {
                    // TODO: Try to reconnect only if the peer was attempted. A disconnect without
                    // even a successful attempt means that we're unlikely to be able to reconnect.

                    if now >= *retry_at {
                        reconnect.push((*nid, addr.clone(), session.attempts()));
                    }
                }
            }
        }

        for (nid, addr, attempts) in reconnect {
            if self.reconnect(nid, addr) {
                debug!(target: "service", "Reconnecting to {nid} (attempts={attempts})...");
            }
        }
    }
+
}
+

+
/// Gives read access to the service state.
///
/// Implemented by `Service`; allows inspecting a running service without
/// exposing its internals mutably (with the exception of the clock).
pub trait ServiceState {
    /// Get the Node ID.
    fn nid(&self) -> &NodeId;
    /// Get the existing sessions.
    fn sessions(&self) -> &Sessions;
    /// Get fetch state.
    fn fetching(&self) -> &HashMap<RepoId, FetchState>;
    /// Get outbox.
    fn outbox(&self) -> &Outbox;
    /// Get rate limiter.
    fn limiter(&self) -> &RateLimiter;
    /// Get event emitter.
    fn emitter(&self) -> &Emitter<Event>;
    /// Get a repository from storage.
    fn get(&self, rid: RepoId) -> Result<Option<Doc>, RepositoryError>;
    /// Get the clock.
    fn clock(&self) -> &LocalTime;
    /// Get the clock mutably.
    fn clock_mut(&mut self) -> &mut LocalTime;
    /// Get service configuration.
    fn config(&self) -> &Config;
    /// Get service metrics.
    fn metrics(&self) -> &Metrics;
}
+

+
// Trivial accessors delegating to the service's fields; the node id comes
// from the signer's public key.
impl<D, S, G> ServiceState for Service<D, S, G>
where
    D: routing::Store,
    G: crypto::signature::Signer<crypto::Signature>,
    S: ReadStorage,
{
    fn nid(&self) -> &NodeId {
        self.signer.public_key()
    }

    fn sessions(&self) -> &Sessions {
        &self.sessions
    }

    fn fetching(&self) -> &HashMap<RepoId, FetchState> {
        &self.fetching
    }

    fn outbox(&self) -> &Outbox {
        &self.outbox
    }

    fn limiter(&self) -> &RateLimiter {
        &self.limiter
    }

    fn emitter(&self) -> &Emitter<Event> {
        &self.emitter
    }

    fn get(&self, rid: RepoId) -> Result<Option<Doc>, RepositoryError> {
        self.storage.get(rid)
    }

    fn clock(&self) -> &LocalTime {
        &self.clock
    }

    fn clock_mut(&mut self) -> &mut LocalTime {
        &mut self.clock
    }

    fn config(&self) -> &Config {
        &self.config
    }

    fn metrics(&self) -> &Metrics {
        &self.metrics
    }
}
+

+
/// Disconnect reason.
#[derive(Debug)]
pub enum DisconnectReason {
    /// Error while dialing the remote. This error occurs before a connection is
    /// even established. Errors of this kind are usually not transient.
    Dial(Arc<dyn std::error::Error + Sync + Send>),
    /// Error with an underlying established connection. Sometimes, reconnecting
    /// after such an error is possible.
    Connection(Arc<dyn std::error::Error + Sync + Send>),
    /// Error with a fetch.
    Fetch(FetchError),
    /// Session error.
    Session(session::Error),
    /// Session conflicts with existing session.
    Conflict,
    /// Connection to self.
    SelfConnection,
    /// User requested disconnect.
    Command,
}
+

+
impl DisconnectReason {
+
    pub fn is_dial_err(&self) -> bool {
+
        matches!(self, Self::Dial(_))
+
    }
+

+
    pub fn is_connection_err(&self) -> bool {
+
        matches!(self, Self::Connection(_))
+
    }
+

+
    pub fn connection() -> Self {
+
        DisconnectReason::Connection(Arc::new(std::io::Error::from(
+
            std::io::ErrorKind::ConnectionReset,
+
        )))
+
    }
+
}
+

+
impl fmt::Display for DisconnectReason {
+
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+
        match self {
+
            Self::Dial(err) => write!(f, "{err}"),
+
            Self::Connection(err) => write!(f, "{err}"),
+
            Self::Command => write!(f, "command"),
+
            Self::SelfConnection => write!(f, "self-connection"),
+
            Self::Conflict => write!(f, "conflict"),
+
            Self::Session(err) => write!(f, "{err}"),
+
            Self::Fetch(err) => write!(f, "fetch: {err}"),
+
        }
+
    }
+
}
+

+
/// Result of a project lookup.
///
/// A project may be found locally, remotely, both, or neither.
#[derive(Debug)]
pub struct Lookup {
    /// Whether the project was found locally or not.
    pub local: Option<Doc>,
    /// A list of remote peers on which the project is known to exist.
    pub remote: Vec<NodeId>,
}
+

+
/// Error during a project lookup.
#[derive(thiserror::Error, Debug)]
pub enum LookupError {
    /// Routing table error.
    #[error(transparent)]
    Routing(#[from] routing::Error),
    /// Repository access error.
    #[error(transparent)]
    Repository(#[from] RepositoryError),
}
+

+
#[derive(Debug, Clone)]
/// Holds currently (or recently) connected peers.
///
/// Thin wrapper over an [`AddressBook`] keyed by node ID; the underlying book
/// is reachable through the `Deref`/`DerefMut` impls below.
pub struct Sessions(AddressBook<NodeId, Session>);
+

+
impl Sessions {
+
    pub fn new(rng: Rng) -> Self {
+
        Self(AddressBook::new(rng))
+
    }
+

+
    /// Iterator over fully connected peers.
+
    pub fn connected(&self) -> impl Iterator<Item = (&NodeId, &Session)> + Clone {
+
        self.0
+
            .iter()
+
            .filter_map(move |(id, sess)| match &sess.state {
+
                session::State::Connected { .. } => Some((id, sess)),
+
                _ => None,
+
            })
+
    }
+

+
    /// Iterator over connected inbound peers.
+
    pub fn inbound(&self) -> impl Iterator<Item = (&NodeId, &Session)> + Clone {
+
        self.connected().filter(|(_, s)| s.link.is_inbound())
+
    }
+

+
    /// Iterator over outbound peers.
+
    pub fn outbound(&self) -> impl Iterator<Item = (&NodeId, &Session)> + Clone {
+
        self.connected().filter(|(_, s)| s.link.is_outbound())
+
    }
+

+
    /// Iterator over mutable fully connected peers.
+
    pub fn connected_mut(&mut self) -> impl Iterator<Item = (&NodeId, &mut Session)> {
+
        self.0.iter_mut().filter(move |(_, s)| s.is_connected())
+
    }
+

+
    /// Iterator over disconnected peers.
+
    pub fn disconnected_mut(&mut self) -> impl Iterator<Item = (&NodeId, &mut Session)> {
+
        self.0.iter_mut().filter(move |(_, s)| s.is_disconnected())
+
    }
+

+
    /// Return whether this node has a fully established session.
+
    pub fn is_connected(&self, id: &NodeId) -> bool {
+
        self.0.get(id).map(|s| s.is_connected()).unwrap_or(false)
+
    }
+

+
    /// Return whether this node can be connected to.
+
    pub fn is_disconnected(&self, id: &NodeId) -> bool {
+
        self.0.get(id).map(|s| s.is_disconnected()).unwrap_or(true)
+
    }
+
}
+

+
// Expose the underlying address book read-only.
impl Deref for Sessions {
    type Target = AddressBook<NodeId, Session>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
+

+
// Expose the underlying address book mutably.
impl DerefMut for Sessions {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
added crates/radicle-protocol/src/service/filter.rs
@@ -0,0 +1,169 @@
+
#![allow(clippy::identity_op)]
+
use std::ops::{Deref, DerefMut};
+

+
pub use bloomy::BloomFilter;
+

+
use radicle::identity::RepoId;
+

+
/// Size in bytes of *large* bloom filter.
/// It can store about 13'675 items with a false positive rate of 1%.
pub const FILTER_SIZE_L: usize = 16 * 1024;
/// Size in bytes of *medium* bloom filter.
/// It can store about 3'419 items with a false positive rate of 1%.
pub const FILTER_SIZE_M: usize = 4 * 1024;
/// Size in bytes of *small* bloom filter.
/// It can store about 855 items with a false positive rate of 1%.
pub const FILTER_SIZE_S: usize = 1 * 1024;

/// Valid filter sizes, ordered from smallest to largest.
pub const FILTER_SIZES: [usize; 3] = [FILTER_SIZE_S, FILTER_SIZE_M, FILTER_SIZE_L];

/// Target false positive rate of filter.
pub const FILTER_FP_RATE: f64 = 0.01;
/// Number of hashes used for bloom filter.
/// See `test_parameters` below for how this relates to the sizes above.
pub const FILTER_HASHES: usize = 7;
+

+
/// Inventory filter used for subscriptions and inventory comparison.
///
/// The [`Default`] instance has all bits set to `1`, ie. it will match
/// everything.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Filter(BloomFilter<RepoId>);
+

+
impl Default for Filter {
    // All bits set: this filter matches every `RepoId`.
    fn default() -> Self {
        Self(BloomFilter::from(vec![0xff; FILTER_SIZE_S]))
    }
}
+

+
impl Filter {
+
    /// Create a new filter with the given items.
+
    ///
+
    /// Uses the iterator's size hint to determine the size of the filter.
+
    pub fn new(ids: impl IntoIterator<Item = RepoId>) -> Self {
+
        let iterator = ids.into_iter();
+
        let (min, _) = iterator.size_hint();
+
        let size = bloomy::bloom::optimal_bits(min, FILTER_FP_RATE) / 8;
+
        let size = if size > FILTER_SIZE_M {
+
            FILTER_SIZE_L
+
        } else if size > FILTER_SIZE_S {
+
            FILTER_SIZE_M
+
        } else {
+
            FILTER_SIZE_S
+
        };
+
        let mut bloom = BloomFilter::with_size(size);
+

+
        for id in iterator {
+
            bloom.insert(&id);
+
        }
+
        Self(bloom)
+
    }
+

+
    /// Empty filter with nothing set.
+
    pub fn empty() -> Self {
+
        Self(BloomFilter::from(vec![0x0; FILTER_SIZE_S]))
+
    }
+

+
    /// Size in bytes.
+
    pub fn size(&self) -> usize {
+
        self.0.bits() / 8
+
    }
+
}
+

+
// Allow read-only bloom filter operations (eg. `contains`) directly on `Filter`.
impl Deref for Filter {
    type Target = BloomFilter<RepoId>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
+

+
// Allow mutating bloom filter operations (eg. `insert`) directly on `Filter`.
impl DerefMut for Filter {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
+

+
// Wrap a raw bloom filter, eg. one decoded from the wire.
impl From<BloomFilter<RepoId>> for Filter {
    fn from(bloom: BloomFilter<RepoId>) -> Self {
        Self(bloom)
    }
}
+

+
#[allow(clippy::unwrap_used)]
#[cfg(any(test, feature = "test"))]
impl qcheck::Arbitrary for Filter {
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
        // Pick one of the three valid filter sizes at random.
        let size = *g
            .choose(&[FILTER_SIZE_S, FILTER_SIZE_M, FILTER_SIZE_L])
            .unwrap();
        let mut bytes = vec![0; size];
        // Randomize at most 64 bytes, so the filter stays sparse rather than
        // uniformly random.
        for _ in 0..64 {
            let index = usize::arbitrary(g) % bytes.len();
            bytes[index] = u8::arbitrary(g);
        }
        Self::from(BloomFilter::from(bytes))
    }
}
+

+
#[cfg(test)]
mod test {
    use super::*;
    use radicle::test::arbitrary;

    /// Checks that the documented capacities of the `FILTER_SIZE_*` constants
    /// match what `bloomy` computes, and that `FILTER_HASHES` is optimal for
    /// all three sizes.
    #[test]
    fn test_parameters() {
        // To store 10'000 items with a false positive rate of 1%, we need about 12KB.
        assert_eq!(bloomy::bloom::optimal_bits(10_000, 0.01) / 8, 11_981);
        // To store 1'000 items with a false positive rate of 1%, we need about 1KB.
        assert_eq!(bloomy::bloom::optimal_bits(1_000, 0.01) / 8, 1198);
        // To store 100 items with a false positive rate of 1%, we need about 120B.
        assert_eq!(bloomy::bloom::optimal_bits(100, 0.01) / 8, 119);

        // With 16KB, we can store 13'675 items with a false positive rate of 1%.
        assert_eq!(
            bloomy::bloom::optimal_capacity(FILTER_SIZE_L * 8, FILTER_FP_RATE),
            13_675
        );
        // With 4KB, we can store 3'419 items with a false positive rate of 1%.
        assert_eq!(
            bloomy::bloom::optimal_capacity(FILTER_SIZE_M * 8, FILTER_FP_RATE),
            3419
        );
        // With 1KB, we can store 855 items with a false positive rate of 1%.
        assert_eq!(
            bloomy::bloom::optimal_capacity(FILTER_SIZE_S * 8, FILTER_FP_RATE),
            855
        );

        assert_eq!(
            bloomy::bloom::optimal_hashes(FILTER_SIZE_L * 8, 13_675),
            FILTER_HASHES
        );
        assert_eq!(
            bloomy::bloom::optimal_hashes(FILTER_SIZE_M * 8, 3419),
            FILTER_HASHES
        );
        assert_eq!(
            bloomy::bloom::optimal_hashes(FILTER_SIZE_S * 8, 855),
            FILTER_HASHES
        );
    }

    /// Checks that `Filter::new` picks the expected size bucket based on the
    /// input iterator's size hint.
    #[test]
    fn test_sizes() {
        let ids = arbitrary::vec::<RepoId>(3420);
        let f = Filter::new(ids.iter().cloned().take(10));
        assert_eq!(f.size(), FILTER_SIZE_S);

        let f = Filter::new(ids.iter().cloned().take(1000));
        assert_eq!(f.size(), FILTER_SIZE_M);

        let f = Filter::new(ids.iter().cloned());
        assert_eq!(f.size(), FILTER_SIZE_L);

        // Just checking that iterators over hash sets give correct size hints.
        let hs = arbitrary::set::<RepoId>(42..=42);
        assert_eq!(hs.iter().size_hint(), (42, Some(42)));
    }
}
added crates/radicle-protocol/src/service/gossip.rs
@@ -0,0 +1,58 @@
+
pub mod store;
+

+
use std::str::FromStr;
+
use std::sync::LazyLock;
+

+
use super::*;
+
use crate::bounded::BoundedVec;
+
use radicle::node::UserAgent;
+
use radicle::node::PROTOCOL_VERSION;
+

+
pub use store::{AnnouncementId, Error, RelayStatus, Store};
+

+
/// This node's user agent string, in the form `/radicle:<version>/`.
///
/// Built from [`PROTOCOL_VERSION`] and parsed once, on first access.
pub static PROTOCOL_VERSION_STRING: LazyLock<UserAgent> = LazyLock::new(|| {
    FromStr::from_str(format!("/radicle:{}/", PROTOCOL_VERSION).as_str())
        .expect("user agent is valid")
});
+

+
pub fn node(config: &Config, timestamp: Timestamp) -> NodeAnnouncement {
+
    let features = config.features();
+
    let alias = config.alias.clone();
+
    let addresses: BoundedVec<_, ADDRESS_LIMIT> = config
+
        .external_addresses
+
        .clone()
+
        .try_into()
+
        .expect("external addresses are within the limit");
+
    let agent = PROTOCOL_VERSION_STRING.clone();
+
    let version = PROTOCOL_VERSION;
+

+
    NodeAnnouncement {
+
        features,
+
        version,
+
        timestamp,
+
        alias,
+
        addresses,
+
        nonce: 0,
+
        agent,
+
    }
+
}
+

+
pub fn inventory(
+
    timestamp: Timestamp,
+
    inventory: impl IntoIterator<Item = RepoId>,
+
) -> InventoryAnnouncement {
+
    let inventory = inventory.into_iter().collect::<Vec<_>>();
+
    if inventory.len() > INVENTORY_LIMIT {
+
        error!(
+
            target: "service",
+
            "inventory announcement limit ({}) exceeded, other nodes will see only some of your projects",
+
            inventory.len()
+
        );
+
    }
+

+
    InventoryAnnouncement {
+
        inventory: BoundedVec::truncate(inventory),
+
        timestamp,
+
    }
+
}
added crates/radicle-protocol/src/service/gossip/store.rs
@@ -0,0 +1,449 @@
+
use std::num::TryFromIntError;
+
use std::{fmt, io};
+

+
use radicle::crypto::Signature;
+
use sqlite as sql;
+
use thiserror::Error;
+

+
use crate::service::filter::Filter;
+
use crate::service::message::{
+
    Announcement, AnnouncementMessage, InventoryAnnouncement, NodeAnnouncement, RefsAnnouncement,
+
};
+
use crate::wire;
+
use crate::wire::Decode;
+
use radicle::node::Database;
+
use radicle::node::NodeId;
+
use radicle::prelude::Timestamp;
+

+
#[derive(Error, Debug)]
+
pub enum Error {
+
    /// An Internal error.
+
    #[error("internal error: {0}")]
+
    Internal(#[from] sql::Error),
+
    /// Unit overflow.
+
    #[error("unit overflow:: {0}")]
+
    UnitOverflow(#[from] TryFromIntError),
+
}
+

+
/// Unique announcement identifier.
///
/// Corresponds to the SQLite `rowid` of the stored announcement.
pub type AnnouncementId = u64;
+

+
/// A database that has access to historical gossip messages.
/// Keeps track of the latest received gossip messages for each node.
/// Grows linearly with the number of nodes on the network.
pub trait Store {
    /// Prune announcements older than the cutoff time.
    /// Returns the number of pruned announcements.
    fn prune(&mut self, cutoff: Timestamp) -> Result<usize, Error>;

    /// Get the timestamp of the last announcement in the store.
    fn last(&self) -> Result<Option<Timestamp>, Error>;

    /// Process an announcement for the given node.
    /// Returns the announcement's id if it is fresh, ie. it wasn't in the
    /// store before, or is newer than the stored one; `None` otherwise.
    fn announced(
        &mut self,
        nid: &NodeId,
        ann: &Announcement,
    ) -> Result<Option<AnnouncementId>, Error>;

    /// Set whether a message should be relayed or not.
    fn set_relay(&mut self, id: AnnouncementId, relay: RelayStatus) -> Result<(), Error>;

    /// Return messages that should be relayed.
    fn relays(&mut self, now: Timestamp) -> Result<Vec<(AnnouncementId, Announcement)>, Error>;

    /// Get all the latest gossip messages of all nodes, filtered by inventory filter and
    /// announcement timestamps.
    ///
    /// # Panics
    ///
    /// Panics if `from` > `to`.
    ///
    fn filtered<'a>(
        &'a self,
        filter: &'a Filter,
        from: Timestamp,
        to: Timestamp,
    ) -> Result<Box<dyn Iterator<Item = Result<Announcement, Error>> + 'a>, Error>;
}
+

+
impl Store for Database {
    fn prune(&mut self, cutoff: Timestamp) -> Result<usize, Error> {
        let mut stmt = self
            .db
            .prepare("DELETE FROM `announcements` WHERE timestamp < ?1")?;

        stmt.bind((1, &cutoff))?;
        stmt.next()?;

        // `change_count` is the number of rows affected by the last statement,
        // ie. the number of pruned announcements.
        Ok(self.db.change_count())
    }

    fn last(&self) -> Result<Option<Timestamp>, Error> {
        let stmt = self
            .db
            .prepare("SELECT MAX(timestamp) AS latest FROM `announcements`")?;

        if let Some(Ok(row)) = stmt.into_iter().next() {
            // `MAX` returns NULL when the table is empty.
            return match row.try_read::<Option<i64>, _>(0)? {
                Some(i) => Ok(Some(Timestamp::try_from(i)?)),
                None => Ok(None),
            };
        }
        Ok(None)
    }

    fn announced(
        &mut self,
        nid: &NodeId,
        ann: &Announcement,
    ) -> Result<Option<AnnouncementId>, Error> {
        assert_ne!(
            ann.timestamp(),
            Timestamp::MIN,
            "Timestamp of {ann:?} must not be zero"
        );
        // Upsert: an existing row is only overwritten when the incoming
        // announcement is strictly newer, and `RETURNING` only yields a row
        // when an insert or update actually took place.
        let mut stmt = self.db.prepare(
            "INSERT INTO `announcements` (node, repo, type, message, signature, timestamp)
             VALUES (?1, ?2, ?3, ?4, ?5, ?6)
             ON CONFLICT DO UPDATE
             SET message = ?4, signature = ?5, timestamp = ?6
             WHERE timestamp < ?6
             RETURNING rowid",
        )?;
        stmt.bind((1, nid))?;

        // Node and inventory announcements aren't tied to a repository; their
        // `repo` column is stored as the empty string.
        match &ann.message {
            AnnouncementMessage::Node(msg) => {
                stmt.bind((2, sql::Value::String(String::new())))?;
                stmt.bind((3, &GossipType::Node))?;
                stmt.bind((4, msg))?;
            }
            AnnouncementMessage::Refs(msg) => {
                stmt.bind((2, &msg.rid))?;
                stmt.bind((3, &GossipType::Refs))?;
                stmt.bind((4, msg))?;
            }
            AnnouncementMessage::Inventory(msg) => {
                stmt.bind((2, sql::Value::String(String::new())))?;
                stmt.bind((3, &GossipType::Inventory))?;
                stmt.bind((4, msg))?;
            }
        }
        stmt.bind((5, &ann.signature))?;
        stmt.bind((6, &ann.message.timestamp()))?;

        if let Some(row) = stmt.into_iter().next() {
            let row = row?;
            let id = row.read::<i64, _>("rowid");

            Ok(Some(id as AnnouncementId))
        } else {
            // The announcement was stale: nothing was written.
            Ok(None)
        }
    }

    fn set_relay(&mut self, id: AnnouncementId, relay: RelayStatus) -> Result<(), Error> {
        let mut stmt = self.db.prepare(
            "UPDATE announcements
             SET relay = ?1
             WHERE rowid = ?2",
        )?;
        stmt.bind((1, relay))?;
        stmt.bind((2, id as i64))?;
        stmt.next()?;

        Ok(())
    }

    fn relays(&mut self, now: Timestamp) -> Result<Vec<(AnnouncementId, Announcement)>, Error> {
        // Atomically mark pending messages as relayed-at-`now` and return
        // them, so that each message is relayed at most once.
        let mut stmt = self.db.prepare(
            "UPDATE announcements
             SET relay = ?1
             WHERE relay IS ?2
             RETURNING rowid, node, type, message, signature, timestamp",
        )?;
        stmt.bind((1, RelayStatus::RelayedAt(now)))?;
        stmt.bind((2, RelayStatus::Relay))?;

        let mut rows = stmt
            .into_iter()
            .map(|row| {
                let row = row?;
                parse::announcement(row)
            })
            .collect::<Result<Vec<_>, _>>()?;

        // Nb. Manually sort by insertion order, because we can't use `ORDER BY` with `RETURNING`
        // as of SQLite 3.45.
        rows.sort_by_key(|(id, _)| *id);

        Ok(rows)
    }

    fn filtered<'a>(
        &'a self,
        filter: &'a Filter,
        from: Timestamp,
        to: Timestamp,
    ) -> Result<Box<dyn Iterator<Item = Result<Announcement, Error>> + 'a>, Error> {
        let mut stmt = self.db.prepare(
            "SELECT rowid, node, type, message, signature, timestamp
             FROM announcements
             WHERE timestamp >= ?1 and timestamp < ?2
             ORDER BY timestamp, node, type",
        )?;
        assert!(*from <= *to);

        stmt.bind((1, &from))?;
        stmt.bind((2, &to))?;

        // The bloom filter is applied lazily on the returned iterator;
        // decoding errors are passed through so the caller can handle them.
        Ok(Box::new(
            stmt.into_iter()
                .map(|row| {
                    let row = row?;
                    let (_, ann) = parse::announcement(row)?;

                    Ok(ann)
                })
                .filter(|ann| match ann {
                    Ok(a) => a.matches(filter),
                    Err(_) => true,
                }),
        ))
    }
}
+

+
// Decode a node announcement from its wire encoding, stored in a binary
// SQL column.
impl TryFrom<&sql::Value> for NodeAnnouncement {
    type Error = sql::Error;

    fn try_from(value: &sql::Value) -> Result<Self, Self::Error> {
        match value {
            sql::Value::Binary(bytes) => {
                let mut reader = io::Cursor::new(bytes);
                NodeAnnouncement::decode(&mut reader).map_err(wire::Error::into)
            }
            _ => Err(sql::Error {
                code: None,
                message: Some("sql: invalid type for node announcement".to_owned()),
            }),
        }
    }
}
+

+
// Store a node announcement as its wire encoding (binary column).
impl sql::BindableWithIndex for &NodeAnnouncement {
    fn bind<I: sql::ParameterIndex>(self, stmt: &mut sql::Statement<'_>, i: I) -> sql::Result<()> {
        wire::serialize(self).bind(stmt, i)
    }
}
+

+
// Decode a refs announcement from its wire encoding, stored in a binary
// SQL column.
impl TryFrom<&sql::Value> for RefsAnnouncement {
    type Error = sql::Error;

    fn try_from(value: &sql::Value) -> Result<Self, Self::Error> {
        match value {
            sql::Value::Binary(bytes) => {
                let mut reader = io::Cursor::new(bytes);
                RefsAnnouncement::decode(&mut reader).map_err(wire::Error::into)
            }
            _ => Err(sql::Error {
                code: None,
                message: Some("sql: invalid type for refs announcement".to_owned()),
            }),
        }
    }
}
+

+
// Store a refs announcement as its wire encoding (binary column).
impl sql::BindableWithIndex for &RefsAnnouncement {
    fn bind<I: sql::ParameterIndex>(self, stmt: &mut sql::Statement<'_>, i: I) -> sql::Result<()> {
        wire::serialize(self).bind(stmt, i)
    }
}
+

+
// Decode an inventory announcement from its wire encoding, stored in a
// binary SQL column.
impl TryFrom<&sql::Value> for InventoryAnnouncement {
    type Error = sql::Error;

    fn try_from(value: &sql::Value) -> Result<Self, Self::Error> {
        match value {
            sql::Value::Binary(bytes) => {
                let mut reader = io::Cursor::new(bytes);
                InventoryAnnouncement::decode(&mut reader).map_err(wire::Error::into)
            }
            _ => Err(sql::Error {
                code: None,
                message: Some("sql: invalid type for inventory announcement".to_owned()),
            }),
        }
    }
}
+

+
// Store an inventory announcement as its wire encoding (binary column).
impl sql::BindableWithIndex for &InventoryAnnouncement {
    fn bind<I: sql::ParameterIndex>(self, stmt: &mut sql::Statement<'_>, i: I) -> sql::Result<()> {
        wire::serialize(self).bind(stmt, i)
    }
}
+

+
// Wire decoding errors surface as codeless SQL errors carrying the
// error's display string.
impl From<wire::Error> for sql::Error {
    fn from(other: wire::Error) -> Self {
        sql::Error {
            code: None,
            message: Some(other.to_string()),
        }
    }
}
+

+
/// Message relay status.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RelayStatus {
    /// Message is pending relay.
    Relay,
    /// Message should not be relayed.
    DontRelay,
    /// Message was relayed at the given time.
    RelayedAt(Timestamp),
}
+

+
// Column encoding: NULL = pending relay, -1 = don't relay,
// any other integer = timestamp at which the message was relayed.
impl sql::BindableWithIndex for RelayStatus {
    fn bind<I: sql::ParameterIndex>(self, stmt: &mut sql::Statement<'_>, i: I) -> sql::Result<()> {
        match self {
            Self::Relay => sql::Value::Null.bind(stmt, i),
            Self::DontRelay => sql::Value::Integer(-1).bind(stmt, i),
            Self::RelayedAt(t) => t.bind(stmt, i),
        }
    }
}
+

+
/// Type of gossip message, as stored in the `type` column.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum GossipType {
    /// Refs announcement.
    Refs,
    /// Node announcement.
    Node,
    /// Inventory announcement.
    Inventory,
}
+

+
impl fmt::Display for GossipType {
+
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+
        match self {
+
            Self::Refs => write!(f, "refs"),
+
            Self::Node => write!(f, "node"),
+
            Self::Inventory => write!(f, "inventory"),
+
        }
+
    }
+
}
+

+
// Stored as its display string ("refs", "node" or "inventory").
impl sql::BindableWithIndex for &GossipType {
    fn bind<I: sql::ParameterIndex>(self, stmt: &mut sql::Statement<'_>, i: I) -> sql::Result<()> {
        self.to_string().as_str().bind(stmt, i)
    }
}
+

+
// Inverse of the `Display` impl above; the two must stay in sync.
impl TryFrom<&sql::Value> for GossipType {
    type Error = sql::Error;

    fn try_from(value: &sql::Value) -> Result<Self, Self::Error> {
        match value {
            sql::Value::String(s) => match s.as_str() {
                "refs" => Ok(Self::Refs),
                "node" => Ok(Self::Node),
                "inventory" => Ok(Self::Inventory),
                other => Err(sql::Error {
                    code: None,
                    message: Some(format!("unknown gossip type '{other}'")),
                }),
            },
            _ => Err(sql::Error {
                code: None,
                message: Some("sql: invalid type for gossip type".to_owned()),
            }),
        }
    }
}
+

+
mod parse {
    use super::*;

    /// Parse a full `announcements` row into its id and [`Announcement`].
    pub fn announcement(row: sql::Row) -> Result<(AnnouncementId, Announcement), Error> {
        let id = row.read::<i64, _>("rowid") as AnnouncementId;
        let node = row.read::<NodeId, _>("node");
        let gt = row.read::<GossipType, _>("type");
        // The `type` column determines how the binary `message` column is decoded.
        let message = match gt {
            GossipType::Refs => {
                let ann = row.try_read::<RefsAnnouncement, _>("message")?;
                AnnouncementMessage::Refs(ann)
            }
            GossipType::Inventory => {
                let ann = row.try_read::<InventoryAnnouncement, _>("message")?;
                AnnouncementMessage::Inventory(ann)
            }
            GossipType::Node => {
                let ann = row.try_read::<NodeAnnouncement, _>("message")?;
                AnnouncementMessage::Node(ann)
            }
        };
        let signature = row.read::<Signature, _>("signature");
        let timestamp = row.read::<Timestamp, _>("timestamp");

        // The indexed `timestamp` column should mirror the message's own timestamp.
        debug_assert_eq!(timestamp, message.timestamp());

        Ok((
            id,
            Announcement {
                node,
                message,
                signature,
            },
        ))
    }
}
+

+
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod test {
    use super::*;
    use crate::bounded::BoundedVec;
    use localtime::LocalTime;
    use radicle::assert_matches;
    use radicle::identity::RepoId;
    use radicle::node::device::Device;
    use radicle::test::arbitrary;

    /// Exercises the `announced` / `set_relay` / `relays` life cycle on an
    /// in-memory database.
    #[test]
    fn test_announced() {
        let mut db = Database::memory().unwrap();
        let nid = arbitrary::gen::<NodeId>(1);
        let rid = arbitrary::gen::<RepoId>(1);
        let timestamp = LocalTime::now().into();
        let signer = Device::mock();
        let refs = AnnouncementMessage::Refs(RefsAnnouncement {
            rid,
            refs: BoundedVec::new(),
            timestamp,
        })
        .signed(&signer);
        let inv = AnnouncementMessage::Inventory(InventoryAnnouncement {
            inventory: BoundedVec::new(),
            timestamp,
        })
        .signed(&signer);

        // Only the first announcement of each type is recognized as new.
        let id1 = db.announced(&nid, &refs).unwrap().unwrap();
        assert!(db.announced(&nid, &refs).unwrap().is_none());

        let id2 = db.announced(&nid, &inv).unwrap().unwrap();
        assert!(db.announced(&nid, &inv).unwrap().is_none());

        // Nothing was set to be relayed.
        assert_eq!(db.relays(LocalTime::now().into()).unwrap().len(), 0);

        // Set the messages to be relayed.
        db.set_relay(id1, RelayStatus::Relay).unwrap();
        db.set_relay(id2, RelayStatus::Relay).unwrap();

        // Now they are returned.
        assert_matches!(
            db.relays(LocalTime::now().into()).unwrap().as_slice(),
            &[(id1_, _), (id2_, _)]
            if id1_ == id1 && id2_ == id2
        );
        // But only once.
        assert_matches!(db.relays(LocalTime::now().into()).unwrap().as_slice(), &[]);
    }
}
added crates/radicle-protocol/src/service/io.rs
@@ -0,0 +1,208 @@
+
use std::collections::VecDeque;
+
use std::time;
+

+
use localtime::LocalDuration;
+
use log::*;
+
use radicle::identity::RepoId;
+
use radicle::node::config::FetchPackSizeLimit;
+
use radicle::node::Address;
+
use radicle::node::NodeId;
+
use radicle::storage::refs::RefsAt;
+

+
use crate::service::message::Message;
+
use crate::service::session::Session;
+
use crate::service::DisconnectReason;
+
use crate::service::Link;
+

+
use super::gossip;
+
use super::message::{Announcement, AnnouncementMessage};
+

+
/// I/O operation to execute at the network/wire level.
#[derive(Debug)]
pub enum Io {
    /// There are some messages ready to be sent to a peer.
    Write(NodeId, Vec<Message>),
    /// Connect to a peer.
    Connect(NodeId, Address),
    /// Disconnect from a peer.
    Disconnect(NodeId, DisconnectReason),
    /// Fetch repository data from a peer.
    Fetch {
        /// Repo being fetched.
        rid: RepoId,
        /// Remote node being fetched from.
        remote: NodeId,
        /// If the node is fetching specific `rad/sigrefs`.
        /// `None` means all remotes are fetched.
        refs_at: Option<Vec<RefsAt>>,
        /// Fetch timeout.
        timeout: time::Duration,
        /// Limit the number of bytes fetched.
        reader_limit: FetchPackSizeLimit,
    },
    /// Ask for a wakeup in a specified amount of time.
    Wakeup(LocalDuration),
}
+

+
/// Interface to the network.
///
/// Queued operations are drained via the [`Iterator`] impl below.
#[derive(Debug, Default)]
pub struct Outbox {
    /// Outgoing I/O queue.
    io: VecDeque<Io>,
}
+

+
impl Outbox {
    /// Connect to a peer.
    pub fn connect(&mut self, id: NodeId, addr: Address) {
        self.io.push_back(Io::Connect(id, addr));
    }

    /// Disconnect a peer.
    pub fn disconnect(&mut self, id: NodeId, reason: DisconnectReason) {
        self.io.push_back(Io::Disconnect(id, reason));
    }

    /// Queue a single message to be sent to a peer.
    pub fn write(&mut self, remote: &Session, msg: Message) {
        msg.log(log::Level::Debug, &remote.id, Link::Outbound);
        trace!(target: "service", "Write {:?} to {}", &msg, remote);

        self.io.push_back(Io::Write(remote.id, vec![msg]));
    }

    /// Announce something to a peer. This is meant for our own announcement messages.
    pub fn announce<'a>(
        &mut self,
        ann: Announcement,
        peers: impl Iterator<Item = &'a Session>,
        gossip: &mut impl gossip::Store,
    ) {
        // Store our announcement so that it can be retrieved from us later, just like
        // announcements we receive from peers.
        if let Err(e) = gossip.announced(&ann.node, &ann) {
            error!(target: "service", "Error updating our gossip store with announced message: {e}");
        }

        // Refs announcements are only sent to peers whose subscription filter
        // matches the repo; all other announcements go to every peer.
        for peer in peers {
            if let AnnouncementMessage::Refs(refs) = &ann.message {
                if let Some(subscribe) = &peer.subscribe {
                    if subscribe.filter.contains(&refs.rid) {
                        self.write(peer, ann.clone().into());
                    } else {
                        debug!(
                            target: "service",
                            "Skipping refs announcement relay to {peer}: peer isn't subscribed to {}",
                            refs.rid
                        );
                    }
                } else {
                    debug!(
                        target: "service",
                        "Skipping refs announcement relay to {peer}: peer didn't send a subscription filter"
                    );
                }
            } else {
                self.write(peer, ann.clone().into());
            }
        }
    }

    /// Queue multiple messages to be sent to a peer as one write.
    pub fn write_all(&mut self, remote: &Session, msgs: impl IntoIterator<Item = Message>) {
        let msgs = msgs.into_iter().collect::<Vec<_>>();

        for (ix, msg) in msgs.iter().enumerate() {
            trace!(
                target: "service",
                "Write {:?} to {} ({}/{})",
                msg,
                remote,
                ix + 1,
                msgs.len()
            );
            msg.log(log::Level::Trace, &remote.id, Link::Outbound);
        }
        self.io.push_back(Io::Write(remote.id, msgs));
    }

    /// Ask for a wakeup after the given duration.
    pub fn wakeup(&mut self, after: LocalDuration) {
        self.io.push_back(Io::Wakeup(after));
    }

    /// Initiate a fetch of `rid` from the given peer, marking the session
    /// as fetching.
    pub fn fetch(
        &mut self,
        peer: &mut Session,
        rid: RepoId,
        refs_at: Vec<RefsAt>,
        timeout: time::Duration,
        reader_limit: FetchPackSizeLimit,
    ) {
        peer.fetching(rid);

        // An empty `refs_at` means "fetch all remotes".
        let refs_at = (!refs_at.is_empty()).then_some(refs_at);

        if let Some(refs_at) = &refs_at {
            debug!(
                target: "service",
                "Fetch initiated for {rid} with {peer} ({} remote(s))..", refs_at.len()
            );
        } else {
            debug!(target: "service", "Fetch initiated for {rid} with {peer} (all remotes)..");
        }

        self.io.push_back(Io::Fetch {
            rid,
            refs_at,
            remote: peer.id,
            timeout,
            reader_limit,
        });
    }

    /// Broadcast a message to a list of peers.
    pub fn broadcast<'a>(
        &mut self,
        msg: impl Into<Message>,
        peers: impl IntoIterator<Item = &'a Session>,
    ) {
        let msg = msg.into();
        for peer in peers {
            self.write(peer, msg.clone());
        }
    }

    /// Relay a message to interested peers.
    pub fn relay<'a>(&mut self, ann: Announcement, peers: impl IntoIterator<Item = &'a Session>) {
        if let AnnouncementMessage::Refs(msg) = &ann.message {
            let id = msg.rid;
            let peers = peers.into_iter().filter(|p| {
                if let Some(subscribe) = &p.subscribe {
                    subscribe.filter.contains(&id)
                } else {
                    // If the peer did not send us a `subscribe` message, we don't
                    // relay any messages to them.
                    false
                }
            });
            self.broadcast(ann, peers);
        } else {
            self.broadcast(ann, peers);
        }
    }

    /// Number of items in outbox.
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        self.io.len()
    }

    /// Direct access to the underlying queue, for tests.
    #[cfg(any(test, feature = "test"))]
    pub fn queue(&mut self) -> &mut VecDeque<Io> {
        &mut self.io
    }
}
+

+
/// Draining iterator over queued I/O items, yielded in FIFO order.
impl Iterator for Outbox {
    type Item = Io;

    fn next(&mut self) -> Option<Self::Item> {
        // Pop from the front so items come out in the order they were queued.
        self.io.pop_front()
    }
}
added crates/radicle-protocol/src/service/limiter.rs
@@ -0,0 +1,210 @@
+
use std::collections::{HashMap, HashSet};
+

+
use localtime::LocalTime;
+
use radicle::node::{address, config, HostName, NodeId};
+

+
/// Peer rate limiter.
///
/// Uses a token bucket algorithm, where each address starts with a certain amount of tokens,
/// and every request from that address consumes one token. Tokens refill at a predefined
/// rate. This mechanism allows for consistent request rates with potential bursts up to the
/// bucket's capacity.
#[derive(Debug, Default)]
pub struct RateLimiter {
    /// One token bucket per remote host.
    pub buckets: HashMap<HostName, TokenBucket>,
    /// Nodes that are never rate-limited.
    pub bypass: HashSet<NodeId>,
}
+

+
impl RateLimiter {
+
    /// Create a new rate limiter with a bypass list. Nodes in the bypass list are not limited.
+
    pub fn new(bypass: impl IntoIterator<Item = NodeId>) -> Self {
+
        Self {
+
            buckets: HashMap::default(),
+
            bypass: bypass.into_iter().collect(),
+
        }
+
    }
+

+
    /// Call this when the address has performed some rate-limited action.
+
    /// Returns whether the action is rate-limited or not.
+
    ///
+
    /// Supplying a different amount of tokens per address is useful if for eg. a peer
+
    /// is outbound vs. inbound.
+
    pub fn limit<T: AsTokens>(
+
        &mut self,
+
        addr: HostName,
+
        nid: Option<&NodeId>,
+
        tokens: &T,
+
        now: LocalTime,
+
    ) -> bool {
+
        if let Some(nid) = nid {
+
            if self.bypass.contains(nid) {
+
                return false;
+
            }
+
        }
+
        if let HostName::Ip(ip) = addr {
+
            // Don't limit LAN addresses.
+
            if !address::is_routable(&ip) {
+
                return false;
+
            }
+
        }
+
        !self
+
            .buckets
+
            .entry(addr)
+
            .or_insert_with(|| TokenBucket::new(tokens.capacity(), tokens.rate(), now))
+
            .take(now)
+
    }
+
}
+

+
/// Any type that can be assigned a number of rate-limit tokens.
pub trait AsTokens {
    /// Get the token capacity for this object.
    ///
    /// This is the maximum burst size: the number of actions allowed
    /// back-to-back before refilling kicks in.
    fn capacity(&self) -> usize;
    /// Get the refill rate for this object.
    /// A rate of `1.0` means one token per second.
    fn rate(&self) -> f64;
}
+

+
/// Rate-limit configuration maps directly onto token-bucket parameters.
impl AsTokens for config::RateLimit {
    fn capacity(&self) -> usize {
        self.capacity
    }

    fn rate(&self) -> f64 {
        self.fill_rate
    }
}
+

+
/// A single token bucket, tracking the remaining request budget for one host.
// NOTE(review): serialized with camelCase keys — presumably surfaced via some
// JSON diagnostics/metrics endpoint; confirm at the call site.
#[derive(Debug, serde::Serialize)]
#[serde(rename_all = "camelCase")]
pub struct TokenBucket {
    /// Token refill rate per second.
    rate: f64,
    /// Token capacity.
    capacity: f64,
    /// Tokens remaining.
    tokens: f64,
    /// Time of last token refill.
    refilled_at: LocalTime,
}
+

+
impl TokenBucket {
+
    fn new(tokens: usize, rate: f64, now: LocalTime) -> Self {
+
        Self {
+
            rate,
+
            capacity: tokens as f64,
+
            tokens: tokens as f64,
+
            refilled_at: now,
+
        }
+
    }
+

+
    fn refill(&mut self, now: LocalTime) {
+
        let elapsed = now.duration_since(self.refilled_at);
+
        let tokens = elapsed.as_secs() as f64 * self.rate;
+

+
        self.tokens = (self.tokens + tokens).min(self.capacity);
+
        self.refilled_at = now;
+
    }
+

+
    fn take(&mut self, now: LocalTime) -> bool {
+
        self.refill(now);
+

+
        if self.tokens >= 1.0 {
+
            self.tokens -= 1.0;
+
            true
+
        } else {
+
            false
+
        }
+
    }
+
}
+

+
#[cfg(test)]
#[allow(clippy::bool_assert_comparison, clippy::redundant_clone)]
mod test {
    use radicle::test::arbitrary;

    use super::*;

    /// Plain `(capacity, rate)` tuple, for concise test parameters.
    impl AsTokens for (usize, f64) {
        fn capacity(&self) -> usize {
            self.0
        }

        fn rate(&self) -> f64 {
            self.1
        }
    }

    // Fixed: test names were misspelled `limitter`.
    #[test]
    fn test_limiter_refill() {
        let mut r = RateLimiter::default();
        let t = (3, 0.2); // Three tokens burst. One token every 5 seconds.
        let a = HostName::Dns(String::from("seed.radicle.example.com"));
        let n = arbitrary::gen::<NodeId>(1);
        let n = Some(&n);

        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(0)), false); // Burst capacity
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(1)), false); // Burst capacity
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(2)), false); // Burst capacity
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(3)), true); // Limited
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(4)), true); // Limited
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(5)), false); // Refilled (1)
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(6)), true); // Limited
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(7)), true); // Limited
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(8)), true); // Limited
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(9)), true); // Limited
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(10)), false); // Refilled (1)
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(11)), true); // Limited
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(12)), true); // Limited
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(13)), true); // Limited
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(14)), true); // Limited
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(15)), false); // Refilled (1)
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(16)), true); // Limited
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(60)), false); // Refilled (3)
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(60)), false); // Burst capacity
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(60)), false); // Burst capacity
        assert_eq!(r.limit(a.clone(), n, &t, LocalTime::from_secs(60)), true); // Limited
    }

    /// Each address gets its own, independent bucket.
    #[test]
    #[rustfmt::skip]
    fn test_limiter_multi() {
        let t = (1, 1.0); // One token per second. One token burst.
        let n = arbitrary::gen::<NodeId>(1);
        let n = Some(&n);
        let mut r = RateLimiter::default();
        let addr1 = HostName::Dns(String::from("seed.radicle.example.com"));
        let addr2 = HostName::Dns(String::from("seed.radicle.example.net"));

        assert_eq!(r.limit(addr1.clone(), n, &t, LocalTime::from_secs(0)), false);
        assert_eq!(r.limit(addr1.clone(), n, &t, LocalTime::from_secs(0)), true);
        assert_eq!(r.limit(addr2.clone(), n, &t, LocalTime::from_secs(0)), false);
        assert_eq!(r.limit(addr2.clone(), n, &t, LocalTime::from_secs(0)), true);
        assert_eq!(r.limit(addr1.clone(), n, &t, LocalTime::from_secs(1)), false);
        assert_eq!(r.limit(addr1.clone(), n, &t, LocalTime::from_secs(1)), true);
        assert_eq!(r.limit(addr2.clone(), n, &t, LocalTime::from_secs(1)), false);
        assert_eq!(r.limit(addr2.clone(), n, &t, LocalTime::from_secs(1)), true);
    }

    /// Buckets honor the token parameters they were created with.
    #[test]
    #[rustfmt::skip]
    fn test_limiter_different_rates() {
        let t1 = (1, 1.0); // One token per second. One token burst.
        let t2 = (2, 2.0); // Two tokens per second. Two token burst.
        let n = arbitrary::gen::<NodeId>(1);
        let n = Some(&n);
        let mut r = RateLimiter::default();
        let addr1 = HostName::Dns(String::from("seed.radicle.example.com"));
        let addr2 = HostName::Dns(String::from("seed.radicle.example.net"));

        assert_eq!(r.limit(addr1.clone(), n, &t1, LocalTime::from_secs(0)), false);
        assert_eq!(r.limit(addr1.clone(), n, &t1, LocalTime::from_secs(0)), true);
        assert_eq!(r.limit(addr2.clone(), n, &t2, LocalTime::from_secs(0)), false);
        assert_eq!(r.limit(addr2.clone(), n, &t2, LocalTime::from_secs(0)), false);
        assert_eq!(r.limit(addr2.clone(), n, &t2, LocalTime::from_secs(0)), true);
        assert_eq!(r.limit(addr1.clone(), n, &t1, LocalTime::from_secs(1)), false); // Refilled (1)
        assert_eq!(r.limit(addr1.clone(), n, &t1, LocalTime::from_secs(1)), true);
        assert_eq!(r.limit(addr2.clone(), n, &t2, LocalTime::from_secs(1)), false); // Refilled (2)
        assert_eq!(r.limit(addr2.clone(), n, &t2, LocalTime::from_secs(1)), false);
        assert_eq!(r.limit(addr2.clone(), n, &t2, LocalTime::from_secs(1)), true);
    }
}
added crates/radicle-protocol/src/service/message.rs
@@ -0,0 +1,782 @@
+
use std::{fmt, mem};
+

+
use bytes::{Buf, BufMut};
+
use nonempty::NonEmpty;
+

+
use radicle::crypto;
+
use radicle::git;
+
use radicle::identity::RepoId;
+
use radicle::node;
+
use radicle::node::device::Device;
+
use radicle::node::{Address, Alias, UserAgent};
+
use radicle::storage;
+
use radicle::storage::refs::RefsAt;
+

+
use crate::bounded::BoundedVec;
+
use crate::service::filter::Filter;
+
use crate::service::{Link, NodeId, Timestamp};
+
use crate::wire;
+

+
/// Maximum number of addresses which can be announced to other nodes.
pub const ADDRESS_LIMIT: usize = 16;
/// Maximum number of repository remotes that can be included in a [`RefsAnnouncement`] message.
pub const REF_REMOTE_LIMIT: usize = 1024;
/// Maximum number of inventory which can be announced to other nodes.
// NOTE(review): presumably chosen so a maximal inventory announcement fits
// within `Message::MAX_SIZE` — confirm against the wire message size.
pub const INVENTORY_LIMIT: usize = 2973;
+

+
/// A subscription request: ask a peer for gossip matching a filter
/// within a time range.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Subscribe {
    /// Subscribe to events matching this filter.
    pub filter: Filter,
    /// Request messages since this time.
    pub since: Timestamp,
    /// Request messages until this time.
    pub until: Timestamp,
}
+

+
impl Subscribe {
+
    pub fn all() -> Self {
+
        Self {
+
            filter: Filter::default(),
+
            since: Timestamp::MIN,
+
            until: Timestamp::MAX,
+
        }
+
    }
+
}
+

+
/// Node announcing itself to the network.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct NodeAnnouncement {
    /// Supported protocol version.
    pub version: u8,
    /// Advertized features.
    pub features: node::Features,
    /// Monotonic timestamp.
    pub timestamp: Timestamp,
    /// Non-unique alias.
    pub alias: Alias,
    /// Announced addresses.
    pub addresses: BoundedVec<Address, ADDRESS_LIMIT>,
    /// Nonce used for announcement proof-of-work.
    /// See [`NodeAnnouncement::work`] and [`NodeAnnouncement::solve`].
    pub nonce: u64,
    /// User-agent string.
    pub agent: UserAgent,
}
+

+
impl NodeAnnouncement {
+
    /// Calculate the amount of work that went into creating this announcement.
+
    ///
+
    /// Proof-of-work uses the [`scrypt`] algorithm with the parameters in
+
    /// [`Announcement::POW_PARAMS`]. The "work" is calculated by counting the number of leading
+
    /// zero bits after running `scrypt` on a serialized [`NodeAnnouncement`] using
+
    /// [`wire::serialize`].
+
    ///
+
    /// In other words, `work = leading-zeros(scrypt(serialize(announcement)))`.
+
    ///
+
    /// Higher numbers mean higher difficulty. For each increase in work, difficulty is doubled.
+
    /// For instance, an output of `7` is *four* times more work than an output of `5`.
+
    ///
+
    pub fn work(&self) -> u32 {
+
        let (n, r, p) = Announcement::POW_PARAMS;
+
        let params = scrypt::Params::new(n, r, p, 32).expect("proof-of-work parameters are valid");
+
        let mut output = [0u8; 32];
+

+
        scrypt::scrypt(
+
            wire::serialize(self).as_ref(),
+
            Announcement::POW_SALT,
+
            &params,
+
            &mut output,
+
        )
+
        .expect("proof-of-work output vector is a valid length");
+

+
        // Calculate the number of leading zero bits in the output vector.
+
        if let Some((zero_bytes, non_zero)) = output.iter().enumerate().find(|(_, &x)| x != 0) {
+
            zero_bytes as u32 * 8 + non_zero.leading_zeros()
+
        } else {
+
            output.len() as u32 * 8
+
        }
+
    }
+

+
    /// Solve the proof-of-work of a node announcement for the given target, by iterating through
+
    /// different nonces.
+
    ///
+
    /// If the given difficulty target is too high, there may not be a result. In that case, `None`
+
    /// is returned.
+
    pub fn solve(mut self, target: u32) -> Option<Self> {
+
        loop {
+
            if let Some(nonce) = self.nonce.checked_add(1) {
+
                self.nonce = nonce;
+

+
                if self.work() >= target {
+
                    break;
+
                }
+
            } else {
+
                return None;
+
            }
+
        }
+
        Some(self)
+
    }
+
}
+

+
impl wire::Encode for NodeAnnouncement {
    fn encode(&self, buf: &mut impl BufMut) {
        // Field order is part of the wire format and must match
        // `wire::Decode for NodeAnnouncement` below.
        self.version.encode(buf);
        self.features.encode(buf);
        self.timestamp.encode(buf);
        self.alias.encode(buf);
        self.addresses.encode(buf);
        self.nonce.encode(buf);
        self.agent.encode(buf);
    }
}
+

+
impl wire::Decode for NodeAnnouncement {
    fn decode(buf: &mut impl Buf) -> Result<Self, wire::Error> {
        // Field order must mirror `wire::Encode for NodeAnnouncement` above.
        let version = u8::decode(buf)?;
        let features = node::Features::decode(buf)?;
        let timestamp = Timestamp::decode(buf)?;
        let alias = wire::Decode::decode(buf)?;
        let addresses = BoundedVec::<Address, ADDRESS_LIMIT>::decode(buf)?;
        let nonce = u64::decode(buf)?;
        // A buffer that ends here is tolerated and mapped to the default
        // user-agent — presumably for announcements that predate the
        // user-agent field (backward compatibility); TODO confirm. Any
        // other decode error still propagates.
        let agent = match UserAgent::decode(buf) {
            Ok(ua) => ua,
            Err(wire::Error::UnexpectedEnd { .. }) => UserAgent::default(),
            Err(e) => return Err(e),
        };

        Ok(Self {
            version,
            features,
            timestamp,
            alias,
            addresses,
            nonce,
            agent,
        })
    }
}
+

+
/// Node announcing project refs being created or updated.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RefsAnnouncement {
    /// Repository identifier.
    pub rid: RepoId,
    /// Updated `rad/sigrefs`, one entry per remote,
    /// bounded by [`REF_REMOTE_LIMIT`].
    pub refs: BoundedVec<RefsAt, REF_REMOTE_LIMIT>,
    /// Time of announcement.
    pub timestamp: Timestamp,
}
+

+
/// Track the status of `RefsAt` within a given repository.
///
/// Partitions announced refs into those we need to fetch (`want`)
/// and those we already have (`have`).
#[derive(Default)]
pub struct RefsStatus {
    /// The `rad/sigrefs` was missing or it's ahead of the local
    /// `rad/sigrefs`. We want it.
    pub want: Vec<RefsAt>,
    /// The `rad/sigrefs` has been seen before. We already have it.
    pub have: Vec<RefsAt>,
}
+

+
impl RefsStatus {
    /// Get the set of `want` and `have` `RefsAt`'s for the given
    /// announcement.
    ///
    /// Nb. We use the refs database as a cache for quick lookups. This does *not* check
    /// for ancestry matches, since we don't cache the whole history (only the tips).
    /// This, however, is not a problem because the signed refs branch is fast-forward only,
    /// and old refs announcements will be discarded due to their lower timestamps.
    pub fn new<D: node::refs::Store>(
        rid: RepoId,
        refs: NonEmpty<RefsAt>,
        db: &D,
    ) -> Result<RefsStatus, storage::Error> {
        let mut status = RefsStatus::default();
        for theirs in refs.iter() {
            status.insert(&rid, *theirs, db)?;
        }
        Ok(status)
    }

    /// Classify a single announced ref as `want` or `have`, by comparing
    /// its tip against the cached local tip of `rad/sigrefs`.
    ///
    /// Nb. this currently never returns `Err`: a database error is logged
    /// and the ref is counted as neither wanted nor had (best-effort skip).
    fn insert<D: node::refs::Store>(
        &mut self,
        repo: &RepoId,
        theirs: RefsAt,
        db: &D,
    ) -> Result<(), storage::Error> {
        match db.get(repo, &theirs.remote, &storage::refs::SIGREFS_BRANCH) {
            Ok(Some((ours, _))) => {
                // Any tip differing from ours is wanted; fast-forward-only
                // history means "different" implies "ahead" in practice.
                if theirs.at != ours {
                    self.want.push(theirs);
                } else {
                    self.have.push(theirs);
                }
            }
            Ok(None) => {
                // No local sigrefs for that remote yet: we want it.
                self.want.push(theirs);
            }
            Err(e) => {
                log::warn!(
                    target: "service",
                    "Error getting cached ref of {repo} for refs status: {e}"
                );
            }
        }
        Ok(())
    }
}
+

+
/// Node announcing its inventory to the network.
/// This should be the whole inventory every time.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct InventoryAnnouncement {
    /// Node inventory, bounded by [`INVENTORY_LIMIT`].
    pub inventory: BoundedVec<RepoId, INVENTORY_LIMIT>,
    /// Time of announcement.
    pub timestamp: Timestamp,
}
+

+
/// Node announcing information to a connected peer.
///
/// This should not be relayed and should be used to send an
/// informational message a peer.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Info {
    /// Tell a node that sent a refs announcement that it was already synced at the given `Oid`,
    /// for this particular `rid`.
    RefsAlreadySynced { rid: RepoId, at: git::Oid },
}
+

+
/// Announcement messages are messages that are relayed between peers.
// Nb. `Debug` is implemented manually below, to keep log output compact.
#[derive(Clone, PartialEq, Eq)]
pub enum AnnouncementMessage {
    /// Inventory announcement.
    Inventory(InventoryAnnouncement),
    /// Node announcement.
    Node(NodeAnnouncement),
    /// Refs announcement.
    Refs(RefsAnnouncement),
}
+

+
impl AnnouncementMessage {
+
    /// Sign this announcement message.
+
    pub fn signed<G>(self, signer: &Device<G>) -> Announcement
+
    where
+
        G: crypto::signature::Signer<crypto::Signature>,
+
    {
+
        use crypto::signature::Signer as _;
+

+
        let msg = wire::serialize(&self);
+
        let signature = signer.sign(&msg);
+

+
        Announcement {
+
            node: *signer.public_key(),
+
            message: self,
+
            signature,
+
        }
+
    }
+

+
    pub fn timestamp(&self) -> Timestamp {
+
        match self {
+
            Self::Inventory(InventoryAnnouncement { timestamp, .. }) => *timestamp,
+
            Self::Refs(RefsAnnouncement { timestamp, .. }) => *timestamp,
+
            Self::Node(NodeAnnouncement { timestamp, .. }) => *timestamp,
+
        }
+
    }
+

+
    pub fn is_node_announcement(&self) -> bool {
+
        matches!(self, Self::Node(_))
+
    }
+
}
+

+
// Conversions from the individual announcement payloads into the
// `AnnouncementMessage` envelope.

impl From<NodeAnnouncement> for AnnouncementMessage {
    fn from(ann: NodeAnnouncement) -> Self {
        Self::Node(ann)
    }
}

impl From<InventoryAnnouncement> for AnnouncementMessage {
    fn from(ann: InventoryAnnouncement) -> Self {
        Self::Inventory(ann)
    }
}

impl From<RefsAnnouncement> for AnnouncementMessage {
    fn from(ann: RefsAnnouncement) -> Self {
        Self::Refs(ann)
    }
}
+

+
// Manual `Debug` keeps log output compact: payloads are summarized
// rather than dumped in full.
impl fmt::Debug for AnnouncementMessage {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Node(message) => write!(f, "Node({})", message.timestamp),
            Self::Inventory(message) => {
                write!(
                    f,
                    "Inventory([{}], {})",
                    message
                        .inventory
                        .iter()
                        .map(|i| i.to_string())
                        .collect::<Vec<String>>()
                        .join(", "),
                    message.timestamp
                )
            }
            Self::Refs(message) => {
                write!(
                    f,
                    "Refs({}, {}, {:?})",
                    message.rid, message.timestamp, message.refs
                )
            }
        }
    }
}
+

+
/// An announcement, signed by its originating node.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Announcement {
    /// Node identifier.
    pub node: NodeId,
    /// Signature over the announcement.
    pub signature: crypto::Signature,
    /// Unsigned node announcement.
    pub message: AnnouncementMessage,
}
+

+
impl Announcement {
+
    /// Proof-of-work parameters for announcements.
+
    ///
+
    /// These parameters are fed into `scrypt`.
+
    /// They represent the `log2(N)`, `r`, `p` parameters, respectively.
+
    ///
+
    /// * log2(N) – iterations count (affects memory and CPU usage), e.g. 15
+
    /// * r – block size (affects memory and CPU usage), e.g. 8
+
    /// * p – parallelism factor (threads to run in parallel - affects the memory, CPU usage), usually 1
+
    ///
+
    /// `15, 8, 1` are usually the recommended parameters.
+
    ///
+
    #[cfg(debug_assertions)]
+
    pub const POW_PARAMS: (u8, u32, u32) = (1, 1, 1);
+
    #[cfg(not(debug_assertions))]
+
    pub const POW_PARAMS: (u8, u32, u32) = (15, 8, 1);
+
    /// Salt used for generating PoW.
+
    pub const POW_SALT: &'static [u8] = b"rad";
+

+
    /// Verify this announcement's signature.
+
    pub fn verify(&self) -> bool {
+
        let msg = wire::serialize(&self.message);
+
        self.node.verify(msg, &self.signature).is_ok()
+
    }
+

+
    pub fn matches(&self, filter: &Filter) -> bool {
+
        match &self.message {
+
            AnnouncementMessage::Inventory(_) => true,
+
            AnnouncementMessage::Node(_) => true,
+
            AnnouncementMessage::Refs(RefsAnnouncement { rid, .. }) => filter.contains(rid),
+
        }
+
    }
+

+
    /// Check whether this announcement is of the same variant as another.
+
    pub fn variant_eq(&self, other: &Self) -> bool {
+
        std::mem::discriminant(&self.message) == std::mem::discriminant(&other.message)
+
    }
+

+
    /// Get the announcement timestamp.
+
    pub fn timestamp(&self) -> Timestamp {
+
        self.message.timestamp()
+
    }
+
}
+

+
/// Message payload.
/// These are the messages peers send to each other.
// Nb. `Debug` is implemented manually below, to keep log output compact.
#[derive(Clone, PartialEq, Eq)]
pub enum Message {
    /// Subscribe to gossip messages matching the filter and time range.
    Subscribe(Subscribe),

    /// Gossip announcement. These messages are relayed to peers, and filtered
    /// using [`Message::Subscribe`].
    Announcement(Announcement),

    /// Informational message. These messages are sent between peers for information
    /// and do not need to be acted upon. They can be safely ignored, though handling
    /// them can be useful for the user.
    Info(Info),

    /// Ask a connected peer for a Pong.
    ///
    /// Used to check if the remote peer is responsive, or a side-effect free way to keep a
    /// connection alive.
    Ping(Ping),

    /// Response to `Ping` message.
    Pong {
        /// The pong payload.
        zeroes: ZeroBytes,
    },
}
+

+
impl PartialOrd for Message {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        // Delegate to `Ord`, as required for a consistent total order.
        Some(self.cmp(other))
    }
}
+

+
// Total order over the serialized wire representation of the message.
// NOTE(review): every comparison serializes both sides — fine for occasional
// ordering, but costly if used in a hot sorting path.
impl Ord for Message {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        let this = wire::serialize(self);
        let other = wire::serialize(other);

        this.cmp(&other)
    }
}
+

+
impl Message {
    /// Build an announcement message from its parts, with a pre-computed signature.
    pub fn announcement(
        node: NodeId,
        message: impl Into<AnnouncementMessage>,
        signature: crypto::Signature,
    ) -> Self {
        Announcement {
            node,
            signature,
            message: message.into(),
        }
        .into()
    }

    /// Build and sign a node announcement message.
    pub fn node<G: crypto::signature::Signer<crypto::Signature>>(
        message: NodeAnnouncement,
        signer: &Device<G>,
    ) -> Self {
        AnnouncementMessage::from(message).signed(signer).into()
    }

    /// Build and sign an inventory announcement message.
    pub fn inventory<G: crypto::signature::Signer<crypto::Signature>>(
        message: InventoryAnnouncement,
        signer: &Device<G>,
    ) -> Self {
        AnnouncementMessage::from(message).signed(signer).into()
    }

    /// Build a subscription message for the given filter and time range.
    pub fn subscribe(filter: Filter, since: Timestamp, until: Timestamp) -> Self {
        Self::Subscribe(Subscribe {
            filter,
            since,
            until,
        })
    }

    /// Log a one-line summary of this message at the given level,
    /// phrased according to the link direction (inbound/outbound).
    pub fn log(&self, level: log::Level, remote: &NodeId, link: Link) {
        // Skip the formatting work entirely if the level is filtered out.
        if !log::log_enabled!(level) {
            return;
        }
        let (verb, prep) = if link.is_inbound() {
            ("Received", "from")
        } else {
            ("Sending", "to")
        };
        let msg = match self {
            Self::Announcement(Announcement { node, message, .. }) => match message {
                AnnouncementMessage::Node(NodeAnnouncement { addresses, timestamp, .. }) => format!(
                    "{verb} node announcement of {node} with {} address(es) {prep} {remote} (t={timestamp})",
                    addresses.len()
                ),
                AnnouncementMessage::Refs(RefsAnnouncement { rid, refs, timestamp }) => format!(
                    "{verb} refs announcement of {node} for {rid} with {} remote(s) {prep} {remote} (t={timestamp})",
                    refs.len()
                ),
                AnnouncementMessage::Inventory(InventoryAnnouncement { inventory, timestamp }) => {
                    format!(
                        "{verb} inventory announcement of {node} with {} item(s) {prep} {remote} (t={timestamp})",
                        inventory.len()
                    )
                }
            },
            Self::Info(Info::RefsAlreadySynced { rid,  .. }) => {
                format!(
                    "{verb} `refs-already-synced` info {prep} {remote} for {rid}"
                )
            },
            Self::Ping { .. } => format!("{verb} ping {prep} {remote}"),
            Self::Pong { .. } => format!("{verb} pong {prep} {remote}"),
            Self::Subscribe(Subscribe { .. }) => {
                format!("{verb} subscription filter {prep} {remote}")
            }
        };
        log::log!(target: "service", level, "{msg}");
    }
}
+

+
/// A ping message.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct Ping {
    /// The requested length of the pong message.
    pub ponglen: wire::Size,
    /// Zero bytes (ignored).
    pub zeroes: ZeroBytes,
}
+

+
impl Ping {
+
    /// Maximum number of zero bytes in a ping message.
+
    pub const MAX_PING_ZEROES: wire::Size = Message::MAX_SIZE // Message size without the type.
+
        - mem::size_of::<wire::Size>() as wire::Size // Account for pong length.
+
        - mem::size_of::<wire::Size>() as wire::Size; // Account for zeroes length prefix.
+

+
    /// Maximum number of zero bytes in a pong message.
+
    pub const MAX_PONG_ZEROES: wire::Size =
+
        Message::MAX_SIZE - mem::size_of::<wire::Size>() as wire::Size; // Account for zeroes length
+
                                                                        // prefix.
+

+
    pub fn new(rng: &mut fastrand::Rng) -> Self {
+
        let ponglen = rng.u16(0..Self::MAX_PONG_ZEROES);
+

+
        Ping {
+
            ponglen,
+
            zeroes: ZeroBytes::new(rng.u16(0..Self::MAX_PING_ZEROES)),
+
        }
+
    }
+
}
+

+
// Conversions from individual payloads into the `Message` envelope.

impl From<Announcement> for Message {
    fn from(ann: Announcement) -> Self {
        Self::Announcement(ann)
    }
}

impl From<Info> for Message {
    fn from(info: Info) -> Self {
        Self::Info(info)
    }
}
+

+
// Manual `Debug` keeps log output compact: payloads are summarized
// rather than dumped in full.
impl fmt::Debug for Message {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Subscribe(Subscribe { since, until, .. }) => {
                write!(f, "Subscribe({since}..{until})")
            }
            Self::Announcement(Announcement { node, message, .. }) => {
                write!(f, "Announcement({node}, {message:?})")
            }
            Self::Info(info) => {
                write!(f, "Info({info:?})")
            }
            Self::Ping(Ping { ponglen, zeroes }) => write!(f, "Ping({ponglen}, {zeroes:?})"),
            Self::Pong { zeroes } => write!(f, "Pong({zeroes:?})"),
        }
    }
}
+

+
/// Represents a vector of zeroes of a certain length.
// NOTE(review): only the count is stored; the zero bytes themselves are
// presumably materialized by the wire encoder — confirm in the `wire` module.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ZeroBytes(wire::Size);
+

+
impl ZeroBytes {
+
    pub fn new(size: wire::Size) -> Self {
+
        ZeroBytes(size)
+
    }
+

+
    pub fn is_empty(&self) -> bool {
+
        self.0 == 0
+
    }
+

+
    pub fn len(&self) -> usize {
+
        self.0.into()
+
    }
+
}
+

+
#[cfg(any(test, feature = "test"))]
#[allow(clippy::unwrap_used)]
impl qcheck::Arbitrary for Message {
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
        use qcheck::Arbitrary;

        // Pick one message variant uniformly:
        // 1: inventory ann., 2: refs ann., 3: node ann.,
        // 4: info, 5: subscribe, 6: ping, 7: pong.
        match g.choose(&[1, 2, 3, 4, 5, 6, 7]).unwrap() {
            1 => Announcement {
                node: NodeId::arbitrary(g),
                message: InventoryAnnouncement {
                    inventory: BoundedVec::arbitrary(g),
                    timestamp: Timestamp::arbitrary(g),
                }
                .into(),
                // Nb. signatures are random bytes, not valid signatures.
                signature: crypto::Signature::from(<[u8; 64]>::arbitrary(g)),
            }
            .into(),
            2 => Announcement {
                node: NodeId::arbitrary(g),
                message: RefsAnnouncement {
                    rid: RepoId::arbitrary(g),
                    refs: BoundedVec::arbitrary(g),
                    timestamp: Timestamp::arbitrary(g),
                }
                .into(),
                signature: crypto::Signature::from(<[u8; 64]>::arbitrary(g)),
            }
            .into(),
            3 => {
                let message = NodeAnnouncement {
                    version: u8::arbitrary(g),
                    features: u64::arbitrary(g).into(),
                    timestamp: Timestamp::arbitrary(g),
                    alias: Alias::arbitrary(g),
                    addresses: Arbitrary::arbitrary(g),
                    nonce: u64::arbitrary(g),
                    agent: UserAgent::arbitrary(g),
                }
                .into();
                let bytes: [u8; 64] = Arbitrary::arbitrary(g);
                let signature = crypto::Signature::from(bytes);

                Announcement {
                    node: NodeId::arbitrary(g),
                    signature,
                    message,
                }
                .into()
            }
            4 => {
                let message = Info::RefsAlreadySynced {
                    rid: RepoId::arbitrary(g),
                    at: radicle::test::arbitrary::oid(),
                };
                Self::Info(message)
            }
            5 => Self::Subscribe(Subscribe {
                filter: Filter::arbitrary(g),
                since: Timestamp::arbitrary(g),
                until: Timestamp::arbitrary(g),
            }),
            6 => {
                // Ping construction needs an RNG; derive its seed from the generator.
                let mut rng = fastrand::Rng::with_seed(u64::arbitrary(g));

                Self::Ping(Ping::new(&mut rng))
            }
            7 => Self::Pong {
                zeroes: ZeroBytes::new(u16::arbitrary(g).min(Ping::MAX_PONG_ZEROES)),
            },
            // `choose` only yields values from the slice above.
            _ => panic!("Invalid choice for Message::arbitrary"),
        }
    }
}
+

+
#[cfg(any(test, feature = "test"))]
impl qcheck::Arbitrary for ZeroBytes {
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
        // Any u16 count is a valid `ZeroBytes`.
        ZeroBytes::new(u16::arbitrary(g))
    }
}
+

+
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use std::str::FromStr;

    use fastrand;
    use qcheck_macros::quickcheck;
    use radicle::git::raw;

    use super::*;
    use crate::wire::Encode;
    use localtime::LocalTime;
    use radicle::test::arbitrary;

    /// A refs announcement filled to exactly `REF_REMOTE_LIMIT` entries must
    /// still round-trip through wire encoding and decoding.
    #[test]
    fn test_ref_remote_limit() {
        let mut refs = BoundedVec::<_, REF_REMOTE_LIMIT>::new();
        let signer = Device::mock();
        let at = raw::Oid::zero().into();

        assert_eq!(refs.capacity(), REF_REMOTE_LIMIT);

        // Fill the bounded vector to capacity; every push must succeed.
        for _ in 0..refs.capacity() {
            refs.push(RefsAt {
                remote: *signer.public_key(),
                at,
            })
            .unwrap();
        }

        let msg: Message = AnnouncementMessage::from(RefsAnnouncement {
            rid: arbitrary::gen(1),
            refs,
            timestamp: LocalTime::now().into(),
        })
        .signed(&Device::mock())
        .into();

        let mut buf = Vec::new();
        msg.encode(&mut buf);

        // Encode/decode round-trip must reproduce the original message.
        let decoded = wire::deserialize(buf.as_slice());
        assert!(decoded.is_ok());
        assert_eq!(msg, decoded.unwrap());
    }

    /// An inventory announcement at exactly `INVENTORY_LIMIT` entries must
    /// round-trip through wire encoding and decoding.
    #[test]
    fn test_inventory_limit() {
        let msg = Message::inventory(
            InventoryAnnouncement {
                inventory: arbitrary::vec(INVENTORY_LIMIT)
                    .try_into()
                    .expect("size within bounds limit"),
                timestamp: LocalTime::now().into(),
            },
            &Device::mock(),
        );
        let mut buf: Vec<u8> = Vec::new();
        msg.encode(&mut buf);

        let decoded = wire::deserialize(buf.as_slice());
        assert!(
            decoded.is_ok(),
            "INVENTORY_LIMIT is a valid limit for decoding"
        );
        assert_eq!(
            msg,
            decoded.unwrap(),
            "encoding and decoding should be safe for message at INVENTORY_LIMIT",
        );
    }

    /// Signing a refs announcement with a device key must produce an
    /// announcement that verifies, for any repository id.
    #[quickcheck]
    fn prop_refs_announcement_signing(rid: RepoId) {
        let signer = Device::mock_rng(&mut fastrand::Rng::new());
        let timestamp = Timestamp::EPOCH;
        let at = raw::Oid::zero().into();
        let refs = BoundedVec::collect_from(
            &mut [RefsAt {
                remote: *signer.public_key(),
                at,
            }]
            .into_iter(),
        );
        let message = AnnouncementMessage::Refs(RefsAnnouncement {
            rid,
            refs,
            timestamp,
        });
        let ann = message.signed(&signer);

        assert!(ann.verify());
    }

    /// Solving a node announcement's proof-of-work nonce for a target
    /// difficulty must yield at least that much work.
    #[test]
    fn test_node_announcement_validate() {
        let ann = NodeAnnouncement {
            version: 1,
            features: node::Features::SEED,
            timestamp: Timestamp::try_from(42491841u64).unwrap(),
            alias: Alias::new("alice"),
            addresses: BoundedVec::new(),
            nonce: 0,
            agent: UserAgent::from_str("/heartwood:1.0.0/").unwrap(),
        };

        // The unsolved announcement happens to have 1 bit of work; solving
        // for a target must meet or exceed that target.
        assert_eq!(ann.work(), 1);
        assert_eq!(ann.clone().solve(1).unwrap().work(), 1);
        assert_eq!(ann.clone().solve(8).unwrap().work(), 10);
        assert_eq!(ann.solve(14).unwrap().work(), 14);
    }
}
added crates/radicle-protocol/src/service/session.rs
@@ -0,0 +1,360 @@
+
use std::collections::{HashSet, VecDeque};
+
use std::{fmt, time};
+

+
use crossbeam_channel as chan;
+
use radicle::node::config::Limits;
+
use radicle::node::{FetchResult, Severity};
+
use radicle::node::{Link, Timestamp};
+
pub use radicle::node::{PingState, State};
+
use radicle::storage::refs::RefsAt;
+

+
use crate::service::message;
+
use crate::service::message::Message;
+
use crate::service::{Address, LocalDuration, LocalTime, NodeId, Outbox, RepoId, Rng};
+

+
/// Time after which a connection is considered stable.
+
pub const CONNECTION_STABLE_THRESHOLD: LocalDuration = LocalDuration::from_mins(1);
+
/// Maximum items in the fetch queue.
+
pub const MAX_FETCH_QUEUE_SIZE: usize = 128;
+

+
/// Fatal session errors. Each error maps to a [`Severity`] used to
/// penalize the offending peer (see [`Error::severity`]).
#[derive(thiserror::Error, Debug, Clone, Copy)]
pub enum Error {
    /// The remote peer sent an invalid announcement timestamp,
    /// for eg. a timestamp far in the future.
    #[error("invalid announcement timestamp: {0}")]
    InvalidTimestamp(Timestamp),
    /// The remote peer sent git protocol messages while we were expecting
    /// gossip messages. Or vice-versa.
    #[error("protocol mismatch")]
    ProtocolMismatch,
    /// The remote peer did something that violates the protocol rules.
    #[error("peer misbehaved")]
    Misbehavior,
    /// The remote peer timed out.
    #[error("peer timed out")]
    Timeout,
}
+

+
impl Error {
+
    /// Return the severity for this error.
+
    pub fn severity(&self) -> Severity {
+
        match self {
+
            Self::InvalidTimestamp(_) => Severity::High,
+
            Self::ProtocolMismatch => Severity::High,
+
            Self::Misbehavior => Severity::High,
+
            Self::Timeout => Severity::Low,
+
        }
+
    }
+
}
+

+
/// Error when trying to queue a fetch.
///
/// Both variants carry the rejected [`QueuedFetch`] back to the caller,
/// which can recover it via [`QueueError::inner`].
#[derive(thiserror::Error, Debug, Clone)]
pub enum QueueError {
    /// The item already exists in the queue.
    #[error("item is already queued")]
    Duplicate(QueuedFetch),
    /// The queue is at capacity.
    #[error("queue capacity reached")]
    CapacityReached(QueuedFetch),
}
+

+
impl QueueError {
+
    /// Get the inner [`QueuedFetch`].
+
    pub fn inner(&self) -> &QueuedFetch {
+
        match self {
+
            Self::Duplicate(f) => f,
+
            Self::CapacityReached(f) => f,
+
        }
+
    }
+
}
+

+
/// Fetch waiting to be processed, in the fetch queue.
#[derive(Debug, Clone)]
pub struct QueuedFetch {
    /// Repo being fetched.
    pub rid: RepoId,
    /// Peer being fetched from.
    pub from: NodeId,
    /// Refs being fetched.
    pub refs_at: Vec<RefsAt>,
    /// The timeout given for the fetch request.
    pub timeout: time::Duration,
    /// Result channel. When set, a caller is waiting on the fetch result;
    /// such fetches are never treated as duplicates (see the `PartialEq`
    /// impl below).
    pub channel: Option<chan::Sender<FetchResult>>,
}
+

+
impl PartialEq for QueuedFetch {
    // Two queued fetches are equal only when they target the same repo, the
    // same peer and the same refs, *and* neither carries a result channel.
    // A fetch holding a channel must deliver its result to a waiting caller,
    // so it never compares equal to anything and is never deduplicated by
    // `queue_fetch`'s duplicate check.
    fn eq(&self, other: &Self) -> bool {
        self.rid == other.rid
            && self.from == other.from
            && self.refs_at == other.refs_at
            && self.channel.is_none()
            && other.channel.is_none()
    }
}
+

+
/// A peer session. Each connected peer will have one session.
#[derive(Debug, Clone)]
pub struct Session {
    /// Peer id.
    pub id: NodeId,
    /// Peer address.
    pub addr: Address,
    /// Connection direction.
    pub link: Link,
    /// Whether we should attempt to re-connect
    /// to this peer upon disconnection.
    pub persistent: bool,
    /// Peer connection state.
    pub state: State,
    /// Peer subscription.
    pub subscribe: Option<message::Subscribe>,
    /// Last time a message was received from the peer.
    pub last_active: LocalTime,
    /// Fetch queue. Bounded by [`MAX_FETCH_QUEUE_SIZE`].
    pub queue: VecDeque<QueuedFetch>,

    /// Connection attempts. For persistent peers, this tracks
    /// how many times we've attempted to connect. We reset this to zero
    /// upon successful connection, once the connection is stable.
    attempts: usize,
    /// Source of entropy.
    rng: Rng,
    /// Protocol limits.
    limits: Limits,
}
+

+
impl fmt::Display for Session {
+
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+
        let mut attrs = Vec::new();
+
        let state = self.state.to_string();
+

+
        if self.link.is_inbound() {
+
            attrs.push("inbound");
+
        } else {
+
            attrs.push("outbound");
+
        }
+
        if self.persistent {
+
            attrs.push("persistent");
+
        }
+
        attrs.push(state.as_str());
+

+
        write!(f, "{} [{}]", self.id, attrs.join(" "))
+
    }
+
}
+

+
impl From<&Session> for radicle::node::Session {
    fn from(s: &Session) -> Self {
        Self {
            nid: s.id,
            // Normalize the link to a plain direction.
            // NOTE(review): `s.link` is already a `radicle::node::Link`; if
            // the type carries no state beyond direction, this could simply
            // be `link: s.link` — confirm before simplifying.
            link: if s.link.is_inbound() {
                radicle::node::Link::Inbound
            } else {
                radicle::node::Link::Outbound
            },
            addr: s.addr.clone(),
            state: s.state.clone(),
        }
    }
}
+

+
impl Session {
    /// Create a session for an outbound connection that has not yet been
    /// established. Starts in [`State::Initial`].
    pub fn outbound(id: NodeId, addr: Address, persistent: bool, rng: Rng, limits: Limits) -> Self {
        Self {
            id,
            addr,
            state: State::Initial,
            link: Link::Outbound,
            subscribe: None,
            persistent,
            last_active: LocalTime::default(),
            queue: VecDeque::with_capacity(MAX_FETCH_QUEUE_SIZE),
            // We are about to attempt a connection, so this counts as one.
            attempts: 1,
            rng,
            limits,
        }
    }

    /// Create a session for an inbound connection, which is already
    /// established and therefore starts in the connected state.
    pub fn inbound(
        id: NodeId,
        addr: Address,
        persistent: bool,
        rng: Rng,
        time: LocalTime,
        limits: Limits,
    ) -> Self {
        Self {
            id,
            addr,
            state: State::Connected {
                since: time,
                ping: PingState::default(),
                fetching: HashSet::default(),
                latencies: VecDeque::default(),
                stable: false,
            },
            link: Link::Inbound,
            subscribe: None,
            persistent,
            last_active: time,
            queue: VecDeque::new(),
            attempts: 0,
            rng,
            limits,
        }
    }

    /// Whether an outbound connection attempt is in progress.
    pub fn is_connecting(&self) -> bool {
        matches!(self.state, State::Attempted { .. })
    }

    /// Whether the connection has been marked stable by [`Session::idle`].
    pub fn is_stable(&self) -> bool {
        matches!(self.state, State::Connected { stable: true, .. })
    }

    /// Whether the session is currently connected.
    pub fn is_connected(&self) -> bool {
        self.state.is_connected()
    }

    /// Whether the session is in the disconnected state.
    pub fn is_disconnected(&self) -> bool {
        matches!(self.state, State::Disconnected { .. })
    }

    /// Whether the session is in the initial state.
    pub fn is_initial(&self) -> bool {
        matches!(self.state, State::Initial)
    }

    /// Whether the number of concurrent fetches from this peer has reached
    /// the configured `fetch_concurrency` limit.
    pub fn is_at_capacity(&self) -> bool {
        if let State::Connected { fetching, .. } = &self.state {
            if fetching.len() >= self.limits.fetch_concurrency {
                return true;
            }
        }
        false
    }

    /// Whether the given repository is currently being fetched from this peer.
    pub fn is_fetching(&self, rid: &RepoId) -> bool {
        if let State::Connected { fetching, .. } = &self.state {
            return fetching.contains(rid);
        }
        false
    }

    /// Queue a fetch. Returns `Ok(())` if it was added to the queue, or a
    /// [`QueueError`] if an equal fetch was already queued or the queue is
    /// at capacity.
    ///
    /// # Panics
    ///
    /// If the fetch's `from` peer is not this session's peer.
    pub fn queue_fetch(&mut self, fetch: QueuedFetch) -> Result<(), QueueError> {
        assert_eq!(fetch.from, self.id);

        if self.queue.len() >= MAX_FETCH_QUEUE_SIZE {
            return Err(QueueError::CapacityReached(fetch));
        } else if self.queue.contains(&fetch) {
            return Err(QueueError::Duplicate(fetch));
        }
        self.queue.push_back(fetch);

        Ok(())
    }

    /// Take the next queued fetch, if any.
    pub fn dequeue_fetch(&mut self) -> Option<QueuedFetch> {
        self.queue.pop_front()
    }

    /// Number of connection attempts since the last stable connection.
    pub fn attempts(&self) -> usize {
        self.attempts
    }

    /// Run 'idle' task for session.
    ///
    /// Marks the connection stable once it has been up for
    /// [`CONNECTION_STABLE_THRESHOLD`], and resets the attempt counter.
    pub fn idle(&mut self, now: LocalTime) {
        if let State::Connected {
            since,
            ref mut stable,
            ..
        } = self.state
        {
            if now >= since && now.duration_since(since) >= CONNECTION_STABLE_THRESHOLD {
                *stable = true;
                // Reset number of attempts for stable connections.
                self.attempts = 0;
            }
        }
    }

    /// Mark this session as fetching the given RID.
    ///
    /// # Panics
    ///
    /// If it is already fetching that RID, or the session is disconnected.
    pub fn fetching(&mut self, rid: RepoId) {
        if let State::Connected { fetching, .. } = &mut self.state {
            assert!(
                fetching.insert(rid),
                "Session must not already be fetching {rid}"
            );
        } else {
            panic!(
                "Attempting to fetch {rid} from disconnected session {}",
                self.id
            );
        }
    }

    /// Mark a fetch of the given RID as complete. Logs a warning (rather
    /// than panicking) if the RID was not being fetched.
    pub fn fetched(&mut self, rid: RepoId) {
        if let State::Connected { fetching, .. } = &mut self.state {
            if !fetching.remove(&rid) {
                log::warn!(target: "service", "Fetched unknown repository {rid}");
            }
        }
    }

    /// Transition to the 'attempted' state, counting one more attempt.
    ///
    /// # Panics
    ///
    /// If the session is not in the 'initial' state.
    pub fn to_attempted(&mut self) {
        assert!(
            self.is_initial(),
            "Can only transition to 'attempted' state from 'initial' state"
        );
        self.state = State::Attempted;
        self.attempts += 1;
    }

    /// Transition to the 'connected' state, resetting all per-connection
    /// state (ping, in-flight fetches, latencies, stability).
    pub fn to_connected(&mut self, since: LocalTime) {
        self.last_active = since;

        if let State::Connected { .. } = &self.state {
            log::error!(target: "service", "Session {} is already in 'connected' state, resetting..", self.id);
        };
        self.state = State::Connected {
            since,
            ping: PingState::default(),
            fetching: HashSet::default(),
            latencies: VecDeque::default(),
            stable: false,
        };
    }

    /// Move the session state to "disconnected", recording when the
    /// disconnect happened and when a re-connect may be attempted.
    pub fn to_disconnected(&mut self, since: LocalTime, retry_at: LocalTime) {
        self.state = State::Disconnected { since, retry_at };
    }

    /// Return to initial state from disconnected state. This state transition
    /// happens when we attempt to re-connect to a disconnected peer.
    pub fn to_initial(&mut self) {
        assert!(
            self.is_disconnected(),
            "Can only transition to 'initial' state from 'disconnected' state"
        );
        self.state = State::Initial;
    }

    /// Send a ping to the peer and start awaiting the matching pong.
    /// Does nothing unless the session is connected; currently always
    /// returns `Ok(())`.
    pub fn ping(&mut self, since: LocalTime, reactor: &mut Outbox) -> Result<(), Error> {
        if let State::Connected { ping, .. } = &mut self.state {
            let msg = message::Ping::new(&mut self.rng);
            *ping = PingState::AwaitingResponse {
                len: msg.ponglen,
                since,
            };
            reactor.write(self, Message::Ping(msg));
        }
        Ok(())
    }
}
added crates/radicle-protocol/src/wire.rs
@@ -0,0 +1,690 @@
+
pub mod frame;
+
pub mod message;
+
pub mod varint;
+

+
pub use frame::StreamId;
+
pub use message::{AddressType, MessageType};
+

+
use std::collections::BTreeMap;
+
use std::convert::TryFrom;
+
use std::mem;
+
use std::ops::Deref;
+
use std::str::FromStr;
+
use std::string::FromUtf8Error;
+

+
use bytes::{Buf, BufMut};
+

+
use cyphernet::addr::tor;
+

+
use radicle::crypto::{PublicKey, Signature, Unverified};
+
use radicle::git;
+
use radicle::git::fmt;
+
use radicle::identity::RepoId;
+
use radicle::node;
+
use radicle::node::Alias;
+
use radicle::node::NodeId;
+
use radicle::node::Timestamp;
+
use radicle::node::UserAgent;
+
use radicle::storage::refs::Refs;
+
use radicle::storage::refs::RefsAt;
+
use radicle::storage::refs::SignedRefs;
+

+
use crate::bounded::BoundedVec;
+
use crate::service::filter;
+

+
/// The default type we use to represent sizes on the wire.
+
///
+
/// Since wire messages are limited to 64KB by the transport layer,
+
/// two bytes is enough to represent any message.
+
///
+
/// Note that in certain cases, we may use a smaller type.
+
pub type Size = u16;
+

+
/// Errors that can occur while decoding (and, in rare cases, validating)
/// wire data.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    #[error("UTF-8 error: {0}")]
    FromUtf8(#[from] FromUtf8Error),
    #[error("invalid size: expected {expected}, got {actual}")]
    InvalidSize { expected: usize, actual: usize },
    #[error("invalid filter size: {0}")]
    InvalidFilterSize(usize),
    #[error("invalid channel type {0:x}")]
    InvalidStreamKind(u8),
    #[error(transparent)]
    InvalidRefName(#[from] fmt::Error),
    #[error(transparent)]
    InvalidAlias(#[from] node::AliasError),
    #[error("invalid user agent string: {0:?}")]
    InvalidUserAgent(String),
    #[error("invalid control message with type `{0}`")]
    InvalidControlMessage(u8),
    #[error("invalid protocol version header `{0:x?}`")]
    InvalidProtocolVersion([u8; 4]),
    #[error("invalid onion address: {0}")]
    InvalidOnionAddr(#[from] tor::OnionAddrDecodeError),
    #[error("invalid timestamp: {0}")]
    InvalidTimestamp(u64),
    #[error("wrong protocol version `{0}`")]
    WrongProtocolVersion(u8),
    #[error("unknown address type `{0}`")]
    UnknownAddressType(u8),
    #[error("unknown message type `{0}`")]
    UnknownMessageType(u16),
    #[error("unknown info type `{0}`")]
    UnknownInfoType(u16),
    /// Trailing bytes remained after a complete object was decoded.
    #[error("unexpected bytes")]
    UnexpectedBytes,
    /// The buffer ended before a complete object could be decoded; converted
    /// from [`bytes::TryGetError`].
    #[error("unexpected end of buffer, requested {requested} more bytes but only {available} are available")]
    UnexpectedEnd { available: usize, requested: usize },
}
+

+
impl From<bytes::TryGetError> for Error {
+
    fn from(
+
        bytes::TryGetError {
+
            available,
+
            requested,
+
        }: bytes::TryGetError,
+
    ) -> Self {
+
        Self::UnexpectedEnd {
+
            available,
+
            requested,
+
        }
+
    }
+
}
+

+
/// Things that can be encoded as binary.
+
pub trait Encode {
+
    fn encode(&self, buffer: &mut impl BufMut);
+
}
+

+
/// Things that can be decoded from binary.
+
pub trait Decode: Sized {
+
    fn decode(buffer: &mut impl Buf) -> Result<Self, Error>;
+
}
+

+
/// Encode an object into a byte vector.
///
/// # Panics
///
/// If the encoded object exceeds [`Size::MAX`].
pub fn serialize<E: Encode + ?Sized>(data: &E) -> Vec<u8> {
    // `limit` caps the writable bytes at the maximum wire message size;
    // writing past it panics (per `bytes::BufMut`), which enforces the
    // documented panic instead of silently exceeding the transport limit.
    let mut buffer = Vec::new().limit(Size::MAX as usize);
    data.encode(&mut buffer);
    buffer.into_inner()
}

/// Decode an object from a slice.
///
/// Fails with [`Error::UnexpectedBytes`] if the slice contains trailing
/// data beyond the decoded object.
pub fn deserialize<T: Decode>(mut data: &[u8]) -> Result<T, Error> {
    let result = T::decode(&mut data)?;

    // The object must consume the entire input; leftovers indicate a
    // malformed or mismatched message.
    if data.is_empty() {
        Ok(result)
    } else {
        Err(Error::UnexpectedBytes)
    }
}
+

+
impl Encode for u8 {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        buf.put_u8(*self);
+
    }
+
}
+

+
impl Encode for u16 {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        buf.put_u16(*self);
+
    }
+
}
+

+
impl Encode for u32 {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        buf.put_u32(*self);
+
    }
+
}
+

+
impl Encode for u64 {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        buf.put_u64(*self);
+
    }
+
}
+

+
impl Encode for PublicKey {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        self.deref().encode(buf)
+
    }
+
}
+

+
impl<const T: usize> Encode for &[u8; T] {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        buf.put_slice(&**self);
+
    }
+
}
+

+
impl<const T: usize> Encode for [u8; T] {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        buf.put_slice(self);
+
    }
+
}
+

+
impl<T> Encode for &[T]
+
where
+
    T: Encode,
+
{
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        (self.len() as Size).encode(buf);
+

+
        for item in self.iter() {
+
            item.encode(buf);
+
        }
+
    }
+
}
+

+
impl<T, const N: usize> Encode for BoundedVec<T, N>
+
where
+
    T: Encode,
+
{
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        self.as_slice().encode(buf)
+
    }
+
}
+

+
impl Encode for &str {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        assert!(self.len() <= u8::MAX as usize);
+

+
        (self.len() as u8).encode(buf);
+
        let bytes = self.as_bytes();
+

+
        // Nb. Don't use the [`Encode`] instance here for &[u8], because we are prefixing the
+
        // length ourselves.
+
        buf.put_slice(bytes);
+
    }
+
}
+

+
impl Encode for String {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        self.as_str().encode(buf)
+
    }
+
}
+

+
impl Encode for git::Url {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        self.to_string().encode(buf)
+
    }
+
}
+

+
impl Encode for RepoId {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        self.deref().encode(buf)
+
    }
+
}
+

+
impl Encode for Refs {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        let len: Size = self
+
            .len()
+
            .try_into()
+
            .expect("`Refs::len()` must be less than or equal to `Size::MAX`");
+
        len.encode(buf);
+

+
        for (name, oid) in self.iter() {
+
            name.as_str().encode(buf);
+
            oid.encode(buf);
+
        }
+
    }
+
}
+

+
impl Encode for cyphernet::addr::tor::OnionAddrV3 {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        self.into_raw_bytes().encode(buf)
+
    }
+
}
+

+
impl Encode for UserAgent {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        self.as_ref().encode(buf)
+
    }
+
}
+

+
impl Encode for Alias {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        self.as_ref().encode(buf)
+
    }
+
}
+

+
impl<A, B> Encode for (A, B)
+
where
+
    A: Encode,
+
    B: Encode,
+
{
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        self.0.encode(buf);
+
        self.1.encode(buf);
+
    }
+
}
+

+
impl Encode for git::RefString {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        self.as_str().encode(buf)
+
    }
+
}
+

+
impl Encode for Signature {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        self.deref().encode(buf)
+
    }
+
}
+

+
impl Encode for git::Oid {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        // Nb. We use length-encoding here to support future SHA-2 object ids.
+
        self.as_bytes().encode(buf)
+
    }
+
}
+

+
////////////////////////////////////////////////////////////////////////////////
+

+
impl Decode for PublicKey {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        let buf: [u8; 32] = Decode::decode(buf)?;
+

+
        Ok(PublicKey::from(buf))
+
    }
+
}
+

+
impl Decode for Refs {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        let len = Size::decode(buf)?;
+
        let mut refs = BTreeMap::new();
+

+
        for _ in 0..len {
+
            let name = String::decode(buf)?;
+
            let name = git::RefString::try_from(name).map_err(Error::from)?;
+
            let oid = git::Oid::decode(buf)?;
+

+
            refs.insert(name, oid);
+
        }
+
        Ok(refs.into())
+
    }
+
}
+

+
impl Decode for git::RefString {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        let ref_str = String::decode(buf)?;
+
        git::RefString::try_from(ref_str).map_err(Error::from)
+
    }
+
}
+

+
impl Decode for UserAgent {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        String::decode(buf).and_then(|s| UserAgent::from_str(&s).map_err(Error::InvalidUserAgent))
+
    }
+
}
+

+
impl Decode for Alias {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        String::decode(buf).and_then(|s| Alias::from_str(&s).map_err(Error::from))
+
    }
+
}
+

+
impl<A, B> Decode for (A, B)
+
where
+
    A: Decode,
+
    B: Decode,
+
{
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        let a = A::decode(buf)?;
+
        let b = B::decode(buf)?;
+
        Ok((a, b))
+
    }
+
}
+

+
impl Decode for git::Oid {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        let len = Size::decode(buf)? as usize;
+
        #[allow(non_upper_case_globals)]
+
        const expected: usize = mem::size_of::<git::raw::Oid>();
+

+
        if len != expected {
+
            return Err(Error::InvalidSize {
+
                expected,
+
                actual: len,
+
            });
+
        }
+

+
        let buf: [u8; expected] = Decode::decode(buf)?;
+
        let oid = git::raw::Oid::from_bytes(&buf).expect("the buffer is exactly the right size");
+
        let oid = git::Oid::from(oid);
+

+
        Ok(oid)
+
    }
+
}
+

+
impl Decode for Signature {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        let bytes: [u8; 64] = Decode::decode(buf)?;
+

+
        Ok(Signature::from(bytes))
+
    }
+
}
+

+
impl Decode for u8 {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        Ok(buf.try_get_u8()?)
+
    }
+
}
+

+
impl Decode for u16 {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        Ok(buf.try_get_u16()?)
+
    }
+
}
+

+
impl Decode for u32 {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        Ok(buf.try_get_u32()?)
+
    }
+
}
+

+
impl Decode for u64 {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        Ok(buf.try_get_u64()?)
+
    }
+
}
+

+
impl<const N: usize> Decode for [u8; N] {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        let mut ary = [0; N];
+
        buf.try_copy_to_slice(&mut ary).map_err(Error::from)?;
+

+
        Ok(ary)
+
    }
+
}
+

+
impl<T, const N: usize> Decode for BoundedVec<T, N>
where
    T: Decode,
{
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
        // Reject lengths above the bound `N` before decoding any elements.
        let len: usize = Size::decode(buf)? as usize;
        let mut items = Self::with_capacity(len).map_err(|_| Error::InvalidSize {
            expected: Self::max(),
            actual: len,
        })?;

        // Assumes `items.capacity() == len` after `with_capacity(len)`, so
        // exactly `len` elements are read — TODO confirm against
        // `BoundedVec::with_capacity`. The `.ok()` discards the push result,
        // which presumably cannot fail within capacity.
        for _ in 0..items.capacity() {
            let item = T::decode(buf)?;
            items.push(item).ok();
        }
        Ok(items)
    }
}
+

+
impl Decode for String {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        let len = u8::decode(buf)?;
+
        let mut bytes = vec![0; len as usize];
+

+
        buf.try_copy_to_slice(&mut bytes)?;
+

+
        let string = String::from_utf8(bytes)?;
+

+
        Ok(string)
+
    }
+
}
+

+
impl Decode for RepoId {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        let oid: git::Oid = Decode::decode(buf)?;
+

+
        Ok(Self::from(oid))
+
    }
+
}
+

+
impl Encode for filter::Filter {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        self.deref().as_bytes().encode(buf);
+
    }
+
}
+

+
impl Decode for filter::Filter {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        let size: usize = Size::decode(buf)? as usize;
+
        if !filter::FILTER_SIZES.contains(&size) {
+
            return Err(Error::InvalidFilterSize(size));
+
        }
+

+
        let mut bytes = vec![0; size];
+

+
        buf.try_copy_to_slice(&mut bytes)?;
+

+
        let f = filter::BloomFilter::from(bytes);
+
        debug_assert_eq!(f.hashes(), filter::FILTER_HASHES);
+

+
        Ok(Self::from(f))
+
    }
+
}
+

+
impl<V> Encode for SignedRefs<V> {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        self.id.encode(buf);
+
        self.refs.encode(buf);
+
        self.signature.encode(buf);
+
    }
+
}
+

+
impl Decode for SignedRefs<Unverified> {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        let id = NodeId::decode(buf)?;
+
        let refs = Refs::decode(buf)?;
+
        let signature = Signature::decode(buf)?;
+

+
        Ok(Self::new(refs, id, signature))
+
    }
+
}
+

+
impl Encode for RefsAt {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        self.remote.encode(buf);
+
        self.at.encode(buf);
+
    }
+
}
+

+
impl Decode for RefsAt {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        let remote = NodeId::decode(buf)?;
+
        let at = git::Oid::decode(buf)?;
+
        Ok(Self { remote, at })
+
    }
+
}
+

+
impl Encode for node::Features {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        self.deref().encode(buf)
+
    }
+
}
+

+
impl Decode for node::Features {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        let features = u64::decode(buf)?;
+

+
        Ok(Self::from(features))
+
    }
+
}
+

+
impl Decode for tor::OnionAddrV3 {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        let bytes: [u8; tor::ONION_V3_RAW_LEN] = Decode::decode(buf)?;
+
        let addr = tor::OnionAddrV3::from_raw_bytes(bytes)?;
+

+
        Ok(addr)
+
    }
+
}
+

+
impl Encode for Timestamp {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        self.deref().encode(buf)
+
    }
+
}
+

+
impl Decode for Timestamp {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, Error> {
+
        let millis = u64::decode(buf)?;
+
        let ts = Timestamp::try_from(millis).map_err(Error::InvalidTimestamp)?;
+

+
        Ok(ts)
+
    }
+
}
+

+
#[cfg(test)]
+
mod tests {
+
    use super::*;
+
    use qcheck;
+
    use qcheck_macros::quickcheck;
+

+
    use radicle::assert_matches;
+
    use radicle::crypto::Unverified;
+
    use radicle::storage::refs::SignedRefs;
+

+
    #[quickcheck]
+
    fn prop_u8(input: u8) {
+
        assert_eq!(deserialize::<u8>(&serialize(&input)).unwrap(), input);
+
    }
+

+
    #[quickcheck]
+
    fn prop_u16(input: u16) {
+
        assert_eq!(deserialize::<u16>(&serialize(&input)).unwrap(), input);
+
    }
+

+
    #[quickcheck]
+
    fn prop_u32(input: u32) {
+
        assert_eq!(deserialize::<u32>(&serialize(&input)).unwrap(), input);
+
    }
+

+
    #[quickcheck]
+
    fn prop_u64(input: u64) {
+
        assert_eq!(deserialize::<u64>(&serialize(&input)).unwrap(), input);
+
    }
+

+
    #[quickcheck]
+
    fn prop_string(input: String) -> qcheck::TestResult {
+
        if input.len() > u8::MAX as usize {
+
            return qcheck::TestResult::discard();
+
        }
+
        assert_eq!(deserialize::<String>(&serialize(&input)).unwrap(), input);
+

+
        qcheck::TestResult::passed()
+
    }
+

+
    #[quickcheck]
+
    fn prop_vec(input: BoundedVec<String, 16>) {
+
        assert_eq!(
+
            deserialize::<BoundedVec<String, 16>>(&serialize(&input.as_slice())).unwrap(),
+
            input
+
        );
+
    }
+

+
    #[quickcheck]
+
    fn prop_pubkey(input: PublicKey) {
+
        assert_eq!(deserialize::<PublicKey>(&serialize(&input)).unwrap(), input);
+
    }
+

+
    #[quickcheck]
+
    fn prop_filter(input: filter::Filter) {
+
        assert_eq!(
+
            deserialize::<filter::Filter>(&serialize(&input)).unwrap(),
+
            input
+
        );
+
    }
+

+
    #[quickcheck]
+
    fn prop_id(input: RepoId) {
+
        assert_eq!(deserialize::<RepoId>(&serialize(&input)).unwrap(), input);
+
    }
+

+
    #[quickcheck]
+
    fn prop_refs(input: Refs) {
+
        assert_eq!(deserialize::<Refs>(&serialize(&input)).unwrap(), input);
+
    }
+

+
    #[quickcheck]
+
    fn prop_tuple(input: (String, String)) {
+
        assert_eq!(
+
            deserialize::<(String, String)>(&serialize(&input)).unwrap(),
+
            input
+
        );
+
    }
+

+
    #[quickcheck]
+
    fn prop_signature(input: [u8; 64]) {
+
        let signature = Signature::from(input);
+

+
        assert_eq!(
+
            deserialize::<Signature>(&serialize(&signature)).unwrap(),
+
            signature
+
        );
+
    }
+

+
    #[quickcheck]
+
    fn prop_oid(input: [u8; 20]) {
+
        let oid = git::Oid::try_from(input.as_slice()).unwrap();
+

+
        assert_eq!(deserialize::<git::Oid>(&serialize(&oid)).unwrap(), oid);
+
    }
+

+
    #[quickcheck]
+
    fn prop_signed_refs(input: SignedRefs<Unverified>) {
+
        assert_eq!(
+
            deserialize::<SignedRefs<Unverified>>(&serialize(&input)).unwrap(),
+
            input
+
        );
+
    }
+

+
    #[test]
+
    fn test_string() {
+
        assert_eq!(
+
            serialize(&String::from("hello")),
+
            vec![5, b'h', b'e', b'l', b'l', b'o']
+
        );
+
    }
+

+
    #[test]
+
    fn test_alias() {
+
        assert_eq!(
+
            serialize(&Alias::from_str("hello").unwrap()),
+
            vec![5, b'h', b'e', b'l', b'l', b'o']
+
        );
+
    }
+

+
    #[test]
+
    fn test_filter_invalid() {
+
        let b = bloomy::BloomFilter::with_size(filter::FILTER_SIZE_M / 3);
+
        let f = filter::Filter::from(b);
+
        let bytes = serialize(&f);
+

+
        assert_matches!(
+
            deserialize::<filter::Filter>(&bytes).unwrap_err(),
+
            Error::InvalidFilterSize(_)
+
        );
+
    }
+

+
    #[test]
+
    fn test_bounded_vec_limit() {
+
        let v: BoundedVec<u8, 2> = vec![1, 2].try_into().unwrap();
+
        let buf = serialize(&v);
+

+
        assert_matches!(
+
            deserialize::<BoundedVec<u8, 1>>(&buf),
+
            Err(Error::InvalidSize {
+
                expected: 1,
+
                actual: 2
+
            }),
+
            "fail when vector is too small for buffer",
+
        );
+

+
        assert!(
+
            deserialize::<BoundedVec<u8, 2>>(&buf).is_ok(),
+
            "successfully decode vector of same size",
+
        );
+
    }
+
}
added crates/radicle-protocol/src/wire/frame.rs
@@ -0,0 +1,391 @@
+
//! Framing protocol.
+
#![warn(clippy::missing_docs_in_private_items)]
+
use std::{fmt, io};
+

+
use bytes::{Buf, BufMut};
+
use radicle::node::Link;
+

+
use crate::service::Message;
+
use crate::{wire, wire::varint, wire::varint::VarInt, PROTOCOL_VERSION};
+

+
/// Protocol version strings all start with the magic sequence `rad`, followed
/// by a version number.
///
/// `Version::decode` rejects any byte sequence other than this one.
pub const PROTOCOL_VERSION_STRING: Version = Version([b'r', b'a', b'd', PROTOCOL_VERSION]);
+

+
/// Control open byte: command byte of a `Control::Open` message.
const CONTROL_OPEN: u8 = 0;
/// Control close byte: command byte of a `Control::Close` message.
const CONTROL_CLOSE: u8 = 1;
/// Control EOF byte: command byte of a `Control::Eof` message.
const CONTROL_EOF: u8 = 2;
+

+
/// Protocol version, as sent on the wire: the ASCII magic `rad` followed by a
/// one-byte version number (see [`PROTOCOL_VERSION_STRING`]).
#[derive(Debug, PartialEq, Eq)]
pub struct Version([u8; 4]);
+

+
impl Version {
    /// Version number.
    ///
    /// This is the byte that follows the `rad` magic in the version string.
    pub fn number(&self) -> u8 {
        self.0[3]
    }
}
+

+
impl wire::Encode for Version {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        buf.put_slice(&PROTOCOL_VERSION_STRING.0);
+
    }
+
}
+

+
impl wire::Decode for Version {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, wire::Error> {
+
        let mut version = [0u8; 4];
+

+
        buf.try_copy_to_slice(&mut version[..])?;
+

+
        if version != PROTOCOL_VERSION_STRING.0 {
+
            return Err(wire::Error::InvalidProtocolVersion(version));
+
        }
+
        Ok(Self(version))
+
    }
+
}
+

+
/// Identifies a (multiplexed) stream.
///
/// Stream IDs are variable-length integers with the least significant 3 bits
/// denoting the stream type and initiator.
///
/// The first bit denotes the initiator (outbound or inbound), while the second
/// and third bit denote the stream type. See `StreamKind`.
///
/// In a situation where Alice connects to Bob, Alice's link is outbound, so
/// all streams she creates have the initiator bit set to `0`, while streams
/// Bob creates on that connection have it set to `1` (see the table below and
/// [`StreamId::link`]).
///
/// This ensures that Stream IDs never collide.
/// Additionally, Stream IDs must never be re-used within a connection.
///
/// +=======+==================================+
/// | Bits  | Stream Type                      |
/// +=======+==================================+
/// | 0b000 | Outbound Control stream          |
/// +-------+----------------------------------+
/// | 0b001 | Inbound Control stream           |
/// +-------+----------------------------------+
/// | 0b010 | Outbound Gossip stream           |
/// +-------+----------------------------------+
/// | 0b011 | Inbound Gossip stream            |
/// +-------+----------------------------------+
/// | 0b100 | Outbound Git stream              |
/// +-------+----------------------------------+
/// | 0b101 | Inbound Git stream               |
/// +-------+----------------------------------+
///
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct StreamId(VarInt);
+

+
impl StreamId {
+
    /// Get the initiator of this stream.
+
    pub fn link(&self) -> Link {
+
        let n = *self.0;
+
        if 0b1 & n == 0 {
+
            Link::Outbound
+
        } else {
+
            Link::Inbound
+
        }
+
    }
+

+
    /// Get the kind of stream this is.
+
    pub fn kind(&self) -> Result<StreamKind, u8> {
+
        let id = *self.0;
+
        let kind = ((id >> 1) & 0b11) as u8;
+

+
        StreamKind::try_from(kind)
+
    }
+

+
    /// Create a control identifier.
+
    pub fn control(link: Link) -> Self {
+
        let link = if link.is_outbound() { 0 } else { 1 };
+
        Self(VarInt::from(((StreamKind::Control as u8) << 1) | link))
+
    }
+

+
    /// Create a gossip identifier.
+
    pub fn gossip(link: Link) -> Self {
+
        let link = if link.is_outbound() { 0 } else { 1 };
+
        Self(VarInt::from(((StreamKind::Gossip as u8) << 1) | link))
+
    }
+

+
    /// Create a git identifier.
+
    pub fn git(link: Link) -> Self {
+
        let link = if link.is_outbound() { 0 } else { 1 };
+
        Self(VarInt::from(((StreamKind::Git as u8) << 1) | link))
+
    }
+

+
    /// Get the nth identifier while preserving the stream type and initiator.
+
    pub fn nth(self, n: u64) -> Result<Self, varint::BoundsExceeded> {
+
        let id = *self.0 + (n << 3);
+
        VarInt::new(id).map(Self)
+
    }
+
}
+

+
impl From<StreamId> for u64 {
    /// Extract the raw numeric stream identifier.
    fn from(id: StreamId) -> Self {
        *id.0
    }
}
+

+
impl From<StreamId> for VarInt {
    /// Unwrap the underlying varint.
    fn from(id: StreamId) -> Self {
        id.0
    }
}
+

+
impl fmt::Display for StreamId {
    /// Display the stream id as its raw decimal value.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        u64::from(*self).fmt(f)
    }
}
+

+
impl wire::Decode for StreamId {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, wire::Error> {
+
        let id = VarInt::decode(buf)?;
+
        Ok(Self(id))
+
    }
+
}
+

+
impl wire::Encode for StreamId {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        self.0.encode(buf)
+
    }
+
}
+

+
/// Type of stream.
///
/// The discriminants occupy the two bits above the initiator bit in a
/// [`StreamId`].
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[repr(u8)]
pub enum StreamKind {
    /// Control stream, used to open and close streams.
    Control = 0b00,
    /// Gossip stream, used to exchange messages.
    Gossip = 0b01,
    /// Git stream, used for replication.
    Git = 0b10,
}
+

+
impl TryFrom<u8> for StreamKind {
+
    type Error = u8;
+

+
    fn try_from(value: u8) -> Result<Self, Self::Error> {
+
        match value {
+
            0b00 => Ok(StreamKind::Control),
+
            0b01 => Ok(StreamKind::Gossip),
+
            0b10 => Ok(StreamKind::Git),
+
            n => Err(n),
+
        }
+
    }
+
}
+

+
/// Protocol frame.
///
///  0                   1                   2                   3
///  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/// |      'r'      |      'a'      |      'd'      |      0x1      | Version
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/// |                     Stream ID                           |TTT|I| Stream ID with Stream [T]ype and [I]nitiator bits
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/// |                     Data                                   ...| Data (variable size)
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
///
/// Nb. the diagram shows three `T` bits, but only two type bits are currently
/// used (see [`StreamKind`] and [`StreamId::kind`]) — confirm before relying
/// on the third bit.
#[derive(Debug, PartialEq, Eq)]
pub struct Frame<M = Message> {
    /// The protocol version.
    pub version: Version,
    /// The stream identifier.
    pub stream: StreamId,
    /// The frame payload; its wire layout depends on the stream kind.
    pub data: FrameData<M>,
}
+

+
impl<M> Frame<M> {
    // All constructors stamp the current `PROTOCOL_VERSION_STRING` into the
    // frame's version field.

    /// Create a 'git' protocol frame.
    pub fn git(stream: StreamId, data: Vec<u8>) -> Self {
        Self {
            version: PROTOCOL_VERSION_STRING,
            stream,
            data: FrameData::Git(data),
        }
    }

    /// Create a 'control' protocol frame.
    ///
    /// The stream id is derived from the given link direction.
    pub fn control(link: Link, ctrl: Control) -> Self {
        Self {
            version: PROTOCOL_VERSION_STRING,
            stream: StreamId::control(link),
            data: FrameData::Control(ctrl),
        }
    }

    /// Create a 'gossip' protocol frame.
    ///
    /// The stream id is derived from the given link direction.
    pub fn gossip(link: Link, msg: M) -> Self {
        Self {
            version: PROTOCOL_VERSION_STRING,
            stream: StreamId::gossip(link),
            data: FrameData::Gossip(msg),
        }
    }
}
+

+
impl<M: wire::Encode> Frame<M> {
    /// Serialize frame to bytes, using the frame's `wire::Encode` impl.
    pub fn to_bytes(&self) -> Vec<u8> {
        wire::serialize(self)
    }
}
+

+
/// Frame payload.
#[derive(Debug, PartialEq, Eq)]
pub enum FrameData<M> {
    /// Control frame payload.
    Control(Control),
    /// Gossip frame payload. Length-prefixed on the wire (see `Frame`'s
    /// `Encode`/`Decode` impls).
    Gossip(M),
    /// Git frame payload. May contain packet-lines as well as packfile data.
    Git(Vec<u8>),
}
+

+
/// A control message sent over a control stream.
///
/// On the wire, each variant is a command byte (`CONTROL_*`) followed by the
/// affected stream id.
#[derive(Debug, PartialEq, Eq)]
pub enum Control {
    /// Open a new stream.
    Open {
        /// The stream to open.
        stream: StreamId,
    },
    /// Close an existing stream.
    Close {
        /// The stream to close.
        stream: StreamId,
    },
    /// Signal an end-of-file. This can be used to simulate connections terminating
    /// without having to close the connection. These control messages are turned into
    /// [`io::ErrorKind::UnexpectedEof`] errors on read.
    Eof {
        /// The stream to send an EOF on.
        stream: StreamId,
    },
}
+

+
impl wire::Decode for Control {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, wire::Error> {
+
        let command = u8::decode(buf)?;
+
        match command {
+
            CONTROL_OPEN => {
+
                let stream = StreamId::decode(buf)?;
+
                Ok(Control::Open { stream })
+
            }
+
            CONTROL_CLOSE => {
+
                let stream = StreamId::decode(buf)?;
+
                Ok(Control::Close { stream })
+
            }
+
            CONTROL_EOF => {
+
                let stream = StreamId::decode(buf)?;
+
                Ok(Control::Eof { stream })
+
            }
+
            other => Err(wire::Error::InvalidControlMessage(other)),
+
        }
+
    }
+
}
+

+
impl wire::Encode for Control {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        match self {
+
            Self::Open { stream: id } => {
+
                CONTROL_OPEN.encode(buf);
+
                id.encode(buf);
+
            }
+
            Self::Eof { stream: id } => {
+
                CONTROL_EOF.encode(buf);
+
                id.encode(buf);
+
            }
+
            Self::Close { stream: id } => {
+
                CONTROL_CLOSE.encode(buf);
+
                id.encode(buf);
+
            }
+
        }
+
    }
+
}
+

+
impl<M: wire::Decode> wire::Decode for Frame<M> {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, wire::Error> {
+
        let version = Version::decode(buf)?;
+
        if version.number() != PROTOCOL_VERSION {
+
            return Err(wire::Error::WrongProtocolVersion(version.number()));
+
        }
+
        let stream = StreamId::decode(buf)?;
+

+
        match stream.kind() {
+
            Ok(StreamKind::Control) => {
+
                let ctrl = Control::decode(buf)?;
+
                let frame = Frame {
+
                    version,
+
                    stream,
+
                    data: FrameData::Control(ctrl),
+
                };
+
                Ok(frame)
+
            }
+
            Ok(StreamKind::Gossip) => {
+
                let data = varint::payload::decode(buf)?;
+
                let mut cursor = io::Cursor::new(data);
+
                let msg = M::decode(&mut cursor)?;
+
                let frame = Frame {
+
                    version,
+
                    stream,
+
                    data: FrameData::Gossip(msg),
+
                };
+

+
                // Nb. If there is data after the `Message` that is not decoded,
+
                // it is simply dropped here.
+

+
                Ok(frame)
+
            }
+
            Ok(StreamKind::Git { .. }) => {
+
                let data = varint::payload::decode(buf)?;
+
                Ok(Frame::git(stream, data))
+
            }
+
            Err(n) => Err(wire::Error::InvalidStreamKind(n)),
+
        }
+
    }
+
}
+

+
impl<M: wire::Encode> wire::Encode for Frame<M> {
    /// Encode a frame: version, stream id, then the payload.
    fn encode(&self, buf: &mut impl BufMut) {
        self.version.encode(buf);
        self.stream.encode(buf);
        match &self.data {
            // Control payloads are self-delimiting; git and gossip payloads
            // are length-prefixed. Gossip messages are serialized to a
            // temporary buffer first so their length is known up front.
            FrameData::Control(ctrl) => ctrl.encode(buf),
            FrameData::Git(data) => varint::payload::encode(data, buf),
            FrameData::Gossip(msg) => varint::payload::encode(&wire::serialize(msg), buf),
        }
    }
}
+

+
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_stream_id() {
        // Kind is carried in bits 1-2, initiator in bit 0 (0 = outbound).
        assert_eq!(StreamId(VarInt(0b000)).kind().unwrap(), StreamKind::Control);
        assert_eq!(StreamId(VarInt(0b010)).kind().unwrap(), StreamKind::Gossip);
        assert_eq!(StreamId(VarInt(0b100)).kind().unwrap(), StreamKind::Git);
        assert_eq!(StreamId(VarInt(0b001)).link(), Link::Inbound);
        assert_eq!(StreamId(VarInt(0b000)).link(), Link::Outbound);
        assert_eq!(StreamId(VarInt(0b101)).link(), Link::Inbound);
        assert_eq!(StreamId(VarInt(0b100)).link(), Link::Outbound);

        assert_eq!(StreamId::git(Link::Outbound), StreamId(VarInt(0b100)));
        assert_eq!(StreamId::control(Link::Outbound), StreamId(VarInt(0b000)));
        assert_eq!(StreamId::gossip(Link::Outbound), StreamId(VarInt(0b010)));

        assert_eq!(StreamId::git(Link::Inbound), StreamId(VarInt(0b101)));
        assert_eq!(StreamId::control(Link::Inbound), StreamId(VarInt(0b001)));
        assert_eq!(StreamId::gossip(Link::Inbound), StreamId(VarInt(0b011)));
    }
}
added crates/radicle-protocol/src/wire/message.rs
@@ -0,0 +1,570 @@
+
use std::{mem, net};
+

+
use bytes::Buf;
+
use bytes::BufMut;
+
use cyphernet::addr::{tor, Addr, HostName, NetAddr};
+
use radicle::crypto::Signature;
+
use radicle::git::Oid;
+
use radicle::identity::RepoId;
+
use radicle::node::Address;
+
use radicle::node::NodeId;
+
use radicle::node::Timestamp;
+

+
use crate::bounded::BoundedVec;
+
use crate::service::filter::Filter;
+
use crate::service::message::*;
+
use crate::wire;
+

+
/// Message type.
///
/// The discriminants are the on-the-wire `u16` type ids of gossip messages;
/// they must stay stable across protocol versions.
#[repr(u16)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MessageType {
    NodeAnnouncement = 2,
    InventoryAnnouncement = 4,
    RefsAnnouncement = 6,
    Subscribe = 8,
    Ping = 10,
    Pong = 12,
    Info = 14,
}
+

+
impl From<MessageType> for u16 {
    /// Return the wire type id of the message type.
    fn from(t: MessageType) -> Self {
        t as u16
    }
}
+

+
impl TryFrom<u16> for MessageType {
+
    type Error = u16;
+

+
    fn try_from(other: u16) -> Result<Self, Self::Error> {
+
        match other {
+
            2 => Ok(MessageType::NodeAnnouncement),
+
            4 => Ok(MessageType::InventoryAnnouncement),
+
            6 => Ok(MessageType::RefsAnnouncement),
+
            8 => Ok(MessageType::Subscribe),
+
            10 => Ok(MessageType::Ping),
+
            12 => Ok(MessageType::Pong),
+
            14 => Ok(MessageType::Info),
+
            _ => Err(other),
+
        }
+
    }
+
}
+

+
impl Message {
    /// The maximum supported message size in bytes.
    ///
    /// Leaves room for the `u16` type id within the maximum encodable size.
    pub const MAX_SIZE: wire::Size =
        wire::Size::MAX - (mem::size_of::<MessageType>() as wire::Size);

    /// The wire type id of this message (see [`MessageType`]).
    pub fn type_id(&self) -> u16 {
        match self {
            Self::Subscribe { .. } => MessageType::Subscribe,
            // Announcements share one outer variant; the type id is derived
            // from the inner announcement kind.
            Self::Announcement(Announcement { message, .. }) => match message {
                AnnouncementMessage::Node(_) => MessageType::NodeAnnouncement,
                AnnouncementMessage::Inventory(_) => MessageType::InventoryAnnouncement,
                AnnouncementMessage::Refs(_) => MessageType::RefsAnnouncement,
            },
            Self::Info(_) => MessageType::Info,
            Self::Ping { .. } => MessageType::Ping,
            Self::Pong { .. } => MessageType::Pong,
        }
        .into()
    }
}
+

+
/// Address type.
///
/// The discriminants are the one-byte tags used on the wire to prefix an
/// encoded [`Address`].
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AddressType {
    Ipv4 = 1,
    Ipv6 = 2,
    Dns = 3,
    Onion = 4,
}
+

+
impl From<AddressType> for u8 {
    /// Return the wire tag of the address type.
    fn from(t: AddressType) -> Self {
        t as u8
    }
}
+

+
impl From<&Address> for AddressType {
    /// Classify an address by its host name variant.
    fn from(a: &Address) -> Self {
        match a.host {
            HostName::Ip(net::IpAddr::V4(_)) => AddressType::Ipv4,
            HostName::Ip(net::IpAddr::V6(_)) => AddressType::Ipv6,
            HostName::Dns(_) => AddressType::Dns,
            HostName::Tor(_) => AddressType::Onion,
            // `HostName` is non-exhaustive upstream; any other variant is
            // currently unsupported and panics here.
            _ => todo!(), // FIXME(cloudhead): Maxim will remove `non-exhaustive`
        }
    }
}
+

+
impl TryFrom<u8> for AddressType {
+
    type Error = u8;
+

+
    fn try_from(other: u8) -> Result<Self, Self::Error> {
+
        match other {
+
            1 => Ok(AddressType::Ipv4),
+
            2 => Ok(AddressType::Ipv6),
+
            3 => Ok(AddressType::Dns),
+
            4 => Ok(AddressType::Onion),
+
            _ => Err(other),
+
        }
+
    }
+
}
+

+
impl wire::Encode for AnnouncementMessage {
+
    fn encode(&self, buf: &mut impl BufMut) {
+
        match self {
+
            Self::Node(ann) => ann.encode(buf),
+
            Self::Inventory(ann) => ann.encode(buf),
+
            Self::Refs(ann) => ann.encode(buf),
+
        }
+
    }
+
}
+

+
impl wire::Encode for RefsAnnouncement {
    /// Encode fields in wire order: rid, refs, timestamp. Must mirror
    /// `Decode`.
    fn encode(&self, buf: &mut impl BufMut) {
        self.rid.encode(buf);
        self.refs.encode(buf);
        self.timestamp.encode(buf);
    }
}
+

+
impl wire::Decode for RefsAnnouncement {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, wire::Error> {
+
        let rid = RepoId::decode(buf)?;
+
        let refs = BoundedVec::<_, REF_REMOTE_LIMIT>::decode(buf)?;
+
        let timestamp = Timestamp::decode(buf)?;
+

+
        Ok(Self {
+
            rid,
+
            refs,
+
            timestamp,
+
        })
+
    }
+
}
+

+
impl wire::Encode for InventoryAnnouncement {
    /// Encode fields in wire order: inventory, timestamp. Must mirror
    /// `Decode`.
    fn encode(&self, buf: &mut impl BufMut) {
        self.inventory.encode(buf);
        self.timestamp.encode(buf);
    }
}
+

+
impl wire::Decode for InventoryAnnouncement {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, wire::Error> {
+
        let inventory = BoundedVec::decode(buf)?;
+
        let timestamp = Timestamp::decode(buf)?;
+

+
        Ok(Self {
+
            inventory,
+
            timestamp,
+
        })
+
    }
+
}
+

+
/// The type tracking the different variants of [`Info`] for encoding and
/// decoding purposes.
///
/// Note: declared `repr(u8)`, but encoded on the wire as a `u16` (see the
/// `From<InfoType> for u16` impl and `Info`'s codec impls).
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum InfoType {
    RefsAlreadySynced = 1,
}
+

+
impl From<InfoType> for u16 {
    /// Widen the discriminant to the `u16` used on the wire.
    fn from(other: InfoType) -> Self {
        other as u16
    }
}
+

+
impl TryFrom<u16> for InfoType {
+
    type Error = u16;
+

+
    fn try_from(other: u16) -> Result<Self, Self::Error> {
+
        match other {
+
            1 => Ok(Self::RefsAlreadySynced),
+
            n => Err(n),
+
        }
+
    }
+
}
+

+
impl From<Info> for InfoType {
    /// Classify by reference; the owned value is only inspected, not kept.
    fn from(info: Info) -> Self {
        Self::from(&info)
    }
}
+

+
impl From<&Info> for InfoType {
+
    fn from(info: &Info) -> Self {
+
        match info {
+
            Info::RefsAlreadySynced { .. } => Self::RefsAlreadySynced,
+
        }
+
    }
+
}
+

+
impl wire::Encode for Info {
    /// Encode as a `u16` info-type tag followed by the variant's fields in
    /// declaration order. Must mirror `Decode`.
    fn encode(&self, buf: &mut impl BufMut) {
        u16::from(InfoType::from(self)).encode(buf);
        match self {
            Info::RefsAlreadySynced { rid, at } => {
                rid.encode(buf);
                at.encode(buf);
            }
        }
    }
}
+

+
impl wire::Decode for Info {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, wire::Error> {
+
        let info_type = buf.try_get_u16()?;
+

+
        match InfoType::try_from(info_type) {
+
            Ok(InfoType::RefsAlreadySynced) => {
+
                let rid = RepoId::decode(buf)?;
+
                let at = Oid::decode(buf)?;
+

+
                Ok(Self::RefsAlreadySynced { rid, at })
+
            }
+
            Err(other) => Err(wire::Error::UnknownInfoType(other)),
+
        }
+
    }
+
}
+

+
impl wire::Encode for Message {
    /// Encode a message as its `u16` type id followed by the type-specific
    /// payload. Field order in each arm is the wire order and must mirror
    /// `Decode`.
    fn encode(&self, buf: &mut impl BufMut) {
        self.type_id().encode(buf);

        match self {
            Self::Subscribe(Subscribe {
                filter,
                since,
                until,
            }) => {
                filter.encode(buf);
                since.encode(buf);
                until.encode(buf);
            }
            // Nb. announcement wire order is node, signature, message —
            // *not* the struct's field order.
            Self::Announcement(Announcement {
                node,
                message,
                signature,
            }) => {
                node.encode(buf);
                signature.encode(buf);
                message.encode(buf);
            }
            Self::Info(info) => {
                info.encode(buf);
            }
            Self::Ping(Ping { ponglen, zeroes }) => {
                ponglen.encode(buf);
                zeroes.encode(buf);
            }
            Self::Pong { zeroes } => {
                zeroes.encode(buf);
            }
        }
    }
}
+

+
impl wire::Decode for Message {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, wire::Error> {
+
        let type_id = buf.try_get_u16()?;
+

+
        match MessageType::try_from(type_id) {
+
            Ok(MessageType::Subscribe) => {
+
                let filter = Filter::decode(buf)?;
+
                let since = Timestamp::decode(buf)?;
+
                let until = Timestamp::decode(buf)?;
+

+
                Ok(Self::Subscribe(Subscribe {
+
                    filter,
+
                    since,
+
                    until,
+
                }))
+
            }
+
            Ok(MessageType::NodeAnnouncement) => {
+
                let node = NodeId::decode(buf)?;
+
                let signature = Signature::decode(buf)?;
+
                let message = NodeAnnouncement::decode(buf)?.into();
+

+
                Ok(Announcement {
+
                    node,
+
                    message,
+
                    signature,
+
                }
+
                .into())
+
            }
+
            Ok(MessageType::InventoryAnnouncement) => {
+
                let node = NodeId::decode(buf)?;
+
                let signature = Signature::decode(buf)?;
+
                let message = InventoryAnnouncement::decode(buf)?.into();
+

+
                Ok(Announcement {
+
                    node,
+
                    message,
+
                    signature,
+
                }
+
                .into())
+
            }
+
            Ok(MessageType::RefsAnnouncement) => {
+
                let node = NodeId::decode(buf)?;
+
                let signature = Signature::decode(buf)?;
+
                let message = RefsAnnouncement::decode(buf)?.into();
+

+
                Ok(Announcement {
+
                    node,
+
                    message,
+
                    signature,
+
                }
+
                .into())
+
            }
+
            Ok(MessageType::Info) => {
+
                let info = Info::decode(buf)?;
+
                Ok(Self::Info(info))
+
            }
+
            Ok(MessageType::Ping) => {
+
                let ponglen = u16::decode(buf)?;
+
                let zeroes = ZeroBytes::decode(buf)?;
+
                Ok(Self::Ping(Ping { ponglen, zeroes }))
+
            }
+
            Ok(MessageType::Pong) => {
+
                let zeroes = ZeroBytes::decode(buf)?;
+
                Ok(Self::Pong { zeroes })
+
            }
+
            Err(other) => Err(wire::Error::UnknownMessageType(other)),
+
        }
+
    }
+
}
+

+
impl wire::Encode for Address {
    /// Encode as a one-byte address-type tag, the host representation, then
    /// the port. Must mirror `Decode`.
    fn encode(&self, buf: &mut impl BufMut) {
        match self.host {
            HostName::Ip(net::IpAddr::V4(ip)) => {
                u8::from(AddressType::Ipv4).encode(buf);
                ip.octets().encode(buf);
            }
            HostName::Ip(net::IpAddr::V6(ip)) => {
                u8::from(AddressType::Ipv6).encode(buf);
                ip.octets().encode(buf);
            }
            HostName::Dns(ref dns) => {
                u8::from(AddressType::Dns).encode(buf);
                dns.encode(buf);
            }
            HostName::Tor(addr) => {
                u8::from(AddressType::Onion).encode(buf);
                addr.encode(buf);
            }
            // `HostName` is non-exhaustive upstream; other variants have no
            // wire representation and panic here.
            _ => {
                unimplemented!(
                    "Encoding not defined for addresses of the same type as the following: {:?}",
                    self.host
                );
            }
        }
        self.port().encode(buf);
    }
}
+

+
impl wire::Decode for Address {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, wire::Error> {
+
        let addrtype = buf.try_get_u8()?;
+

+
        let host = match AddressType::try_from(addrtype) {
+
            Ok(AddressType::Ipv4) => {
+
                let octets: [u8; 4] = wire::Decode::decode(buf)?;
+
                let ip = net::Ipv4Addr::from(octets);
+

+
                HostName::Ip(net::IpAddr::V4(ip))
+
            }
+
            Ok(AddressType::Ipv6) => {
+
                let octets: [u8; 16] = wire::Decode::decode(buf)?;
+
                let ip = net::Ipv6Addr::from(octets);
+

+
                HostName::Ip(net::IpAddr::V6(ip))
+
            }
+
            Ok(AddressType::Dns) => {
+
                let dns: String = wire::Decode::decode(buf)?;
+

+
                HostName::Dns(dns)
+
            }
+
            Ok(AddressType::Onion) => {
+
                let onion: tor::OnionAddrV3 = wire::Decode::decode(buf)?;
+

+
                HostName::Tor(onion)
+
            }
+
            Err(other) => return Err(wire::Error::UnknownAddressType(other)),
+
        };
+
        let port = u16::decode(buf)?;
+

+
        Ok(Self::from(NetAddr { host, port }))
+
    }
+
}
+

+
impl wire::Encode for ZeroBytes {
    /// Encode as a `u16` count followed by that many zero bytes.
    fn encode(&self, buf: &mut impl BufMut) {
        // NOTE(review): `len() as u16` truncates silently if a `ZeroBytes`
        // longer than `u16::MAX` could ever be constructed — confirm that
        // `ZeroBytes::new` only accepts `u16`-sized counts.
        (self.len() as u16).encode(buf);
        buf.put_bytes(0u8, self.len());
    }
}
+

+
impl wire::Decode for ZeroBytes {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, wire::Error> {
+
        let zeroes = u16::decode(buf)?;
+
        for _ in 0..zeroes {
+
            _ = u8::decode(buf)?;
+
        }
+
        Ok(ZeroBytes::new(zeroes))
+
    }
+
}
+

+
#[cfg(test)]
mod tests {
    use super::*;
    use qcheck_macros::quickcheck;
    use radicle::node::device::Device;
    use radicle::node::UserAgent;
    use radicle::storage::refs::RefsAt;

    use crate::deserializer::Deserializer;
    use crate::wire::{self, Encode};
    use radicle::test::arbitrary;

    // Each `*_max_size` test builds the largest announcement of its kind
    // (limits at their bounds) and checks it still fits in `wire::Size`.
    #[test]
    fn test_refs_ann_max_size() {
        let signer = Device::mock();
        let refs: [RefsAt; REF_REMOTE_LIMIT] = arbitrary::gen(1);
        let ann = AnnouncementMessage::Refs(RefsAnnouncement {
            rid: arbitrary::gen(1),
            refs: BoundedVec::collect_from(&mut refs.into_iter()),
            timestamp: arbitrary::gen(1),
        });
        let ann = ann.signed(&signer);
        let msg = Message::Announcement(ann);
        let data = wire::serialize(&msg);

        assert!(data.len() < wire::Size::MAX as usize);
    }

    #[test]
    fn test_inv_ann_max_size() {
        let signer = Device::mock();
        let inv: [RepoId; INVENTORY_LIMIT] = arbitrary::gen(1);
        let ann = AnnouncementMessage::Inventory(InventoryAnnouncement {
            inventory: BoundedVec::collect_from(&mut inv.into_iter()),
            timestamp: arbitrary::gen(1),
        });
        let ann = ann.signed(&signer);
        let msg = Message::Announcement(ann);
        let data = wire::serialize(&msg);

        assert!(data.len() < wire::Size::MAX as usize);
    }

    #[test]
    fn test_node_ann_max_size() {
        let signer = Device::mock();
        let addrs: [Address; ADDRESS_LIMIT] = arbitrary::gen(1);
        let alias = ['@'; radicle::node::MAX_ALIAS_LENGTH];
        let ann = AnnouncementMessage::Node(NodeAnnouncement {
            version: 1,
            features: Default::default(),
            alias: radicle::node::Alias::new(String::from_iter(alias)),
            addresses: BoundedVec::collect_from(&mut addrs.into_iter()),
            timestamp: arbitrary::gen(1),
            nonce: u64::MAX,
            agent: UserAgent::default(),
        });
        let ann = ann.signed(&signer);
        let msg = Message::Announcement(ann);
        let data = wire::serialize(&msg);

        assert!(data.len() < wire::Size::MAX as usize);
    }

    // Serializing ping/pong with the maximum allowed padding must not panic.
    #[test]
    fn test_pingpong_encode_max_size() {
        wire::serialize(&Message::Ping(Ping {
            ponglen: 0,
            zeroes: ZeroBytes::new(Ping::MAX_PING_ZEROES),
        }));

        wire::serialize(&Message::Pong {
            zeroes: ZeroBytes::new(Ping::MAX_PONG_ZEROES),
        });
    }

    // One byte over the padding limit must panic during serialization.
    #[test]
    #[should_panic(expected = "advance out of bounds")]
    fn test_ping_encode_size_overflow() {
        wire::serialize(&Message::Ping(Ping {
            ponglen: 0,
            zeroes: ZeroBytes::new(Ping::MAX_PING_ZEROES + 1),
        }));
    }

    #[test]
    #[should_panic(expected = "advance out of bounds")]
    fn test_pong_encode_size_overflow() {
        wire::serialize(&Message::Pong {
            zeroes: ZeroBytes::new(Ping::MAX_PONG_ZEROES + 1),
        });
    }

    #[quickcheck]
    fn prop_message_encode_decode(message: Message) {
        let encoded = &wire::serialize(&message);
        let decoded = wire::deserialize::<Message>(encoded).unwrap();

        assert_eq!(message, decoded);
    }

    // Messages streamed through the incremental `Deserializer` come out in
    // order and intact.
    #[test]
    fn prop_message_decoder() {
        fn property(items: Vec<Message>) {
            let mut decoder = Deserializer::<1048576, Message>::new(8);

            for item in &items {
                item.encode(&mut decoder);
            }
            for item in items {
                assert_eq!(decoder.next().unwrap().unwrap(), item);
            }
        }

        qcheck::QuickCheck::new()
            .gen(qcheck::Gen::new(16))
            .quickcheck(property as fn(items: Vec<Message>));
    }

    #[test]
    fn prop_zero_bytes_encode_decode() {
        fn property(zeroes: wire::Size) {
            // Counts above the ping limit are not representable; skip them.
            if zeroes > Ping::MAX_PING_ZEROES {
                return;
            }

            let zeroes = ZeroBytes::new(zeroes);

            assert_eq!(
                wire::deserialize::<ZeroBytes>(&wire::serialize(&zeroes)).unwrap(),
                zeroes
            );
        }

        qcheck::QuickCheck::new()
            .gen(qcheck::Gen::new(16))
            .quickcheck(property as fn(zeroes: wire::Size));
    }

    #[quickcheck]
    fn prop_addr(addr: Address) {
        assert_eq!(
            wire::deserialize::<Address>(&wire::serialize(&addr)).unwrap(),
            addr
        );
    }
}
added crates/radicle-protocol/src/wire/varint.rs
@@ -0,0 +1,252 @@
+
//! Variable-length integer implementation based on QUIC.
+
#![warn(clippy::missing_docs_in_private_items)]
+

+
// This implementation is largely based on the `quinn` crate.
+
// Copyright (c) 2018 The quinn developers.
+
use std::{fmt, ops};
+

+
use bytes::{Buf, BufMut};
+
use thiserror::Error;
+

+
use crate::wire;
+
use crate::wire::{Decode, Encode};
+

+
/// An integer less than 2^62
+
///
+
/// Based on QUIC variable-length integers (RFC 9000).
+
///
+
/// > The QUIC variable-length integer encoding reserves the two most significant bits of the first
+
/// > byte to encode the base-2 logarithm of the integer encoding length in bytes. The integer value is
+
/// > encoded on the remaining bits, in network byte order. This means that integers are encoded on 1,
+
/// > 2, 4, or 8 bytes and can encode 6-, 14-, 30-, or 62-bit values, respectively. Table 4 summarizes
+
/// > the encoding properties.
+
///
+
/// ```text
+
/// MSB   Length   Usable Bits   Range
+
/// ----------------------------------------------------
+
/// 00    1        6             0 - 63
+
/// 01    2        14            0 - 16383
+
/// 10    4        30            0 - 1073741823
+
/// 11    8        62            0 - 4611686018427387903
+
/// ```
+
#[derive(Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
// Invariant: the wrapped value is < 2^62; `new` enforces this, `new_unchecked` does not.
pub struct VarInt(pub(crate) u64);
+

+
impl VarInt {
+
    /// The largest representable value.
+
    pub const MAX: VarInt = VarInt((1 << 62) - 1);
+

+
    /// Succeeds iff `x` < 2^62.
+
    pub fn new(x: u64) -> Result<Self, BoundsExceeded> {
+
        if x <= Self::MAX.0 {
+
            Ok(Self(x))
+
        } else {
+
            Err(BoundsExceeded)
+
        }
+
    }
+

+
    pub fn new_unchecked(x: u64) -> Self {
+
        Self(x)
+
    }
+
}
+

+
impl ops::Deref for VarInt {
+
    type Target = u64;
+

+
    fn deref(&self) -> &Self::Target {
+
        &self.0
+
    }
+
}
+

+
impl From<u8> for VarInt {
+
    fn from(x: u8) -> Self {
+
        VarInt(x.into())
+
    }
+
}
+

+
impl From<u16> for VarInt {
+
    fn from(x: u16) -> Self {
+
        VarInt(x.into())
+
    }
+
}
+

+
impl From<u32> for VarInt {
+
    fn from(x: u32) -> Self {
+
        VarInt(x.into())
+
    }
+
}
+

+
impl std::convert::TryFrom<u64> for VarInt {
+
    type Error = BoundsExceeded;
+
    /// Succeeds iff `x` < 2^62.
+
    fn try_from(x: u64) -> Result<Self, BoundsExceeded> {
+
        VarInt::new(x)
+
    }
+
}
+

+
impl fmt::Debug for VarInt {
+
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+
        self.0.fmt(f)
+
    }
+
}
+

+
impl fmt::Display for VarInt {
+
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+
        self.0.fmt(f)
+
    }
+
}
+

+
/// Error returned when constructing a `VarInt` from a value >= 2^62.
+
#[derive(Debug, Copy, Clone, Eq, PartialEq, Error)]
+
#[error("value too large for varint encoding")]
+
pub struct BoundsExceeded;
+

+
impl Decode for VarInt {
+
    fn decode(buf: &mut impl Buf) -> Result<Self, wire::Error> {
+
        let mut tmp = [0; 8];
+
        tmp[0] = buf.try_get_u8()?;
+

+
            // Two most significant bits of the first byte: base-2 log of the encoded length in bytes.
+
        let tag = tmp[0] >> 6;
+
        tmp[0] &= 0b0011_1111;
+

+
        let x = match tag {
+
            0b00 => u64::from(tmp[0]),
+
            0b01 => {
+
                buf.try_copy_to_slice(&mut tmp[1..2])?;
+
                u64::from(u16::from_be_bytes([tmp[0], tmp[1]]))
+
            }
+
            0b10 => {
+
                buf.try_copy_to_slice(&mut tmp[1..4])?;
+
                u64::from(u32::from_be_bytes([tmp[0], tmp[1], tmp[2], tmp[3]]))
+
            }
+
            0b11 => {
+
                buf.try_copy_to_slice(&mut tmp[1..8])?;
+
                u64::from_be_bytes(tmp)
+
            }
+
            // Exhaustive: `tag` is the top two bits of a byte, so only the
+
            // patterns 0b00..=0b11 handled above can occur.
+
            _ => unreachable! {},
+
        };
+
        Ok(Self(x))
+
    }
+
}
+

+
impl Encode for VarInt {
+
    fn encode(&self, w: &mut impl BufMut) {
+
        let x: u64 = self.0;
+

+
        if x < 2u64.pow(6) {
+
            (x as u8).encode(w)
+
        } else if x < 2u64.pow(14) {
+
            ((0b01 << 14) | x as u16).encode(w)
+
        } else if x < 2u64.pow(30) {
+
            ((0b10 << 30) | x as u32).encode(w)
+
        } else if x < 2u64.pow(62) {
+
            ((0b11 << 62) | x).encode(w)
+
        } else {
+
            panic!("VarInt::encode: integer overflow");
+
        }
+
    }
+
}
+

+
/// Encoding and decoding varint-prefixed payloads.
+
pub mod payload {
+
    use super::*;
+

+
    /// Encode varint-prefixed data payload.
+
    pub fn encode(payload: &[u8], buf: &mut impl BufMut) {
+
        let len = payload.len();
+
        let varint = VarInt::new_unchecked(len as u64);
+

+
        varint.encode(buf); // Write the payload length as a varint prefix.
+
        buf.put_slice(payload);
+
    }
+

+
    /// Decode varint-prefixed data payload.
+
    pub fn decode(buf: &mut impl Buf) -> Result<Vec<u8>, wire::Error> {
+
        let size = VarInt::decode(buf)?;
+
        let mut data = vec![0; *size as usize];
+
        buf.try_copy_to_slice(&mut data[..])?;
+

+
        Ok(data)
+
    }
+
}
+

+
#[cfg(test)]
+
mod test {
+
    use super::*;
+
    use qcheck_macros::quickcheck;
+

+
    impl qcheck::Arbitrary for VarInt {
+
        fn arbitrary(g: &mut qcheck::Gen) -> Self {
+
            let a = u16::arbitrary(g) as u64;
+
            let b = u32::arbitrary(g) as u64;
+
            let n = g
+
                .choose(&[
+
                    0,
+
                    1,
+
                    3,
+
                    7,
+
                    13,
+
                    37,
+
                    255,
+
                    4931,
+
                    54019,
+
                    69149,
+
                    151288809941952652,
+
                    u8::MAX as u64,
+
                    u16::MAX as u64,
+
                    u16::MAX as u64 - 1,
+
                    u32::MAX as u64,
+
                    u32::MAX as u64 - 1,
+
                    *Self::MAX,
+
                    a,
+
                    b,
+
                ])
+
                .copied()
+
                .unwrap();
+

+
            Self(n)
+
        }
+
    }
+

+
    #[quickcheck]
+
    fn prop_encode_decode(input: VarInt) {
+
        let encoded = wire::serialize(&input);
+
        let decoded: VarInt = wire::deserialize(&encoded).unwrap();
+

+
        assert_eq!(decoded, input);
+
    }
+

+
    #[test]
+
    #[should_panic]
+
    fn test_encode_overflow() {
+
        wire::serialize(&VarInt(u64::MAX));
+
    }
+

+
    #[test]
+
    fn test_encoding() {
+
        assert_eq!(wire::serialize(&VarInt(0)), vec![0x0]);
+
        assert_eq!(wire::serialize(&VarInt(1)), vec![0x01]);
+
        assert_eq!(wire::serialize(&VarInt(10)), vec![0x0a]);
+
        assert_eq!(wire::serialize(&VarInt(37)), vec![0x25]);
+
        assert_eq!(
+
            wire::deserialize::<VarInt>(&[0x40, 0x25]).unwrap(),
+
            VarInt(37)
+
        );
+
        assert_eq!(wire::serialize(&VarInt(15293)), vec![0x7b, 0xbd]);
+
        assert_eq!(
+
            wire::serialize(&VarInt(494878333)),
+
            vec![0x9d, 0x7f, 0x3e, 0x7d],
+
        );
+
        assert_eq!(
+
            wire::serialize(&VarInt(151288809941952652)),
+
            vec![0xc2, 0x19, 0x7c, 0x5e, 0xff, 0x14, 0xe8, 0x8c]
+
        );
+
        assert_eq!(
+
            wire::serialize(&VarInt(10000000000)),
+
            vec![0xc0, 0x00, 0x00, 0x02, 0x54, 0x0b, 0xe4, 0x00],
+
        );
+
    }
+
}
added crates/radicle-protocol/src/worker.rs
@@ -0,0 +1,119 @@
+
#![allow(clippy::too_many_arguments)]
+
pub mod fetch;
+

+
use std::io;
+

+
use radicle::identity::RepoId;
+
use radicle::node::Event;
+
use radicle::prelude::NodeId;
+
use radicle::storage::refs::RefsAt;
+

+
// NOTE: `Emitter` now comes from `radicle::node::events` rather than `crate::runtime`.
+

+
use radicle::node::events::Emitter;
+

+
// TODO(review): restore the `channels` re-exports (presumably once that module is ported).
+

+
/// Error returned by fetch.
+
#[derive(thiserror::Error, Debug)]
+
pub enum FetchError {
+
    #[error("the 'git fetch' command failed with exit code '{code}'")]
+
    CommandFailed { code: i32 },
+
    #[error(transparent)]
+
    Io(#[from] io::Error),
+
    #[error(transparent)]
+
    Fetch(#[from] fetch::error::Fetch),
+
    #[error(transparent)]
+
    Handle(#[from] fetch::error::Handle),
+
    #[error(transparent)]
+
    Storage(#[from] radicle::storage::Error),
+
    #[error(transparent)]
+
    PolicyStore(#[from] radicle::node::policy::store::Error),
+
    #[error(transparent)]
+
    Policy(#[from] radicle_fetch::policy::error::Policy),
+
    #[error(transparent)]
+
    Blocked(#[from] radicle_fetch::policy::error::Blocked),
+
}
+

+
impl FetchError {
+
    /// Check if it's a timeout error.
+
    pub fn is_timeout(&self) -> bool {
+
        matches!(self, FetchError::Io(e) if e.kind() == io::ErrorKind::TimedOut)
+
    }
+
}
+

+
/// Error returned by fetch responder.
+
#[derive(thiserror::Error, Debug)]
+
pub enum UploadError {
+
    #[error("error parsing git command packet-line: {0}")]
+
    PacketLine(io::Error),
+
    #[error("error while performing git upload-pack: {0}")]
+
    UploadPack(io::Error),
+
    #[error(transparent)]
+
    Authorization(#[from] AuthorizationError),
+
}
+

+
impl UploadError {
+
    /// Check if it's an end-of-file error.
+
    pub fn is_eof(&self) -> bool {
+
        matches!(self, UploadError::UploadPack(e) if e.kind() == io::ErrorKind::UnexpectedEof)
+
    }
+
}
+

+
#[derive(thiserror::Error, Debug)]
+
pub enum AuthorizationError {
+
    #[error("{0} is not authorized to fetch {1}")]
+
    Unauthorized(NodeId, RepoId),
+
    #[error(transparent)]
+
    PolicyStore(#[from] radicle::node::policy::store::Error),
+
    #[error(transparent)]
+
    Repository(#[from] radicle::storage::RepositoryError),
+
}
+

+
/// Fetch job sent to worker thread.
+
#[derive(Debug, Clone)]
+
pub enum FetchRequest {
+
    /// Client is initiating a fetch for the repository identified by
+
    /// `rid` from the peer identified by `remote`.
+
    Initiator {
+
        /// Repo to fetch.
+
        rid: RepoId,
+
        /// Remote peer we are interacting with.
+
        remote: NodeId,
+
        /// If this fetch is for a particular set of `rad/sigrefs`.
+
        refs_at: Option<Vec<RefsAt>>,
+
    },
+
    /// Server is responding to a fetch request by uploading the
+
    /// specified `refspecs` sent by the client.
+
    Responder {
+
        /// Remote peer we are interacting with.
+
        remote: NodeId,
+
        /// Reporter for upload-pack progress.
+
        emitter: Emitter<Event>,
+
    },
+
}
+

+
impl FetchRequest {
+
    pub fn remote(&self) -> NodeId {
+
        match self {
+
            Self::Initiator { remote, .. } | Self::Responder { remote, .. } => *remote,
+
        }
+
    }
+
}
+

+
/// Fetch result of an upload or fetch.
+
#[derive(Debug)]
+
pub enum FetchResult {
+
    Initiator {
+
        /// Repo fetched.
+
        rid: RepoId,
+
        /// Fetch result, including remotes fetched.
+
        result: Result<fetch::FetchResult, FetchError>,
+
    },
+
    Responder {
+
        /// Repo requested.
+
        rid: Option<RepoId>,
+
        /// Upload result.
+
        result: Result<(), UploadError>,
+
    },
+
}
added crates/radicle-protocol/src/worker/fetch.rs
@@ -0,0 +1,41 @@
+
pub mod error;
+

+
use std::collections::HashSet;
+

+
use radicle::crypto::PublicKey;
+
use radicle::{identity::DocAt, storage::RefUpdate};
+

+
#[derive(Debug, Clone)]
+
pub struct FetchResult {
+
    /// The set of updated references.
+
    pub updated: Vec<RefUpdate>,
+
    /// The set of remote namespaces that were updated.
+
    pub namespaces: HashSet<PublicKey>,
+
    /// The fetch was a full clone.
+
    pub clone: bool,
+
    /// Identity doc of fetched repo.
+
    pub doc: DocAt,
+
}
+

+
impl FetchResult {
+
    pub fn new(doc: DocAt) -> Self {
+
        Self {
+
            updated: vec![],
+
            namespaces: HashSet::new(),
+
            clone: false,
+
            doc,
+
        }
+
    }
+
}
+

+
#[cfg(any(test, feature = "test"))]
+
impl qcheck::Arbitrary for FetchResult {
+
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
+
        FetchResult {
+
            updated: vec![],
+
            namespaces: HashSet::arbitrary(g),
+
            clone: bool::arbitrary(g),
+
            doc: DocAt::arbitrary(g),
+
        }
+
    }
+
}
added crates/radicle-protocol/src/worker/fetch/error.rs
@@ -0,0 +1,75 @@
+
use std::io;
+

+
use thiserror::Error;
+

+
use radicle::{cob, git, identity, storage};
+
use radicle_fetch as fetch;
+

+
#[derive(Debug, Error)]
+
pub enum Fetch {
+
    #[error(transparent)]
+
    Run(#[from] fetch::Error),
+
    #[error(transparent)]
+
    Git(#[from] git::raw::Error),
+
    #[error(transparent)]
+
    Storage(#[from] storage::Error),
+
    #[error(transparent)]
+
    StorageCopy(#[from] io::Error),
+
    #[error(transparent)]
+
    Repository(#[from] radicle::storage::RepositoryError),
+
    #[error(transparent)]
+
    RefsDb(#[from] radicle::node::refs::Error),
+
    #[error("validation of the storage repository failed: the delegates {delegates:?} failed to validate to meet a threshold of {threshold}")]
+
    Validation {
+
        threshold: usize,
+
        delegates: Vec<String>,
+
    },
+
    #[error(transparent)]
+
    Cache(#[from] Cache),
+
}
+

+
#[derive(Debug, Error)]
+
pub enum Cache {
+
    #[error(transparent)]
+
    Parse(#[from] cob::ParseIdentifierError),
+
    #[error(transparent)]
+
    Repository(#[from] storage::RepositoryError),
+
    #[error("failed to remove {type_name} '{id}' from cache: {err}")]
+
    Remove {
+
        id: cob::ObjectId,
+
        type_name: cob::TypeName,
+
        #[source]
+
        err: Box<dyn std::error::Error + Send + Sync + 'static>,
+
    },
+
    #[error(transparent)]
+
    Store(#[from] cob::store::Error),
+
    #[error("failed to update {type_name} '{id}' in cache: {err}")]
+
    Update {
+
        id: cob::ObjectId,
+
        type_name: cob::TypeName,
+
        #[source]
+
        err: Box<dyn std::error::Error + Send + Sync + 'static>,
+
    },
+
}
+

+
#[derive(Debug, Error)]
+
pub enum Handle {
+
    #[error(transparent)]
+
    Doc(#[from] identity::DocError),
+
    #[error(transparent)]
+
    Io(#[from] io::Error),
+
    #[error(transparent)]
+
    Init(#[from] fetch::handle::error::Init),
+
    #[error(transparent)]
+
    Storage(#[from] storage::Error),
+
    #[error(transparent)]
+
    Repository(#[from] radicle::storage::RepositoryError),
+
}
+

+
#[derive(Debug, Error)]
+
pub enum Canonical {
+
    #[error(transparent)]
+
    Identity(#[from] radicle::storage::RepositoryError),
+
    #[error(transparent)]
+
    CanonicalRefs(#[from] radicle::identity::doc::CanonicalRefsError),
+
}
modified crates/radicle-ssh/Cargo.toml
@@ -14,7 +14,7 @@ edition.workspace = true
rust-version.workspace = true

[dependencies]
-
byteorder = { workspace = true }
+
byteorder = "1.4"
log = { workspace = true }
thiserror = { workspace = true }
zeroize = { workspace = true }
modified crates/radicle/src/node.rs
@@ -723,7 +723,7 @@ impl Command {
}

/// Connection link direction.
-
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum Link {
@@ -733,6 +733,18 @@ pub enum Link {
    Inbound,
}

+
impl Link {
+
    /// Check if this is an outbound link.
+
    pub fn is_outbound(&self) -> bool {
+
        matches!(self, Self::Outbound)
+
    }
+

+
    /// Check if this is an inbound link.
+
    pub fn is_inbound(&self) -> bool {
+
        matches!(self, Self::Inbound)
+
    }
+
}
+

/// An established network connection with a peer.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]