Radish alpha
h
Radicle Heartwood Protocol & Stack
Radicle
Git (anonymous pull)
Log in to clone via SSH
node, protocol: Refactor
Lorenz Leutgeb committed 9 months ago
commit 1c20f64a26641ecb7777567b9661599d92757e08
parent 61c468778a87a3a40acfa3be90107cd98a1451b5
39 files changed +2063 -2054
modified Cargo.lock
@@ -2679,21 +2679,13 @@ dependencies = [
name = "radicle-protocol"
version = "0.1.0"
dependencies = [
-
 "amplify",
-
 "anyhow",
 "bloomy",
 "byteorder",
-
 "chrono",
-
 "colored",
 "crossbeam-channel",
 "cyphernet",
 "fastrand",
-
 "io-reactor",
-
 "lexopt",
-
 "libc",
 "localtime",
 "log",
-
 "netservices",
 "nonempty 0.9.0",
 "qcheck",
 "qcheck-macros",
@@ -2704,10 +2696,7 @@ dependencies = [
 "scrypt",
 "serde",
 "serde_json",
-
 "snapbox",
-
 "socket2",
 "sqlite",
-
 "tempfile",
 "thiserror 1.0.69",
]

modified crates/radicle-cli/tests/commands.rs
@@ -7,6 +7,8 @@ use radicle::node;
use radicle::node::address::Store as _;
use radicle::node::config::seeds::RADICLE_NODE_BOOTSTRAP_IRIS;
use radicle::node::config::DefaultSeedingPolicy;
+
use radicle::node::events::Event;
+
use radicle::node::policy::Scope;
use radicle::node::routing::Store as _;
use radicle::node::UserAgent;
use radicle::node::{Address, Alias, Config, Handle as _, DEFAULT_TIMEOUT};
@@ -16,8 +18,6 @@ use radicle::profile::Home;
use radicle::storage::{ReadStorage, RefUpdate, RemoteRepository};
use radicle::test::fixtures;

-
use radicle_node::service::policy::Scope;
-
use radicle_node::service::Event;
#[allow(unused_imports)]
use radicle_node::test::logger;
use radicle_node::test::node::Node;
modified crates/radicle-node/Cargo.toml
@@ -12,7 +12,7 @@ rust-version.workspace = true
[features]
default = ["systemd"]
systemd = ["dep:radicle-systemd"]
-
test = ["radicle/test", "radicle-crypto/test", "radicle-crypto/cyphernet", "qcheck", "snapbox"]
+
test = ["radicle/test", "radicle-crypto/test", "radicle-crypto/cyphernet", "radicle-protocol/test", "qcheck", "snapbox"]

[dependencies]
amplify = { workspace = true }
modified crates/radicle-node/src/control.rs
@@ -246,9 +246,9 @@ mod tests {

    use super::*;
    use crate::identity::RepoId;
+
    use crate::node::policy::Scope;
    use crate::node::Handle;
    use crate::node::{Alias, Node, NodeId};
-
    use crate::service::policy::Scope;
    use crate::test;

    #[test]
deleted crates/radicle-node/src/deserializer.rs
@@ -1,163 +0,0 @@
-
use std::io;
-
use std::marker::PhantomData;
-

-
use crate::bounded;
-
use crate::prelude::BoundedVec;
-
use crate::service::message::Message;
-
use crate::wire;
-

-
/// Message stream deserializer.
-
///
-
/// Used to for example turn a byte stream into network messages.
-
#[derive(Debug)]
-
pub struct Deserializer<const B: usize, D = Message> {
-
    unparsed: BoundedVec<u8, B>,
-
    item: PhantomData<D>,
-
}
-

-
impl<const B: usize, D: wire::Decode> Default for Deserializer<B, D> {
-
    fn default() -> Self {
-
        Self::new(wire::Size::MAX as usize + 1)
-
    }
-
}
-

-
impl<const B: usize, D> TryFrom<Vec<u8>> for Deserializer<B, D> {
-
    type Error = bounded::Error;
-

-
    fn try_from(unparsed: Vec<u8>) -> Result<Self, Self::Error> {
-
        BoundedVec::try_from(unparsed).map(|unparsed| Self {
-
            unparsed,
-
            item: PhantomData,
-
        })
-
    }
-
}
-

-
impl<const B: usize, D: wire::Decode> Deserializer<B, D> {
-
    /// Create a new stream decoder.
-
    pub fn new(capacity: usize) -> Self {
-
        Self {
-
            unparsed: BoundedVec::with_capacity(capacity)
-
                .expect("Deserializer::new: capacity exceeds maximum"),
-
            item: PhantomData,
-
        }
-
    }
-

-
    /// Input bytes into the decoder.
-
    pub fn input(&mut self, bytes: &[u8]) -> Result<(), bounded::Error> {
-
        self.unparsed.extend_from_slice(bytes)
-
    }
-

-
    /// Decode and return the next message. Returns [`None`] if nothing was decoded.
-
    pub fn deserialize_next(&mut self) -> Result<Option<D>, wire::Error> {
-
        let mut reader = io::Cursor::new(self.unparsed.as_slice());
-

-
        match D::decode(&mut reader) {
-
            Ok(msg) => {
-
                let pos = reader.position() as usize;
-
                self.unparsed.drain(..pos);
-

-
                Ok(Some(msg))
-
            }
-
            Err(err) if err.is_eof() => Ok(None),
-
            Err(err) => Err(err),
-
        }
-
    }
-

-
    /// Drain the unparsed buffer.
-
    pub fn unparsed(&mut self) -> impl ExactSizeIterator<Item = u8> + '_ {
-
        self.unparsed.drain(..)
-
    }
-

-
    /// Return whether there are unparsed bytes.
-
    pub fn is_empty(&self) -> bool {
-
        self.unparsed.is_empty()
-
    }
-

-
    /// Return the size of the unparsed data.
-
    pub fn len(&self) -> usize {
-
        self.unparsed.len()
-
    }
-
}
-

-
impl<const B: usize, D: wire::Decode> io::Write for Deserializer<B, D> {
-
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
-
        self.input(buf).map_err(|_| io::ErrorKind::OutOfMemory)?;
-

-
        Ok(buf.len())
-
    }
-

-
    fn flush(&mut self) -> io::Result<()> {
-
        Ok(())
-
    }
-
}
-

-
impl<const B: usize, D: wire::Decode> Iterator for Deserializer<B, D> {
-
    type Item = Result<D, wire::Error>;
-

-
    fn next(&mut self) -> Option<Self::Item> {
-
        self.deserialize_next().transpose()
-
    }
-
}
-

-
#[cfg(test)]
-
mod test {
-
    use super::*;
-
    use qcheck_macros::quickcheck;
-

-
    use crate::test::assert_matches;
-

-
    const MSG_HELLO: &[u8] = &[5, b'h', b'e', b'l', b'l', b'o'];
-
    const MSG_BYE: &[u8] = &[3, b'b', b'y', b'e'];
-

-
    #[test]
-
    fn test_decode_next() {
-
        let mut decoder = Deserializer::<1024, String>::new(8);
-

-
        decoder.input(&[3, b'b']).unwrap();
-
        assert_matches!(decoder.deserialize_next(), Ok(None));
-
        assert_eq!(decoder.unparsed.len(), 2);
-

-
        decoder.input(&[b'y']).unwrap();
-
        assert_matches!(decoder.deserialize_next(), Ok(None));
-
        assert_eq!(decoder.unparsed.len(), 3);
-

-
        decoder.input(&[b'e']).unwrap();
-
        assert_matches!(decoder.deserialize_next(), Ok(Some(s)) if s.as_str() == "bye");
-
        assert_eq!(decoder.unparsed.len(), 0);
-
        assert!(decoder.is_empty());
-
    }
-

-
    #[test]
-
    fn test_unparsed() {
-
        let mut decoder = Deserializer::<1024, String>::new(8);
-

-
        decoder.input(&[3, b'b', b'y']).unwrap();
-
        assert_eq!(decoder.unparsed().collect::<Vec<_>>(), vec![3, b'b', b'y']);
-
        assert!(decoder.is_empty());
-
    }
-

-
    #[quickcheck]
-
    fn prop_decode_next(chunk_size: usize) {
-
        let mut bytes = vec![];
-
        let mut msgs = vec![];
-
        let mut decoder = Deserializer::<1024, String>::new(8);
-

-
        let chunk_size = 1 + chunk_size % MSG_HELLO.len() + MSG_BYE.len();
-

-
        bytes.extend_from_slice(MSG_HELLO);
-
        bytes.extend_from_slice(MSG_BYE);
-

-
        for chunk in bytes.as_slice().chunks(chunk_size) {
-
            decoder.input(chunk).unwrap();
-

-
            while let Some(msg) = decoder.deserialize_next().unwrap() {
-
                msgs.push(msg);
-
            }
-
        }
-

-
        assert_eq!(decoder.unparsed.len(), 0);
-
        assert_eq!(msgs.len(), 2);
-
        assert_eq!(msgs[0], String::from("hello"));
-
        assert_eq!(msgs[1], String::from("bye"));
-
    }
-
}
modified crates/radicle-node/src/lib.rs
@@ -3,11 +3,12 @@
// suggestions did not make sense.
#![allow(clippy::byte_char_slices)]

-
pub mod bounded;
+
use std::str::FromStr;
+
use std::sync::LazyLock;
+

pub mod control;
-
pub mod deserializer;
pub mod runtime;
-
pub mod service;
+
pub(crate) use radicle_protocol::service;
#[cfg(any(test, feature = "test"))]
pub mod test;
#[cfg(test)]
@@ -18,7 +19,8 @@ pub mod worker;
use radicle::version::Version;

pub use localtime::{LocalDuration, LocalTime};
-
pub use netservices::Direction as Link;
+
pub use radicle::node::Link;
+
pub use radicle::node::UserAgent;
pub use radicle::node::PROTOCOL_VERSION;
pub use radicle::prelude::Timestamp;
pub use radicle::{collections, crypto, git, identity, node, profile, rad, storage};
@@ -32,14 +34,18 @@ pub const VERSION: Version = Version {
    timestamp: env!("SOURCE_DATE_EPOCH"),
};

+
/// This node's user agent string.
+
pub static USER_AGENT: LazyLock<UserAgent> = LazyLock::new(|| {
+
    FromStr::from_str(format!("/radicle:{}/", VERSION.version).as_str())
+
        .expect("user agent is valid")
+
});
+

pub mod prelude {
-
    pub use crate::bounded::BoundedVec;
    pub use crate::crypto::{PublicKey, Signature};
-
    pub use crate::deserializer::Deserializer;
    pub use crate::identity::{Did, RepoId};
-
    pub use crate::node::Address;
+
    pub use crate::node::{config::Network, Address, Event, NodeId};
    pub use crate::service::filter::Filter;
-
    pub use crate::service::{DisconnectReason, Event, Message, Network, NodeId};
+
    pub use crate::service::{DisconnectReason, Message};
    pub use crate::storage::refs::Refs;
    pub use crate::storage::WriteStorage;
    pub use crate::{LocalDuration, LocalTime, Timestamp};
modified crates/radicle-node/src/runtime.rs
@@ -21,6 +21,8 @@ use radicle::node;
use radicle::node::address;
use radicle::node::address::Store as _;
use radicle::node::notifications;
+
use radicle::node::policy::config as policy;
+
use radicle::node::Event;
use radicle::node::Handle as _;
use radicle::node::UserAgent;
use radicle::profile::Home;
@@ -29,7 +31,7 @@ use radicle::{cob, git, storage, Storage};
use crate::control;
use crate::node::{routing, NodeId};
use crate::service::message::NodeAnnouncement;
-
use crate::service::{gossip, policy, Event, INITIAL_SUBSCRIBE_BACKLOG_DELTA};
+
use crate::service::{gossip, INITIAL_SUBSCRIBE_BACKLOG_DELTA};
use crate::wire;
use crate::wire::{Decode, Wire};
use crate::worker;
@@ -117,7 +119,7 @@ impl Runtime {
    /// This function spawns threads.
    pub fn init<G>(
        home: Home,
-
        config: service::Config,
+
        config: radicle::node::Config,
        listen: Vec<net::SocketAddr>,
        signals: chan::Receiver<Signal>,
        signer: Device<G>,
modified crates/radicle-node/src/runtime/handle.rs
@@ -5,8 +5,10 @@ use std::sync::Arc;
use std::{fmt, io, time};

use crossbeam_channel as chan;
+
use radicle::node::events::{Event, Events};
+
use radicle::node::policy;
+
use radicle::node::{Config, NodeId};
use radicle::node::{ConnectOptions, ConnectResult, Seeds};
-
use radicle::storage::refs::RefsAt;
use reactor::poller::popol::PopolWaker;
use serde_json::json;
use thiserror::Error;
@@ -16,10 +18,8 @@ use crate::node::{Alias, Command, FetchResult};
use crate::profile::Home;
use crate::runtime::Emitter;
use crate::service;
-
use crate::service::policy;
-
use crate::service::NodeId;
-
use crate::service::{CommandError, Config, QueryState};
-
use crate::service::{Event, Events};
+
use crate::service::{CommandError, QueryState};
+
use crate::storage::refs::RefsAt;
use crate::wire;
use crate::wire::StreamId;
use crate::worker::TaskResult;
modified crates/radicle-node/src/test.rs
@@ -1,4 +1,3 @@
-
pub mod arbitrary;
pub mod gossip;
pub mod handle;
pub mod node;
deleted crates/radicle-node/src/test/arbitrary.rs
@@ -1,142 +0,0 @@
-
use std::collections::HashSet;
-

-
use bloomy::BloomFilter;
-
use qcheck::Arbitrary;
-
use radicle::node::UserAgent;
-

-
use crate::crypto;
-
use crate::identity::DocAt;
-
use crate::node::Alias;
-
use crate::prelude::{BoundedVec, NodeId, RepoId, Timestamp};
-
use crate::service::filter::{Filter, FILTER_SIZE_L, FILTER_SIZE_M, FILTER_SIZE_S};
-
use crate::service::message::{
-
    Announcement, Info, InventoryAnnouncement, Message, NodeAnnouncement, Ping, RefsAnnouncement,
-
    Subscribe, ZeroBytes,
-
};
-
use crate::wire::MessageType;
-
use crate::worker::fetch::FetchResult;
-

-
pub use radicle::test::arbitrary::*;
-

-
impl Arbitrary for Filter {
-
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
-
        let size = *g
-
            .choose(&[FILTER_SIZE_S, FILTER_SIZE_M, FILTER_SIZE_L])
-
            .unwrap();
-
        let mut bytes = vec![0; size];
-
        for _ in 0..64 {
-
            let index = usize::arbitrary(g) % bytes.len();
-
            bytes[index] = u8::arbitrary(g);
-
        }
-
        Self::from(BloomFilter::from(bytes))
-
    }
-
}
-

-
impl Arbitrary for FetchResult {
-
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
-
        FetchResult {
-
            updated: vec![],
-
            namespaces: HashSet::arbitrary(g),
-
            clone: bool::arbitrary(g),
-
            doc: DocAt::arbitrary(g),
-
        }
-
    }
-
}
-

-
impl Arbitrary for Message {
-
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
-
        let type_id = g
-
            .choose(&[
-
                MessageType::InventoryAnnouncement,
-
                MessageType::NodeAnnouncement,
-
                MessageType::RefsAnnouncement,
-
                MessageType::Info,
-
                MessageType::Subscribe,
-
                MessageType::Ping,
-
                MessageType::Pong,
-
            ])
-
            .unwrap();
-

-
        match type_id {
-
            MessageType::InventoryAnnouncement => Announcement {
-
                node: NodeId::arbitrary(g),
-
                message: InventoryAnnouncement {
-
                    inventory: BoundedVec::arbitrary(g),
-
                    timestamp: Timestamp::arbitrary(g),
-
                }
-
                .into(),
-
                signature: crypto::Signature::from(<[u8; 64]>::arbitrary(g)),
-
            }
-
            .into(),
-
            MessageType::RefsAnnouncement => Announcement {
-
                node: NodeId::arbitrary(g),
-
                message: RefsAnnouncement {
-
                    rid: RepoId::arbitrary(g),
-
                    refs: BoundedVec::arbitrary(g),
-
                    timestamp: Timestamp::arbitrary(g),
-
                }
-
                .into(),
-
                signature: crypto::Signature::from(<[u8; 64]>::arbitrary(g)),
-
            }
-
            .into(),
-
            MessageType::NodeAnnouncement => {
-
                let message = NodeAnnouncement {
-
                    version: u8::arbitrary(g),
-
                    features: u64::arbitrary(g).into(),
-
                    timestamp: Timestamp::arbitrary(g),
-
                    alias: Alias::arbitrary(g),
-
                    addresses: Arbitrary::arbitrary(g),
-
                    nonce: u64::arbitrary(g),
-
                    agent: UserAgent::arbitrary(g),
-
                }
-
                .into();
-
                let bytes: [u8; 64] = Arbitrary::arbitrary(g);
-
                let signature = crypto::Signature::from(bytes);
-

-
                Announcement {
-
                    node: NodeId::arbitrary(g),
-
                    signature,
-
                    message,
-
                }
-
                .into()
-
            }
-
            MessageType::Info => {
-
                let message = Info::RefsAlreadySynced {
-
                    rid: RepoId::arbitrary(g),
-
                    at: oid(),
-
                };
-
                Self::Info(message)
-
            }
-
            MessageType::Subscribe => Self::Subscribe(Subscribe {
-
                filter: Filter::arbitrary(g),
-
                since: Timestamp::arbitrary(g),
-
                until: Timestamp::arbitrary(g),
-
            }),
-
            MessageType::Ping => {
-
                let mut rng = fastrand::Rng::with_seed(u64::arbitrary(g));
-

-
                Self::Ping(Ping::new(&mut rng))
-
            }
-
            MessageType::Pong => Self::Pong {
-
                zeroes: ZeroBytes::new(u16::arbitrary(g).min(Ping::MAX_PONG_ZEROES)),
-
            },
-
        }
-
    }
-
}
-

-
impl Arbitrary for ZeroBytes {
-
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
-
        ZeroBytes::new(u16::arbitrary(g))
-
    }
-
}
-

-
impl<T, const N: usize> Arbitrary for BoundedVec<T, N>
-
where
-
    T: Arbitrary + Eq,
-
{
-
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
-
        let mut v: Vec<T> = Arbitrary::arbitrary(g);
-
        v.truncate(N);
-
        v.try_into().expect("size within bounds")
-
    }
-
}
modified crates/radicle-node/src/test/environment.rs
@@ -19,8 +19,10 @@ use radicle::git::refname;
use radicle::identity::{RepoId, Visibility};
use radicle::node::config::ConnectAddress;
use radicle::node::device::Device;
+
use radicle::node::events::Event;
use radicle::node::policy::store as policy;
use radicle::node::seed::Store as _;
+
pub use radicle::node::Config;
use radicle::node::{Alias, Database, UserAgent, POLICIES_DB_FILE};
use radicle::node::{ConnectOptions, Handle as _};
use radicle::profile;
@@ -34,12 +36,9 @@ use radicle::{cob, explorer};
use radicle::{git, web};

use crate::node::NodeId;
-
use crate::service::Event;
use crate::storage::git::transport;
use crate::{runtime, runtime::Handle, service, Runtime};

-
pub use service::Config;
-

/// Test environment.
pub struct Environment {
    tempdir: tempfile::TempDir,
modified crates/radicle-node/src/test/handle.rs
@@ -9,8 +9,8 @@ use radicle::storage::refs::RefsAt;
use crate::identity::RepoId;
use crate::node::{Alias, Config, ConnectOptions, ConnectResult, Event, FetchResult, Seeds};
use crate::runtime::HandleError;
-
use crate::service::policy;
-
use crate::service::NodeId;
+
use radicle::node::policy;
+
use radicle::node::NodeId;

#[derive(Default, Clone)]
pub struct Handle {
modified crates/radicle-node/src/test/node.rs
@@ -22,6 +22,7 @@ use radicle::node::config::ConnectAddress;
use radicle::node::policy::store as policy;
use radicle::node::seed::Store as _;
use radicle::node::Config;
+
use radicle::node::Event;
use radicle::node::{self, Alias};
use radicle::node::{ConnectOptions, Handle as _};
use radicle::node::{Database, POLICIES_DB_FILE};
@@ -33,7 +34,6 @@ use radicle::Storage;

use crate::node::device::Device;
use crate::node::NodeId;
-
use crate::service::Event;
use crate::storage::git::transport;
use crate::{runtime, runtime::Handle, service, Runtime};

modified crates/radicle-node/src/test/peer.rs
@@ -28,7 +28,6 @@ use crate::runtime::Emitter;
use crate::service;
use crate::service::io::Io;
use crate::service::message::*;
-
use crate::service::policy::{Scope, SeedingPolicy};
use crate::service::*;
use crate::storage::git::transport::remote;
use crate::storage::{RemoteId, WriteStorage};
@@ -36,6 +35,10 @@ use crate::test::storage::MockStorage;
use crate::test::{arbitrary, fixtures, simulator};
use crate::wire::MessageType;
use crate::{Link, LocalDuration, LocalTime, PROTOCOL_VERSION};
+
use radicle::node::events::Events;
+
use radicle::node::policy::config as policy;
+
use radicle::node::policy::{Scope, SeedingPolicy};
+
use radicle_protocol::bounded::BoundedVec;

/// Service instantiation used for testing.
pub type Service<S, G> = service::Service<Database, S, G>;
@@ -100,7 +103,7 @@ where
}

pub struct Config<G: crypto::signature::Signer<crypto::Signature> + 'static> {
-
    pub config: service::Config,
+
    pub config: radicle::node::Config,
    pub local_time: LocalTime,
    pub policy: SeedingPolicy,
    pub signer: Device<G>,
@@ -113,7 +116,7 @@ impl Default for Config<MockSigner> {
        let mut rng = fastrand::Rng::new();
        let signer = Device::mock_rng(&mut rng);
        let tmp = tempfile::TempDir::new().unwrap();
-
        let config = service::Config::test(Alias::from_str("mocky").unwrap());
+
        let config = radicle::node::Config::test(Alias::from_str("mocky").unwrap());

        Config {
            config,
modified crates/radicle-node/src/test/simulator.rs
@@ -13,16 +13,19 @@ use std::{fmt, io, net};

use localtime::{LocalDuration, LocalTime};
use log::*;
+
use radicle::node::events::Event;
+
use radicle::node::NodeId;
+
use radicle_protocol::worker::FetchError;

use crate::crypto;
use crate::prelude::{Address, RepoId};
use crate::service::io::Io;
-
use crate::service::{DisconnectReason, Event, Message, Metrics, NodeId};
+
use crate::service::{DisconnectReason, Message, Metrics};
use crate::storage::Namespaces;
use crate::storage::{ReadRepository, WriteStorage};
use crate::test::arbitrary;
use crate::test::peer::Service;
-
use crate::worker::{fetch, FetchError};
+
use crate::worker::fetch;
use crate::Link;

/// Minimum latency between peers.
modified crates/radicle-node/src/tests.rs
@@ -9,17 +9,19 @@ use std::sync::LazyLock;
use std::time;

use crossbeam_channel as chan;
-
use netservices::Direction as Link;
use radicle::identity::Visibility;
use radicle::node::address::Store as _;
use radicle::node::device::Device;
+
use radicle::node::policy;
use radicle::node::refs::Store as _;
use radicle::node::routing::Store as _;
+
use radicle::node::Link;
use radicle::node::{ConnectOptions, DEFAULT_TIMEOUT};
use radicle::storage::refs::RefsAt;
use radicle::storage::RefUpdate;
use radicle::test::arbitrary::gen;
use radicle::test::storage::MockRepository;
+
use radicle_protocol::bounded::BoundedVec;

use crate::collections::{RandomMap, RandomSet};
use crate::identity::RepoId;
@@ -49,7 +51,6 @@ use crate::test::simulator::{Peer as _, Simulation};
use crate::test::storage::MockStorage;
use crate::wire::Decode;
use crate::wire::Encode;
-
use crate::worker;
use crate::worker::fetch;
use crate::LocalTime;
use crate::{git, identity, rad, runtime, service, test};
@@ -1462,7 +1463,7 @@ fn test_fetch_missing_inventory_on_schedule() {
    alice.fetched(
        rid,
        bob.id,
-
        Err(worker::FetchError::Io(
+
        Err(radicle_protocol::worker::FetchError::Io(
            io::ErrorKind::ConnectionReset.into(),
        )),
    );
@@ -1806,7 +1807,7 @@ fn test_init_and_seed() {
        .find(|e| {
            matches!(
                e,
-
                service::Event::RefsFetched { remote, .. }
+
                radicle::node::events::Event::RefsFetched { remote, .. }
                if *remote == eve.node_id()
            )
        })
modified crates/radicle-node/src/tests/e2e.rs
@@ -1,6 +1,8 @@
use std::{collections::HashSet, thread, time};

use radicle::node::device::Device;
+
use radicle::node::policy::Scope;
+
use radicle::node::Event;
use radicle::node::{Alias, ConnectResult, FetchResult, Handle as _, DEFAULT_TIMEOUT};
use radicle::storage::{
    ReadRepository, ReadStorage, RefUpdate, RemoteRepository, SignRepository, ValidateRepository,
@@ -13,7 +15,6 @@ use radicle::{git, issue};
use crate::node::config::Limits;
use crate::node::{Config, ConnectOptions};
use crate::service;
-
use crate::service::policy::Scope;
use crate::storage::git::transport;
use crate::test::logger;
use crate::test::node::{converge, Node};
@@ -688,8 +689,7 @@ fn test_large_fetch() {
    bob_events
        .wait(
            |e| {
-
                matches!(e, service::Event::RefsFetched { updated, .. } if !updated.is_empty())
-
                    .then_some(())
+
                matches!(e, Event::RefsFetched { updated, .. } if !updated.is_empty()).then_some(())
            },
            time::Duration::from_secs(9 * scale as u64),
        )
@@ -717,7 +717,7 @@ fn test_concurrent_fetches() {
    let mut alice_repos = HashSet::new();
    let mut alice = Node::init(
        tmp.path(),
-
        service::Config {
+
        radicle::node::config::Config {
            limits: limits.clone(),
            relay: radicle::node::config::Relay::Always,
            ..config::relay("alice")
@@ -725,7 +725,7 @@ fn test_concurrent_fetches() {
    );
    let mut bob = Node::init(
        tmp.path(),
-
        service::Config {
+
        radicle::node::config::Config {
            limits,
            relay: radicle::node::config::Relay::Always,
            ..config::relay("bob")
@@ -766,7 +766,7 @@ fn test_concurrent_fetches() {

    while !bob_repos.is_empty() {
        match alice_events.recv().unwrap() {
-
            service::Event::RefsFetched { rid, updated, .. } if !updated.is_empty() => {
+
            Event::RefsFetched { rid, updated, .. } if !updated.is_empty() => {
                bob_repos.remove(&rid);
                log::debug!(target: "test", "{} fetched {rid} ({} left)",alice.id, bob_repos.len());
            }
@@ -776,7 +776,7 @@ fn test_concurrent_fetches() {

    while !alice_repos.is_empty() {
        match bob_events.recv().unwrap() {
-
            service::Event::RefsFetched { rid, updated, .. } if !updated.is_empty() => {
+
            Event::RefsFetched { rid, updated, .. } if !updated.is_empty() => {
                alice_repos.remove(&rid);
                log::debug!(target: "test", "{} fetched {rid} ({} left)", bob.id, alice_repos.len());
            }
@@ -1255,8 +1255,7 @@ fn missing_delegate_default_branch() {
    bob_events
        .wait(
            |e| {
-
                matches!(e, service::Event::RefsFetched { updated, .. } if !updated.is_empty())
-
                    .then_some(())
+
                matches!(e, Event::RefsFetched { updated, .. } if !updated.is_empty()).then_some(())
            },
            DEFAULT_TIMEOUT,
        )
@@ -1416,7 +1415,7 @@ fn test_background_foreground_fetch() {
    bob.handle.announce_refs(rid).unwrap();
    alice_events
        .wait(
-
            |e| matches!(e, service::Event::RefsAnnounced { .. }).then_some(()),
+
            |e| matches!(e, Event::RefsAnnounced { .. }).then_some(()),
            DEFAULT_TIMEOUT,
        )
        .unwrap();
added crates/radicle-node/src/wire.rs
@@ -0,0 +1,1389 @@
+
//! Implementation of the transport protocol.
+
//!
+
//! We use the Noise XK handshake pattern to establish an encrypted stream with a remote peer.
+
//! The handshake itself is implemented in the external [`cyphernet`] and [`netservices`] crates.
+
use std::collections::hash_map::Entry;
+
use std::collections::VecDeque;
+
use std::os::unix::io::{AsRawFd, RawFd};
+
use std::sync::Arc;
+
use std::{io, net, time};
+

+
use amplify::Wrapper as _;
+
use crossbeam_channel as chan;
+
use cyphernet::addr::{HostName, InetHost, NetAddr};
+
use cyphernet::encrypt::noise::{HandshakePattern, Keyset, NoiseState};
+
use cyphernet::proxy::socks5;
+
use cyphernet::{Digest, EcSk, Ecdh, Sha256};
+
use localtime::LocalTime;
+
use netservices::resource::{ListenerEvent, NetAccept, NetTransport, SessionEvent};
+
use netservices::session::{NoiseSession, ProtocolArtifact, Socks5Session};
+
use netservices::{NetConnection, NetReader, NetWriter};
+
use radicle::node::device::Device;
+
use reactor::{ResourceId, ResourceType, Timestamp};
+

+
use radicle::collections::RandomMap;
+
use radicle::crypto;
+
use radicle::node::config::AddressConfig;
+
use radicle::node::Link;
+
use radicle::node::NodeId;
+
use radicle::storage::WriteStorage;
+
use radicle_protocol::deserializer::Deserializer;
+
pub use radicle_protocol::wire::frame;
+
pub use radicle_protocol::wire::frame::{Frame, FrameData, StreamId};
+
pub use radicle_protocol::wire::*;
+
use radicle_protocol::worker::{FetchRequest, FetchResult};
+

+
use crate::service;
+
use crate::service::io::Io;
+
use crate::service::FETCH_TIMEOUT;
+
use crate::service::{session, DisconnectReason, Metrics, Service};
+
use crate::worker;
+
use crate::worker::{ChannelEvent, ChannelsConfig};
+
use crate::worker::{Task, TaskResult};
+

+
/// NoiseXK handshake pattern.
+
pub const NOISE_XK: HandshakePattern = HandshakePattern {
+
    initiator: cyphernet::encrypt::noise::InitiatorPattern::Xmitted,
+
    responder: cyphernet::encrypt::noise::OneWayPattern::Known,
+
};
+

+
/// Default time to wait until a network connection is considered inactive.
+
pub const DEFAULT_CONNECTION_TIMEOUT: time::Duration = time::Duration::from_secs(6);
+

+
/// Default time to wait when dialing a connection, before the remote is considered unreachable.
+
pub const DEFAULT_DIAL_TIMEOUT: time::Duration = time::Duration::from_secs(6);
+

+
/// Maximum size of a peer inbox, in bytes.
+
pub const MAX_INBOX_SIZE: usize = 1024 * 1024 * 2;
+

+
/// Control message used internally between workers, users, and the service.
+
#[allow(clippy::large_enum_variant)]
+
#[derive(Debug)]
+
pub enum Control {
+
    /// Message from the user to the service.
+
    User(service::Command),
+
    /// Message from a worker to the service.
+
    Worker(TaskResult),
+
    /// Flush data in the given stream to the remote.
+
    Flush { remote: NodeId, stream: StreamId },
+
}
+

+
/// Peer session type.
+
pub type WireSession<G> = NoiseSession<G, Sha256, Socks5Session<net::TcpStream>>;
+
/// Peer session type (read-only).
+
pub type WireReader = NetReader<Socks5Session<net::TcpStream>>;
+
/// Peer session type (write-only).
+
pub type WireWriter<G> = NetWriter<NoiseState<G, Sha256>, Socks5Session<net::TcpStream>>;
+

+
/// Reactor action.
+
type Action<G> = reactor::Action<NetAccept<WireSession<G>>, NetTransport<WireSession<G>>>;
+

+
/// A worker stream.
+
struct Stream {
+
    /// Channels.
+
    channels: worker::Channels,
+
    /// Data sent.
+
    sent_bytes: usize,
+
    /// Data received.
+
    received_bytes: usize,
+
}
+

+
impl Stream {
+
    fn new(channels: worker::Channels) -> Self {
+
        Self {
+
            channels,
+
            sent_bytes: 0,
+
            received_bytes: 0,
+
        }
+
    }
+
}
+

+
/// Streams associated with a connected peer.
+
struct Streams {
+
    /// Active streams and their associated worker channels.
+
    /// Note that the gossip and control streams are not included here as they are always
+
    /// implied to exist.
+
    streams: RandomMap<StreamId, Stream>,
+
    /// Connection direction.
+
    link: Link,
+
    /// Sequence number used to compute the next stream id.
+
    seq: u64,
+
}
+

+
impl Streams {
+
    /// Create a new [`Streams`] object, passing the connection link.
+
    fn new(link: Link) -> Self {
+
        Self {
+
            streams: RandomMap::default(),
+
            link,
+
            seq: 0,
+
        }
+
    }
+

+
    /// Get a known stream.
+
    fn get(&self, stream: &StreamId) -> Option<&Stream> {
+
        self.streams.get(stream)
+
    }
+

+
    /// Get a known stream, mutably.
+
    fn get_mut(&mut self, stream: &StreamId) -> Option<&mut Stream> {
+
        self.streams.get_mut(stream)
+
    }
+

+
    /// Open a new stream.
+
    fn open(&mut self, config: ChannelsConfig) -> (StreamId, worker::Channels) {
+
        self.seq += 1;
+

+
        let id = StreamId::git(self.link)
+
            .nth(self.seq)
+
            .expect("Streams::open: too many streams");
+
        let channels = self
+
            .register(id, config)
+
            .expect("Streams::open: stream was already open");
+

+
        (id, channels)
+
    }
+

+
    /// Register an open stream.
+
    fn register(&mut self, stream: StreamId, config: ChannelsConfig) -> Option<worker::Channels> {
+
        let (wire, worker) = worker::Channels::pair(config)
+
            .expect("Streams::register: fatal: unable to create channels");
+

+
        match self.streams.entry(stream) {
+
            Entry::Vacant(e) => {
+
                e.insert(Stream::new(worker));
+
                Some(wire)
+
            }
+
            Entry::Occupied(_) => None,
+
        }
+
    }
+

+
    /// Unregister an open stream.
+
    fn unregister(&mut self, stream: &StreamId) -> Option<Stream> {
+
        self.streams.remove(stream)
+
    }
+

+
    /// Close all streams.
+
    fn shutdown(&mut self) {
+
        for (sid, stream) in self.streams.drain() {
+
            log::debug!(target: "wire", "Closing worker stream {sid}");
+
            stream.channels.close().ok();
+
        }
+
    }
+
}
+

+
/// The initial state of an outbound peer before handshake is completed.
+
#[derive(Debug)]
+
struct Outbound {
+
    /// Resource ID, if registered.
+
    id: Option<ResourceId>,
+
    /// Remote address.
+
    addr: NetAddr<HostName>,
+
    /// Remote Node ID.
+
    nid: NodeId,
+
}
+

+
/// The initial state of an inbound peer before handshake is completed.
///
/// Unlike [`Outbound`], no node id is stored: the remote's identity is only
/// learned once the handshake completes.
#[derive(Debug)]
struct Inbound {
    /// Resource ID, if registered. `None` until the reactor has registered
    /// the transport and assigned it an id.
    id: Option<ResourceId>,
    /// Remote address.
    addr: NetAddr<HostName>,
}
+

+
/// Peer connection state machine.
enum Peer {
    /// The state after handshake is completed.
    /// Peers in this state are handled by the underlying service.
    Connected {
        #[allow(dead_code)]
        addr: NetAddr<HostName>,
        /// Connection direction (inbound or outbound).
        link: Link,
        /// The remote's node id, learned during the handshake.
        nid: NodeId,
        /// Buffer of incoming bytes, deserialized into frames as they
        /// become complete.
        inbox: Deserializer<MAX_INBOX_SIZE, Frame>,
        /// Open worker (git) streams multiplexed over this connection.
        streams: Streams,
    },
    /// The peer was scheduled for disconnection. Once the transport is handed over
    /// by the reactor, we can consider it disconnected.
    Disconnecting {
        link: Link,
        /// `None` for inbound peers that never completed the handshake.
        nid: Option<NodeId>,
        reason: DisconnectReason,
    },
}
+

+
impl std::fmt::Debug for Peer {
+
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+
        match self {
+
            Self::Connected { link, nid, .. } => write!(f, "Connected({link:?}, {nid})"),
+
            Self::Disconnecting { .. } => write!(f, "Disconnecting"),
+
        }
+
    }
+
}
+

+
impl Peer {
+
    /// Return the peer's id, if any.
+
    fn id(&self) -> Option<&NodeId> {
+
        match self {
+
            Peer::Connected { nid, .. } | Peer::Disconnecting { nid: Some(nid), .. } => Some(nid),
+
            Peer::Disconnecting { nid: None, .. } => None,
+
        }
+
    }
+

+
    fn link(&self) -> Link {
+
        match self {
+
            Peer::Connected { link, .. } => *link,
+
            Peer::Disconnecting { link, .. } => *link,
+
        }
+
    }
+

+
    /// Connected peer.
+
    fn connected(nid: NodeId, addr: NetAddr<HostName>, link: Link) -> Self {
+
        Self::Connected {
+
            link,
+
            addr,
+
            nid,
+
            inbox: Deserializer::default(),
+
            streams: Streams::new(link),
+
        }
+
    }
+
}
+

+
/// Holds connected peers. Newtype over a map keyed by the reactor's
/// [`ResourceId`].
struct Peers(RandomMap<ResourceId, Peer>);
+

+
impl Peers {
+
    fn get_mut(&mut self, id: &ResourceId) -> Option<&mut Peer> {
+
        self.0.get_mut(id)
+
    }
+

+
    fn entry(&mut self, id: ResourceId) -> Entry<ResourceId, Peer> {
+
        self.0.entry(id)
+
    }
+

+
    fn insert(&mut self, id: ResourceId, peer: Peer) {
+
        if self.0.insert(id, peer).is_some() {
+
            log::warn!(target: "wire", "Replacing existing peer id={id}");
+
        }
+
    }
+

+
    fn remove(&mut self, id: &ResourceId) -> Option<Peer> {
+
        self.0.remove(id)
+
    }
+

+
    fn lookup(&self, node_id: &NodeId) -> Option<(ResourceId, &Peer)> {
+
        self.0
+
            .iter()
+
            .find(|(_, peer)| peer.id() == Some(node_id))
+
            .map(|(fd, peer)| (*fd, peer))
+
    }
+

+
    fn lookup_mut(&mut self, node_id: &NodeId) -> Option<(ResourceId, &mut Peer)> {
+
        self.0
+
            .iter_mut()
+
            .find(|(_, peer)| peer.id() == Some(node_id))
+
            .map(|(fd, peer)| (*fd, peer))
+
    }
+

+
    fn active(&self) -> impl Iterator<Item = (ResourceId, &NodeId, Link)> {
+
        self.0.iter().filter_map(|(id, peer)| match peer {
+
            Peer::Connected { nid, link, .. } => Some((*id, nid, *link)),
+
            Peer::Disconnecting { .. } => None,
+
        })
+
    }
+

+
    fn connected(&self) -> impl Iterator<Item = (ResourceId, &NodeId)> {
+
        self.0.iter().filter_map(|(id, peer)| {
+
            if let Peer::Connected { nid, .. } = peer {
+
                Some((*id, nid))
+
            } else {
+
                None
+
            }
+
        })
+
    }
+

+
    fn iter(&self) -> impl Iterator<Item = &Peer> {
+
        self.0.values()
+
    }
+
}
+

+
/// Wire protocol implementation for a set of peers.
///
/// Sits between the I/O reactor and the backing [`Service`]: raw session
/// events are translated into service calls, and service/worker output is
/// queued as reactor actions.
pub struct Wire<D, S, G: crypto::signature::Signer<crypto::Signature> + Ecdh> {
    /// Backing service instance.
    service: Service<D, S, G>,
    /// Worker pool interface.
    worker: chan::Sender<Task>,
    /// Used for authentication.
    signer: Device<G>,
    /// Node metrics.
    metrics: service::Metrics,
    /// Internal queue of actions to send to the reactor.
    actions: VecDeque<Action<G>>,
    /// Outbound attempted peers without a session, keyed by socket fd.
    outbound: RandomMap<RawFd, Outbound>,
    /// Inbound peers without a session, keyed by socket fd.
    inbound: RandomMap<RawFd, Inbound>,
    /// Listening addresses that are not yet registered with the reactor.
    listening: RandomMap<RawFd, net::SocketAddr>,
    /// Peer (established) sessions, keyed by reactor resource id.
    peers: Peers,
}
+

+
impl<D, S, G> Wire<D, S, G>
where
    D: service::Store,
    S: WriteStorage + 'static,
    G: crypto::signature::Signer<crypto::Signature> + Ecdh<Pk = NodeId>,
{
    /// Create a new wire handler around an already-started service.
    ///
    /// # Panics
    /// Panics if the service has not been started.
    pub fn new(service: Service<D, S, G>, worker: chan::Sender<Task>, signer: Device<G>) -> Self {
        assert!(service.started().is_some(), "Service must be initialized");

        Self {
            service,
            worker,
            signer,
            metrics: Metrics::default(),
            actions: VecDeque::new(),
            inbound: RandomMap::default(),
            outbound: RandomMap::default(),
            listening: RandomMap::default(),
            peers: Peers(RandomMap::default()),
        }
    }

    /// Queue a listener socket for registration with the reactor.
    /// The service is notified once registration completes (see
    /// `handle_registered`).
    pub fn listen(&mut self, socket: NetAccept<WireSession<G>>) {
        self.listening
            .insert(socket.as_raw_fd(), socket.local_addr());
        self.actions.push_back(Action::RegisterListener(socket));
    }

    /// Schedule a peer for disconnection and queue the transport
    /// unregistration with the reactor.
    ///
    /// Returns the node id and link direction of the affected connection,
    /// if known; `None` when the peer cannot be identified (e.g. an
    /// unestablished inbound connection).
    fn disconnect(&mut self, id: ResourceId, reason: DisconnectReason) -> Option<(NodeId, Link)> {
        match self.peers.entry(id) {
            Entry::Vacant(_) => {
                // Connecting peer with no session.
                log::debug!(target: "wire", "Disconnecting pending peer with id={id}: {reason}");
                self.actions.push_back(Action::UnregisterTransport(id));

                // Check for attempted outbound connections. Unestablished inbound connections don't
                // have an NID yet.
                self.outbound
                    .values()
                    .find(|o| o.id == Some(id))
                    .map(|o| (o.nid, Link::Outbound))
            }
            Entry::Occupied(mut e) => match e.get_mut() {
                Peer::Disconnecting { nid, link, .. } => {
                    // Idempotent: a repeated disconnect only logs.
                    log::error!(target: "wire", "Peer with id={id} is already disconnecting");

                    nid.map(|n| (n, *link))
                }
                Peer::Connected {
                    nid, streams, link, ..
                } => {
                    log::debug!(target: "wire", "Disconnecting peer with id={id}: {reason}");
                    let nid = *nid;
                    let link = *link;

                    // Close worker streams before transitioning state, so
                    // workers aren't left blocked on a dead connection.
                    streams.shutdown();
                    e.insert(Peer::Disconnecting {
                        nid: Some(nid),
                        link,
                        reason,
                    });
                    self.actions.push_back(Action::UnregisterTransport(id));

                    Some((nid, link))
                }
            },
        }
    }

    /// Handle a fetch result coming back from the worker pool: close the
    /// associated stream (sending `close` to the remote if we got there
    /// first) and, for fetches we initiated, report the result to the
    /// service.
    fn worker_result(&mut self, task: TaskResult) {
        log::debug!(
            target: "wire",
            "Received fetch result from worker for stream {}, remote {}: {:?}",
            task.stream, task.remote, task.result
        );

        let nid = task.remote;
        let Some((fd, peer)) = self.peers.lookup_mut(&nid) else {
            log::warn!(target: "wire", "Peer {nid} not found; ignoring fetch result");
            return;
        };

        if let Peer::Connected { link, streams, .. } = peer {
            // Nb. It's possible that the stream would already be unregistered if we received an
            // early "close" from the remote. Otherwise, we unregister it here and send the "close"
            // ourselves.
            if let Some(s) = streams.unregister(&task.stream) {
                log::debug!(
                    target: "wire", "Stream {} of {} closing with {} byte(s) sent and {} byte(s) received",
                    task.stream, task.remote, s.sent_bytes, s.received_bytes
                );
                let frame = Frame::<service::Message>::control(
                    *link,
                    frame::Control::Close {
                        stream: task.stream,
                    },
                );
                self.actions.push_back(Action::Send(fd, frame.to_bytes()));
            }
        } else {
            // NOTE(review): this comment used to say the service should still
            // be told about the fetch result, but the code below *does*
            // return, so `FetchResult::Initiator` results are dropped for
            // disconnecting peers — confirm which behavior is intended.
            log::warn!(target: "wire", "Peer {nid} is not connected; ignoring fetch result");
            return;
        };

        // Only call into the service if we initiated this fetch.
        match task.result {
            FetchResult::Initiator { rid, result } => {
                self.service.fetched(rid, nid, result);
            }
            FetchResult::Responder { rid, result } => {
                if let Some(rid) = rid {
                    if let Some(err) = result.err() {
                        log::info!(target: "wire", "Peer {nid} failed to fetch {rid} from us: {err}");
                    } else {
                        log::info!(target: "wire", "Peer {nid} fetched {rid} from us successfully");
                    }
                }
            }
        }
    }

    /// Drain a stream's worker channel and queue the pending data/control
    /// frames for sending to the remote.
    fn flush(&mut self, remote: NodeId, stream: StreamId) {
        let Some((fd, peer)) = self.peers.lookup_mut(&remote) else {
            log::warn!(target: "wire", "Peer {remote} is not known; ignoring flush");
            return;
        };
        let Peer::Connected { streams, link, .. } = peer else {
            log::warn!(target: "wire", "Peer {remote} is not connected; ignoring flush");
            return;
        };
        let Some(s) = streams.get_mut(&stream) else {
            log::debug!(target: "wire", "Stream {stream} cannot be found; ignoring flush");
            return;
        };
        let metrics = self.metrics.peer(remote);

        for data in s.channels.try_iter() {
            let frame = match data {
                ChannelEvent::Data(data) => {
                    metrics.sent_git_bytes += data.len();
                    metrics.sent_bytes += data.len();
                    Frame::<service::Message>::git(stream, data)
                }
                ChannelEvent::Close => Frame::control(*link, frame::Control::Close { stream }),
                ChannelEvent::Eof => Frame::control(*link, frame::Control::Eof { stream }),
            };
            self.actions
                .push_back(reactor::Action::Send(fd, frame.to_bytes()));
        }
    }

    /// Clean up pre-session state for a transport that disconnected before
    /// the handshake completed. Only outbound attempts are reported to the
    /// service, since inbound peers have no node id yet.
    fn cleanup(&mut self, id: ResourceId, fd: RawFd) {
        if self.inbound.remove(&fd).is_some() {
            log::debug!(target: "wire", "Cleaning up inbound peer state with id={id} (fd={fd})");
        } else if let Some(outbound) = self.outbound.remove(&fd) {
            log::debug!(target: "wire", "Cleaning up outbound peer state with id={id} (fd={fd})");
            self.service.disconnected(
                outbound.nid,
                Link::Outbound,
                &DisconnectReason::connection(),
            );
        } else {
            log::debug!(target: "wire", "Tried to cleanup unknown peer with id={id} (fd={fd})");
        }
    }
}
+

+
impl<D, S, G> reactor::Handler for Wire<D, S, G>
+
where
+
    D: service::Store + Send,
+
    S: WriteStorage + Send + 'static,
+
    G: crypto::signature::Signer<crypto::Signature> + Ecdh<Pk = NodeId> + Clone + Send,
+
{
+
    type Listener = NetAccept<WireSession<G>>;
+
    type Transport = NetTransport<WireSession<G>>;
+
    type Command = Control;
+

+
    fn tick(&mut self, time: Timestamp) {
+
        self.metrics.open_channels = self
+
            .peers
+
            .iter()
+
            .filter_map(|p| {
+
                if let Peer::Connected { streams, .. } = p {
+
                    Some(streams.streams.len())
+
                } else {
+
                    None
+
                }
+
            })
+
            .sum();
+
        self.metrics.worker_queue_size = self.worker.len();
+
        self.service.tick(
+
            LocalTime::from_millis(time.as_millis() as u128),
+
            &self.metrics,
+
        );
+
    }
+

+
    fn handle_timer(&mut self) {
+
        self.service.wake();
+
    }
+

+
    /// Handle an event on a listening socket: for accepted connections,
    /// validate the remote address, ask the service whether to accept,
    /// build the handshake session and queue transport registration.
    /// Dropping `connection` closes the underlying socket.
    fn handle_listener_event(
        &mut self,
        _: ResourceId, // Nb. This is the ID of the listener socket.
        event: ListenerEvent<WireSession<G>>,
        _: Timestamp,
    ) {
        match event {
            ListenerEvent::Accepted(connection) => {
                let Ok(remote) = connection.remote_addr() else {
                    log::warn!(target: "wire", "Accepted connection doesn't have remote address; dropping..");
                    drop(connection);

                    return;
                };
                // Only plain IP remotes are expected on the listener.
                let InetHost::Ip(ip) = remote.host else {
                    log::error!(target: "wire", "Unexpected host type for inbound connection {remote}; dropping..");
                    drop(connection);

                    return;
                };
                let fd = connection.as_raw_fd();
                log::debug!(target: "wire", "Inbound connection from {remote} (fd={fd})..");

                // If the service doesn't want to accept this connection,
                // we drop the connection here, which disconnects the socket.
                if !self.service.accepted(ip) {
                    log::debug!(target: "wire", "Rejecting inbound connection from {ip} (fd={fd})..");
                    drop(connection);

                    return;
                }

                // Wrap the raw connection in a handshake (Noise) session.
                let session = match accept::<G>(
                    remote.clone().into(),
                    connection,
                    self.signer.clone().into_inner(),
                ) {
                    Ok(s) => s,
                    Err(e) => {
                        log::error!(target: "wire", "Error creating session for {ip}: {e}");
                        return;
                    }
                };
                let transport = match NetTransport::with_session(
                    session,
                    netservices::Direction::Inbound,
                ) {
                    Ok(transport) => transport,
                    Err(err) => {
                        log::error!(target: "wire", "Failed to create transport for accepted connection: {err}");
                        return;
                    }
                };
                log::debug!(target: "wire", "Accepted inbound connection from {remote} (fd={fd})..");

                // Track the peer as pending-inbound until the handshake
                // completes (see `handle_transport_event`).
                self.inbound.insert(
                    fd,
                    Inbound {
                        id: None,
                        addr: remote.into(),
                    },
                );
                self.actions
                    .push_back(reactor::Action::RegisterTransport(transport))
            }
            ListenerEvent::Failure(err) => {
                log::error!(target: "wire", "Error listening for inbound connections: {err}");
            }
        }
    }
+

+
    fn handle_registered(&mut self, fd: RawFd, id: ResourceId, typ: ResourceType) {
+
        match typ {
+
            ResourceType::Listener => {
+
                if let Some(local_addr) = self.listening.remove(&fd) {
+
                    self.service.listening(local_addr);
+
                }
+
            }
+
            ResourceType::Transport => {
+
                if let Some(outbound) = self.outbound.get_mut(&fd) {
+
                    log::debug!(target: "wire", "Outbound peer resource registered for {} with id={id} (fd={fd})", outbound.nid);
+
                    outbound.id = Some(id);
+
                } else if let Some(inbound) = self.inbound.get_mut(&fd) {
+
                    log::debug!(target: "wire", "Inbound peer resource registered with id={id} (fd={fd})");
+
                    inbound.id = Some(id);
+
                } else {
+
                    log::warn!(target: "wire", "Unknown peer registered with fd={fd} and id={id}");
+
                }
+
            }
+
        }
+
    }
+

+
    fn handle_transport_event(
+
        &mut self,
+
        id: ResourceId,
+
        event: SessionEvent<WireSession<G>>,
+
        _: Timestamp,
+
    ) {
+
        match event {
+
            SessionEvent::Established(fd, ProtocolArtifact { state, .. }) => {
+
                // SAFETY: With the NoiseXK protocol, there is always a remote static key.
+
                let nid: NodeId = state.remote_static_key.unwrap();
+
                // Make sure we don't try to connect to ourselves by mistake.
+
                if &nid == self.signer.public_key() {
+
                    log::error!(target: "wire", "Self-connection detected, disconnecting..");
+
                    self.disconnect(id, DisconnectReason::SelfConnection);
+

+
                    return;
+
                }
+
                let (addr, link) = if let Some(peer) = self.inbound.remove(&fd) {
+
                    self.metrics.peer(nid).inbound_connection_attempts += 1;
+
                    (peer.addr, Link::Inbound)
+
                } else if let Some(peer) = self.outbound.remove(&fd) {
+
                    assert_eq!(nid, peer.nid);
+
                    (peer.addr, Link::Outbound)
+
                } else {
+
                    log::error!(target: "wire", "Session for {nid} (id={id}) not found");
+
                    return;
+
                };
+
                log::debug!(
+
                    target: "wire",
+
                    "Session established with {nid} (id={id}) (fd={fd}) ({})",
+
                    if link.is_inbound() { "inbound" } else { "outbound" }
+
                );
+

+
                // Connections to close.
+
                let mut disconnect = Vec::new();
+

+
                // Handle conflicting connections.
+
                // This is typical when nodes have mutually configured their nodes to connect to
+
                // each other on startup. We handle this by deterministically choosing one node
+
                // whos outbound connection is the one that is kept. The other connections are
+
                // dropped.
+
                {
+
                    // Whether we have precedence in case of conflicting connections.
+
                    // Having precedence means that our outbound connection will win over
+
                    // the other node's outbound connection.
+
                    let precedence = *self.signer.public_key() > nid;
+

+
                    // Pre-existing connections that conflict with this newly established session.
+
                    // Note that we can't know whether a connection is conflicting before we get the
+
                    // remote static key.
+
                    let mut conflicting = Vec::new();
+

+
                    // Active sessions with the same NID but a different Resource ID are conflicting.
+
                    conflicting.extend(
+
                        self.peers
+
                            .active()
+
                            .filter(|(c_id, d, _)| **d == nid && *c_id != id)
+
                            .map(|(c_id, _, link)| (c_id, link)),
+
                    );
+

+
                    // Outbound connection attempts with the same remote key but a different file
+
                    // descriptor are conflicting.
+
                    conflicting.extend(self.outbound.iter().filter_map(|(c_fd, other)| {
+
                        if other.nid == nid && *c_fd != fd {
+
                            other.id.map(|c_id| (c_id, Link::Outbound))
+
                        } else {
+
                            None
+
                        }
+
                    }));
+

+
                    for (c_id, c_link) in conflicting {
+
                        // If we have precedence, the inbound connection is closed.
+
                        // In the case where both connections are inbound or outbound,
+
                        // we close the newer connection, ie. the one with the higher
+
                        // resource id.
+
                        let close = match (link, c_link) {
+
                            (Link::Inbound, Link::Outbound) => {
+
                                if precedence {
+
                                    id
+
                                } else {
+
                                    c_id
+
                                }
+
                            }
+
                            (Link::Outbound, Link::Inbound) => {
+
                                if precedence {
+
                                    c_id
+
                                } else {
+
                                    id
+
                                }
+
                            }
+
                            (Link::Inbound, Link::Inbound) => id.max(c_id),
+
                            (Link::Outbound, Link::Outbound) => id.max(c_id),
+
                        };
+

+
                        log::warn!(
+
                            target: "wire", "Established session (id={id}) conflicts with existing session for {nid} (id={c_id})"
+
                        );
+
                        disconnect.push(close);
+
                    }
+
                }
+
                for id in &disconnect {
+
                    log::warn!(
+
                        target: "wire", "Closing conflicting session (id={id}) with {nid}.."
+
                    );
+
                    // Disconnect and return the associated NID of the peer, if available.
+
                    if let Some((nid, link)) = self.disconnect(*id, DisconnectReason::Conflict) {
+
                        // We disconnect the session eagerly because otherwise we will get the new
+
                        // `connected` event before the `disconnect`, resulting in a duplicate
+
                        // connection.
+
                        self.service
+
                            .disconnected(nid, link, &DisconnectReason::Conflict);
+
                    }
+
                }
+
                if !disconnect.contains(&id) {
+
                    self.peers
+
                        .insert(id, Peer::connected(nid, addr.clone(), link));
+
                    self.service.connected(nid, addr.into(), link);
+
                }
+
            }
+
            SessionEvent::Data(data) => {
+
                if let Some(Peer::Connected {
+
                    nid,
+
                    inbox,
+
                    streams,
+
                    ..
+
                }) = self.peers.get_mut(&id)
+
                {
+
                    let metrics = self.metrics.peer(*nid);
+
                    metrics.received_bytes += data.len();
+

+
                    if inbox.input(&data).is_err() {
+
                        log::error!(target: "wire", "Maximum inbox size ({MAX_INBOX_SIZE}) reached for peer {nid}");
+
                        log::error!(target: "wire", "Unable to process messages fast enough for peer {nid}; disconnecting..");
+
                        self.disconnect(id, DisconnectReason::Session(session::Error::Misbehavior));
+

+
                        return;
+
                    }
+

+
                    loop {
+
                        match inbox.deserialize_next() {
+
                            Ok(Some(Frame {
+
                                data: FrameData::Control(frame::Control::Open { stream }),
+
                                ..
+
                            })) => {
+
                                log::debug!(target: "wire", "Received `open` command for stream {stream} from {nid}");
+
                                metrics.streams_opened += 1;
+
                                metrics.received_fetch_requests += 1;
+
                                let reader_limit = self.service.config().limits.fetch_pack_receive;
+
                                let Some(channels) = streams.register(
+
                                    stream,
+
                                    ChannelsConfig::new(FETCH_TIMEOUT)
+
                                        .with_reader_limit(reader_limit),
+
                                ) else {
+
                                    log::warn!(target: "wire", "Peer attempted to open already-open stream stream {stream}");
+
                                    continue;
+
                                };
+

+
                                let task = Task {
+
                                    fetch: FetchRequest::Responder {
+
                                        remote: *nid,
+
                                        emitter: self.service.emitter(),
+
                                    },
+
                                    stream,
+
                                    channels,
+
                                };
+
                                if let Err(e) = self.worker.try_send(task) {
+
                                    log::error!(
+
                                        target: "wire",
+
                                        "Worker pool failed to accept incoming fetch request: {e}"
+
                                    );
+
                                }
+
                            }
+
                            Ok(Some(Frame {
+
                                data: FrameData::Control(frame::Control::Eof { stream }),
+
                                ..
+
                            })) => {
+
                                if let Some(s) = streams.get(&stream) {
+
                                    log::debug!(target: "wire", "Received `end-of-file` on stream {stream} from {nid}");
+

+
                                    if s.channels.send(ChannelEvent::Eof).is_err() {
+
                                        log::error!(target: "wire", "Worker is disconnected; cannot send `EOF`");
+
                                    }
+
                                } else {
+
                                    log::debug!(target: "wire", "Ignoring frame on closed or unknown stream {stream}");
+
                                }
+
                            }
+
                            Ok(Some(Frame {
+
                                data: FrameData::Control(frame::Control::Close { stream }),
+
                                ..
+
                            })) => {
+
                                log::debug!(target: "wire", "Received `close` command for stream {stream} from {nid}");
+

+
                                if let Some(s) = streams.unregister(&stream) {
+
                                    log::debug!(
+
                                        target: "wire",
+
                                        "Stream {stream} of {nid} closed with {} byte(s) sent and {} byte(s) received",
+
                                        s.sent_bytes, s.received_bytes
+
                                    );
+
                                    s.channels.close().ok();
+
                                }
+
                            }
+
                            Ok(Some(Frame {
+
                                data: FrameData::Gossip(msg),
+
                                ..
+
                            })) => {
+
                                metrics.received_gossip_messages += 1;
+
                                self.service.received_message(*nid, msg);
+
                            }
+
                            Ok(Some(Frame {
+
                                stream,
+
                                data: FrameData::Git(data),
+
                                ..
+
                            })) => {
+
                                if let Some(s) = streams.get_mut(&stream) {
+
                                    metrics.received_git_bytes += data.len();
+

+
                                    if s.channels.send(ChannelEvent::Data(data)).is_err() {
+
                                        log::error!(target: "wire", "Worker is disconnected; cannot send data");
+
                                    }
+
                                } else {
+
                                    log::debug!(target: "wire", "Ignoring frame on closed or unknown stream {stream}");
+
                                }
+
                            }
+
                            Ok(None) => {
+
                                // Buffer is empty, or message isn't complete.
+
                                break;
+
                            }
+
                            Err(e) => {
+
                                log::error!(target: "wire", "Invalid gossip message from {nid}: {e}");
+

+
                                if !inbox.is_empty() {
+
                                    log::debug!(target: "wire", "Dropping read buffer for {nid} with {} bytes", inbox.len());
+
                                }
+
                                self.disconnect(
+
                                    id,
+
                                    DisconnectReason::Session(session::Error::Misbehavior),
+
                                );
+
                                break;
+
                            }
+
                        }
+
                    }
+
                } else {
+
                    log::warn!(target: "wire", "Dropping message from unconnected peer (id={id})");
+
                }
+
            }
+
            SessionEvent::Terminated(err) => {
+
                self.disconnect(id, DisconnectReason::Connection(Arc::new(err)));
+
            }
+
        }
+
    }
+

+
    fn handle_command(&mut self, cmd: Self::Command) {
+
        match cmd {
+
            Control::User(cmd) => self.service.command(cmd),
+
            Control::Worker(result) => self.worker_result(result),
+
            Control::Flush { remote, stream } => self.flush(remote, stream),
+
        }
+
    }
+

+
    /// Handle a reactor-level error. Poll and listener failures are only
    /// logged (see TODOs); a transport disconnect tears down the peer's
    /// state and notifies the service.
    fn handle_error(
        &mut self,
        err: reactor::Error<NetAccept<WireSession<G>>, NetTransport<WireSession<G>>>,
    ) {
        match err {
            reactor::Error::Poll(err) => {
                // TODO: This should be a fatal error, there's nothing we can do here.
                log::error!(target: "wire", "Can't poll connections: {err}");
            }
            reactor::Error::ListenerDisconnect(id, _) => {
                // TODO: This should be a fatal error, there's nothing we can do here.
                log::error!(target: "wire", "Listener {id} disconnected");
            }
            reactor::Error::TransportDisconnect(id, transport) => {
                let fd = transport.as_raw_fd();
                log::error!(target: "wire", "Peer id={id} (fd={fd}) disconnected");

                // We're dropping the TCP connection here.
                drop(transport);

                // The peer transport is already disconnected and removed from the reactor;
                // therefore there is no need to initiate a disconnection. We simply remove
                // the peer from the map.
                match self.peers.remove(&id) {
                    Some(mut peer) => {
                        // Release any workers blocked on this connection.
                        if let Peer::Connected { streams, .. } = &mut peer {
                            streams.shutdown();
                        }

                        if let Some(id) = peer.id() {
                            self.service.disconnected(
                                *id,
                                peer.link(),
                                &DisconnectReason::connection(),
                            );
                        } else {
                            log::debug!(target: "wire", "Inbound disconnection before handshake; ignoring..")
                        }
                    }
                    // No established session: clean up pre-handshake state.
                    None => self.cleanup(id, fd),
                }
            }
        }
    }
+

+
    fn handover_listener(&mut self, id: ResourceId, _listener: Self::Listener) {
+
        log::error!(target: "wire", "Listener handover is not supported (id={id})");
+
    }
+

+
    fn handover_transport(&mut self, id: ResourceId, transport: Self::Transport) {
+
        let fd = transport.as_raw_fd();
+

+
        match self.peers.entry(id) {
+
            Entry::Occupied(e) => {
+
                match e.get() {
+
                    Peer::Disconnecting {
+
                        nid, reason, link, ..
+
                    } => {
+
                        log::debug!(target: "wire", "Transport handover for disconnecting peer with id={id} (fd={fd})");
+

+
                        // Disconnect TCP stream.
+
                        drop(transport);
+

+
                        // If there is no NID, the service is not aware of the peer.
+
                        if let Some(nid) = nid {
+
                            // In the case of a conflicting connection, there will be two resources
+
                            // for the peer. However, at the service level, there is only one, and
+
                            // it is identified by NID.
+
                            //
+
                            // Therefore, we specify which of the connections we're closing by
+
                            // passing the `link`.
+
                            self.service.disconnected(*nid, *link, reason);
+
                        }
+
                        e.remove();
+
                    }
+
                    Peer::Connected { nid, .. } => {
+
                        panic!("Wire::handover_transport: Unexpected handover of connected peer {} with id={id} (fd={fd})", nid);
+
                    }
+
                }
+
            }
+
            Entry::Vacant(_) => self.cleanup(id, fd),
+
        }
+
    }
+
}
+

+
impl<D, S, G> Iterator for Wire<D, S, G>
+
where
+
    D: service::Store,
+
    S: WriteStorage + 'static,
+
    G: crypto::signature::Signer<crypto::Signature> + Ecdh<Pk = NodeId> + Clone,
+
{
+
    type Item = Action<G>;
+

+
    fn next(&mut self) -> Option<Self::Item> {
+
        while let Some(ev) = self.service.next() {
+
            match ev {
+
                Io::Write(node_id, msgs) => {
+
                    let (fd, link) = match self.peers.lookup(&node_id) {
+
                        Some((fd, Peer::Connected { link, .. })) => (fd, *link),
+
                        Some((_, peer)) => {
+
                            // If the peer is disconnected by the wire protocol, the service may
+
                            // not be aware of this yet, and may continue to write messages to it.
+
                            log::debug!(target: "wire", "Dropping {} message(s) to {node_id} ({peer:?})", msgs.len());
+
                            continue;
+
                        }
+
                        None => {
+
                            log::error!(target: "wire", "Dropping {} message(s) to {node_id}: unknown peer", msgs.len());
+
                            continue;
+
                        }
+
                    };
+
                    log::trace!(
+
                        target: "wire", "Writing {} message(s) to {}", msgs.len(), node_id
+
                    );
+
                    let mut data = Vec::new();
+
                    let metrics = self.metrics.peer(node_id);
+
                    metrics.sent_gossip_messages += msgs.len();
+

+
                    for msg in msgs {
+
                        Frame::gossip(link, msg)
+
                            .encode(&mut data)
+
                            .expect("in-memory writes never fail");
+
                    }
+
                    metrics.sent_bytes += data.len();
+

+
                    self.actions.push_back(reactor::Action::Send(fd, data));
+
                }
+
                Io::Connect(node_id, addr) => {
+
                    if self.peers.connected().any(|(_, id)| id == &node_id) {
+
                        log::error!(
+
                            target: "wire",
+
                            "Attempt to connect to already connected peer {node_id}"
+
                        );
+
                        // FIXME: The problem here is the session will stay in "initial" state,
+
                        // because it can't transition to attempted.
+
                        continue;
+
                    }
+
                    self.service.attempted(node_id, addr.clone());
+
                    self.metrics.peer(node_id).outbound_connection_attempts += 1;
+

+
                    match dial::<G>(
+
                        addr.to_inner(),
+
                        node_id,
+
                        self.signer.clone().into_inner(),
+
                        self.service.config(),
+
                    )
+
                    .and_then(|session| {
+
                        NetTransport::<WireSession<G>>::with_session(
+
                            session,
+
                            netservices::Direction::Outbound,
+
                        )
+
                    }) {
+
                        Ok(transport) => {
+
                            self.outbound.insert(
+
                                transport.as_raw_fd(),
+
                                Outbound {
+
                                    id: None,
+
                                    nid: node_id,
+
                                    addr: addr.to_inner(),
+
                                },
+
                            );
+
                            log::debug!(
+
                                target: "wire",
+
                                "Registering outbound transport for {node_id} (fd={})..",
+
                                transport.as_raw_fd()
+
                            );
+
                            self.actions
+
                                .push_back(reactor::Action::RegisterTransport(transport));
+
                        }
+
                        Err(err) => {
+
                            log::error!(target: "wire", "Error establishing connection to {addr}: {err}");
+

+
                            self.service.disconnected(
+
                                node_id,
+
                                Link::Outbound,
+
                                &DisconnectReason::Dial(Arc::new(err)),
+
                            );
+
                        }
+
                    }
+
                }
+
                Io::Disconnect(nid, reason) => {
+
                    if let Some((id, Peer::Connected { .. })) = self.peers.lookup(&nid) {
+
                        if let Some((nid, _)) = self.disconnect(id, reason) {
+
                            self.metrics.peer(nid).disconnects += 1;
+
                        }
+
                    } else {
+
                        log::warn!(target: "wire", "Peer {nid} is not connected: ignoring disconnect");
+
                    }
+
                }
+
                Io::Wakeup(d) => {
+
                    self.actions.push_back(reactor::Action::SetTimer(d.into()));
+
                }
+
                Io::Fetch {
+
                    rid,
+
                    remote,
+
                    timeout,
+
                    reader_limit,
+
                    refs_at,
+
                } => {
+
                    log::trace!(target: "wire", "Processing fetch for {rid} from {remote}..");
+

+
                    let Some((fd, Peer::Connected { link, streams, .. })) =
+
                        self.peers.lookup_mut(&remote)
+
                    else {
+
                        // Nb. It's possible that a peer is disconnected while an `Io::Fetch`
+
                        // is in the service's i/o buffer. Since the service may not purge the
+
                        // buffer on disconnect, we should just ignore i/o actions that don't
+
                        // have a connected peer.
+
                        log::error!(target: "wire", "Peer {remote} is not connected: dropping fetch");
+
                        continue;
+
                    };
+
                    let (stream, channels) =
+
                        streams.open(ChannelsConfig::new(timeout).with_reader_limit(reader_limit));
+

+
                    log::debug!(target: "wire", "Opened new stream with id {stream} for {rid} and remote {remote}");
+

+
                    let link = *link;
+
                    let task = Task {
+
                        fetch: FetchRequest::Initiator {
+
                            rid,
+
                            remote,
+
                            refs_at,
+
                        },
+
                        stream,
+
                        channels,
+
                    };
+

+
                    if !self.worker.is_empty() {
+
                        log::warn!(
+
                            target: "wire",
+
                            "Worker pool is busy: {} tasks pending, fetch requests may be delayed", self.worker.len()
+
                        );
+
                    }
+
                    if let Err(e) = self.worker.try_send(task) {
+
                        log::error!(
+
                            target: "wire",
+
                            "Worker pool failed to accept outgoing fetch request: {e}"
+
                        );
+
                    }
+
                    let metrics = self.metrics.peer(remote);
+
                    metrics.streams_opened += 1;
+
                    metrics.sent_fetch_requests += 1;
+

+
                    self.actions.push_back(Action::Send(
+
                        fd,
+
                        Frame::<service::Message>::control(link, frame::Control::Open { stream })
+
                            .to_bytes(),
+
                    ));
+
                }
+
            }
+
        }
+
        self.actions.pop_front()
+
    }
+
}
+

+
/// Establish a new outgoing connection.
+
pub fn dial<G: Ecdh<Pk = NodeId>>(
+
    remote_addr: NetAddr<HostName>,
+
    remote_id: <G as EcSk>::Pk,
+
    signer: G,
+
    config: &radicle::node::Config,
+
) -> io::Result<WireSession<G>> {
+
    // Determine what address to establish a TCP connection with, given the remote peer
+
    // address and our node configuration.
+
    let inet_addr: NetAddr<InetHost> = match (&remote_addr.host, config.proxy) {
+
        // For IP and DNS addresses, use the global proxy if set, otherwise use the address as-is.
+
        (HostName::Ip(_), Some(proxy)) => proxy.into(),
+
        (HostName::Ip(ip), None) => NetAddr::new(InetHost::Ip(*ip), remote_addr.port),
+
        (HostName::Dns(_), Some(proxy)) => proxy.into(),
+
        (HostName::Dns(dns), None) => NetAddr::new(InetHost::Dns(dns.clone()), remote_addr.port),
+
        // For onion addresses, handle with care.
+
        (HostName::Tor(onion), proxy) => match config.onion {
+
            // In onion proxy mode, simply use the configured proxy address.
+
            // This takes precedence over any global proxy.
+
            Some(AddressConfig::Proxy { address }) => address.into(),
+
            // In "forward" mode, if a global proxy is set, we use that, otherwise
+
            // we treat `.onion` addresses as regular DNS names.
+
            Some(AddressConfig::Forward) => {
+
                if let Some(proxy) = proxy {
+
                    proxy.into()
+
                } else {
+
                    NetAddr::new(InetHost::Dns(onion.to_string()), remote_addr.port)
+
                }
+
            }
+
            // If onion address support isn't configured, refuse to connect.
+
            None => {
+
                return Err(io::Error::new(
+
                    io::ErrorKind::Unsupported,
+
                    "no configuration found for .onion addresses",
+
                ));
+
            }
+
        },
+
        _ => {
+
            return Err(io::Error::new(
+
                io::ErrorKind::Unsupported,
+
                "unsupported remote address type",
+
            ));
+
        }
+
    };
+
    // Nb. This timeout is currently not used by the underlying library due to the
+
    // `socket2` library not supporting non-blocking connect with timeout.
+
    let connection = net::TcpStream::connect_nonblocking(inet_addr, DEFAULT_DIAL_TIMEOUT)?;
+
    // Whether to tunnel regular connections through the proxy.
+
    let force_proxy = config.proxy.is_some();
+

+
    session::<G>(
+
        remote_addr,
+
        Some(remote_id),
+
        connection,
+
        force_proxy,
+
        signer,
+
    )
+
}
+

+
/// Accept a new connection.
+
pub fn accept<G: Ecdh<Pk = NodeId>>(
+
    remote_addr: NetAddr<HostName>,
+
    connection: net::TcpStream,
+
    signer: G,
+
) -> io::Result<WireSession<G>> {
+
    session::<G>(remote_addr, None, connection, false, signer)
+
}
+

+
/// Create a new [`WireSession`].
+
fn session<G: Ecdh<Pk = NodeId>>(
+
    remote_addr: NetAddr<HostName>,
+
    remote_id: Option<NodeId>,
+
    connection: net::TcpStream,
+
    force_proxy: bool,
+
    signer: G,
+
) -> io::Result<WireSession<G>> {
+
    // There are issues with setting TCP_NODELAY on WSL. Not a big deal.
+
    if let Err(e) = connection.set_nodelay(true) {
+
        log::warn!(target: "wire", "Unable to set TCP_NODELAY on fd {}: {e}", connection.as_raw_fd());
+
    }
+
    connection.set_read_timeout(Some(DEFAULT_CONNECTION_TIMEOUT))?;
+
    connection.set_write_timeout(Some(DEFAULT_CONNECTION_TIMEOUT))?;
+

+
    let sock = socket2::Socket::from(connection);
+
    let ka = socket2::TcpKeepalive::new()
+
        .with_time(time::Duration::from_secs(30))
+
        .with_interval(time::Duration::from_secs(10))
+
        .with_retries(3);
+
    if let Err(e) = sock.set_tcp_keepalive(&ka) {
+
        log::warn!(target: "wire", "Unable to set TCP_KEEPALIVE on fd {}: {e}", sock.as_raw_fd());
+
    }
+

+
    let socks5 = socks5::Socks5::with(remote_addr, force_proxy);
+
    let proxy = Socks5Session::with(sock.into(), socks5);
+
    let pair = G::generate_keypair();
+
    let keyset = Keyset {
+
        e: pair.0,
+
        s: Some(signer),
+
        re: None,
+
        rs: remote_id,
+
    };
+
    let noise = NoiseState::initialize::<{ Sha256::OUTPUT_LEN }>(
+
        NOISE_XK,
+
        remote_id.is_some(),
+
        &[],
+
        keyset,
+
    );
+
    Ok(WireSession::with(proxy, noise))
+
}
+

+
#[cfg(test)]
+
mod test {
+
    use super::*;
+
    use crate::service::{Message, ZeroBytes};
+
    use crate::wire;
+
    use crate::wire::varint;
+

+
    #[test]
+
    fn test_pong_message_with_extension() {
+
        use radicle_protocol::deserializer;
+

+
        let mut stream = Vec::new();
+
        let pong = Message::Pong {
+
            zeroes: ZeroBytes::new(42),
+
        };
+
        frame::PROTOCOL_VERSION_STRING.encode(&mut stream).unwrap();
+
        frame::StreamId::gossip(Link::Outbound)
+
            .encode(&mut stream)
+
            .unwrap();
+

+
        // Serialize gossip message with some extension fields.
+
        let mut gossip = wire::serialize(&pong);
+
        String::from("extra").encode(&mut gossip).unwrap();
+
        48u8.encode(&mut gossip).unwrap();
+

+
        // Encode gossip message using the varint-prefix format into the stream.
+
        varint::payload::encode(&gossip, &mut stream).unwrap();
+

+
        let mut de = deserializer::Deserializer::<1024, Frame>::new(1024);
+
        de.input(&stream).unwrap();
+

+
        // The "pong" message decodes successfully, even though there is trailing data.
+
        assert_eq!(
+
            de.deserialize_next().unwrap().unwrap(),
+
            Frame::gossip(Link::Outbound, pong)
+
        );
+
        assert!(de.deserialize_next().unwrap().is_none());
+
        assert!(de.is_empty());
+
    }
+

+
    #[test]
+
    fn test_inventory_ann_with_extension() {
+
        use radicle_protocol::deserializer;
+

+
        #[derive(Debug)]
+
        struct MessageWithExt {
+
            msg: Message,
+
            ext: String,
+
        }
+

+
        impl wire::Encode for MessageWithExt {
+
            fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
+
                let mut n = self.msg.encode(writer)?;
+
                n += self.ext.encode(writer)?;
+

+
                Ok(n)
+
            }
+
        }
+

+
        impl wire::Decode for MessageWithExt {
+
            fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, wire::Error> {
+
                let msg = Message::decode(reader)?;
+
                let ext = String::decode(reader).unwrap_or_default();
+

+
                Ok(MessageWithExt { msg, ext })
+
            }
+
        }
+

+
        let rid = radicle::test::arbitrary::gen(1);
+
        let pk = radicle::test::arbitrary::gen(1);
+
        let sig: [u8; 64] = radicle::test::arbitrary::gen(1);
+

+
        // Message with extension.
+
        let mut stream = Vec::new();
+
        let ann = Message::announcement(
+
            pk,
+
            service::gossip::inventory(radicle::node::Timestamp::MAX, [rid]),
+
            radicle::crypto::Signature::from(sig),
+
        );
+
        let pong = Message::Pong {
+
            zeroes: ZeroBytes::new(42),
+
        };
+
        // Framed message with extension.
+
        frame::Frame::gossip(
+
            Link::Outbound,
+
            MessageWithExt {
+
                msg: ann.clone(),
+
                ext: String::from("extra"),
+
            },
+
        )
+
        .encode(&mut stream)
+
        .unwrap();
+
        // Pong message that comes after, without extension.
+
        frame::Frame::gossip(Link::Outbound, pong.clone())
+
            .encode(&mut stream)
+
            .unwrap();
+

+
        // First test deserializing using the message with extension type.
+
        {
+
            let mut de = deserializer::Deserializer::<1024, Frame<MessageWithExt>>::new(1024);
+
            de.input(&stream).unwrap();
+

+
            radicle::assert_matches!(
+
                de.deserialize_next().unwrap().unwrap().data,
+
                FrameData::Gossip(MessageWithExt {
+
                    msg,
+
                    ext,
+
                }) if msg == ann && ext == *"extra"
+
            );
+
            radicle::assert_matches!(
+
                de.deserialize_next().unwrap().unwrap().data,
+
                FrameData::Gossip(MessageWithExt {
+
                    msg,
+
                    ext,
+
                }) if msg == pong && ext.is_empty()
+
            );
+
            assert!(de.deserialize_next().unwrap().is_none());
+
            assert!(de.is_empty());
+
        }
+

+
        // Then test deserializing using the current message type without the extension.
+
        {
+
            let mut de = deserializer::Deserializer::<1024, Frame<Message>>::new(1024);
+
            de.input(&stream).unwrap();
+

+
            radicle::assert_matches!(
+
                de.deserialize_next().unwrap().unwrap().data,
+
                FrameData::Gossip(msg)
+
                if msg == ann
+
            );
+
            radicle::assert_matches!(
+
                de.deserialize_next().unwrap().unwrap().data,
+
                FrameData::Gossip(msg)
+
                if msg == pong
+
            );
+
            assert!(de.deserialize_next().unwrap().is_none());
+
            assert!(de.is_empty());
+
        }
+
    }
+
}
modified crates/radicle-node/src/worker.rs
@@ -5,22 +5,25 @@ mod upload_pack;
pub mod fetch;
pub mod garbage;

-
use std::io;
use std::path::PathBuf;

use crossbeam_channel as chan;

use radicle::identity::RepoId;
-
use radicle::node::{notifications, Event};
+
use radicle::node::notifications;
+
use radicle::node::policy::config as policy;
+
use radicle::node::policy::config::SeedingPolicy;
use radicle::prelude::NodeId;
use radicle::storage::refs::RefsAt;
use radicle::storage::{ReadRepository, ReadStorage};
use radicle::{cob, crypto, Storage};
use radicle_fetch::FetchLimit;

-
use crate::runtime::{thread, Emitter, Handle};
-
use crate::service::policy;
-
use crate::service::policy::SeedingPolicy;
+
pub use radicle_protocol::worker::{
+
    AuthorizationError, FetchError, FetchRequest, FetchResult, UploadError,
+
};
+

+
use crate::runtime::{thread, Handle};
use crate::wire::StreamId;

pub use channels::{ChannelEvent, Channels, ChannelsConfig};
@@ -39,110 +42,6 @@ pub struct Config {
    pub policies_db: PathBuf,
}

-
/// Error returned by fetch.
-
#[derive(thiserror::Error, Debug)]
-
pub enum FetchError {
-
    #[error("the 'git fetch' command failed with exit code '{code}'")]
-
    CommandFailed { code: i32 },
-
    #[error(transparent)]
-
    Io(#[from] io::Error),
-
    #[error(transparent)]
-
    Fetch(#[from] fetch::error::Fetch),
-
    #[error(transparent)]
-
    Handle(#[from] fetch::error::Handle),
-
    #[error(transparent)]
-
    Storage(#[from] radicle::storage::Error),
-
    #[error(transparent)]
-
    PolicyStore(#[from] radicle::node::policy::store::Error),
-
    #[error(transparent)]
-
    Policy(#[from] radicle_fetch::policy::error::Policy),
-
    #[error(transparent)]
-
    Blocked(#[from] radicle_fetch::policy::error::Blocked),
-
}
-

-
impl FetchError {
-
    /// Check if it's a timeout error.
-
    pub fn is_timeout(&self) -> bool {
-
        matches!(self, FetchError::Io(e) if e.kind() == io::ErrorKind::TimedOut)
-
    }
-
}
-

-
/// Error returned by fetch responder.
-
#[derive(thiserror::Error, Debug)]
-
pub enum UploadError {
-
    #[error("error parsing git command packet-line: {0}")]
-
    PacketLine(io::Error),
-
    #[error("error while performing git upload-pack: {0}")]
-
    UploadPack(io::Error),
-
    #[error(transparent)]
-
    Authorization(#[from] AuthorizationError),
-
}
-

-
#[derive(thiserror::Error, Debug)]
-
pub enum AuthorizationError {
-
    #[error("{0} is not authorized to fetch {1}")]
-
    Unauthorized(NodeId, RepoId),
-
    #[error(transparent)]
-
    PolicyStore(#[from] radicle::node::policy::store::Error),
-
    #[error(transparent)]
-
    Repository(#[from] radicle::storage::RepositoryError),
-
}
-

-
impl UploadError {
-
    /// Check if it's an end-of-file error.
-
    pub fn is_eof(&self) -> bool {
-
        matches!(self, UploadError::UploadPack(e) if e.kind() == io::ErrorKind::UnexpectedEof)
-
    }
-
}
-

-
/// Fetch job sent to worker thread.
-
#[derive(Debug, Clone)]
-
pub enum FetchRequest {
-
    /// Client is initiating a fetch for the repository identified by
-
    /// `rid` from the peer identified by `remote`.
-
    Initiator {
-
        /// Repo to fetch.
-
        rid: RepoId,
-
        /// Remote peer we are interacting with.
-
        remote: NodeId,
-
        /// If this fetch is for a particular set of `rad/sigrefs`.
-
        refs_at: Option<Vec<RefsAt>>,
-
    },
-
    /// Server is responding to a fetch request by uploading the
-
    /// specified `refspecs` sent by the client.
-
    Responder {
-
        /// Remote peer we are interacting with.
-
        remote: NodeId,
-
        /// Reporter for upload-pack progress.
-
        emitter: Emitter<Event>,
-
    },
-
}
-

-
impl FetchRequest {
-
    pub fn remote(&self) -> NodeId {
-
        match self {
-
            Self::Initiator { remote, .. } | Self::Responder { remote, .. } => *remote,
-
        }
-
    }
-
}
-

-
/// Fetch result of an upload or fetch.
-
#[derive(Debug)]
-
pub enum FetchResult {
-
    Initiator {
-
        /// Repo fetched.
-
        rid: RepoId,
-
        /// Fetch result, including remotes fetched.
-
        result: Result<fetch::FetchResult, FetchError>,
-
    },
-
    Responder {
-
        /// Repo requested.
-
        rid: Option<RepoId>,
-
        /// Upload result.
-
        result: Result<(), UploadError>,
-
    },
-
}
-

/// Task to be accomplished on a worker thread.
/// This is either going to be an outgoing or incoming fetch.
pub struct Task {
modified crates/radicle-node/src/worker/fetch.rs
@@ -1,6 +1,5 @@
-
pub mod error;
+
pub(crate) use radicle_protocol::worker::fetch::error;

-
use std::collections::HashSet;
use std::str::FromStr;

use localtime::LocalTime;
@@ -8,7 +7,6 @@ use localtime::LocalTime;
use radicle::cob::TypedId;
use radicle::crypto::PublicKey;
use radicle::identity::crefs::GetCanonicalRefs as _;
-
use radicle::identity::DocAt;
use radicle::prelude::NodeId;
use radicle::prelude::RepoId;
use radicle::storage::git::Repository;
@@ -20,32 +18,10 @@ use radicle::storage::{
use radicle::{cob, git, node, Storage};
use radicle_fetch::git::refs::Applied;
use radicle_fetch::{Allowed, BlockList, FetchLimit};
+
pub use radicle_protocol::worker::fetch::FetchResult;

use super::channels::ChannelsFlush;

-
#[derive(Debug, Clone)]
-
pub struct FetchResult {
-
    /// The set of updated references.
-
    pub updated: Vec<RefUpdate>,
-
    /// The set of remote namespaces that were updated.
-
    pub namespaces: HashSet<PublicKey>,
-
    /// The fetch was a full clone.
-
    pub clone: bool,
-
    /// Identity doc of fetched repo.
-
    pub doc: DocAt,
-
}
-

-
impl FetchResult {
-
    pub fn new(doc: DocAt) -> Self {
-
        Self {
-
            updated: vec![],
-
            namespaces: HashSet::new(),
-
            clone: false,
-
            doc,
-
        }
-
    }
-
}
-

pub enum Handle {
    Clone {
        handle: radicle_fetch::Handle<ChannelsFlush>,
deleted crates/radicle-node/src/worker/fetch/error.rs
@@ -1,75 +0,0 @@
-
use std::io;
-

-
use thiserror::Error;
-

-
use radicle::{cob, git, identity, storage};
-
use radicle_fetch as fetch;
-

-
#[derive(Debug, Error)]
-
pub enum Fetch {
-
    #[error(transparent)]
-
    Run(#[from] fetch::Error),
-
    #[error(transparent)]
-
    Git(#[from] git::raw::Error),
-
    #[error(transparent)]
-
    Storage(#[from] storage::Error),
-
    #[error(transparent)]
-
    StorageCopy(#[from] io::Error),
-
    #[error(transparent)]
-
    Repository(#[from] radicle::storage::RepositoryError),
-
    #[error(transparent)]
-
    RefsDb(#[from] radicle::node::refs::Error),
-
    #[error("validation of the storage repository failed: the delegates {delegates:?} failed to validate to meet a threshold of {threshold}")]
-
    Validation {
-
        threshold: usize,
-
        delegates: Vec<String>,
-
    },
-
    #[error(transparent)]
-
    Cache(#[from] Cache),
-
}
-

-
#[derive(Debug, Error)]
-
pub enum Cache {
-
    #[error(transparent)]
-
    Parse(#[from] cob::ParseIdentifierError),
-
    #[error(transparent)]
-
    Repository(#[from] storage::RepositoryError),
-
    #[error("failed to remove {type_name} '{id}' from cache: {err}")]
-
    Remove {
-
        id: cob::ObjectId,
-
        type_name: cob::TypeName,
-
        #[source]
-
        err: Box<dyn std::error::Error + Send + Sync + 'static>,
-
    },
-
    #[error(transparent)]
-
    Store(#[from] cob::store::Error),
-
    #[error("failed to update {type_name} '{id}' in cache: {err}")]
-
    Update {
-
        id: cob::ObjectId,
-
        type_name: cob::TypeName,
-
        #[source]
-
        err: Box<dyn std::error::Error + Send + Sync + 'static>,
-
    },
-
}
-

-
#[derive(Debug, Error)]
-
pub enum Handle {
-
    #[error(transparent)]
-
    Doc(#[from] identity::DocError),
-
    #[error(transparent)]
-
    Io(#[from] io::Error),
-
    #[error(transparent)]
-
    Init(#[from] fetch::handle::error::Init),
-
    #[error(transparent)]
-
    Storage(#[from] storage::Error),
-
    #[error(transparent)]
-
    Repository(#[from] radicle::storage::RepositoryError),
-
}
-

-
#[derive(Debug, Error)]
-
pub enum Canonical {
-
    #[error(transparent)]
-
    Identity(#[from] radicle::storage::RepositoryError),
-
    #[error(transparent)]
-
    CanonicalRefs(#[from] radicle::identity::doc::CanonicalRefsError),
-
}
modified crates/radicle-protocol/Cargo.toml
@@ -9,24 +9,16 @@ edition.workspace = true
rust-version.workspace = true

[features]
-
test = ["radicle/test", "radicle-crypto/test", "radicle-crypto/cyphernet", "qcheck", "snapbox"]
+
test = ["radicle/test", "radicle-crypto/test", "radicle-crypto/cyphernet", "qcheck"]

[dependencies]
-
amplify = { workspace = true }
-
anyhow = { workspace = true }
bloomy = "1.2"
byteorder = { workspace = true }
-
chrono = { workspace = true, features = ["clock"] }
-
colored = { workspace = true }
crossbeam-channel = { workspace = true }
-
cyphernet = { workspace = true, features = ["tor", "dns", "ed25519", "p2p-ed25519"] }
+
cyphernet = { workspace = true, features = ["tor"] }
fastrand = { workspace = true }
-
io-reactor = { version = "0.5.1", features = ["popol"] }
-
lexopt = { workspace = true }
-
libc = { workspace = true }
log = { workspace = true, features = ["std"] }
localtime = { workspace = true }
-
netservices = { version = "0.8.0", features = ["io-reactor", "socket2"] }
nonempty = { workspace = true, features = ["serialize"] }
qcheck = { workspace = true, optional = true }
radicle = { workspace = true, features = ["logger"] }
@@ -36,9 +28,6 @@ sqlite = { workspace = true, features = ["bundled"] }
scrypt = { version = "0.11.0", default-features = false }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true, features = ["preserve_order"] }
-
snapbox = { workspace = true, optional = true }
-
socket2 = "0.5.7"
-
tempfile = { workspace = true }
thiserror = { workspace = true }

[dev-dependencies]
@@ -46,4 +35,3 @@ qcheck = { workspace = true }
qcheck-macros = { workspace = true }
radicle = { workspace = true, features = ["test"] }
radicle-crypto = { workspace = true, features = ["test", "cyphernet"] }
-
snapbox = { workspace = true }
modified crates/radicle-protocol/src/bounded.rs
@@ -239,3 +239,15 @@ impl<T: std::fmt::Debug, const N: usize> std::fmt::Debug for BoundedVec<T, N> {
        self.v.fmt(f)
    }
}
+

+
#[cfg(any(test, feature = "test"))]
+
impl<T, const N: usize> qcheck::Arbitrary for BoundedVec<T, N>
+
where
+
    T: qcheck::Arbitrary + Eq,
+
{
+
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
+
        let mut v: Vec<T> = qcheck::Arbitrary::arbitrary(g);
+
        v.truncate(N);
+
        v.try_into().expect("size within bounds")
+
    }
+
}
added crates/radicle-protocol/src/deserializer.rs
@@ -0,0 +1,163 @@
+
use std::io;
+
use std::marker::PhantomData;
+

+
use crate::bounded;
+
use crate::bounded::BoundedVec;
+
use crate::service::message::Message;
+
use crate::wire;
+

+
/// Message stream deserializer.
+
///
+
/// Used to for example turn a byte stream into network messages.
+
#[derive(Debug)]
+
pub struct Deserializer<const B: usize, D = Message> {
+
    unparsed: BoundedVec<u8, B>,
+
    item: PhantomData<D>,
+
}
+

+
impl<const B: usize, D: wire::Decode> Default for Deserializer<B, D> {
+
    fn default() -> Self {
+
        Self::new(wire::Size::MAX as usize + 1)
+
    }
+
}
+

+
impl<const B: usize, D> TryFrom<Vec<u8>> for Deserializer<B, D> {
+
    type Error = bounded::Error;
+

+
    fn try_from(unparsed: Vec<u8>) -> Result<Self, Self::Error> {
+
        BoundedVec::try_from(unparsed).map(|unparsed| Self {
+
            unparsed,
+
            item: PhantomData,
+
        })
+
    }
+
}
+

+
impl<const B: usize, D: wire::Decode> Deserializer<B, D> {
+
    /// Create a new stream decoder.
+
    pub fn new(capacity: usize) -> Self {
+
        Self {
+
            unparsed: BoundedVec::with_capacity(capacity)
+
                .expect("Deserializer::new: capacity exceeds maximum"),
+
            item: PhantomData,
+
        }
+
    }
+

+
    /// Input bytes into the decoder.
+
    pub fn input(&mut self, bytes: &[u8]) -> Result<(), bounded::Error> {
+
        self.unparsed.extend_from_slice(bytes)
+
    }
+

+
    /// Decode and return the next message. Returns [`None`] if nothing was decoded.
+
    pub fn deserialize_next(&mut self) -> Result<Option<D>, wire::Error> {
+
        let mut reader = io::Cursor::new(self.unparsed.as_slice());
+

+
        match D::decode(&mut reader) {
+
            Ok(msg) => {
+
                let pos = reader.position() as usize;
+
                self.unparsed.drain(..pos);
+

+
                Ok(Some(msg))
+
            }
+
            Err(err) if err.is_eof() => Ok(None),
+
            Err(err) => Err(err),
+
        }
+
    }
+

+
    /// Drain the unparsed buffer.
+
    pub fn unparsed(&mut self) -> impl ExactSizeIterator<Item = u8> + '_ {
+
        self.unparsed.drain(..)
+
    }
+

+
    /// Return whether there are unparsed bytes.
+
    pub fn is_empty(&self) -> bool {
+
        self.unparsed.is_empty()
+
    }
+

+
    /// Return the size of the unparsed data.
+
    pub fn len(&self) -> usize {
+
        self.unparsed.len()
+
    }
+
}
+

+
impl<const B: usize, D: wire::Decode> io::Write for Deserializer<B, D> {
+
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+
        self.input(buf).map_err(|_| io::ErrorKind::OutOfMemory)?;
+

+
        Ok(buf.len())
+
    }
+

+
    fn flush(&mut self) -> io::Result<()> {
+
        Ok(())
+
    }
+
}
+

+
impl<const B: usize, D: wire::Decode> Iterator for Deserializer<B, D> {
+
    type Item = Result<D, wire::Error>;
+

+
    fn next(&mut self) -> Option<Self::Item> {
+
        self.deserialize_next().transpose()
+
    }
+
}
+

+
#[cfg(test)]
+
mod test {
+
    use super::*;
+
    use qcheck_macros::quickcheck;
+

+
    use radicle::assert_matches;
+

+
    const MSG_HELLO: &[u8] = &[5, b'h', b'e', b'l', b'l', b'o'];
+
    const MSG_BYE: &[u8] = &[3, b'b', b'y', b'e'];
+

+
    #[test]
+
    fn test_decode_next() {
+
        let mut decoder = Deserializer::<1024, String>::new(8);
+

+
        decoder.input(&[3, b'b']).unwrap();
+
        assert_matches!(decoder.deserialize_next(), Ok(None));
+
        assert_eq!(decoder.unparsed.len(), 2);
+

+
        decoder.input(b"y").unwrap();
+
        assert_matches!(decoder.deserialize_next(), Ok(None));
+
        assert_eq!(decoder.unparsed.len(), 3);
+

+
        decoder.input(b"e").unwrap();
+
        assert_matches!(decoder.deserialize_next(), Ok(Some(s)) if s.as_str() == "bye");
+
        assert_eq!(decoder.unparsed.len(), 0);
+
        assert!(decoder.is_empty());
+
    }
+

+
    #[test]
+
    fn test_unparsed() {
+
        let mut decoder = Deserializer::<1024, String>::new(8);
+

+
        decoder.input(&[3, b'b', b'y']).unwrap();
+
        assert_eq!(decoder.unparsed().collect::<Vec<_>>(), vec![3, b'b', b'y']);
+
        assert!(decoder.is_empty());
+
    }
+

+
    #[quickcheck]
+
    fn prop_decode_next(chunk_size: usize) {
+
        let mut bytes = vec![];
+
        let mut msgs = vec![];
+
        let mut decoder = Deserializer::<1024, String>::new(8);
+

+
        let chunk_size = 1 + chunk_size % MSG_HELLO.len() + MSG_BYE.len();
+

+
        bytes.extend_from_slice(MSG_HELLO);
+
        bytes.extend_from_slice(MSG_BYE);
+

+
        for chunk in bytes.as_slice().chunks(chunk_size) {
+
            decoder.input(chunk).unwrap();
+

+
            while let Some(msg) = decoder.deserialize_next().unwrap() {
+
                msgs.push(msg);
+
            }
+
        }
+

+
        assert_eq!(decoder.unparsed.len(), 0);
+
        assert_eq!(msgs.len(), 2);
+
        assert_eq!(msgs[0], String::from("hello"));
+
        assert_eq!(msgs[1], String::from("bye"));
+
    }
+
}
added crates/radicle-protocol/src/lib.rs
@@ -0,0 +1,8 @@
+
pub mod bounded;
+
pub mod deserializer;
+
pub mod service;
+
pub mod wire;
+
pub mod worker;
+

+
/// Peer-to-peer protocol version.
+
pub const PROTOCOL_VERSION: u8 = 1;
modified crates/radicle-protocol/src/service.rs
@@ -38,32 +38,36 @@ use radicle::storage::refs::SIGREFS_BRANCH;
use radicle::storage::RepositoryError;
use radicle_fetch::policy::SeedingPolicy;

-
use crate::identity::RepoId;
-
use crate::node::routing;
-
use crate::node::routing::InsertResult;
-
use crate::node::{
-
    Address, Alias, Features, FetchResult, HostName, Seed, Seeds, SyncStatus, SyncedAt,
-
};
-
use crate::prelude::*;
-
use crate::runtime::Emitter;
use crate::service::gossip::Store as _;
use crate::service::message::{
    Announcement, AnnouncementMessage, Info, NodeAnnouncement, Ping, RefsAnnouncement, RefsStatus,
};
use crate::service::policy::{store::Write, Scope};
-
use crate::storage;
-
use crate::storage::{refs::RefsAt, Namespaces, ReadStorage};
-
use crate::worker::fetch;
-
use crate::worker::FetchError;
-
use crate::Link;
-
use crate::{crypto, PROTOCOL_VERSION};
-

-
pub use crate::node::events::{Event, Events};
-
pub use crate::node::{config::Network, Config, NodeId};
+
use radicle::identity::RepoId;
+
use radicle::node::events::Emitter;
+
use radicle::node::routing;
+
use radicle::node::routing::InsertResult;
+
use radicle::node::{
+
    Address, Alias, Features, FetchResult, HostName, Seed, Seeds, SyncStatus, SyncedAt,
+
};
+
use radicle::prelude::*;
+
use radicle::storage;
+
use radicle::storage::{refs::RefsAt, Namespaces, ReadStorage};
+
// use radicle::worker::fetch;
+
// use crate::worker::FetchError;
+
use radicle::crypto;
+
use radicle::node::Link;
+
use radicle::node::PROTOCOL_VERSION;
+

+
use crate::bounded::BoundedVec;
+
use crate::service::filter::Filter;
pub use crate::service::message::{Message, ZeroBytes};
pub use crate::service::session::{QueuedFetch, Session};
+
use crate::worker::FetchError;
+
use radicle::node::events::{Event, Events};
+
use radicle::node::{Config, NodeId};

-
pub use radicle::node::policy::config as policy;
+
use radicle::node::policy::config as policy;

use self::io::Outbox;
use self::limiter::RateLimiter;
@@ -221,7 +225,7 @@ pub trait Store:
{
}

-
impl Store for node::Database {}
+
impl Store for radicle::node::Database {}

/// Function used to query internal service state.
pub type QueryState = dyn Fn(&dyn ServiceState) -> Result<(), CommandError> + Send + Sync;
@@ -1133,7 +1137,7 @@ where
        &mut self,
        rid: RepoId,
        remote: NodeId,
-
        result: Result<fetch::FetchResult, FetchError>,
+
        result: Result<crate::worker::fetch::FetchResult, crate::worker::FetchError>,
    ) {
        let Some(fetching) = self.fetching.remove(&rid) else {
            error!(target: "service", "Received unexpected fetch result for {rid}, from {remote}");
@@ -1169,7 +1173,7 @@ where
        }

        match result {
-
            Ok(fetch::FetchResult {
+
            Ok(crate::worker::fetch::FetchResult {
                updated,
                namespaces,
                clone,
modified crates/radicle-protocol/src/service/filter.rs
@@ -3,7 +3,7 @@ use std::ops::{Deref, DerefMut};

pub use bloomy::BloomFilter;

-
use crate::identity::RepoId;
+
use radicle::identity::RepoId;

/// Size in bytes of *large* bloom filter.
/// It can store about 13'675 items with a false positive rate of 1%.
@@ -90,10 +90,26 @@ impl From<BloomFilter<RepoId>> for Filter {
    }
}

+
#[allow(clippy::unwrap_used)]
+
#[cfg(any(test, feature = "test"))]
+
impl qcheck::Arbitrary for Filter {
+
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
+
        let size = *g
+
            .choose(&[FILTER_SIZE_S, FILTER_SIZE_M, FILTER_SIZE_L])
+
            .unwrap();
+
        let mut bytes = vec![0; size];
+
        for _ in 0..64 {
+
            let index = usize::arbitrary(g) % bytes.len();
+
            bytes[index] = u8::arbitrary(g);
+
        }
+
        Self::from(BloomFilter::from(bytes))
+
    }
+
}
+

#[cfg(test)]
mod test {
    use super::*;
-
    use crate::test::arbitrary;
+
    use radicle::test::arbitrary;

    #[test]
    fn test_parameters() {
modified crates/radicle-protocol/src/service/gossip.rs
@@ -4,14 +4,15 @@ use std::str::FromStr;
use std::sync::LazyLock;

use super::*;
-
use crate::{PROTOCOL_VERSION, VERSION};
+
use crate::bounded::BoundedVec;
use radicle::node::UserAgent;
+
use radicle::node::PROTOCOL_VERSION;

pub use store::{AnnouncementId, Error, RelayStatus, Store};

/// This node's user agent string.
-
pub static USER_AGENT: LazyLock<UserAgent> = LazyLock::new(|| {
-
    FromStr::from_str(format!("/radicle:{}/", VERSION.version).as_str())
+
pub static PROTOCOL_VERSION_STRING: LazyLock<UserAgent> = LazyLock::new(|| {
+
    FromStr::from_str(format!("/radicle:{}/", PROTOCOL_VERSION).as_str())
        .expect("user agent is valid")
});

@@ -23,7 +24,7 @@ pub fn node(config: &Config, timestamp: Timestamp) -> NodeAnnouncement {
        .clone()
        .try_into()
        .expect("external addresses are within the limit");
-
    let agent = USER_AGENT.clone();
+
    let agent = PROTOCOL_VERSION_STRING.clone();
    let version = PROTOCOL_VERSION;

    NodeAnnouncement {
modified crates/radicle-protocol/src/service/gossip/store.rs
@@ -5,13 +5,15 @@ use radicle::crypto::Signature;
use sqlite as sql;
use thiserror::Error;

-
use crate::node::{Database, NodeId};
-
use crate::prelude::{Filter, Timestamp};
+
use crate::service::filter::Filter;
use crate::service::message::{
    Announcement, AnnouncementMessage, InventoryAnnouncement, NodeAnnouncement, RefsAnnouncement,
};
use crate::wire;
use crate::wire::Decode;
+
use radicle::node::Database;
+
use radicle::node::NodeId;
+
use radicle::prelude::Timestamp;

#[derive(Error, Debug)]
pub enum Error {
@@ -395,11 +397,12 @@ mod parse {
#[allow(clippy::unwrap_used)]
mod test {
    use super::*;
-
    use crate::prelude::{BoundedVec, RepoId};
-
    use crate::test::arbitrary;
+
    use crate::bounded::BoundedVec;
    use localtime::LocalTime;
    use radicle::assert_matches;
+
    use radicle::identity::RepoId;
    use radicle::node::device::Device;
+
    use radicle::test::arbitrary;

    #[test]
    fn test_announced() {
modified crates/radicle-protocol/src/service/io.rs
@@ -1,12 +1,17 @@
use std::collections::VecDeque;
use std::time;

+
use localtime::LocalDuration;
use log::*;
+
use radicle::identity::RepoId;
use radicle::node::config::FetchPackSizeLimit;
+
use radicle::node::Address;
+
use radicle::node::NodeId;
use radicle::storage::refs::RefsAt;

-
use crate::prelude::*;
+
use crate::service::message::Message;
use crate::service::session::Session;
+
use crate::service::DisconnectReason;
use crate::service::Link;

use super::gossip;
@@ -189,7 +194,7 @@ impl Outbox {
    }

    #[cfg(any(test, feature = "test"))]
-
    pub(crate) fn queue(&mut self) -> &mut VecDeque<Io> {
+
    pub fn queue(&mut self) -> &mut VecDeque<Io> {
        &mut self.io
    }
}
modified crates/radicle-protocol/src/service/message.rs
@@ -1,18 +1,18 @@
use std::{fmt, io, mem};

use nonempty::NonEmpty;
+
use radicle::crypto;
use radicle::git;
+
use radicle::identity::RepoId;
+
use radicle::node;
use radicle::node::device::Device;
+
use radicle::node::{Address, Alias, UserAgent};
+
use radicle::storage;
use radicle::storage::refs::RefsAt;

-
use crate::crypto;
-
use crate::identity::RepoId;
-
use crate::node;
-
use crate::node::{Address, Alias, UserAgent};
-
use crate::prelude::BoundedVec;
+
use crate::bounded::BoundedVec;
use crate::service::filter::Filter;
use crate::service::{Link, NodeId, Timestamp};
-
use crate::storage;
use crate::wire;

/// Maximum number of addresses which can be announced to other nodes.
@@ -363,7 +363,7 @@ impl Announcement {
    #[cfg(not(debug_assertions))]
    pub const POW_PARAMS: (u8, u32, u32) = (15, 8, 1);
    /// Salt used for generating PoW.
-
    pub const POW_SALT: &'static [u8] = &[b'r', b'a', b'd'];
+
    pub const POW_SALT: &'static [u8] = b"rad";

    /// Verify this announcement's signature.
    pub fn verify(&self) -> bool {
@@ -589,6 +589,87 @@ impl ZeroBytes {
    }
}

+
#[cfg(any(test, feature = "test"))]
+
#[allow(clippy::unwrap_used)]
+
impl qcheck::Arbitrary for Message {
+
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
+
        use qcheck::Arbitrary;
+

+
        match g.choose(&[1, 2, 3, 4, 5, 6, 7]).unwrap() {
+
            1 => Announcement {
+
                node: NodeId::arbitrary(g),
+
                message: InventoryAnnouncement {
+
                    inventory: BoundedVec::arbitrary(g),
+
                    timestamp: Timestamp::arbitrary(g),
+
                }
+
                .into(),
+
                signature: crypto::Signature::from(<[u8; 64]>::arbitrary(g)),
+
            }
+
            .into(),
+
            2 => Announcement {
+
                node: NodeId::arbitrary(g),
+
                message: RefsAnnouncement {
+
                    rid: RepoId::arbitrary(g),
+
                    refs: BoundedVec::arbitrary(g),
+
                    timestamp: Timestamp::arbitrary(g),
+
                }
+
                .into(),
+
                signature: crypto::Signature::from(<[u8; 64]>::arbitrary(g)),
+
            }
+
            .into(),
+
            3 => {
+
                let message = NodeAnnouncement {
+
                    version: u8::arbitrary(g),
+
                    features: u64::arbitrary(g).into(),
+
                    timestamp: Timestamp::arbitrary(g),
+
                    alias: Alias::arbitrary(g),
+
                    addresses: Arbitrary::arbitrary(g),
+
                    nonce: u64::arbitrary(g),
+
                    agent: UserAgent::arbitrary(g),
+
                }
+
                .into();
+
                let bytes: [u8; 64] = Arbitrary::arbitrary(g);
+
                let signature = crypto::Signature::from(bytes);
+

+
                Announcement {
+
                    node: NodeId::arbitrary(g),
+
                    signature,
+
                    message,
+
                }
+
                .into()
+
            }
+
            4 => {
+
                let message = Info::RefsAlreadySynced {
+
                    rid: RepoId::arbitrary(g),
+
                    at: radicle::test::arbitrary::oid(),
+
                };
+
                Self::Info(message)
+
            }
+
            5 => Self::Subscribe(Subscribe {
+
                filter: Filter::arbitrary(g),
+
                since: Timestamp::arbitrary(g),
+
                until: Timestamp::arbitrary(g),
+
            }),
+
            6 => {
+
                let mut rng = fastrand::Rng::with_seed(u64::arbitrary(g));
+

+
                Self::Ping(Ping::new(&mut rng))
+
            }
+
            7 => Self::Pong {
+
                zeroes: ZeroBytes::new(u16::arbitrary(g).min(Ping::MAX_PONG_ZEROES)),
+
            },
+
            _ => panic!("Invalid choice for Message::arbitrary"),
+
        }
+
    }
+
}
+

+
#[cfg(any(test, feature = "test"))]
+
impl qcheck::Arbitrary for ZeroBytes {
+
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
+
        ZeroBytes::new(u16::arbitrary(g))
+
    }
+
}
+

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
@@ -599,9 +680,9 @@ mod tests {
    use radicle::git::raw;

    use super::*;
-
    use crate::prelude::*;
-
    use crate::test::arbitrary;
    use crate::wire::Encode;
+
    use localtime::LocalTime;
+
    use radicle::test::arbitrary;

    #[test]
    fn test_ref_remote_limit() {
modified crates/radicle-protocol/src/service/session.rs
@@ -2,16 +2,15 @@ use std::collections::{HashSet, VecDeque};
use std::{fmt, time};

use crossbeam_channel as chan;
+
use radicle::node::config::Limits;
+
use radicle::node::{FetchResult, Severity};
+
use radicle::node::{Link, Timestamp};
+
pub use radicle::node::{PingState, State};
+
use radicle::storage::refs::RefsAt;

-
use crate::node::config::Limits;
-
use crate::node::{FetchResult, Severity};
use crate::service::message;
use crate::service::message::Message;
use crate::service::{Address, LocalDuration, LocalTime, NodeId, Outbox, RepoId, Rng};
-
use crate::storage::refs::RefsAt;
-
use crate::{Link, Timestamp};
-

-
pub use crate::node::{PingState, State};

/// Time after which a connection is considered stable.
pub const CONNECTION_STABLE_THRESHOLD: LocalDuration = LocalDuration::from_mins(1);
modified crates/radicle-protocol/src/wire.rs
@@ -1,12 +1,9 @@
-
mod frame;
-
mod message;
-
mod protocol;
-
mod varint;
+
pub mod frame;
+
pub mod message;
+
pub mod varint;

pub use frame::StreamId;
pub use message::{AddressType, MessageType};
-
pub use protocol::{Control, Wire, WireReader, WireSession, WireWriter};
-
use radicle::node::UserAgent;

use std::collections::BTreeMap;
use std::convert::TryFrom;
@@ -18,18 +15,21 @@ use std::{io, mem};
use byteorder::{NetworkEndian, ReadBytesExt, WriteBytesExt};
use cyphernet::addr::tor;

-
use crate::crypto::{PublicKey, Signature, Unverified};
-
use crate::git;
-
use crate::git::fmt;
-
use crate::identity::RepoId;
-
use crate::node;
-
use crate::node::Alias;
-
use crate::prelude::*;
+
use radicle::crypto::{PublicKey, Signature, Unverified};
+
use radicle::git;
+
use radicle::git::fmt;
+
use radicle::identity::RepoId;
+
use radicle::node;
+
use radicle::node::Alias;
+
use radicle::node::NodeId;
+
use radicle::node::Timestamp;
+
use radicle::node::UserAgent;
+
use radicle::storage::refs::Refs;
+
use radicle::storage::refs::RefsAt;
+
use radicle::storage::refs::SignedRefs;
+

+
use crate::bounded::BoundedVec;
use crate::service::filter;
-
use crate::storage::refs::Refs;
-
use crate::storage::refs::RefsAt;
-
use crate::storage::refs::SignedRefs;
-
use crate::Timestamp;

/// The default type we use to represent sizes on the wire.
///
@@ -569,9 +569,9 @@ mod tests {
    use qcheck;
    use qcheck_macros::quickcheck;

-
    use crate::crypto::Unverified;
-
    use crate::storage::refs::SignedRefs;
-
    use crate::test::assert_matches;
+
    use radicle::assert_matches;
+
    use radicle::crypto::Unverified;
+
    use radicle::storage::refs::SignedRefs;

    #[quickcheck]
    fn prop_u8(input: u8) {
modified crates/radicle-protocol/src/wire/frame.rs
@@ -2,7 +2,9 @@
#![warn(clippy::missing_docs_in_private_items)]
use std::{fmt, io};

-
use crate::{wire, wire::varint, wire::varint::VarInt, wire::Message, Link, PROTOCOL_VERSION};
+
use crate::service::Message;
+
use crate::{wire, wire::varint, wire::varint::VarInt, PROTOCOL_VERSION};
+
use radicle::node::Link;

/// Protocol version strings all start with the magic sequence `rad`, followed
/// by a version number.
modified crates/radicle-protocol/src/wire/message.rs
@@ -2,13 +2,17 @@ use std::{io, mem, net};

use byteorder::{NetworkEndian, ReadBytesExt};
use cyphernet::addr::{tor, Addr, HostName, NetAddr};
+
use radicle::crypto::Signature;
use radicle::git::Oid;
+
use radicle::identity::RepoId;
use radicle::node::Address;
+
use radicle::node::NodeId;
+
use radicle::node::Timestamp;

-
use crate::prelude::*;
+
use crate::bounded::BoundedVec;
+
use crate::service::filter::Filter;
use crate::service::message::*;
use crate::wire;
-
use crate::wire::{Decode, Encode};

/// Message type.
#[repr(u16)]
@@ -67,22 +71,6 @@ impl Message {
    }
}

-
impl netservices::Frame for Message {
-
    type Error = wire::Error;
-

-
    fn unmarshall(mut reader: impl io::Read) -> Result<Option<Self>, Self::Error> {
-
        match Message::decode(&mut reader) {
-
            Ok(msg) => Ok(Some(msg)),
-
            Err(wire::Error::Io(_)) => Ok(None),
-
            Err(err) => Err(err),
-
        }
-
    }
-

-
    fn marshall(&self, mut writer: impl io::Write) -> Result<usize, Self::Error> {
-
        self.encode(&mut writer).map_err(wire::Error::from)
-
    }
-
}
-

/// Address type.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
@@ -463,8 +451,8 @@ mod tests {
    use radicle::storage::refs::RefsAt;

    use crate::deserializer::Deserializer;
-
    use crate::test::arbitrary;
    use crate::wire::{self, Encode};
+
    use radicle::test::arbitrary;

    #[test]
    fn test_refs_ann_max_size() {
deleted crates/radicle-protocol/src/wire/protocol.rs
@@ -1,1381 +0,0 @@
-
//! Implementation of the transport protocol.
-
//!
-
//! We use the Noise XK handshake pattern to establish an encrypted stream with a remote peer.
-
//! The handshake itself is implemented in the external [`cyphernet`] and [`netservices`] crates.
-
use std::collections::hash_map::Entry;
-
use std::collections::VecDeque;
-
use std::os::unix::io::{AsRawFd, RawFd};
-
use std::sync::Arc;
-
use std::{io, net, time};
-

-
use amplify::Wrapper as _;
-
use crossbeam_channel as chan;
-
use cyphernet::addr::{HostName, InetHost, NetAddr};
-
use cyphernet::encrypt::noise::{HandshakePattern, Keyset, NoiseState};
-
use cyphernet::proxy::socks5;
-
use cyphernet::{Digest, EcSk, Ecdh, Sha256};
-
use localtime::LocalTime;
-
use netservices::resource::{ListenerEvent, NetAccept, NetTransport, SessionEvent};
-
use netservices::session::{NoiseSession, ProtocolArtifact, Socks5Session};
-
use netservices::{NetConnection, NetReader, NetWriter};
-
use radicle::node::device::Device;
-
use reactor::{ResourceId, ResourceType, Timestamp};
-

-
use radicle::collections::RandomMap;
-
use radicle::crypto;
-
use radicle::node::config::AddressConfig;
-
use radicle::node::NodeId;
-
use radicle::storage::WriteStorage;
-

-
use crate::prelude::Deserializer;
-
use crate::service;
-
use crate::service::io::Io;
-
use crate::service::FETCH_TIMEOUT;
-
use crate::service::{session, DisconnectReason, Metrics, Service};
-
use crate::wire::frame;
-
use crate::wire::frame::{Frame, FrameData, StreamId};
-
use crate::wire::Encode;
-
use crate::worker;
-
use crate::worker::{ChannelEvent, ChannelsConfig, FetchRequest, FetchResult, Task, TaskResult};
-
use crate::Link;
-

-
/// NoiseXK handshake pattern.
-
pub const NOISE_XK: HandshakePattern = HandshakePattern {
-
    initiator: cyphernet::encrypt::noise::InitiatorPattern::Xmitted,
-
    responder: cyphernet::encrypt::noise::OneWayPattern::Known,
-
};
-

-
/// Default time to wait until a network connection is considered inactive.
-
pub const DEFAULT_CONNECTION_TIMEOUT: time::Duration = time::Duration::from_secs(6);
-

-
/// Default time to wait when dialing a connection, before the remote is considered unreachable.
-
pub const DEFAULT_DIAL_TIMEOUT: time::Duration = time::Duration::from_secs(6);
-

-
/// Maximum size of a peer inbox, in bytes.
-
pub const MAX_INBOX_SIZE: usize = 1024 * 1024 * 2;
-

-
/// Control message used internally between workers, users, and the service.
-
#[allow(clippy::large_enum_variant)]
-
#[derive(Debug)]
-
pub enum Control {
-
    /// Message from the user to the service.
-
    User(service::Command),
-
    /// Message from a worker to the service.
-
    Worker(TaskResult),
-
    /// Flush data in the given stream to the remote.
-
    Flush { remote: NodeId, stream: StreamId },
-
}
-

-
/// Peer session type.
-
pub type WireSession<G> = NoiseSession<G, Sha256, Socks5Session<net::TcpStream>>;
-
/// Peer session type (read-only).
-
pub type WireReader = NetReader<Socks5Session<net::TcpStream>>;
-
/// Peer session type (write-only).
-
pub type WireWriter<G> = NetWriter<NoiseState<G, Sha256>, Socks5Session<net::TcpStream>>;
-

-
/// Reactor action.
-
type Action<G> = reactor::Action<NetAccept<WireSession<G>>, NetTransport<WireSession<G>>>;
-

-
/// A worker stream.
-
struct Stream {
-
    /// Channels.
-
    channels: worker::Channels,
-
    /// Data sent.
-
    sent_bytes: usize,
-
    /// Data received.
-
    received_bytes: usize,
-
}
-

-
impl Stream {
-
    fn new(channels: worker::Channels) -> Self {
-
        Self {
-
            channels,
-
            sent_bytes: 0,
-
            received_bytes: 0,
-
        }
-
    }
-
}
-

-
/// Streams associated with a connected peer.
-
struct Streams {
-
    /// Active streams and their associated worker channels.
-
    /// Note that the gossip and control streams are not included here as they are always
-
    /// implied to exist.
-
    streams: RandomMap<StreamId, Stream>,
-
    /// Connection direction.
-
    link: Link,
-
    /// Sequence number used to compute the next stream id.
-
    seq: u64,
-
}
-

-
impl Streams {
-
    /// Create a new [`Streams`] object, passing the connection link.
-
    fn new(link: Link) -> Self {
-
        Self {
-
            streams: RandomMap::default(),
-
            link,
-
            seq: 0,
-
        }
-
    }
-

-
    /// Get a known stream.
-
    fn get(&self, stream: &StreamId) -> Option<&Stream> {
-
        self.streams.get(stream)
-
    }
-

-
    /// Get a known stream, mutably.
-
    fn get_mut(&mut self, stream: &StreamId) -> Option<&mut Stream> {
-
        self.streams.get_mut(stream)
-
    }
-

-
    /// Open a new stream.
-
    fn open(&mut self, config: ChannelsConfig) -> (StreamId, worker::Channels) {
-
        self.seq += 1;
-

-
        let id = StreamId::git(self.link)
-
            .nth(self.seq)
-
            .expect("Streams::open: too many streams");
-
        let channels = self
-
            .register(id, config)
-
            .expect("Streams::open: stream was already open");
-

-
        (id, channels)
-
    }
-

-
    /// Register an open stream.
-
    fn register(&mut self, stream: StreamId, config: ChannelsConfig) -> Option<worker::Channels> {
-
        let (wire, worker) = worker::Channels::pair(config)
-
            .expect("Streams::register: fatal: unable to create channels");
-

-
        match self.streams.entry(stream) {
-
            Entry::Vacant(e) => {
-
                e.insert(Stream::new(worker));
-
                Some(wire)
-
            }
-
            Entry::Occupied(_) => None,
-
        }
-
    }
-

-
    /// Unregister an open stream.
-
    fn unregister(&mut self, stream: &StreamId) -> Option<Stream> {
-
        self.streams.remove(stream)
-
    }
-

-
    /// Close all streams.
-
    fn shutdown(&mut self) {
-
        for (sid, stream) in self.streams.drain() {
-
            log::debug!(target: "wire", "Closing worker stream {sid}");
-
            stream.channels.close().ok();
-
        }
-
    }
-
}
-

-
/// The initial state of an outbound peer before handshake is completed.
-
#[derive(Debug)]
-
struct Outbound {
-
    /// Resource ID, if registered.
-
    id: Option<ResourceId>,
-
    /// Remote address.
-
    addr: NetAddr<HostName>,
-
    /// Remote Node ID.
-
    nid: NodeId,
-
}
-

-
/// The initial state of an inbound peer before handshake is completed.
-
#[derive(Debug)]
-
struct Inbound {
-
    /// Resource ID, if registered.
-
    id: Option<ResourceId>,
-
    /// Remote address.
-
    addr: NetAddr<HostName>,
-
}
-

-
/// Peer connection state machine.
-
enum Peer {
-
    /// The state after handshake is completed.
-
    /// Peers in this state are handled by the underlying service.
-
    Connected {
-
        #[allow(dead_code)]
-
        addr: NetAddr<HostName>,
-
        link: Link,
-
        nid: NodeId,
-
        inbox: Deserializer<MAX_INBOX_SIZE, Frame>,
-
        streams: Streams,
-
    },
-
    /// The peer was scheduled for disconnection. Once the transport is handed over
-
    /// by the reactor, we can consider it disconnected.
-
    Disconnecting {
-
        link: Link,
-
        nid: Option<NodeId>,
-
        reason: DisconnectReason,
-
    },
-
}
-

-
impl std::fmt::Debug for Peer {
-
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-
        match self {
-
            Self::Connected { link, nid, .. } => write!(f, "Connected({link:?}, {nid})"),
-
            Self::Disconnecting { .. } => write!(f, "Disconnecting"),
-
        }
-
    }
-
}
-

-
impl Peer {
-
    /// Return the peer's id, if any.
-
    fn id(&self) -> Option<&NodeId> {
-
        match self {
-
            Peer::Connected { nid, .. } | Peer::Disconnecting { nid: Some(nid), .. } => Some(nid),
-
            Peer::Disconnecting { nid: None, .. } => None,
-
        }
-
    }
-

-
    fn link(&self) -> Link {
-
        match self {
-
            Peer::Connected { link, .. } => *link,
-
            Peer::Disconnecting { link, .. } => *link,
-
        }
-
    }
-

-
    /// Connected peer.
-
    fn connected(nid: NodeId, addr: NetAddr<HostName>, link: Link) -> Self {
-
        Self::Connected {
-
            link,
-
            addr,
-
            nid,
-
            inbox: Deserializer::default(),
-
            streams: Streams::new(link),
-
        }
-
    }
-
}
-

-
/// Holds connected peers.
-
struct Peers(RandomMap<ResourceId, Peer>);
-

-
impl Peers {
-
    fn get_mut(&mut self, id: &ResourceId) -> Option<&mut Peer> {
-
        self.0.get_mut(id)
-
    }
-

-
    fn entry(&mut self, id: ResourceId) -> Entry<ResourceId, Peer> {
-
        self.0.entry(id)
-
    }
-

-
    fn insert(&mut self, id: ResourceId, peer: Peer) {
-
        if self.0.insert(id, peer).is_some() {
-
            log::warn!(target: "wire", "Replacing existing peer id={id}");
-
        }
-
    }
-

-
    fn remove(&mut self, id: &ResourceId) -> Option<Peer> {
-
        self.0.remove(id)
-
    }
-

-
    fn lookup(&self, node_id: &NodeId) -> Option<(ResourceId, &Peer)> {
-
        self.0
-
            .iter()
-
            .find(|(_, peer)| peer.id() == Some(node_id))
-
            .map(|(fd, peer)| (*fd, peer))
-
    }
-

-
    fn lookup_mut(&mut self, node_id: &NodeId) -> Option<(ResourceId, &mut Peer)> {
-
        self.0
-
            .iter_mut()
-
            .find(|(_, peer)| peer.id() == Some(node_id))
-
            .map(|(fd, peer)| (*fd, peer))
-
    }
-

-
    fn active(&self) -> impl Iterator<Item = (ResourceId, &NodeId, Link)> {
-
        self.0.iter().filter_map(|(id, peer)| match peer {
-
            Peer::Connected { nid, link, .. } => Some((*id, nid, *link)),
-
            Peer::Disconnecting { .. } => None,
-
        })
-
    }
-

-
    fn connected(&self) -> impl Iterator<Item = (ResourceId, &NodeId)> {
-
        self.0.iter().filter_map(|(id, peer)| {
-
            if let Peer::Connected { nid, .. } = peer {
-
                Some((*id, nid))
-
            } else {
-
                None
-
            }
-
        })
-
    }
-

-
    fn iter(&self) -> impl Iterator<Item = &Peer> {
-
        self.0.values()
-
    }
-
}
-

-
/// Wire protocol implementation for a set of peers.
///
/// Bridges socket-level reactor events and the backing [`Service`]: it
/// tracks per-fd connection state before a session is established, holds
/// established sessions in [`Peers`], and queues [`Action`]s for the
/// reactor to execute.
pub struct Wire<D, S, G: crypto::signature::Signer<crypto::Signature> + Ecdh> {
    /// Backing service instance.
    service: Service<D, S, G>,
    /// Worker pool interface.
    worker: chan::Sender<Task>,
    /// Used for authentication.
    signer: Device<G>,
    /// Node metrics.
    metrics: service::Metrics,
    /// Internal queue of actions to send to the reactor.
    actions: VecDeque<Action<G>>,
    /// Outbound attempted peers without a session.
    outbound: RandomMap<RawFd, Outbound>,
    /// Inbound peers without a session.
    inbound: RandomMap<RawFd, Inbound>,
    /// Listening addresses that are not yet registered.
    listening: RandomMap<RawFd, net::SocketAddr>,
    /// Peer (established) sessions.
    peers: Peers,
}
-

-
impl<D, S, G> Wire<D, S, G>
where
    D: service::Store,
    S: WriteStorage + 'static,
    G: crypto::signature::Signer<crypto::Signature> + Ecdh<Pk = NodeId>,
{
    /// Create a new wire protocol handler around an already-initialized service.
    ///
    /// # Panics
    ///
    /// Panics if the service has not been started (`service.started()` is `None`).
    pub fn new(service: Service<D, S, G>, worker: chan::Sender<Task>, signer: Device<G>) -> Self {
        assert!(service.started().is_some(), "Service must be initialized");

        Self {
            service,
            worker,
            signer,
            metrics: Metrics::default(),
            actions: VecDeque::new(),
            inbound: RandomMap::default(),
            outbound: RandomMap::default(),
            listening: RandomMap::default(),
            peers: Peers(RandomMap::default()),
        }
    }

    /// Queue registration of a listening socket with the reactor.
    ///
    /// The local address is remembered under the socket's fd so the service
    /// can be notified once the listener is actually registered.
    pub fn listen(&mut self, socket: NetAccept<WireSession<G>>) {
        self.listening
            .insert(socket.as_raw_fd(), socket.local_addr());
        self.actions.push_back(Action::RegisterListener(socket));
    }

    /// Transition a peer to the disconnecting state and queue unregistration
    /// of its transport.
    ///
    /// Returns the node id and link direction of the affected connection,
    /// when known at this point (unestablished inbound connections have none).
    fn disconnect(&mut self, id: ResourceId, reason: DisconnectReason) -> Option<(NodeId, Link)> {
        match self.peers.entry(id) {
            Entry::Vacant(_) => {
                // Connecting peer with no session.
                log::debug!(target: "wire", "Disconnecting pending peer with id={id}: {reason}");
                self.actions.push_back(Action::UnregisterTransport(id));

                // Check for attempted outbound connections. Unestablished inbound connections don't
                // have an NID yet.
                self.outbound
                    .values()
                    .find(|o| o.id == Some(id))
                    .map(|o| (o.nid, Link::Outbound))
            }
            Entry::Occupied(mut e) => match e.get_mut() {
                Peer::Disconnecting { nid, link, .. } => {
                    log::error!(target: "wire", "Peer with id={id} is already disconnecting");

                    nid.map(|n| (n, *link))
                }
                Peer::Connected {
                    nid, streams, link, ..
                } => {
                    log::debug!(target: "wire", "Disconnecting peer with id={id}: {reason}");
                    let nid = *nid;
                    let link = *link;

                    // Shut down all multiplexed streams before transitioning state.
                    streams.shutdown();
                    e.insert(Peer::Disconnecting {
                        nid: Some(nid),
                        link,
                        reason,
                    });
                    self.actions.push_back(Action::UnregisterTransport(id));

                    Some((nid, link))
                }
            },
        }
    }

    /// Process a fetch result coming back from the worker pool: close the
    /// associated stream and, for fetches we initiated, notify the service.
    fn worker_result(&mut self, task: TaskResult) {
        log::debug!(
            target: "wire",
            "Received fetch result from worker for stream {}, remote {}: {:?}",
            task.stream, task.remote, task.result
        );

        let nid = task.remote;
        let Some((fd, peer)) = self.peers.lookup_mut(&nid) else {
            log::warn!(target: "wire", "Peer {nid} not found; ignoring fetch result");
            return;
        };

        if let Peer::Connected { link, streams, .. } = peer {
            // Nb. It's possible that the stream would already be unregistered if we received an
            // early "close" from the remote. Otherwise, we unregister it here and send the "close"
            // ourselves.
            if let Some(s) = streams.unregister(&task.stream) {
                log::debug!(
                    target: "wire", "Stream {} of {} closing with {} byte(s) sent and {} byte(s) received",
                    task.stream, task.remote, s.sent_bytes, s.received_bytes
                );
                let frame = Frame::<service::Message>::control(
                    *link,
                    frame::Control::Close {
                        stream: task.stream,
                    },
                );
                self.actions.push_back(Action::Send(fd, frame.to_bytes()));
            }
        } else {
            // NOTE(review): the previous comment here claimed the service should
            // still be told about the fetch result for disconnected peers, but the
            // code returns early and drops it — confirm the early return is intended.
            log::warn!(target: "wire", "Peer {nid} is not connected; ignoring fetch result");
            return;
        };

        // Only call into the service if we initiated this fetch.
        match task.result {
            FetchResult::Initiator { rid, result } => {
                self.service.fetched(rid, nid, result);
            }
            FetchResult::Responder { rid, result } => {
                if let Some(rid) = rid {
                    if let Some(err) = result.err() {
                        log::info!(target: "wire", "Peer {nid} failed to fetch {rid} from us: {err}");
                    } else {
                        log::info!(target: "wire", "Peer {nid} fetched {rid} from us successfully");
                    }
                }
            }
        }
    }

    /// Drain pending channel events for the given peer stream and queue them
    /// as wire frames for the reactor to send.
    fn flush(&mut self, remote: NodeId, stream: StreamId) {
        let Some((fd, peer)) = self.peers.lookup_mut(&remote) else {
            log::warn!(target: "wire", "Peer {remote} is not known; ignoring flush");
            return;
        };
        let Peer::Connected { streams, link, .. } = peer else {
            log::warn!(target: "wire", "Peer {remote} is not connected; ignoring flush");
            return;
        };
        let Some(s) = streams.get_mut(&stream) else {
            log::debug!(target: "wire", "Stream {stream} cannot be found; ignoring flush");
            return;
        };
        let metrics = self.metrics.peer(remote);

        for data in s.channels.try_iter() {
            let frame = match data {
                ChannelEvent::Data(data) => {
                    metrics.sent_git_bytes += data.len();
                    metrics.sent_bytes += data.len();
                    Frame::<service::Message>::git(stream, data)
                }
                ChannelEvent::Close => Frame::control(*link, frame::Control::Close { stream }),
                ChannelEvent::Eof => Frame::control(*link, frame::Control::Eof { stream }),
            };
            self.actions
                .push_back(reactor::Action::Send(fd, frame.to_bytes()));
        }
    }

    /// Clean up pre-session bookkeeping for a peer that never established a
    /// session; notifies the service only for outbound attempts, since it is
    /// unaware of unestablished inbound connections.
    fn cleanup(&mut self, id: ResourceId, fd: RawFd) {
        if self.inbound.remove(&fd).is_some() {
            log::debug!(target: "wire", "Cleaning up inbound peer state with id={id} (fd={fd})");
        } else if let Some(outbound) = self.outbound.remove(&fd) {
            log::debug!(target: "wire", "Cleaning up outbound peer state with id={id} (fd={fd})");
            self.service.disconnected(
                outbound.nid,
                Link::Outbound,
                &DisconnectReason::connection(),
            );
        } else {
            log::debug!(target: "wire", "Tried to cleanup unknown peer with id={id} (fd={fd})");
        }
    }
}
-

-
impl<D, S, G> reactor::Handler for Wire<D, S, G>
where
    D: service::Store + Send,
    S: WriteStorage + Send + 'static,
    G: crypto::signature::Signer<crypto::Signature> + Ecdh<Pk = NodeId> + Clone + Send,
{
    type Listener = NetAccept<WireSession<G>>;
    type Transport = NetTransport<WireSession<G>>;
    type Command = Control;

    /// Reactor tick: refresh aggregate metrics and advance the service clock.
    fn tick(&mut self, time: Timestamp) {
        // Total number of open streams across all connected peers.
        self.metrics.open_channels = self
            .peers
            .iter()
            .filter_map(|p| {
                if let Peer::Connected { streams, .. } = p {
                    Some(streams.streams.len())
                } else {
                    None
                }
            })
            .sum();
        self.metrics.worker_queue_size = self.worker.len();
        self.service.tick(
            LocalTime::from_millis(time.as_millis() as u128),
            &self.metrics,
        );
    }

    /// A reactor timer fired; wake the service.
    fn handle_timer(&mut self) {
        self.service.wake();
    }

    /// Handle an event on a listening socket, i.e. an inbound connection attempt.
    fn handle_listener_event(
        &mut self,
        _: ResourceId, // Nb. This is the ID of the listener socket.
        event: ListenerEvent<WireSession<G>>,
        _: Timestamp,
    ) {
        match event {
            ListenerEvent::Accepted(connection) => {
                let Ok(remote) = connection.remote_addr() else {
                    log::warn!(target: "wire", "Accepted connection doesn't have remote address; dropping..");
                    drop(connection);

                    return;
                };
                let InetHost::Ip(ip) = remote.host else {
                    log::error!(target: "wire", "Unexpected host type for inbound connection {remote}; dropping..");
                    drop(connection);

                    return;
                };
                let fd = connection.as_raw_fd();
                log::debug!(target: "wire", "Inbound connection from {remote} (fd={fd})..");

                // If the service doesn't want to accept this connection,
                // we drop the connection here, which disconnects the socket.
                if !self.service.accepted(ip) {
                    log::debug!(target: "wire", "Rejecting inbound connection from {ip} (fd={fd})..");
                    drop(connection);

                    return;
                }

                // Perform the responder side of the handshake over the accepted socket.
                let session = match accept::<G>(
                    remote.clone().into(),
                    connection,
                    self.signer.clone().into_inner(),
                ) {
                    Ok(s) => s,
                    Err(e) => {
                        log::error!(target: "wire", "Error creating session for {ip}: {e}");
                        return;
                    }
                };
                let transport = match NetTransport::with_session(session, Link::Inbound) {
                    Ok(transport) => transport,
                    Err(err) => {
                        log::error!(target: "wire", "Failed to create transport for accepted connection: {err}");
                        return;
                    }
                };
                log::debug!(target: "wire", "Accepted inbound connection from {remote} (fd={fd})..");

                // Track the connection under its fd until the session is established;
                // we don't know the remote's node id yet.
                self.inbound.insert(
                    fd,
                    Inbound {
                        id: None,
                        addr: remote.into(),
                    },
                );
                self.actions
                    .push_back(reactor::Action::RegisterTransport(transport))
            }
            ListenerEvent::Failure(err) => {
                log::error!(target: "wire", "Error listening for inbound connections: {err}");
            }
        }
    }

    /// A resource was registered with the reactor: record its resource id, or
    /// notify the service of a newly-listening address.
    fn handle_registered(&mut self, fd: RawFd, id: ResourceId, typ: ResourceType) {
        match typ {
            ResourceType::Listener => {
                if let Some(local_addr) = self.listening.remove(&fd) {
                    self.service.listening(local_addr);
                }
            }
            ResourceType::Transport => {
                if let Some(outbound) = self.outbound.get_mut(&fd) {
                    log::debug!(target: "wire", "Outbound peer resource registered for {} with id={id} (fd={fd})", outbound.nid);
                    outbound.id = Some(id);
                } else if let Some(inbound) = self.inbound.get_mut(&fd) {
                    log::debug!(target: "wire", "Inbound peer resource registered with id={id} (fd={fd})");
                    inbound.id = Some(id);
                } else {
                    log::warn!(target: "wire", "Unknown peer registered with fd={fd} and id={id}");
                }
            }
        }
    }

    /// Handle session events on a peer transport: session establishment
    /// (including conflicting-connection resolution), incoming data frames,
    /// and session termination.
    fn handle_transport_event(
        &mut self,
        id: ResourceId,
        event: SessionEvent<WireSession<G>>,
        _: Timestamp,
    ) {
        match event {
            SessionEvent::Established(fd, ProtocolArtifact { state, .. }) => {
                // SAFETY: With the NoiseXK protocol, there is always a remote static key.
                let nid: NodeId = state.remote_static_key.unwrap();
                // Make sure we don't try to connect to ourselves by mistake.
                if &nid == self.signer.public_key() {
                    log::error!(target: "wire", "Self-connection detected, disconnecting..");
                    self.disconnect(id, DisconnectReason::SelfConnection);

                    return;
                }
                let (addr, link) = if let Some(peer) = self.inbound.remove(&fd) {
                    self.metrics.peer(nid).inbound_connection_attempts += 1;
                    (peer.addr, Link::Inbound)
                } else if let Some(peer) = self.outbound.remove(&fd) {
                    assert_eq!(nid, peer.nid);
                    (peer.addr, Link::Outbound)
                } else {
                    log::error!(target: "wire", "Session for {nid} (id={id}) not found");
                    return;
                };
                log::debug!(
                    target: "wire",
                    "Session established with {nid} (id={id}) (fd={fd}) ({})",
                    if link.is_inbound() { "inbound" } else { "outbound" }
                );

                // Connections to close.
                let mut disconnect = Vec::new();

                // Handle conflicting connections.
                // This is typical when nodes have mutually configured their nodes to connect to
                // each other on startup. We handle this by deterministically choosing one node
                // whose outbound connection is the one that is kept. The other connections are
                // dropped.
                {
                    // Whether we have precedence in case of conflicting connections.
                    // Having precedence means that our outbound connection will win over
                    // the other node's outbound connection.
                    let precedence = *self.signer.public_key() > nid;

                    // Pre-existing connections that conflict with this newly established session.
                    // Note that we can't know whether a connection is conflicting before we get the
                    // remote static key.
                    let mut conflicting = Vec::new();

                    // Active sessions with the same NID but a different Resource ID are conflicting.
                    conflicting.extend(
                        self.peers
                            .active()
                            .filter(|(c_id, d, _)| **d == nid && *c_id != id)
                            .map(|(c_id, _, link)| (c_id, link)),
                    );

                    // Outbound connection attempts with the same remote key but a different file
                    // descriptor are conflicting.
                    conflicting.extend(self.outbound.iter().filter_map(|(c_fd, other)| {
                        if other.nid == nid && *c_fd != fd {
                            other.id.map(|c_id| (c_id, Link::Outbound))
                        } else {
                            None
                        }
                    }));

                    for (c_id, c_link) in conflicting {
                        // If we have precedence, the inbound connection is closed.
                        // In the case where both connections are inbound or outbound,
                        // we close the newer connection, ie. the one with the higher
                        // resource id.
                        let close = match (link, c_link) {
                            (Link::Inbound, Link::Outbound) => {
                                if precedence {
                                    id
                                } else {
                                    c_id
                                }
                            }
                            (Link::Outbound, Link::Inbound) => {
                                if precedence {
                                    c_id
                                } else {
                                    id
                                }
                            }
                            (Link::Inbound, Link::Inbound) => id.max(c_id),
                            (Link::Outbound, Link::Outbound) => id.max(c_id),
                        };

                        log::warn!(
                            target: "wire", "Established session (id={id}) conflicts with existing session for {nid} (id={c_id})"
                        );
                        disconnect.push(close);
                    }
                }
                for id in &disconnect {
                    log::warn!(
                        target: "wire", "Closing conflicting session (id={id}) with {nid}.."
                    );
                    // Disconnect and return the associated NID of the peer, if available.
                    if let Some((nid, link)) = self.disconnect(*id, DisconnectReason::Conflict) {
                        // We disconnect the session eagerly because otherwise we will get the new
                        // `connected` event before the `disconnect`, resulting in a duplicate
                        // connection.
                        self.service
                            .disconnected(nid, link, &DisconnectReason::Conflict);
                    }
                }
                // Only register the new session if it wasn't itself chosen for closing.
                if !disconnect.contains(&id) {
                    self.peers
                        .insert(id, Peer::connected(nid, addr.clone(), link));
                    self.service.connected(nid, addr.into(), link);
                }
            }
            SessionEvent::Data(data) => {
                if let Some(Peer::Connected {
                    nid,
                    inbox,
                    streams,
                    ..
                }) = self.peers.get_mut(&id)
                {
                    let metrics = self.metrics.peer(*nid);
                    metrics.received_bytes += data.len();

                    // Buffer the raw bytes; disconnect peers we can't keep up with.
                    if inbox.input(&data).is_err() {
                        log::error!(target: "wire", "Maximum inbox size ({MAX_INBOX_SIZE}) reached for peer {nid}");
                        log::error!(target: "wire", "Unable to process messages fast enough for peer {nid}; disconnecting..");
                        self.disconnect(id, DisconnectReason::Session(session::Error::Misbehavior));

                        return;
                    }

                    // Drain all complete frames from the inbox.
                    loop {
                        match inbox.deserialize_next() {
                            Ok(Some(Frame {
                                data: FrameData::Control(frame::Control::Open { stream }),
                                ..
                            })) => {
                                log::debug!(target: "wire", "Received `open` command for stream {stream} from {nid}");
                                metrics.streams_opened += 1;
                                metrics.received_fetch_requests += 1;
                                let reader_limit = self.service.config().limits.fetch_pack_receive;
                                let Some(channels) = streams.register(
                                    stream,
                                    ChannelsConfig::new(FETCH_TIMEOUT)
                                        .with_reader_limit(reader_limit),
                                ) else {
                                    log::warn!(target: "wire", "Peer attempted to open already-open stream stream {stream}");
                                    continue;
                                };

                                // Hand the responder side of the fetch to the worker pool.
                                let task = Task {
                                    fetch: FetchRequest::Responder {
                                        remote: *nid,
                                        emitter: self.service.emitter(),
                                    },
                                    stream,
                                    channels,
                                };
                                if let Err(e) = self.worker.try_send(task) {
                                    log::error!(
                                        target: "wire",
                                        "Worker pool failed to accept incoming fetch request: {e}"
                                    );
                                }
                            }
                            Ok(Some(Frame {
                                data: FrameData::Control(frame::Control::Eof { stream }),
                                ..
                            })) => {
                                if let Some(s) = streams.get(&stream) {
                                    log::debug!(target: "wire", "Received `end-of-file` on stream {stream} from {nid}");

                                    if s.channels.send(ChannelEvent::Eof).is_err() {
                                        log::error!(target: "wire", "Worker is disconnected; cannot send `EOF`");
                                    }
                                } else {
                                    log::debug!(target: "wire", "Ignoring frame on closed or unknown stream {stream}");
                                }
                            }
                            Ok(Some(Frame {
                                data: FrameData::Control(frame::Control::Close { stream }),
                                ..
                            })) => {
                                log::debug!(target: "wire", "Received `close` command for stream {stream} from {nid}");

                                if let Some(s) = streams.unregister(&stream) {
                                    log::debug!(
                                        target: "wire",
                                        "Stream {stream} of {nid} closed with {} byte(s) sent and {} byte(s) received",
                                        s.sent_bytes, s.received_bytes
                                    );
                                    s.channels.close().ok();
                                }
                            }
                            Ok(Some(Frame {
                                data: FrameData::Gossip(msg),
                                ..
                            })) => {
                                metrics.received_gossip_messages += 1;
                                self.service.received_message(*nid, msg);
                            }
                            Ok(Some(Frame {
                                stream,
                                data: FrameData::Git(data),
                                ..
                            })) => {
                                if let Some(s) = streams.get_mut(&stream) {
                                    metrics.received_git_bytes += data.len();

                                    if s.channels.send(ChannelEvent::Data(data)).is_err() {
                                        log::error!(target: "wire", "Worker is disconnected; cannot send data");
                                    }
                                } else {
                                    log::debug!(target: "wire", "Ignoring frame on closed or unknown stream {stream}");
                                }
                            }
                            Ok(None) => {
                                // Buffer is empty, or message isn't complete.
                                break;
                            }
                            Err(e) => {
                                log::error!(target: "wire", "Invalid gossip message from {nid}: {e}");

                                if !inbox.is_empty() {
                                    log::debug!(target: "wire", "Dropping read buffer for {nid} with {} bytes", inbox.len());
                                }
                                self.disconnect(
                                    id,
                                    DisconnectReason::Session(session::Error::Misbehavior),
                                );
                                break;
                            }
                        }
                    }
                } else {
                    log::warn!(target: "wire", "Dropping message from unconnected peer (id={id})");
                }
            }
            SessionEvent::Terminated(err) => {
                self.disconnect(id, DisconnectReason::Connection(Arc::new(err)));
            }
        }
    }

    /// Dispatch a command received over the reactor's command channel.
    fn handle_command(&mut self, cmd: Self::Command) {
        match cmd {
            Control::User(cmd) => self.service.command(cmd),
            Control::Worker(result) => self.worker_result(result),
            Control::Flush { remote, stream } => self.flush(remote, stream),
        }
    }

    /// Handle reactor-level errors: poll failures, listener disconnects, and
    /// transport disconnects (which remove the peer and notify the service).
    fn handle_error(
        &mut self,
        err: reactor::Error<NetAccept<WireSession<G>>, NetTransport<WireSession<G>>>,
    ) {
        match err {
            reactor::Error::Poll(err) => {
                // TODO: This should be a fatal error, there's nothing we can do here.
                log::error!(target: "wire", "Can't poll connections: {err}");
            }
            reactor::Error::ListenerDisconnect(id, _) => {
                // TODO: This should be a fatal error, there's nothing we can do here.
                log::error!(target: "wire", "Listener {id} disconnected");
            }
            reactor::Error::TransportDisconnect(id, transport) => {
                let fd = transport.as_raw_fd();
                log::error!(target: "wire", "Peer id={id} (fd={fd}) disconnected");

                // We're dropping the TCP connection here.
                drop(transport);

                // The peer transport is already disconnected and removed from the reactor;
                // therefore there is no need to initiate a disconnection. We simply remove
                // the peer from the map.
                match self.peers.remove(&id) {
                    Some(mut peer) => {
                        if let Peer::Connected { streams, .. } = &mut peer {
                            streams.shutdown();
                        }

                        if let Some(id) = peer.id() {
                            self.service.disconnected(
                                *id,
                                peer.link(),
                                &DisconnectReason::connection(),
                            );
                        } else {
                            log::debug!(target: "wire", "Inbound disconnection before handshake; ignoring..")
                        }
                    }
                    None => self.cleanup(id, fd),
                }
            }
        }
    }

    fn handover_listener(&mut self, id: ResourceId, _listener: Self::Listener) {
        log::error!(target: "wire", "Listener handover is not supported (id={id})");
    }

    /// Receive back ownership of a transport that was unregistered from the
    /// reactor, completing a pending disconnection.
    fn handover_transport(&mut self, id: ResourceId, transport: Self::Transport) {
        let fd = transport.as_raw_fd();

        match self.peers.entry(id) {
            Entry::Occupied(e) => {
                match e.get() {
                    Peer::Disconnecting {
                        nid, reason, link, ..
                    } => {
                        log::debug!(target: "wire", "Transport handover for disconnecting peer with id={id} (fd={fd})");

                        // Disconnect TCP stream.
                        drop(transport);

                        // If there is no NID, the service is not aware of the peer.
                        if let Some(nid) = nid {
                            // In the case of a conflicting connection, there will be two resources
                            // for the peer. However, at the service level, there is only one, and
                            // it is identified by NID.
                            //
                            // Therefore, we specify which of the connections we're closing by
                            // passing the `link`.
                            self.service.disconnected(*nid, *link, reason);
                        }
                        e.remove();
                    }
                    Peer::Connected { nid, .. } => {
                        panic!("Wire::handover_transport: Unexpected handover of connected peer {} with id={id} (fd={fd})", nid);
                    }
                }
            }
            Entry::Vacant(_) => self.cleanup(id, fd),
        }
    }
}
-

-
// Drives the wire layer: drains every pending i/o event from the service
// and translates it into low-level reactor [`Action`]s. Actions are queued
// internally and handed to the reactor one at a time via `next()`.
impl<D, S, G> Iterator for Wire<D, S, G>
where
    D: service::Store,
    S: WriteStorage + 'static,
    G: crypto::signature::Signer<crypto::Signature> + Ecdh<Pk = NodeId> + Clone,
{
    type Item = Action<G>;

    fn next(&mut self) -> Option<Self::Item> {
        // Process *all* service events before yielding, so the action queue
        // reflects the service's full current state.
        while let Some(ev) = self.service.next() {
            match ev {
                // Gossip messages destined for a connected peer: frame and
                // queue them as a single send to that peer's fd.
                Io::Write(node_id, msgs) => {
                    let (fd, link) = match self.peers.lookup(&node_id) {
                        Some((fd, Peer::Connected { link, .. })) => (fd, *link),
                        Some((_, peer)) => {
                            // If the peer is disconnected by the wire protocol, the service may
                            // not be aware of this yet, and may continue to write messages to it.
                            log::debug!(target: "wire", "Dropping {} message(s) to {node_id} ({peer:?})", msgs.len());
                            continue;
                        }
                        None => {
                            log::error!(target: "wire", "Dropping {} message(s) to {node_id}: unknown peer", msgs.len());
                            continue;
                        }
                    };
                    log::trace!(
                        target: "wire", "Writing {} message(s) to {}", msgs.len(), node_id
                    );
                    let mut data = Vec::new();
                    let metrics = self.metrics.peer(node_id);
                    metrics.sent_gossip_messages += msgs.len();

                    // All messages are framed into one buffer so they go out
                    // in a single reactor send.
                    for msg in msgs {
                        Frame::gossip(link, msg)
                            .encode(&mut data)
                            .expect("in-memory writes never fail");
                    }
                    metrics.sent_bytes += data.len();

                    self.actions.push_back(reactor::Action::Send(fd, data));
                }
                // Outbound connection request: dial the peer and register the
                // resulting transport with the reactor.
                Io::Connect(node_id, addr) => {
                    if self.peers.connected().any(|(_, id)| id == &node_id) {
                        log::error!(
                            target: "wire",
                            "Attempt to connect to already connected peer {node_id}"
                        );
                        // FIXME: The problem here is the session will stay in "initial" state,
                        // because it can't transition to attempted.
                        continue;
                    }
                    // Record the attempt before dialing, so the service sees
                    // the attempt even if the dial fails synchronously.
                    self.service.attempted(node_id, addr.clone());
                    self.metrics.peer(node_id).outbound_connection_attempts += 1;

                    match dial::<G>(
                        addr.to_inner(),
                        node_id,
                        self.signer.clone().into_inner(),
                        self.service.config(),
                    )
                    .and_then(|session| {
                        NetTransport::<WireSession<G>>::with_session(session, Link::Outbound)
                    }) {
                        Ok(transport) => {
                            // Track the pending outbound transport by fd until the
                            // reactor confirms registration (`id: None` until then).
                            self.outbound.insert(
                                transport.as_raw_fd(),
                                Outbound {
                                    id: None,
                                    nid: node_id,
                                    addr: addr.to_inner(),
                                },
                            );
                            log::debug!(
                                target: "wire",
                                "Registering outbound transport for {node_id} (fd={})..",
                                transport.as_raw_fd()
                            );
                            self.actions
                                .push_back(reactor::Action::RegisterTransport(transport));
                        }
                        Err(err) => {
                            log::error!(target: "wire", "Error establishing connection to {addr}: {err}");

                            // Report the failed dial back to the service so it can
                            // update the peer's state.
                            self.service.disconnected(
                                node_id,
                                Link::Outbound,
                                &DisconnectReason::Dial(Arc::new(err)),
                            );
                        }
                    }
                }
                // Disconnect request: only meaningful for currently connected peers.
                Io::Disconnect(nid, reason) => {
                    if let Some((id, Peer::Connected { .. })) = self.peers.lookup(&nid) {
                        if let Some((nid, _)) = self.disconnect(id, reason) {
                            self.metrics.peer(nid).disconnects += 1;
                        }
                    } else {
                        log::warn!(target: "wire", "Peer {nid} is not connected: ignoring disconnect");
                    }
                }
                // Timer request: forwarded to the reactor as-is.
                Io::Wakeup(d) => {
                    self.actions.push_back(reactor::Action::SetTimer(d.into()));
                }
                // Fetch request: open a new multiplexed stream to the remote,
                // hand the fetch task to the worker pool, and notify the remote
                // by sending an `Open` control frame.
                Io::Fetch {
                    rid,
                    remote,
                    timeout,
                    reader_limit,
                    refs_at,
                } => {
                    log::trace!(target: "wire", "Processing fetch for {rid} from {remote}..");

                    let Some((fd, Peer::Connected { link, streams, .. })) =
                        self.peers.lookup_mut(&remote)
                    else {
                        // Nb. It's possible that a peer is disconnected while an `Io::Fetch`
                        // is in the service's i/o buffer. Since the service may not purge the
                        // buffer on disconnect, we should just ignore i/o actions that don't
                        // have a connected peer.
                        log::error!(target: "wire", "Peer {remote} is not connected: dropping fetch");
                        continue;
                    };
                    let (stream, channels) =
                        streams.open(ChannelsConfig::new(timeout).with_reader_limit(reader_limit));

                    log::debug!(target: "wire", "Opened new stream with id {stream} for {rid} and remote {remote}");

                    let link = *link;
                    let task = Task {
                        fetch: FetchRequest::Initiator {
                            rid,
                            remote,
                            refs_at,
                        },
                        stream,
                        channels,
                    };

                    if !self.worker.is_empty() {
                        log::warn!(
                            target: "wire",
                            "Worker pool is busy: {} tasks pending, fetch requests may be delayed", self.worker.len()
                        );
                    }
                    // Best-effort dispatch: a failed send is logged but the stream
                    // and metrics below are updated regardless.
                    if let Err(e) = self.worker.try_send(task) {
                        log::error!(
                            target: "wire",
                            "Worker pool failed to accept outgoing fetch request: {e}"
                        );
                    }
                    let metrics = self.metrics.peer(remote);
                    metrics.streams_opened += 1;
                    metrics.sent_fetch_requests += 1;

                    self.actions.push_back(Action::Send(
                        fd,
                        Frame::<service::Message>::control(link, frame::Control::Open { stream })
                            .to_bytes(),
                    ));
                }
            }
        }
        self.actions.pop_front()
    }
}
-

-
/// Establish a new outgoing connection.
-
pub fn dial<G: Ecdh<Pk = NodeId>>(
-
    remote_addr: NetAddr<HostName>,
-
    remote_id: <G as EcSk>::Pk,
-
    signer: G,
-
    config: &service::Config,
-
) -> io::Result<WireSession<G>> {
-
    // Determine what address to establish a TCP connection with, given the remote peer
-
    // address and our node configuration.
-
    let inet_addr: NetAddr<InetHost> = match (&remote_addr.host, config.proxy) {
-
        // For IP and DNS addresses, use the global proxy if set, otherwise use the address as-is.
-
        (HostName::Ip(_), Some(proxy)) => proxy.into(),
-
        (HostName::Ip(ip), None) => NetAddr::new(InetHost::Ip(*ip), remote_addr.port),
-
        (HostName::Dns(_), Some(proxy)) => proxy.into(),
-
        (HostName::Dns(dns), None) => NetAddr::new(InetHost::Dns(dns.clone()), remote_addr.port),
-
        // For onion addresses, handle with care.
-
        (HostName::Tor(onion), proxy) => match config.onion {
-
            // In onion proxy mode, simply use the configured proxy address.
-
            // This takes precedence over any global proxy.
-
            Some(AddressConfig::Proxy { address }) => address.into(),
-
            // In "forward" mode, if a global proxy is set, we use that, otherwise
-
            // we treat `.onion` addresses as regular DNS names.
-
            Some(AddressConfig::Forward) => {
-
                if let Some(proxy) = proxy {
-
                    proxy.into()
-
                } else {
-
                    NetAddr::new(InetHost::Dns(onion.to_string()), remote_addr.port)
-
                }
-
            }
-
            // If onion address support isn't configured, refuse to connect.
-
            None => {
-
                return Err(io::Error::new(
-
                    io::ErrorKind::Unsupported,
-
                    "no configuration found for .onion addresses",
-
                ));
-
            }
-
        },
-
        _ => {
-
            return Err(io::Error::new(
-
                io::ErrorKind::Unsupported,
-
                "unsupported remote address type",
-
            ));
-
        }
-
    };
-
    // Nb. This timeout is currently not used by the underlying library due to the
-
    // `socket2` library not supporting non-blocking connect with timeout.
-
    let connection = net::TcpStream::connect_nonblocking(inet_addr, DEFAULT_DIAL_TIMEOUT)?;
-
    // Whether to tunnel regular connections through the proxy.
-
    let force_proxy = config.proxy.is_some();
-

-
    session::<G>(
-
        remote_addr,
-
        Some(remote_id),
-
        connection,
-
        force_proxy,
-
        signer,
-
    )
-
}
-

-
/// Accept a new connection.
-
pub fn accept<G: Ecdh<Pk = NodeId>>(
-
    remote_addr: NetAddr<HostName>,
-
    connection: net::TcpStream,
-
    signer: G,
-
) -> io::Result<WireSession<G>> {
-
    session::<G>(remote_addr, None, connection, false, signer)
-
}
-

-
/// Create a new [`WireSession`].
///
/// Configures the TCP connection (TCP_NODELAY, read/write timeouts,
/// keep-alive), wraps it in a SOCKS5 session, and initializes the Noise XK
/// handshake state with a fresh ephemeral keypair.
///
/// `remote_id` is the expected remote static key: `Some` when dialing out
/// (we know who we're connecting to), `None` when accepting inbound.
/// `force_proxy` tunnels the connection through SOCKS5 even for non-onion
/// addresses.
fn session<G: Ecdh<Pk = NodeId>>(
    remote_addr: NetAddr<HostName>,
    remote_id: Option<NodeId>,
    connection: net::TcpStream,
    force_proxy: bool,
    signer: G,
) -> io::Result<WireSession<G>> {
    // There are issues with setting TCP_NODELAY on WSL. Not a big deal.
    if let Err(e) = connection.set_nodelay(true) {
        log::warn!(target: "wire", "Unable to set TCP_NODELAY on fd {}: {e}", connection.as_raw_fd());
    }
    connection.set_read_timeout(Some(DEFAULT_CONNECTION_TIMEOUT))?;
    connection.set_write_timeout(Some(DEFAULT_CONNECTION_TIMEOUT))?;

    // Keep-alive: probe after 30s idle, every 10s, give up after 3 failures.
    // Like TCP_NODELAY above, failure to set it is non-fatal.
    let sock = socket2::Socket::from(connection);
    let ka = socket2::TcpKeepalive::new()
        .with_time(time::Duration::from_secs(30))
        .with_interval(time::Duration::from_secs(10))
        .with_retries(3);
    if let Err(e) = sock.set_tcp_keepalive(&ka) {
        log::warn!(target: "wire", "Unable to set TCP_KEEPALIVE on fd {}: {e}", sock.as_raw_fd());
    }

    let socks5 = socks5::Socks5::with(remote_addr, force_proxy);
    let proxy = Socks5Session::with(sock.into(), socks5);
    // Fresh ephemeral key (`e`) per session; our static key is the signer.
    let pair = G::generate_keypair();
    let keyset = Keyset {
        e: pair.0,
        s: Some(signer),
        re: None,
        rs: remote_id,
    };
    // As initiator (remote key known) or responder (remote key unknown).
    let noise = NoiseState::initialize::<{ Sha256::OUTPUT_LEN }>(
        NOISE_XK,
        remote_id.is_some(),
        &[],
        keyset,
    );
    Ok(WireSession::with(proxy, noise))
}
-

-
// Tests for wire frame encoding/decoding, in particular forward
// compatibility: messages carrying unknown trailing extension fields must
// still decode with the current message types.
#[cfg(test)]
mod test {
    use super::*;
    use crate::service::{Message, ZeroBytes};
    use crate::wire;
    use crate::wire::varint;

    // A gossip message with unknown trailing fields still decodes, and the
    // deserializer consumes the whole frame (no leftover bytes).
    #[test]
    fn test_pong_message_with_extension() {
        use crate::deserializer;

        let mut stream = Vec::new();
        let pong = Message::Pong {
            zeroes: ZeroBytes::new(42),
        };
        // A stream starts with the protocol version and a stream id.
        frame::PROTOCOL_VERSION_STRING.encode(&mut stream).unwrap();
        frame::StreamId::gossip(Link::Outbound)
            .encode(&mut stream)
            .unwrap();

        // Serialize gossip message with some extension fields.
        let mut gossip = wire::serialize(&pong);
        String::from("extra").encode(&mut gossip).unwrap();
        48u8.encode(&mut gossip).unwrap();

        // Encode gossip message using the varint-prefix format into the stream.
        varint::payload::encode(&gossip, &mut stream).unwrap();

        let mut de = deserializer::Deserializer::<1024, Frame>::new(1024);
        de.input(&stream).unwrap();

        // The "pong" message decodes successfully, even though there is trailing data.
        assert_eq!(
            de.deserialize_next().unwrap().unwrap(),
            Frame::gossip(Link::Outbound, pong)
        );
        assert!(de.deserialize_next().unwrap().is_none());
        assert!(de.is_empty());
    }

    // An announcement framed with an extension field decodes both with a
    // type that knows about the extension and with the current `Message`
    // type that doesn't; in both cases the following frame is unaffected.
    #[test]
    fn test_inventory_ann_with_extension() {
        use crate::deserializer;

        // A `Message` followed by an extra string field, emulating a
        // future protocol extension.
        #[derive(Debug)]
        struct MessageWithExt {
            msg: Message,
            ext: String,
        }

        impl wire::Encode for MessageWithExt {
            fn encode<W: io::Write + ?Sized>(&self, writer: &mut W) -> Result<usize, io::Error> {
                let mut n = self.msg.encode(writer)?;
                n += self.ext.encode(writer)?;

                Ok(n)
            }
        }

        impl wire::Decode for MessageWithExt {
            fn decode<R: io::Read + ?Sized>(reader: &mut R) -> Result<Self, wire::Error> {
                let msg = Message::decode(reader)?;
                // A missing extension decodes as the empty string.
                let ext = String::decode(reader).unwrap_or_default();

                Ok(MessageWithExt { msg, ext })
            }
        }

        let rid = radicle::test::arbitrary::gen(1);
        let pk = radicle::test::arbitrary::gen(1);
        let sig: [u8; 64] = radicle::test::arbitrary::gen(1);

        // Message with extension.
        let mut stream = Vec::new();
        let ann = Message::announcement(
            pk,
            service::gossip::inventory(radicle::node::Timestamp::MAX, [rid]),
            radicle::crypto::Signature::from(sig),
        );
        let pong = Message::Pong {
            zeroes: ZeroBytes::new(42),
        };
        // Framed message with extension.
        frame::Frame::gossip(
            Link::Outbound,
            MessageWithExt {
                msg: ann.clone(),
                ext: String::from("extra"),
            },
        )
        .encode(&mut stream)
        .unwrap();
        // Pong message that comes after, without extension.
        frame::Frame::gossip(Link::Outbound, pong.clone())
            .encode(&mut stream)
            .unwrap();

        // First test deserializing using the message with extension type.
        {
            let mut de = deserializer::Deserializer::<1024, Frame<MessageWithExt>>::new(1024);
            de.input(&stream).unwrap();

            radicle::assert_matches!(
                de.deserialize_next().unwrap().unwrap().data,
                FrameData::Gossip(MessageWithExt {
                    msg,
                    ext,
                }) if msg == ann && ext == *"extra"
            );
            radicle::assert_matches!(
                de.deserialize_next().unwrap().unwrap().data,
                FrameData::Gossip(MessageWithExt {
                    msg,
                    ext,
                }) if msg == pong && ext.is_empty()
            );
            assert!(de.deserialize_next().unwrap().is_none());
            assert!(de.is_empty());
        }

        // Then test deserializing using the current message type without the extension.
        {
            let mut de = deserializer::Deserializer::<1024, Frame<Message>>::new(1024);
            de.input(&stream).unwrap();

            radicle::assert_matches!(
                de.deserialize_next().unwrap().unwrap().data,
                FrameData::Gossip(msg)
                if msg == ann
            );
            radicle::assert_matches!(
                de.deserialize_next().unwrap().unwrap().data,
                FrameData::Gossip(msg)
                if msg == pong
            );
            assert!(de.deserialize_next().unwrap().is_none());
            assert!(de.is_empty());
        }
    }
}
added crates/radicle-protocol/src/worker.rs
@@ -0,0 +1,119 @@
+
#![allow(clippy::too_many_arguments)]
+
pub mod fetch;
+

+
use std::io;
+

+
use radicle::identity::RepoId;
+
use radicle::node::Event;
+
use radicle::prelude::NodeId;
+
use radicle::storage::refs::RefsAt;
+

+
// use crate::runtime::{thread, Emitter, Handle};
+

+
use radicle::node::events::Emitter;
+

+
// pub use channels::{ChannelEvent, Channels, ChannelsConfig};
+

+
/// Error returned by fetch.
#[derive(thiserror::Error, Debug)]
pub enum FetchError {
    /// The `git fetch` command exited with a non-zero code.
    #[error("the 'git fetch' command failed with exit code '{code}'")]
    CommandFailed { code: i32 },
    /// Underlying I/O error; includes timeouts (see [`FetchError::is_timeout`]).
    #[error(transparent)]
    Io(#[from] io::Error),
    /// Error from the fetch machinery itself.
    #[error(transparent)]
    Fetch(#[from] fetch::error::Fetch),
    /// Error from the fetch handle.
    #[error(transparent)]
    Handle(#[from] fetch::error::Handle),
    /// Storage-layer error.
    #[error(transparent)]
    Storage(#[from] radicle::storage::Error),
    /// Failure accessing the node's policy store.
    #[error(transparent)]
    PolicyStore(#[from] radicle::node::policy::store::Error),
    /// Policy error reported by `radicle_fetch`.
    #[error(transparent)]
    Policy(#[from] radicle_fetch::policy::error::Policy),
    /// The fetch was refused due to a block policy (`radicle_fetch`).
    #[error(transparent)]
    Blocked(#[from] radicle_fetch::policy::error::Blocked),
}
+

+
impl FetchError {
+
    /// Check if it's a timeout error.
+
    pub fn is_timeout(&self) -> bool {
+
        matches!(self, FetchError::Io(e) if e.kind() == io::ErrorKind::TimedOut)
+
    }
+
}
+

+
/// Error returned by fetch responder.
#[derive(thiserror::Error, Debug)]
pub enum UploadError {
    /// The git command packet-line sent by the client could not be parsed.
    #[error("error parsing git command packet-line: {0}")]
    PacketLine(io::Error),
    /// I/O failure while running `git upload-pack` (see [`UploadError::is_eof`]).
    #[error("error while performing git upload-pack: {0}")]
    UploadPack(io::Error),
    /// The requesting peer is not authorized to fetch the repository.
    #[error(transparent)]
    Authorization(#[from] AuthorizationError),
}
+

+
impl UploadError {
+
    /// Check if it's an end-of-file error.
+
    pub fn is_eof(&self) -> bool {
+
        matches!(self, UploadError::UploadPack(e) if e.kind() == io::ErrorKind::UnexpectedEof)
+
    }
+
}
+

+
/// Error raised while authorizing a peer's fetch of a repository.
#[derive(thiserror::Error, Debug)]
pub enum AuthorizationError {
    /// The node is not allowed to fetch the given repository.
    #[error("{0} is not authorized to fetch {1}")]
    Unauthorized(NodeId, RepoId),
    /// Failure accessing the policy store during the authorization check.
    #[error(transparent)]
    PolicyStore(#[from] radicle::node::policy::store::Error),
    /// Failure accessing the storage repository during the authorization check.
    #[error(transparent)]
    Repository(#[from] radicle::storage::RepositoryError),
}
+

+
/// Fetch job sent to worker thread.
#[derive(Debug, Clone)]
pub enum FetchRequest {
    /// Client is initiating a fetch for the repository identified by
    /// `rid` from the peer identified by `remote`.
    Initiator {
        /// Repo to fetch.
        rid: RepoId,
        /// Remote peer we are interacting with.
        remote: NodeId,
        /// If this fetch is for a particular set of `rad/sigrefs`.
        refs_at: Option<Vec<RefsAt>>,
    },
    /// Server is responding to a fetch request initiated by the client
    /// identified by `remote`, uploading the refs the client asked for.
    Responder {
        /// Remote peer we are interacting with.
        remote: NodeId,
        /// Reporter for upload-pack progress.
        emitter: Emitter<Event>,
    },
}
+

+
impl FetchRequest {
+
    pub fn remote(&self) -> NodeId {
+
        match self {
+
            Self::Initiator { remote, .. } | Self::Responder { remote, .. } => *remote,
+
        }
+
    }
+
}
+

+
/// Fetch result of an upload or fetch.
#[derive(Debug)]
pub enum FetchResult {
    /// Outcome of a fetch we initiated ([`FetchRequest::Initiator`]).
    Initiator {
        /// Repo fetched.
        rid: RepoId,
        /// Fetch result, including remotes fetched.
        result: Result<fetch::FetchResult, FetchError>,
    },
    /// Outcome of serving a fetch to a remote ([`FetchRequest::Responder`]).
    Responder {
        /// Repo requested.
        // NOTE(review): presumably `None` when the upload failed before the
        // requested repo could be determined — confirm with the worker code.
        rid: Option<RepoId>,
        /// Upload result.
        result: Result<(), UploadError>,
    },
}
added crates/radicle-protocol/src/worker/fetch.rs
@@ -0,0 +1,41 @@
+
pub mod error;
+

+
use std::collections::HashSet;
+

+
use radicle::crypto::PublicKey;
+
use radicle::{identity::DocAt, storage::RefUpdate};
+

+
/// Outcome data of a repository fetch: which refs and namespaces changed,
/// whether the fetch was a fresh clone, and the repo's identity document.
#[derive(Debug, Clone)]
pub struct FetchResult {
    /// The set of updated references.
    pub updated: Vec<RefUpdate>,
    /// The set of remote namespaces that were updated.
    pub namespaces: HashSet<PublicKey>,
    /// The fetch was a full clone.
    pub clone: bool,
    /// Identity doc of fetched repo.
    pub doc: DocAt,
}
+

+
impl FetchResult {
+
    pub fn new(doc: DocAt) -> Self {
+
        Self {
+
            updated: vec![],
+
            namespaces: HashSet::new(),
+
            clone: false,
+
            doc,
+
        }
+
    }
+
}
+

+
// Random generation of fetch results for property-based tests (qcheck).
#[cfg(any(test, feature = "test"))]
impl qcheck::Arbitrary for FetchResult {
    fn arbitrary(g: &mut qcheck::Gen) -> Self {
        FetchResult {
            // NOTE(review): `updated` is always generated empty — presumably
            // `RefUpdate` has no `Arbitrary` impl; confirm.
            updated: vec![],
            namespaces: HashSet::arbitrary(g),
            clone: bool::arbitrary(g),
            doc: DocAt::arbitrary(g),
        }
    }
}
added crates/radicle-protocol/src/worker/fetch/error.rs
@@ -0,0 +1,75 @@
+
use std::io;
+

+
use thiserror::Error;
+

+
use radicle::{cob, git, identity, storage};
+
use radicle_fetch as fetch;
+

+
/// Error raised while performing a fetch.
#[derive(Debug, Error)]
pub enum Fetch {
    /// Error from running the fetch itself (`radicle_fetch`).
    #[error(transparent)]
    Run(#[from] fetch::Error),
    /// Error surfaced by the raw git layer (`git::raw`).
    #[error(transparent)]
    Git(#[from] git::raw::Error),
    /// Storage-layer error.
    #[error(transparent)]
    Storage(#[from] storage::Error),
    /// I/O error while copying storage.
    #[error(transparent)]
    StorageCopy(#[from] io::Error),
    /// Error accessing the storage repository.
    #[error(transparent)]
    Repository(#[from] radicle::storage::RepositoryError),
    /// Error from the node's refs database.
    #[error(transparent)]
    RefsDb(#[from] radicle::node::refs::Error),
    /// Not enough delegates validated to meet the configured threshold.
    #[error("validation of the storage repository failed: the delegates {delegates:?} failed to validate to meet a threshold of {threshold}")]
    Validation {
        /// Number of delegates required to validate.
        threshold: usize,
        /// The delegates that failed to validate.
        delegates: Vec<String>,
    },
    /// Error updating the COB cache.
    #[error(transparent)]
    Cache(#[from] Cache),
}
+

+
/// Error raised while maintaining the COB cache.
#[derive(Debug, Error)]
pub enum Cache {
    /// Failed to parse a COB identifier.
    #[error(transparent)]
    Parse(#[from] cob::ParseIdentifierError),
    /// Error accessing the storage repository.
    #[error(transparent)]
    Repository(#[from] storage::RepositoryError),
    /// Failed to remove an object from the cache.
    #[error("failed to remove {type_name} '{id}' from cache: {err}")]
    Remove {
        /// Object that could not be removed.
        id: cob::ObjectId,
        /// COB type of the object.
        type_name: cob::TypeName,
        /// Underlying cache error.
        #[source]
        err: Box<dyn std::error::Error + Send + Sync + 'static>,
    },
    /// Error from the COB store.
    #[error(transparent)]
    Store(#[from] cob::store::Error),
    /// Failed to update an object in the cache.
    #[error("failed to update {type_name} '{id}' in cache: {err}")]
    Update {
        /// Object that could not be updated.
        id: cob::ObjectId,
        /// COB type of the object.
        type_name: cob::TypeName,
        /// Underlying cache error.
        #[source]
        err: Box<dyn std::error::Error + Send + Sync + 'static>,
    },
}
+

+
/// Error raised while setting up or using a fetch handle.
#[derive(Debug, Error)]
pub enum Handle {
    /// Error loading the identity document.
    #[error(transparent)]
    Doc(#[from] identity::DocError),
    /// Underlying I/O error.
    #[error(transparent)]
    Io(#[from] io::Error),
    /// Failed to initialize the fetch handle (`radicle_fetch`).
    #[error(transparent)]
    Init(#[from] fetch::handle::error::Init),
    /// Storage-layer error.
    #[error(transparent)]
    Storage(#[from] storage::Error),
    /// Error accessing the storage repository.
    #[error(transparent)]
    Repository(#[from] radicle::storage::RepositoryError),
}
+

+
/// Error raised while resolving canonical references.
#[derive(Debug, Error)]
pub enum Canonical {
    /// Error resolving the repository identity.
    #[error(transparent)]
    Identity(#[from] radicle::storage::RepositoryError),
    /// Error computing canonical refs from the identity doc.
    #[error(transparent)]
    CanonicalRefs(#[from] radicle::identity::doc::CanonicalRefsError),
}