Radish alpha
h
rad:z3gqcJUoA1n9HaHKufZs5FCSGazv5
Radicle Heartwood Protocol & Stack
Radicle
Git
node/debug: Use derived serializers
Merged lorenz opened 2 months ago

The construction of the debug object is unwieldy and error-prone (for example, renamed struct members have to be manually renamed in the serialization code; see “refs” vs. “refsAt”).

Use derived serializers where possible to make this easier to maintain.

8 files changed +19 -39 d530f126 5099c25d
modified Cargo.lock
@@ -679,6 +679,7 @@ dependencies = [
 "amplify",
 "base32",
 "cyphergraphy",
+ "serde",
 "sha3",
]

@@ -3083,7 +3084,7 @@ dependencies = [
 "bloomy",
 "bytes",
 "crossbeam-channel",
- "cyphernet",
+ "cypheraddr",
 "fastrand",
 "log",
 "nonempty",
modified Cargo.toml
@@ -25,6 +25,7 @@ bytes = "1.11.1"
chrono = { version = "0.4.26", default-features = false }
colored = "2.1.0"
crossbeam-channel = "0.5.6"
+cypheraddr = "0.4.0"
cyphernet = "0.5.2"
dunce = "1.0.5"
fastrand = { version = "2.0.0", default-features = false }
modified crates/radicle-node/src/runtime/handle.rs
@@ -350,35 +350,10 @@ impl radicle::node::Handle for Handle {
    fn debug(&self) -> Result<serde_json::Value, Self::Error> {
        let (sender, receiver) = chan::bounded(1);
        let query: Arc<QueryState> = Arc::new(move |state| {
-            let fetcher_state = state.fetching();
            let debug = serde_json::json!({
                "outboxSize": state.outbox().len(),
-                "fetching": fetcher_state.active_fetches()
-                    .iter()
-                    .map(|(rid, active)| {
-                        json!({
-                            "rid": rid,
-                            "from": active.from(),
-                            "refsAt": active.refs(),
-                        })
-                    }).collect::<Vec<_>>(),
-                "queue": fetcher_state.queued_fetches().iter().map(|(node, queue)| {
-                    json!({
-                        "nid": node,
-                        "queue": queue.iter().map(|fetch| {
-                            json!({
-                                "rid": fetch.rid,
-                                "refsAt": fetch.refs,
-                            })
-                        }).collect::<Vec<_>>()
-                    })
-                }).collect::<Vec<_>>(),
-                "rateLimiter": state.limiter().buckets.iter().map(|(host, bucket)| {
-                    json!({
-                        "host": host.to_string(),
-                        "bucket": bucket
-                    })
-                }).collect::<Vec<_>>(),
+                "fetching": state.fetching(),
+                "rateLimiter": state.limiter(),
                "events": json!({
                    "subscribers": state.emitter().subscriptions(),
                    "pending": state.emitter().pending(),
modified crates/radicle-protocol/Cargo.toml
@@ -15,7 +15,7 @@ test = ["radicle/test", "radicle-crypto/test", "radicle-crypto/cyphernet", "qche
bloomy = "1.2"
bytes = { workspace = true }
crossbeam-channel = { workspace = true }
-cyphernet = { workspace = true, features = ["tor"] }
+cypheraddr = { workspace = true, features = ["serde", "tor"] }
fastrand = { workspace = true }
log = { workspace = true, features = ["std"] }
nonempty = { workspace = true, features = ["serialize"] }
modified crates/radicle-protocol/src/fetcher/state.rs
@@ -10,6 +10,7 @@ pub mod event;

pub use command::Command;
pub use event::Event;
+use serde::Serialize;

use std::collections::{BTreeMap, VecDeque};
use std::num::NonZeroUsize;
@@ -42,7 +43,7 @@ pub const MAX_CONCURRENCY: NonZeroUsize = NonZeroUsize::MIN;
/// of fetches can happen with it concurrently. This does not guarantee that the
/// node will actually allow this node to fetch from it – since it will maintain
/// its own capacity for connections and load.
-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct FetcherState {
    /// The active fetches that are occurring, ensuring only one fetch per repository.
    active: BTreeMap<RepoId, ActiveFetch>,
@@ -235,7 +236,7 @@ impl FetcherState {
}

/// Configuration for the [`FetcherState`].
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize)]
pub struct Config {
    /// Maximum number of concurrent fetches per peer connection.
    maximum_concurrency: NonZeroUsize,
@@ -271,7 +272,7 @@ impl Default for Config {
}

/// An active fetch represents a repository being fetched by a particular node.
-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct ActiveFetch {
    pub from: NodeId,
    pub refs: RefsToFetch,
@@ -290,7 +291,7 @@ impl ActiveFetch {
}

/// A fetch that is waiting to be processed, in the fetch queue.
-#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize)]
pub struct QueuedFetch {
    /// The repository that will be fetched.
    pub rid: RepoId,
@@ -304,14 +305,15 @@ pub struct QueuedFetch {
///
/// It ensures that the queue contains unique items for fetching, and does not
/// exceed the provided maximum capacity.
-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct Queue {
    queue: VecDeque<QueuedFetch>,
    max_queue_size: MaxQueueSize,
}

/// The maximum number of fetches that can be queued for a single node.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
+#[serde(transparent)]
pub struct MaxQueueSize(usize);

impl MaxQueueSize {
modified crates/radicle-protocol/src/service/limiter.rs
@@ -2,6 +2,7 @@ use std::collections::{HashMap, HashSet};

use localtime::LocalTime;
use radicle::node::{address, config, HostName, NodeId};
+use serde::Serialize;

/// Peer rate limiter.
///
@@ -9,7 +10,7 @@ use radicle::node::{address, config, HostName, NodeId};
/// and every request from that address consumes one token. Tokens refill at a predefined
/// rate. This mechanism allows for consistent request rates with potential bursts up to the
/// bucket's capacity.
-#[derive(Debug, Default)]
+#[derive(Debug, Default, Serialize)]
pub struct RateLimiter {
    pub buckets: HashMap<HostName, TokenBucket>,
    pub bypass: HashSet<NodeId>,
modified crates/radicle-protocol/src/wire.rs
@@ -15,7 +15,7 @@ use std::string::FromUtf8Error;

use bytes::{Buf, BufMut};

-use cyphernet::addr::tor;
+use cypheraddr::tor;

use radicle::crypto::{PublicKey, Signature, Unverified};
use radicle::git;
@@ -257,7 +257,7 @@ impl Encode for Refs {
    }
}

-impl Encode for cyphernet::addr::tor::OnionAddrV3 {
+impl Encode for cypheraddr::tor::OnionAddrV3 {
    fn encode(&self, buf: &mut impl BufMut) {
        self.into_raw_bytes().encode(buf)
    }
modified crates/radicle-protocol/src/wire/message.rs
@@ -2,7 +2,7 @@ use std::{mem, net};

use bytes::Buf;
use bytes::BufMut;
-use cyphernet::addr::{tor, HostName, NetAddr};
+use cypheraddr::{tor, HostName, NetAddr};
use radicle::crypto::Signature;
use radicle::git::Oid;
use radicle::identity::RepoId;