Radish alpha
h
rad:z3gqcJUoA1n9HaHKufZs5FCSGazv5
Radicle Heartwood Protocol & Stack
Radicle
Git
Fix to `rad node debug`
Merged fintohaps opened 2 months ago
3 files changed +88 -12 e9245b63 f018b434
modified crates/radicle-node/src/runtime/handle.rs
@@ -356,10 +356,16 @@ impl radicle::node::Handle for Handle {
    fn debug(&self) -> Result<serde_json::Value, Self::Error> {
        let (sender, receiver) = chan::bounded(1);
        let query: Arc<QueryState> = Arc::new(move |state| {
+            let fetching = debug::Fetching::new(state.fetching());
            let debug = serde_json::json!({
                "outboxSize": state.outbox().len(),
-                "fetching": state.fetching(),
-                "rateLimiter": state.limiter(),
+                "fetching": fetching,
+                "rateLimiter": state.limiter().buckets.iter().map(|(host, bucket)| {
+                    json!({
+                        "host": host.to_string(),
+                        "bucket": bucket
+                    })
+                }).collect::<Vec<_>>(),
                "events": json!({
                    "subscribers": state.emitter().subscriptions(),
                    "pending": state.emitter().pending(),
@@ -379,3 +385,76 @@ impl radicle::node::Handle for Handle {
        Ok(debug)
    }
}
+
+mod debug {
+    //! Serialization formats for the output of [`Handle::debug`].
+
+    use radicle_protocol::fetcher;
+    use radicle_protocol::fetcher::FetcherState;
+    use serde::Serialize;
+
+    use super::{NodeId, RefsAt, RepoId};
+
+    #[derive(Serialize)]
+    #[serde(rename_all = "camelCase")]
+    pub struct Fetching {
+        active: Vec<ActiveFetch>,
+        queued: Vec<QueuedFetch>,
+    }
+
+    impl Fetching {
+        pub fn new(state: &FetcherState) -> Self {
+            let active = state
+                .active_fetches()
+                .iter()
+                .map(|(rid, fetch)| ActiveFetch::new(*rid, fetch.clone()))
+                .collect();
+            let queued = state
+                .queued_fetches()
+                .iter()
+                .flat_map(|(node, queue)| {
+                    queue
+                        .iter()
+                        .map(|fetch| QueuedFetch::new(*node, fetch.clone()))
+                })
+                .collect();
+            Self { active, queued }
+        }
+    }
+
+    #[derive(Serialize)]
+    #[serde(rename_all = "camelCase")]
+    pub struct ActiveFetch {
+        rid: RepoId,
+        from: NodeId,
+        refs_at: Vec<RefsAt>,
+    }
+
+    impl ActiveFetch {
+        pub fn new(rid: RepoId, fetch: fetcher::ActiveFetch) -> Self {
+            Self {
+                rid,
+                from: fetch.from,
+                refs_at: fetch.refs.into(),
+            }
+        }
+    }
+
+    #[derive(Serialize)]
+    #[serde(rename_all = "camelCase")]
+    pub struct QueuedFetch {
+        nid: NodeId,
+        rid: RepoId,
+        refs_at: Vec<RefsAt>,
+    }
+
+    impl QueuedFetch {
+        pub fn new(node: NodeId, fetch: fetcher::QueuedFetch) -> Self {
+            Self {
+                nid: node,
+                rid: fetch.rid,
+                refs_at: fetch.refs.into(),
+            }
+        }
+    }
+}
modified crates/radicle-protocol/src/fetcher/state.rs
@@ -10,7 +10,6 @@ pub mod event;

pub use command::Command;
pub use event::Event;
-use serde::Serialize;

use std::collections::{BTreeMap, VecDeque};
use std::num::NonZeroUsize;
@@ -43,7 +42,7 @@ pub const MAX_CONCURRENCY: NonZeroUsize = NonZeroUsize::MIN;
/// of fetches can happen with it concurrently. This does not guarantee that the
/// node will actually allow this node to fetch from it – since it will maintain
/// its own capacity for connections and load.
-#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
+#[derive(Clone, Debug, PartialEq, Eq)]
pub struct FetcherState {
    /// The active fetches that are occurring, ensuring only one fetch per repository.
    active: BTreeMap<RepoId, ActiveFetch>,
@@ -236,7 +235,7 @@ impl FetcherState {
}

/// Configuration for the [`FetcherState`].
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct Config {
    /// Maximum number of concurrent fetches per peer connection.
    maximum_concurrency: NonZeroUsize,
@@ -272,7 +271,7 @@ impl Default for Config {
}

/// An active fetch represents a repository being fetched by a particular node.
-#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
+#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ActiveFetch {
    pub from: NodeId,
    pub refs: RefsToFetch,
@@ -291,7 +290,7 @@ impl ActiveFetch {
}

/// A fetch that is waiting to be processed, in the fetch queue.
-#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize)]
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct QueuedFetch {
    /// The repository that will be fetched.
    pub rid: RepoId,
@@ -305,15 +304,14 @@ pub struct QueuedFetch {
///
/// It ensures that the queue contains unique items for fetching, and does not
/// exceed the provided maximum capacity.
-#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
+#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Queue {
    queue: VecDeque<QueuedFetch>,
    max_queue_size: MaxQueueSize,
}

/// The maximum number of fetches that can be queued for a single node.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
-#[serde(transparent)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct MaxQueueSize(usize);

impl MaxQueueSize {
modified crates/radicle-protocol/src/service/limiter.rs
@@ -2,7 +2,6 @@ use std::collections::{HashMap, HashSet};

use localtime::LocalTime;
use radicle::node::{address, config, HostName, NodeId};
-use serde::Serialize;

/// Peer rate limiter.
///
@@ -10,7 +9,7 @@ use serde::Serialize;
/// and every request from that address consumes one token. Tokens refill at a predefined
/// rate. This mechanism allows for consistent request rates with potential bursts up to the
/// bucket's capacity.
-#[derive(Debug, Default, Serialize)]
+#[derive(Debug, Default)]
pub struct RateLimiter {
    pub buckets: HashMap<HostName, TokenBucket>,
    pub bypass: HashSet<NodeId>,