Radish alpha
h
rad:z3gqcJUoA1n9HaHKufZs5FCSGazv5
Radicle Heartwood Protocol & Stack
Radicle
Git
radicle/config/node: Granular Default Values
Merged lorenz opened 8 months ago

Provide default values not only per struct, but for each individual struct member. This allows more granular configuration and fallback to defaults.

8 files changed +222 -127 77f63c76 9a7c2253
modified crates/radicle-cli/tests/util/environment.rs
@@ -36,11 +36,13 @@ pub(crate) mod config {
                    inbound: RateLimit {
                        fill_rate: 1.0,
                        capacity: usize::MAX,
-                    },
+                    }
+                    .into(),
                    outbound: RateLimit {
                        fill_rate: 1.0,
                        capacity: usize::MAX,
-                    },
+                    }
+                    .into(),
                },
                ..Limits::default()
            },
modified crates/radicle-node/src/main.rs
@@ -90,7 +90,7 @@ fn execute() -> anyhow::Result<()> {
    let config = options.config.unwrap_or_else(|| home.config());
    let mut config = profile::Config::load(&config)?;

-    let level = options.log.unwrap_or(config.node.log);
+    let level = options.log.unwrap_or_else(|| config.node.log.into());

    let logger = {
        let journal = {
@@ -135,7 +135,7 @@ fn execute() -> anyhow::Result<()> {
        config.node.listen.clone()
    };

-    if let Err(e) = radicle::io::set_file_limit(config.node.limits.max_open_files) {
+    if let Err(e) = radicle::io::set_file_limit::<usize>(config.node.limits.max_open_files.into()) {
        log::warn!(target: "node", "Unable to set process open file limit: {e}");
    }

modified crates/radicle-node/src/runtime.rs
@@ -245,7 +245,7 @@ impl Runtime {
            cobs_cache,
            db,
            worker::Config {
-                capacity: config.workers,
+                capacity: config.workers.into(),
                storage: storage.clone(),
                fetch,
                policy,
modified crates/radicle-node/src/tests.rs
@@ -308,8 +308,8 @@ fn test_inventory_pruning() {
        // All zero
        Test {
            limits: Limits {
-                routing_max_size: 0,
-                routing_max_age: LocalDuration::from_secs(0),
+                routing_max_size: 0.into(),
+                routing_max_age: LocalDuration::from_secs(0).into(),
                ..Limits::default()
            },
            peer_projects: vec![10; 5],
@@ -319,8 +319,8 @@ fn test_inventory_pruning() {
        // All entries are too young to expire.
        Test {
            limits: Limits {
-                routing_max_size: 0,
-                routing_max_age: LocalDuration::from_mins(7 * 24 * 60),
+                routing_max_size: 0.into(),
+                routing_max_age: LimitRoutingMaxAge::from(LocalDuration::from_mins(7 * 24 * 60)),
                ..Limits::default()
            },
            peer_projects: vec![10; 5],
@@ -330,8 +330,8 @@ fn test_inventory_pruning() {
        // All entries remain because the table is unconstrained.
        Test {
            limits: Limits {
-                routing_max_size: 50,
-                routing_max_age: LocalDuration::from_mins(0),
+                routing_max_size: 50.into(),
+                routing_max_age: LocalDuration::from_mins(0).into(),
                ..Limits::default()
            },
            peer_projects: vec![10; 5],
@@ -341,8 +341,8 @@ fn test_inventory_pruning() {
        // Some entries are pruned because the table is constrained.
        Test {
            limits: Limits {
-                routing_max_size: 25,
-                routing_max_age: LocalDuration::from_mins(7 * 24 * 60),
+                routing_max_size: 25.into(),
+                routing_max_age: LocalDuration::from_mins(7 * 24 * 60).into(),
                ..Limits::default()
            },
            peer_projects: vec![10; 5],
modified crates/radicle-node/src/tests/e2e.rs
@@ -679,7 +679,7 @@ fn test_concurrent_fetches() {
    let repos = scale.max(4);
    let limits = Limits {
        // Have one fetch be queued.
-        fetch_concurrency: repos - 1,
+        fetch_concurrency: (repos - 1).into(),
        ..Limits::default()
    };
    let mut bob_repos = HashSet::new();
modified crates/radicle-protocol/src/service.rs
@@ -27,7 +27,7 @@ use radicle::node;
use radicle::node::address;
use radicle::node::address::Store as _;
use radicle::node::address::{AddressBook, AddressType, KnownAddress};
-use radicle::node::config::PeerConfig;
+use radicle::node::config::{PeerConfig, RateLimit};
use radicle::node::device::Device;
use radicle::node::refs::Store as _;
use radicle::node::routing::Store as _;
@@ -845,7 +845,7 @@ where
            if let Err(err) = self
                .db
                .gossip_mut()
-                .prune((now - self.config.limits.gossip_max_age).into())
+                .prune((now - LocalDuration::from(self.config.limits.gossip_max_age)).into())
            {
                error!(target: "service", "Error pruning gossip entries: {err}");
            }
@@ -1302,7 +1302,7 @@ where
            return true;
        }
        // Check for inbound connection limit.
-        if self.sessions.inbound().count() >= self.config.limits.connection.inbound {
+        if self.sessions.inbound().count() >= self.config.limits.connection.inbound.into() {
            return false;
        }
        match self.db.addresses().is_ip_banned(ip) {
@@ -1315,13 +1315,9 @@ where
            Err(e) => error!(target: "service", "Error querying ban status for {ip}: {e}"),
        }
        let host: HostName = ip.into();
+        let tokens = RateLimit::from(self.config.limits.rate.inbound.clone());

-        if self.limiter.limit(
-            host.clone(),
-            None,
-            &self.config.limits.rate.inbound,
-            self.clock,
-        ) {
+        if self.limiter.limit(host.clone(), None, &tokens, self.clock) {
            trace!(target: "service", "Rate limiting inbound connection from {host}..");
            return false;
        }
@@ -1824,13 +1820,13 @@ where
        };
        peer.last_active = self.clock;

-        let limit = match peer.link {
-            Link::Outbound => &self.config.limits.rate.outbound,
-            Link::Inbound => &self.config.limits.rate.inbound,
+        let limit: RateLimit = match peer.link {
+            Link::Outbound => self.config.limits.rate.outbound.clone().into(),
+            Link::Inbound => self.config.limits.rate.inbound.clone().into(),
        };
        if self
            .limiter
-            .limit(peer.addr.clone().into(), Some(remote), limit, self.clock)
+            .limit(peer.addr.clone().into(), Some(remote), &limit, self.clock)
        {
            debug!(target: "service", "Rate limiting message from {remote} ({})", peer.addr);
            return Ok(());
@@ -2260,7 +2256,7 @@ where
        if self.sessions.contains_key(&nid) {
            return Err(ConnectError::SessionExists { nid });
        }
-        if self.sessions.outbound().count() >= self.config.limits.connection.outbound {
+        if self.sessions.outbound().count() >= self.config.limits.connection.outbound.into() {
            return Err(ConnectError::LimitReached { nid, addr });
        }
        let persistent = self.config.is_persistent(&nid);
@@ -2432,14 +2428,14 @@ where

    fn prune_routing_entries(&mut self, now: &LocalTime) -> Result<(), routing::Error> {
        let count = self.db.routing().len()?;
-        if count <= self.config.limits.routing_max_size {
+        if count <= self.config.limits.routing_max_size.into() {
            return Ok(());
        }

-        let delta = count - self.config.limits.routing_max_size;
+        let delta = count - usize::from(self.config.limits.routing_max_size);
        let nid = self.node_id();
        self.db.routing_mut().prune(
-            (*now - self.config.limits.routing_max_age).into(),
+            (*now - LocalDuration::from(self.config.limits.routing_max_age)).into(),
            Some(delta),
            &nid,
        )?;
modified crates/radicle-protocol/src/service/session.rs
@@ -226,7 +226,7 @@ impl Session {

    pub fn is_at_capacity(&self) -> bool {
        if let State::Connected { fetching, .. } = &self.state {
-            if fetching.len() >= self.limits.fetch_concurrency {
+            if fetching.len() >= self.limits.fetch_concurrency.into() {
                return true;
            }
        }
modified crates/radicle/src/node/config.rs
@@ -15,9 +15,6 @@ use crate::node::{Address, Alias, NodeId};
/// Peer-to-peer protocol version.
pub type ProtocolVersion = u8;

-/// Default number of workers to spawn.
-pub const DEFAULT_WORKERS: usize = 8;
-
/// Configured public seeds.
pub mod seeds {
    use std::{
@@ -123,56 +120,35 @@ impl Network {
}

/// Configuration parameters defining attributes of minima and maxima.
-#[derive(Debug, Clone, Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+#[serde(default, rename_all = "camelCase")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Limits {
    /// Number of routing table entries before we start pruning.
-    pub routing_max_size: usize,
+    pub routing_max_size: LimitRoutingMaxSize,
+
    /// How long to keep a routing table entry before being pruned.
-    #[serde(with = "crate::serde_ext::localtime::duration")]
-    #[cfg_attr(
-        feature = "schemars",
-        schemars(with = "crate::schemars_ext::localtime::LocalDuration")
-    )]
-    pub routing_max_age: LocalDuration,
+    pub routing_max_age: LimitRoutingMaxAge,
+
    /// How long to keep a gossip message entry before pruning it.
-    #[serde(with = "crate::serde_ext::localtime::duration")]
-    #[cfg_attr(
-        feature = "schemars",
-        schemars(with = "crate::schemars_ext::localtime::LocalDuration")
-    )]
-    pub gossip_max_age: LocalDuration,
+    pub gossip_max_age: LimitGossipMaxAge,
+
    /// Maximum number of concurrent fetches per peer connection.
-    pub fetch_concurrency: usize,
+    pub fetch_concurrency: LimitFetchConcurrency,
+
    /// Maximum number of open files.
-    pub max_open_files: usize,
+    pub max_open_files: LimitMaxOpenFiles,
+
    /// Rate limitter settings.
-    #[serde(default)]
    pub rate: RateLimits,
+
    /// Connection limits.
-    #[serde(default)]
    pub connection: ConnectionLimits,
+
    /// Channel limits.
-    #[serde(default)]
    pub fetch_pack_receive: FetchPackSizeLimit,
}

-impl Default for Limits {
-    fn default() -> Self {
-        Self {
-            routing_max_size: 1000,
-            routing_max_age: LocalDuration::from_mins(7 * 24 * 60), // One week
-            gossip_max_age: LocalDuration::from_mins(2 * 7 * 24 * 60), // Two weeks
-            fetch_concurrency: 1,
-            max_open_files: 4096,
-            rate: RateLimits::default(),
-            connection: ConnectionLimits::default(),
-            fetch_pack_receive: FetchPackSizeLimit::default(),
-        }
-    }
-}
-
/// Limiter for byte streams.
///
/// Default: 500MiB
@@ -264,27 +240,20 @@ impl Default for FetchPackSizeLimit {
}

/// Connection limits.
-#[derive(Debug, Clone, Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+#[serde(default, rename_all = "camelCase")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct ConnectionLimits {
    /// Max inbound connections.
-    pub inbound: usize,
-    /// Max outbound connections. Note that this can be higher than the *target* number.
-    pub outbound: usize,
-}
+    pub inbound: LimitConnectionsInbound,

-impl Default for ConnectionLimits {
-    fn default() -> Self {
-        Self {
-            inbound: 128,
-            outbound: 16,
-        }
-    }
+    /// Max outbound connections. Note that this can be higher than the *target* number.
+    pub outbound: LimitConnectionsOutbound,
}

/// Rate limts for a single connection.
-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize, Display)]
+#[display("RateLimit(fill_rate={fill_rate}, capacity={capacity})")]
#[serde(rename_all = "camelCase")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct RateLimit {
@@ -293,27 +262,13 @@ pub struct RateLimit {
}

/// Rate limits for inbound and outbound connections.
-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct RateLimits {
-    pub inbound: RateLimit,
-    pub outbound: RateLimit,
-}
+    pub inbound: LimitRateInbound,

-impl Default for RateLimits {
-    fn default() -> Self {
-        Self {
-            inbound: RateLimit {
-                fill_rate: 5.0,
-                capacity: 1024,
-            },
-            outbound: RateLimit {
-                fill_rate: 10.0,
-                capacity: 2048,
-            },
-        }
-    }
+    pub outbound: LimitRateOutbound,
}

/// Full address used to connect to a remote node.
@@ -375,22 +330,17 @@ impl Deref for ConnectAddress {
}

/// Peer configuration.
-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase", tag = "type")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum PeerConfig {
    /// Static peer set. Connect to the configured peers and maintain the connections.
    Static,
    /// Dynamic peer set.
+    #[default]
    Dynamic,
}

-impl Default for PeerConfig {
-    fn default() -> Self {
-        Self::Dynamic
-    }
-}
-
/// Relay configuration.
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
@@ -492,13 +442,8 @@ pub struct Config {
    #[serde(default)]
    pub network: Network,
    /// Log level.
-    #[serde(default = "defaults::log")]
-    #[serde(with = "crate::serde_ext::string")]
-    #[cfg_attr(
-        feature = "schemars",
-        schemars(with = "crate::schemars_ext::log::Level")
-    )]
-    pub log: log::Level,
+    #[serde(default)]
+    pub log: LogLevel,
    /// Whether or not our node should relay messages.
    #[serde(default, deserialize_with = "crate::serde_ext::ok_or_default")]
    pub relay: Relay,
@@ -506,8 +451,8 @@ pub struct Config {
    #[serde(default)]
    pub limits: Limits,
    /// Number of worker threads to spawn.
-    #[serde(default = "defaults::workers")]
-    pub workers: usize,
+    #[serde(default)]
+    pub workers: Workers,
    /// Default seeding policy.
    #[serde(default)]
    pub seeding_policy: DefaultSeedingPolicy,
@@ -536,8 +481,8 @@ impl Config {
            onion: None,
            relay: Relay::default(),
            limits: Limits::default(),
-            workers: DEFAULT_WORKERS,
-            log: defaults::log(),
+            workers: Workers::default(),
+            log: LogLevel::default(),
            seeding_policy: DefaultSeedingPolicy::default(),
            extra: json::Map::default(),
        }
@@ -574,15 +519,167 @@ impl Config {
    }
}

-/// Defaults as functions, for serde.
-mod defaults {
-    /// Worker count.
-    pub fn workers() -> usize {
-        super::DEFAULT_WORKERS
+#[derive(Clone, Copy, Debug, Display, Deserialize, Serialize, From)]
+#[serde(transparent)]
+#[display("{0}")]
+#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
+pub struct LogLevel(
+    #[serde(with = "crate::serde_ext::string")]
+    #[cfg_attr(
+        feature = "schemars",
+        schemars(with = "crate::schemars_ext::log::Level")
+    )]
+    log::Level,
+);
+
+impl Default for LogLevel {
+    fn default() -> Self {
+        Self(log::Level::Info)
    }
+}

-    /// Log level.
-    pub fn log() -> log::Level {
-        log::Level::Info
+impl From<LogLevel> for log::Level {
+    fn from(value: LogLevel) -> Self {
+        value.0
+    }
+}
+
+#[derive(Clone, Copy, Debug, Deserialize, Serialize, Eq, PartialEq)]
+#[serde(transparent)]
+#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
+pub struct LimitRoutingMaxAge(
+    #[serde(with = "crate::serde_ext::localtime::duration")]
+    #[cfg_attr(
+        feature = "schemars",
+        schemars(with = "crate::schemars_ext::localtime::LocalDuration")
+    )]
+    localtime::LocalDuration,
+);
+
+impl Default for LimitRoutingMaxAge {
+    fn default() -> Self {
+        Self(localtime::LocalDuration::from_mins(7 * 24 * 60)) // One week
+    }
+}
+
+impl From<LimitRoutingMaxAge> for LocalDuration {
+    fn from(value: LimitRoutingMaxAge) -> Self {
+        value.0
+    }
+}
+
+impl From<LocalDuration> for LimitRoutingMaxAge {
+    fn from(value: LocalDuration) -> Self {
+        Self(value)
+    }
+}
+
+#[derive(Clone, Copy, Debug, Deserialize, Serialize, Eq, PartialEq)]
+#[serde(transparent)]
+#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
+pub struct LimitGossipMaxAge(
+    #[serde(with = "crate::serde_ext::localtime::duration")]
+    #[cfg_attr(
+        feature = "schemars",
+        schemars(with = "crate::schemars_ext::localtime::LocalDuration")
+    )]
+    localtime::LocalDuration,
+);
+
+impl Default for LimitGossipMaxAge {
+    fn default() -> Self {
+        Self(localtime::LocalDuration::from_mins(2 * 7 * 24 * 60)) // Two weeks
+    }
+}
+
+impl From<LimitGossipMaxAge> for LocalDuration {
+    fn from(value: LimitGossipMaxAge) -> Self {
+        value.0
+    }
+}
+
+macro_rules! wrapper {
+    ($name:ident, $type:ty, $default:expr $(, $derive:ty)*) => {
+        #[derive(Clone, Debug, Deserialize, Display, Serialize, From $(, $derive)*)]
+        #[display("{0}")]
+        #[serde(transparent)]
+        #[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
+        pub struct $name($type);
+
+        impl Default for $name {
+            fn default() -> Self {
+                Self($default)
+            }
+        }
+
+        impl From<$name> for $type {
+            fn from(value: $name) -> Self {
+                value.0
+            }
+        }
+    };
+}
+wrapper!(Workers, usize, 8, Copy);
+wrapper!(LimitConnectionsInbound, usize, 128, Copy);
+wrapper!(LimitConnectionsOutbound, usize, 16, Copy);
+wrapper!(LimitRoutingMaxSize, usize, 1000, Copy);
+wrapper!(LimitFetchConcurrency, usize, 1, Copy);
+wrapper!(
+    LimitRateInbound,
+    RateLimit,
+    RateLimit {
+        fill_rate: 5.0,
+        capacity: 1024,
+    }
+);
+wrapper!(LimitMaxOpenFiles, usize, 4096, Copy);
+wrapper!(
+    LimitRateOutbound,
+    RateLimit,
+    RateLimit {
+        fill_rate: 10.0,
+        capacity: 2048,
+    }
+);
+
+#[cfg(test)]
+#[allow(clippy::unwrap_used)]
+mod test {
+    #[test]
+    fn partial() {
+        use super::Config;
+        use serde_json::json;
+
+        let config: Config = serde_json::from_value(json!({
+            "alias": "example",
+            "limits": {
+                "connection": {
+                    "inbound": 1337,
+                },
+            },
+        }
+        ))
+        .unwrap();
+        assert_eq!(config.limits.connection.inbound.0, 1337);
+        assert_eq!(
+            config.limits.connection.outbound.0,
+            super::LimitConnectionsOutbound::default().0,
+        );
+
+        let config: Config = serde_json::from_value(json!({
+            "alias": "example",
+            "limits": {
+                "connection": {
+                    "outbound": 1337,
+                },
+            },
+        }
+        ))
+        .unwrap();
+        assert_eq!(
+            config.limits.connection.inbound.0,
+            super::LimitConnectionsInbound::default().0,
+        );
+        assert_eq!(config.limits.connection.outbound.0, 1337);
    }
}