Radish alpha
h
Radicle Heartwood Protocol & Stack
Radicle
Git (anonymous pull)
Log in to clone via SSH
simulation: Cargo test runner
Adrian Duke committed 25 days ago
commit b0ebf48b29eecc31739f71504590fdf4878d27bf
parent 3aa436595f06d263a3ddd774532f90f7359a181e
7 files changed +580 -0
added simulation/instances/basic_cross_version_network.cue
@@ -0,0 +1,90 @@
+
@if(!debug)

package main

// Shared seed list: every peer bootstraps off both bootstrap nodes, so the
// addresses are defined once here instead of being repeated in each peer
// entry. Hidden (`_`-prefixed) fields are stripped on export, so the emitted
// JSON is identical to spelling the list out inline.
_preferredSeeds: [
	(#SeedAddress & {nid: #BootstrapNIDs["bootstrap-0"], name: "bootstrap-v1-6-1"}).out,
	(#SeedAddress & {nid: #BootstrapNIDs["bootstrap-1"], name: "bootstrap-v1-7-0"}).out,
]

values: {
	topology: {
		"bootstrap-v1-6-1": {
			role:          "bootstrap"
			version:       "1.6.1"
			replicas:      1
			nodeIdSeed:    "bootstrap-0"
			radicleConfig: #BaseBootstrapSeedConfig
		}

		"bootstrap-v1-7-0": {
			role:          "bootstrap"
			version:       "1.7.0"
			replicas:      1
			nodeIdSeed:    "bootstrap-1"
			radicleConfig: #BaseBootstrapSeedConfig
		}

		"peer-v1-5-0": {
			role:          "peer"
			version:       "1.5.0"
			replicas:      1
			radicleConfig: #BasePeerConfig & {preferredSeeds: _preferredSeeds}
		}
		"peer-v1-6-0": {
			role:          "peer"
			version:       "1.6.0"
			replicas:      1
			radicleConfig: #BasePeerConfig & {preferredSeeds: _preferredSeeds}
		}
		"peer-v1-6-1": {
			role:          "peer"
			version:       "1.6.1"
			replicas:      1
			radicleConfig: #BasePeerConfig & {preferredSeeds: _preferredSeeds}
		}
		"peer-v1-7-0": {
			role:          "peer"
			version:       "1.7.0"
			replicas:      2
			radicleConfig: #BasePeerConfig & {preferredSeeds: _preferredSeeds}
		}
		"peer-v1-7-1": {
			role:          "peer"
			version:       "1.7.1"
			replicas:      2
			radicleConfig: #BasePeerConfig & {preferredSeeds: _preferredSeeds}
		}
		"peer-v1-8-0": {
			role:          "peer"
			version:       "1.8.0"
			replicas:      1
			radicleConfig: #BasePeerConfig & {preferredSeeds: _preferredSeeds}
		}
	}
}
added simulation/radicle-simulation/src/lib.rs
@@ -0,0 +1,36 @@
+
//! Radicle Simulation Testing Framework
+
//!
+
//! This crate provides a high-level, ergonomic framework for writing integration
+
//! and simulation tests for Radicle networks. It abstracts away the complexities
+
//! of Kubernetes pod execution, identity management, and Radicle CLI interactions.
+

+
pub mod constants;
+
pub mod network;
+
pub mod node;
+

+
/// Sets up the network module and `require_network` helper for a given CUE instance.
///
/// This macro dynamically includes the auto-generated Rust bindings for the specified
/// CUE network topology file (generated by `build.rs`). It also generates a `require_network`
/// function that tests can call to ensure the network is provisioned and ready before executing.
///
/// NOTE(review): `require_network` references `env_logger` unqualified, so the
/// *invoking* crate must have `env_logger` as a (dev-)dependency — confirm.
#[macro_export]
macro_rules! setup_network {
    ($name:expr) => {
        pub mod network {
            // Include the auto-generated bindings
            include!(concat!(env!("OUT_DIR"), "/", $name, "_bindings.rs"));
        }

        /// Ensures the simulated network is provisioned and ready for testing.
        ///
        /// This function initializes the test logger and applies the CUE network topology.
        /// It returns a reference-counted `NetworkGuard`. The network will remain active
        /// as long as at least one test holds a guard, and will be automatically torn down
        /// when the final guard is dropped (unless `PRESERVE_NETWORK=1` is set).
        pub fn require_network() -> std::sync::Arc<$crate::network::NetworkGuard> {
            // TODO(Ade): Doesn't feel right here, but is ergonomic
            // `try_init`'s result is discarded: a second call simply means an
            // earlier test already installed the logger.
            let _ = env_logger::builder().is_test(true).try_init();
            $crate::network::apply_network(concat!("../instances/", $name, ".cue"))
        }
    };
}
added simulation/radicle-simulation/src/network.rs
@@ -0,0 +1,136 @@
+
//! Network lifecycle management for the Radicle simulation environment.
+

+
use crate::constants::*;
+
use crate::node::Node;
+
use std::io::Write;
+
use std::process::{Command, Stdio};
+
use std::str;
+
use std::sync::{Arc, Mutex, Weak};
+

+
/// A guard that ensures the network is torn down when tests finish.
pub struct NetworkGuard {
    /// Path of the CUE topology file this network was provisioned from.
    /// Within this module it is only used in teardown/skip log messages.
    cue_path: String,
}
+

+
impl Drop for NetworkGuard {
+
    fn drop(&mut self) {
+
        if let Ok(val) = std::env::var("PRESERVE_NETWORK") {
+
            if val == "1" || val.eq_ignore_ascii_case("true") {
+
                println!(
+
                    "⏭️ PRESERVE_NETWORK is set. Skipping cleanup for {}.",
+
                    self.cue_path
+
                );
+
                return;
+
            }
+
        }
+

+
        println!("🔄 Tearing down network topology from {}...", self.cue_path);
+
        let status = Command::new("timoni")
+
            .args(["delete", TIMONI_INSTANCE_NAME])
+
            .status();
+

+
        match status {
+
            Ok(s) if s.success() => println!("✅ Network torn down successfully."),
+
            _ => eprintln!("⚠️ Warning: Failed to cleanly tear down network."),
+
        }
+
    }
+
}
+

+
// Process-wide registry of the currently provisioned network. Held as a
// `Weak` so that dropping the last strong `Arc<NetworkGuard>` triggers
// teardown (see `Drop for NetworkGuard`) rather than keeping it alive.
static NETWORK_GUARD: Mutex<Weak<NetworkGuard>> = Mutex::new(Weak::new());
+

+
/// Applies the specified CUE network topology to the Kubernetes cluster.
///
/// This function evaluates the CUE files, pipes the resulting JSON to Timoni,
/// and waits for all pods and Radicle nodes to become fully responsive.
///
/// Returns an `Arc<NetworkGuard>`; the network is torn down once the last
/// clone of that guard is dropped.
///
/// # Panics
///
/// Panics if the `cue`, `timoni`, or `kubectl` binaries cannot be spawned, or
/// if any provisioning step fails — this is test infrastructure, so it fails
/// loudly rather than returning an error.
pub fn apply_network(cue_path: &str) -> Arc<NetworkGuard> {
    // The mutex stays locked for the whole provisioning run, so concurrent
    // callers serialize here and only the first one actually provisions.
    let mut weak_guard = NETWORK_GUARD.lock().unwrap();

    // If the network is already running, just return a clone of the Arc
    if let Some(arc) = weak_guard.upgrade() {
        return arc;
    }

    println!("🔄 Applying network topology from {}...", cue_path);

    // Merge and evaluate CUE files into a single JSON stream
    let cue_export = Command::new("cue")
        .args(["export", SCHEMA_CUE_PATH, cue_path, "--out", "json"])
        .output()
        .expect("Failed to execute cue export");

    if !cue_export.status.success() {
        panic!(
            "CUE export failed:\n{}",
            String::from_utf8_lossy(&cue_export.stderr)
        );
    }

    // Pipe the evaluated JSON directly into Timoni
    let mut timoni = Command::new("timoni")
        .args([
            "apply",
            TIMONI_INSTANCE_NAME,
            TIMONI_MODULE_PATH,
            "--values",
            "-", // The '-' tells Timoni to read values from stdin
        ])
        .stdin(Stdio::piped())
        .spawn()
        .expect("Failed to spawn timoni");

    // Write the JSON to Timoni's stdin
    if let Some(mut stdin) = timoni.stdin.take() {
        stdin
            .write_all(&cue_export.stdout)
            .expect("Failed to write to timoni stdin");
    }
    // `stdin` is dropped at the end of the `if let` above, closing the pipe
    // so Timoni sees EOF before we wait on it.

    let status = timoni.wait().expect("Failed to wait on timoni");
    assert!(status.success(), "Timoni apply failed for {}", cue_path);

    // Wait for pods to be Ready
    println!("🔄 Waiting for pods to be ready...");
    let wait_status = Command::new("kubectl")
        .args([
            "wait",
            "--for=condition=Ready",
            "pod",
            "-l",
            "app=radicle-node",
            "--timeout=300s",
        ])
        .status()
        .expect("Failed to wait for pods");
    assert!(wait_status.success(), "Pods did not become ready in time");

    // Wait for Radicle nodes to be fully responsive
    println!("🔄 Waiting for Radicle nodes to be fully responsive...");
    let output = Command::new("kubectl")
        .args([
            "get",
            "pods",
            "-l",
            "app=radicle-node",
            "-o",
            "jsonpath={.items[*].metadata.name}",
        ])
        .output()
        .expect("Failed to execute kubectl get pods");

    let stdout = str::from_utf8(&output.stdout).unwrap_or("");
    let pods: Vec<&str> = stdout.split_whitespace().collect();

    // A pod can be Ready before the Radicle CLI inside it answers, so poll
    // each node until `rad node status` succeeds.
    for pod_name in pods {
        let node = Node::new(pod_name);
        node.wait_until_responsive()
            .expect("Node did not become responsive");
    }
    println!("✅ Network {} is ready!", cue_path);

    // Publish a weak reference so later callers reuse this network while at
    // least one strong guard is still alive.
    let arc = Arc::new(NetworkGuard {
        cue_path: cue_path.to_string(),
    });
    *weak_guard = Arc::downgrade(&arc);

    arc
}
added simulation/radicle-simulation/src/node.rs
@@ -0,0 +1,275 @@
+
//! Node and Repository abstractions for the Radicle simulation environment.
+
//!
+
//! This module provides the core types used to interact with Radicle nodes
+
//! running inside the Kubernetes cluster. It includes executors for running
+
//! commands, persona builders for identity management, and high-level wrappers
+
//! for Radicle CLI operations (issues, patches, syncing).
+

+
use std::process::Command;
+
use std::str;
+
use std::thread;
+
use std::time::Duration;
+
use uuid::Uuid;
+

+
/// Escapes single quotes for safe interpolation inside a shell single-quoted string.
///
/// Each `'` becomes `'\''` — close the quote, emit an escaped quote, reopen.
fn escape_sh(s: &str) -> String {
    let mut escaped = String::with_capacity(s.len());
    for ch in s.chars() {
        match ch {
            '\'' => escaped.push_str("'\\''"),
            other => escaped.push(other),
        }
    }
    escaped
}
+

+
/// Trait defining how commands are executed within a Radicle node's environment.
pub trait NodeExecutor {
    /// Executes a command on the specified node and returns the combined stdout/stderr.
    ///
    /// # Errors
    ///
    /// Returns a human-readable error string when the command cannot be run
    /// or exits unsuccessfully.
    fn exec(&self, node_name: &str, cmd: &[&str]) -> Result<String, String>;
}
+

+
/// Default executor implementation that uses the `kubectl exec` CLI command.
///
/// This executor shells out to the local `kubectl` binary to run commands inside
/// the `node` container of the target Kubernetes pod.
///
/// Unit struct: carries no state, so it is trivially `Clone` and `Debug`.
#[derive(Clone, Debug)]
pub struct KubectlExecutor;
+

+
impl NodeExecutor for KubectlExecutor {
+
    fn exec(&self, node_name: &str, cmd: &[&str]) -> Result<String, String> {
+
        let mut args = vec!["exec", "-i", node_name, "-c", "node", "--"];
+
        args.extend_from_slice(cmd);
+

+
        let output = Command::new("kubectl")
+
            .args(&args)
+
            .output()
+
            .map_err(|e| format!("Failed to execute kubectl: {}", e))?;
+

+
        let stdout = str::from_utf8(&output.stdout)
+
            .unwrap_or("")
+
            .trim()
+
            .to_string();
+
        let stderr = str::from_utf8(&output.stderr)
+
            .unwrap_or("")
+
            .trim()
+
            .to_string();
+

+
        if output.status.success() {
+
            // Git commands often write success messages to stderr, so we combine them
+
            let mut combined = stdout;
+
            if !stderr.is_empty() {
+
                combined.push('\n');
+
                combined.push_str(&stderr);
+
            }
+
            Ok(combined.trim().to_string())
+
        } else {
+
            let mut combined = stderr;
+
            if !stdout.is_empty() {
+
                combined.push('\n');
+
                combined.push_str(&stdout);
+
            }
+
            Err(format!(
+
                "Command failed: {}\nStderr: {}",
+
                args.join(" "),
+
                combined.trim()
+
            ))
+
        }
+
    }
+
}
+

+
/// Represents a Radicle node running in the simulated network.
///
/// This struct provides methods to interact with the node's CLI, manage its Git
/// identity (persona), and initialize or clone repositories.
///
/// Generic over the executor type `E` (default [`KubectlExecutor`]), so an
/// alternative [`NodeExecutor`] implementation can be substituted.
#[derive(Clone, Debug)]
pub struct Node<E = KubectlExecutor> {
    /// Kubernetes pod name of the node.
    pub name: String,
    /// The executor used to run commands on this node.
    pub executor: E,
    /// Optional Git identity (Name, Email) assigned to this node.
    pub persona: Option<(String, String)>,
}
+

+
impl Node<KubectlExecutor> {
+
    /// Creates a new `Node` instance using the default `KubectlExecutor`.
+
    pub fn new(name: impl Into<String>) -> Self {
+
        Self {
+
            name: name.into(),
+
            executor: KubectlExecutor,
+
            persona: None,
+
        }
+
    }
+
}
+

+
impl<E: NodeExecutor + Clone> Node<E> {
+
    /// Internal helper to set the Git identity on the node and store it in the struct.
+
    pub fn with_persona(mut self, name: &str, email: &str) -> Result<Self, String> {
+
        self.setup_identity(name, email)?;
+
        self.persona = Some((name.to_string(), email.to_string()));
+
        Ok(self)
+
    }
+

+
    /// Configures the node with the standard "Alice" test persona.
+
    pub fn as_alice(self) -> Result<Self, String> {
+
        self.with_persona("Alice", "alice@radicle.local")
+
    }
+

+
    /// Executes a command inside the node container via the configured executor.
+
    ///
+
    /// This method automatically logs the command being executed (at the `INFO` level)
+
    /// and its output or failure (at the `DEBUG` or `ERROR` levels).
+
    pub fn exec(&self, cmd: &[&str]) -> Result<String, String> {
+
        let identity = if let Some((name, _)) = &self.persona {
+
            format!("{}@{}", name, self.name)
+
        } else {
+
            self.name.clone()
+
        };
+

+
        log::info!("[{}] $ {}", identity, cmd.join(" "));
+

+
        let result = self.executor.exec(&self.name, cmd);
+

+
        match &result {
+
            Ok(output) if !output.trim().is_empty() => {
+
                log::debug!("[{}] Output:\n{}", identity, output.trim());
+
            }
+
            Err(err) => {
+
                log::error!("[{}] Failed:\n{}", identity, err);
+
            }
+
            _ => {}
+
        }
+

+
        result
+
    }
+

+
    /// Convenience wrapper for executing raw shell scripts inside the node.
+
    pub fn exec_sh(&self, script: &str) -> Result<String, String> {
+
        self.exec(&["sh", "-c", script])
+
    }
+

+
    /// Configures the global Git user name and email on the node.
+
    ///
+
    /// Uses a retry loop to prevent parallel tests from failing due to `gitconfig` file locks.
+
    pub fn setup_identity(&self, name: &str, email: &str) -> Result<(), String> {
+
        let name_esc = escape_sh(name);
+
        let email_esc = escape_sh(email);
+

+
        let mut retries = 10;
+
        while retries > 0 {
+
            if self
+
                .exec_sh(&format!("git config --global user.name '{name_esc}'"))
+
                .is_ok()
+
            {
+
                break;
+
            }
+
            retries -= 1;
+
            thread::sleep(Duration::from_millis(100));
+
        }
+
        if retries == 0 {
+
            return Err("Failed to set git config user.name".to_string());
+
        }
+

+
        let mut retries = 10;
+
        while retries > 0 {
+
            if self
+
                .exec_sh(&format!("git config --global user.email '{email_esc}'"))
+
                .is_ok()
+
            {
+
                break;
+
            }
+
            retries -= 1;
+
            thread::sleep(Duration::from_millis(100));
+
        }
+
        if retries == 0 {
+
            return Err("Failed to set git config user.email".to_string());
+
        }
+

+
        Ok(())
+
    }
+

+
    /// Initializes a new Radicle repository on this node.
+
    ///
+
    /// This method automatically appends a UUID to the repository name to ensure
+
    /// parallel tests do not collide. It initializes a Git repository, creates the
+
    /// specified number of commits, and initializes it as a Radicle project.
+
    pub fn init_test_repo(
+
        &self,
+
        base_name: &str,
+
        desc: &str,
+
        commits: u32,
+
    ) -> Result<Repository<E>, String> {
+
        let (author, email) = self
+
            .persona
+
            .as_ref()
+
            .ok_or("Persona not set! Call .as_alice() (or similar) before creating a repo.")?;
+

+
        let uuid = Uuid::new_v4()
+
            .to_string()
+
            .chars()
+
            .take(6)
+
            .collect::<String>();
+
        let repo_name = format!("{}-{}-{}", base_name, author, uuid);
+

+
        let author_esc = escape_sh(author);
+
        let email_esc = escape_sh(email);
+
        let desc_esc = escape_sh(desc);
+

+
        let script = format!(
+
            "mkdir -p {repo_name} && \
+
            cd {repo_name} && \
+
            git init && \
+
            git config user.name '{author_esc}' && \
+
            git config user.email '{email_esc}' && \
+
            echo 'commit 1' > file_1.txt && \
+
            git add file_1.txt && \
+
            git commit -m 'Commit 1' && \
+
            rad init --name {repo_name} --description '{desc_esc}' --public --no-confirm"
+
        );
+
        self.exec_sh(&script)?;
+

+
        if commits > 1 {
+
            for i in 2..=commits {
+
                let commit_script = format!(
+
                    "cd {repo_name} && \
+
                    echo 'commit {i}' > file_{i}.txt && \
+
                    git add file_{i}.txt && \
+
                    git commit -m 'Commit {i}' && \
+
                    git push rad master"
+
                );
+
                self.exec_sh(&commit_script)?;
+
            }
+
        }
+

+
        let rid = self.exec_sh(&format!("cd {} && rad . 2>/dev/null", repo_name))?;
+

+
        Ok(Repository {
+
            node: self.clone(),
+
            name: repo_name,
+
            rid,
+
        })
+
    }
+

+
    /// Waits until the Radicle node API is fully responsive.
+
    pub fn wait_until_responsive(&self) -> Result<(), String> {
+
        let mut retries = 30;
+
        while retries > 0 {
+
            if self.exec(&["rad", "node", "status"]).is_ok() {
+
                return Ok(());
+
            }
+
            retries -= 1;
+
            thread::sleep(Duration::from_secs(1));
+
        }
+
        Err(format!("Node {} did not become responsive", self.name))
+
    }
+
}
+

+
/// Represents a Radicle repository residing on a specific node.
///
/// This struct provides ergonomic, high-level methods for interacting with
/// the repository, such as syncing, managing issues, and handling patches.
/// All commands executed through this struct are automatically run within
/// the repository's directory on the node.
#[derive(Clone, Debug)]
pub struct Repository<E = KubectlExecutor> {
    /// The node where this repository resides.
    pub node: Node<E>,
    /// The local directory name of the repository.
    pub name: String,
    /// The Radicle ID (RID) of the repository, as reported by `rad .`
    /// during `init_test_repo`.
    pub rid: String,
}
added simulation/radicle-simulation/tests/cross_version_suite/mod.rs
@@ -0,0 +1 @@
+
pub mod repository_creation;
added simulation/radicle-simulation/tests/cross_version_suite/repository_creation.rs
@@ -0,0 +1,35 @@
+
use crate::network;
+
use crate::require_network;
+
use radicle_simulation::node::Node;
+
use rstest::rstest;
+

+
// TODO(Ade): The `negX` numbering is horrible... need a better naming convention.
//
// Each case supplies a node (presumably selected by a version offset relative
// to the current release — verify against the generated `network` bindings)
// and a number of commits to create in the test repository.
#[rstest]
#[case::v_neg4_single_commit(network::peer_relative(-4, 0), 1)]
#[case::v_neg4_multiple_commits(network::peer_relative(-4, 0), 3)]
#[case::v_neg3_single_commit(network::peer_relative(-3, 0), 1)]
#[case::v_neg3_multiple_commits(network::peer_relative(-3, 0), 3)]
#[case::v_neg2_single_commit(network::peer_relative(-2, 0), 1)]
#[case::v_neg2_multiple_commits(network::peer_relative(-2, 0), 3)]
#[case::v_neg1_single_commit(network::peer_relative(-1, 0), 1)]
#[case::v_neg1_multiple_commits(network::peer_relative(-1, 0), 3)]
#[case::v_current_single_commit(network::peer_relative(0, 0), 1)]
#[case::v_current_multiple_commits(network::peer_relative(0, 0), 3)]
fn initializes_valid_repositories_with_single_and_multiple_commits(
    #[case] mut node: Node,
    #[case] commits: u32,
) -> Result<(), String> {
    // Hold a guard for the duration of the test so the shared network stays up.
    let _guard = require_network();

    node = node.as_alice()?;

    let repo = node.init_test_repo("Repo", "Test Repo", commits)?;

    // A successful `rad init` yields a Radicle ID of the form `rad:<id>`.
    assert!(
        repo.rid.starts_with("rad:"),
        "Expected valid RID, got: {}",
        repo.rid
    );

    Ok(())
}
added simulation/radicle-simulation/tests/cross_version_suite_main.rs
@@ -0,0 +1,7 @@
+
use radicle_simulation::setup_network;

// Sets up the network once for the whole suite.
// It generates `crate::network` and `crate::require_network`.
setup_network!("basic_cross_version_network");

// Test modules below share the single network provisioned above.
mod cross_version_suite;