Radish alpha
h
Radicle Heartwood Protocol & Stack
Radicle
Git (anonymous pull)
Log in to clone via SSH
simulation: Add justfile and network.cue topology definition
Adrian Duke committed 29 days ago
commit 7c98f0ac4875e89de6e97c46db3280e388ab9ba7
parent 3d80aa4c59a6c86f063132217a61fb80b295aed3
3 files changed +216 -0
added simulation/.gitignore
@@ -0,0 +1,2 @@
+controlplane.yaml
+worker.yaml
added simulation/instances/network.cue
@@ -0,0 +1,80 @@
+package main
+
+// Pre-calculated NIDs.
+#BootstrapNIDs: {
+	"bootstrap-0": "z6MkhJ3cwzpAoNjFnJXWETSPHcDyw2HuBVEhgkyTfbjQHY1B"
+	"bootstrap-1": "z6MkjcaeSHhQVJU1UeXpnHHZ6mp67zDfQYNMDotHGxbrk7Nj"
+	"bootstrap-2": "z6MkjNGhuJvdp2noidRMLqco4jFnNNSWzCxSZH5nJV1pGrwQ"
+	"bootstrap-3": "z6MkpEsXUMSnmyfwdEVkAKijTxGy9WKmNoHWpoxxLM6bbz9M"
+}
+
+// Shared configs
+#SeedAddress: {
+	nid:   string
+	name:  string
+	role:  string | *"bootstrap"
+	index: int | *0
+	out:   "\(nid)@\(name)-\(index).\(role).default.svc.cluster.local:8776"
+}
+
+#BaseBootstrapSeedConfig: {
+	node: {
+		listen: ["0.0.0.0:8776"]
+		seedingPolicy: {
+			default: "allow"
+			scope:   "all"
+		}
+		...
+	}
+	...
+}
+
+#BasePeerConfig: {
+	node: {
+		listen: []
+		peers: type: "dynamic"
+		connect: []
+		externalAddresses: []
+		log:   "INFO"
+		relay: "auto"
+		limits: {
+			routingMaxSize:   1000
+			routingMaxAge:    604800
+			gossipMaxAge:     1209600
+			fetchConcurrency: 1
+			maxOpenFiles:     4096
+			rate: {
+				inbound: {fillRate: 5.0, capacity: 1024}
+				outbound: {fillRate: 10.0, capacity: 2048}
+			}
+			connection: {inbound: 128, outbound: 16}
+			fetchPackReceive: "500.0 MiB"
+		}
+		seedingPolicy: default: "block"
+		...
+	}
+	...
+}
+
+values: {
+	topology: {
+		// Instances
+		"bootstrap-v1-8-0": {
+			role:          "bootstrap"
+			version:       "1.8.0"
+			replicas:      1
+			nodeIdSeed:    "bootstrap-0"
+			radicleConfig: #BaseBootstrapSeedConfig
+		}
+		"peer-v1-8-0": {
+			role:          "peer"
+			version:       "1.8.0"
+			replicas:      1
+			radicleConfig: #BasePeerConfig & {
+				preferredSeeds: [
+					(#SeedAddress & {nid: #BootstrapNIDs["bootstrap-0"], name: "bootstrap-v1-8-0"}).out,
+				]
+			}
+		}
+	}
+}
added simulation/justfile
@@ -0,0 +1,134 @@
+provisioner := env_var_or_default("PROVISIONER", "qemu")
+cluster_name := "radicle-" + provisioner
+clusters_dir := env_var("HOME") + "/.talos/clusters"
+radicle_node_module := "./modules/radicle-node"
+kubectl_context := `kubectl config current-context 2>/dev/null || echo "none"`
+
+SUCCESS := "✅ " + GREEN + BOLD
+CHECK := "🔄 " + BOLD
+WARN := "⚠️ " + YELLOW + BOLD
+ERROR := "❌ " + RED + BOLD
+HINT := "💡 " + BOLD
+
+default:
+    @just --list
+
+# Setup and start the complete simulation environment
+[group('start')]
+[group('setup')]
+start: setup start-network
+    @echo ""
+    @echo "{{SUCCESS}}Simulation started!{{NORMAL}}"
+    @echo ""
+
+# Setup cluster and dependencies
+[group('setup')]
+setup: configure-cluster
+    @echo "{{SUCCESS}}Setup complete{{NORMAL}}"
+
+# Create the Talos cluster if it doesn't exist
+[private]
+create-cluster: (verify-tool "talosctl")
+    #!/usr/bin/env bash
+    set -e
+    if [ ! -d "{{clusters_dir}}/{{cluster_name}}" ]; then
+        echo "{{CHECK}}Creating Talos cluster '{{cluster_name}}' using {{provisioner}}...{{NORMAL}}"
+        mkdir -p "{{clusters_dir}}"
+        if [ "{{provisioner}}" = "qemu" ]; then
+            sudo --preserve-env=HOME talosctl cluster create --name={{cluster_name}} {{provisioner}} --config-patch-controlplanes '{"cluster": {"allowSchedulingOnControlPlanes": true}}'
+        else
+            talosctl cluster create --name={{cluster_name}} {{provisioner}} --config-patch-controlplanes '{"cluster": {"allowSchedulingOnControlPlanes": true}}'
+        fi
+    else
+        echo "{{SUCCESS}}Cluster '{{cluster_name}}' already exists.{{NORMAL}}"
+    fi
+
+# Configure the Kubernetes cluster
+[private]
+configure-cluster: (verify-tool "kubectl") create-cluster
+    @echo "{{CHECK}}Configuring cluster...{{NORMAL}}"
+    # Add local-path storage system
+    @kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml
+    # Relax security on namespaces
+    @kubectl label namespace local-path-storage pod-security.kubernetes.io/enforce=privileged pod-security.kubernetes.io/warn=privileged pod-security.kubernetes.io/audit=privileged --overwrite
+    @kubectl label namespace default pod-security.kubernetes.io/enforce=privileged pod-security.kubernetes.io/warn=privileged pod-security.kubernetes.io/audit=privileged --overwrite
+    # Set default storage class
+    @kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
+
+# Start the simulation network only
+[group('start')]
+start-network: (verify-tool "timoni")
+    @echo "{{CHECK}}Starting simulation network...{{NORMAL}}"
+    @timoni apply radicle-network {{radicle_node_module}} -f instances/network.cue
+    @just show-cluster
+
+# Show cluster status
+[group('inspect')]
+show-cluster: (verify-tool "kubectl") (verify-tool "talosctl")
+    @echo "Cluster: {{cluster_name}}"
+    @echo "Context: {{kubectl_context}}"
+    @talosctl cluster show --name {{cluster_name}} --provisioner {{provisioner}} || true
+    @kubectl get pods -o wide
+
+# Delete simulation pods and resources
+[group('delete')]
+delete: delete-pods delete-pvc
+    @echo "{{SUCCESS}}Simulation cleaned up{{NORMAL}}"
+
+# Delete pods only
+[group('delete')]
+delete-pods: (verify-tool "kubectl")
+    @echo "{{CHECK}}Deleting pods...{{NORMAL}}"
+    @kubectl delete pods -l app=radicle-node --wait=false
+
+# Delete storage volumes
+[group('delete')]
+delete-pvc: (verify-tool "kubectl")
+    @echo "{{CHECK}}Deleting storage volumes...{{NORMAL}}"
+    @kubectl delete pvc -l app=radicle-node --wait=false
+
+# Destroy the Talos cluster and clean up kubeconfig
+[group('delete')]
+destroy: (verify-tool "kubectl") (verify-tool "talosctl") show-cluster
+    #!/usr/bin/env bash
+    set -e
+    echo ""
+    echo -n "Are you sure you want to destroy the cluster and remove kubeconfig entries? [y/N] "
+    read answer
+    if [ "${answer:-N}" != "y" ]; then
+        echo "Aborted."
+        exit 1
+    fi
+
+    echo "{{CHECK}}Destroying talos cluster '{{cluster_name}}'...{{NORMAL}}"
+    if [ "{{provisioner}}" = "qemu" ]; then
+        sudo --preserve-env=HOME talosctl cluster destroy --name {{cluster_name}} --provisioner {{provisioner}}
+    else
+        talosctl cluster destroy --name {{cluster_name}} --provisioner {{provisioner}}
+    fi
+
+    echo "{{CHECK}}Removing kube config entries...{{NORMAL}}"
+    CONTEXT=$(kubectl config current-context 2>/dev/null || echo "")
+    if [ -n "$CONTEXT" ]; then
+        CLUSTER=$(echo "$CONTEXT" | cut -d '@' -f 2)
+        kubectl config delete-context "$CONTEXT" || true
+        kubectl config delete-cluster "$CLUSTER" || true
+        kubectl config unset "users.$CONTEXT" || true
+    fi
+    echo "{{WARN}}Make sure you remove the '{{cluster_name}}' entry from: ~/.talos/config{{NORMAL}}"
+    echo "{{SUCCESS}}Cluster destroyed.{{NORMAL}}"
+
+# Check if required tools are in PATH.
+# `package_name` overrides the install hint when the package differs from the binary name.
+[private]
+verify-tool tool package_name="":
+    #!/usr/bin/env bash
+    set -e
+    if ! command -v {{tool}} >/dev/null 2>&1; then
+        PKG="{{package_name}}"
+        # Fall back to the tool name when no explicit package name was given.
+        if [ -z "$PKG" ]; then
+            PKG="{{tool}}"
+        fi
+        echo "{{ERROR}}Missing required tool: {{tool + NORMAL}}"
+        echo "{{HINT}}Use your system's package manager to install '$PKG'.{{NORMAL}}"
+        exit 1
+    fi