Radish alpha
r
rad:zwTxygwuz5LDGBq255RA2CbNGrz8
Radicle CI broker
Radicle
Git
chore: rewrite nested if-lets to not be nested
Lars Wirzenius committed 2 months ago
commit 82ed0bc067032f155a9e15056604aa535194e624
parent 2ee045e
8 files changed +167 -169
modified src/adapter.rs
@@ -46,10 +46,10 @@ impl Adapters {
        adapters: &HashMap<String, AdapterSpec>,
        default_adapter: Option<&str>,
    ) -> Result<Self, AdapterError> {
-
        if let Some(default) = default_adapter {
-
            if !adapters.contains_key(default) {
-
                return Err(AdapterError::NoDefaultAdapter);
-
            }
+
        if let Some(default) = default_adapter
+
            && !adapters.contains_key(default)
+
        {
+
            return Err(AdapterError::NoDefaultAdapter);
        }

        Ok(Self {
modified src/bin/cibtoolcmd/log.rs
@@ -64,10 +64,10 @@ impl LogCmd {
    }

    fn allowed_by_log_level(&self, msg: &Value) -> bool {
-
        if let Some(wanted) = &self.log_level {
-
            if let Some(actual) = log_level(msg) {
-
                return actual >= *wanted;
-
            }
+
        if let Some(wanted) = &self.log_level
+
            && let Some(actual) = log_level(msg)
+
        {
+
            return actual >= *wanted;
        }
        true
    }
modified src/broker.rs
@@ -139,10 +139,10 @@ impl Broker {
        // it. We won't do anything about a failure, as there's
        // nothing useful we can do about it, as long as we let CI
        // run, which we want to do.
-
        if let Ok(mut known) = known_job_cobs.lock() {
-
            if let Some(job_id) = known.create_job(trigger.repo(), oid) {
-
                run.set_job_id(job_id);
-
            }
+
        if let Ok(mut known) = known_job_cobs.lock()
+
            && let Some(job_id) = known.create_job(trigger.repo(), oid)
+
        {
+
            run.set_job_id(job_id);
        }

        // We run the adapter, but if that fails, we just
modified src/ci_event.rs
@@ -845,13 +845,12 @@ impl ParsedRef {
        fn parse_patch_id(refname: &RefString) -> Option<ParsedRef> {
            const PATTERN: &str = r"^refs/namespaces/[^/]+/refs/heads/patches/([^/]+)$";
            let re = Regex::new(PATTERN).unwrap();
-
            if let Some(captures) = re.captures(refname) {
-
                if let Some(patch_id) = captures.get(1) {
-
                    if let Ok(oid) = Oid::try_from(patch_id.as_str()) {
-
                        let patch_id = PatchId::from(oid);
-
                        return Some(ParsedRef::Patch(patch_id));
-
                    }
-
                }
+
            if let Some(captures) = re.captures(refname)
+
                && let Some(patch_id) = captures.get(1)
+
                && let Ok(oid) = Oid::try_from(patch_id.as_str())
+
            {
+
                let patch_id = PatchId::from(oid);
+
                return Some(ParsedRef::Patch(patch_id));
            }
            None
        }
@@ -859,25 +858,24 @@ impl ParsedRef {
        fn parse_branch_name(refname: &RefString) -> Option<ParsedRef> {
            const PATTERN: &str = r"^refs/namespaces/[^/]+/refs/heads/(.+)$";
            let re = Regex::new(PATTERN).unwrap();
-
            if let Some(captures) = re.captures(refname) {
-
                if let Some(branch_name) = captures.get(1) {
-
                    if let Ok(branch_name) = branch_from_str(branch_name.as_str()) {
-
                        return Some(ParsedRef::Branch(branch_name));
-
                    }
-
                }
+
            if let Some(captures) = re.captures(refname)
+
                && let Some(branch_name) = captures.get(1)
+
                && let Ok(branch_name) = branch_from_str(branch_name.as_str())
+
            {
+
                return Some(ParsedRef::Branch(branch_name));
            }
+

            None
        }

        fn parse_tag_name(refname: &RefString) -> Option<ParsedRef> {
            const PATTERN: &str = r"^refs/namespaces/[^/]+/refs/tags/(.+)$";
            let re = Regex::new(PATTERN).unwrap();
-
            if let Some(captures) = re.captures(refname) {
-
                if let Some(tag_name) = captures.get(1) {
-
                    if let Ok(tag_name) = ref_string(tag_name.as_str()) {
-
                        return Some(ParsedRef::Tag(tag_name.into()));
-
                    }
-
                }
+
            if let Some(captures) = re.captures(refname)
+
                && let Some(tag_name) = captures.get(1)
+
                && let Ok(tag_name) = ref_string(tag_name.as_str())
+
            {
+
                return Some(ParsedRef::Tag(tag_name.into()));
            }
            None
        }
modified src/cob.rs
@@ -97,15 +97,15 @@ impl KnownJobCobs {
        url: &Url,
        announce: bool,
    ) {
-
        if let Some(job_id) = self.create_job(repo_id, oid) {
-
            if let Err(err) = self.fallible_create_run(job_id, repo_id, run_id, url, announce) {
-
                logger::job_failure(
-
                    "failed to add a run to a job COB",
-
                    &repo_id,
-
                    &oid,
-
                    Some(&err),
-
                );
-
            }
+
        if let Some(job_id) = self.create_job(repo_id, oid)
+
            && let Err(err) = self.fallible_create_run(job_id, repo_id, run_id, url, announce)
+
        {
+
            logger::job_failure(
+
                "failed to add a run to a job COB",
+
                &repo_id,
+
                &oid,
+
                Some(&err),
+
            );
        }
    }

modified src/filter.rs
@@ -359,10 +359,10 @@ impl Decision {
}

fn is_default_branch(repo_id: &RepoId, wanted: &str) -> bool {
-
    if let Ok(wanted) = ref_string(wanted) {
-
        if let Ok(default) = get_default_branch(repo_id) {
-
            return wanted == default;
-
        }
+
    if let Ok(wanted) = ref_string(wanted)
+
        && let Ok(default) = get_default_branch(repo_id)
+
    {
+
        return wanted == default;
    }

    false
modified src/pages.rs
@@ -501,12 +501,12 @@ impl PageData {
    }

    fn info_url(run: Option<&Run>) -> Element {
-
        if let Some(run) = run {
-
            if let Some(url) = run.adapter_info_url() {
-
                return Element::new(Tag::A)
-
                    .with_attribute("href", url)
-
                    .with_text("info");
-
            }
+
        if let Some(run) = run
+
            && let Some(url) = run.adapter_info_url()
+
        {
+
            return Element::new(Tag::A)
+
                .with_attribute("href", url)
+
                .with_text("info");
        }
        Element::new(Tag::Span)
    }
@@ -916,111 +916,110 @@ impl StatusPage {
    }

    fn update_and_write(&mut self) -> Result<(), PageError> {
-
        if let Some(dirname) = &self.dirname {
-
            if dirname.exists() {
-
                let runs = self.args.db.get_all_runs()?;
-

-
                // Create list of events, except ones for private
-
                // repositories.
-
                let events: Result<Vec<QueuedCiEvent>, PageError> = self
-
                    .args
-
                    .db
-
                    .queued_ci_events()?
-
                    .iter()
-
                    .filter_map(|id| match self.args.db.get_queued_ci_event(id) {
-
                        Ok(Some(event)) => match event.event() {
-
                            CiEvent::V1(CiEventV1::Shutdown) => Some(Ok(event)),
-
                            CiEvent::V1(CiEventV1::BranchCreated { repo, .. })
-
                            | CiEvent::V1(CiEventV1::BranchUpdated { repo, .. })
-
                            | CiEvent::V1(CiEventV1::PatchCreated { repo, .. })
-
                            | CiEvent::V1(CiEventV1::PatchUpdated { repo, .. }) => {
-
                                if Self::is_public_repo(&self.args.profile, repo) {
-
                                    Some(Ok(event))
-
                                } else {
-
                                    None
-
                                }
+
        if let Some(dirname) = &self.dirname
+
            && dirname.exists()
+
        {
+
            let runs = self.args.db.get_all_runs()?;
+

+
            // Create list of events, except ones for private
+
            // repositories.
+
            let events: Result<Vec<QueuedCiEvent>, PageError> = self
+
                .args
+
                .db
+
                .queued_ci_events()?
+
                .iter()
+
                .filter_map(|id| match self.args.db.get_queued_ci_event(id) {
+
                    Ok(Some(event)) => match event.event() {
+
                        CiEvent::V1(CiEventV1::Shutdown) => Some(Ok(event)),
+
                        CiEvent::V1(CiEventV1::BranchCreated { repo, .. })
+
                        | CiEvent::V1(CiEventV1::BranchUpdated { repo, .. })
+
                        | CiEvent::V1(CiEventV1::PatchCreated { repo, .. })
+
                        | CiEvent::V1(CiEventV1::PatchUpdated { repo, .. }) => {
+
                            if Self::is_public_repo(&self.args.profile, repo) {
+
                                Some(Ok(event))
+
                            } else {
+
                                None
                            }
-
                            _ => None,
-
                        },
-
                        Ok(None) => None, // Event is (no longer?) in database.
-
                        Err(_) => None,   // We ignore error here on purpose.
-
                    })
-
                    .collect();
-
                let mut events = events?;
-
                events.sort_by_cached_key(|e| e.timestamp().to_string());
-

-
                let data = PageData {
-
                    timestamp: now()?,
-
                    ci_broker_version: env!("VERSION"),
-
                    ci_broker_git_commit: env!("GIT_HEAD"),
-
                    node_alias: self.node_alias.clone(),
-
                    runs: HashMap::from_iter(
-
                        runs.iter()
-
                            .map(|run| (run.broker_run_id().clone(), run.clone())),
-
                    ),
-
                    events,
-
                    broker_event_counter: 0,
-
                    latest_broker_event: None,
-
                    latest_ci_run: None,
-
                    desc_snippet: self.args.desc_snippet.clone(),
-
                };
+
                        }
+
                        _ => None,
+
                    },
+
                    Ok(None) => None, // Event is (no longer?) in database.
+
                    Err(_) => None,   // We ignore error here on purpose.
+
                })
+
                .collect();
+
            let mut events = events?;
+
            events.sort_by_cached_key(|e| e.timestamp().to_string());
+

+
            let data = PageData {
+
                timestamp: now()?,
+
                ci_broker_version: env!("VERSION"),
+
                ci_broker_git_commit: env!("GIT_HEAD"),
+
                node_alias: self.node_alias.clone(),
+
                runs: HashMap::from_iter(
+
                    runs.iter()
+
                        .map(|run| (run.broker_run_id().clone(), run.clone())),
+
                ),
+
                events,
+
                broker_event_counter: 0,
+
                latest_broker_event: None,
+
                latest_ci_run: None,
+
                desc_snippet: self.args.desc_snippet.clone(),
+
            };

-
                let nameless = String::from("nameless repo");
+
            let nameless = String::from("nameless repo");

-
                // We avoid writing while keeping the lock, to reduce
-
                // contention.
-
                let (status, repos) = {
-
                    let status = data.status_page_as_html()?.to_string();
+
            // We avoid writing while keeping the lock, to reduce
+
            // contention.
+
            let (status, repos) = {
+
                let status = data.status_page_as_html()?.to_string();

-
                    let mut repos = vec![];
-
                    for (_, rid) in data.repos() {
-
                        let basename = rid_to_basename(rid);
-
                        let filename = dirname.join(format!("{basename}.html"));
-
                        let alias = data.repo_alias(rid).unwrap_or(nameless.clone());
-
                        let repopage = data.per_repo_page_as_html(rid, &alias, &data.timestamp);
-
                        repos.push((filename, repopage.to_string()));
-
                    }
+
                let mut repos = vec![];
+
                for (_, rid) in data.repos() {
+
                    let basename = rid_to_basename(rid);
+
                    let filename = dirname.join(format!("{basename}.html"));
+
                    let alias = data.repo_alias(rid).unwrap_or(nameless.clone());
+
                    let repopage = data.per_repo_page_as_html(rid, &alias, &data.timestamp);
+
                    repos.push((filename, repopage.to_string()));
+
                }

-
                    (status, repos)
-
                };
+
                (status, repos)
+
            };

-
                let filename = dirname.join("index.html");
-
                Self::write_file(&filename, &status)?;
+
            let filename = dirname.join("index.html");
+
            Self::write_file(&filename, &status)?;

-
                for (filename, repopage) in repos {
-
                    Self::write_file(&filename, &repopage)?;
-
                }
+
            for (filename, repopage) in repos {
+
                Self::write_file(&filename, &repopage)?;
+
            }

-
                let filename = dirname.join(STATUS_JSON);
-
                let json = data.status_page_as_json()?;
-
                Self::write_file(&filename, &json)?;
+
            let filename = dirname.join(STATUS_JSON);
+
            let json = data.status_page_as_json()?;
+
            Self::write_file(&filename, &json)?;

-
                let filename = dirname.join(BROKER_RSS);
-
                let channel = data.status_as_rss()?;
-
                let rss = channel.to_string();
-
                Self::write_file(&filename, &rss)?;
+
            let filename = dirname.join(BROKER_RSS);
+
            let channel = data.status_as_rss()?;
+
            let rss = channel.to_string();
+
            Self::write_file(&filename, &rss)?;

-
                let filename = dirname.join(FAILURE_RSS);
-
                let channel = data.failed_as_rss()?;
-
                let rss = channel.to_string();
-
                Self::write_file(&filename, &rss)?;
+
            let filename = dirname.join(FAILURE_RSS);
+
            let channel = data.failed_as_rss()?;
+
            let rss = channel.to_string();
+
            Self::write_file(&filename, &rss)?;

-
                let filename = dirname.join(UNFINISHED_RSS);
-
                let channel = data.unfinished_as_rss()?;
-
                let rss = channel.to_string();
-
                Self::write_file(&filename, &rss)?;
-
            }
+
            let filename = dirname.join(UNFINISHED_RSS);
+
            let channel = data.unfinished_as_rss()?;
+
            let rss = channel.to_string();
+
            Self::write_file(&filename, &rss)?;
        }
        Ok(())
    }

    fn is_public_repo(profile: &Profile, rid: &RepoId) -> bool {
-
        if let Ok(repo) = profile.storage.repository(*rid) {
-
            if let Ok(id_doc) = repo.canonical_identity_doc() {
-
                if id_doc.doc.visibility().is_public() {
-
                    return true;
-
                }
-
            }
+
        if let Ok(repo) = profile.storage.repository(*rid)
+
            && let Ok(id_doc) = repo.canonical_identity_doc()
+
            && id_doc.doc.visibility().is_public()
+
        {
+
            return true;
        }
        false
    }
modified src/queueproc.rs
@@ -172,42 +172,43 @@ impl QueueProcessor {
                        expecting_new_events = false;
                    }
                    CiEvent::V1(CiEventV1::Terminate(run_id)) => {
-
                        if let Some(pid) = children.get(run_id) {
-
                            if let Ok(pid) = i32::try_from(*pid) {
-
                                logger::queueproc_action_terminate(run_id);
-
                                unsafe {
-
                                    kill(-pid, SIGKILL);
-
                                }
+
                        if let Some(pid) = children.get(run_id)
+
                            && let Ok(pid) = i32::try_from(*pid)
+
                        {
+
                            logger::queueproc_action_terminate(run_id);
+
                            unsafe {
+
                                kill(-pid, SIGKILL);
                            }
                        }
                    }
+

                    _ => (),
                };
            }

            // If we may spawn another adapter, pick an event from the queue
            // and run the adapters.
-
            if handles.len() < self.concurrent_adapters {
-
                if let Some(qe) = self.pick_event(&queue) {
-
                    // Remove picked event from queue so we don't re-process it. If we don't
-
                    // do this, and we crash when processing the event, we'll re-process it
-
                    // again and again. It seems better to discard an event, and skip running
-
                    // CI, rather than getting stuck on a specific event.
-
                    queue.remove(&qe);
-
                    self.drop_event(&qe)?;
-

-
                    match self.matching_adapters(qe.event()) {
-
                        Ok(Some(adapters)) => {
-
                            let p = self.processor()?;
-
                            let repoid = qe.event().repository().copied();
-
                            self.current.insert(qe.event().repository());
-
                            let known = self.known_job_cobs.clone();
-
                            let h = spawn(move || p.pick_and_process_one(qe, adapters, known));
-
                            handles.push((repoid, h));
-
                        }
-
                        Ok(None) => {}
-
                        Err(_) => {}
+
            if handles.len() < self.concurrent_adapters
+
                && let Some(qe) = self.pick_event(&queue)
+
            {
+
                // Remove picked event from queue so we don't re-process it. If we don't
+
                // do this, and we crash when processing the event, we'll re-process it
+
                // again and again. It seems better to discard an event, and skip running
+
                // CI, rather than getting stuck on a specific event.
+
                queue.remove(&qe);
+
                self.drop_event(&qe)?;
+

+
                match self.matching_adapters(qe.event()) {
+
                    Ok(Some(adapters)) => {
+
                        let p = self.processor()?;
+
                        let repoid = qe.event().repository().copied();
+
                        self.current.insert(qe.event().repository());
+
                        let known = self.known_job_cobs.clone();
+
                        let h = spawn(move || p.pick_and_process_one(qe, adapters, known));
+
                        handles.push((repoid, h));
                    }
+
                    Ok(None) => {}
+
                    Err(_) => {}
                }
            }

@@ -458,10 +459,10 @@ struct CurrentlyPicked {

impl CurrentlyPicked {
    fn insert(&mut self, repoid: Option<&RepoId>) {
-
        if let Some(repoid) = repoid {
-
            if let Ok(mut set) = self.set.lock() {
-
                set.insert(*repoid);
-
            }
+
        if let Some(repoid) = repoid
+
            && let Ok(mut set) = self.set.lock()
+
        {
+
            set.insert(*repoid);
        }
    }