Radish alpha
h
rad:z3gqcJUoA1n9HaHKufZs5FCSGazv5
Radicle Heartwood Protocol & Stack
Radicle
Git
storage: Rewrite temporary repositories for clones
Merged lorenz opened 8 months ago

On Windows, all attempts to clone repositories failed with

Fetch failed for rad:… from z6Mk…: Access is denied. (os error 5)

The reason is that, unlike Unix-like systems, Windows forbids moving directories that are in use.

To improve the situation, take explicit control over exactly what is moved and removed by implementing cleanup manually, instead of relying on impl Drop for tempfile::TempDir.

fn lock_repository is renamed to fn temporary_repository because it does not do any locking. Also, the directory name now contains “tmp” instead of “lock”.

In the future (once Rust 1.89 is a little less cutting edge and more widely available), we may opt for actual locking via std::fs::File::lock.

9 files changed +227 -120 5caa7b30 2149770a
modified crates/radicle-cli/tests/commands.rs
@@ -1375,7 +1375,7 @@ fn rad_clone_partial_fail() {
    eve.connect(&bob);
    eve.routes_to(&[(acme, carol), (acme, bob.id), (acme, alice.id)]);
    bob.storage.repository(acme).unwrap().remove().unwrap(); // Cause the fetch from Bob to fail.
-
    bob.storage.lock_repository(acme).ok(); // Prevent repo from being re-fetched.
+
    bob.storage.temporary_repository(acme).ok(); // Prevent repo from being re-fetched.

    test(
        "examples/rad-clone-partial-fail.md",
modified crates/radicle-fetch/src/handle.rs
@@ -12,9 +12,9 @@ use crate::policy::{Allowed, BlockList};
use crate::transport::{ConnectionStream, Transport};

/// The handle used for pulling or cloning changes from a remote peer.
-
pub struct Handle<S> {
+
pub struct Handle<R, S> {
    pub(crate) local: PublicKey,
-
    pub(crate) repo: Repository,
+
    repo: R,
    pub(crate) allowed: Allowed,
    pub(crate) transport: Transport<S>,
    /// The set of keys we will ignore when fetching from a
@@ -29,10 +29,36 @@ pub struct Handle<S> {
    pub(crate) interrupt: Arc<AtomicBool>,
}

-
impl<S> Handle<S> {
+
impl<R, S> Handle<R, S> {
+
    pub fn is_blocked(&self, key: &PublicKey) -> bool {
+
        self.blocked.is_blocked(key)
+
    }
+

+
    #[inline]
+
    pub fn local(&self) -> &PublicKey {
+
        &self.local
+
    }
+

+
    pub fn interrupt_pack_writer(&mut self) {
+
        self.interrupt.store(true, atomic::Ordering::Relaxed);
+
    }
+

+
    pub fn allowed(&self) -> Allowed {
+
        self.allowed.clone()
+
    }
+

+
    pub fn into_inner(self) -> R {
+
        self.repo
+
    }
+
}
+

+
impl<R, S> Handle<R, S>
+
where
+
    R: AsRef<Repository>,
+
{
    pub fn new(
        local: PublicKey,
-
        repo: Repository,
+
        repo: R,
        follow: Allowed,
        blocked: BlockList,
        connection: S,
@@ -40,8 +66,12 @@ impl<S> Handle<S> {
    where
        S: ConnectionStream,
    {
-
        let git_dir = repo.backend.path().to_path_buf();
-
        let transport = Transport::new(git_dir, BString::from(repo.id.canonical()), connection);
+
        let git_dir = repo.as_ref().backend.path().to_path_buf();
+
        let transport = Transport::new(
+
            git_dir,
+
            BString::from(repo.as_ref().id.canonical()),
+
            connection,
+
        );

        Ok(Self {
            local,
@@ -53,32 +83,13 @@ impl<S> Handle<S> {
        })
    }

-
    pub fn is_blocked(&self, key: &PublicKey) -> bool {
-
        self.blocked.is_blocked(key)
-
    }
-

+
    #[inline]
    pub fn repository(&self) -> &Repository {
-
        &self.repo
-
    }
-

-
    pub fn repository_mut(&mut self) -> &mut Repository {
-
        &mut self.repo
-
    }
-

-
    pub fn local(&self) -> &PublicKey {
-
        &self.local
-
    }
-

-
    pub fn interrupt_pack_writer(&mut self) {
-
        self.interrupt.store(true, atomic::Ordering::Relaxed);
+
        self.repo.as_ref()
    }

    pub fn verified(&self, head: Oid) -> Result<Doc, DocError> {
-
        Ok(self.repo.identity_doc_at(head)?.doc)
-
    }
-

-
    pub fn allowed(&self) -> Allowed {
-
        self.allowed.clone()
+
        Ok(self.repository().identity_doc_at(head)?.doc)
    }
}

modified crates/radicle-fetch/src/lib.rs
@@ -16,6 +16,7 @@ use gix_protocol::handshake;
pub use gix_protocol::{transport::bstr::ByteSlice, RemoteProgress};
pub use handle::Handle;
pub use policy::{Allowed, BlockList, Scope};
+
use radicle::storage::git::Repository;
pub use state::{FetchLimit, FetchResult};
pub use transport::Transport;

@@ -47,13 +48,14 @@ pub enum Error {
/// It is expected that the local peer has a copy of the repository
/// and is pulling new changes. If the repository does not exist, then
/// [`clone`] should be used.
-
pub fn pull<S>(
-
    handle: &mut Handle<S>,
+
pub fn pull<R, S>(
+
    handle: &mut Handle<R, S>,
    limit: FetchLimit,
    remote: PublicKey,
    refs_at: Option<Vec<RefsAt>>,
) -> Result<FetchResult, Error>
where
+
    R: AsRef<Repository>,
    S: transport::ConnectionStream,
{
    let start = Instant::now();
@@ -73,7 +75,7 @@ where
    log::debug!(
        target: "fetch",
        "Finished pull of {} ({}ms)",
-
        handle.repo.id(),
+
        handle.repository().id(),
        start.elapsed().as_millis()
    );
    result
@@ -83,12 +85,13 @@ where
///
/// It is expected that the local peer has an empty repository which
/// they want to populate with the `remote`'s view of the project.
-
pub fn clone<S>(
-
    handle: &mut Handle<S>,
+
pub fn clone<R, S>(
+
    handle: &mut Handle<R, S>,
    limit: FetchLimit,
    remote: PublicKey,
) -> Result<FetchResult, Error>
where
+
    R: AsRef<Repository>,
    S: transport::ConnectionStream,
{
    let start = Instant::now();
@@ -101,7 +104,7 @@ where
        .run(handle, &handshake, limit, remote, None)
        .map_err(Error::Protocol);
    let elapsed = start.elapsed().as_millis();
-
    let rid = handle.repo.id();
+
    let rid = handle.repository().id();

    match &result {
        Ok(_) => {
@@ -120,7 +123,7 @@ where
    result
}

-
fn perform_handshake<S>(handle: &mut Handle<S>) -> Result<handshake::Outcome, Error>
+
fn perform_handshake<R, S>(handle: &mut Handle<R, S>) -> Result<handshake::Outcome, Error>
where
    S: transport::ConnectionStream,
{
modified crates/radicle-fetch/src/sigrefs.rs
@@ -1,6 +1,7 @@
use std::collections::{BTreeMap, BTreeSet};
use std::ops::{Deref, Not as _};

+
use radicle::storage::git::Repository;
pub use radicle::storage::refs::SignedRefsAt;
pub use radicle::storage::{git::Validation, Validations};
use radicle::{crypto::PublicKey, storage::ValidateRepository};
@@ -53,10 +54,13 @@ impl<T> DelegateStatus<T> {

    /// Construct a `DelegateStatus` with [`SignedRefsAt`] signed reference
    /// data, if it can be found in `repo`.
-
    pub fn load<S>(
+
    pub fn load<R, S>(
        self,
-
        cached: &Cached<S>,
-
    ) -> Result<DelegateStatus<Option<SignedRefsAt>>, radicle::storage::refs::Error> {
+
        cached: &Cached<R, S>,
+
    ) -> Result<DelegateStatus<Option<SignedRefsAt>>, radicle::storage::refs::Error>
+
    where
+
        R: AsRef<Repository>,
+
    {
        let remote = *self.remote();
        self.traverse(|_| cached.load(&remote))
    }
@@ -102,10 +106,13 @@ impl RemoteRefs {
    ///
    /// If the sigrefs are missing for a given remote, regardless of delegate
    /// status, then that remote is filtered out.
-
    pub(crate) fn load<'a, S>(
-
        cached: &Cached<S>,
+
    pub(crate) fn load<'a, R, S>(
+
        cached: &Cached<R, S>,
        remotes: impl Iterator<Item = &'a PublicKey>,
-
    ) -> Result<Self, error::RemoteRefs> {
+
    ) -> Result<Self, error::RemoteRefs>
+
    where
+
        R: AsRef<Repository>,
+
    {
        remotes
            .filter_map(|id| match cached.load(id) {
                Ok(None) => None,
modified crates/radicle-fetch/src/state.rs
@@ -8,6 +8,7 @@ use radicle::identity::{Did, Doc, DocError};

use radicle::prelude::Verified;
use radicle::storage;
+
use radicle::storage::git::Repository;
use radicle::storage::refs::RefsAt;
use radicle::storage::{
    git::Validation, Remote, RemoteId, RemoteRepository, Remotes, ValidateRepository, Validations,
@@ -196,7 +197,10 @@ impl FetchState {
        ap
    }

-
    pub(crate) fn as_cached<'a, S>(&'a mut self, handle: &'a mut Handle<S>) -> Cached<'a, S> {
+
    pub(crate) fn as_cached<'a, R, S>(
+
        &'a mut self,
+
        handle: &'a mut Handle<R, S>,
+
    ) -> Cached<'a, R, S> {
        Cached {
            handle,
            state: self,
@@ -207,13 +211,14 @@ impl FetchState {
impl FetchState {
    /// Perform the ls-refs and fetch for the given `step`. The result
    /// of these processes is kept track of in the internal state.
-
    pub(super) fn run_stage<S, F>(
+
    pub(super) fn run_stage<R, S, F>(
        &mut self,
-
        handle: &mut Handle<S>,
+
        handle: &mut Handle<R, S>,
        handshake: &handshake::Outcome,
        step: &F,
    ) -> Result<BTreeSet<PublicKey>, error::Step>
    where
+
        R: AsRef<Repository>,
        S: transport::ConnectionStream,
        F: ProtocolStage,
    {
@@ -229,7 +234,7 @@ impl FetchState {
        log::trace!(target: "fetch", "Received refs {refs:?}");
        step.pre_validate(&refs)?;

-
        let wants_haves = step.wants_haves(&handle.repo, &refs)?;
+
        let wants_haves = step.wants_haves(handle.repository(), &refs)?;
        if !wants_haves.wants.is_empty() {
            let keepfile =
                handle
@@ -261,7 +266,7 @@ impl FetchState {
            }
        }

-
        let up = step.prepare_updates(self, &handle.repo, &refs)?;
+
        let up = step.prepare_updates(self, handle.repository(), &refs)?;
        self.update_all(up.tips);

        Ok(fetched)
@@ -280,9 +285,9 @@ impl FetchState {
    /// The resulting [`sigrefs::RemoteRefs`] will be the set of
    /// `rad/sigrefs` of the fetched remotes.
    #[allow(clippy::too_many_arguments)]
-
    fn run_special_refs<S>(
+
    fn run_special_refs<R, S>(
        &mut self,
-
        handle: &mut Handle<S>,
+
        handle: &mut Handle<R, S>,
        handshake: &handshake::Outcome,
        delegates: BTreeSet<PublicKey>,
        threshold: usize,
@@ -291,6 +296,7 @@ impl FetchState {
        refs_at: Option<Vec<RefsAt>>,
    ) -> Result<sigrefs::RemoteRefs, error::Protocol>
    where
+
        R: AsRef<Repository>,
        S: transport::ConnectionStream,
    {
        match refs_at {
@@ -348,15 +354,16 @@ impl FetchState {
    ///      of updating tips.
    ///   7. Apply the valid tips, iff no delegates failed validation.
    ///   8. Signal to the other side that the process has completed.
-
    pub(super) fn run<S>(
+
    pub(super) fn run<R, S>(
        mut self,
-
        handle: &mut Handle<S>,
+
        handle: &mut Handle<R, S>,
        handshake: &handshake::Outcome,
        limit: FetchLimit,
        remote: PublicKey,
        refs_at: Option<Vec<RefsAt>>,
    ) -> Result<FetchResult, error::Protocol>
    where
+
        R: AsRef<Repository>,
        S: transport::ConnectionStream,
    {
        let start = Instant::now();
@@ -493,13 +500,14 @@ impl FetchState {
                    remote,
                    data: Some(sigrefs),
                } => {
-
                    if let Some(SignedRefsAt { at, .. }) = SignedRefsAt::load(remote, &handle.repo)?
+
                    if let Some(SignedRefsAt { at, .. }) =
+
                        SignedRefsAt::load(remote, handle.repository())?
                    {
                        // Prune non-delegates if they're behind or
                        // diverged. A diverged case is non-fatal for
                        // delegates.
                        if matches!(
-
                            repository::ancestry(&handle.repo, at, sigrefs.at)?,
+
                            repository::ancestry(handle.repository(), at, sigrefs.at)?,
                            repository::Ancestry::Behind | repository::Ancestry::Diverged
                        ) {
                            self.prune(&remote);
@@ -523,9 +531,10 @@ impl FetchState {
                    remote,
                    data: Some(sigrefs),
                } => {
-
                    if let Some(SignedRefsAt { at, .. }) = SignedRefsAt::load(remote, &handle.repo)?
+
                    if let Some(SignedRefsAt { at, .. }) =
+
                        SignedRefsAt::load(remote, handle.repository())?
                    {
-
                        let ancestry = repository::ancestry(&handle.repo, at, sigrefs.at)?;
+
                        let ancestry = repository::ancestry(handle.repository(), at, sigrefs.at)?;
                        if matches!(ancestry, repository::Ancestry::Behind) {
                            log::trace!(target: "fetch", "Advertised `rad/sigrefs` {} is behind {at} for {remote}", sigrefs.at);
                            self.prune(&remote);
@@ -566,7 +575,7 @@ impl FetchState {
        // delegates that pass the threshold.
        if valid_delegates.len() >= threshold {
            let applied = repository::update(
-
                &handle.repo,
+
                handle.repository(),
                self.tips
                    .clone()
                    .into_values()
@@ -596,12 +605,15 @@ impl FetchState {

/// A cached version of [`Handle`] by using the underlying
/// [`FetchState`]'s data for performing lookups.
-
pub(crate) struct Cached<'a, S> {
-
    handle: &'a mut Handle<S>,
+
pub(crate) struct Cached<'a, R, S> {
+
    handle: &'a mut Handle<R, S>,
    state: &'a mut FetchState,
}

-
impl<S> Cached<'_, S> {
+
impl<R, S> Cached<'_, R, S>
+
where
+
    R: AsRef<Repository>,
+
{
    /// Resolves `refname` to its [`ObjectId`] by first looking at the
    /// [`FetchState`] and falling back to the [`Handle::refdb`].
    pub fn refname_to_id<'b, N>(
@@ -613,7 +625,7 @@ impl<S> Cached<'_, S> {
    {
        let refname = refname.into();
        match self.state.refs.refname_to_id(refname.clone()) {
-
            None => repository::refname_to_id(&self.handle.repo, refname),
+
            None => repository::refname_to_id(self.handle.repository(), refname),
            Some(oid) => Ok(Some(oid)),
        }
    }
@@ -638,8 +650,8 @@ impl<S> Cached<'_, S> {

    pub fn load(&self, remote: &PublicKey) -> Result<Option<SignedRefsAt>, sigrefs::error::Load> {
        match self.state.sigrefs.get(remote) {
-
            None => SignedRefsAt::load(*remote, &self.handle.repo),
-
            Some(tip) => SignedRefsAt::load_at(*tip, *remote, &self.handle.repo).map(Some),
+
            None => SignedRefsAt::load(*remote, self.handle.repository()),
+
            Some(tip) => SignedRefsAt::load_at(*tip, *remote, self.handle.repository()).map(Some),
        }
    }

@@ -649,11 +661,14 @@ impl<S> Cached<'_, S> {
    }
}

-
impl<S> RemoteRepository for Cached<'_, S> {
+
impl<R, S> RemoteRepository for Cached<'_, R, S>
+
where
+
    R: AsRef<Repository>,
+
{
    fn remote(&self, remote: &RemoteId) -> Result<Remote, storage::refs::Error> {
        // N.b. this is unused so we just delegate to the underlying
        // repository for a correct implementation.
-
        self.handle.repo.remote(remote)
+
        self.handle.repository().remote(remote)
    }

    fn remotes(&self) -> Result<Remotes<Verified>, storage::refs::Error> {
@@ -665,11 +680,14 @@ impl<S> RemoteRepository for Cached<'_, S> {
    }

    fn remote_refs_at(&self) -> Result<Vec<RefsAt>, storage::refs::Error> {
-
        self.handle.repo.remote_refs_at()
+
        self.handle.repository().remote_refs_at()
    }
}

-
impl<S> ValidateRepository for Cached<'_, S> {
+
impl<R, S> ValidateRepository for Cached<'_, R, S>
+
where
+
    R: AsRef<Repository>,
+
{
    // N.b. we don't verify the `rad/id` of each remote since they may
    // not have a reference to the COB if they have not interacted
    // with it.
modified crates/radicle-node/src/worker/fetch.rs
@@ -1,5 +1,6 @@
use radicle::identity::doc::CanonicalRefsError;
use radicle::identity::CanonicalRefs;
+
use radicle::storage::git::TempRepository;
pub(crate) use radicle_protocol::worker::fetch::error;

use std::collections::BTreeSet;
@@ -27,11 +28,10 @@ use super::channels::ChannelsFlush;

pub enum Handle {
    Clone {
-
        handle: radicle_fetch::Handle<ChannelsFlush>,
-
        tmp: tempfile::TempDir,
+
        handle: radicle_fetch::Handle<TempRepository, ChannelsFlush>,
    },
    Pull {
-
        handle: radicle_fetch::Handle<ChannelsFlush>,
+
        handle: radicle_fetch::Handle<Repository, ChannelsFlush>,
        notifications: node::notifications::StoreWriter,
    },
}
@@ -55,9 +55,9 @@ impl Handle {
                notifications,
            })
        } else {
-
            let (repo, tmp) = storage.lock_repository(rid)?;
+
            let repo = storage.temporary_repository(rid)?;
            let handle = radicle_fetch::Handle::new(local, repo, follow, blocked, channels)?;
-
            Ok(Handle::Clone { handle, tmp })
+
            Ok(Handle::Clone { handle })
        }
    }

@@ -72,11 +72,18 @@ impl Handle {
        refs_at: Option<Vec<RefsAt>>,
    ) -> Result<FetchResult, error::Fetch> {
        let (result, clone, notifs) = match self {
-
            Self::Clone { mut handle, tmp } => {
+
            Self::Clone { mut handle } => {
                log::debug!(target: "worker", "{} cloning from {remote}", handle.local());
-
                let result = radicle_fetch::clone(&mut handle, limit, remote)?;
-
                mv(tmp, storage, &rid)?;
-
                (result, true, None)
+
                match radicle_fetch::clone(&mut handle, limit, remote) {
+
                    Err(err) => {
+
                        handle.into_inner().cleanup();
+
                        return Err(err.into());
+
                    }
+
                    Ok(result) => {
+
                        handle.into_inner().mv(storage.path_of(&rid))?;
+
                        (result, true, None)
+
                    }
+
                }
            }
            Self::Pull {
                mut handle,
@@ -164,35 +171,6 @@ impl Handle {
    }
}

-
/// In the case of cloning, we have performed the fetch into a
-
/// temporary directory -- ensuring that no concurrent operations
-
/// see an empty repository.
-
///
-
/// At the end of the clone, we perform a rename of the temporary
-
/// directory to the storage repository.
-
///
-
/// # Errors
-
///   - Will fail if `storage` contains `rid` already.
-
fn mv(tmp: tempfile::TempDir, storage: &Storage, rid: &RepoId) -> Result<(), error::Fetch> {
-
    use std::io::{Error, ErrorKind};
-

-
    let from = tmp.path();
-
    let to = storage.path_of(rid);
-

-
    if !to.exists() {
-
        std::fs::rename(from, to)?;
-
    } else {
-
        log::warn!(target: "worker", "Refusing to move cloned repository {rid} already exists");
-
        return Err(Error::new(
-
            ErrorKind::AlreadyExists,
-
            format!("repository already exists {to:?}"),
-
        )
-
        .into());
-
    }
-

-
    Ok(())
-
}
-

// Post notifications for the given refs.
fn notify(
    rid: &RepoId,
modified crates/radicle/Cargo.toml
@@ -11,7 +11,7 @@ rust-version.workspace = true

[features]
default = []
-
test = ["qcheck", "radicle-crypto/test"]
+
test = ["tempfile", "qcheck", "radicle-crypto/test"]
logger = ["colored", "chrono"]

[dependencies]
@@ -42,7 +42,7 @@ serde_json = { workspace = true, features = ["preserve_order"] }
serde-untagged = "0.1.7"
siphasher = "1.0.0"
sqlite = { workspace = true, features = ["bundled"] }
-
tempfile = { workspace = true }
+
tempfile = { workspace = true, optional = true }
thiserror = { workspace = true }
unicode-normalization = { version = "0.1" }

modified crates/radicle/src/storage/git.rs
@@ -2,6 +2,9 @@
pub mod cob;
pub mod transport;

+
pub mod temp;
+
pub use temp::TempRepository;
+

use std::collections::{BTreeMap, BTreeSet, HashMap};
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
@@ -9,7 +12,6 @@ use std::sync::LazyLock;
use std::{fs, io};

use crypto::Verified;
-
use tempfile::TempDir;

use crate::git::canonical::Quorum;
use crate::identity::crefs::GetCanonicalRefs as _;
@@ -121,9 +123,9 @@ impl ReadStorage for Storage {
            if path.file_name().to_string_lossy().starts_with('.') {
                continue;
            }
-
            // Skip lock files.
+
            // Skip temporary repositories
            if let Some(ext) = path.path().extension() {
-
                if ext == "lock" {
+
                if ext == TempRepository::EXT {
                    continue;
                }
            }
@@ -213,23 +215,18 @@ impl Storage {

    /// Create a [`Repository`] in a temporary directory.
    ///
-
    /// N.b. it is important to keep the [`TempDir`] in scope while
-
    /// using the [`Repository`]. If it is dropped, any action on the
-
    /// `Repository` will fail.
-
    pub fn lock_repository(&self, rid: RepoId) -> Result<(Repository, TempDir), RepositoryError> {
+
    /// This is used to prevent other processes accessing it during
+
    /// initialization. Usually, callers will want to move the repository
+
    /// to its destination after initialization in the temporary location.
+
    pub fn temporary_repository(&self, rid: RepoId) -> Result<TempRepository, RepositoryError> {
        if self.contains(&rid)? {
            return Err(Error::Io(io::Error::new(
                io::ErrorKind::AlreadyExists,
-
                format!("refusing to create '{rid}.lock'"),
+
                format!("refusing to create temporary repository for {rid}"),
            ))
            .into());
        }
-
        let tmp = tempfile::Builder::new()
-
            .prefix(&rid.canonical())
-
            .suffix(".lock")
-
            .tempdir_in(self.path())
-
            .map_err(Error::from)?;
-
        Ok((Repository::create(tmp.path(), rid, &self.info)?, tmp))
+
        TempRepository::new(self.path(), rid, &self.info)
    }

    pub fn path(&self) -> &Path {
@@ -285,6 +282,12 @@ pub struct Repository {
    pub backend: git2::Repository,
}

+
impl AsRef<Repository> for Repository {
+
    fn as_ref(&self) -> &Repository {
+
        self
+
    }
+
}
+

impl git::canonical::effects::Ancestry for Repository {
    fn graph_ahead_behind(
        &self,
added crates/radicle/src/storage/git/temp.rs
@@ -0,0 +1,87 @@
+
use std::io;
+
use std::path::{Path, PathBuf};
+

+
use crate::prelude::RepoId;
+

+
use super::{Repository, RepositoryError, UserInfo};
+

+
/// A [`Repository`] that is created for temporary operations, such as cloning.
///
/// When the `TempRepository` is no longer needed, call one of the destructors:
///
///   - [`TempRepository::cleanup`]: remove the repository directory
///   - [`TempRepository::mv`]: move the repository directory to a final
///     destination and remove the old directory
///
/// [`TempRepository`] implements [`AsRef`] so that the [`Repository`] can be
/// used in places where a [`Repository`] is needed.
pub struct TempRepository {
    // The underlying repository, created inside the temporary directory.
    repo: Repository,
    // Path of the temporary directory; kept so the destructors can remove
    // or rename it after `repo` has been dropped.
    path: PathBuf,
}
+

+
impl TempRepository {
+
    /// Extension used for the directory
+
    pub(crate) const EXT: &str = "tmp";
+
    const RANDOMNESS_LENGTH: usize = 6;
+

+
    pub(super) fn new<P>(root: P, rid: RepoId, info: &UserInfo) -> Result<Self, RepositoryError>
+
    where
+
        P: AsRef<Path>,
+
    {
+
        let random: String = std::iter::repeat_with(fastrand::alphanumeric)
+
            .take(Self::RANDOMNESS_LENGTH)
+
            .collect();
+
        let path = root
+
            .as_ref()
+
            .join(format!("{rid}.{random}"))
+
            .with_extension(Self::EXT);
+
        let repo = Repository::create(&path, rid, info)?;
+
        Ok(Self { repo, path })
+
    }
+

+
    /// Clean up the temporary directory of the repository.
+
    ///
+
    /// Note that the repository is dropped first to ensure that there are no
+
    /// handles to the repository, before removing the directory.
+
    pub fn cleanup(self) {
+
        let path = self.path.clone();
+
        drop(self.repo);
+
        Self::remove(&path)
+
    }
+

+
    /// Move the temporary directory of the repository to the new path.
+
    ///
+
    /// If `to` already exists, then the temporary directory is removed, and the
+
    /// repository is not moved.
+
    ///
+
    /// Note that the repository is dropped first to ensure that there are no
+
    /// handles to the repository, before removing the directory.
+
    pub fn mv<P>(self, to: P) -> io::Result<()>
+
    where
+
        P: AsRef<Path>,
+
    {
+
        let to = to.as_ref();
+
        let rid = self.repo.id;
+
        let path = self.path.clone();
+
        drop(self.repo);
+
        if to.exists() {
+
            log::warn!(target: "radicle", "Refusing to move from temporary directory '{}' because destination {rid} already exists. Removing the temporary directory.", self.path.display());
+
            Self::remove(&path);
+
        }
+
        std::fs::rename(path, to)
+
    }
+

+
    fn remove(path: &PathBuf) {
+
        if let Err(err) = std::fs::remove_dir_all(path) {
+
            let path = path.display();
+
            log::error!(target: "worker", "Failed to remove temporary directory '{path}': {err}");
+
        }
+
    }
+
}
+

+
/// Borrow the underlying [`Repository`], so a `TempRepository` can be used
/// anywhere an `AsRef<Repository>` bound is accepted.
impl AsRef<Repository> for TempRepository {
    fn as_ref(&self) -> &Repository {
        &self.repo
    }
}